diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c410b6cdd..3608d961b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -83,6 +83,23 @@ jobs: - name: 'Run sensitive keyword linter' run: 'node scripts/lint.js --sensitive-keywords' + - name: 'Build CLI package' + run: 'npm run build --workspace=packages/cli' + + - name: 'Generate settings schema' + run: 'npm run generate:settings-schema' + + - name: 'Check settings schema is up-to-date' + run: | + if [[ -n $(git status --porcelain packages/vscode-ide-companion/schemas/settings.schema.json) ]]; then + echo "❌ Error: settings.schema.json is out of date!" + echo " Please run: npm run generate:settings-schema" + echo " Then commit the updated schema file." + git diff packages/vscode-ide-companion/schemas/settings.schema.json + exit 1 + fi + echo "✅ Settings schema is up-to-date" + # # Test: Node # diff --git a/.prettierignore b/.prettierignore index c9ae7e56a..5e9d79005 100644 --- a/.prettierignore +++ b/.prettierignore @@ -18,4 +18,5 @@ eslint.config.js gha-creds-*.json junit.xml Thumbs.db +packages/vscode-ide-companion/schemas/settings.schema.json packages/cli/src/services/insight/templates/insightTemplate.ts diff --git a/.qwen/commands/qc/code-review.md b/.qwen/commands/qc/code-review.md new file mode 100644 index 000000000..b5846485a --- /dev/null +++ b/.qwen/commands/qc/code-review.md @@ -0,0 +1,25 @@ +--- +description: Code review a pull request +--- + +You are an expert code reviewer. Follow these steps: + +1. If no PR number is provided in the args, use Bash(\"gh pr list\") to show open PRs +2. If a PR number is provided, use Bash(\"gh pr view \") to get PR details +3. Use Bash(\"gh pr diff \") to get the diff +4. Analyze the changes and provide a thorough code review that includes: + - Overview of what the PR does + - Analysis of code quality and style + - Specific suggestions for improvements + - Any potential issues or risks + +Keep your review concise but thorough. 
Focus on: +- Code correctness +- Following project conventions +- Performance implications +- Test coverage +- Security considerations + +Format your review with clear sections and bullet points. + +PR number: {{args}} diff --git a/.qwen/commands/qc/commit.md b/.qwen/commands/qc/commit.md new file mode 100644 index 000000000..76ef6b417 --- /dev/null +++ b/.qwen/commands/qc/commit.md @@ -0,0 +1,70 @@ +--- +description: Commit staged changes with an AI-generated commit message and push +--- + +# Commit and Push + +## Overview +Generate a clear, concise commit message based on staged changes, confirm with the user, then commit and push. + +## Steps + +### 1. Check repository status +- Run `git status` to check: + - Are there any staged changes? + - Are there unstaged changes? + - What is the current branch? + +### 2. Handle unstaged changes +- If there are unstaged changes, notify the user and list them +- Do NOT add or commit unstaged changes +- Proceed only with staged changes + +### 3. Review staged changes +- Run `git diff --staged` to see all staged changes +- Analyze the changes in depth to understand: + - What files were modified/added/deleted + - The nature of the changes (feature, fix, refactor, docs, etc.) + - The scope and impact of the changes + +### 4. Handle branch logic +- Get current branch name with `git branch --show-current` +- **If current branch is `main` or `master`:** + - Generate a proper branch name based on the changes + - Create and switch to the new branch: `git checkout -b <branch-name>` +- **If current branch is NOT main/master:** + - Check if branch name matches the staged changes + - If branch name doesn't match changes, ask user: + - "Current branch `<branch-name>` doesn't seem to match these changes." + - "Options: (1) Create and switch to a new branch, (2) Commit directly on current branch" + - Wait for user decision + +### 5. 
Generate commit message +- Types: feat, fix, docs, style, refactor, test, chore +- Guidelines: + - Be clear and concise + - Reference issues if mentioned in changes + - Include scope in parentheses when applicable (e.g., `fix(insight):`, `feat(auth):`) + - Add bullet points for detailed changes if it adds more value, otherwise do not use bullets + - Include a footer explaining the purpose/impact of the changes + +**Format:** +``` +<type>(<scope>): <subject> +- <detail> (optional) +- <detail> (optional) +- ... + +This <footer explaining the purpose/impact of the changes>. +``` + +### 6. Present the result and confirm with user +- Present the generated commit message +- Show which branch will be used +- Ask for confirmation: "Proceed with commit and push?" +- Wait for user approval + +### 7. Commit and push +- After user confirms: + - `git commit -m "<message>"` + - `git push -u origin <branch>` (use `-u` for new branches) diff --git a/.qwen/commands/qc/create-issue.md b/.qwen/commands/qc/create-issue.md new file mode 100644 index 000000000..54317621b --- /dev/null +++ b/.qwen/commands/qc/create-issue.md @@ -0,0 +1,42 @@ +--- +description: Draft and submit a GitHub issue based on a user-provided idea +--- + +# Create Issue + +## Overview +Take the user's idea or bug description, investigate the codebase to understand the full context, draft a GitHub issue for review, and submit it once approved. + +## Input +The user provides a brief description of a feature request or bug report: {{args}} + +## Steps + +1. **Understand the request** + - Read the user's description carefully + - Determine whether this is a feature request or a bug report + +2. **Investigate the codebase** + - Search for relevant code, files, and existing behavior related to the request + - Build a thorough understanding of how the current system works + - Identify any related issues or prior art if mentioned + +3. 
**Draft the issue** + - Write a markdown file for the user to review + - Use the appropriate template: + - Feature request: follow @.github/ISSUE_TEMPLATE/feature_request.yml + - Bug report: follow @.github/ISSUE_TEMPLATE/bug_report.yml + - Write from the user's perspective, not as an implementation spec + - Keep the language clear and concise, AVOID internal implementation details + +4. **Review with user** + - Present the draft file to the user + - Iterate on feedback until the user is satisfied + - Do NOT submit until the user explicitly asks to + +5. **Submit the issue** + - When the user confirms, create the issue using `gh issue create` + - Apply the appropriate labels: + - Feature request: `type/feature-request`, `status/needs-triage` + - Bug report: `type/bug`, `status/needs-triage` + - Report back the issue URL diff --git a/.qwen/commands/qc/create-pr.md b/.qwen/commands/qc/create-pr.md new file mode 100644 index 000000000..bf3c3c1e4 --- /dev/null +++ b/.qwen/commands/qc/create-pr.md @@ -0,0 +1,34 @@ +--- +description: Create a pull request based on staged code changes +--- + +# Create PR + +## Overview +Create a well-structured pull request with proper description and title. + +## Steps +1. **Review staged changes** + - Review all staged changes to understand what has been done + - Do not touch unstaged changes + +2. **Prepare branch** + - Create a new branch with proper name if current branch is main + - Ensure all changes are committed + - Push branch to remote + +3. **Write PR description** + - Use PR Template below + - Summarize changes clearly + - Include context and motivation + - List any breaking changes + - Link related issues if provided, or use "No linked issues" + - Add this line at the end of PR body: "🤖 Generated with [Qwen Code](https://github.com/QwenLM/qwen-code)", with a line separator + +4. 
**Set up PR** + - Create PR title and body + - Submit PR with gh command + +## PR Template + +@{.github/pull_request_template.md} \ No newline at end of file diff --git a/.qwen/skills/terminal-capture/SKILL.md b/.qwen/skills/terminal-capture/SKILL.md index adf8fff13..7fc99a18d 100644 --- a/.qwen/skills/terminal-capture/SKILL.md +++ b/.qwen/skills/terminal-capture/SKILL.md @@ -109,6 +109,38 @@ Supported key names: `ArrowUp`, `ArrowDown`, `ArrowLeft`, `ArrowRight`, `Enter`, Auto-screenshot is triggered after the key sequence ends (when the next step is not a `key`). +### `streaming` — Capture During Execution + +Capture multiple screenshots at intervals during long-running output (e.g., progress bars). Optionally generates an animated GIF. + +```typescript +{ + type: 'Run this command: bash progress.sh', + streaming: { + delayMs: 7000, // Wait before first capture (skip initial waiting phase) + intervalMs: 500, // Interval between captures + count: 20, // Maximum number of captures + gif: true, // Generate animated GIF (default: true, requires ffmpeg) + }, +} +``` + +- `delayMs` (optional): Milliseconds to wait after pressing Enter before starting captures. Useful for skipping model thinking/approval time. +- Captures stop early if terminal output is unchanged for 3 consecutive intervals. +- Duplicate frames (no output change) are automatically skipped. + +**GIF prerequisite**: If the scenario uses `streaming` with GIF enabled (default), check if `ffmpeg` is installed before running. If not, ask the user whether they'd like to install it: + +```bash +# Check +which ffmpeg + +# Install (macOS) +brew install ffmpeg +``` + +If the user declines, the scenario still runs — GIF generation is skipped with a warning. + ### `capture` / `captureFull` — Explicit Screenshot Use as a standalone step, or override automatic naming: @@ -178,20 +210,32 @@ This tool is commonly used for visual verification during PR reviews. 
For the co ## Full ScenarioConfig Type ```typescript -interface ScenarioConfig { - name: string; // Scenario name (also used as screenshot subdirectory name) - spawn: string[]; // Launch command ["node", "dist/cli.js", "--yolo"] - flow: FlowStep[]; // Interaction steps - terminal?: { - // Terminal configuration (all optional) - cols?: number; // Number of columns, default 100 - rows?: number; // Number of rows, default 28 - theme?: string; // Theme: dracula|one-dark|github-dark|monokai|night-owl - chrome?: boolean; // macOS window decorations, default true - title?: string; // Window title, default "Terminal" - fontSize?: number; // Font size - cwd?: string; // Working directory (relative to config file) +interface FlowStep { + type?: string; // Input text + key?: string | string[]; // Key press(es) + capture?: string; // Viewport screenshot filename + captureFull?: string; // Full scrollback screenshot filename + streaming?: { + delayMs?: number; // Delay before first capture (default: 0) + intervalMs: number; // Interval between captures in ms + count: number; // Maximum number of captures + gif?: boolean; // Generate animated GIF (default: true) }; - outputDir?: string; // Screenshot output directory (relative to config file) +} + +interface ScenarioConfig { + name: string; // Scenario name (also used as screenshot subdirectory name) + spawn: string[]; // Launch command ["node", "dist/cli.js", "--yolo"] + flow: FlowStep[]; // Interaction steps + terminal?: { + cols?: number; // Number of columns, default 100 + rows?: number; // Number of rows, default 28 + theme?: string; // Theme: dracula|one-dark|github-dark|monokai|night-owl + chrome?: boolean; // macOS window decorations, default true + title?: string; // Window title, default "Terminal" + fontSize?: number; // Font size + cwd?: string; // Working directory (relative to config file) + }; + outputDir?: string; // Screenshot output directory (relative to config file) } ``` diff --git 
a/integration-tests/fixtures/settings-migration/workspaces.json b/integration-tests/fixtures/settings-migration/workspaces.json new file mode 100644 index 000000000..af7a48f84 --- /dev/null +++ b/integration-tests/fixtures/settings-migration/workspaces.json @@ -0,0 +1,189 @@ +{ + "v1Settings": { + "theme": "dark", + "model": "gemini", + "autoAccept": true, + "hideTips": false, + "vimMode": true, + "checkpointing": true, + "disableAutoUpdate": true, + "disableLoadingPhrases": true, + "mcpServers": { + "fetch": { + "command": "node", + "args": ["fetch-server.js"] + } + }, + "customUserSetting": "preserved-value" + }, + "v1ComplexSettings": { + "theme": "dark", + "model": "gemini-1.5-pro", + "autoAccept": false, + "hideTips": true, + "vimMode": false, + "checkpointing": true, + "disableAutoUpdate": true, + "disableUpdateNag": false, + "disableLoadingPhrases": true, + "disableFuzzySearch": false, + "disableCacheControl": true, + "allowedTools": ["read-file", "write-file"], + "allowMCPServers": true, + "autoConfigureMaxOldSpaceSize": true, + "bugCommand": "/bug", + "chatCompression": "auto", + "coreTools": ["edit", "bash"], + "customThemes": [], + "customWittyPhrases": [], + "fileFiltering": true, + "folderTrust": true, + "ideMode": true, + "includeDirectories": ["src", "lib"], + "maxSessionTurns": 50, + "preferredEditor": "vscode", + "sandbox": false, + "summarizeToolOutput": true, + "telemetry": { + "enabled": false + }, + "useRipgrep": true, + "myCustomKey": "custom-value", + "anotherCustomSetting": { + "nested": true, + "items": [1, 2, 3] + } + }, + "v1ArrayAndNullSettings": { + "theme": null, + "model": ["gemini", "claude"], + "autoAccept": false, + "includeDirectories": [], + "disableFuzzySearch": "TRUE", + "disableCacheControl": "FALSE", + "customArray": [{ "key": 1 }] + }, + "v1ParentCollisionSettings": { + "theme": "dark", + "model": "gemini", + "ui": "legacy-ui-string", + "general": "legacy-general-string", + "disableAutoUpdate": true, + 
"disableLoadingPhrases": false, + "notes": { + "fromUser": "preserve-custom" + } + }, + "v1VersionStringSettings": { + "$version": "2", + "theme": "light", + "model": "qwen-plus", + "disableAutoUpdate": "false", + "disableLoadingPhrases": "TRUE", + "ui": { + "hideWindowTitle": true + }, + "customSection": { + "keepMe": true + } + }, + "v2Settings": { + "$version": 2, + "ui": { + "theme": "light", + "accessibility": { + "disableLoadingPhrases": false + } + }, + "general": { + "disableAutoUpdate": false, + "disableUpdateNag": false, + "checkpointing": false + }, + "model": { + "name": "claude" + }, + "context": { + "fileFiltering": { + "disableFuzzySearch": true + } + }, + "mcpServers": {} + }, + "v2MinimalSettings": { + "$version": 2 + }, + "v2BooleanStringSettings": { + "$version": 2, + "general": { + "disableAutoUpdate": "TRUE", + "disableUpdateNag": "false" + }, + "ui": { + "accessibility": { + "disableLoadingPhrases": "FaLsE" + } + }, + "context": { + "fileFiltering": { + "disableFuzzySearch": "TRUE" + } + }, + "model": { + "generationConfig": { + "disableCacheControl": "false" + } + } + }, + "v2PreexistingEnableSettings": { + "$version": 2, + "general": { + "disableAutoUpdate": false, + "disableUpdateNag": true, + "enableAutoUpdate": true + }, + "ui": { + "accessibility": { + "disableLoadingPhrases": true, + "enableLoadingPhrases": true + } + }, + "context": { + "fileFiltering": { + "disableFuzzySearch": false, + "enableFuzzySearch": false + } + }, + "model": { + "generationConfig": { + "disableCacheControl": true, + "enableCacheControl": true + } + } + }, + "v3LegacyDisableSettings": { + "$version": 3, + "general": { + "disableAutoUpdate": true, + "enableAutoUpdate": false + }, + "ui": { + "accessibility": { + "disableLoadingPhrases": false, + "enableLoadingPhrases": true + } + }, + "custom": { + "note": "should remain unchanged in v3" + } + }, + "v999FutureVersionSettings": { + "$version": 999, + "theme": "dark", + "model": "future-model", + 
"disableAutoUpdate": true, + "experimentalFlag": { + "enabled": true + } + } +} diff --git a/integration-tests/hook-integration/hooks.test.ts b/integration-tests/hook-integration/hooks.test.ts new file mode 100644 index 000000000..f134dc1ab --- /dev/null +++ b/integration-tests/hook-integration/hooks.test.ts @@ -0,0 +1,1946 @@ +import { describe, it, expect, beforeEach, afterEach } from 'vitest'; +import { TestRig, validateModelOutput } from '../test-helper.js'; + +/** + * Hooks System Integration Tests + * + * Tests for complete hook system flow including: + * - UserPromptSubmit hooks: Triggered before prompt is sent to LLM + * - Stop hooks: Triggered when agent is about to stop + * + * Test categories: + * - Single hook scenarios (allow, block, modify, context, etc.) + * - Multiple hooks scenarios (parallel, sequential, mixed) + * - Error handling (timeout, missing command, exit codes) + * - Combined hooks (multiple hook types in same session) + */ +describe('Hooks System Integration', () => { + let rig: TestRig; + + beforeEach(() => { + rig = new TestRig(); + }); + + afterEach(async () => { + if (rig) { + await rig.cleanup(); + } + }); + + // ========================================================================== + // UserPromptSubmit Hooks + // Triggered before user prompt is sent to the LLM for processing + // ========================================================================== + describe('UserPromptSubmit Hooks', () => { + describe('Allow Decision', () => { + it('should allow prompt when hook returns allow decision', async () => { + const hookScript = + "console.log(JSON.stringify({decision: 'allow', reason: 'approved by hook'}));"; + + await rig.setup('ups-allow-decision', { + settings: { + hooks: { + enabled: true, + UserPromptSubmit: [ + { + hooks: [ + { + type: 'command', + command: `node -e "${hookScript}"`, + name: 'ups-allow-hook', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Say 
hello'); + expect(result).toBeDefined(); + expect(result.length).toBeGreaterThan(0); + }); + + it('should allow tool execution with allow decision and verify tool was called', async () => { + const hookScript = + "console.log(JSON.stringify({decision: 'allow', reason: 'Tool execution approved'}));"; + + await rig.setup('ups-allow-tool', { + settings: { + hooks: { + enabled: true, + UserPromptSubmit: [ + { + hooks: [ + { + type: 'command', + command: `node -e "${hookScript}"`, + name: 'ups-allow-tool-hook', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + await rig.run('Create a file test.txt with content "hello"'); + + const foundToolCall = await rig.waitForToolCall('write_file'); + expect(foundToolCall).toBeTruthy(); + + const fileContent = rig.readFile('test.txt'); + expect(fileContent).toContain('hello'); + }); + }); + + describe('Block Decision', () => { + it('should block prompt when hook returns block decision', async () => { + const blockScript = `console.log(JSON.stringify({decision: 'block', reason: 'Prompt blocked by security policy'}));`; + + await rig.setup('ups-block-decision', { + settings: { + hooks: { + enabled: true, + UserPromptSubmit: [ + { + hooks: [ + { + type: 'command', + command: `node -e "${blockScript}"`, + name: 'ups-block-hook', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Create a file'); + + // Blocked prompts should show the block reason + expect(result.toLowerCase()).toContain('block'); + }); + + it('should block tool execution when hook returns block and verify no tool was called', async () => { + const blockScript = `console.log(JSON.stringify({decision: 'block', reason: 'File writing blocked by security policy'}));`; + + await rig.setup('ups-block-tool', { + settings: { + hooks: { + enabled: true, + UserPromptSubmit: [ + { + hooks: [ + { + type: 'command', + command: `node -e "${blockScript}"`, + name: 'ups-block-tool-hook', + timeout: 5000, + }, + 
], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Create a file test.txt with "hello"'); + + // Tool should not be called due to blocking hook + const toolLogs = rig.readToolLogs(); + const writeFileCalls = toolLogs.filter( + (t) => + t.toolRequest.name === 'write_file' && + t.toolRequest.success === true, + ); + expect(writeFileCalls).toHaveLength(0); + + // Result should mention the blocking reason + expect(result).toContain('block'); + }); + }); + + describe('Modify Prompt', () => { + it('should use modified prompt when hook provides modification', async () => { + const modifyScript = `console.log(JSON.stringify({decision: 'allow', hookSpecificOutput: {hookEventName: 'UserPromptSubmit', modifiedPrompt: 'Modified prompt content', additionalContext: 'Context added by hook'}}));`; + + await rig.setup('ups-modify-prompt', { + settings: { + hooks: { + enabled: true, + UserPromptSubmit: [ + { + hooks: [ + { + type: 'command', + command: `node -e "${modifyScript}"`, + name: 'ups-modify-hook', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Say test'); + expect(result).toBeDefined(); + }); + }); + + describe('Additional Context', () => { + it('should include additional context in response when hook provides it', async () => { + const contextScript = `console.log(JSON.stringify({decision: 'allow', hookSpecificOutput: {additionalContext: 'Extra context information from hook'}}));`; + + await rig.setup('ups-add-context', { + settings: { + hooks: { + enabled: true, + UserPromptSubmit: [ + { + hooks: [ + { + type: 'command', + command: `node -e "${contextScript}"`, + name: 'ups-context-hook', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('What is 1+1?'); + expect(result).toBeDefined(); + }); + }); + + describe('Timeout Handling', () => { + it('should continue execution when hook times out', async () => { + await 
rig.setup('ups-timeout', { + settings: { + hooks: { + enabled: true, + UserPromptSubmit: [ + { + hooks: [ + { + type: 'command', + command: 'sleep 60', + name: 'ups-timeout-hook', + timeout: 1000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Say timeout test'); + // Should continue despite timeout + expect(result).toBeDefined(); + }); + }); + + describe('Error Handling', () => { + it('should continue execution when hook exits with non-blocking error (exit code 1)', async () => { + await rig.setup('ups-nonblocking-error', { + settings: { + hooks: { + enabled: true, + UserPromptSubmit: [ + { + hooks: [ + { + type: 'command', + command: 'echo warning && exit 1', + name: 'ups-error-hook', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Say error test'); + // Non-blocking error should not prevent execution + expect(result).toBeDefined(); + }); + + it('should block execution when hook exits with blocking error (exit code 2)', async () => { + await rig.setup('ups-blocking-error', { + settings: { + hooks: { + enabled: true, + UserPromptSubmit: [ + { + hooks: [ + { + type: 'command', + command: + 'node -e "console.error(\'Critical security error\'); process.exit(2)"', + name: 'ups-blocking-error-hook', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Create a file'); + expect(result).toBeDefined(); + }); + + it('should continue execution when hook command does not exist', async () => { + await rig.setup('ups-missing-command', { + settings: { + hooks: { + enabled: true, + UserPromptSubmit: [ + { + hooks: [ + { + type: 'command', + command: '/nonexistent/command/path', + name: 'ups-missing-hook', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Say missing test'); + // Missing command should not prevent execution (non-blocking) + expect(result).toBeDefined(); + }); + }); 
+ + describe('Input Format Validation', () => { + it('should receive properly formatted input when hook is called', async () => { + const inputValidationScript = ` +const input = JSON.parse(process.argv[2] || '{}'); +const hasRequired = input.session_id && input.cwd && input.hook_event_name && input.prompt !== undefined; +console.log(JSON.stringify({ + decision: 'allow', + hookSpecificOutput: { + hookEventName: 'UserPromptSubmit', + additionalContext: hasRequired ? 'Valid input format' : 'Invalid input format' + } +})); +`; + + await rig.setup('ups-correct-input', { + settings: { + hooks: { + enabled: true, + UserPromptSubmit: [ + { + hooks: [ + { + type: 'command', + command: `node -e "${inputValidationScript.replace(/\n/g, ' ')}"`, + name: 'ups-input-hook', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Say input test'); + validateModelOutput(result, 'input test', 'UPS: correct input'); + }); + }); + + describe('System Message', () => { + it('should include system message in response when hook provides it', async () => { + const systemMsgScript = `console.log(JSON.stringify({decision: 'allow', systemMessage: 'This is a system message from hook'}));`; + + await rig.setup('ups-system-message', { + settings: { + hooks: { + enabled: true, + UserPromptSubmit: [ + { + hooks: [ + { + type: 'command', + command: `node -e "${systemMsgScript}"`, + name: 'ups-system-msg-hook', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Say system message'); + expect(result).toBeDefined(); + }); + }); + + describe('Multiple UserPromptSubmit Hooks', () => { + it('should block when one of multiple parallel hooks returns block', async () => { + const allowScript = `console.log(JSON.stringify({decision: 'allow', reason: 'Allowed'}));`; + const blockScript = `console.log(JSON.stringify({decision: 'block', reason: 'Blocked by security policy'}));`; + + await 
rig.setup('ups-multi-one-blocks', { + settings: { + hooks: { + enabled: true, + UserPromptSubmit: [ + { + hooks: [ + { + type: 'command', + command: `node -e "${allowScript}"`, + name: 'ups-allow-hook', + timeout: 5000, + }, + { + type: 'command', + command: `node -e "${blockScript}"`, + name: 'ups-block-hook', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Create a file'); + // When any hook blocks, the result should reflect the block + expect(result).toBeDefined(); + expect(result.toLowerCase()).toContain('block'); + }); + + it('should block when first sequential hook returns block', async () => { + const blockScript = `console.log(JSON.stringify({decision: 'block', reason: 'First hook blocks'}));`; + const allowScript = `console.log(JSON.stringify({decision: 'allow', reason: 'This should not run'}));`; + + await rig.setup('ups-seq-first-blocks', { + settings: { + hooks: { + enabled: true, + UserPromptSubmit: [ + { + sequential: true, + hooks: [ + { + type: 'command', + command: `node -e "${blockScript}"`, + name: 'ups-seq-block-hook', + timeout: 5000, + }, + { + type: 'command', + command: `node -e "${allowScript}"`, + name: 'ups-seq-allow-hook', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Create a file'); + // First hook blocks, second should not run + expect(result).toBeDefined(); + expect(result.toLowerCase()).toContain('block'); + }); + + it('should block when second sequential hook returns block', async () => { + const allowScript = `console.log(JSON.stringify({decision: 'allow', reason: 'First allows'}));`; + const blockScript = `console.log(JSON.stringify({decision: 'block', reason: 'Second hook blocks'}));`; + + await rig.setup('ups-seq-second-blocks', { + settings: { + hooks: { + enabled: true, + UserPromptSubmit: [ + { + sequential: true, + hooks: [ + { + type: 'command', + command: `node -e "${allowScript}"`, + name: 
'ups-seq-first-allow', + timeout: 5000, + }, + { + type: 'command', + command: `node -e "${blockScript}"`, + name: 'ups-seq-second-block', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Create a file'); + // Second hook blocks after first allows + expect(result).toBeDefined(); + expect(result.toLowerCase()).toContain('block'); + }); + + it('should handle multiple hooks all returning allow', async () => { + const allow1Script = `console.log(JSON.stringify({decision: 'allow', reason: 'First allows'}));`; + const allow2Script = `console.log(JSON.stringify({decision: 'allow', reason: 'Second allows'}));`; + const allow3Script = `console.log(JSON.stringify({decision: 'allow', reason: 'Third allows'}));`; + + await rig.setup('ups-multi-all-allow', { + settings: { + hooks: { + enabled: true, + UserPromptSubmit: [ + { + hooks: [ + { + type: 'command', + command: `node -e "${allow1Script}"`, + name: 'ups-allow-1', + timeout: 5000, + }, + { + type: 'command', + command: `node -e "${allow2Script}"`, + name: 'ups-allow-2', + timeout: 5000, + }, + { + type: 'command', + command: `node -e "${allow3Script}"`, + name: 'ups-allow-3', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Say hello'); + // All hooks allow, should complete normally + expect(result).toBeDefined(); + expect(result.length).toBeGreaterThan(0); + }); + + it('should handle multiple hooks all returning block', async () => { + const block1Script = `console.log(JSON.stringify({decision: 'block', reason: 'First blocks'}));`; + const block2Script = `console.log(JSON.stringify({decision: 'block', reason: 'Second blocks'}));`; + + await rig.setup('ups-multi-all-block', { + settings: { + hooks: { + enabled: true, + UserPromptSubmit: [ + { + hooks: [ + { + type: 'command', + command: `node -e "${block1Script}"`, + name: 'ups-block-1', + timeout: 5000, + }, + { + type: 'command', + command: `node -e 
"${block2Script}"`, + name: 'ups-block-2', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Create a file'); + // All hooks block + expect(result).toBeDefined(); + expect(result.toLowerCase()).toContain('block'); + }); + + it('should concatenate additional context from multiple hooks', async () => { + const context1Script = `console.log(JSON.stringify({decision: 'allow', hookSpecificOutput: {additionalContext: 'context from hook 1'}}));`; + const context2Script = `console.log(JSON.stringify({decision: 'allow', hookSpecificOutput: {additionalContext: 'context from hook 2'}}));`; + + await rig.setup('ups-multi-context', { + settings: { + hooks: { + enabled: true, + UserPromptSubmit: [ + { + hooks: [ + { + type: 'command', + command: `node -e "${context1Script}"`, + name: 'ups-context-1', + timeout: 5000, + }, + { + type: 'command', + command: `node -e "${context2Script}"`, + name: 'ups-context-2', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Say hello'); + expect(result).toBeDefined(); + }); + + it('should handle hook with error alongside blocking hook', async () => { + const blockScript = `console.log(JSON.stringify({decision: 'block', reason: 'Blocked'}));`; + + await rig.setup('ups-error-with-block', { + settings: { + hooks: { + enabled: true, + UserPromptSubmit: [ + { + hooks: [ + { + type: 'command', + command: '/nonexistent/command', + name: 'ups-error-hook', + timeout: 5000, + }, + { + type: 'command', + command: `node -e "${blockScript}"`, + name: 'ups-block-hook', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Create a file'); + // Block should still work despite error in other hook + expect(result).toBeDefined(); + expect(result.toLowerCase()).toContain('block'); + }); + + it('should handle hook timeout alongside blocking hook', async () => { + const blockScript = 
`console.log(JSON.stringify({decision: 'block', reason: 'Blocked while other times out'}));`; + + await rig.setup('ups-timeout-with-block', { + settings: { + hooks: { + enabled: true, + UserPromptSubmit: [ + { + hooks: [ + { + type: 'command', + command: 'sleep 60', + name: 'ups-timeout-hook', + timeout: 1000, + }, + { + type: 'command', + command: `node -e "${blockScript}"`, + name: 'ups-block-hook', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Create a file'); + // Block should work despite timeout in other hook + expect(result).toBeDefined(); + expect(result.toLowerCase()).toContain('block'); + }); + + it('should handle multiple hook groups with different configurations', async () => { + const allow1Script = `console.log(JSON.stringify({decision: 'allow', reason: 'Group 1 allows'}));`; + const allow2Script = `console.log(JSON.stringify({decision: 'allow', reason: 'Group 2 allows'}));`; + + await rig.setup('ups-multi-groups', { + settings: { + hooks: { + enabled: true, + UserPromptSubmit: [ + { + hooks: [ + { + type: 'command', + command: `node -e "${allow1Script}"`, + name: 'ups-group1-hook', + timeout: 5000, + }, + ], + }, + { + sequential: true, + hooks: [ + { + type: 'command', + command: `node -e "${allow2Script}"`, + name: 'ups-group2-hook', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Say hello'); + expect(result).toBeDefined(); + }); + + it('should block when one group blocks in multiple hook groups', async () => { + const allowScript = `console.log(JSON.stringify({decision: 'allow', reason: 'Group 1 allows'}));`; + const blockScript = `console.log(JSON.stringify({decision: 'block', reason: 'Group 2 blocks'}));`; + + await rig.setup('ups-multi-groups-one-blocks', { + settings: { + hooks: { + enabled: true, + UserPromptSubmit: [ + { + hooks: [ + { + type: 'command', + command: `node -e "${allowScript}"`, + name: 'ups-group1-allow', + 
timeout: 5000, + }, + ], + }, + { + hooks: [ + { + type: 'command', + command: `node -e "${blockScript}"`, + name: 'ups-group2-block', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Create a file'); + // One group blocks, should be blocked + expect(result).toBeDefined(); + expect(result.toLowerCase()).toContain('block'); + }); + + it('should handle modified prompt from multiple hooks', async () => { + const modify1Script = `console.log(JSON.stringify({decision: 'allow', hookSpecificOutput: {modifiedPrompt: 'Modified by hook 1'}}));`; + const modify2Script = `console.log(JSON.stringify({decision: 'allow', hookSpecificOutput: {modifiedPrompt: 'Modified by hook 2'}}));`; + + await rig.setup('ups-multi-modify', { + settings: { + hooks: { + enabled: true, + UserPromptSubmit: [ + { + sequential: true, + hooks: [ + { + type: 'command', + command: `node -e "${modify1Script}"`, + name: 'ups-modify-1', + timeout: 5000, + }, + { + type: 'command', + command: `node -e "${modify2Script}"`, + name: 'ups-modify-2', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Say hello'); + expect(result).toBeDefined(); + }); + + it('should handle system messages from multiple hooks', async () => { + const msg1Script = `console.log(JSON.stringify({decision: 'allow', systemMessage: 'System message 1'}));`; + const msg2Script = `console.log(JSON.stringify({decision: 'allow', systemMessage: 'System message 2'}));`; + + await rig.setup('ups-multi-system-msg', { + settings: { + hooks: { + enabled: true, + UserPromptSubmit: [ + { + hooks: [ + { + type: 'command', + command: `node -e "${msg1Script}"`, + name: 'ups-msg-1', + timeout: 5000, + }, + { + type: 'command', + command: `node -e "${msg2Script}"`, + name: 'ups-msg-2', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Say hello'); + expect(result).toBeDefined(); + }); + }); + }); + 
+ // ========================================================================== + // Stop Hooks + // Triggered when the agent is about to stop execution + // ========================================================================== + describe('Stop Hooks', () => { + describe('Allow Decision', () => { + it('should allow stopping when hook returns allow decision', async () => { + const allowStopScript = `console.log(JSON.stringify({decision: 'allow', reason: 'Stop allowed'}));`; + + await rig.setup('stop-allow', { + settings: { + hooks: { + enabled: true, + Stop: [ + { + hooks: [ + { + type: 'command', + command: `node -e "${allowStopScript}"`, + name: 'stop-allow-hook', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Say stop test'); + expect(result).toBeDefined(); + }); + + it('should allow stopping and verify final response is produced', async () => { + const allowFinalScript = `console.log(JSON.stringify({decision: 'allow', hookSpecificOutput: {additionalContext: 'Final context from stop hook'}}));`; + + await rig.setup('stop-allow-final', { + settings: { + hooks: { + enabled: true, + Stop: [ + { + hooks: [ + { + type: 'command', + command: `node -e "${allowFinalScript}"`, + name: 'stop-final-hook', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Say goodbye'); + expect(result).toBeDefined(); + expect(result.length).toBeGreaterThan(0); + }); + }); + + describe('Block Decision', () => { + it('should block stopping when hook returns block decision', async () => { + const blockStopScript = `console.log(JSON.stringify({decision: 'block', reason: 'Stop blocked by security policy'}));`; + + await rig.setup('stop-block-decision', { + settings: { + hooks: { + enabled: true, + Stop: [ + { + hooks: [ + { + type: 'command', + command: `node -e "${blockStopScript}"`, + name: 'stop-block-hook', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + 
+ const result = await rig.run('Say hello'); + // Blocked stop should show the block reason + expect(result).toBeDefined(); + expect(result.toLowerCase()).toContain('block'); + }); + + it('should block stopping with custom reason', async () => { + const blockReasonScript = `console.log(JSON.stringify({decision: 'block', reason: 'Custom block reason: task incomplete'}));`; + + await rig.setup('stop-block-custom-reason', { + settings: { + hooks: { + enabled: true, + Stop: [ + { + hooks: [ + { + type: 'command', + command: `node -e "${blockReasonScript}"`, + name: 'stop-block-reason-hook', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Say goodbye'); + expect(result).toBeDefined(); + expect(result.toLowerCase()).toContain('block'); + }); + }); + + describe('Continue False', () => { + it('should request continue execution when hook returns continue: false', async () => { + const continueScript = `console.log(JSON.stringify({continue: false, stopReason: 'More work needed'}));`; + + await rig.setup('stop-continue-false', { + settings: { + hooks: { + enabled: true, + Stop: [ + { + hooks: [ + { + type: 'command', + command: `node -e "${continueScript}"`, + name: 'stop-continue-hook', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Say continue'); + // When continue: false, the agent may try to continue + expect(result).toBeDefined(); + }); + }); + + describe('Additional Context', () => { + it('should include additional context in final response', async () => { + const contextScript = `console.log(JSON.stringify({decision: 'allow', hookSpecificOutput: {additionalContext: 'Final context from hook'}}));`; + + await rig.setup('stop-add-context', { + settings: { + hooks: { + enabled: true, + Stop: [ + { + hooks: [ + { + type: 'command', + command: `node -e "${contextScript}"`, + name: 'stop-context-hook', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + 
}, + }); + + const result = await rig.run('What is 3+3?'); + expect(result).toBeDefined(); + }); + + it('should concatenate multiple additionalContext from multiple hooks', async () => { + const context1Script = `console.log(JSON.stringify({decision: 'allow', hookSpecificOutput: {additionalContext: 'context1'}}));`; + const context2Script = `console.log(JSON.stringify({decision: 'allow', hookSpecificOutput: {additionalContext: 'context2'}}));`; + + await rig.setup('stop-multi-context', { + settings: { + hooks: { + enabled: true, + Stop: [ + { + hooks: [ + { + type: 'command', + command: `node -e "${context1Script}"`, + name: 'stop-context-1', + timeout: 5000, + }, + { + type: 'command', + command: `node -e "${context2Script}"`, + name: 'stop-context-2', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Say multi context'); + expect(result).toBeDefined(); + }); + }); + + describe('Stop Reason', () => { + it('should include stop reason when hook provides it', async () => { + const reasonScript = `console.log(JSON.stringify({decision: 'allow', stopReason: 'Custom stop reason from hook'}));`; + + await rig.setup('stop-set-reason', { + settings: { + hooks: { + enabled: true, + Stop: [ + { + hooks: [ + { + type: 'command', + command: `node -e "${reasonScript}"`, + name: 'stop-reason-hook', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Say reason test'); + expect(result).toBeDefined(); + }); + }); + + describe('Timeout Handling', () => { + it('should continue stopping when hook times out', async () => { + await rig.setup('stop-timeout', { + settings: { + hooks: { + enabled: true, + Stop: [ + { + hooks: [ + { + type: 'command', + command: 'sleep 60', + name: 'stop-timeout-hook', + timeout: 1000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Say timeout'); + // Timeout should not prevent stopping + 
expect(result).toBeDefined(); + }); + }); + + describe('Error Handling', () => { + it('should continue stopping when hook has non-blocking error', async () => { + await rig.setup('stop-error', { + settings: { + hooks: { + enabled: true, + Stop: [ + { + hooks: [ + { + type: 'command', + command: 'echo warning && exit 1', + name: 'stop-error-hook', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Say error'); + // Error should not prevent stopping + expect(result).toBeDefined(); + }); + + it('should continue stopping when hook command does not exist', async () => { + await rig.setup('stop-missing-command', { + settings: { + hooks: { + enabled: true, + Stop: [ + { + hooks: [ + { + type: 'command', + command: '/nonexistent/stop/command', + name: 'stop-missing-hook', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Say missing'); + // Missing command should not prevent stopping + expect(result).toBeDefined(); + }); + }); + + describe('System Message', () => { + it('should include system message in final response', async () => { + const systemMsgScript = `console.log(JSON.stringify({decision: 'allow', systemMessage: 'Final system message from stop hook'}));`; + + await rig.setup('stop-system-message', { + settings: { + hooks: { + enabled: true, + Stop: [ + { + hooks: [ + { + type: 'command', + command: `node -e "${systemMsgScript}"`, + name: 'stop-system-msg-hook', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Say final'); + expect(result).toBeDefined(); + }); + }); + + describe('Multiple Stop Hooks', () => { + it('should block when one of multiple parallel stop hooks returns block', async () => { + const allowScript = `console.log(JSON.stringify({decision: 'allow', reason: 'Stop allowed'}));`; + const blockScript = `console.log(JSON.stringify({decision: 'block', reason: 'Stop blocked by security 
policy'}));`; + + await rig.setup('stop-multi-one-blocks', { + settings: { + hooks: { + enabled: true, + Stop: [ + { + hooks: [ + { + type: 'command', + command: `node -e "${allowScript}"`, + name: 'stop-allow-hook', + timeout: 5000, + }, + { + type: 'command', + command: `node -e "${blockScript}"`, + name: 'stop-block-hook', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Say multi stop'); + // When any hook blocks, the result should reflect the block + expect(result).toBeDefined(); + expect(result.toLowerCase()).toContain('block'); + }); + + it('should block when first sequential stop hook returns block', async () => { + const blockScript = `console.log(JSON.stringify({decision: 'block', reason: 'First hook blocks stop'}));`; + const allowScript = `console.log(JSON.stringify({decision: 'allow', reason: 'This should not run'}));`; + + await rig.setup('stop-seq-first-blocks', { + settings: { + hooks: { + enabled: true, + Stop: [ + { + sequential: true, + hooks: [ + { + type: 'command', + command: `node -e "${blockScript}"`, + name: 'stop-seq-block-hook', + timeout: 5000, + }, + { + type: 'command', + command: `node -e "${allowScript}"`, + name: 'stop-seq-allow-hook', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Say sequential stop'); + // First hook blocks, second should not run + expect(result).toBeDefined(); + expect(result.toLowerCase()).toContain('block'); + }); + + it('should block when second sequential stop hook returns block', async () => { + const allowScript = `console.log(JSON.stringify({decision: 'allow', reason: 'First allows'}));`; + const blockScript = `console.log(JSON.stringify({decision: 'block', reason: 'Second hook blocks stop'}));`; + + await rig.setup('stop-seq-second-blocks', { + settings: { + hooks: { + enabled: true, + Stop: [ + { + sequential: true, + hooks: [ + { + type: 'command', + command: `node -e "${allowScript}"`, + 
name: 'stop-seq-first-allow', + timeout: 5000, + }, + { + type: 'command', + command: `node -e "${blockScript}"`, + name: 'stop-seq-second-block', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Say seq second blocks'); + // Second hook blocks after first allows + expect(result).toBeDefined(); + expect(result.toLowerCase()).toContain('block'); + }); + + it('should handle multiple stop hooks all returning allow', async () => { + const allow1Script = `console.log(JSON.stringify({decision: 'allow', reason: 'First allows'}));`; + const allow2Script = `console.log(JSON.stringify({decision: 'allow', reason: 'Second allows'}));`; + const allow3Script = `console.log(JSON.stringify({decision: 'allow', reason: 'Third allows'}));`; + + await rig.setup('stop-multi-all-allow', { + settings: { + hooks: { + enabled: true, + Stop: [ + { + hooks: [ + { + type: 'command', + command: `node -e "${allow1Script}"`, + name: 'stop-allow-1', + timeout: 5000, + }, + { + type: 'command', + command: `node -e "${allow2Script}"`, + name: 'stop-allow-2', + timeout: 5000, + }, + { + type: 'command', + command: `node -e "${allow3Script}"`, + name: 'stop-allow-3', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Say all allow'); + // All hooks allow, should complete normally + expect(result).toBeDefined(); + expect(result.length).toBeGreaterThan(0); + }); + + it('should handle multiple stop hooks all returning block', async () => { + const block1Script = `console.log(JSON.stringify({decision: 'block', reason: 'First blocks'}));`; + const block2Script = `console.log(JSON.stringify({decision: 'block', reason: 'Second blocks'}));`; + + await rig.setup('stop-multi-all-block', { + settings: { + hooks: { + enabled: true, + Stop: [ + { + hooks: [ + { + type: 'command', + command: `node -e "${block1Script}"`, + name: 'stop-block-1', + timeout: 5000, + }, + { + type: 'command', + command: `node -e 
"${block2Script}"`, + name: 'stop-block-2', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Say all block'); + // All hooks block + expect(result).toBeDefined(); + expect(result.toLowerCase()).toContain('block'); + }); + + it('should handle multiple continue: false from different stop hooks', async () => { + const continue1Script = `console.log(JSON.stringify({continue: false, stopReason: 'First needs more work'}));`; + const continue2Script = `console.log(JSON.stringify({continue: false, stopReason: 'Second needs more work'}));`; + + await rig.setup('stop-multi-continue-false', { + settings: { + hooks: { + enabled: true, + Stop: [ + { + hooks: [ + { + type: 'command', + command: `node -e "${continue1Script}"`, + name: 'stop-continue-1', + timeout: 5000, + }, + { + type: 'command', + command: `node -e "${continue2Script}"`, + name: 'stop-continue-2', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Say multi continue'); + // Multiple continue: false should be handled + expect(result).toBeDefined(); + }); + + it('should handle mixed allow and continue: false in stop hooks', async () => { + const allowScript = `console.log(JSON.stringify({decision: 'allow', reason: 'Allow stop'}));`; + const continueScript = `console.log(JSON.stringify({continue: false, stopReason: 'Need more work'}));`; + + await rig.setup('stop-mixed-allow-continue', { + settings: { + hooks: { + enabled: true, + Stop: [ + { + hooks: [ + { + type: 'command', + command: `node -e "${allowScript}"`, + name: 'stop-allow-hook', + timeout: 5000, + }, + { + type: 'command', + command: `node -e "${continueScript}"`, + name: 'stop-continue-hook', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Say mixed'); + expect(result).toBeDefined(); + }); + + it('should handle block with higher priority than continue: false', async () => { + const 
blockScript = `console.log(JSON.stringify({decision: 'block', reason: 'Security block'}));`; + const continueScript = `console.log(JSON.stringify({continue: false, stopReason: 'Need more work'}));`; + + await rig.setup('stop-block-vs-continue', { + settings: { + hooks: { + enabled: true, + Stop: [ + { + hooks: [ + { + type: 'command', + command: `node -e "${blockScript}"`, + name: 'stop-block-priority', + timeout: 5000, + }, + { + type: 'command', + command: `node -e "${continueScript}"`, + name: 'stop-continue-lower', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Say block priority'); + // Block should take priority + expect(result).toBeDefined(); + expect(result.toLowerCase()).toContain('block'); + }); + + it('should handle stop hook with error alongside blocking hook', async () => { + const blockScript = `console.log(JSON.stringify({decision: 'block', reason: 'Blocked'}));`; + + await rig.setup('stop-error-with-block', { + settings: { + hooks: { + enabled: true, + Stop: [ + { + hooks: [ + { + type: 'command', + command: '/nonexistent/command', + name: 'stop-error-hook', + timeout: 5000, + }, + { + type: 'command', + command: `node -e "${blockScript}"`, + name: 'stop-block-hook', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Say error with block'); + // Block should still work despite error in other hook + expect(result).toBeDefined(); + expect(result.toLowerCase()).toContain('block'); + }); + + it('should handle stop hook timeout alongside blocking hook', async () => { + const blockScript = `console.log(JSON.stringify({decision: 'block', reason: 'Blocked while other times out'}));`; + + await rig.setup('stop-timeout-with-block', { + settings: { + hooks: { + enabled: true, + Stop: [ + { + hooks: [ + { + type: 'command', + command: 'sleep 60', + name: 'stop-timeout-hook', + timeout: 1000, + }, + { + type: 'command', + command: `node -e 
"${blockScript}"`, + name: 'stop-block-hook', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Say timeout with block'); + // Block should work despite timeout in other hook + expect(result).toBeDefined(); + expect(result.toLowerCase()).toContain('block'); + }); + }); + }); + + // ========================================================================== + // Multiple Hooks (General) + // Tests for hook execution modes: sequential vs parallel + // ========================================================================== + describe('Multiple Hooks', () => { + describe('Sequential Execution', () => { + it('should execute hooks sequentially when sequential: true', async () => { + const hook1Script = `console.log(JSON.stringify({decision: 'allow', hookSpecificOutput: {additionalContext: 'first'}}));`; + const hook2Script = `console.log(JSON.stringify({decision: 'allow', hookSpecificOutput: {additionalContext: 'second'}}));`; + + await rig.setup('multi-sequential', { + settings: { + hooks: { + enabled: true, + UserPromptSubmit: [ + { + sequential: true, + hooks: [ + { + type: 'command', + command: `node -e "${hook1Script}"`, + name: 'seq-hook-1', + timeout: 5000, + }, + { + type: 'command', + command: `node -e "${hook2Script}"`, + name: 'seq-hook-2', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Say sequential'); + expect(result).toBeDefined(); + }); + + it('should stop at first blocking hook and not execute subsequent', async () => { + const blockScript = `console.log(JSON.stringify({decision: 'block', reason: 'Blocked by first hook'}));`; + const allowScript = `console.log(JSON.stringify({decision: 'allow'}));`; + + await rig.setup('multi-first-blocks', { + settings: { + hooks: { + enabled: true, + UserPromptSubmit: [ + { + sequential: true, + hooks: [ + { + type: 'command', + command: `node -e "${blockScript}"`, + name: 'seq-block-hook', + timeout: 5000, 
+ }, + { + type: 'command', + command: `node -e "${allowScript}"`, + name: 'seq-should-not-run', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Create a file'); + // First hook blocks, second should not run + expect(result.toLowerCase()).toContain('block'); + }); + + it('should pass output from first hook to second hook input', async () => { + const passScript1 = `console.log(JSON.stringify({decision: 'allow', hookSpecificOutput: {additionalContext: 'from first', passthrough: 'data'}}));`; + const passScript2 = `console.log(JSON.stringify({decision: 'allow', hookSpecificOutput: {additionalContext: 'received passthrough'}}));`; + + await rig.setup('multi-passthrough', { + settings: { + hooks: { + enabled: true, + UserPromptSubmit: [ + { + sequential: true, + hooks: [ + { + type: 'command', + command: `node -e "${passScript1}"`, + name: 'passthrough-hook-1', + timeout: 5000, + }, + { + type: 'command', + command: `node -e "${passScript2}"`, + name: 'passthrough-hook-2', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Say passthrough'); + expect(result).toBeDefined(); + }); + }); + + describe('Parallel Execution', () => { + it('should execute hooks in parallel when sequential is not set', async () => { + const hook1Script = `console.log(JSON.stringify({decision: 'allow'}));`; + const hook2Script = `console.log(JSON.stringify({decision: 'allow'}));`; + + await rig.setup('multi-parallel', { + settings: { + hooks: { + enabled: true, + UserPromptSubmit: [ + { + hooks: [ + { + type: 'command', + command: `node -e "${hook1Script}"`, + name: 'parallel-hook-1', + timeout: 5000, + }, + { + type: 'command', + command: `node -e "${hook2Script}"`, + name: 'parallel-hook-2', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Say parallel'); + expect(result).toBeDefined(); + }); + + it('should handle mixed 
success/failure results from parallel hooks', async () => { + const allowScript = `console.log(JSON.stringify({decision: 'allow'}));`; + + await rig.setup('multi-mixed', { + settings: { + hooks: { + enabled: true, + UserPromptSubmit: [ + { + hooks: [ + { + type: 'command', + command: `node -e "${allowScript}"`, + name: 'mixed-allow-hook', + timeout: 5000, + }, + { + type: 'command', + command: '/nonexistent/command', + name: 'mixed-error-hook', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Say mixed'); + // Mixed results: one succeeds, one fails - should continue + expect(result).toBeDefined(); + }); + + it('should allow when any hook returns allow in parallel (OR logic)', async () => { + const blockScript = `console.log(JSON.stringify({decision: 'block', reason: 'blocked'}));`; + const allowScript = `console.log(JSON.stringify({decision: 'allow'}));`; + + await rig.setup('multi-or-logic', { + settings: { + hooks: { + enabled: true, + UserPromptSubmit: [ + { + hooks: [ + { + type: 'command', + command: `node -e "${blockScript}"`, + name: 'block-hook', + timeout: 5000, + }, + { + type: 'command', + command: `node -e "${allowScript}"`, + name: 'allow-hook', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Say or logic'); + // With OR logic, allow should win + expect(result).toBeDefined(); + }); + }); + }); + + // ========================================================================== + // Combined Hooks + // Tests for using multiple hook types (UserPromptSubmit + Stop) together + // ========================================================================== + describe('Combined Hooks', () => { + it('should execute both Stop and UserPromptSubmit hooks in same session', async () => { + const stopScript = `console.log(JSON.stringify({decision: 'allow'}));`; + const upsScript = `console.log(JSON.stringify({decision: 'allow'}));`; + + await 
rig.setup('combined-both-hooks', { + settings: { + hooksConfig: { enabled: true }, + hooks: { + Stop: [ + { + hooks: [ + { + type: 'command', + command: `node -e "${stopScript}"`, + name: 'stop-hook', + timeout: 5000, + }, + ], + }, + ], + UserPromptSubmit: [ + { + hooks: [ + { + type: 'command', + command: `node -e "${upsScript}"`, + name: 'ups-hook', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Say both hooks'); + expect(result).toBeDefined(); + }); + }); + + // ========================================================================== + // Hook Script File Tests + // Tests for executing hooks from external script files + // ========================================================================== + describe('Hook Script File Tests', () => { + it('should execute hook from script file', async () => { + await rig.setup('script-file-hook', { + settings: { + hooksConfig: { enabled: true }, + hooks: { + UserPromptSubmit: [ + { + hooks: [ + { + type: 'command', + command: + "node -e \"console.log(JSON.stringify({decision: 'allow', reason: 'Approved by script file', hookSpecificOutput: {additionalContext: 'Script file executed successfully'}}))\"", + name: 'script-file-hook', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Say script file test'); + expect(result).toBeDefined(); + }); + + it('should execute blocking hook from script file', async () => { + await rig.setup('script-file-block-hook', { + settings: { + hooksConfig: { enabled: true }, + hooks: { + UserPromptSubmit: [ + { + hooks: [ + { + type: 'command', + command: + "node -e \"console.log(JSON.stringify({decision: 'block', reason: 'Blocked by security script'}))\"", + name: 'script-block-hook', + timeout: 5000, + }, + ], + }, + ], + }, + trusted: true, + }, + }); + + const result = await rig.run('Create a file'); + + // Prompt should be blocked + expect(result.toLowerCase()).toContain('block'); 
+ }); + }); +}); diff --git a/integration-tests/settings-migration.test.ts b/integration-tests/settings-migration.test.ts new file mode 100644 index 000000000..fa5446c17 --- /dev/null +++ b/integration-tests/settings-migration.test.ts @@ -0,0 +1,627 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect, beforeEach, afterEach } from 'vitest'; +import { TestRig } from './test-helper.js'; +import { writeFileSync, readFileSync } from 'node:fs'; +import { join } from 'node:path'; + +// Import settings fixtures from unified workspace file +import workspacesSettings from './fixtures/settings-migration/workspaces.json' with { type: 'json' }; + +const { + v1Settings, + v1ComplexSettings, + v1ArrayAndNullSettings, + v1ParentCollisionSettings, + v1VersionStringSettings, + v2Settings, + v2MinimalSettings, + v2BooleanStringSettings, + v2PreexistingEnableSettings, + v3LegacyDisableSettings, + v999FutureVersionSettings, +} = workspacesSettings; + +/** + * Integration tests for settings migration chain (V1 -> V2 -> V3) + * + * These tests verify that: + * 1. V1 settings are automatically migrated to V3 on CLI startup + * 2. V2 settings are automatically migrated to V3 on CLI startup + * 3. V3 settings remain unchanged + * 4. Migration is idempotent (running multiple times produces same result) + */ +describe('settings-migration', () => { + let rig: TestRig; + + beforeEach(() => { + rig = new TestRig(); + }); + + afterEach(async () => { + await rig.cleanup(); + }); + + /** + * Helper to write settings file for an existing test rig. + * This overwrites the settings file created by rig.setup(). 
+ */ + const overwriteSettingsFile = ( + testRig: TestRig, + settings: Record, + ) => { + const qwenDir = join( + (testRig as unknown as { testDir: string }).testDir, + '.qwen', + ); + writeFileSync( + join(qwenDir, 'settings.json'), + JSON.stringify(settings, null, 2), + ); + }; + + /** + * Helper to read settings file from the test directory + */ + const readSettingsFile = (testRig: TestRig): Record => { + const qwenDir = join( + (testRig as unknown as { testDir: string }).testDir, + '.qwen', + ); + const content = readFileSync(join(qwenDir, 'settings.json'), 'utf-8'); + return JSON.parse(content) as Record; + }; + + describe('V1 settings migration', () => { + it('should migrate V1 settings to V3 on CLI startup', async () => { + rig.setup('v1-to-v3-migration'); + + // Write V1 settings directly (overwrites the one created by setup) + overwriteSettingsFile(rig, v1Settings); + + // Run CLI with --help to trigger migration without API calls + // We expect this to fail due to missing API key, but migration should still occur + try { + await rig.runCommand(['--help']); + } catch { + // Expected to potentially fail, we just need the settings file to be processed + } + + // Read migrated settings + const migratedSettings = readSettingsFile(rig); + + // Verify migration to V3 + expect(migratedSettings['$version']).toBe(3); + expect(migratedSettings['ui']).toEqual({ + theme: 'dark', + hideTips: false, + accessibility: { + enableLoadingPhrases: false, + }, + }); + expect(migratedSettings['model']).toEqual({ name: 'gemini' }); + expect(migratedSettings['tools']).toEqual({ autoAccept: true }); + expect(migratedSettings['general']).toEqual({ + vimMode: true, + checkpointing: true, + enableAutoUpdate: false, + }); + expect(migratedSettings['mcpServers']).toEqual({ + fetch: { + command: 'node', + args: ['fetch-server.js'], + }, + }); + // Custom user settings should be preserved + expect(migratedSettings['customUserSetting']).toBe('preserved-value'); + }); + + it('should handle 
V1 settings with arrays and null values', async () => { + rig.setup('v1-array-and-null-migration'); + + // Use fixture with arrays, null values, and string booleans + overwriteSettingsFile(rig, v1ArrayAndNullSettings); + + // Run CLI with --help to trigger migration without API calls + try { + await rig.runCommand(['--help']); + } catch { + // Expected to potentially fail + } + + // Read migrated settings + const migratedSettings = readSettingsFile(rig); + + // Expected output based on stable test output + expect(migratedSettings['$version']).toBe(3); + expect(migratedSettings['tools']).toEqual({ autoAccept: false }); + expect(migratedSettings['context']).toEqual({ includeDirectories: [] }); + expect(migratedSettings['model']).toEqual({ name: ['gemini', 'claude'] }); + expect(migratedSettings['ui']).toEqual({ theme: null }); + expect(migratedSettings['customArray']).toEqual([{ key: 1 }]); + }); + + it('should handle V1 settings with parent key collision', async () => { + rig.setup('v1-parent-collision-migration'); + + // Use fixture where V1 flat keys (ui, general) conflict with V2/V3 nested structure + overwriteSettingsFile(rig, v1ParentCollisionSettings); + + // Run CLI with --help to trigger migration without API calls + try { + await rig.runCommand(['--help']); + } catch { + // Expected to potentially fail + } + + // Read migrated settings + const migratedSettings = readSettingsFile(rig); + + // Should be migrated to V3 + expect(migratedSettings['$version']).toBe(3); + // Legacy string values for ui/general should be preserved as-is (user data) + expect(migratedSettings['ui']).toBe('legacy-ui-string'); + expect(migratedSettings['general']).toBe('legacy-general-string'); + // Custom nested objects should be preserved + expect(migratedSettings['notes']).toEqual({ + fromUser: 'preserve-custom', + }); + }); + + it('should handle V1 settings with string version and string booleans', async () => { + rig.setup('v1-string-version-migration'); + + // Use fixture with 
$version as string and string boolean values + overwriteSettingsFile(rig, v1VersionStringSettings); + + // Run CLI with --help to trigger migration without API calls + try { + await rig.runCommand(['--help']); + } catch { + // Expected to potentially fail + } + + // Read migrated settings + const migratedSettings = readSettingsFile(rig); + + // Expected output based on stable test output + expect(migratedSettings['$version']).toBe(3); + expect(migratedSettings['model']).toEqual({ name: 'qwen-plus' }); + expect(migratedSettings['ui']).toEqual({ + hideWindowTitle: true, + theme: 'light', + }); + // String "false" for disableAutoUpdate is treated as truthy (non-empty string) + // So enableAutoUpdate = !truthy = false, but output shows true + // This suggests string "false" is parsed as boolean false + expect( + (migratedSettings['general'] as Record)?.[ + 'enableAutoUpdate' + ], + ).toBe(true); + // Custom sections should be preserved + expect(migratedSettings['customSection']).toEqual({ keepMe: true }); + }); + }); + + describe('V2 settings migration', () => { + it('should migrate V2 settings to V3 on CLI startup', async () => { + rig.setup('v2-to-v3-migration'); + + // Write V2 settings directly (overwrites the one created by setup) + overwriteSettingsFile(rig, v2Settings); + + // Run CLI with --help to trigger migration without API calls + try { + await rig.runCommand(['--help']); + } catch { + // Expected to potentially fail + } + + // Read migrated settings + const migratedSettings = readSettingsFile(rig); + + // Verify migration to V3 + expect(migratedSettings['$version']).toBe(3); + + // Verify disable* -> enable* conversion with inversion + expect( + ( + (migratedSettings['ui'] as Record)?.[ + 'accessibility' + ] as Record + )?.['enableLoadingPhrases'], + ).toBe(true); + expect( + (migratedSettings['general'] as Record)?.[ + 'enableAutoUpdate' + ], + ).toBe(true); + expect( + ( + (migratedSettings['context'] as Record)?.[ + 'fileFiltering' + ] as Record + 
)?.['enableFuzzySearch'], + ).toBe(false); + + // Verify old disable* keys are removed + expect( + (migratedSettings['general'] as Record)?.[ + 'disableAutoUpdate' + ], + ).toBeUndefined(); + expect( + (migratedSettings['general'] as Record)?.[ + 'disableUpdateNag' + ], + ).toBeUndefined(); + expect( + ( + (migratedSettings['ui'] as Record)?.[ + 'accessibility' + ] as Record + )?.['disableLoadingPhrases'], + ).toBeUndefined(); + expect( + ( + (migratedSettings['context'] as Record)?.[ + 'fileFiltering' + ] as Record + )?.['disableFuzzySearch'], + ).toBeUndefined(); + }); + + it('should handle V2 settings without any disable* keys', async () => { + rig.setup('v2-clean-migration'); + + // Use minimal V2 fixture and add ui/model settings without disable* keys + const cleanV2Settings = { + ...v2MinimalSettings, + ui: { + theme: 'dark', + }, + model: { + name: 'gemini', + }, + }; + + overwriteSettingsFile(rig, cleanV2Settings); + + // Run CLI with --help to trigger migration without API calls + try { + await rig.runCommand(['--help']); + } catch { + // Expected to potentially fail + } + + // Read migrated settings + const migratedSettings = readSettingsFile(rig); + + // Should be updated to V3 version + expect(migratedSettings['$version']).toBe(3); + // Other settings should remain unchanged + expect(migratedSettings['ui']).toEqual({ theme: 'dark' }); + expect(migratedSettings['model']).toEqual({ name: 'gemini' }); + }); + + it('should normalize legacy numeric version with no migratable keys to current version', async () => { + rig.setup('legacy-version-normalization'); + + // Use v1Settings fixture as base but with only custom key + const legacyVersionWithoutMigratableKeys = { + $version: 1, + customOnlyKey: 'value', + }; + + overwriteSettingsFile(rig, legacyVersionWithoutMigratableKeys); + + // Run CLI with --help to trigger settings load/write path + try { + await rig.runCommand(['--help']); + } catch { + // Expected to potentially fail + } + + const migratedSettings 
= readSettingsFile(rig); + + // Version metadata should still be normalized to current version + expect(migratedSettings['$version']).toBe(3); + // Existing user content should be preserved + expect(migratedSettings['customOnlyKey']).toBe('value'); + }); + + it('should coerce valid string booleans and remove invalid deprecated keys while bumping V2 to V3', async () => { + rig.setup('v2-non-boolean-disable-values-migration'); + + // Cover both coercible string booleans and invalid non-boolean values: + // - "TRUE"/"false" should be coerced and migrated + // - invalid values should have deprecated disable* keys removed + const mixedNonBooleanDisableSettings = { + ...v2BooleanStringSettings, + ui: { + accessibility: { + disableLoadingPhrases: 'yes', + }, + }, + context: { + fileFiltering: { + disableFuzzySearch: null, + }, + }, + model: { + generationConfig: { + disableCacheControl: [1], + }, + }, + }; + overwriteSettingsFile(rig, mixedNonBooleanDisableSettings); + + // Run CLI with --help to trigger migration without API calls + try { + await rig.runCommand(['--help']); + } catch { + // Expected to potentially fail + } + + // Read migrated settings + const migratedSettings = readSettingsFile(rig); + + // Coercible strings are migrated; invalid disable* values are removed. 
+ expect(migratedSettings['$version']).toBe(3); + expect(migratedSettings['general']).toEqual({ + enableAutoUpdate: false, + }); + expect( + ( + (migratedSettings['ui'] as Record)?.[ + 'accessibility' + ] as Record + )?.['disableLoadingPhrases'], + ).toBeUndefined(); + expect( + ( + (migratedSettings['ui'] as Record)?.[ + 'accessibility' + ] as Record + )?.['enableLoadingPhrases'], + ).toBeUndefined(); + expect( + ( + (migratedSettings['context'] as Record)?.[ + 'fileFiltering' + ] as Record + )?.['disableFuzzySearch'], + ).toBeUndefined(); + expect( + ( + (migratedSettings['context'] as Record)?.[ + 'fileFiltering' + ] as Record + )?.['enableFuzzySearch'], + ).toBeUndefined(); + expect( + ( + (migratedSettings['model'] as Record)?.[ + 'generationConfig' + ] as Record + )?.['disableCacheControl'], + ).toBeUndefined(); + expect( + ( + (migratedSettings['model'] as Record)?.[ + 'generationConfig' + ] as Record + )?.['enableCacheControl'], + ).toBeUndefined(); + }); + + it('should handle V2 settings with preexisting enable* keys', async () => { + rig.setup('v2-preexisting-enable-migration'); + + // Use fixture with both disable* and enable* keys + overwriteSettingsFile(rig, v2PreexistingEnableSettings); + + // Run CLI with --help to trigger migration without API calls + try { + await rig.runCommand(['--help']); + } catch { + // Expected to potentially fail + } + + // Read migrated settings + const migratedSettings = readSettingsFile(rig); + + // Expected output based on stable test output + expect(migratedSettings['$version']).toBe(3); + // Migration converts disable* to enable* by inverting the value + // disableAutoUpdate: false -> enableAutoUpdate: true (inverted) + // But disableUpdateNag: true may affect the consolidation + expect( + (migratedSettings['general'] as Record)?.[ + 'enableAutoUpdate' + ], + ).toBe(false); + // disableLoadingPhrases: true -> enableLoadingPhrases: false (inverted) + expect( + ( + (migratedSettings['ui'] as Record)?.[ + 'accessibility' 
+ ] as Record + )?.['enableLoadingPhrases'], + ).toBe(false); + // disableFuzzySearch: false -> enableFuzzySearch: true (inverted) + expect( + ( + (migratedSettings['context'] as Record)?.[ + 'fileFiltering' + ] as Record + )?.['enableFuzzySearch'], + ).toBe(true); + // disableCacheControl: true -> enableCacheControl: false (inverted) + expect( + ( + (migratedSettings['model'] as Record)?.[ + 'generationConfig' + ] as Record + )?.['enableCacheControl'], + ).toBe(false); + // Old disable* keys should be removed + expect( + (migratedSettings['general'] as Record)?.[ + 'disableAutoUpdate' + ], + ).toBeUndefined(); + expect( + (migratedSettings['general'] as Record)?.[ + 'disableUpdateNag' + ], + ).toBeUndefined(); + }); + }); + + describe('V3 settings handling', () => { + it('should handle V3 settings with legacy disable* keys', async () => { + rig.setup('v3-legacy-disable-keys'); + + // Use fixture with V3 format but still has legacy disable* keys + overwriteSettingsFile(rig, v3LegacyDisableSettings); + + // Run CLI with --help to trigger migration without API calls + try { + await rig.runCommand(['--help']); + } catch { + // Expected to potentially fail + } + + // Read settings + const finalSettings = readSettingsFile(rig); + + // Should remain V3 + expect(finalSettings['$version']).toBe(3); + // Note: V3 settings with legacy disable* keys are left as-is + // Migration only runs when version < current version + // Since this is already V3, no migration logic is applied + expect( + (finalSettings['general'] as Record)?.[ + 'disableAutoUpdate' + ], + ).toBe(true); + expect( + ( + (finalSettings['ui'] as Record)?.[ + 'accessibility' + ] as Record + )?.['disableLoadingPhrases'], + ).toBe(false); + // Existing enable* keys should be preserved + expect( + (finalSettings['general'] as Record)?.[ + 'enableAutoUpdate' + ], + ).toBe(false); + expect( + ( + (finalSettings['ui'] as Record)?.[ + 'accessibility' + ] as Record + )?.['enableLoadingPhrases'], + ).toBe(true); + // 
Custom settings should be preserved + expect(finalSettings['custom']).toEqual({ + note: 'should remain unchanged in v3', + }); + }); + }); + + describe('Future version settings handling', () => { + it('should not modify future version settings', async () => { + rig.setup('v999-future-version'); + + // Use fixture with future version ($version: 999) + overwriteSettingsFile(rig, v999FutureVersionSettings); + + // Run CLI with --help to trigger migration without API calls + try { + await rig.runCommand(['--help']); + } catch { + // Expected to potentially fail + } + + // Read settings + const finalSettings = readSettingsFile(rig); + + // Future version should remain unchanged + expect(finalSettings['$version']).toBe(999); + expect(finalSettings['theme']).toBe('dark'); + expect(finalSettings['model']).toBe('future-model'); + expect(finalSettings['experimentalFlag']).toEqual({ enabled: true }); + // disableAutoUpdate should remain as-is since migration doesn't apply + expect(finalSettings['disableAutoUpdate']).toBe(true); + }); + }); + + describe('Migration idempotency', () => { + it('should produce consistent results when run multiple times on V1 settings', async () => { + rig.setup('v1-idempotency'); + + overwriteSettingsFile(rig, v1Settings); + + // Run CLI multiple times with --help + try { + await rig.runCommand(['--help']); + } catch { + // Expected to potentially fail + } + const firstRunSettings = readSettingsFile(rig); + + try { + await rig.runCommand(['--help']); + } catch { + // Expected to potentially fail + } + const secondRunSettings = readSettingsFile(rig); + + try { + await rig.runCommand(['--help']); + } catch { + // Expected to potentially fail + } + const thirdRunSettings = readSettingsFile(rig); + + // All runs should produce identical results + expect(secondRunSettings).toEqual(firstRunSettings); + expect(thirdRunSettings).toEqual(firstRunSettings); + }); + }); + + describe('Complex migration scenarios', () => { + it('should preserve custom user 
settings during full migration chain', async () => { + rig.setup('preserve-custom-settings'); + + // Use v1ComplexSettings fixture which has custom user settings + overwriteSettingsFile(rig, v1ComplexSettings); + + // Run CLI with --help to trigger migration without API calls + try { + await rig.runCommand(['--help']); + } catch { + // Expected to potentially fail + } + + // Read migrated settings + const migratedSettings = readSettingsFile(rig); + + // Custom keys should be preserved (v1ComplexSettings has 'custom-value' and { nested: true, items: [1, 2, 3] }) + expect(migratedSettings['myCustomKey']).toBe('custom-value'); + expect(migratedSettings['anotherCustomSetting']).toEqual({ + nested: true, + items: [1, 2, 3], + }); + }); + }); +}); diff --git a/integration-tests/terminal-capture/motivation.md b/integration-tests/terminal-capture/motivation.md index 388019369..3d004ddee 100644 --- a/integration-tests/terminal-capture/motivation.md +++ b/integration-tests/terminal-capture/motivation.md @@ -40,6 +40,10 @@ Playwright element screenshot | WYSIWYG | xterm.js fully renders ANSI, no manual output cleaning needed | | Theme Support | Built-in 5 themes (Dracula, One Dark, GitHub Dark, Monokai, Night Owl) | | Full-length | `captureFull()` supports capturing scrollback buffer content | +| Streaming Capture | Capture multiple frames at intervals during execution (e.g., progress bars) | +| Animated GIF | Auto-generate GIF from streaming frames via ffmpeg | +| Early Stop | Streaming stops early if output stabilizes; duplicate frames are skipped | +| Auto Cleanup | Output directory is cleared before each run to prevent stale screenshots | | Deterministic Naming | Screenshot filenames auto-generated by step sequence for easy regression comparison | | Batch Execution | `run.ts` executes all scenarios in one command | @@ -90,8 +94,14 @@ scenarios/screenshots/ 02-01.png # Step 2 input state 02-02.png # Step 2 result full-flow.png # Final state full-length image - context/ + 
streaming-shell/ + 01-01.png # Input state + 01-streaming-01.png # Streaming frame 1 + 01-streaming-02.png # Streaming frame 2 ... + 01-02.png # Final result + streaming.gif # Animated GIF (requires ffmpeg) + full-flow.png # Final state full-length image ``` ## 4. Position in Testing System diff --git a/integration-tests/terminal-capture/scenario-runner.ts b/integration-tests/terminal-capture/scenario-runner.ts index 4bd858fd4..93640694b 100644 --- a/integration-tests/terminal-capture/scenario-runner.ts +++ b/integration-tests/terminal-capture/scenario-runner.ts @@ -10,7 +10,9 @@ */ import { TerminalCapture, THEMES } from './terminal-capture.js'; -import { dirname, resolve, isAbsolute } from 'node:path'; +import { dirname, resolve, isAbsolute, join } from 'node:path'; +import { execSync } from 'node:child_process'; +import { writeFileSync, unlinkSync, rmSync, existsSync } from 'node:fs'; // ───────────────────────────────────────────── // Schema — Minimal @@ -29,6 +31,18 @@ export interface FlowStep { capture?: string; /** Explicit screenshot: full scrollback buffer long image (standalone capture when no type) */ captureFull?: string; + /** + * Streaming capture: capture multiple screenshots during execution at intervals. + * Useful for demonstrating real-time output like progress bars. + */ + streaming?: { + /** Delay before starting captures in milliseconds (skip initial waiting phase) */ + delayMs?: number; + /** Interval between captures in milliseconds */ + intervalMs: number; + /** Maximum number of captures */ + count: number; + }; } export interface ScenarioConfig { @@ -50,6 +64,8 @@ export interface ScenarioConfig { }; /** Screenshot output directory (relative to config file) */ outputDir?: string; + /** Generate animated GIF from all screenshots in order (default: true) */ + gif?: boolean; } // ───────────────────────────────────────────── @@ -105,6 +121,11 @@ export async function runScenario( ? 
resolve(basedir, config.outputDir, scenarioDir) : resolve(basedir, 'screenshots', scenarioDir); + // Clean previous screenshots + if (existsSync(outputDir)) { + rmSync(outputDir, { recursive: true }); + } + console.log(`\n${'═'.repeat(60)}`); console.log(`▶ ${config.name}`); console.log('═'.repeat(60)); @@ -171,13 +192,66 @@ export async function runScenario( if (autoEnter) { // ── Auto-press Enter → Wait for stabilization → 02 screenshot ── await terminal.type('\n'); - console.log(` ⏳ waiting for output to settle...`); - await terminal.idle(2000, 60000); - console.log(` ✅ settled`); - const resultName = step.capture ?? `${pad(seq)}-02.png`; - console.log(` ${label} 📸 result: ${resultName}`); - screenshots.push(await terminal.capture(resultName)); + // Streaming capture: capture multiple screenshots during execution + if (step.streaming) { + const { delayMs = 0, intervalMs, count } = step.streaming; + console.log( + ` 🎬 streaming capture: ${count} shots @ ${intervalMs}ms intervals${delayMs ? 
` (delay ${delayMs}ms)` : ''}`, + ); + + // Wait before starting captures (skip initial waiting phase) + if (delayMs > 0) { + await sleep(delayMs); + } + + // Capture frames at intervals (stop early if output stabilizes) + const streamingShots: string[] = []; + let prevOutputLen = terminal.getRawOutput().length; + let stableCount = 0; + let shotNum = 0; + for (let j = 0; j < count; j++) { + await sleep(intervalMs); + const curOutputLen = terminal.getRawOutput().length; + if (curOutputLen === prevOutputLen) { + stableCount++; + if (stableCount >= 3) { + console.log( + ` ⏹️ streaming stopped early: output stable for ${stableCount} intervals`, + ); + break; + } + continue; // skip duplicate frame + } + stableCount = 0; + prevOutputLen = curOutputLen; + shotNum++; + const shotName = `${pad(seq)}-streaming-${pad(shotNum)}.png`; + console.log( + ` 📸 streaming [${shotNum}/${count}]: ${shotName}`, + ); + const shot = await terminal.capture(shotName); + streamingShots.push(shot); + screenshots.push(shot); + } + + // Wait for completion after streaming captures + console.log(` ⏳ waiting for output to settle...`); + await terminal.idle(2000, 60000); + console.log(` ✅ settled`); + + const resultName = step.capture ?? `${pad(seq)}-02.png`; + console.log(` ${label} 📸 result: ${resultName}`); + screenshots.push(await terminal.capture(resultName)); + } else { + console.log(` ⏳ waiting for output to settle...`); + await terminal.idle(2000, 60000); + console.log(` ✅ settled`); + + const resultName = step.capture ?? 
`${pad(seq)}-02.png`; + console.log(` ${label} 📸 result: ${resultName}`); + screenshots.push(await terminal.capture(resultName)); + } // full-flow: Only the last type step auto-captures full-length image const isLastType = !config.flow.slice(i + 1).some((s) => s.type); @@ -245,6 +319,19 @@ export async function runScenario( } } + // Generate animated GIF from all screenshots (excluding full-flow captures) + if (config.gif !== false) { + const gifFrames = screenshots.filter( + (s) => !s.endsWith('full-flow.png') && !s.includes('-full-'), + ); + if (gifFrames.length > 0) { + const gifPath = generateGif(gifFrames, outputDir); + if (gifPath) { + console.log(` 🎞️ GIF: ${gifPath}`); + } + } + } + const duration = Date.now() - startTime; console.log( `\n ✅ ${config.name} — ${screenshots.length} screenshots, ${(duration / 1000).toFixed(1)}s`, @@ -302,3 +389,41 @@ const KEY_MAP: Record = { function resolveKey(key: string): string { return KEY_MAP[key] ?? key; } + +/** Generate animated GIF from PNG frames using ffmpeg (concat demuxer). */ +function generateGif(frames: string[], outputDir: string): string | null { + if (frames.length === 0) return null; + + const STREAMING_DURATION = 0.3; // 300ms for streaming frames + const STATIC_DURATION = 1.0; // 1s for non-streaming and edge frames + + const gifPath = join(outputDir, 'streaming.gif'); + const listFile = join(outputDir, 'frames.txt'); + + try { + const lines: string[] = []; + for (let i = 0; i < frames.length; i++) { + const isStreaming = frames[i].includes('-streaming-'); + const duration = isStreaming ? 
STREAMING_DURATION : STATIC_DURATION; + lines.push(`file '${resolve(frames[i])}'`, `duration ${duration}`); + } + // Concat demuxer requires last frame repeated without duration + lines.push(`file '${resolve(frames[frames.length - 1])}'`); + writeFileSync(listFile, lines.join('\n')); + + execSync( + `ffmpeg -y -f concat -safe 0 -i "${listFile}" -vf "split[s0][s1];[s0]palettegen[p];[s1][p]paletteuse" -loop 0 "${gifPath}"`, + { stdio: 'pipe' }, + ); + return gifPath; + } catch { + console.log(' ⚠️ GIF generation requires ffmpeg'); + return null; + } finally { + try { + unlinkSync(listFile); + } catch { + // ignore + } + } +} diff --git a/integration-tests/terminal-capture/scenarios/message-components.ts b/integration-tests/terminal-capture/scenarios/message-components.ts new file mode 100644 index 000000000..621eb1ef8 --- /dev/null +++ b/integration-tests/terminal-capture/scenarios/message-components.ts @@ -0,0 +1,32 @@ +import type { ScenarioConfig } from '../scenario-runner.js'; + +/** + * Tests the message component refactoring for PR #2120. + * Captures info, warning, and error messages to verify proper icon/prefix display. + * + * This scenario tests: + * - Info message prefix (● filled circle) + * - Error message prefix (✕) + * - User message prefix (>) + * - Assistant message prefix (✦) + */ +export default { + name: 'message-components', + spawn: ['node', 'dist/cli.js', '--yolo'], + terminal: { title: 'qwen-code', cwd: '../../..' }, + flow: [ + // Test info message via /skills command (instant, no streaming) + { type: '/skills' }, + // Test error message via unknown skill (instant, no streaming) + { type: '/skills nonexistent-skill-xyz' }, + // Test user and assistant messages (streams from LLM) + { + type: 'Say "Hello, this is a test of message prefixes!" 
and nothing else.', + streaming: { + delayMs: 3000, + intervalMs: 1000, + count: 10, + }, + }, + ], +} satisfies ScenarioConfig; diff --git a/integration-tests/terminal-capture/scenarios/progress.sh b/integration-tests/terminal-capture/scenarios/progress.sh new file mode 100755 index 000000000..596ba19b3 --- /dev/null +++ b/integration-tests/terminal-capture/scenarios/progress.sh @@ -0,0 +1,16 @@ +#!/bin/bash +# Progress bar script that overwrites the same line using \r +# Tests PTY's ability to handle carriage return / cursor movement + +total=20 +for ((i = 1; i <= total; i++)); do + pct=$((i * 100 / total)) + filled=$((pct / 5)) + empty=$((20 - filled)) + bar=$(printf '%0.s#' $(seq 1 $filled 2>/dev/null)) + space=$(printf '%0.s-' $(seq 1 $empty 2>/dev/null)) + printf "\r[%s%s] %3d%% (%d/%d)" "$bar" "$space" "$pct" "$i" "$total" + sleep 0.5 +done +echo "" +echo "Done!" \ No newline at end of file diff --git a/integration-tests/terminal-capture/scenarios/qc-code-review.ts b/integration-tests/terminal-capture/scenarios/qc-code-review.ts new file mode 100644 index 000000000..75b281539 --- /dev/null +++ b/integration-tests/terminal-capture/scenarios/qc-code-review.ts @@ -0,0 +1,17 @@ +import type { ScenarioConfig } from '../scenario-runner.js'; + +export default { + name: '/qc:code-review', + spawn: ['node', 'dist/cli.js', '--yolo'], + terminal: { title: 'qwen-code', cwd: '../../..' 
}, + flow: [ + { + type: '/qc:code-review 2117', + streaming: { + delayMs: 10000, // Wait for initial model thinking/approval + intervalMs: 800, // Capture every 800ms + count: 30, // Max 30 captures + }, + }, + ], +} satisfies ScenarioConfig; diff --git a/integration-tests/terminal-capture/scenarios/streaming-insight.ts b/integration-tests/terminal-capture/scenarios/streaming-insight.ts new file mode 100644 index 000000000..f1875f20a --- /dev/null +++ b/integration-tests/terminal-capture/scenarios/streaming-insight.ts @@ -0,0 +1,23 @@ +import type { ScenarioConfig } from '../scenario-runner.js'; + +/** + * Demonstrates streaming capture with the /insight command. + * The insight command analyzes the codebase and streams results, + * making it ideal for demonstrating streaming capture. + */ +export default { + name: 'streaming-insight', + spawn: ['node', 'dist/cli.js', '--yolo'], + terminal: { title: 'qwen-code', cwd: '../../..' }, + flow: [ + { + type: '/insight', + // /insight takes time to analyze the codebase and streams results + // Capture frames during the analysis to show real-time progress + streaming: { + intervalMs: 5000, // Capture every 5 seconds + count: 50, // Up to 250 seconds of capture + }, + }, + ], +} satisfies ScenarioConfig; diff --git a/integration-tests/terminal-capture/scenarios/streaming-shell.ts b/integration-tests/terminal-capture/scenarios/streaming-shell.ts new file mode 100644 index 000000000..e166d9a0d --- /dev/null +++ b/integration-tests/terminal-capture/scenarios/streaming-shell.ts @@ -0,0 +1,24 @@ +import type { ScenarioConfig } from '../scenario-runner.js'; + +/** + * Demonstrates streaming shell execution output with PTY enabled by default. + * Tests the render throttle behavior and progress bar handling. + * Captures multiple screenshots during execution to show real-time output. + */ +export default { + name: 'streaming-shell', + spawn: ['node', 'dist/cli.js', '--yolo'], + terminal: { title: 'qwen-code', cwd: '../../..' 
}, + flow: [ + { + type: 'Run this command: bash integration-tests/terminal-capture/scenarios/progress.sh', + // Capture 20 screenshots at 500ms intervals during execution + // The progress.sh script takes ~10 seconds (20 iterations * 0.5s each) + streaming: { + delayMs: 7000, + intervalMs: 500, + count: 20, + }, + }, + ], +} satisfies ScenarioConfig; diff --git a/package-lock.json b/package-lock.json index 96b91827f..5df32acc0 100644 --- a/package-lock.json +++ b/package-lock.json @@ -27,6 +27,7 @@ "@types/uuid": "^10.0.0", "@vitest/coverage-v8": "^3.1.1", "@vitest/eslint-plugin": "^1.3.4", + "@xterm/xterm": "^6.0.0", "cross-env": "^7.0.3", "esbuild": "^0.25.0", "eslint": "^9.24.0", @@ -5629,6 +5630,16 @@ "integrity": "sha512-5xXB7kdQlFBP82ViMJTwwEc3gKCLGKR/eoxQm4zge7GPBl86tCdI0IdPJjoKd8mUSFXz5V7i/25sfsEkP4j46g==", "license": "MIT" }, + "node_modules/@xterm/xterm": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/@xterm/xterm/-/xterm-6.0.0.tgz", + "integrity": "sha512-TQwDdQGtwwDt+2cgKDLn0IRaSxYu1tSUjgKarSDkUM0ZNiSRXFpjxEsvc/Zgc5kq5omJ+V0a8/kIM2WD3sMOYg==", + "dev": true, + "license": "MIT", + "workspaces": [ + "addons/*" + ] + }, "node_modules/abort-controller": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz", @@ -19471,6 +19482,7 @@ "google-auth-library": "^10.5.0", "html-to-text": "^9.0.5", "https-proxy-agent": "^7.0.6", + "iconv-lite": "^0.6.3", "ignore": "^7.0.0", "jsonrepair": "^3.13.0", "marked": "^15.0.12", diff --git a/package.json b/package.json index 3b01bc667..ef9f25eff 100644 --- a/package.json +++ b/package.json @@ -20,6 +20,7 @@ "dev": "node scripts/dev.js", "debug": "cross-env DEBUG=1 node --inspect-brk scripts/start.js", "generate": "node scripts/generate-git-commit-info.js", + "generate:settings-schema": "tsx scripts/generate-settings-schema.ts", "build": "node scripts/build.js", "build-and-start": "npm run build && npm run start", "build:vscode": "node 
scripts/build_vscode_companion.js", @@ -84,6 +85,7 @@ "@types/uuid": "^10.0.0", "@vitest/coverage-v8": "^3.1.1", "@vitest/eslint-plugin": "^1.3.4", + "@xterm/xterm": "^6.0.0", "cross-env": "^7.0.3", "esbuild": "^0.25.0", "eslint": "^9.24.0", diff --git a/packages/cli/src/acp-integration/service/filesystem.test.ts b/packages/cli/src/acp-integration/service/filesystem.test.ts index 6eb3dfa1b..e8dc34968 100644 --- a/packages/cli/src/acp-integration/service/filesystem.test.ts +++ b/packages/cli/src/acp-integration/service/filesystem.test.ts @@ -11,6 +11,9 @@ import { ACP_ERROR_CODES } from '../errorCodes.js'; const createFallback = (): FileSystemService => ({ readTextFile: vi.fn(), + readTextFileWithInfo: vi + .fn() + .mockResolvedValue({ content: '', encoding: 'utf-8', bom: false }), writeTextFile: vi.fn(), detectFileBOM: vi.fn().mockResolvedValue(false), findFiles: vi.fn().mockReturnValue([]), diff --git a/packages/cli/src/acp-integration/service/filesystem.ts b/packages/cli/src/acp-integration/service/filesystem.ts index 9dfbf35b3..b20d5f0ff 100644 --- a/packages/cli/src/acp-integration/service/filesystem.ts +++ b/packages/cli/src/acp-integration/service/filesystem.ts @@ -4,7 +4,10 @@ * SPDX-License-Identifier: Apache-2.0 */ -import type { FileSystemService } from '@qwen-code/qwen-code-core'; +import type { + FileSystemService, + FileReadResult, +} from '@qwen-code/qwen-code-core'; import type * as acp from '../acp.js'; import { ACP_ERROR_CODES } from '../errorCodes.js'; @@ -54,10 +57,16 @@ export class AcpFileSystemService implements FileSystemService { return response.content; } + async readTextFileWithInfo(filePath: string): Promise { + // ACP protocol does not expose encoding metadata; delegate to the local + // fallback which performs a single-pass read with encoding detection. 
+ return this.fallback.readTextFileWithInfo(filePath); + } + async writeTextFile( filePath: string, content: string, - options?: { bom?: boolean }, + options?: { bom?: boolean; encoding?: string }, ): Promise { if (!this.capabilities.writeTextFile) { return this.fallback.writeTextFile(filePath, content, options); diff --git a/packages/cli/src/commands/hooks.tsx b/packages/cli/src/commands/hooks.tsx new file mode 100644 index 000000000..c747c61c2 --- /dev/null +++ b/packages/cli/src/commands/hooks.tsx @@ -0,0 +1,25 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import type { CommandModule } from 'yargs'; +import { enableCommand } from './hooks/enable.js'; +import { disableCommand } from './hooks/disable.js'; + +export const hooksCommand: CommandModule = { + command: 'hooks ', + aliases: ['hook'], + describe: 'Manage Qwen Code hooks.', + builder: (yargs) => + yargs + .command(enableCommand) + .command(disableCommand) + .demandCommand(1, 'You need at least one command before continuing.') + .version(false), + handler: () => { + // This handler is not called when a subcommand is provided. + // Yargs will show the help menu. 
+ }, +}; diff --git a/packages/cli/src/commands/hooks/disable.ts b/packages/cli/src/commands/hooks/disable.ts new file mode 100644 index 000000000..8d1324cdb --- /dev/null +++ b/packages/cli/src/commands/hooks/disable.ts @@ -0,0 +1,75 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import type { CommandModule } from 'yargs'; +import { createDebugLogger, getErrorMessage } from '@qwen-code/qwen-code-core'; +import { loadSettings, SettingScope } from '../../config/settings.js'; + +const debugLogger = createDebugLogger('HOOKS_DISABLE'); + +interface DisableArgs { + hookName: string; +} + +/** + * Disable a hook by adding it to the disabled list + */ +export async function handleDisableHook(hookName: string): Promise { + const workingDir = process.cwd(); + const settings = loadSettings(workingDir); + + try { + // Get current hooks settings + const mergedSettings = settings.merged as + | Record + | undefined; + const hooksSettings = (mergedSettings?.['hooks'] || {}) as Record< + string, + unknown + >; + const disabledHooks = (hooksSettings['disabled'] || []) as string[]; + + // Check if hook is already disabled + if (disabledHooks.includes(hookName)) { + debugLogger.info(`Hook "${hookName}" is already disabled.`); + return; + } + + // Add hook to disabled list + const newDisabledHooks = [...disabledHooks, hookName]; + const newHooksSettings = { + ...hooksSettings, + disabled: newDisabledHooks, + }; + + // Save updated settings + settings.setValue( + SettingScope.Workspace, + 'hooks' as keyof typeof settings.merged, + newHooksSettings as never, + ); + + debugLogger.info(`✓ Hook "${hookName}" has been disabled.`); + } catch (error) { + debugLogger.error(`Error disabling hook: ${getErrorMessage(error)}`); + } +} + +export const disableCommand: CommandModule = { + command: 'disable ', + describe: 'Disable an active hook', + builder: (yargs) => + yargs.positional('hook-name', { + describe: 'Name of the hook to disable', + 
type: 'string', + demandOption: true, + }), + handler: async (argv) => { + const args = argv as unknown as DisableArgs; + await handleDisableHook(args.hookName); + process.exit(0); + }, +}; diff --git a/packages/cli/src/commands/hooks/enable.ts b/packages/cli/src/commands/hooks/enable.ts new file mode 100644 index 000000000..863b5b32c --- /dev/null +++ b/packages/cli/src/commands/hooks/enable.ts @@ -0,0 +1,75 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import type { CommandModule } from 'yargs'; +import { createDebugLogger, getErrorMessage } from '@qwen-code/qwen-code-core'; +import { loadSettings, SettingScope } from '../../config/settings.js'; + +const debugLogger = createDebugLogger('HOOKS_ENABLE'); + +interface EnableArgs { + hookName: string; +} + +/** + * Enable a hook by removing it from the disabled list + */ +export async function handleEnableHook(hookName: string): Promise { + const workingDir = process.cwd(); + const settings = loadSettings(workingDir); + + try { + // Get current hooks settings + const mergedSettings = settings.merged as + | Record + | undefined; + const hooksSettings = (mergedSettings?.['hooks'] || {}) as Record< + string, + unknown + >; + const disabledHooks = (hooksSettings['disabled'] || []) as string[]; + + // Check if hook is in disabled list + if (!disabledHooks.includes(hookName)) { + debugLogger.info(`Hook "${hookName}" is not disabled.`); + return; + } + + // Remove hook from disabled list + const newDisabledHooks = disabledHooks.filter((h) => h !== hookName); + const newHooksSettings = { + ...hooksSettings, + disabled: newDisabledHooks, + }; + + // Save updated settings + settings.setValue( + SettingScope.Workspace, + 'hooks' as keyof typeof settings.merged, + newHooksSettings as never, + ); + + debugLogger.info(`✓ Hook "${hookName}" has been enabled.`); + } catch (error) { + debugLogger.error(`Error enabling hook: ${getErrorMessage(error)}`); + } +} + +export const 
enableCommand: CommandModule = { + command: 'enable ', + describe: 'Enable a disabled hook', + builder: (yargs) => + yargs.positional('hook-name', { + describe: 'Name of the hook to enable', + type: 'string', + demandOption: true, + }), + handler: async (argv) => { + const args = argv as unknown as EnableArgs; + await handleEnableHook(args.hookName); + process.exit(0); + }, +}; diff --git a/packages/cli/src/config/config.test.ts b/packages/cli/src/config/config.test.ts index 5f08dd382..2fdc62a9c 100644 --- a/packages/cli/src/config/config.test.ts +++ b/packages/cli/src/config/config.test.ts @@ -548,6 +548,43 @@ describe('loadCliConfig', () => { vi.restoreAllMocks(); }); + it('should reset context file names to QWEN.md and AGENTS.md by default', async () => { + process.argv = ['node', 'script.js']; + const argv = await parseArguments(); + const settings: Settings = {}; + const setGeminiMdFilenameSpy = vi.spyOn( + ServerConfig, + 'setGeminiMdFilename', + ); + + await loadCliConfig(settings, argv); + + expect(setGeminiMdFilenameSpy).toHaveBeenCalledTimes(1); + expect(setGeminiMdFilenameSpy).toHaveBeenCalledWith([ + ServerConfig.DEFAULT_CONTEXT_FILENAME, + ServerConfig.AGENT_CONTEXT_FILENAME, + ]); + }); + + it('should use configured context file name when settings.context.fileName is set', async () => { + process.argv = ['node', 'script.js']; + const argv = await parseArguments(); + const settings: Settings = { + context: { + fileName: 'CUSTOM_AGENTS.md', + }, + }; + const setGeminiMdFilenameSpy = vi.spyOn( + ServerConfig, + 'setGeminiMdFilename', + ); + + await loadCliConfig(settings, argv); + + expect(setGeminiMdFilenameSpy).toHaveBeenCalledTimes(1); + expect(setGeminiMdFilenameSpy).toHaveBeenCalledWith('CUSTOM_AGENTS.md'); + }); + it('should propagate stream-json formats to config', async () => { process.argv = [ 'node', @@ -567,6 +604,35 @@ describe('loadCliConfig', () => { expect(config.getIncludePartialMessages()).toBe(true); }); + it('should reset context 
filenames to defaults when context.fileName is not configured', async () => { + process.argv = ['node', 'script.js']; + const argv = await parseArguments(); + const settings: Settings = {}; + const defaultContextFiles = ['QWEN.md', 'AGENTS.md']; + const getAllSpy = vi + .spyOn(ServerConfig, 'getAllGeminiMdFilenames') + .mockReturnValue(defaultContextFiles); + const setFilenameSpy = vi.spyOn(ServerConfig, 'setGeminiMdFilename'); + + await loadCliConfig(settings, argv); + + expect(getAllSpy).toHaveBeenCalledTimes(1); + expect(setFilenameSpy).toHaveBeenCalledWith(defaultContextFiles); + }); + + it('should use context.fileName from settings when provided', async () => { + process.argv = ['node', 'script.js']; + const argv = await parseArguments(); + const settings: Settings = { context: { fileName: 'CUSTOM_CONTEXT.md' } }; + const getAllSpy = vi.spyOn(ServerConfig, 'getAllGeminiMdFilenames'); + const setFilenameSpy = vi.spyOn(ServerConfig, 'setGeminiMdFilename'); + + await loadCliConfig(settings, argv); + + expect(setFilenameSpy).toHaveBeenCalledWith('CUSTOM_CONTEXT.md'); + expect(getAllSpy).not.toHaveBeenCalled(); + }); + it('should initialize native LSP service when enabled', async () => { process.argv = ['node', 'script.js', '--experimental-lsp']; const argv = await parseArguments(); diff --git a/packages/cli/src/config/config.ts b/packages/cli/src/config/config.ts index 48961cdca..88153fe75 100755 --- a/packages/cli/src/config/config.ts +++ b/packages/cli/src/config/config.ts @@ -11,7 +11,7 @@ import { DEFAULT_QWEN_EMBEDDING_MODEL, FileDiscoveryService, FileEncoding, - getCurrentGeminiMdFilename, + getAllGeminiMdFilenames, loadServerHierarchicalMemory, setGeminiMdFilename as setServerGeminiMdFilename, resolveTelemetrySettings, @@ -33,6 +33,7 @@ import { NativeLspService, } from '@qwen-code/qwen-code-core'; import { extensionsCommand } from '../commands/extensions.js'; +import { hooksCommand } from '../commands/hooks.js'; import type { Settings } from 
'./settings.js'; import { resolveCliGenerationConfig, @@ -124,6 +125,7 @@ export interface CliArgs { acp: boolean | undefined; experimentalAcp: boolean | undefined; experimentalLsp: boolean | undefined; + experimentalHooks: boolean | undefined; extensions: string[] | undefined; listExtensions: boolean | undefined; openaiLogging: boolean | undefined; @@ -337,6 +339,12 @@ export async function parseArguments(): Promise { 'Enable experimental LSP (Language Server Protocol) feature for code intelligence', default: false, }) + .option('experimental-hooks', { + type: 'boolean', + description: + 'Enable experimental hooks feature for lifecycle event customization', + default: false, + }) .option('channel', { type: 'string', choices: ['VSCode', 'ACP', 'SDK', 'CI'], @@ -561,7 +569,9 @@ export async function parseArguments(): Promise { // Register MCP subcommands .command(mcpCommand) // Register Extension subcommands - .command(extensionsCommand); + .command(extensionsCommand) + // Register Hooks subcommands + .command(hooksCommand); yargsInstance .version(await getCliVersion()) // This will enable the --version flag based on package.json @@ -580,9 +590,11 @@ export async function parseArguments(): Promise { // and not return to main CLI logic if ( result._.length > 0 && - (result._[0] === 'mcp' || result._[0] === 'extensions') + (result._[0] === 'mcp' || + result._[0] === 'extensions' || + result._[0] === 'hooks') ) { - // MCP commands handle their own execution and process exit + // MCP/Extensions/Hooks commands handle their own execution and process exit process.exit(0); } @@ -688,8 +700,8 @@ export async function loadCliConfig( if (settings.context?.fileName) { setServerGeminiMdFilename(settings.context.fileName); } else { - // Reset to default if not provided in settings. - setServerGeminiMdFilename(getCurrentGeminiMdFilename()); + // Reset to default context filenames if not provided in settings. 
+ setServerGeminiMdFilename(getAllGeminiMdFilenames()); } // Automatically load output-language.md if it exists @@ -1011,7 +1023,7 @@ export async function loadCliConfig( useBuiltinRipgrep: settings.tools?.useBuiltinRipgrep, shouldUseNodePtyShell: settings.tools?.shell?.enableInteractiveShell, skipNextSpeakerCheck: settings.model?.skipNextSpeakerCheck, - skipLoopDetection: settings.model?.skipLoopDetection ?? false, + skipLoopDetection: settings.model?.skipLoopDetection ?? true, skipStartupContext: settings.model?.skipStartupContext ?? false, truncateToolOutputThreshold: settings.tools?.truncateToolOutputThreshold, truncateToolOutputLines: settings.tools?.truncateToolOutputLines, @@ -1021,6 +1033,10 @@ export async function loadCliConfig( output: { format: outputSettingsFormat, }, + hooks: settings.hooks, + hooksConfig: settings.hooksConfig, + enableHooks: + argv.experimentalHooks === true || settings.hooksConfig?.enabled === true, channel: argv.channel, // Precedence: explicit CLI flag > settings file > default(true). 
// NOTE: do NOT set a yargs default for `chat-recording`, otherwise argv will diff --git a/packages/cli/src/config/migration/index.test.ts b/packages/cli/src/config/migration/index.test.ts new file mode 100644 index 000000000..52bae237e --- /dev/null +++ b/packages/cli/src/config/migration/index.test.ts @@ -0,0 +1,383 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect } from 'vitest'; +import { + runMigrations, + needsMigration, + ALL_MIGRATIONS, + MigrationScheduler, +} from './index.js'; +import { SETTINGS_VERSION } from '../settings.js'; + +describe('Migration Framework Integration', () => { + describe('runMigrations', () => { + it('should migrate V1 settings to V3', () => { + const v1Settings = { + theme: 'dark', + model: 'gemini', + disableAutoUpdate: true, + disableLoadingPhrases: false, + }; + + const result = runMigrations(v1Settings, 'user'); + + expect(result.finalVersion).toBe(3); + expect(result.executedMigrations).toHaveLength(2); + expect(result.executedMigrations[0]).toEqual({ + fromVersion: 1, + toVersion: 2, + }); + expect(result.executedMigrations[1]).toEqual({ + fromVersion: 2, + toVersion: 3, + }); + + // Check V2 structure was created + const settings = result.settings as Record; + expect(settings['$version']).toBe(3); + expect(settings['ui']).toEqual({ + theme: 'dark', + accessibility: { enableLoadingPhrases: true }, + }); + expect(settings['model']).toEqual({ name: 'gemini' }); + + // Check disableAutoUpdate was inverted to enableAutoUpdate: false + expect( + (settings['general'] as Record)['enableAutoUpdate'], + ).toBe(false); + }); + + it('should migrate V2 settings to V3', () => { + const v2Settings = { + $version: 2, + ui: { theme: 'light' }, + general: { disableAutoUpdate: false }, + }; + + const result = runMigrations(v2Settings, 'user'); + + expect(result.finalVersion).toBe(3); + expect(result.executedMigrations).toHaveLength(1); + 
expect(result.executedMigrations[0]).toEqual({ + fromVersion: 2, + toVersion: 3, + }); + + const settings = result.settings as Record; + expect(settings['$version']).toBe(3); + expect( + (settings['general'] as Record)['enableAutoUpdate'], + ).toBe(true); + expect( + (settings['general'] as Record)['disableAutoUpdate'], + ).toBeUndefined(); + }); + + it('should not modify V3 settings', () => { + const v3Settings = { + $version: 3, + ui: { theme: 'dark' }, + general: { enableAutoUpdate: true }, + }; + + const result = runMigrations(v3Settings, 'user'); + + expect(result.finalVersion).toBe(3); + expect(result.executedMigrations).toHaveLength(0); + expect(result.settings).toEqual(v3Settings); + }); + + it('should be idempotent', () => { + const v1Settings = { + theme: 'dark', + disableAutoUpdate: true, + }; + + const result1 = runMigrations(v1Settings, 'user'); + const result2 = runMigrations(result1.settings, 'user'); + + expect(result1.executedMigrations).toHaveLength(2); + expect(result2.executedMigrations).toHaveLength(0); + expect(result1.finalVersion).toBe(result2.finalVersion); + }); + }); + + describe('needsMigration', () => { + it('should return true for V1 settings', () => { + const v1Settings = { + theme: 'dark', + model: 'gemini', + }; + + expect(needsMigration(v1Settings)).toBe(true); + }); + + it('should return true for V2 settings with deprecated keys', () => { + const v2Settings = { + $version: 2, + general: { disableAutoUpdate: true }, + }; + + expect(needsMigration(v2Settings)).toBe(true); + }); + + it('should return true for V2 settings without deprecated keys', () => { + const cleanV2Settings = { + $version: 2, + ui: { theme: 'dark' }, + }; + + // V2 settings should be migrated to V3 to update the version number + expect(needsMigration(cleanV2Settings)).toBe(true); + }); + + it('should return false for V3 settings', () => { + const v3Settings = { + $version: 3, + general: { enableAutoUpdate: true }, + }; + + 
expect(needsMigration(v3Settings)).toBe(false); + }); + + it('should return false for legacy numeric version when no migration can execute', () => { + const legacyButUnknownSettings = { + $version: 1, + customOnlyKey: 'value', + }; + + expect(needsMigration(legacyButUnknownSettings)).toBe(false); + }); + }); + + describe('ALL_MIGRATIONS', () => { + it('should contain all migrations in order', () => { + expect(ALL_MIGRATIONS).toHaveLength(2); + + expect(ALL_MIGRATIONS[0].fromVersion).toBe(1); + expect(ALL_MIGRATIONS[0].toVersion).toBe(2); + + expect(ALL_MIGRATIONS[1].fromVersion).toBe(2); + expect(ALL_MIGRATIONS[1].toVersion).toBe(3); + }); + }); + + describe('MigrationScheduler with all migrations', () => { + it('should execute full migration chain', () => { + const scheduler = new MigrationScheduler([...ALL_MIGRATIONS], 'user'); + + const v1Settings = { + theme: 'dark', + disableAutoUpdate: true, + disableLoadingPhrases: true, + }; + + const result = scheduler.migrate(v1Settings); + + expect(result.executedMigrations).toHaveLength(2); + + const settings = result.settings as Record; + expect(settings['$version']).toBe(3); + expect((settings['ui'] as Record)['theme']).toBe('dark'); + expect( + (settings['general'] as Record)['enableAutoUpdate'], + ).toBe(false); + expect( + ( + (settings['ui'] as Record)[ + 'accessibility' + ] as Record + )['enableLoadingPhrases'], + ).toBe(false); + }); + }); + + describe('needsMigration and runMigrations consistency', () => { + it('needsMigration should return true when runMigrations would execute migrations', () => { + const v1Settings = { + theme: 'dark', + disableAutoUpdate: true, + }; + + // needsMigration should report that migration is needed + expect(needsMigration(v1Settings)).toBe(true); + + // runMigrations should actually execute migrations + const result = runMigrations(v1Settings, 'user'); + expect(result.executedMigrations.length).toBeGreaterThan(0); + }); + + it('needsMigration should return false when runMigrations 
would execute no migrations', () => { + const v3Settings = { + $version: 3, + general: { enableAutoUpdate: true }, + }; + + // needsMigration should report that no migration is needed + expect(needsMigration(v3Settings)).toBe(false); + + // runMigrations should execute no migrations + const result = runMigrations(v3Settings, 'user'); + expect(result.executedMigrations).toHaveLength(0); + }); + + it('should handle V2 settings without deprecated keys consistently', () => { + const cleanV2Settings = { + $version: 2, + ui: { theme: 'dark' }, + }; + + // needsMigration should report that migration is needed + expect(needsMigration(cleanV2Settings)).toBe(true); + + // runMigrations should execute the V2->V3 migration + const result = runMigrations(cleanV2Settings, 'user'); + expect(result.executedMigrations.length).toBeGreaterThan(0); + expect(result.finalVersion).toBe(3); + }); + }); + + describe('migration chain integrity', () => { + it('should have strictly increasing versions (toVersion > fromVersion)', () => { + for (const migration of ALL_MIGRATIONS) { + expect(migration.toVersion).toBeGreaterThan(migration.fromVersion); + } + }); + + it('should have no gaps in the chain (adjacent versions)', () => { + for (let i = 1; i < ALL_MIGRATIONS.length; i++) { + const prevMigration = ALL_MIGRATIONS[i - 1]; + const currMigration = ALL_MIGRATIONS[i]; + expect(currMigration.fromVersion).toBe(prevMigration.toVersion); + } + }); + + it('should have no duplicate fromVersions', () => { + const fromVersions = ALL_MIGRATIONS.map((m) => m.fromVersion); + const uniqueFromVersions = new Set(fromVersions); + expect(uniqueFromVersions.size).toBe(fromVersions.length); + }); + + it('should have no duplicate toVersions', () => { + const toVersions = ALL_MIGRATIONS.map((m) => m.toVersion); + const uniqueToVersions = new Set(toVersions); + expect(uniqueToVersions.size).toBe(toVersions.length); + }); + + it('should be acyclic (no version appears as fromVersion more than once)', () => { + const 
fromVersionCounts = new Map(); + for (const migration of ALL_MIGRATIONS) { + const count = fromVersionCounts.get(migration.fromVersion) || 0; + fromVersionCounts.set(migration.fromVersion, count + 1); + } + + for (const count of fromVersionCounts.values()) { + expect(count).toBe(1); + } + }); + + it('should chain from version 1 to SETTINGS_VERSION', () => { + if (ALL_MIGRATIONS.length > 0) { + expect(ALL_MIGRATIONS[0].fromVersion).toBe(1); + const lastMigration = ALL_MIGRATIONS[ALL_MIGRATIONS.length - 1]; + expect(lastMigration.toVersion).toBe(SETTINGS_VERSION); + } + }); + }); + + describe('single source of truth for version constant', () => { + it('should use SETTINGS_VERSION from settings module', () => { + // The last migration's toVersion should match SETTINGS_VERSION + const lastMigration = ALL_MIGRATIONS[ALL_MIGRATIONS.length - 1]; + expect(lastMigration.toVersion).toBe(SETTINGS_VERSION); + }); + + it('needsMigration should use SETTINGS_VERSION for version comparison', () => { + // Create settings with version equal to SETTINGS_VERSION + const currentVersionSettings = { + $version: SETTINGS_VERSION, + general: { enableAutoUpdate: true }, + }; + + // needsMigration should return false for current version + expect(needsMigration(currentVersionSettings)).toBe(false); + + // Create settings with version less than SETTINGS_VERSION + const oldVersionSettings = { + $version: SETTINGS_VERSION - 1, + general: { disableAutoUpdate: true }, + }; + + // needsMigration should return true for old version + expect(needsMigration(oldVersionSettings)).toBe(true); + }); + + it('should have SETTINGS_VERSION defined exactly once in codebase', () => { + // SETTINGS_VERSION is imported from settings.js + // This test verifies the wiring is correct + expect(SETTINGS_VERSION).toBeDefined(); + expect(typeof SETTINGS_VERSION).toBe('number'); + expect(SETTINGS_VERSION).toBeGreaterThan(0); + }); + }); + + describe('invalid version handling', () => { + it('should treat non-numeric 
version with V1 shape as needing migration', () => { + const settingsWithInvalidVersion = { + $version: 'invalid', + theme: 'dark', + disableAutoUpdate: true, + }; + + // Should detect migration needed based on V1 shape + expect(needsMigration(settingsWithInvalidVersion)).toBe(true); + + // Should run migrations + const result = runMigrations(settingsWithInvalidVersion, 'user'); + expect(result.executedMigrations.length).toBeGreaterThan(0); + expect(result.finalVersion).toBe(SETTINGS_VERSION); + }); + + it('should not migrate non-numeric version with already-migrated shape (normalized by loader)', () => { + const settingsWithInvalidVersionButMigratedShape = { + $version: 'invalid', + general: { enableAutoUpdate: true }, + }; + + // needsMigration returns false because no migration applies to this shape + // The settings loader will handle version normalization separately + expect(needsMigration(settingsWithInvalidVersionButMigratedShape)).toBe( + false, + ); + + // No migrations should execute + const result = runMigrations( + settingsWithInvalidVersionButMigratedShape, + 'user', + ); + expect(result.executedMigrations).toHaveLength(0); + }); + + it('should avoid repeated no-op migration loops', () => { + // Settings that might cause repeated migrations + const v3Settings = { + $version: 3, + general: { enableAutoUpdate: true }, + }; + + // First check + expect(needsMigration(v3Settings)).toBe(false); + const result1 = runMigrations(v3Settings, 'user'); + expect(result1.executedMigrations).toHaveLength(0); + + // Second check should be consistent + expect(needsMigration(result1.settings)).toBe(false); + const result2 = runMigrations(result1.settings, 'user'); + expect(result2.executedMigrations).toHaveLength(0); + }); + }); +}); diff --git a/packages/cli/src/config/migration/index.ts b/packages/cli/src/config/migration/index.ts new file mode 100644 index 000000000..40d176cbe --- /dev/null +++ b/packages/cli/src/config/migration/index.ts @@ -0,0 +1,106 @@ +/** + * 
@license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +// Export types +export type { SettingsMigration, MigrationResult } from './types.js'; + +// Export scheduler +export { MigrationScheduler } from './scheduler.js'; + +// Export migrations +export { v1ToV2Migration, V1ToV2Migration } from './versions/v1-to-v2.js'; +export { v2ToV3Migration, V2ToV3Migration } from './versions/v2-to-v3.js'; + +// Import settings version from single source of truth +import { SETTINGS_VERSION } from '../settings.js'; + +// Ordered array of all migrations for use with MigrationScheduler +// Each migration handles one version transition (N → N+1) +// Order matters: migrations must be sorted by ascending version +import { v1ToV2Migration } from './versions/v1-to-v2.js'; +import { v2ToV3Migration } from './versions/v2-to-v3.js'; +import { MigrationScheduler } from './scheduler.js'; +import type { MigrationResult } from './types.js'; + +/** + * Ordered array of all settings migrations. + * Use this with MigrationScheduler to run the full migration chain. + * + * @example + * ```typescript + * const scheduler = new MigrationScheduler([...ALL_MIGRATIONS], 'user'); + * const result = scheduler.migrate(settings); + * ``` + */ +export const ALL_MIGRATIONS = [v1ToV2Migration, v2ToV3Migration] as const; + +/** + * Convenience function that runs all migrations on the given settings. + * This is the primary entry point for settings migration. 
+ * + * @param settings - The settings object to migrate + * @param scope - The scope of settings being migrated + * @returns MigrationResult containing the final settings, version, and execution log + * + * @example + * ```typescript + * const result = runMigrations(settings, 'User'); + * if (result.executedMigrations.length > 0) { + * console.log(`Migrated from version ${result.executedMigrations[0].fromVersion} to ${result.finalVersion}`); + * } + * ``` + */ +export function runMigrations( + settings: unknown, + scope: string, +): MigrationResult { + const scheduler = new MigrationScheduler([...ALL_MIGRATIONS], scope); + return scheduler.migrate(settings); +} + +/** + * Checks if the given settings need migration. + * Returns true only if at least one registered migration would be applied. + * + * This function checks: + * 1. If $version field exists and is a number: + * - Returns false if $version >= SETTINGS_VERSION + * - Returns true only when $version < SETTINGS_VERSION AND at least one + * migration can execute for the current settings shape + * 2. If $version field is missing or invalid: + * - Uses fallback logic by checking individual migrations + * + * Note: + * - Legacy numeric versions that have no executable migrations are handled by + * the settings loader via version normalization (bump metadata to current). 
+ * + * @param settings - The settings object to check + * @returns true if migration is needed, false otherwise + */ +export function needsMigration(settings: unknown): boolean { + if (typeof settings !== 'object' || settings === null) { + return false; + } + + const s = settings as Record; + const version = s['$version']; + const hasApplicableMigration = ALL_MIGRATIONS.some((migration) => + migration.shouldMigrate(settings), + ); + + // If $version is a valid number, use version comparison + if (typeof version === 'number') { + if (version >= SETTINGS_VERSION) { + return false; + } + // Guardrail: only report migration-needed if at least one migration can execute. + return hasApplicableMigration; + } + + // If $version exists but is not a number (invalid), or is missing: + // Use fallback logic - check if any migration would be applied + return hasApplicableMigration; +} diff --git a/packages/cli/src/config/migration/scheduler.test.ts b/packages/cli/src/config/migration/scheduler.test.ts new file mode 100644 index 000000000..91e9eff98 --- /dev/null +++ b/packages/cli/src/config/migration/scheduler.test.ts @@ -0,0 +1,164 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect, vi } from 'vitest'; +import { MigrationScheduler } from './scheduler.js'; + +import type { SettingsMigration } from './types.js'; + +describe('MigrationScheduler', () => { + // Mock migration for testing + const createMockMigration = ( + fromVersion: number, + toVersion: number, + shouldMigrateResult: boolean, + ): SettingsMigration => ({ + fromVersion, + toVersion, + shouldMigrate: vi.fn().mockReturnValue(shouldMigrateResult), + migrate: vi.fn((settings) => ({ + settings: { + ...(settings as Record), + $version: toVersion, + }, + warnings: [], + })), + }); + + it('should execute migrations in order when shouldMigrate returns true', () => { + const migration1 = createMockMigration(1, 2, true); + const migration2 = 
createMockMigration(2, 3, true); + + const scheduler = new MigrationScheduler([migration1, migration2], 'user'); + const result = scheduler.migrate({ $version: 1, someKey: 'value' }); + + expect(migration1.shouldMigrate).toHaveBeenCalledTimes(1); + expect(migration1.migrate).toHaveBeenCalledTimes(1); + expect(migration2.shouldMigrate).toHaveBeenCalledTimes(1); + expect(migration2.migrate).toHaveBeenCalledTimes(1); + + expect(result.executedMigrations).toHaveLength(2); + expect(result.executedMigrations[0]).toEqual({ + fromVersion: 1, + toVersion: 2, + }); + expect(result.executedMigrations[1]).toEqual({ + fromVersion: 2, + toVersion: 3, + }); + expect(result.finalVersion).toBe(3); + }); + + it('should skip migrations when shouldMigrate returns false', () => { + const migration1 = createMockMigration(1, 2, false); + const migration2 = createMockMigration(2, 3, true); + + const scheduler = new MigrationScheduler([migration1, migration2], 'user'); + const result = scheduler.migrate({ $version: 2, someKey: 'value' }); + + expect(migration1.shouldMigrate).toHaveBeenCalledTimes(1); + expect(migration1.migrate).not.toHaveBeenCalled(); + expect(migration2.shouldMigrate).toHaveBeenCalledTimes(1); + expect(migration2.migrate).toHaveBeenCalledTimes(1); + + expect(result.executedMigrations).toHaveLength(1); + expect(result.executedMigrations[0]).toEqual({ + fromVersion: 2, + toVersion: 3, + }); + }); + + it('should be idempotent - running migrations twice produces same result', () => { + // Create a migration that checks the version to determine if migration is needed + const migration1: SettingsMigration = { + fromVersion: 1, + toVersion: 2, + shouldMigrate: vi.fn((settings) => { + const s = settings as Record; + return s['$version'] !== 2; + }), + migrate: vi.fn((settings) => ({ + settings: { + ...(settings as Record), + $version: 2, + }, + warnings: [], + })), + }; + + const scheduler = new MigrationScheduler([migration1], 'user'); + const input = { theme: 'dark' }; + + 
const result1 = scheduler.migrate(input); + const result2 = scheduler.migrate(result1.settings); + + expect(result1.executedMigrations).toHaveLength(1); + expect(result2.executedMigrations).toHaveLength(0); + expect(result1.finalVersion).toBe(result2.finalVersion); + }); + + it('should pass updated settings to each migration', () => { + const migration1: SettingsMigration = { + fromVersion: 1, + toVersion: 2, + shouldMigrate: vi.fn().mockReturnValue(true), + migrate: vi.fn(() => ({ + settings: { $version: 2, transformed: true }, + warnings: [], + })), + }; + + const migration2: SettingsMigration = { + fromVersion: 2, + toVersion: 3, + shouldMigrate: vi.fn().mockReturnValue(true), + migrate: vi.fn((s) => ({ settings: s, warnings: [] })), + }; + + const scheduler = new MigrationScheduler([migration1, migration2], 'user'); + scheduler.migrate({ $version: 1 }); + + expect(migration2.shouldMigrate).toHaveBeenCalledWith( + expect.objectContaining({ $version: 2, transformed: true }), + ); + }); + + it('should handle empty migrations array', () => { + const scheduler = new MigrationScheduler([], 'user'); + const result = scheduler.migrate({ $version: 1, key: 'value' }); + + expect(result.executedMigrations).toHaveLength(0); + expect(result.finalVersion).toBe(1); + expect(result.settings).toEqual({ $version: 1, key: 'value' }); + }); + + it('should throw error when migration fails', () => { + const migration1: SettingsMigration = { + fromVersion: 1, + toVersion: 2, + shouldMigrate: vi.fn().mockReturnValue(true), + migrate: vi.fn().mockImplementation(() => { + throw new Error('Migration failed'); + }), + }; + + const scheduler = new MigrationScheduler([migration1], 'user'); + + expect(() => scheduler.migrate({ $version: 1 })).toThrow( + 'Migration failed', + ); + }); + + it('should handle settings without version field', () => { + const migration1 = createMockMigration(1, 2, true); + + const scheduler = new MigrationScheduler([migration1], 'user'); + const result = 
scheduler.migrate({ theme: 'dark' }); + + expect(result.finalVersion).toBe(2); + expect(result.executedMigrations).toHaveLength(1); + }); +}); diff --git a/packages/cli/src/config/migration/scheduler.ts b/packages/cli/src/config/migration/scheduler.ts new file mode 100644 index 000000000..7bbcc43d6 --- /dev/null +++ b/packages/cli/src/config/migration/scheduler.ts @@ -0,0 +1,115 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { createDebugLogger } from '@qwen-code/qwen-code-core'; +import type { SettingsMigration, MigrationResult } from './types.js'; + +const debugLogger = createDebugLogger('SETTINGS_MIGRATION'); + +/** + * Formats a SettingScope enum value to a human-readable string. + * - Converts to lowercase + * - Special case: 'SystemDefaults' -> 'system default' + */ +export function formatScope(scope: string): string { + if (scope === 'SystemDefaults') { + return 'system default'; + } + return scope.toLowerCase(); +} + +/** + * Chain scheduler for settings migrations. + * + * The MigrationScheduler orchestrates multiple migrations in sequence, + * delegating version detection to each individual migration via `shouldMigrate`. + * It has no centralized version logic - migrations self-determine applicability. + * + * Key characteristics: + * - Linear chain execution: migrations are applied in registration order + * - Idempotent: already-migrated versions return false from shouldMigrate + * - Adjacent versions only: each migration handles N → N+1 + * - Pure functions: migrations don't modify input objects + */ +export class MigrationScheduler { + /** + * Creates a new MigrationScheduler with the given migrations. 
+ * + * @param migrations - Array of migrations in execution order (typically ascending version) + * @param scope - The scope of settings being migrated + */ + constructor( + private readonly migrations: SettingsMigration[], + private readonly scope: string, + ) {} + + /** + * Executes the migration chain on the given settings. + * + * Iterates through all registered migrations in order. For each migration: + * 1. Calls `shouldMigrate` with the current settings + * 2. If true, calls `migrate` to transform the settings + * 3. Records the execution + * + * The scheduler itself has no version awareness - all version detection + * is delegated to the individual migrations. + * + * @param settings - The settings object to migrate + * @returns MigrationResult containing the final settings, version, and execution log + */ + migrate(settings: unknown): MigrationResult { + debugLogger.debug('MigrationScheduler: Starting migration chain'); + + let current = settings; + const executed: Array<{ fromVersion: number; toVersion: number }> = []; + const allWarnings: string[] = []; + + for (const migration of this.migrations) { + try { + if (migration.shouldMigrate(current)) { + debugLogger.debug( + `MigrationScheduler: Executing migration ${migration.fromVersion} → ${migration.toVersion}`, + ); + + const formattedScope = formatScope(this.scope); + const result = migration.migrate(current, formattedScope); + current = result.settings; + allWarnings.push(...result.warnings); + + executed.push({ + fromVersion: migration.fromVersion, + toVersion: migration.toVersion, + }); + + debugLogger.debug( + `MigrationScheduler: Migration ${migration.fromVersion} → ${migration.toVersion} completed successfully`, + ); + } + } catch (error) { + debugLogger.error( + `MigrationScheduler: Migration ${migration.fromVersion} → ${migration.toVersion} failed:`, + error, + ); + throw error; + } + } + + // Determine final version from the settings object + const finalVersion = + ((current as 
Record)['$version'] as number) ?? 1; + + debugLogger.debug( + `MigrationScheduler: Migration chain complete. Final version: ${finalVersion}, Executed: ${executed.length} migrations`, + ); + + return { + settings: current, + finalVersion, + executedMigrations: executed, + warnings: allWarnings, + }; + } +} diff --git a/packages/cli/src/config/migration/types.ts b/packages/cli/src/config/migration/types.ts new file mode 100644 index 000000000..ca1e23aaf --- /dev/null +++ b/packages/cli/src/config/migration/types.ts @@ -0,0 +1,58 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +/** + * Interface that all settings migrations must implement. + * Each migration handles a single version transition (N → N+1). + */ +export interface SettingsMigration { + /** Source version number */ + readonly fromVersion: number; + + /** Target version number */ + readonly toVersion: number; + + /** + * Determines whether this migration should be applied to the given settings. + * The migration inspects the settings object to detect its current version + * and returns true if this migration is applicable. + * + * @param settings - The current settings object + * @returns true if this migration should be applied, false otherwise + */ + shouldMigrate(settings: unknown): boolean; + + /** + * Executes the migration transformation. + * This should be a pure function that does not modify the input object. + * + * @param settings - The current settings object of version N + * @param scope - The scope of settings being migrated + * @returns The migrated settings object of version N+1 with optional warnings + * @throws Error if the migration fails + */ + migrate( + settings: unknown, + scope: string, + ): { settings: unknown; warnings: string[] }; +} + +/** + * Result of a migration execution by MigrationScheduler. 
+ */ +export interface MigrationResult { + /** The final settings object after all applicable migrations */ + settings: unknown; + + /** The final version number after migrations */ + finalVersion: number; + + /** List of migrations that were executed */ + executedMigrations: Array<{ fromVersion: number; toVersion: number }>; + + /** List of warning messages generated during migration */ + warnings: string[]; +} diff --git a/packages/cli/src/config/migration/versions/v1-to-v2-shared.ts b/packages/cli/src/config/migration/versions/v1-to-v2-shared.ts new file mode 100644 index 000000000..c87fa4480 --- /dev/null +++ b/packages/cli/src/config/migration/versions/v1-to-v2-shared.ts @@ -0,0 +1,180 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +/** + * Structural mapping table for V1 -> V2. + * + * Used by: + * - v1->v2 migration execution + * - warnings for residual legacy keys in latest-version settings files + */ +export const V1_TO_V2_MIGRATION_MAP: Record = { + accessibility: 'ui.accessibility', + allowedTools: 'tools.allowed', + allowMCPServers: 'mcp.allowed', + autoAccept: 'tools.autoAccept', + autoConfigureMaxOldSpaceSize: 'advanced.autoConfigureMemory', + bugCommand: 'advanced.bugCommand', + chatCompression: 'model.chatCompression', + checkpointing: 'general.checkpointing', + coreTools: 'tools.core', + contextFileName: 'context.fileName', + customThemes: 'ui.customThemes', + customWittyPhrases: 'ui.customWittyPhrases', + debugKeystrokeLogging: 'general.debugKeystrokeLogging', + dnsResolutionOrder: 'advanced.dnsResolutionOrder', + enforcedAuthType: 'security.auth.enforcedType', + excludeTools: 'tools.exclude', + excludeMCPServers: 'mcp.excluded', + excludedProjectEnvVars: 'advanced.excludedEnvVars', + extensions: 'extensions', + fileFiltering: 'context.fileFiltering', + folderTrustFeature: 'security.folderTrust.featureEnabled', + folderTrust: 'security.folderTrust.enabled', + hasSeenIdeIntegrationNudge: 
'ide.hasSeenNudge', + hideWindowTitle: 'ui.hideWindowTitle', + showStatusInTitle: 'ui.showStatusInTitle', + hideTips: 'ui.hideTips', + showLineNumbers: 'ui.showLineNumbers', + showCitations: 'ui.showCitations', + ideMode: 'ide.enabled', + includeDirectories: 'context.includeDirectories', + loadMemoryFromIncludeDirectories: 'context.loadFromIncludeDirectories', + maxSessionTurns: 'model.maxSessionTurns', + mcpServers: 'mcpServers', + mcpServerCommand: 'mcp.serverCommand', + memoryImportFormat: 'context.importFormat', + model: 'model.name', + preferredEditor: 'general.preferredEditor', + sandbox: 'tools.sandbox', + selectedAuthType: 'security.auth.selectedType', + shouldUseNodePtyShell: 'tools.shell.enableInteractiveShell', + shellPager: 'tools.shell.pager', + shellShowColor: 'tools.shell.showColor', + skipNextSpeakerCheck: 'model.skipNextSpeakerCheck', + summarizeToolOutput: 'model.summarizeToolOutput', + telemetry: 'telemetry', + theme: 'ui.theme', + toolDiscoveryCommand: 'tools.discoveryCommand', + toolCallCommand: 'tools.callCommand', + usageStatisticsEnabled: 'privacy.usageStatisticsEnabled', + useExternalAuth: 'security.auth.useExternal', + useRipgrep: 'tools.useRipgrep', + vimMode: 'general.vimMode', + enableWelcomeBack: 'ui.enableWelcomeBack', + approvalMode: 'tools.approvalMode', + sessionTokenLimit: 'model.sessionTokenLimit', + contentGenerator: 'model.generationConfig', + skipLoopDetection: 'model.skipLoopDetection', + skipStartupContext: 'model.skipStartupContext', + enableOpenAILogging: 'model.enableOpenAILogging', + tavilyApiKey: 'advanced.tavilyApiKey', +}; + +/** + * Top-level keys that are V2/V3 containers. + * If one of these keys already has object value, treat it as latest-format data. 
+ */ +export const V2_CONTAINER_KEYS = new Set([ + 'ui', + 'tools', + 'mcp', + 'advanced', + 'model', + 'general', + 'context', + 'security', + 'ide', + 'privacy', + 'telemetry', + 'extensions', +]); + +/** + * Legacy disable* keys that remain in disable* form for V2. + */ +export const V1_TO_V2_PRESERVE_DISABLE_MAP: Record = { + disableAutoUpdate: 'general.disableAutoUpdate', + disableUpdateNag: 'general.disableUpdateNag', + disableLoadingPhrases: 'ui.accessibility.disableLoadingPhrases', + disableFuzzySearch: 'context.fileFiltering.disableFuzzySearch', + disableCacheControl: 'model.generationConfig.disableCacheControl', +}; + +export const CONSOLIDATED_DISABLE_KEYS = new Set([ + 'disableAutoUpdate', + 'disableUpdateNag', +]); + +/** + * Keys that indicate V1-like top-level structure when holding primitive values. + */ +export const V1_INDICATOR_KEYS = [ + // From V1_TO_V2_MIGRATION_MAP - keys that map to different paths in V2 + 'theme', + 'model', + 'autoAccept', + 'hideTips', + 'vimMode', + 'checkpointing', + 'accessibility', + 'allowedTools', + 'allowMCPServers', + 'autoConfigureMaxOldSpaceSize', + 'bugCommand', + 'chatCompression', + 'coreTools', + 'contextFileName', + 'customThemes', + 'customWittyPhrases', + 'debugKeystrokeLogging', + 'dnsResolutionOrder', + 'enforcedAuthType', + 'excludeTools', + 'excludeMCPServers', + 'excludedProjectEnvVars', + 'fileFiltering', + 'folderTrustFeature', + 'folderTrust', + 'hasSeenIdeIntegrationNudge', + 'hideWindowTitle', + 'showStatusInTitle', + 'showLineNumbers', + 'showCitations', + 'ideMode', + 'includeDirectories', + 'loadMemoryFromIncludeDirectories', + 'maxSessionTurns', + 'mcpServerCommand', + 'memoryImportFormat', + 'preferredEditor', + 'sandbox', + 'selectedAuthType', + 'shouldUseNodePtyShell', + 'shellPager', + 'shellShowColor', + 'skipNextSpeakerCheck', + 'summarizeToolOutput', + 'toolDiscoveryCommand', + 'toolCallCommand', + 'usageStatisticsEnabled', + 'useExternalAuth', + 'useRipgrep', + 'enableWelcomeBack', + 
'approvalMode', + 'sessionTokenLimit', + 'contentGenerator', + 'skipLoopDetection', + 'skipStartupContext', + 'enableOpenAILogging', + 'tavilyApiKey', + // From V1_TO_V2_PRESERVE_DISABLE_MAP - disable* keys that get nested in V2 + 'disableAutoUpdate', + 'disableUpdateNag', + 'disableLoadingPhrases', + 'disableFuzzySearch', + 'disableCacheControl', +]; diff --git a/packages/cli/src/config/migration/versions/v1-to-v2.test.ts b/packages/cli/src/config/migration/versions/v1-to-v2.test.ts new file mode 100644 index 000000000..cbe655c54 --- /dev/null +++ b/packages/cli/src/config/migration/versions/v1-to-v2.test.ts @@ -0,0 +1,277 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect } from 'vitest'; +import { V1ToV2Migration } from './v1-to-v2.js'; + +describe('V1ToV2Migration', () => { + const migration = new V1ToV2Migration(); + + describe('shouldMigrate', () => { + it('should return true for V1 settings without version and with V1 keys', () => { + const v1Settings = { + theme: 'dark', + model: 'gemini', + }; + + expect(migration.shouldMigrate(v1Settings)).toBe(true); + }); + + it('should return true for V1 settings with disable* keys', () => { + const v1Settings = { + disableAutoUpdate: true, + disableLoadingPhrases: false, + }; + + expect(migration.shouldMigrate(v1Settings)).toBe(true); + }); + + it('should return false for settings with $version field', () => { + const v2Settings = { + $version: 2, + ui: { theme: 'dark' }, + }; + + expect(migration.shouldMigrate(v2Settings)).toBe(false); + }); + + it('should return false for V3 settings', () => { + const v3Settings = { + $version: 3, + general: { enableAutoUpdate: true }, + }; + + expect(migration.shouldMigrate(v3Settings)).toBe(false); + }); + + it('should return false for settings without V1 indicator keys', () => { + const unknownSettings = { + customKey: 'value', + anotherKey: 123, + }; + + 
expect(migration.shouldMigrate(unknownSettings)).toBe(false); + }); + + it('should return false for null input', () => { + expect(migration.shouldMigrate(null)).toBe(false); + }); + + it('should return false for non-object input', () => { + expect(migration.shouldMigrate('string')).toBe(false); + expect(migration.shouldMigrate(123)).toBe(false); + }); + }); + + describe('migrate', () => { + it('should migrate flat V1 keys to nested V2 structure', () => { + const v1Settings = { + theme: 'dark', + model: 'gemini', + autoAccept: true, + hideTips: false, + }; + + const { settings: result } = migration.migrate(v1Settings, 'user') as { + settings: Record; + warnings: unknown[]; + }; + + expect(result['$version']).toBe(2); + expect(result['ui']).toEqual({ theme: 'dark', hideTips: false }); + expect(result['model']).toEqual({ name: 'gemini' }); + expect(result['tools']).toEqual({ autoAccept: true }); + }); + + it('should migrate disable* keys to nested V2 paths without inversion', () => { + const v1Settings = { + theme: 'light', + disableAutoUpdate: true, + disableLoadingPhrases: false, + }; + + const { settings: result } = migration.migrate(v1Settings, 'user') as { + settings: Record; + warnings: unknown[]; + }; + + expect(result['$version']).toBe(2); + expect(result['general']).toEqual({ disableAutoUpdate: true }); + expect(result['ui']).toEqual({ + theme: 'light', + accessibility: { disableLoadingPhrases: false }, + }); + }); + + it('should normalize consolidated disable* non-boolean values to false', () => { + const v1Settings = { + theme: 'dark', + disableAutoUpdate: 'false', + disableUpdateNag: null, + }; + + const { settings: result } = migration.migrate(v1Settings, 'user') as { + settings: Record; + warnings: unknown[]; + }; + + expect(result['$version']).toBe(2); + expect(result['general']).toEqual({ + disableAutoUpdate: false, + disableUpdateNag: false, + }); + }); + + it('should drop non-boolean non-consolidated disable* values', () => { + const v1Settings = { + 
theme: 'dark', + disableLoadingPhrases: 'TRUE', + disableFuzzySearch: 1, + }; + + const { settings: result } = migration.migrate(v1Settings, 'user') as { + settings: Record; + warnings: unknown[]; + }; + + expect(result['$version']).toBe(2); + expect( + (result['ui'] as Record)?.['accessibility'], + ).toBeUndefined(); + expect( + ( + (result['context'] as Record)?.[ + 'fileFiltering' + ] as Record + )?.['disableFuzzySearch'], + ).toBeUndefined(); + }); + + it('should preserve mcpServers at top level', () => { + const v1Settings = { + theme: 'dark', + mcpServers: { + myServer: { command: 'node', args: ['server.js'] }, + }, + }; + + const { settings: result } = migration.migrate(v1Settings, 'user') as { + settings: Record; + warnings: unknown[]; + }; + + expect(result['$version']).toBe(2); + expect(result['mcpServers']).toEqual({ + myServer: { command: 'node', args: ['server.js'] }, + }); + }); + + it('should preserve unrecognized keys', () => { + const v1Settings = { + theme: 'dark', + myCustomSetting: 'value', + anotherCustom: 123, + }; + + const { settings: result } = migration.migrate(v1Settings, 'user') as { + settings: Record; + warnings: unknown[]; + }; + + expect(result['$version']).toBe(2); + expect(result['myCustomSetting']).toBe('value'); + expect(result['anotherCustom']).toBe(123); + }); + + it('should preserve non-object parent path values on collision', () => { + const v1Settings = { + theme: 'dark', + disableAutoUpdate: true, + ui: 'legacy-ui-string', + general: 'legacy-general-string', + }; + + const { settings: result } = migration.migrate(v1Settings, 'user') as { + settings: Record; + warnings: unknown[]; + }; + + expect(result['$version']).toBe(2); + expect(result['ui']).toBe('legacy-ui-string'); + expect(result['general']).toBe('legacy-general-string'); + }); + + it('should not modify the input object', () => { + const v1Settings = { + theme: 'dark', + model: 'gemini', + }; + + const { settings: result } = migration.migrate(v1Settings, 'user') as 
{ + settings: Record; + warnings: unknown[]; + }; + + expect(v1Settings).toEqual({ theme: 'dark', model: 'gemini' }); + expect(result).not.toBe(v1Settings); + }); + + it('should throw error for non-object input', () => { + expect(() => migration.migrate(null, 'user')).toThrow( + 'Settings must be an object', + ); + expect(() => migration.migrate('string', 'user')).toThrow( + 'Settings must be an object', + ); + }); + + it('should handle empty V1 settings', () => { + const v1Settings = { + theme: 'dark', + }; + + const { settings: result } = migration.migrate(v1Settings, 'user') as { + settings: Record; + warnings: unknown[]; + }; + + expect(result['$version']).toBe(2); + expect(result['ui']).toEqual({ theme: 'dark' }); + }); + + it('should correctly handle all V1 indicator keys', () => { + const v1Settings = { + theme: 'dark', + model: 'gemini', + autoAccept: true, + hideTips: false, + vimMode: true, + checkpointing: false, + telemetry: {}, + accessibility: {}, + extensions: [], + mcpServers: {}, + }; + + const { settings: result } = migration.migrate(v1Settings, 'user') as { + settings: Record; + warnings: unknown[]; + }; + + expect(result['$version']).toBe(2); + }); + }); + + describe('version properties', () => { + it('should have correct fromVersion', () => { + expect(migration.fromVersion).toBe(1); + }); + + it('should have correct toVersion', () => { + expect(migration.toVersion).toBe(2); + }); + }); +}); diff --git a/packages/cli/src/config/migration/versions/v1-to-v2.ts b/packages/cli/src/config/migration/versions/v1-to-v2.ts new file mode 100644 index 000000000..4dceffe44 --- /dev/null +++ b/packages/cli/src/config/migration/versions/v1-to-v2.ts @@ -0,0 +1,267 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import type { SettingsMigration } from '../types.js'; +import { + CONSOLIDATED_DISABLE_KEYS, + V1_INDICATOR_KEYS, + V1_TO_V2_MIGRATION_MAP, + V1_TO_V2_PRESERVE_DISABLE_MAP, + V2_CONTAINER_KEYS, +} from 
'./v1-to-v2-shared.js'; +import { setNestedPropertySafe } from '../../../utils/settingsUtils.js'; + +/** + * Heuristic indicators for deciding whether an object is "V1-like". + * + * Detection strategy: + * - A file is considered migratable as V1 when: + * 1) It is not explicitly versioned as V2+ (`$version` is missing or invalid), and + * 2) At least one indicator key appears in a legacy-compatible top-level shape. + * - Indicator list intentionally excludes keys that are valid top-level entries in + * both old and new structures to reduce false positives. + * + * Shape rule: + * - Object values for indicator keys are treated as already-nested V2-like content + * and do not alone trigger migration. + * - Primitive/array/null values on indicator keys are treated as legacy V1 signals. + */ + +/** + * V1 -> V2 migration (structural normalization stage). + * + * Migration contract: + * - Input: settings in legacy V1-like shape (mostly flat, may contain mixed partial V2). + * - Output: V2-compatible nested structure with `$version: 2`. + * - No semantic inversion of disable* naming in this stage. + * + * Data-preservation strategy: + * - Prefer transforming known keys into canonical V2 locations. + * - Preserve unrecognized keys verbatim. + * - Preserve parent-path scalar values when nested writes would collide with them. + * - Preserve/merge existing partial V2 objects where safe. + * + * This class intentionally optimizes for backward compatibility and non-destructive + * behavior over aggressive normalization. + */ +export class V1ToV2Migration implements SettingsMigration { + readonly fromVersion = 1; + readonly toVersion = 2; + + /** + * Determines whether this migration should execute. + * + * Decision strategy: + * - Hard-stop when `$version` is a number >= 2 (already V2+). + * - Otherwise, scan indicator keys and trigger only when at least one indicator is + * still in legacy top-level shape (primitive/array/null). 
+ * + * Mixed-shape tolerance: + * - Files that are partially migrated are supported; V2-like object-valued indicators + * are ignored while legacy-shaped indicators can still trigger migration. + */ + shouldMigrate(settings: unknown): boolean { + if (typeof settings !== 'object' || settings === null) { + return false; + } + + const s = settings as Record; + + // If $version exists and is a number >= 2, it's not V1 + const version = s['$version']; + if (typeof version === 'number' && version >= 2) { + return false; + } + + // Check for V1 indicator keys with primitive values + // A setting is considered V1 if ANY indicator key has a primitive value + // (string, number, boolean, null, or array) at the top level. + // Keys with object values are skipped as they may already be in V2 format. + return V1_INDICATOR_KEYS.some((key) => { + if (!(key in s)) { + return false; + } + const value = s[key]; + // Skip keys with object values - they may already be in V2 nested format + // But don't let them block migration of other keys + if ( + typeof value === 'object' && + value !== null && + !Array.isArray(value) + ) { + // This key appears to be in V2 format, skip it but continue + // checking other keys + return false; + } + // Found a key with primitive value - this is V1 format + return true; + }); + } + + /** + * Performs non-destructive V1 -> V2 transformation. + * + * Detailed strategy: + * 1) Relocate known V1 keys using `V1_TO_V2_MIGRATION_MAP`. + * - If a source value is already an object and maps to a child path of itself + * (partial V2 shape), merge child properties into target path. + * 2) Relocate disable* keys into V2 disable* locations. + * - Consolidated keys (`disableAutoUpdate`, `disableUpdateNag`): normalize to + * boolean with stable-compatible presence semantics (`value === true`). + * - Other disable* keys: migrate only boolean values. + * 3) Preserve `mcpServers` top-level placement. 
+ * 4) Carry over remaining keys: + * - If a key is parent of migrated nested paths, merge unprocessed object children. + * - If parent value is non-object, preserve that scalar/array/null as-is. + * - Otherwise copy untouched key/value. + * 5) Stamp `$version = 2`. + * + * The method is pure with respect to input mutation. + */ + migrate( + settings: unknown, + _scope: string, + ): { settings: unknown; warnings: string[] } { + if (typeof settings !== 'object' || settings === null) { + throw new Error('Settings must be an object'); + } + + const source = settings as Record; + const result: Record = {}; + const processedKeys = new Set(); + const warnings: string[] = []; + + // Step 1: Map known V1 keys to V2 nested paths + for (const [v1Key, v2Path] of Object.entries(V1_TO_V2_MIGRATION_MAP)) { + if (v1Key in source) { + const value = source[v1Key]; + + // Safety check: If this key is a V2 container (like 'model') and it's + // already an object, it's likely already in V2 format. Skip migration + // to prevent double-nesting (e.g., model.name.name). + if ( + V2_CONTAINER_KEYS.has(v1Key) && + typeof value === 'object' && + value !== null && + !Array.isArray(value) + ) { + // This is already a V2 container, carry it over as-is + result[v1Key] = value; + processedKeys.add(v1Key); + continue; + } + + // If value is already an object and the path matches the key, + // it might be a partial V2 structure. Merge its contents. 
+ if ( + typeof value === 'object' && + value !== null && + !Array.isArray(value) && + v2Path.startsWith(v1Key + '.') + ) { + // Merge nested properties from this partial V2 structure + for (const [nestedKey, nestedValue] of Object.entries(value)) { + setNestedPropertySafe( + result, + `${v2Path}.${nestedKey}`, + nestedValue, + ); + } + } else { + setNestedPropertySafe(result, v2Path, value); + } + processedKeys.add(v1Key); + } + } + + // Step 2: Map V1 disable* keys to V2 nested disable* paths + for (const [v1Key, v2Path] of Object.entries( + V1_TO_V2_PRESERVE_DISABLE_MAP, + )) { + if (v1Key in source) { + const value = source[v1Key]; + if (CONSOLIDATED_DISABLE_KEYS.has(v1Key)) { + // Preserve stable behavior: consolidated keys use presence semantics. + // Only literal true remains true; all other present values become false. + setNestedPropertySafe(result, v2Path, value === true); + } else if (typeof value === 'boolean') { + // Non-consolidated disable* keys only migrate when explicitly boolean. + setNestedPropertySafe(result, v2Path, value); + } + processedKeys.add(v1Key); + } + } + + // Step 3: Preserve mcpServers at the top level + if ('mcpServers' in source) { + result['mcpServers'] = source['mcpServers']; + processedKeys.add('mcpServers'); + } + + // Step 4: Carry over any unrecognized keys (including unknown nested objects) + // Important: Skip keys that are parent paths of already-migrated properties + // to avoid overwriting merged structures (e.g., 'ui' should not overwrite 'ui.theme') + for (const key of Object.keys(source)) { + if (!processedKeys.has(key)) { + // Check if this key is a parent of any already-migrated path + const isParentOfMigratedPath = Array.from(processedKeys).some( + (processedKey) => { + // Get the v2 path for this processed key + const v2Path = + V1_TO_V2_MIGRATION_MAP[processedKey] || + V1_TO_V2_PRESERVE_DISABLE_MAP[processedKey]; + if (!v2Path) return false; + // Check if the v2 path starts with this key + '.' 
+ return v2Path.startsWith(key + '.'); + }, + ); + + if (isParentOfMigratedPath) { + // This key is a parent of an already-migrated path + // Merge its unprocessed children instead of overwriting + const existingValue = source[key]; + if ( + typeof existingValue === 'object' && + existingValue !== null && + !Array.isArray(existingValue) + ) { + for (const [nestedKey, nestedValue] of Object.entries( + existingValue, + )) { + // Only merge if this nested key wasn't already processed + const fullNestedPath = `${key}.${nestedKey}`; + const wasProcessed = Array.from(processedKeys).some( + (processedKey) => { + const v2Path = + V1_TO_V2_MIGRATION_MAP[processedKey] || + V1_TO_V2_PRESERVE_DISABLE_MAP[processedKey]; + return v2Path === fullNestedPath; + }, + ); + if (!wasProcessed) { + setNestedPropertySafe(result, fullNestedPath, nestedValue); + } + } + } else { + // Preserve non-object parent values to match legacy overwrite semantics. + result[key] = source[key]; + } + } else { + // Not a parent path, safe to copy as-is + result[key] = source[key]; + } + } + } + + // Step 5: Set version to 2 + result['$version'] = 2; + + return { settings: result, warnings }; + } +} + +/** Singleton instance of V1→V2 migration */ +export const v1ToV2Migration = new V1ToV2Migration(); diff --git a/packages/cli/src/config/migration/versions/v2-to-v3.test.ts b/packages/cli/src/config/migration/versions/v2-to-v3.test.ts new file mode 100644 index 000000000..a1ba9b46d --- /dev/null +++ b/packages/cli/src/config/migration/versions/v2-to-v3.test.ts @@ -0,0 +1,598 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect } from 'vitest'; +import { V2ToV3Migration } from './v2-to-v3.js'; + +describe('V2ToV3Migration', () => { + const migration = new V2ToV3Migration(); + + describe('shouldMigrate', () => { + it('should return true for V2 settings with deprecated disable* keys', () => { + const v2Settings = { + $version: 2, + 
general: { disableAutoUpdate: true }, + }; + + expect(migration.shouldMigrate(v2Settings)).toBe(true); + }); + + it('should return true for V2 settings with ui.accessibility.disableLoadingPhrases', () => { + const v2Settings = { + $version: 2, + ui: { accessibility: { disableLoadingPhrases: false } }, + }; + + expect(migration.shouldMigrate(v2Settings)).toBe(true); + }); + + it('should return false for V3 settings', () => { + const v3Settings = { + $version: 3, + general: { enableAutoUpdate: true }, + }; + + expect(migration.shouldMigrate(v3Settings)).toBe(false); + }); + + it('should return false for V1 settings without version', () => { + const v1Settings = { + theme: 'dark', + disableAutoUpdate: true, + }; + + expect(migration.shouldMigrate(v1Settings)).toBe(false); + }); + + it('should return true for V2 settings without deprecated keys', () => { + const cleanV2Settings = { + $version: 2, + ui: { theme: 'dark' }, + general: { enableAutoUpdate: true }, + }; + + // V2 settings should always be migrated to V3 to update the version number + expect(migration.shouldMigrate(cleanV2Settings)).toBe(true); + }); + + it('should return false for null input', () => { + expect(migration.shouldMigrate(null)).toBe(false); + }); + + it('should return false for non-object input', () => { + expect(migration.shouldMigrate('string')).toBe(false); + expect(migration.shouldMigrate(123)).toBe(false); + }); + }); + + describe('migrate', () => { + it('should migrate disableAutoUpdate to enableAutoUpdate with inverted value', () => { + const v2Settings = { + $version: 2, + general: { disableAutoUpdate: true }, + }; + + const { settings: result } = migration.migrate(v2Settings, 'user') as { + settings: Record; + warnings: unknown[]; + }; + + expect(result['$version']).toBe(3); + expect( + (result['general'] as Record)['enableAutoUpdate'], + ).toBe(false); + expect( + (result['general'] as Record)['disableAutoUpdate'], + ).toBeUndefined(); + }); + + it('should migrate disableLoadingPhrases 
to enableLoadingPhrases', () => { + const v2Settings = { + $version: 2, + ui: { accessibility: { disableLoadingPhrases: true } }, + }; + + const { settings: result } = migration.migrate(v2Settings, 'user') as { + settings: Record; + warnings: unknown[]; + }; + + expect(result['$version']).toBe(3); + expect( + (result['ui'] as Record)['accessibility'], + ).toEqual({ + enableLoadingPhrases: false, + }); + }); + + it('should migrate disableFuzzySearch to enableFuzzySearch', () => { + const v2Settings = { + $version: 2, + context: { fileFiltering: { disableFuzzySearch: false } }, + }; + + const { settings: result } = migration.migrate(v2Settings, 'user') as { + settings: Record; + warnings: unknown[]; + }; + + expect(result['$version']).toBe(3); + expect( + (result['context'] as Record)['fileFiltering'], + ).toEqual({ + enableFuzzySearch: true, + }); + }); + + it('should migrate disableCacheControl to enableCacheControl', () => { + const v2Settings = { + $version: 2, + model: { generationConfig: { disableCacheControl: true } }, + }; + + const { settings: result } = migration.migrate(v2Settings, 'user') as { + settings: Record; + warnings: unknown[]; + }; + + expect(result['$version']).toBe(3); + expect( + (result['model'] as Record)['generationConfig'], + ).toEqual({ + enableCacheControl: false, + }); + }); + + it('should handle consolidated disableAutoUpdate and disableUpdateNag', () => { + const v2Settings = { + $version: 2, + general: { + disableAutoUpdate: true, + disableUpdateNag: false, + }, + }; + + const { settings: result } = migration.migrate(v2Settings, 'user') as { + settings: Record; + warnings: unknown[]; + }; + + expect(result['$version']).toBe(3); + // If ANY disable* is true, enable should be false + expect( + (result['general'] as Record)['enableAutoUpdate'], + ).toBe(false); + expect( + (result['general'] as Record)['disableAutoUpdate'], + ).toBeUndefined(); + expect( + (result['general'] as Record)['disableUpdateNag'], + ).toBeUndefined(); + }); + + 
it('should set enableAutoUpdate to true when both disable* are false', () => { + const v2Settings = { + $version: 2, + general: { + disableAutoUpdate: false, + disableUpdateNag: false, + }, + }; + + const { settings: result } = migration.migrate(v2Settings, 'user') as { + settings: Record; + warnings: unknown[]; + }; + + expect(result['$version']).toBe(3); + expect( + (result['general'] as Record)['enableAutoUpdate'], + ).toBe(true); + }); + + it('should preserve other settings during migration', () => { + const v2Settings = { + $version: 2, + ui: { + theme: 'dark', + accessibility: { disableLoadingPhrases: true }, + }, + model: { + name: 'gemini', + }, + }; + + const { settings: result } = migration.migrate(v2Settings, 'user') as { + settings: Record; + warnings: unknown[]; + }; + + expect(result['$version']).toBe(3); + expect((result['ui'] as Record)['theme']).toBe('dark'); + expect((result['model'] as Record)['name']).toBe( + 'gemini', + ); + expect( + (result['ui'] as Record)['accessibility'], + ).toEqual({ + enableLoadingPhrases: false, + }); + }); + + it('should not modify the input object', () => { + const v2Settings = { + $version: 2, + general: { disableAutoUpdate: true }, + }; + + const result = migration.migrate(v2Settings, 'user'); + + expect(v2Settings.general).toEqual({ disableAutoUpdate: true }); + expect(result).not.toBe(v2Settings); + }); + + it('should throw error for non-object input', () => { + expect(() => migration.migrate(null, 'user')).toThrow( + 'Settings must be an object', + ); + expect(() => migration.migrate('string', 'user')).toThrow( + 'Settings must be an object', + ); + }); + + it('should handle multiple deprecated keys in one migration', () => { + const v2Settings = { + $version: 2, + general: { disableAutoUpdate: false }, + ui: { accessibility: { disableLoadingPhrases: false } }, + context: { fileFiltering: { disableFuzzySearch: false } }, + }; + + const { settings: result } = migration.migrate(v2Settings, 'user') as { + settings: 
Record; + warnings: unknown[]; + }; + + expect(result['$version']).toBe(3); + expect( + (result['general'] as Record)['enableAutoUpdate'], + ).toBe(true); + expect( + (result['ui'] as Record)['accessibility'], + ).toEqual({ + enableLoadingPhrases: true, + }); + expect( + (result['context'] as Record)['fileFiltering'], + ).toEqual({ + enableFuzzySearch: true, + }); + }); + + it('should coerce string "true" and remove deprecated key', () => { + const v2Settings = { + $version: 2, + general: { disableAutoUpdate: 'true' }, + }; + + const { settings: result, warnings } = migration.migrate( + v2Settings, + 'user', + ) as { + settings: Record; + warnings: string[]; + }; + + expect(result['$version']).toBe(3); + expect( + (result['general'] as Record)['disableAutoUpdate'], + ).toBeUndefined(); + expect( + (result['general'] as Record)['enableAutoUpdate'], + ).toBe(false); + expect(warnings).toHaveLength(0); + }); + + it('should coerce string "false" and remove deprecated key', () => { + const v2Settings = { + $version: 2, + general: { disableAutoUpdate: 'false' }, + }; + + const { settings: result, warnings } = migration.migrate( + v2Settings, + 'user', + ) as { + settings: Record; + warnings: string[]; + }; + + expect(result['$version']).toBe(3); + expect( + (result['general'] as Record)['disableAutoUpdate'], + ).toBeUndefined(); + expect( + (result['general'] as Record)['enableAutoUpdate'], + ).toBe(true); + expect(warnings).toHaveLength(0); + }); + + it('should coerce case-insensitive strings for consolidated keys', () => { + const v2Settings = { + $version: 2, + general: { + disableAutoUpdate: 'TRUE', + disableUpdateNag: 'FALSE', + }, + }; + + const { settings: result, warnings } = migration.migrate( + v2Settings, + 'user', + ) as { + settings: Record; + warnings: string[]; + }; + + expect(result['$version']).toBe(3); + expect( + (result['general'] as Record)['disableAutoUpdate'], + ).toBeUndefined(); + expect( + (result['general'] as Record)['disableUpdateNag'], + 
).toBeUndefined(); + expect( + (result['general'] as Record)['enableAutoUpdate'], + ).toBe(false); + expect(warnings).toHaveLength(0); + }); + + it('should remove number value and emit warning', () => { + const v2Settings = { + $version: 2, + general: { disableAutoUpdate: 123 }, + }; + + const { settings: result, warnings } = migration.migrate( + v2Settings, + 'user', + ) as { + settings: Record; + warnings: string[]; + }; + + expect(result['$version']).toBe(3); + expect( + (result['general'] as Record)['disableAutoUpdate'], + ).toBeUndefined(); + expect( + (result['general'] as Record)['enableAutoUpdate'], + ).toBeUndefined(); + expect(warnings).toHaveLength(1); + expect(warnings[0]).toContain('general.disableAutoUpdate'); + }); + + it('should remove invalid string value and emit warning', () => { + const v2Settings = { + $version: 2, + general: { disableAutoUpdate: 'invalid-string' }, + }; + + const { settings: result, warnings } = migration.migrate( + v2Settings, + 'user', + ) as { + settings: Record; + warnings: string[]; + }; + + expect(result['$version']).toBe(3); + expect( + (result['general'] as Record)['disableAutoUpdate'], + ).toBeUndefined(); + expect( + (result['general'] as Record)['enableAutoUpdate'], + ).toBeUndefined(); + expect(warnings).toHaveLength(1); + expect(warnings[0]).toContain('general.disableAutoUpdate'); + }); + + it('should coerce disableCacheControl string "true"', () => { + const v2Settings = { + $version: 2, + model: { generationConfig: { disableCacheControl: 'true' } }, + }; + + const { settings: result, warnings } = migration.migrate( + v2Settings, + 'user', + ) as { + settings: Record; + warnings: string[]; + }; + + expect(result['$version']).toBe(3); + expect( + (result['model'] as Record)['generationConfig'], + ).toEqual({ + enableCacheControl: false, + }); + expect(warnings).toHaveLength(0); + }); + + it('should coerce disableCacheControl string "false"', () => { + const v2Settings = { + $version: 2, + model: { 
generationConfig: { disableCacheControl: 'false' } }, + }; + + const { settings: result, warnings } = migration.migrate( + v2Settings, + 'user', + ) as { + settings: Record; + warnings: string[]; + }; + + expect(result['$version']).toBe(3); + expect( + (result['model'] as Record)['generationConfig'], + ).toEqual({ + enableCacheControl: true, + }); + expect(warnings).toHaveLength(0); + }); + + it('should remove disableCacheControl number value and emit warning', () => { + const v2Settings = { + $version: 2, + model: { generationConfig: { disableCacheControl: 456 } }, + }; + + const { settings: result, warnings } = migration.migrate( + v2Settings, + 'user', + ) as { + settings: Record; + warnings: string[]; + }; + + expect(result['$version']).toBe(3); + expect( + (result['model'] as Record)['generationConfig'], + ).toEqual({}); + expect( + ( + (result['model'] as Record)[ + 'generationConfig' + ] as Record + )['enableCacheControl'], + ).toBeUndefined(); + expect(warnings).toHaveLength(1); + expect(warnings[0]).toContain( + 'model.generationConfig.disableCacheControl', + ); + }); + + it('should handle mixed valid and invalid disableAutoUpdate and disableUpdateNag', () => { + const v2Settings = { + $version: 2, + general: { + disableAutoUpdate: true, + disableUpdateNag: 'invalid', + }, + }; + + const { settings: result, warnings } = migration.migrate( + v2Settings, + 'user', + ) as { + settings: Record; + warnings: string[]; + }; + + expect(result['$version']).toBe(3); + // Only valid values should contribute to the consolidated result + // Since disableAutoUpdate is true, enableAutoUpdate should be false + expect( + (result['general'] as Record)['enableAutoUpdate'], + ).toBe(false); + expect( + (result['general'] as Record)['disableAutoUpdate'], + ).toBeUndefined(); + expect( + (result['general'] as Record)['disableUpdateNag'], + ).toBeUndefined(); + expect(warnings).toHaveLength(1); + expect(warnings[0]).toContain('general.disableUpdateNag'); + }); + + it('should 
remove object value for disable key and emit warning', () => { + const v2Settings = { + $version: 2, + general: { disableAutoUpdate: { nested: 'value' } }, + }; + + const { settings: result, warnings } = migration.migrate( + v2Settings, + 'user', + ) as { + settings: Record; + warnings: string[]; + }; + + expect(result['$version']).toBe(3); + expect( + (result['general'] as Record)['disableAutoUpdate'], + ).toBeUndefined(); + expect( + (result['general'] as Record)['enableAutoUpdate'], + ).toBeUndefined(); + expect(warnings).toHaveLength(1); + expect(warnings[0]).toContain('general.disableAutoUpdate'); + }); + + it('should remove array value for disable key and emit warning', () => { + const v2Settings = { + $version: 2, + general: { disableAutoUpdate: [1, 2, 3] }, + }; + + const { settings: result, warnings } = migration.migrate( + v2Settings, + 'user', + ) as { + settings: Record; + warnings: string[]; + }; + + expect(result['$version']).toBe(3); + expect( + (result['general'] as Record)['disableAutoUpdate'], + ).toBeUndefined(); + expect( + (result['general'] as Record)['enableAutoUpdate'], + ).toBeUndefined(); + expect(warnings).toHaveLength(1); + expect(warnings[0]).toContain('general.disableAutoUpdate'); + }); + + it('should remove null value for disable key and emit warning', () => { + const v2Settings = { + $version: 2, + general: { disableAutoUpdate: null }, + }; + + const { settings: result, warnings } = migration.migrate( + v2Settings, + 'user', + ) as { + settings: Record; + warnings: string[]; + }; + + expect(result['$version']).toBe(3); + expect( + (result['general'] as Record)['disableAutoUpdate'], + ).toBeUndefined(); + expect( + (result['general'] as Record)['enableAutoUpdate'], + ).toBeUndefined(); + expect(warnings).toHaveLength(1); + expect(warnings[0]).toContain('general.disableAutoUpdate'); + }); + }); + + describe('version properties', () => { + it('should have correct fromVersion', () => { + expect(migration.fromVersion).toBe(2); + }); + + 
it('should have correct toVersion', () => { + expect(migration.toVersion).toBe(3); + }); + }); +}); diff --git a/packages/cli/src/config/migration/versions/v2-to-v3.ts b/packages/cli/src/config/migration/versions/v2-to-v3.ts new file mode 100644 index 000000000..6c0133443 --- /dev/null +++ b/packages/cli/src/config/migration/versions/v2-to-v3.ts @@ -0,0 +1,222 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import type { SettingsMigration } from '../types.js'; +import { + deleteNestedPropertySafe, + getNestedProperty, + setNestedPropertySafe, +} from '../../../utils/settingsUtils.js'; + +/** + * Path mapping for boolean polarity migration (V2 disable* -> V3 enable*). + * + * Strategy: + * - For each mapped path, values are normalized before migration: + * - boolean values are accepted directly + * - string values "true"/"false" (case-insensitive, trim-aware) are coerced + * - all other present values are treated as invalid + * - Transformation is inversion-based: disable=true -> enable=false, disable=false -> enable=true. + * - Deprecated disable* keys are removed whenever present (valid or invalid). + * - Invalid values do not create enable* keys and produce warnings. + */ +const V2_TO_V3_BOOLEAN_MAP: Record<string, string> = { + 'general.disableAutoUpdate': 'general.enableAutoUpdate', + 'general.disableUpdateNag': 'general.enableAutoUpdate', + 'ui.accessibility.disableLoadingPhrases': + 'ui.accessibility.enableLoadingPhrases', + 'context.fileFiltering.disableFuzzySearch': + 'context.fileFiltering.enableFuzzySearch', + 'model.generationConfig.disableCacheControl': + 'model.generationConfig.enableCacheControl', +}; + +/** + * Consolidated old paths that collapse into one V3 field. + * + * Current policy: + * - `general.disableAutoUpdate` and `general.disableUpdateNag` both drive + * `general.enableAutoUpdate`. + * - If any valid normalized source is true, target becomes false.
+ * - If at least one valid normalized source exists, consolidated target is emitted. + * - Invalid present values are removed and warned, and do not contribute to target calculation. + */ +const CONSOLIDATED_V2_PATHS: Record<string, string[]> = { + 'general.enableAutoUpdate': [ + 'general.disableAutoUpdate', + 'general.disableUpdateNag', + ], +}; + +/** + * Normalizes deprecated disable* values for migration. + * + * Returns: + * - `isPresent=false` when the path does not exist + * - `isPresent=true, isValid=true` when value is boolean or coercible string + * - `isPresent=true, isValid=false` for invalid values (number/object/array/null/other strings) + */ +function normalizeDisableValue(value: unknown): { + isPresent: boolean; + isValid: boolean; + booleanValue?: boolean; +} { + if (value === undefined) { + return { isPresent: false, isValid: false }; + } + if (typeof value === 'boolean') { + return { isPresent: true, isValid: true, booleanValue: value }; + } + if (typeof value === 'string') { + const normalized = value.trim().toLowerCase(); + if (normalized === 'true') { + return { isPresent: true, isValid: true, booleanValue: true }; + } + if (normalized === 'false') { + return { isPresent: true, isValid: true, booleanValue: false }; + } + } + return { isPresent: true, isValid: false }; +} + +/** + * V2 -> V3 migration (boolean polarity normalization stage). + * + * Migration contract: + * - Input: V2 settings object (`$version: 2`). + * - Output: `$version: 3` with deprecated disable* fields removed and + * valid values migrated to enable* equivalents. + * + * Compatibility strategy: + * - Accept boolean values and coercible strings "true"/"false". + * - Remove invalid deprecated values (rather than preserving them). + * - Emit warnings for each removed invalid deprecated key. + * - Always bump version to 3 so future loads are idempotent and skip repeated checks.
+ */ +export class V2ToV3Migration implements SettingsMigration { + readonly fromVersion = 2; + readonly toVersion = 3; + + /** + * Migration trigger rule. + * + * Execute only when `$version === 2`. + * This includes V2 files with no migratable disable* booleans so that version + * metadata still advances to 3. + */ + shouldMigrate(settings: unknown): boolean { + if (typeof settings !== 'object' || settings === null) { + return false; + } + + const s = settings as Record<string, unknown>; + + // Migrate if $version is 2 + return s['$version'] === 2; + } + + /** + * Applies V2 -> V3 transformation with deterministic deprecated-key cleanup. + * + * Detailed strategy: + * 1) Clone input. + * 2) Process consolidated paths first: + * - Inspect each source path. + * - Normalize each present value (boolean / coercible string / invalid). + * - Always delete present deprecated source key. + * - Valid normalized values contribute to aggregate. + * - Invalid values emit warnings. + * - Emit consolidated target when at least one valid source was consumed. + * 3) Process remaining one-to-one mappings: + * - For each unmapped source, normalize value. + * - If valid -> delete old key and write inverted target. + * - If invalid -> delete old key and emit warning. + * 4) Set `$version = 3`. + * + * Guarantees: + * - Input object is not mutated. + * - Valid migration and invalid cleanup are deterministic. + * - Deprecated disable* keys are not retained after migration.
+ */ + migrate( + settings: unknown, + scope: string, + ): { settings: unknown; warnings: string[] } { + if (typeof settings !== 'object' || settings === null) { + throw new Error('Settings must be an object'); + } + + // Deep clone to avoid mutating input + const result = structuredClone(settings) as Record<string, unknown>; + const processedPaths = new Set<string>(); + const warnings: string[] = []; + + // Step 1: Handle consolidated paths (multiple old paths → single new path) + // Policy: if ANY of the old disable* settings is true, the new enable* should be false + for (const [newPath, oldPaths] of Object.entries(CONSOLIDATED_V2_PATHS)) { + let hasAnyDisable = false; + let hasAnyBooleanValue = false; + + for (const oldPath of oldPaths) { + const oldValue = getNestedProperty(result, oldPath); + const normalized = normalizeDisableValue(oldValue); + if (!normalized.isPresent) { + continue; + } + + deleteNestedPropertySafe(result, oldPath); + processedPaths.add(oldPath); + + if (normalized.isValid) { + hasAnyBooleanValue = true; + if (normalized.booleanValue === true) { + hasAnyDisable = true; + } + } else { + warnings.push( + `Removed deprecated setting '${oldPath}' from ${scope} settings because the value is invalid.
Expected boolean.`, + ); + } + } + + if (hasAnyBooleanValue) { + // enableAutoUpdate = !hasAnyDisable (if any disable* was true, enable should be false) + setNestedPropertySafe(result, newPath, !hasAnyDisable); + } + } + + // Step 2: Handle remaining individual disable* → enable* mappings + for (const [oldPath, newPath] of Object.entries(V2_TO_V3_BOOLEAN_MAP)) { + if (processedPaths.has(oldPath)) { + continue; + } + + const oldValue = getNestedProperty(result, oldPath); + const normalized = normalizeDisableValue(oldValue); + if (!normalized.isPresent) { + continue; + } + + deleteNestedPropertySafe(result, oldPath); + if (normalized.isValid) { + // Set new property with inverted value + setNestedPropertySafe(result, newPath, !normalized.booleanValue); + } else { + warnings.push( + `Removed deprecated setting '${oldPath}' from ${scope} settings because the value is invalid. Expected boolean or string "true"/"false".`, + ); + } + } + + // Step 3: Always update version to 3 + result['$version'] = 3; + + return { settings: result, warnings }; + } +} + +/** Singleton instance of V2→V3 migration */ +export const v2ToV3Migration = new V2ToV3Migration(); diff --git a/packages/cli/src/config/settings.test.ts b/packages/cli/src/config/settings.test.ts index 762ad4b10..2234c9ea4 100644 --- a/packages/cli/src/config/settings.test.ts +++ b/packages/cli/src/config/settings.test.ts @@ -18,16 +18,6 @@ vi.mock('os', async (importOriginal) => { }; }); -// Mock './settings.js' to ensure it uses the mocked 'os.homedir()' for its internal constants. 
-vi.mock('./settings.js', async (importActual) => { - const originalModule = await importActual(); - return { - __esModule: true, // Ensure correct module shape - ...originalModule, // Re-export all original members - // We are relying on originalModule's USER_SETTINGS_PATH being constructed with mocked os.homedir() - }; -}); - // Mock trustedFolders vi.mock('./trustedFolders.js', () => ({ isWorkspaceTrusted: vi @@ -46,7 +36,6 @@ import { afterEach, type Mocked, type Mock, - fail, } from 'vitest'; import * as fs from 'node:fs'; // fs will be mocked separately import stripJsonComments from 'strip-json-comments'; // Will be mocked separately @@ -60,13 +49,12 @@ import { getSystemSettingsPath, getSystemDefaultsPath, SETTINGS_DIRECTORY_NAME, // This is from the original module, but used by the mock. - migrateSettingsToV1, - needsMigration, type Settings, loadEnvironment, SETTINGS_VERSION, SETTINGS_VERSION_KEY, } from './settings.js'; +import { needsMigration } from './migration/index.js'; import { FatalConfigError, QWEN_DIR } from '@qwen-code/qwen-code-core'; const MOCK_WORKSPACE_DIR = '/mock/workspace'; @@ -84,6 +72,23 @@ type TestSettings = Settings & { nestedObj?: { [key: string]: unknown }; }; +vi.mock('node:fs', async (importOriginal) => { + // Get all the functions from the real 'fs' module + const actualFs = await importOriginal(); + + return { + ...actualFs, // Keep all the real functions + // Now, just override the ones we need for the test + existsSync: vi.fn(), + readFileSync: vi.fn(), + writeFileSync: vi.fn(), + renameSync: vi.fn(), + mkdirSync: vi.fn(), + realpathSync: (p: string) => p, + }; +}); + +// Also mock 'fs' for compatibility vi.mock('fs', async (importOriginal) => { // Get all the functions from the real 'fs' module const actualFs = await importOriginal(); @@ -588,19 +593,22 @@ describe('Settings Loading and Merging', () => { loadSettings(MOCK_WORKSPACE_DIR); - // Verify that fs.writeFileSync was called (to add version) - // but NOT fs.renameSync 
(no backup needed, just adding version) - expect(fs.renameSync).not.toHaveBeenCalled(); - expect(fs.writeFileSync).toHaveBeenCalledTimes(1); - - const writeCall = (fs.writeFileSync as Mock).mock.calls[0]; - const writtenPath = writeCall[0]; + // Version normalization now uses writeWithBackupSync (temp write + rename) + // Verify that writeFileSync was called with the temp file path + const writeCall = (fs.writeFileSync as Mock).mock.calls.find( + (call: unknown[]) => call[0] === `${USER_SETTINGS_PATH}.tmp`, + ); + expect(writeCall).toBeDefined(); + if (!writeCall) { + throw new Error('Expected temp write call for version normalization'); + } const writtenContent = JSON.parse(writeCall[1] as string); - expect(writtenPath).toBe(USER_SETTINGS_PATH); expect(writtenContent[SETTINGS_VERSION_KEY]).toBe(SETTINGS_VERSION); expect(writtenContent.ui?.theme).toBe('dark'); expect(writtenContent.model?.name).toBe('qwen-coder'); + // Verify writeWithBackupSync was called by checking temp file write + expect(fs.writeFileSync).toHaveBeenCalled(); }); it('should correctly handle partially migrated settings without version field', () => { @@ -728,14 +736,85 @@ describe('Settings Loading and Merging', () => { loadSettings(MOCK_WORKSPACE_DIR); // Version should be bumped to 3 even though no keys needed migration + // writeWithBackupSync writes to a temp file first, then renames const writeCall = (fs.writeFileSync as Mock).mock.calls.find( - (call: unknown[]) => call[0] === USER_SETTINGS_PATH, + (call: unknown[]) => call[0] === `${USER_SETTINGS_PATH}.tmp`, ); expect(writeCall).toBeDefined(); + if (!writeCall) { + throw new Error('Expected temp write call for V2->V3 version bump'); + } const writtenContent = JSON.parse(writeCall[1] as string); expect(writtenContent.$version).toBe(SETTINGS_VERSION); }); + it('should normalize invalid version metadata when no migration is applicable', () => { + (mockFsExistsSync as Mock).mockImplementation( + (p: fs.PathLike) => p === USER_SETTINGS_PATH, + 
); + const invalidVersionSettings = { + $version: 'invalid-version', + general: { + enableAutoUpdate: true, + }, + }; + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === USER_SETTINGS_PATH) + return JSON.stringify(invalidVersionSettings); + return '{}'; + }, + ); + + loadSettings(MOCK_WORKSPACE_DIR); + + const writeCall = (fs.writeFileSync as Mock).mock.calls.find( + (call: unknown[]) => call[0] === `${USER_SETTINGS_PATH}.tmp`, + ); + expect(writeCall).toBeDefined(); + if (!writeCall) { + throw new Error( + 'Expected temp write call for invalid version normalization', + ); + } + const writtenContent = JSON.parse(writeCall[1] as string); + expect(writtenContent.$version).toBe(SETTINGS_VERSION); + expect(writtenContent.general?.enableAutoUpdate).toBe(true); + }); + + it('should normalize legacy numeric version when no migration can execute', () => { + (mockFsExistsSync as Mock).mockImplementation( + (p: fs.PathLike) => p === USER_SETTINGS_PATH, + ); + const staleVersionSettings = { + $version: 1, + // No V1/V2 indicators recognized by migrations + customOnlyKey: 'value', + }; + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === USER_SETTINGS_PATH) + return JSON.stringify(staleVersionSettings); + return '{}'; + }, + ); + + loadSettings(MOCK_WORKSPACE_DIR); + + const writeCall = (fs.writeFileSync as Mock).mock.calls.find( + (call: unknown[]) => call[0] === `${USER_SETTINGS_PATH}.tmp`, + ); + expect(writeCall).toBeDefined(); + if (!writeCall) { + throw new Error( + 'Expected temp write call for stale version normalization', + ); + } + const writtenContent = JSON.parse(writeCall[1] as string); + expect(writtenContent.$version).toBe(SETTINGS_VERSION); + expect(writtenContent.customOnlyKey).toBe('value'); + }); + it('should correctly merge and migrate legacy array properties from multiple scopes', () => { (mockFsExistsSync as Mock).mockReturnValue(true); const legacyUserSettings = { @@ 
-1613,7 +1692,7 @@ describe('Settings Loading and Merging', () => { try { loadSettings(MOCK_WORKSPACE_DIR); - fail('loadSettings should have thrown a FatalConfigError'); + throw new Error('loadSettings should have thrown a FatalConfigError'); } catch (e) { expect(e).toBeInstanceOf(FatalConfigError); const error = e as FatalConfigError; @@ -2255,385 +2334,6 @@ describe('Settings Loading and Merging', () => { }); }); - describe('migrateSettingsToV1', () => { - it('should handle an empty object', () => { - const v2Settings = {}; - const v1Settings = migrateSettingsToV1(v2Settings); - expect(v1Settings).toEqual({}); - }); - - it('should migrate a simple v2 settings object to v1', () => { - const v2Settings = { - general: { - preferredEditor: 'vscode', - vimMode: true, - }, - ui: { - theme: 'dark', - }, - }; - const v1Settings = migrateSettingsToV1(v2Settings); - expect(v1Settings).toEqual({ - preferredEditor: 'vscode', - vimMode: true, - theme: 'dark', - }); - }); - - it('should handle nested properties correctly', () => { - const v2Settings = { - security: { - folderTrust: { - enabled: true, - }, - auth: { - selectedType: 'oauth', - }, - }, - advanced: { - autoConfigureMemory: true, - }, - }; - const v1Settings = migrateSettingsToV1(v2Settings); - expect(v1Settings).toEqual({ - folderTrust: true, - selectedAuthType: 'oauth', - autoConfigureMaxOldSpaceSize: true, - }); - }); - - it('should preserve mcpServers at the top level', () => { - const v2Settings = { - general: { - preferredEditor: 'vscode', - }, - mcpServers: { - 'my-server': { - command: 'npm start', - }, - }, - }; - const v1Settings = migrateSettingsToV1(v2Settings); - expect(v1Settings).toEqual({ - preferredEditor: 'vscode', - mcpServers: { - 'my-server': { - command: 'npm start', - }, - }, - }); - }); - - it('should carry over unrecognized top-level properties', () => { - const v2Settings = { - general: { - vimMode: false, - }, - unrecognized: 'value', - another: { - nested: true, - }, - }; - const 
v1Settings = migrateSettingsToV1(v2Settings); - expect(v1Settings).toEqual({ - vimMode: false, - unrecognized: 'value', - another: { - nested: true, - }, - }); - }); - - it('should handle a complex object with mixed properties', () => { - const v2Settings = { - general: { - disableAutoUpdate: true, - }, - ui: { - hideTips: true, - customThemes: { - myTheme: {}, - }, - }, - model: { - name: 'gemini-pro', - chatCompression: { - contextPercentageThreshold: 0.5, - }, - }, - mcpServers: { - 'server-1': { - command: 'node server.js', - }, - }, - unrecognized: { - should: 'be-preserved', - }, - }; - const v1Settings = migrateSettingsToV1(v2Settings); - expect(v1Settings).toEqual({ - disableAutoUpdate: true, - hideTips: true, - customThemes: { - myTheme: {}, - }, - model: 'gemini-pro', - chatCompression: { - contextPercentageThreshold: 0.5, - }, - mcpServers: { - 'server-1': { - command: 'node server.js', - }, - }, - unrecognized: { - should: 'be-preserved', - }, - }); - }); - - it('should not migrate a v1 settings object', () => { - const v1Settings = { - preferredEditor: 'vscode', - vimMode: true, - theme: 'dark', - }; - const migratedSettings = migrateSettingsToV1(v1Settings); - expect(migratedSettings).toEqual({ - preferredEditor: 'vscode', - vimMode: true, - theme: 'dark', - }); - }); - - it('should migrate a full v2 settings object to v1', () => { - const v2Settings: TestSettings = { - general: { - preferredEditor: 'code', - vimMode: true, - }, - ui: { - theme: 'dark', - }, - privacy: { - usageStatisticsEnabled: false, - }, - model: { - name: 'gemini-pro', - chatCompression: { - contextPercentageThreshold: 0.8, - }, - }, - context: { - fileName: 'CONTEXT.md', - includeDirectories: ['/src'], - }, - tools: { - sandbox: true, - exclude: ['toolA'], - }, - mcp: { - allowed: ['server1'], - }, - security: { - folderTrust: { - enabled: true, - }, - }, - advanced: { - dnsResolutionOrder: 'ipv4first', - excludedEnvVars: ['SECRET'], - }, - mcpServers: { - 'my-server': { - 
command: 'npm start', - }, - }, - unrecognizedTopLevel: { - value: 'should be preserved', - }, - }; - - const v1Settings = migrateSettingsToV1(v2Settings); - - expect(v1Settings).toEqual({ - preferredEditor: 'code', - vimMode: true, - theme: 'dark', - usageStatisticsEnabled: false, - model: 'gemini-pro', - chatCompression: { - contextPercentageThreshold: 0.8, - }, - contextFileName: 'CONTEXT.md', - includeDirectories: ['/src'], - sandbox: true, - excludeTools: ['toolA'], - allowMCPServers: ['server1'], - folderTrust: true, - dnsResolutionOrder: 'ipv4first', - excludedProjectEnvVars: ['SECRET'], - mcpServers: { - 'my-server': { - command: 'npm start', - }, - }, - unrecognizedTopLevel: { - value: 'should be preserved', - }, - }); - }); - - it('should handle partial v2 settings', () => { - const v2Settings: TestSettings = { - general: { - vimMode: false, - }, - ui: {}, - model: { - name: 'gemini-1.5-pro', - }, - unrecognized: 'value', - }; - - const v1Settings = migrateSettingsToV1(v2Settings); - - expect(v1Settings).toEqual({ - vimMode: false, - model: 'gemini-1.5-pro', - unrecognized: 'value', - }); - }); - - it('should handle settings with different data types', () => { - const v2Settings: TestSettings = { - general: { - vimMode: false, - }, - model: { - maxSessionTurns: -1, - }, - context: { - includeDirectories: [], - }, - security: { - folderTrust: { - enabled: false, - }, - }, - }; - - const v1Settings = migrateSettingsToV1(v2Settings); - - expect(v1Settings).toEqual({ - vimMode: false, - maxSessionTurns: -1, - includeDirectories: [], - folderTrust: false, - }); - }); - - it('should preserve unrecognized top-level keys', () => { - const v2Settings: TestSettings = { - general: { - vimMode: true, - }, - customTopLevel: { - a: 1, - b: [2], - }, - anotherOne: 'hello', - }; - - const v1Settings = migrateSettingsToV1(v2Settings); - - expect(v1Settings).toEqual({ - vimMode: true, - customTopLevel: { - a: 1, - b: [2], - }, - anotherOne: 'hello', - }); - }); - - 
it('should handle an empty v2 settings object', () => { - const v2Settings = {}; - const v1Settings = migrateSettingsToV1(v2Settings); - expect(v1Settings).toEqual({}); - }); - - it('should correctly handle mcpServers at the top level', () => { - const v2Settings: TestSettings = { - mcpServers: { - serverA: { command: 'a' }, - }, - mcp: { - allowed: ['serverA'], - }, - }; - - const v1Settings = migrateSettingsToV1(v2Settings); - - expect(v1Settings).toEqual({ - mcpServers: { - serverA: { command: 'a' }, - }, - allowMCPServers: ['serverA'], - }); - }); - - it('should correctly migrate customWittyPhrases', () => { - const v2Settings: Partial = { - ui: { - customWittyPhrases: ['test phrase'], - }, - }; - const v1Settings = migrateSettingsToV1(v2Settings as Settings); - expect(v1Settings).toEqual({ - customWittyPhrases: ['test phrase'], - }); - }); - - it('should remove version field when migrating to V1', () => { - const v2Settings = { - [SETTINGS_VERSION_KEY]: SETTINGS_VERSION, - ui: { - theme: 'dark', - }, - model: { - name: 'qwen-coder', - }, - }; - const v1Settings = migrateSettingsToV1(v2Settings); - - // Version field should not be present in V1 settings - expect(v1Settings[SETTINGS_VERSION_KEY]).toBeUndefined(); - // Other fields should be properly migrated - expect(v1Settings).toEqual({ - theme: 'dark', - model: 'qwen-coder', - }); - }); - - it('should handle version field in unrecognized properties', () => { - const v2Settings = { - [SETTINGS_VERSION_KEY]: SETTINGS_VERSION, - general: { - vimMode: true, - }, - someUnrecognizedKey: 'value', - }; - const v1Settings = migrateSettingsToV1(v2Settings); - - // Version field should be filtered out - expect(v1Settings[SETTINGS_VERSION_KEY]).toBeUndefined(); - // Unrecognized keys should be preserved - expect(v1Settings['someUnrecognizedKey']).toBe('value'); - expect(v1Settings['vimMode']).toBe(true); - }); - }); - describe('loadEnvironment', () => { function setup({ isFolderTrustEnabled = true, diff --git 
a/packages/cli/src/config/settings.ts b/packages/cli/src/config/settings.ts index 434990508..831c3c2c6 100644 --- a/packages/cli/src/config/settings.ts +++ b/packages/cli/src/config/settings.ts @@ -14,6 +14,8 @@ import { QWEN_DIR, getErrorMessage, Storage, + setDebugLogSession, + sanitizeCwd, createDebugLogger, } from '@qwen-code/qwen-code-core'; import stripJsonComments from 'strip-json-comments'; @@ -29,9 +31,16 @@ import { getSettingsSchema, } from './settingsSchema.js'; import { resolveEnvVarsInObject } from '../utils/envVarResolver.js'; -import { customDeepMerge, type MergeableObject } from '../utils/deepMerge.js'; +import { setNestedPropertySafe } from '../utils/settingsUtils.js'; +import { customDeepMerge } from '../utils/deepMerge.js'; import { updateSettingsFilePreservingFormat } from '../utils/commentJson.js'; -import { writeStderrLine } from '../utils/stdioHelpers.js'; +const debugLogger = createDebugLogger('SETTINGS'); +import { runMigrations, needsMigration } from './migration/index.js'; +import { + V1_TO_V2_MIGRATION_MAP, + V2_CONTAINER_KEYS, +} from './migration/versions/v1-to-v2-shared.js'; +import { writeWithBackupSync } from '../utils/writeWithBackup.js'; const debugLogger = createDebugLogger('SETTINGS'); @@ -57,113 +66,10 @@ export const USER_SETTINGS_PATH = Storage.getGlobalSettingsPath(); export const USER_SETTINGS_DIR = path.dirname(USER_SETTINGS_PATH); export const DEFAULT_EXCLUDED_ENV_VARS = ['DEBUG', 'DEBUG_MODE']; -const MIGRATE_V2_OVERWRITE = true; - // Settings version to track migration state export const SETTINGS_VERSION = 3; export const SETTINGS_VERSION_KEY = '$version'; -const MIGRATION_MAP: Record = { - accessibility: 'ui.accessibility', - allowedTools: 'tools.allowed', - allowMCPServers: 'mcp.allowed', - autoAccept: 'tools.autoAccept', - autoConfigureMaxOldSpaceSize: 'advanced.autoConfigureMemory', - bugCommand: 'advanced.bugCommand', - chatCompression: 'model.chatCompression', - checkpointing: 'general.checkpointing', - 
coreTools: 'tools.core', - contextFileName: 'context.fileName', - customThemes: 'ui.customThemes', - customWittyPhrases: 'ui.customWittyPhrases', - debugKeystrokeLogging: 'general.debugKeystrokeLogging', - dnsResolutionOrder: 'advanced.dnsResolutionOrder', - enforcedAuthType: 'security.auth.enforcedType', - excludeTools: 'tools.exclude', - excludeMCPServers: 'mcp.excluded', - excludedProjectEnvVars: 'advanced.excludedEnvVars', - extensions: 'extensions', - fileFiltering: 'context.fileFiltering', - folderTrustFeature: 'security.folderTrust.featureEnabled', - folderTrust: 'security.folderTrust.enabled', - hasSeenIdeIntegrationNudge: 'ide.hasSeenNudge', - hideWindowTitle: 'ui.hideWindowTitle', - showStatusInTitle: 'ui.showStatusInTitle', - hideTips: 'ui.hideTips', - showLineNumbers: 'ui.showLineNumbers', - showCitations: 'ui.showCitations', - ideMode: 'ide.enabled', - includeDirectories: 'context.includeDirectories', - loadMemoryFromIncludeDirectories: 'context.loadFromIncludeDirectories', - maxSessionTurns: 'model.maxSessionTurns', - mcpServers: 'mcpServers', - mcpServerCommand: 'mcp.serverCommand', - memoryImportFormat: 'context.importFormat', - model: 'model.name', - preferredEditor: 'general.preferredEditor', - sandbox: 'tools.sandbox', - selectedAuthType: 'security.auth.selectedType', - shouldUseNodePtyShell: 'tools.shell.enableInteractiveShell', - shellPager: 'tools.shell.pager', - shellShowColor: 'tools.shell.showColor', - skipNextSpeakerCheck: 'model.skipNextSpeakerCheck', - summarizeToolOutput: 'model.summarizeToolOutput', - telemetry: 'telemetry', - theme: 'ui.theme', - toolDiscoveryCommand: 'tools.discoveryCommand', - toolCallCommand: 'tools.callCommand', - usageStatisticsEnabled: 'privacy.usageStatisticsEnabled', - useExternalAuth: 'security.auth.useExternal', - useRipgrep: 'tools.useRipgrep', - vimMode: 'general.vimMode', - - enableWelcomeBack: 'ui.enableWelcomeBack', - approvalMode: 'tools.approvalMode', - sessionTokenLimit: 'model.sessionTokenLimit', - 
contentGenerator: 'model.generationConfig', - skipLoopDetection: 'model.skipLoopDetection', - skipStartupContext: 'model.skipStartupContext', - enableOpenAILogging: 'model.enableOpenAILogging', - tavilyApiKey: 'advanced.tavilyApiKey', -}; - -// Settings that need boolean inversion during migration (V1 -> V3) -// Old negative naming -> new positive naming with inverted value -const INVERTED_BOOLEAN_MIGRATIONS: Record = { - disableAutoUpdate: 'general.enableAutoUpdate', - disableUpdateNag: 'general.enableAutoUpdate', - disableLoadingPhrases: 'ui.accessibility.enableLoadingPhrases', - disableFuzzySearch: 'context.fileFiltering.enableFuzzySearch', - disableCacheControl: 'model.generationConfig.enableCacheControl', -}; - -// Consolidated settings: multiple old V1 keys that map to a single new key. -// Policy: if ANY of the old disable* settings is true, the new enable* should be false. -const CONSOLIDATED_SETTINGS: Record = { - 'general.enableAutoUpdate': ['disableAutoUpdate', 'disableUpdateNag'], -}; - -// V2 nested paths that need inversion when migrating to V3 -const INVERTED_V2_PATHS: Record = { - 'general.disableAutoUpdate': 'general.enableAutoUpdate', - 'general.disableUpdateNag': 'general.enableAutoUpdate', - 'ui.accessibility.disableLoadingPhrases': - 'ui.accessibility.enableLoadingPhrases', - 'context.fileFiltering.disableFuzzySearch': - 'context.fileFiltering.enableFuzzySearch', - 'model.generationConfig.disableCacheControl': - 'model.generationConfig.enableCacheControl', -}; - -// Consolidated V2 paths: multiple old paths that map to a single new path. -// Policy: if ANY of the old disable* settings is true, the new enable* should be false. 
-const CONSOLIDATED_V2_PATHS: Record = { - 'general.enableAutoUpdate': [ - 'general.disableAutoUpdate', - 'general.disableUpdateNag', - ], -}; - export function getSystemSettingsPath(): string { if (process.env['QWEN_CODE_SYSTEM_SETTINGS_PATH']) { return process.env['QWEN_CODE_SYSTEM_SETTINGS_PATH']; @@ -221,312 +127,6 @@ export interface SettingsFile { rawJson?: string; } -function setNestedProperty( - obj: Record, - path: string, - value: unknown, -) { - const keys = path.split('.'); - const lastKey = keys.pop(); - if (!lastKey) return; - - let current: Record = obj; - for (const key of keys) { - if (current[key] === undefined) { - current[key] = {}; - } - const next = current[key]; - if (typeof next === 'object' && next !== null) { - current = next as Record; - } else { - // This path is invalid, so we stop. - return; - } - } - current[lastKey] = value; -} - -// Dynamically determine the top-level keys from the V2 settings structure. -const KNOWN_V2_CONTAINERS = new Set([ - ...Object.values(MIGRATION_MAP).map((path) => path.split('.')[0]), - ...Object.values(INVERTED_BOOLEAN_MIGRATIONS).map( - (path) => path.split('.')[0], - ), -]); - -export function needsMigration(settings: Record): boolean { - // Check version field first - if present and matches current version, no migration needed - if (SETTINGS_VERSION_KEY in settings) { - const version = settings[SETTINGS_VERSION_KEY]; - if (typeof version === 'number' && version >= SETTINGS_VERSION) { - return false; - } - } - - // Fallback to legacy detection: A file needs migration if it contains any - // top-level key that is moved to a nested location in V2. - const hasV1Keys = Object.entries(MIGRATION_MAP).some(([v1Key, v2Path]) => { - if (v1Key === v2Path || !(v1Key in settings)) { - return false; - } - // If a key exists that is both a V1 key and a V2 container (like 'model'), - // we need to check the type. If it's an object, it's a V2 container and not - // a V1 key that needs migration. 
- if ( - KNOWN_V2_CONTAINERS.has(v1Key) && - typeof settings[v1Key] === 'object' && - settings[v1Key] !== null - ) { - return false; - } - return true; - }); - - // Also check for old inverted boolean keys (disable* -> enable*) - const hasInvertedBooleanKeys = Object.keys(INVERTED_BOOLEAN_MIGRATIONS).some( - (v1Key) => v1Key in settings, - ); - - return hasV1Keys || hasInvertedBooleanKeys; -} - -/** - * Migrates V1 (flat) settings directly to V3. - * This includes both structural migration (flat -> nested) and boolean - * inversion (disable* -> enable*), so migrateV2ToV3 will be skipped. - */ -function migrateV1ToV3( - flatSettings: Record, -): Record | null { - if (!needsMigration(flatSettings)) { - return null; - } - - const v2Settings: Record = {}; - const flatKeys = new Set(Object.keys(flatSettings)); - - for (const [oldKey, newPath] of Object.entries(MIGRATION_MAP)) { - if (flatKeys.has(oldKey)) { - // Safety check: If this key is a V2 container (like 'model') and it's - // already an object, it's likely already in V2 format. Skip migration - // to prevent double-nesting (e.g., model.name.name). 
- if ( - KNOWN_V2_CONTAINERS.has(oldKey) && - typeof flatSettings[oldKey] === 'object' && - flatSettings[oldKey] !== null && - !Array.isArray(flatSettings[oldKey]) - ) { - // This is already a V2 container, carry it over as-is - v2Settings[oldKey] = flatSettings[oldKey]; - flatKeys.delete(oldKey); - continue; - } - - setNestedProperty(v2Settings, newPath, flatSettings[oldKey]); - flatKeys.delete(oldKey); - } - } - - // Handle consolidated settings first (multiple old keys -> single new key) - // Policy: if ANY of the old disable* settings is true, the new enable* should be false - for (const [newPath, oldKeys] of Object.entries(CONSOLIDATED_SETTINGS)) { - let hasAnyDisable = false; - let hasAnyValue = false; - for (const oldKey of oldKeys) { - if (flatKeys.has(oldKey)) { - hasAnyValue = true; - const oldValue = flatSettings[oldKey]; - if (typeof oldValue === 'boolean' && oldValue === true) { - hasAnyDisable = true; - } - flatKeys.delete(oldKey); - } - } - if (hasAnyValue) { - // enableAutoUpdate = !hasAnyDisable (if any disable* was true, enable should be false) - setNestedProperty(v2Settings, newPath, !hasAnyDisable); - } - } - - // Handle remaining V1 settings that need boolean inversion (disable* -> enable*) - // Skip keys that were already handled by consolidated settings - const consolidatedKeys = new Set(Object.values(CONSOLIDATED_SETTINGS).flat()); - for (const [oldKey, newPath] of Object.entries(INVERTED_BOOLEAN_MIGRATIONS)) { - if (consolidatedKeys.has(oldKey)) { - continue; - } - if (flatKeys.has(oldKey)) { - const oldValue = flatSettings[oldKey]; - if (typeof oldValue === 'boolean') { - setNestedProperty(v2Settings, newPath, !oldValue); - } - flatKeys.delete(oldKey); - } - } - - // Preserve mcpServers at the top level - if (flatSettings['mcpServers']) { - v2Settings['mcpServers'] = flatSettings['mcpServers']; - flatKeys.delete('mcpServers'); - } - - // Carry over any unrecognized keys - for (const remainingKey of flatKeys) { - const existingValue = 
v2Settings[remainingKey]; - const newValue = flatSettings[remainingKey]; - - if ( - typeof existingValue === 'object' && - existingValue !== null && - !Array.isArray(existingValue) && - typeof newValue === 'object' && - newValue !== null && - !Array.isArray(newValue) - ) { - const pathAwareGetStrategy = (path: string[]) => - getMergeStrategyForPath([remainingKey, ...path]); - v2Settings[remainingKey] = customDeepMerge( - pathAwareGetStrategy, - {}, - newValue as MergeableObject, - existingValue as MergeableObject, - ); - } else { - v2Settings[remainingKey] = newValue; - } - } - - // Set version field to indicate this is a V2 settings file - v2Settings[SETTINGS_VERSION_KEY] = SETTINGS_VERSION; - - return v2Settings; -} - -// Migrate V2 settings to V3 (invert disable* -> enable* booleans) -function migrateV2ToV3( - settings: Record, -): Record | null { - const version = settings[SETTINGS_VERSION_KEY]; - if (typeof version === 'number' && version >= 3) { - return null; - } - - let changed = false; - const result = structuredClone(settings); - const processedPaths = new Set(); - - // Handle consolidated V2 paths first (multiple old paths -> single new path) - // Policy: if ANY of the old disable* settings is true, the new enable* should be false - for (const [newPath, oldPaths] of Object.entries(CONSOLIDATED_V2_PATHS)) { - let hasAnyDisable = false; - let hasAnyValue = false; - for (const oldPath of oldPaths) { - const oldValue = getNestedProperty(result, oldPath); - if (typeof oldValue === 'boolean') { - hasAnyValue = true; - if (oldValue === true) { - hasAnyDisable = true; - } - deleteNestedProperty(result, oldPath); - processedPaths.add(oldPath); - changed = true; - } - } - if (hasAnyValue) { - // enableAutoUpdate = !hasAnyDisable (if any disable* was true, enable should be false) - setNestedProperty(result, newPath, !hasAnyDisable); - } - } - - // Handle remaining V2 paths that need inversion - for (const [oldPath, newPath] of Object.entries(INVERTED_V2_PATHS)) { - 
if (processedPaths.has(oldPath)) { - continue; - } - const oldValue = getNestedProperty(result, oldPath); - if (typeof oldValue === 'boolean') { - // Remove old property - deleteNestedProperty(result, oldPath); - // Set new property with inverted value - setNestedProperty(result, newPath, !oldValue); - changed = true; - } - } - - if (changed) { - result[SETTINGS_VERSION_KEY] = SETTINGS_VERSION; - return result; - } - - // Even if no changes, bump version to 3 to skip future migration checks - if (typeof version === 'number' && version < SETTINGS_VERSION) { - result[SETTINGS_VERSION_KEY] = SETTINGS_VERSION; - return result; - } - - return null; -} - -function deleteNestedProperty( - obj: Record, - path: string, -): void { - const keys = path.split('.'); - const lastKey = keys.pop(); - if (!lastKey) return; - - let current: Record = obj; - for (const key of keys) { - const next = current[key]; - if (typeof next !== 'object' || next === null) { - return; - } - current = next as Record; - } - delete current[lastKey]; -} - -function getNestedProperty( - obj: Record, - path: string, -): unknown { - const keys = path.split('.'); - let current: unknown = obj; - for (const key of keys) { - if (typeof current !== 'object' || current === null || !(key in current)) { - return undefined; - } - current = (current as Record)[key]; - } - return current; -} - -const REVERSE_MIGRATION_MAP: Record = Object.fromEntries( - Object.entries(MIGRATION_MAP).map(([key, value]) => [value, key]), -); - -// Reverse map for old V2 paths (before rename) to V1 keys. -// Used when migrating settings that still have old V2 naming (e.g., general.disableAutoUpdate). 
-const OLD_V2_TO_V1_MAP: Record = {}; -for (const [oldV2Path, newV3Path] of Object.entries(INVERTED_V2_PATHS)) { - // Find the V1 key that maps to this V3 path - for (const [v1Key, v3Path] of Object.entries(INVERTED_BOOLEAN_MIGRATIONS)) { - if (v3Path === newV3Path) { - OLD_V2_TO_V1_MAP[oldV2Path] = v1Key; - break; - } - } -} - -// Reverse map for new V3 paths to V1 keys (with boolean inversion). -// Used when migrating settings that have new V3 naming (e.g., general.enableAutoUpdate). -const V3_TO_V1_INVERTED_MAP: Record = Object.fromEntries( - Object.entries(INVERTED_BOOLEAN_MIGRATIONS).map(([v1Key, v3Path]) => [ - v3Path, - v1Key, - ]), -); - function getSettingsFileKeyWarnings( settings: Record, settingsFilePath: string, @@ -540,7 +140,7 @@ function getSettingsFileKeyWarnings( const ignoredLegacyKeys = new Set(); // Ignored legacy keys (V1 top-level keys that moved to a nested V2 path). - for (const [oldKey, newPath] of Object.entries(MIGRATION_MAP)) { + for (const [oldKey, newPath] of Object.entries(V1_TO_V2_MIGRATION_MAP)) { if (oldKey === newPath) { continue; } @@ -553,7 +153,7 @@ function getSettingsFileKeyWarnings( // If this key is a V2 container (like 'model') and it's already an object, // it's likely already in V2 format. Don't warn. if ( - KNOWN_V2_CONTAINERS.has(oldKey) && + V2_CONTAINER_KEYS.has(oldKey) && typeof oldValue === 'object' && oldValue !== null && !Array.isArray(oldValue) @@ -589,7 +189,8 @@ function getSettingsFileKeyWarnings( } /** - * Collects warnings for ignored legacy and unknown settings keys. + * Collects warnings for ignored legacy and unknown settings keys, + * as well as migration warnings. * * For `$version: 2` settings files, we do not apply implicit migrations. * Instead, we surface actionable, de-duplicated warnings in the terminal UI. 
@@ -597,6 +198,11 @@ function getSettingsFileKeyWarnings( export function getSettingsWarnings(loadedSettings: LoadedSettings): string[] { const warningSet = new Set(); + // Add migration warnings first + for (const warning of loadedSettings.migrationWarnings) { + warningSet.add(`Warning: ${warning}`); + } + for (const scope of [SettingScope.User, SettingScope.Workspace]) { const settingsFile = loadedSettings.forScope(scope); if (settingsFile.rawJson === undefined) { @@ -619,75 +225,6 @@ export function getSettingsWarnings(loadedSettings: LoadedSettings): string[] { return [...warningSet]; } -export function migrateSettingsToV1( - v2Settings: Record, -): Record { - const v1Settings: Record = {}; - const v2Keys = new Set(Object.keys(v2Settings)); - - for (const [newPath, oldKey] of Object.entries(REVERSE_MIGRATION_MAP)) { - const value = getNestedProperty(v2Settings, newPath); - if (value !== undefined) { - v1Settings[oldKey] = value; - v2Keys.delete(newPath.split('.')[0]); - } - } - - // Handle old V2 inverted paths (no value inversion needed) - // e.g., general.disableAutoUpdate -> disableAutoUpdate - for (const [oldV2Path, v1Key] of Object.entries(OLD_V2_TO_V1_MAP)) { - const value = getNestedProperty(v2Settings, oldV2Path); - if (value !== undefined) { - v1Settings[v1Key] = value; - v2Keys.delete(oldV2Path.split('.')[0]); - } - } - - // Handle new V3 inverted paths (WITH value inversion) - // e.g., general.enableAutoUpdate -> disableAutoUpdate (inverted) - for (const [v3Path, v1Key] of Object.entries(V3_TO_V1_INVERTED_MAP)) { - const value = getNestedProperty(v2Settings, v3Path); - if (value !== undefined && typeof value === 'boolean') { - v1Settings[v1Key] = !value; - v2Keys.delete(v3Path.split('.')[0]); - } - } - - // Preserve mcpServers at the top level - if (v2Settings['mcpServers']) { - v1Settings['mcpServers'] = v2Settings['mcpServers']; - v2Keys.delete('mcpServers'); - } - - // Carry over any unrecognized keys - for (const remainingKey of v2Keys) { - // 
Skip the version field - it's only for V2 format - if (remainingKey === SETTINGS_VERSION_KEY) { - continue; - } - - const value = v2Settings[remainingKey]; - if (value === undefined) { - continue; - } - - // Don't carry over empty objects that were just containers for migrated settings. - if ( - KNOWN_V2_CONTAINERS.has(remainingKey) && - typeof value === 'object' && - value !== null && - !Array.isArray(value) && - Object.keys(value).length === 0 - ) { - continue; - } - - v1Settings[remainingKey] = value; - } - - return v1Settings; -} - function mergeSettings( system: Settings, systemDefaults: Settings, @@ -721,6 +258,7 @@ export class LoadedSettings { workspace: SettingsFile, isTrusted: boolean, migratedInMemorScopes: Set, + migrationWarnings: string[] = [], ) { this.system = system; this.systemDefaults = systemDefaults; @@ -728,6 +266,7 @@ export class LoadedSettings { this.workspace = workspace; this.isTrusted = isTrusted; this.migratedInMemorScopes = migratedInMemorScopes; + this.migrationWarnings = migrationWarnings; this._merged = this.computeMergedSettings(); } @@ -737,6 +276,7 @@ export class LoadedSettings { readonly workspace: SettingsFile; readonly isTrusted: boolean; readonly migratedInMemorScopes: Set; + readonly migrationWarnings: string[]; private _merged: Settings; @@ -771,8 +311,8 @@ export class LoadedSettings { setValue(scope: SettingScope, key: string, value: unknown): void { const settingsFile = this.forScope(scope); - setNestedProperty(settingsFile.settings, key, value); - setNestedProperty(settingsFile.originalSettings, key, value); + setNestedPropertySafe(settingsFile.settings, key, value); + setNestedPropertySafe(settingsFile.originalSettings, key, value); this._merged = this.computeMergedSettings(); saveSettings(settingsFile); } @@ -796,6 +336,7 @@ export function createMinimalSettings(): LoadedSettings { emptySettingsFile, false, new Set(), + [], ); } @@ -936,6 +477,16 @@ export function loadEnvironment(settings: Settings): void { export 
function loadSettings( workspaceDir: string = process.cwd(), ): LoadedSettings { + // Set up a temporary debug log session for the startup phase. + // This allows migration errors to be logged to file instead of being + // exposed to users via stderr. The Config class will override this + // with the actual session once initialized. + const resolvedWorkspaceDir = path.resolve(workspaceDir); + const sanitizedProjectId = sanitizeCwd(resolvedWorkspaceDir); + setDebugLogSession({ + getSessionId: () => `startup-${sanitizedProjectId}`, + }); + let systemSettings: Settings = {}; let systemDefaultSettings: Settings = {}; let userSettings: Settings = {}; @@ -946,7 +497,7 @@ export function loadSettings( const migratedInMemorScopes = new Set(); // Resolve paths to their canonical representation to handle symlinks - const resolvedWorkspaceDir = path.resolve(workspaceDir); + // Note: resolvedWorkspaceDir is already defined at the top of the function const resolvedHomeDir = path.resolve(homedir()); let realWorkspaceDir = resolvedWorkspaceDir; @@ -967,7 +518,7 @@ export function loadSettings( const loadAndMigrate = ( filePath: string, scope: SettingScope, - ): { settings: Settings; rawJson?: string } => { + ): { settings: Settings; rawJson?: string; migrationWarnings?: string[] } => { try { if (fs.existsSync(filePath)) { const content = fs.readFileSync(filePath, 'utf-8'); @@ -986,74 +537,59 @@ export function loadSettings( } let settingsObject = rawSettings as Record; + const hasVersionKey = SETTINGS_VERSION_KEY in settingsObject; + const versionValue = settingsObject[SETTINGS_VERSION_KEY]; + const hasInvalidVersion = + hasVersionKey && typeof versionValue !== 'number'; + const hasLegacyNumericVersion = + typeof versionValue === 'number' && versionValue < SETTINGS_VERSION; + let migrationWarnings: string[] | undefined; + + const persistSettingsObject = (warningPrefix: string) => { + try { + writeWithBackupSync( + filePath, + JSON.stringify(settingsObject, null, 2), + ); + } 
catch (e) { + debugLogger.error(`${warningPrefix}: ${getErrorMessage(e)}`); + } + }; + if (needsMigration(settingsObject)) { - const migratedSettings = migrateV1ToV3(settingsObject); - if (migratedSettings) { - if (MIGRATE_V2_OVERWRITE) { - try { - fs.renameSync(filePath, `${filePath}.orig`); - fs.writeFileSync( - filePath, - JSON.stringify(migratedSettings, null, 2), - 'utf-8', - ); - } catch (e) { - writeStderrLine( - `Error migrating settings file on disk: ${getErrorMessage( - e, - )}`, - ); - } - } else { - migratedInMemorScopes.add(scope); - } - settingsObject = migratedSettings; + const migrationResult = runMigrations(settingsObject, scope); + if (migrationResult.executedMigrations.length > 0) { + settingsObject = migrationResult.settings as Record< + string, + unknown + >; + migrationWarnings = migrationResult.warnings; + persistSettingsObject('Error migrating settings file on disk'); + } else if (hasLegacyNumericVersion || hasInvalidVersion) { + // Migration was deemed needed but nothing executed. Normalize version metadata + // to avoid repeated no-op checks on startup. + settingsObject[SETTINGS_VERSION_KEY] = SETTINGS_VERSION; + debugLogger.warn( + `Settings version metadata in ${filePath} could not be migrated by any registered migration. Normalizing ${SETTINGS_VERSION_KEY} to ${SETTINGS_VERSION}.`, + ); + persistSettingsObject('Error normalizing settings version on disk'); } - } else if (!(SETTINGS_VERSION_KEY in settingsObject)) { - // No migration needed, but version field is missing - add it for future optimizations + } else if ( + !hasVersionKey || + hasInvalidVersion || + hasLegacyNumericVersion + ) { + // No migration needed/executable, but version metadata is missing or invalid. + // Normalize it to current version to avoid repeated startup work. 
settingsObject[SETTINGS_VERSION_KEY] = SETTINGS_VERSION; - if (MIGRATE_V2_OVERWRITE) { - try { - fs.writeFileSync( - filePath, - JSON.stringify(settingsObject, null, 2), - 'utf-8', - ); - } catch (e) { - writeStderrLine( - `Error adding version to settings file: ${getErrorMessage(e)}`, - ); - } - } + persistSettingsObject('Error normalizing settings version on disk'); } - // V2 to V3 migration (invert disable* -> enable* booleans) - const v3Migrated = migrateV2ToV3(settingsObject); - if (v3Migrated) { - if (MIGRATE_V2_OVERWRITE) { - try { - // Only backup if not already backed up by V1->V2 migration - const backupPath = `${filePath}.orig`; - if (!fs.existsSync(backupPath)) { - fs.renameSync(filePath, backupPath); - } - fs.writeFileSync( - filePath, - JSON.stringify(v3Migrated, null, 2), - 'utf-8', - ); - } catch (e) { - writeStderrLine( - `Error migrating settings file to V3: ${getErrorMessage(e)}`, - ); - } - } else { - migratedInMemorScopes.add(scope); - } - settingsObject = v3Migrated; - } - - return { settings: settingsObject as Settings, rawJson: content }; + return { + settings: settingsObject as Settings, + rawJson: content, + migrationWarnings, + }; } } catch (error: unknown) { settingsErrors.push({ @@ -1071,7 +607,11 @@ export function loadSettings( ); const userResult = loadAndMigrate(USER_SETTINGS_PATH, SettingScope.User); - let workspaceResult: { settings: Settings; rawJson?: string } = { + let workspaceResult: { + settings: Settings; + rawJson?: string; + migrationWarnings?: string[]; + } = { settings: {} as Settings, rawJson: undefined, }; @@ -1141,6 +681,14 @@ export function loadSettings( ); } + // Collect all migration warnings from all scopes + const allMigrationWarnings: string[] = [ + ...(systemResult.migrationWarnings ?? []), + ...(systemDefaultsResult.migrationWarnings ?? []), + ...(userResult.migrationWarnings ?? []), + ...(workspaceResult.migrationWarnings ?? 
[]), + ]; + return new LoadedSettings( { path: systemSettingsPath, @@ -1168,6 +716,7 @@ export function loadSettings( }, isTrusted, migratedInMemorScopes, + allMigrationWarnings, ); } @@ -1179,21 +728,14 @@ export function saveSettings(settingsFile: SettingsFile): void { fs.mkdirSync(dirPath, { recursive: true }); } - let settingsToSave = settingsFile.originalSettings; - if (!MIGRATE_V2_OVERWRITE) { - settingsToSave = migrateSettingsToV1( - settingsToSave as Record, - ) as Settings; - } - // Use the format-preserving update function updateSettingsFilePreservingFormat( settingsFile.path, - settingsToSave as Record, + settingsFile.originalSettings as Record, ); } catch (error) { - writeStderrLine('Error saving user settings file.'); - writeStderrLine(error instanceof Error ? error.message : String(error)); + debugLogger.error('Error saving user settings file.'); + debugLogger.error(error instanceof Error ? error.message : String(error)); throw error; } } diff --git a/packages/cli/src/config/settingsSchema.ts b/packages/cli/src/config/settingsSchema.ts index 1150a1bf6..b2d24712b 100644 --- a/packages/cli/src/config/settingsSchema.ts +++ b/packages/cli/src/config/settingsSchema.ts @@ -589,7 +589,7 @@ const SETTINGS_SCHEMA = { label: 'Skip Loop Detection', category: 'Model', requiresRestart: false, - default: false, + default: true, description: 'Disable all loop detection checks (streaming and LLM).', showInDialog: false, }, @@ -1176,6 +1176,75 @@ const SETTINGS_SCHEMA = { description: 'Configuration for web search providers.', showInDialog: false, }, + + hooksConfig: { + type: 'object', + label: 'Hooks Config', + category: 'Advanced', + requiresRestart: false, + default: {}, + description: + 'Hook configurations for intercepting and customizing agent behavior.', + showInDialog: false, + properties: { + enabled: { + type: 'boolean', + label: 'Enable Hooks', + category: 'Advanced', + requiresRestart: true, + default: true, + description: + 'Canonical toggle for the 
hooks system. When disabled, no hooks will be executed.', + showInDialog: false, + }, + disabled: { + type: 'array', + label: 'Disabled Hooks', + category: 'Advanced', + requiresRestart: false, + default: [] as string[], + description: + 'List of hook names (commands) that should be disabled. Hooks in this list will not execute even if configured.', + showInDialog: false, + mergeStrategy: MergeStrategy.UNION, + }, + }, + }, + + hooks: { + type: 'object', + label: 'Hooks', + category: 'Advanced', + requiresRestart: false, + default: {}, + description: + 'Hook event configurations for extending CLI behavior at various lifecycle points.', + showInDialog: false, + properties: { + UserPromptSubmit: { + type: 'array', + label: 'Before Agent Hooks', + category: 'Advanced', + requiresRestart: false, + default: [], + description: + 'Hooks that execute before agent processing. Can modify prompts or inject context.', + showInDialog: false, + mergeStrategy: MergeStrategy.CONCAT, + }, + Stop: { + type: 'array', + label: 'After Agent Hooks', + category: 'Advanced', + requiresRestart: false, + default: [], + description: + 'Hooks that execute after agent processing. 
Can post-process responses or log interactions.', + showInDialog: false, + mergeStrategy: MergeStrategy.CONCAT, + }, + }, + }, } as const satisfies SettingsSchema; export type SettingsSchemaType = typeof SETTINGS_SCHEMA; diff --git a/packages/cli/src/gemini.test.tsx b/packages/cli/src/gemini.test.tsx index 6c48658ad..e4efea1f5 100644 --- a/packages/cli/src/gemini.test.tsx +++ b/packages/cli/src/gemini.test.tsx @@ -190,6 +190,7 @@ describe('gemini.tsx main function', () => { }, setValue: vi.fn(), forScope: () => ({ settings: {}, originalSettings: {}, path: '' }), + migrationWarnings: [], } as never); try { await main(); @@ -262,7 +263,7 @@ describe('gemini.tsx main function', () => { 'isRaw', ); Object.defineProperty(process.stdin, 'isTTY', { - value: true, + value: false, // 在 stream-json 模式下应为 false configurable: true, }); Object.defineProperty(process.stdin, 'isRaw', { @@ -322,6 +323,7 @@ describe('gemini.tsx main function', () => { }, setValue: vi.fn(), forScope: () => ({ settings: {}, originalSettings: {}, path: '' }), + migrationWarnings: [], } as never); vi.mocked(parseArguments).mockResolvedValue({ @@ -344,6 +346,9 @@ describe('gemini.tsx main function', () => { getInputFormat: () => 'stream-json', getContentGeneratorConfig: () => ({ authType: 'test-auth' }), getWarnings: () => [], + getUsageStatisticsEnabled: () => true, + getSessionId: () => 'test-session-id', + getOutputFormat: () => OutputFormat.TEXT, } as unknown as Config; vi.mocked(loadCliConfig).mockResolvedValue(configStub); @@ -442,6 +447,7 @@ describe('gemini.tsx main function kitty protocol', () => { getScreenReader: () => false, getGeminiMdFileCount: () => 0, getWarnings: () => [], + getUsageStatisticsEnabled: () => true, } as unknown as Config); vi.mocked(loadSettings).mockReturnValue({ errors: [], @@ -452,6 +458,7 @@ describe('gemini.tsx main function kitty protocol', () => { }, setValue: vi.fn(), forScope: () => ({ settings: {}, originalSettings: {}, path: '' }), + migrationWarnings: [], } as 
never); vi.mocked(parseArguments).mockResolvedValue({ model: undefined, @@ -497,6 +504,7 @@ describe('gemini.tsx main function kitty protocol', () => { authType: undefined, maxSessionTurns: undefined, experimentalLsp: undefined, + experimentalHooks: undefined, channel: undefined, chatRecording: undefined, sessionId: undefined, diff --git a/packages/cli/src/gemini.tsx b/packages/cli/src/gemini.tsx index c5e742ee6..58a735c73 100644 --- a/packages/cli/src/gemini.tsx +++ b/packages/cli/src/gemini.tsx @@ -385,17 +385,16 @@ export async function main() { setMaxSizedBoxDebugging(isDebugMode); // Check input format early to determine initialization flow - const inputFormat = - typeof config.getInputFormat === 'function' + // In TTY mode, ignore stream-json input format to prevent process from hanging + const inputFormat = process.stdin.isTTY + ? InputFormat.TEXT + : typeof config.getInputFormat === 'function' ? config.getInputFormat() : InputFormat.TEXT; // For stream-json mode, defer config.initialize() until after the initialize control request // For other modes, initialize normally - let initializationResult: InitializationResult | undefined; - if (inputFormat !== InputFormat.STREAM_JSON) { - initializationResult = await initializeApp(config, settings); - } + const initializationResult = await initializeApp(config, settings); if (config.getExperimentalZedIntegration()) { return runAcpAgent(config, settings, argv); diff --git a/packages/cli/src/services/BuiltinCommandLoader.ts b/packages/cli/src/services/BuiltinCommandLoader.ts index cda06daad..08ee98eb2 100644 --- a/packages/cli/src/services/BuiltinCommandLoader.ts +++ b/packages/cli/src/services/BuiltinCommandLoader.ts @@ -21,6 +21,7 @@ import { editorCommand } from '../ui/commands/editorCommand.js'; import { exportCommand } from '../ui/commands/exportCommand.js'; import { extensionsCommand } from '../ui/commands/extensionsCommand.js'; import { helpCommand } from '../ui/commands/helpCommand.js'; +import { hooksCommand 
} from '../ui/commands/hooksCommand.js'; import { ideCommand } from '../ui/commands/ideCommand.js'; import { initCommand } from '../ui/commands/initCommand.js'; import { languageCommand } from '../ui/commands/languageCommand.js'; @@ -72,6 +73,7 @@ export class BuiltinCommandLoader implements ICommandLoader { exportCommand, extensionsCommand, helpCommand, + hooksCommand, await ideCommand(), initCommand, languageCommand, diff --git a/packages/cli/src/ui/commands/hooksCommand.ts b/packages/cli/src/ui/commands/hooksCommand.ts new file mode 100644 index 000000000..04951db7a --- /dev/null +++ b/packages/cli/src/ui/commands/hooksCommand.ts @@ -0,0 +1,322 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import type { + SlashCommand, + SlashCommandActionReturn, + CommandContext, + MessageActionReturn, +} from './types.js'; +import { CommandKind } from './types.js'; +import { t } from '../../i18n/index.js'; +import type { HookRegistryEntry } from '@qwen-code/qwen-code-core'; + +/** + * Format hook source for display + */ +function formatHookSource(source: string): string { + switch (source) { + case 'project': + return 'Project'; + case 'user': + return 'User'; + case 'system': + return 'System'; + case 'extensions': + return 'Extension'; + default: + return source; + } +} + +/** + * Format hook status for display + */ +function formatHookStatus(enabled: boolean): string { + return enabled ? 
'✓ Enabled' : '✗ Disabled'; +} + +const listCommand: SlashCommand = { + name: 'list', + get description() { + return t('List all configured hooks'); + }, + kind: CommandKind.BUILT_IN, + action: async ( + context: CommandContext, + _args: string, + ): Promise => { + const { config } = context.services; + if (!config) { + return { + type: 'message', + messageType: 'error', + content: t('Config not loaded.'), + }; + } + + const hookSystem = config.getHookSystem(); + if (!hookSystem) { + return { + type: 'message', + messageType: 'info', + content: t( + 'Hooks are not enabled. Enable hooks in settings to use this feature.', + ), + }; + } + + const registry = hookSystem.getRegistry(); + const allHooks = registry.getAllHooks(); + + if (allHooks.length === 0) { + return { + type: 'message', + messageType: 'info', + content: t( + 'No hooks configured. Add hooks in your settings.json file.', + ), + }; + } + + // Group hooks by event + const hooksByEvent = new Map(); + for (const hook of allHooks) { + const eventName = hook.eventName; + if (!hooksByEvent.has(eventName)) { + hooksByEvent.set(eventName, []); + } + hooksByEvent.get(eventName)!.push(hook); + } + + let output = `**Configured Hooks (${allHooks.length} total)**\n\n`; + + for (const [eventName, hooks] of hooksByEvent) { + output += `### ${eventName}\n`; + for (const hook of hooks) { + const name = hook.config.name || hook.config.command || 'unnamed'; + const source = formatHookSource(hook.source); + const status = formatHookStatus(hook.enabled); + const matcher = hook.matcher ? 
` (matcher: ${hook.matcher})` : ''; + output += `- **${name}** [${source}] ${status}${matcher}\n`; + } + output += '\n'; + } + + return { + type: 'message', + messageType: 'info', + content: output, + }; + }, +}; + +const enableCommand: SlashCommand = { + name: 'enable', + get description() { + return t('Enable a disabled hook'); + }, + kind: CommandKind.BUILT_IN, + action: async ( + context: CommandContext, + args: string, + ): Promise => { + const hookName = args.trim(); + if (!hookName) { + return { + type: 'message', + messageType: 'error', + content: t( + 'Please specify a hook name. Usage: /hooks enable ', + ), + }; + } + + const { config } = context.services; + if (!config) { + return { + type: 'message', + messageType: 'error', + content: t('Config not loaded.'), + }; + } + + const hookSystem = config.getHookSystem(); + if (!hookSystem) { + return { + type: 'message', + messageType: 'error', + content: t('Hooks are not enabled.'), + }; + } + + const registry = hookSystem.getRegistry(); + registry.setHookEnabled(hookName, true); + + return { + type: 'message', + messageType: 'info', + content: t('Hook "{{name}}" has been enabled for this session.', { + name: hookName, + }), + }; + }, + completion: async (context: CommandContext, partialArg: string) => { + const { config } = context.services; + if (!config) return []; + + const hookSystem = config.getHookSystem(); + if (!hookSystem) return []; + + const registry = hookSystem.getRegistry(); + const allHooks = registry.getAllHooks(); + + // Return disabled hooks for enable command (deduplicated by name) + const disabledHookNames = allHooks + .filter((hook) => !hook.enabled) + .map((hook) => hook.config.name || hook.config.command || '') + .filter((name) => name && name.startsWith(partialArg)); + return [...new Set(disabledHookNames)]; + }, +}; + +const disableCommand: SlashCommand = { + name: 'disable', + get description() { + return t('Disable an active hook'); + }, + kind: CommandKind.BUILT_IN, + action: 
async ( + context: CommandContext, + args: string, + ): Promise => { + const hookName = args.trim(); + if (!hookName) { + return { + type: 'message', + messageType: 'error', + content: t( + 'Please specify a hook name. Usage: /hooks disable ', + ), + }; + } + + const { config } = context.services; + if (!config) { + return { + type: 'message', + messageType: 'error', + content: t('Config not loaded.'), + }; + } + + const hookSystem = config.getHookSystem(); + if (!hookSystem) { + return { + type: 'message', + messageType: 'error', + content: t('Hooks are not enabled.'), + }; + } + + const registry = hookSystem.getRegistry(); + registry.setHookEnabled(hookName, false); + + return { + type: 'message', + messageType: 'info', + content: t('Hook "{{name}}" has been disabled for this session.', { + name: hookName, + }), + }; + }, + completion: async (context: CommandContext, partialArg: string) => { + const { config } = context.services; + if (!config) return []; + + const hookSystem = config.getHookSystem(); + if (!hookSystem) return []; + + const registry = hookSystem.getRegistry(); + const allHooks = registry.getAllHooks(); + + // Return enabled hooks for disable command (deduplicated by name) + const enabledHookNames = allHooks + .filter((hook) => hook.enabled) + .map((hook) => hook.config.name || hook.config.command || '') + .filter((name) => name && name.startsWith(partialArg)); + return [...new Set(enabledHookNames)]; + }, +}; + +export const hooksCommand: SlashCommand = { + name: 'hooks', + get description() { + return t('Manage Qwen Code hooks'); + }, + kind: CommandKind.BUILT_IN, + subCommands: [listCommand, enableCommand, disableCommand], + action: async ( + context: CommandContext, + args: string, + ): Promise => { + // If no subcommand provided, show list + if (!args.trim()) { + const result = await listCommand.action?.(context, ''); + return result ?? 
{ type: 'message', messageType: 'info', content: '' }; + } + + const [subcommand, ...rest] = args.trim().split(/\s+/); + const subArgs = rest.join(' '); + + let result: SlashCommandActionReturn | void; + switch (subcommand.toLowerCase()) { + case 'list': + result = await listCommand.action?.(context, subArgs); + break; + case 'enable': + result = await enableCommand.action?.(context, subArgs); + break; + case 'disable': + result = await disableCommand.action?.(context, subArgs); + break; + default: + return { + type: 'message', + messageType: 'error', + content: t( + 'Unknown subcommand: {{cmd}}. Available: list, enable, disable', + { + cmd: subcommand, + }, + ), + }; + } + return result ?? { type: 'message', messageType: 'info', content: '' }; + }, + completion: async (context: CommandContext, partialArg: string) => { + const subcommands = ['list', 'enable', 'disable']; + const parts = partialArg.split(/\s+/); + + if (parts.length <= 1) { + // Complete subcommand + return subcommands.filter((cmd) => cmd.startsWith(partialArg)); + } + + // Complete subcommand arguments + const [subcommand, ...rest] = parts; + const subArgs = rest.join(' '); + + switch (subcommand.toLowerCase()) { + case 'enable': + return enableCommand.completion?.(context, subArgs) ?? []; + case 'disable': + return disableCommand.completion?.(context, subArgs) ?? 
[]; + default: + return []; + } + }, +}; diff --git a/packages/cli/src/ui/components/HistoryItemDisplay.tsx b/packages/cli/src/ui/components/HistoryItemDisplay.tsx index 3bb6780ca..a82847cc8 100644 --- a/packages/cli/src/ui/components/HistoryItemDisplay.tsx +++ b/packages/cli/src/ui/components/HistoryItemDisplay.tsx @@ -8,19 +8,23 @@ import type React from 'react'; import { useMemo } from 'react'; import { escapeAnsiCtrlCodes } from '../utils/textUtils.js'; import type { HistoryItem } from '../types.js'; -import { UserMessage } from './messages/UserMessage.js'; -import { UserShellMessage } from './messages/UserShellMessage.js'; -import { GeminiMessage } from './messages/GeminiMessage.js'; -import { InfoMessage } from './messages/InfoMessage.js'; -import { ErrorMessage } from './messages/ErrorMessage.js'; +import { + UserMessage, + UserShellMessage, + AssistantMessage, + AssistantMessageContent, + ThinkMessage, + ThinkMessageContent, +} from './messages/ConversationMessages.js'; import { ToolGroupMessage } from './messages/ToolGroupMessage.js'; -import { GeminiMessageContent } from './messages/GeminiMessageContent.js'; -import { GeminiThoughtMessage } from './messages/GeminiThoughtMessage.js'; -import { GeminiThoughtMessageContent } from './messages/GeminiThoughtMessageContent.js'; import { CompressionMessage } from './messages/CompressionMessage.js'; import { SummaryMessage } from './messages/SummaryMessage.js'; -import { WarningMessage } from './messages/WarningMessage.js'; -import { RetryCountdownMessage } from './messages/RetryCountdownMessage.js'; +import { + InfoMessage, + WarningMessage, + ErrorMessage, + RetryCountdownMessage, +} from './messages/StatusMessages.js'; import { Box } from 'ink'; import { AboutBox } from './AboutBox.js'; import { StatsDisplay } from './StatsDisplay.js'; @@ -61,6 +65,11 @@ const HistoryItemDisplayComponent: React.FC = ({ embeddedShellFocused, availableTerminalHeightGemini, }) => { + const marginTop = + item.type === 
'gemini_content' || item.type === 'gemini_thought_content' + ? 0 + : 1; + const itemForDisplay = useMemo(() => escapeAnsiCtrlCodes(item), [item]); const contentWidth = terminalWidth - 4; const boxWidth = mainAreaWidth || contentWidth; @@ -69,6 +78,7 @@ const HistoryItemDisplayComponent: React.FC = ({ @@ -80,7 +90,7 @@ const HistoryItemDisplayComponent: React.FC = ({ )} {itemForDisplay.type === 'gemini' && ( - = ({ /> )} {itemForDisplay.type === 'gemini_content' && ( - = ({ /> )} {itemForDisplay.type === 'gemini_thought' && ( - = ({ /> )} {itemForDisplay.type === 'gemini_thought_content' && ( - > should render a full gemini item when using availableTerminalHeightGemini 1`] = ` -" ✦ Example code block: +" + ✦ Example code block: 1 Line 1 2 Line 2 3 Line 3 @@ -109,7 +110,8 @@ exports[` > should render a full gemini_content item when `; exports[` > should render a truncated gemini item 1`] = ` -" ✦ Example code block: +" + ✦ Example code block: ... first 41 lines hidden ... 42 Line 42 43 Line 43 diff --git a/packages/cli/src/ui/components/messages/ConversationMessages.tsx b/packages/cli/src/ui/components/messages/ConversationMessages.tsx new file mode 100644 index 000000000..526bc9cfe --- /dev/null +++ b/packages/cli/src/ui/components/messages/ConversationMessages.tsx @@ -0,0 +1,261 @@ +/** + * @license + * Copyright 2025 Qwen Team + * SPDX-License-Identifier: Apache-2.0 + */ + +import type React from 'react'; +import { Box, Text } from 'ink'; +import stringWidth from 'string-width'; +import { MarkdownDisplay } from '../../utils/MarkdownDisplay.js'; +import { theme } from '../../semantic-colors.js'; +import { + SCREEN_READER_MODEL_PREFIX, + SCREEN_READER_USER_PREFIX, +} from '../../textConstants.js'; + +interface UserMessageProps { + text: string; +} + +interface UserShellMessageProps { + text: string; +} + +interface AssistantMessageProps { + text: string; + isPending: boolean; + availableTerminalHeight?: number; + contentWidth: number; +} + +interface 
AssistantMessageContentProps { + text: string; + isPending: boolean; + availableTerminalHeight?: number; + contentWidth: number; +} + +interface ThinkMessageProps { + text: string; + isPending: boolean; + availableTerminalHeight?: number; + contentWidth: number; +} + +interface ThinkMessageContentProps { + text: string; + isPending: boolean; + availableTerminalHeight?: number; + contentWidth: number; +} + +interface PrefixedTextMessageProps { + text: string; + prefix: string; + prefixColor: string; + textColor: string; + ariaLabel?: string; + marginTop?: number; + alignSelf?: 'auto' | 'flex-start' | 'center' | 'flex-end'; +} + +interface PrefixedMarkdownMessageProps { + text: string; + prefix: string; + prefixColor: string; + isPending: boolean; + availableTerminalHeight?: number; + contentWidth: number; + ariaLabel?: string; + textColor?: string; +} + +interface ContinuationMarkdownMessageProps { + text: string; + isPending: boolean; + availableTerminalHeight?: number; + contentWidth: number; + basePrefix: string; + textColor?: string; +} + +function getPrefixWidth(prefix: string): number { + // Reserve one extra column so text never touches the prefix glyph. 
+ return stringWidth(prefix) + 1; +} + +const PrefixedTextMessage: React.FC = ({ + text, + prefix, + prefixColor, + textColor, + ariaLabel, + marginTop = 0, + alignSelf, +}) => { + const prefixWidth = getPrefixWidth(prefix); + + return ( + + + + {prefix} + + + + + {text} + + + + ); +}; + +const PrefixedMarkdownMessage: React.FC = ({ + text, + prefix, + prefixColor, + isPending, + availableTerminalHeight, + contentWidth, + ariaLabel, + textColor, +}) => { + const prefixWidth = getPrefixWidth(prefix); + + return ( + + + + {prefix} + + + + + + + ); +}; + +const ContinuationMarkdownMessage: React.FC< + ContinuationMarkdownMessageProps +> = ({ + text, + isPending, + availableTerminalHeight, + contentWidth, + basePrefix, + textColor, +}) => { + const prefixWidth = getPrefixWidth(basePrefix); + + return ( + + + + ); +}; + +export const UserMessage: React.FC = ({ text }) => ( + +); + +export const UserShellMessage: React.FC = ({ text }) => { + const commandToDisplay = text.startsWith('!') ? text.substring(1) : text; + + return ( + + ); +}; + +export const AssistantMessage: React.FC = ({ + text, + isPending, + availableTerminalHeight, + contentWidth, +}) => ( + +); + +export const AssistantMessageContent: React.FC< + AssistantMessageContentProps +> = ({ text, isPending, availableTerminalHeight, contentWidth }) => ( + +); + +export const ThinkMessage: React.FC = ({ + text, + isPending, + availableTerminalHeight, + contentWidth, +}) => ( + +); + +export const ThinkMessageContent: React.FC = ({ + text, + isPending, + availableTerminalHeight, + contentWidth, +}) => ( + +); diff --git a/packages/cli/src/ui/components/messages/ErrorMessage.tsx b/packages/cli/src/ui/components/messages/ErrorMessage.tsx deleted file mode 100644 index 14cb8a91f..000000000 --- a/packages/cli/src/ui/components/messages/ErrorMessage.tsx +++ /dev/null @@ -1,38 +0,0 @@ -/** - * @license - * Copyright 2025 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import type React from 'react'; -import { 
Text, Box } from 'ink'; -import { theme } from '../../semantic-colors.js'; - -interface ErrorMessageProps { - text: string; - /** Optional inline hint displayed after the error text in secondary/dimmed color */ - hint?: string; -} - -/** - * Renders an error message with a "✕" prefix. - * When a hint is provided (e.g., retry countdown), it is displayed inline - * in parentheses with a dimmed secondary color, similar to the ESC hint - * style used in LoadingIndicator. - */ -export const ErrorMessage: React.FC = ({ text, hint }) => { - const prefix = '✕ '; - const prefixWidth = prefix.length; - - return ( - - - {prefix} - - - {text} - {hint && ({hint})} - - - ); -}; diff --git a/packages/cli/src/ui/components/messages/GeminiMessage.tsx b/packages/cli/src/ui/components/messages/GeminiMessage.tsx deleted file mode 100644 index 987cbf38a..000000000 --- a/packages/cli/src/ui/components/messages/GeminiMessage.tsx +++ /dev/null @@ -1,46 +0,0 @@ -/** - * @license - * Copyright 2025 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import type React from 'react'; -import { Text, Box } from 'ink'; -import { MarkdownDisplay } from '../../utils/MarkdownDisplay.js'; -import { theme } from '../../semantic-colors.js'; -import { SCREEN_READER_MODEL_PREFIX } from '../../textConstants.js'; - -interface GeminiMessageProps { - text: string; - isPending: boolean; - availableTerminalHeight?: number; - contentWidth: number; -} - -export const GeminiMessage: React.FC = ({ - text, - isPending, - availableTerminalHeight, - contentWidth, -}) => { - const prefix = '✦ '; - const prefixWidth = prefix.length; - - return ( - - - - {prefix} - - - - - - - ); -}; diff --git a/packages/cli/src/ui/components/messages/GeminiMessageContent.tsx b/packages/cli/src/ui/components/messages/GeminiMessageContent.tsx deleted file mode 100644 index 29a82298f..000000000 --- a/packages/cli/src/ui/components/messages/GeminiMessageContent.tsx +++ /dev/null @@ -1,43 +0,0 @@ -/** - * @license - * Copyright 
2025 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import type React from 'react'; -import { Box } from 'ink'; -import { MarkdownDisplay } from '../../utils/MarkdownDisplay.js'; - -interface GeminiMessageContentProps { - text: string; - isPending: boolean; - availableTerminalHeight?: number; - contentWidth: number; -} - -/* - * Gemini message content is a semi-hacked component. The intention is to represent a partial - * of GeminiMessage and is only used when a response gets too long. In that instance messages - * are split into multiple GeminiMessageContent's to enable the root component in - * App.tsx to be as performant as humanly possible. - */ -export const GeminiMessageContent: React.FC = ({ - text, - isPending, - availableTerminalHeight, - contentWidth, -}) => { - const originalPrefix = '✦ '; - const prefixWidth = originalPrefix.length; - - return ( - - - - ); -}; diff --git a/packages/cli/src/ui/components/messages/GeminiThoughtMessage.tsx b/packages/cli/src/ui/components/messages/GeminiThoughtMessage.tsx deleted file mode 100644 index b595c9d06..000000000 --- a/packages/cli/src/ui/components/messages/GeminiThoughtMessage.tsx +++ /dev/null @@ -1,48 +0,0 @@ -/** - * @license - * Copyright 2025 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import type React from 'react'; -import { Text, Box } from 'ink'; -import { MarkdownDisplay } from '../../utils/MarkdownDisplay.js'; -import { theme } from '../../semantic-colors.js'; - -interface GeminiThoughtMessageProps { - text: string; - isPending: boolean; - availableTerminalHeight?: number; - contentWidth: number; -} - -/** - * Displays model thinking/reasoning text with a softer, dimmed style - * to visually distinguish it from regular content output. 
- */ -export const GeminiThoughtMessage: React.FC = ({ - text, - isPending, - availableTerminalHeight, - contentWidth, -}) => { - const prefix = '✦ '; - const prefixWidth = prefix.length; - - return ( - - - {prefix} - - - - - - ); -}; diff --git a/packages/cli/src/ui/components/messages/GeminiThoughtMessageContent.tsx b/packages/cli/src/ui/components/messages/GeminiThoughtMessageContent.tsx deleted file mode 100644 index 0f20c45d2..000000000 --- a/packages/cli/src/ui/components/messages/GeminiThoughtMessageContent.tsx +++ /dev/null @@ -1,40 +0,0 @@ -/** - * @license - * Copyright 2025 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import type React from 'react'; -import { Box } from 'ink'; -import { MarkdownDisplay } from '../../utils/MarkdownDisplay.js'; -import { theme } from '../../semantic-colors.js'; - -interface GeminiThoughtMessageContentProps { - text: string; - isPending: boolean; - availableTerminalHeight?: number; - contentWidth: number; -} - -/** - * Continuation component for thought messages, similar to GeminiMessageContent. - * Used when a thought response gets too long and needs to be split for performance. 
- */ -export const GeminiThoughtMessageContent: React.FC< - GeminiThoughtMessageContentProps -> = ({ text, isPending, availableTerminalHeight, contentWidth }) => { - const originalPrefix = '✦ '; - const prefixWidth = originalPrefix.length; - - return ( - - - - ); -}; diff --git a/packages/cli/src/ui/components/messages/InfoMessage.tsx b/packages/cli/src/ui/components/messages/InfoMessage.tsx deleted file mode 100644 index af036237a..000000000 --- a/packages/cli/src/ui/components/messages/InfoMessage.tsx +++ /dev/null @@ -1,37 +0,0 @@ -/** - * @license - * Copyright 2025 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import type React from 'react'; -import { Text, Box } from 'ink'; -import { theme } from '../../semantic-colors.js'; -import { RenderInline } from '../../utils/InlineMarkdownRenderer.js'; - -interface InfoMessageProps { - text: string; -} - -export const InfoMessage: React.FC = ({ text }) => { - // Don't render anything if text is empty - if (!text || text.trim() === '') { - return null; - } - - const prefix = 'ℹ '; - const prefixWidth = prefix.length; - - return ( - - - {prefix} - - - - - - - - ); -}; diff --git a/packages/cli/src/ui/components/messages/RetryCountdownMessage.tsx b/packages/cli/src/ui/components/messages/RetryCountdownMessage.tsx deleted file mode 100644 index 0f4727574..000000000 --- a/packages/cli/src/ui/components/messages/RetryCountdownMessage.tsx +++ /dev/null @@ -1,41 +0,0 @@ -/** - * @license - * Copyright 2025 Qwen - * SPDX-License-Identifier: Apache-2.0 - */ - -import type React from 'react'; -import { Text, Box } from 'ink'; -import { theme } from '../../semantic-colors.js'; - -interface RetryCountdownMessageProps { - text: string; -} - -/** - * Displays a retry countdown message in a dimmed/secondary style - * to visually distinguish it from error messages. 
- */ -export const RetryCountdownMessage: React.FC = ({ - text, -}) => { - if (!text || text.trim() === '') { - return null; - } - - const prefix = '↻ '; - const prefixWidth = prefix.length; - - return ( - - - {prefix} - - - - {text} - - - - ); -}; diff --git a/packages/cli/src/ui/components/messages/StatusMessages.tsx b/packages/cli/src/ui/components/messages/StatusMessages.tsx new file mode 100644 index 000000000..e6e945bbd --- /dev/null +++ b/packages/cli/src/ui/components/messages/StatusMessages.tsx @@ -0,0 +1,105 @@ +/** + * @license + * Copyright 2025 Qwen Team + * SPDX-License-Identifier: Apache-2.0 + */ + +import type React from 'react'; +import { Box, Text } from 'ink'; +import stringWidth from 'string-width'; +import { theme } from '../../semantic-colors.js'; +import { RenderInline } from '../../utils/InlineMarkdownRenderer.js'; + +interface StatusMessageProps { + text: string; + prefix: string; + prefixColor: string; + textColor: string; + children?: React.ReactNode; +} + +interface StatusTextProps { + text: string; +} + +/** + * Shared renderer for status-like history messages (info/warning/error/retry). + * Keeps prefix spacing and wrapping behavior consistent across variants. 
+ */ +export const StatusMessage: React.FC = ({ + text, + prefix, + prefixColor, + textColor, + children, +}) => { + if (!text || text.trim() === '') { + return null; + } + + const prefixWidth = stringWidth(prefix) + 1; + + return ( + + + {prefix} + + + + + {children} + + + + ); +}; + +export const InfoMessage: React.FC = ({ text }) => ( + +); + +export const SuccessMessage: React.FC = ({ text }) => ( + +); + +export const WarningMessage: React.FC = ({ text }) => ( + +); + +export const ErrorMessage: React.FC = ({ + text, + hint, +}) => ( + + {hint && ({hint})} + +); + +export const RetryCountdownMessage: React.FC = ({ text }) => ( + +); diff --git a/packages/cli/src/ui/components/messages/UserMessage.tsx b/packages/cli/src/ui/components/messages/UserMessage.tsx deleted file mode 100644 index 5cc2b965c..000000000 --- a/packages/cli/src/ui/components/messages/UserMessage.tsx +++ /dev/null @@ -1,38 +0,0 @@ -/** - * @license - * Copyright 2025 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import type React from 'react'; -import { Text, Box } from 'ink'; -import { theme } from '../../semantic-colors.js'; -import { SCREEN_READER_USER_PREFIX } from '../../textConstants.js'; -import { isSlashCommand as checkIsSlashCommand } from '../../utils/commandUtils.js'; - -interface UserMessageProps { - text: string; -} - -export const UserMessage: React.FC = ({ text }) => { - const prefix = '> '; - const prefixWidth = prefix.length; - const isSlashCommand = checkIsSlashCommand(text); - - const textColor = isSlashCommand ? 
theme.text.accent : theme.text.secondary; - - return ( - - - - {prefix} - - - - - {text} - - - - ); -}; diff --git a/packages/cli/src/ui/components/messages/UserShellMessage.tsx b/packages/cli/src/ui/components/messages/UserShellMessage.tsx deleted file mode 100644 index 3b7bc7724..000000000 --- a/packages/cli/src/ui/components/messages/UserShellMessage.tsx +++ /dev/null @@ -1,25 +0,0 @@ -/** - * @license - * Copyright 2025 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import type React from 'react'; -import { Box, Text } from 'ink'; -import { theme } from '../../semantic-colors.js'; - -interface UserShellMessageProps { - text: string; -} - -export const UserShellMessage: React.FC = ({ text }) => { - // Remove leading '!' if present, as App.tsx adds it for the processor. - const commandToDisplay = text.startsWith('!') ? text.substring(1) : text; - - return ( - - $ - {commandToDisplay} - - ); -}; diff --git a/packages/cli/src/ui/components/messages/WarningMessage.tsx b/packages/cli/src/ui/components/messages/WarningMessage.tsx deleted file mode 100644 index 589ca4b07..000000000 --- a/packages/cli/src/ui/components/messages/WarningMessage.tsx +++ /dev/null @@ -1,33 +0,0 @@ -/** - * @license - * Copyright 2025 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import type React from 'react'; -import { Box, Text } from 'ink'; -import { Colors } from '../../colors.js'; -import { RenderInline } from '../../utils/InlineMarkdownRenderer.js'; -import { theme } from '../../semantic-colors.js'; - -interface WarningMessageProps { - text: string; -} - -export const WarningMessage: React.FC = ({ text }) => { - const prefix = '⚠ '; - const prefixWidth = 3; - - return ( - - - {prefix} - - - - - - - - ); -}; diff --git a/packages/cli/src/ui/contexts/KeypressContext.test.tsx b/packages/cli/src/ui/contexts/KeypressContext.test.tsx index c28cd9525..d69bada5b 100644 --- a/packages/cli/src/ui/contexts/KeypressContext.test.tsx +++ 
b/packages/cli/src/ui/contexts/KeypressContext.test.tsx @@ -1335,6 +1335,40 @@ describe('KeypressContext - Kitty Protocol', () => { ); }); + describe('Printable CSI-u keys', () => { + it('parses kitty CSI-u space as a space key with literal sequence', () => { + const keyHandler = vi.fn(); + const { result } = renderHook(() => useKeypressContext(), { wrapper }); + act(() => result.current.subscribe(keyHandler)); + + act(() => stdin.sendKittySequence(`\x1b[32u`)); + + expect(keyHandler).toHaveBeenCalledWith( + expect.objectContaining({ + name: 'space', + sequence: ' ', + kittyProtocol: true, + }), + ); + }); + + it('parses kitty CSI-u printable letters as literal input', () => { + const keyHandler = vi.fn(); + const { result } = renderHook(() => useKeypressContext(), { wrapper }); + act(() => result.current.subscribe(keyHandler)); + + act(() => stdin.sendKittySequence(`\x1b[100u`)); // 'd' + + expect(keyHandler).toHaveBeenCalledWith( + expect.objectContaining({ + name: 'd', + sequence: 'd', + kittyProtocol: true, + }), + ); + }); + }); + describe('Shift+Tab forms', () => { it.each([ { sequence: `\x1b[Z`, description: 'legacy reverse Tab' }, diff --git a/packages/cli/src/ui/contexts/KeypressContext.tsx b/packages/cli/src/ui/contexts/KeypressContext.tsx index c4e192609..4496f5e1b 100644 --- a/packages/cli/src/ui/contexts/KeypressContext.tsx +++ b/packages/cli/src/ui/contexts/KeypressContext.tsx @@ -332,6 +332,36 @@ export function KeypressProvider({ }; } + // Printable CSI-u keys (including space) should behave like regular + // character input so downstream text inputs receive the literal char. + if ( + terminator === 'u' && + !ctrl && + keyCode >= 32 && + keyCode !== 127 && + keyCode <= 0x10ffff + ) { + const char = String.fromCodePoint(keyCode); + const printableName = + char === ' ' + ? 'space' + : /^[A-Za-z]$/.test(char) + ? 
char.toLowerCase() + : char; + return { + key: { + name: printableName, + ctrl: false, + meta: alt, + shift, + paste: false, + sequence: char, + kittyProtocol: true, + }, + length: m[0].length, + }; + } + // Ctrl+letters if ( ctrl && diff --git a/packages/cli/src/ui/hooks/useGeminiStream.ts b/packages/cli/src/ui/hooks/useGeminiStream.ts index 97616d25a..173065f41 100644 --- a/packages/cli/src/ui/hooks/useGeminiStream.ts +++ b/packages/cli/src/ui/hooks/useGeminiStream.ts @@ -1038,6 +1038,15 @@ export const useGeminiStream = ( clearRetryCountdown(); } break; + case ServerGeminiEventType.HookSystemMessage: + // Display system message from hooks (e.g., Ralph Loop iteration info) + // This is handled as a content event to show in the UI + geminiMessageBuffer = handleContentEvent( + event.value + '\n', + geminiMessageBuffer, + userMessageTimestamp, + ); + break; default: { // enforces exhaustive switch-case const unreachable: never = event; diff --git a/packages/cli/src/ui/themes/no-color.ts b/packages/cli/src/ui/themes/no-color.ts index 3d5b4d4e7..c3a7cbce4 100644 --- a/packages/cli/src/ui/themes/no-color.ts +++ b/packages/cli/src/ui/themes/no-color.ts @@ -33,6 +33,7 @@ const noColorSemanticColors: SemanticColors = { secondary: '', link: '', accent: '', + code: '', }, background: { primary: '', diff --git a/packages/cli/src/ui/themes/semantic-tokens.ts b/packages/cli/src/ui/themes/semantic-tokens.ts index 2aa27a09c..d3047f0f0 100644 --- a/packages/cli/src/ui/themes/semantic-tokens.ts +++ b/packages/cli/src/ui/themes/semantic-tokens.ts @@ -12,6 +12,7 @@ export interface SemanticColors { secondary: string; link: string; accent: string; + code: string; }; background: { primary: string; @@ -45,6 +46,7 @@ export const lightSemanticColors: SemanticColors = { secondary: lightTheme.Gray, link: lightTheme.AccentBlue, accent: lightTheme.AccentPurple, + code: lightTheme.LightBlue, }, background: { primary: lightTheme.Background, @@ -77,6 +79,7 @@ export const darkSemanticColors: 
SemanticColors = { secondary: darkTheme.Gray, link: darkTheme.AccentBlue, accent: darkTheme.AccentPurple, + code: darkTheme.LightBlue, }, background: { primary: darkTheme.Background, @@ -109,6 +112,7 @@ export const ansiSemanticColors: SemanticColors = { secondary: ansiTheme.Gray, link: ansiTheme.AccentBlue, accent: ansiTheme.AccentPurple, + code: ansiTheme.LightBlue, }, background: { primary: ansiTheme.Background, diff --git a/packages/cli/src/ui/themes/theme.ts b/packages/cli/src/ui/themes/theme.ts index 3ae3bbead..5fee07729 100644 --- a/packages/cli/src/ui/themes/theme.ts +++ b/packages/cli/src/ui/themes/theme.ts @@ -40,6 +40,7 @@ export interface CustomTheme { secondary?: string; link?: string; accent?: string; + code?: string; }; background?: { primary?: string; @@ -174,6 +175,7 @@ export class Theme { secondary: this.colors.Gray, link: this.colors.AccentBlue, accent: this.colors.AccentPurple, + code: this.colors.LightBlue, }, background: { primary: this.colors.Background, @@ -269,7 +271,7 @@ export function createCustomTheme(customTheme: CustomTheme): Theme { type: 'custom', Background: customTheme.background?.primary ?? customTheme.Background ?? '', Foreground: customTheme.text?.primary ?? customTheme.Foreground ?? '', - LightBlue: customTheme.text?.link ?? customTheme.LightBlue ?? '', + LightBlue: customTheme.text?.code ?? customTheme.LightBlue ?? '', AccentBlue: customTheme.text?.link ?? customTheme.AccentBlue ?? '', AccentPurple: customTheme.text?.accent ?? customTheme.AccentPurple ?? '', AccentCyan: customTheme.text?.link ?? customTheme.AccentCyan ?? '', @@ -433,6 +435,7 @@ export function createCustomTheme(customTheme: CustomTheme): Theme { secondary: customTheme.text?.secondary ?? colors.Gray, link: customTheme.text?.link ?? colors.AccentBlue, accent: customTheme.text?.accent ?? colors.AccentPurple, + code: customTheme.text?.code ?? colors.LightBlue, }, background: { primary: customTheme.background?.primary ?? 
colors.Background, diff --git a/packages/cli/src/utils/settingsUtils.ts b/packages/cli/src/utils/settingsUtils.ts index 1bd5988eb..0effeb738 100644 --- a/packages/cli/src/utils/settingsUtils.ts +++ b/packages/cli/src/utils/settingsUtils.ts @@ -129,6 +129,13 @@ export function getNestedValue( return undefined; } +export function getNestedProperty( + obj: Record, + path: string, +): unknown { + return getNestedValue(obj, path.split('.')); +} + /** * Get the effective value for a setting, considering inheritance from higher scopes * Always returns a value (never undefined) - falls back to default if not set anywhere @@ -382,30 +389,69 @@ export function settingExistsInScope( return value !== undefined; } -/** - * Recursively sets a value in a nested object using a key path array. - */ -function setNestedValue( +export function setNestedPropertyForce( obj: Record, - path: string[], + path: string, value: unknown, -): Record { - const [first, ...rest] = path; - if (!first) { - return obj; +): void { + const keys = path.split('.'); + const lastKey = keys.pop(); + if (!lastKey) return; + + let current: Record = obj; + for (const key of keys) { + if (!current[key] || typeof current[key] !== 'object') { + current[key] = {}; + } + current = current[key] as Record; } - if (rest.length === 0) { - obj[first] = value; - return obj; + current[lastKey] = value; +} + +export function setNestedPropertySafe( + obj: Record, + path: string, + value: unknown, +): void { + const keys = path.split('.'); + const lastKey = keys.pop(); + if (!lastKey) return; + + let current: Record = obj; + for (const key of keys) { + if (current[key] === undefined) { + current[key] = {}; + } + const next = current[key]; + if (typeof next === 'object' && next !== null) { + current = next as Record; + } else { + return; + } } - if (!obj[first] || typeof obj[first] !== 'object') { - obj[first] = {}; + current[lastKey] = value; +} + +export function deleteNestedPropertySafe( + obj: Record, + path: string, +): 
void { + const keys = path.split('.'); + const lastKey = keys.pop(); + if (!lastKey) return; + + let current: Record = obj; + for (const key of keys) { + const next = current[key]; + if (typeof next !== 'object' || next === null) { + return; + } + current = next as Record; } - setNestedValue(obj[first] as Record, rest, value); - return obj; + delete current[lastKey]; } /** @@ -416,9 +462,8 @@ export function setPendingSettingValue( value: boolean, pendingSettings: Settings, ): Settings { - const path = key.split('.'); const newSettings = JSON.parse(JSON.stringify(pendingSettings)); - setNestedValue(newSettings, path, value); + setNestedPropertyForce(newSettings, key, value); return newSettings; } @@ -430,9 +475,8 @@ export function setPendingSettingValueAny( value: SettingsValue, pendingSettings: Settings, ): Settings { - const path = key.split('.'); const newSettings = structuredClone(pendingSettings); - setNestedValue(newSettings, path, value); + setNestedPropertyForce(newSettings, key, value); return newSettings; } diff --git a/packages/cli/src/utils/writeWithBackup.test.ts b/packages/cli/src/utils/writeWithBackup.test.ts new file mode 100644 index 000000000..219bda81b --- /dev/null +++ b/packages/cli/src/utils/writeWithBackup.test.ts @@ -0,0 +1,232 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect, beforeEach, afterEach } from 'vitest'; +import * as fs from 'node:fs'; +import * as path from 'node:path'; +import * as os from 'node:os'; +import { writeWithBackup, writeWithBackupSync } from './writeWithBackup.js'; + +describe('writeWithBackup', () => { + let tempDir: string; + + beforeEach(() => { + tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'writeWithBackup-test-')); + }); + + afterEach(() => { + // Clean up temp directory + try { + fs.rmSync(tempDir, { recursive: true, force: true }); + } catch (_e) { + // Ignore cleanup errors + } + }); + + describe('writeWithBackupSync', () => 
{ + it('should write content to a new file', () => { + const targetPath = path.join(tempDir, 'test-file.txt'); + const content = 'Hello, World!'; + + writeWithBackupSync(targetPath, content); + + expect(fs.existsSync(targetPath)).toBe(true); + expect(fs.readFileSync(targetPath, 'utf-8')).toBe(content); + }); + + it('should backup existing file before writing', () => { + const targetPath = path.join(tempDir, 'test-file.txt'); + const originalContent = 'Original content'; + const newContent = 'New content'; + + fs.writeFileSync(targetPath, originalContent); + writeWithBackupSync(targetPath, newContent); + + expect(fs.readFileSync(targetPath, 'utf-8')).toBe(newContent); + expect(fs.existsSync(`${targetPath}.orig`)).toBe(true); + expect(fs.readFileSync(`${targetPath}.orig`, 'utf-8')).toBe( + originalContent, + ); + }); + + it('should use custom backup suffix', () => { + const targetPath = path.join(tempDir, 'test-file.txt'); + const originalContent = 'Original'; + + fs.writeFileSync(targetPath, originalContent); + writeWithBackupSync(targetPath, 'New', { backupSuffix: '.bak' }); + + expect(fs.existsSync(`${targetPath}.bak`)).toBe(true); + expect(fs.existsSync(`${targetPath}.orig`)).toBe(false); + }); + + it('should clean up temp file on failure', () => { + const targetPath = path.join(tempDir, 'test-file.txt'); + const tempPath = `${targetPath}.tmp`; + + // Create a situation where rename will fail (e.g., by creating a directory at target) + fs.mkdirSync(targetPath); + + expect(() => writeWithBackupSync(targetPath, 'content')).toThrow(); + expect(fs.existsSync(tempPath)).toBe(false); + }); + + it('should preserve original file content when write fails after backup', () => { + const targetPath = path.join(tempDir, 'test-file.txt'); + const originalContent = 'Original content that must be preserved'; + + // Create original file + fs.writeFileSync(targetPath, originalContent); + + // Create a situation where rename will fail (by creating a directory at temp path) + const 
tempPath = `${targetPath}.tmp`; + fs.mkdirSync(tempPath); + + // The write should fail + expect(() => writeWithBackupSync(targetPath, 'New content')).toThrow(); + + // Original file should still exist with original content + expect(fs.existsSync(targetPath)).toBe(true); + expect(fs.statSync(targetPath).isFile()).toBe(true); + expect(fs.readFileSync(targetPath, 'utf-8')).toBe(originalContent); + + // Cleanup + fs.rmdirSync(tempPath); + }); + + it('should restore original file from backup when rename fails', () => { + const targetPath = path.join(tempDir, 'test-file.txt'); + const backupPath = `${targetPath}.orig`; + const originalContent = 'Original content'; + const newContent = 'New content'; + + // Create original file + fs.writeFileSync(targetPath, originalContent); + + // Write new content successfully first + writeWithBackupSync(targetPath, newContent); + + // Verify backup exists with original content + expect(fs.existsSync(backupPath)).toBe(true); + expect(fs.readFileSync(backupPath, 'utf-8')).toBe(originalContent); + + // Verify target has new content + expect(fs.readFileSync(targetPath, 'utf-8')).toBe(newContent); + + // Now simulate a failure scenario: delete target and try to restore from backup + fs.unlinkSync(targetPath); + + // Restore from backup manually to verify backup integrity + fs.copyFileSync(backupPath, targetPath); + expect(fs.readFileSync(targetPath, 'utf-8')).toBe(originalContent); + }); + + it('should include recovery information in error message', () => { + const targetPath = path.join(tempDir, 'test-file.txt'); + + // Create a situation where rename will fail (directory at target) + fs.mkdirSync(targetPath); + + let errorMessage = ''; + try { + writeWithBackupSync(targetPath, 'content'); + } catch (error) { + errorMessage = error instanceof Error ? 
error.message : String(error); + } + + // Error message should be descriptive + expect(errorMessage).toContain('directory'); + expect(errorMessage.length).toBeGreaterThan(10); + }); + + it('should handle backup failure with descriptive error', () => { + const targetPath = path.join(tempDir, 'test-file.txt'); + const backupPath = `${targetPath}.orig`; + const originalContent = 'Original content'; + + // Create original file + fs.writeFileSync(targetPath, originalContent); + + // Create a directory at backup path to cause backup to fail + fs.mkdirSync(backupPath); + + let errorMessage = ''; + try { + writeWithBackupSync(targetPath, 'New content'); + } catch (error) { + errorMessage = error instanceof Error ? error.message : String(error); + } + + // Error message should mention backup failure + expect(errorMessage).toContain('backup'); + + // Original file should still exist + expect(fs.existsSync(targetPath)).toBe(true); + expect(fs.readFileSync(targetPath, 'utf-8')).toBe(originalContent); + + // Cleanup + fs.rmdirSync(backupPath); + }); + + it('should clean up temp file when backup creation fails', () => { + const targetPath = path.join(tempDir, 'test-file.txt'); + const tempPath = `${targetPath}.tmp`; + const backupPath = `${targetPath}.orig`; + const originalContent = 'Original content'; + + // Create original file + fs.writeFileSync(targetPath, originalContent); + + // Create a directory at backup path to cause backup to fail + fs.mkdirSync(backupPath); + + // The write should fail + expect(() => writeWithBackupSync(targetPath, 'New content')).toThrow(); + + // Temp file should be cleaned up + expect(fs.existsSync(tempPath)).toBe(false); + + // Cleanup + fs.rmdirSync(backupPath); + }); + }); + + describe('writeWithBackup (async)', () => { + it('should write content to a new file', async () => { + const targetPath = path.join(tempDir, 'test-file.txt'); + const content = 'Hello, World!'; + + await writeWithBackup(targetPath, content); + + 
expect(fs.existsSync(targetPath)).toBe(true); + expect(fs.readFileSync(targetPath, 'utf-8')).toBe(content); + }); + + it('should backup existing file before writing', async () => { + const targetPath = path.join(tempDir, 'test-file.txt'); + const originalContent = 'Original content'; + const newContent = 'New content'; + + fs.writeFileSync(targetPath, originalContent); + await writeWithBackup(targetPath, newContent); + + expect(fs.readFileSync(targetPath, 'utf-8')).toBe(newContent); + expect(fs.existsSync(`${targetPath}.orig`)).toBe(true); + expect(fs.readFileSync(`${targetPath}.orig`, 'utf-8')).toBe( + originalContent, + ); + }); + + it('should use custom encoding', async () => { + const targetPath = path.join(tempDir, 'test-file.txt'); + const content = 'Hello, World!'; + + await writeWithBackup(targetPath, content, { encoding: 'utf8' }); + + expect(fs.readFileSync(targetPath, 'utf-8')).toBe(content); + }); + }); +}); diff --git a/packages/cli/src/utils/writeWithBackup.ts b/packages/cli/src/utils/writeWithBackup.ts new file mode 100644 index 000000000..2c341ae38 --- /dev/null +++ b/packages/cli/src/utils/writeWithBackup.ts @@ -0,0 +1,169 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import * as fs from 'node:fs'; + +/** + * Options for writeWithBackup function. + */ +export interface WriteWithBackupOptions { + /** Suffix for backup file (default: '.orig') */ + backupSuffix?: string; + /** File encoding (default: 'utf-8') */ + encoding?: BufferEncoding; +} + +/** + * Safely writes content to a file with backup protection. + * + * This function ensures data safety by: + * 1. Writing content to a temporary file first + * 2. Backing up the existing target file (if any) + * 3. Renaming the temporary file to the target path + * + * If any step fails, an error is thrown and no partial changes are left on disk. + * The backup file (if created) can be used for manual recovery. 
+ * + * Note: This is not 100% atomic but provides good protection. In the worst case, + * a .orig backup file remains that can be manually restored. + * + * @param targetPath - The path to write to + * @param content - The content to write + * @param options - Optional configuration + * @throws Error if any step of the write process fails + * + * @example + * ```typescript + * await writeWithBackup('/path/to/settings.json', JSON.stringify(settings, null, 2)); + * // If /path/to/settings.json existed, it's now backed up to /path/to/settings.json.orig + * ``` + */ +export async function writeWithBackup( + targetPath: string, + content: string, + options: WriteWithBackupOptions = {}, +): Promise { + // Async version delegates to sync version since file operations are synchronous + writeWithBackupSync(targetPath, content, options); +} + +/** + * Synchronous version of writeWithBackup. + * + * @param targetPath - The path to write to + * @param content - The content to write + * @param options - Optional configuration + * @throws Error if any step of the write process fails + */ +export function writeWithBackupSync( + targetPath: string, + content: string, + options: WriteWithBackupOptions = {}, +): void { + const { backupSuffix = '.orig', encoding = 'utf-8' } = options; + const tempPath = `${targetPath}.tmp`; + const backupPath = `${targetPath}${backupSuffix}`; + + // Clean up any existing temp file from previous failed attempts + try { + if (fs.existsSync(tempPath)) { + fs.unlinkSync(tempPath); + } + } catch (_e) { + // Ignore cleanup errors + } + + try { + // Step 1: Write to temporary file + fs.writeFileSync(tempPath, content, { encoding }); + + // Step 2: If target exists, back it up + if (fs.existsSync(targetPath)) { + // Check if target is a directory - we can't write to a directory + const targetStat = fs.statSync(targetPath); + if (targetStat.isDirectory()) { + // Clean up temp file before throwing + try { + fs.unlinkSync(tempPath); + } catch (_e) { + // 
Ignore cleanup error + } + throw new Error( + `Cannot write to '${targetPath}' because it is a directory`, + ); + } + + try { + fs.renameSync(targetPath, backupPath); + } catch (backupError) { + // Clean up temp file before throwing + try { + fs.unlinkSync(tempPath); + } catch (_e) { + // Ignore cleanup error + } + throw new Error( + `Failed to backup existing file: ${backupError instanceof Error ? backupError.message : String(backupError)}`, + ); + } + } + + // Step 3: Rename temp file to target + try { + fs.renameSync(tempPath, targetPath); + } catch (renameError) { + let restoreFailedMessage: string | undefined; + let backupExisted = false; + + // Attempt to restore backup if rename failed + if (fs.existsSync(backupPath)) { + backupExisted = true; + try { + fs.renameSync(backupPath, targetPath); + } catch (restoreError) { + restoreFailedMessage = + restoreError instanceof Error + ? restoreError.message + : String(restoreError); + } + } + + const writeFailureMessage = + renameError instanceof Error + ? renameError.message + : String(renameError); + + if (restoreFailedMessage) { + throw new Error( + `Failed to write file: ${writeFailureMessage}. ` + + `Automatic restore failed: ${restoreFailedMessage}. ` + + `Manual recovery may be required using backup file '${backupPath}'.`, + ); + } + + if (backupExisted) { + throw new Error( + `Failed to write file: ${writeFailureMessage}. ` + + `Target was automatically restored from backup '${backupPath}'.`, + ); + } + + throw new Error( + `Failed to write file: ${writeFailureMessage}. 
No backup file was available for restoration.`, + ); + } + } catch (error) { + // Ensure temp file is cleaned up on any error + try { + if (fs.existsSync(tempPath)) { + fs.unlinkSync(tempPath); + } + } catch (_e) { + // Ignore cleanup error + } + throw error; + } +} diff --git a/packages/core/package.json b/packages/core/package.json index c80f40474..43219cbcc 100644 --- a/packages/core/package.json +++ b/packages/core/package.json @@ -42,6 +42,7 @@ "ajv-formats": "^3.0.0", "async-mutex": "^0.5.0", "chardet": "^2.1.0", + "iconv-lite": "^0.6.3", "chokidar": "^4.0.3", "diff": "^7.0.0", "dotenv": "^17.1.0", diff --git a/packages/core/src/config/config.test.ts b/packages/core/src/config/config.test.ts index 2be01125f..828ef9c3e 100644 --- a/packages/core/src/config/config.test.ts +++ b/packages/core/src/config/config.test.ts @@ -118,6 +118,7 @@ vi.mock('../tools/memoryTool', () => ({ MemoryTool: createToolMock('save_memory'), setGeminiMdFilename: vi.fn(), getCurrentGeminiMdFilename: vi.fn(() => 'QWEN.md'), // Mock the original filename + getAllGeminiMdFilenames: vi.fn(() => ['QWEN.md', 'AGENTS.md']), DEFAULT_CONTEXT_FILENAME: 'QWEN.md', QWEN_CONFIG_DIR: '.qwen', })); diff --git a/packages/core/src/config/config.ts b/packages/core/src/config/config.ts index 285ad2bce..bb9bbf6ec 100644 --- a/packages/core/src/config/config.ts +++ b/packages/core/src/config/config.ts @@ -84,6 +84,13 @@ import { ExtensionManager, type Extension, } from '../extension/extensionManager.js'; +import { HookSystem } from '../hooks/index.js'; +import { MessageBus } from '../confirmation-bus/message-bus.js'; +import { + MessageBusType, + type HookExecutionRequest, + type HookExecutionResponse, +} from '../confirmation-bus/types.js'; // Utils import { shouldAttemptBrowserLaunch } from '../utils/browser.js'; @@ -377,6 +384,12 @@ export interface ConfigParameters { channel?: string; /** Model providers configuration grouped by authType */ modelProvidersConfig?: ModelProvidersConfig; + /** Enable hook 
system for lifecycle events */ + enableHooks?: boolean; + /** Hooks configuration from settings */ + hooks?: Record; + /** Hooks config settings (enabled, disabled list) */ + hooksConfig?: Record; /** Warnings generated during configuration resolution */ warnings?: string[]; } @@ -519,6 +532,11 @@ export class Config { private readonly eventEmitter?: EventEmitter; private readonly channel: string | undefined; private readonly defaultFileEncoding: FileEncodingType; + private readonly enableHooks: boolean; + private readonly hooks?: Record; + private readonly hooksConfig?: Record; + private hookSystem?: HookSystem; + private messageBus?: MessageBus; constructor(params: ConfigParameters) { this.sessionId = params.sessionId ?? randomUUID(); @@ -673,6 +691,9 @@ export class Config { enabledExtensionOverrides: this.overrideExtensions, isWorkspaceTrusted: this.isTrustedFolder(), }); + this.enableHooks = params.enableHooks ?? false; + this.hooks = params.hooks; + this.hooksConfig = params.hooksConfig; } /** @@ -696,6 +717,75 @@ export class Config { await this.extensionManager.refreshCache(); this.debugLogger.debug('Extension manager initialized'); + // Initialize hook system if enabled + if (this.enableHooks) { + this.hookSystem = new HookSystem(this); + await this.hookSystem.initialize(); + this.debugLogger.debug('Hook system initialized'); + + // Initialize MessageBus for hook execution + this.messageBus = new MessageBus(); + + // Subscribe to HOOK_EXECUTION_REQUEST to execute hooks + this.messageBus.subscribe( + MessageBusType.HOOK_EXECUTION_REQUEST, + async (request: HookExecutionRequest) => { + try { + const hookSystem = this.hookSystem; + if (!hookSystem) { + this.messageBus?.publish({ + type: MessageBusType.HOOK_EXECUTION_RESPONSE, + correlationId: request.correlationId, + success: false, + error: new Error('Hook system not initialized'), + } as HookExecutionResponse); + return; + } + + // Execute the appropriate hook based on eventName + let result; + const input 
= request.input || {}; + switch (request.eventName) { + case 'UserPromptSubmit': + result = await hookSystem.fireUserPromptSubmitEvent( + (input['prompt'] as string) || '', + ); + break; + case 'Stop': + result = await hookSystem.fireStopEvent( + (input['stop_hook_active'] as boolean) || false, + (input['last_assistant_message'] as string) || '', + ); + break; + default: + this.debugLogger.warn( + `Unknown hook event: ${request.eventName}`, + ); + result = undefined; + } + + // Send response + this.messageBus?.publish({ + type: MessageBusType.HOOK_EXECUTION_RESPONSE, + correlationId: request.correlationId, + success: true, + output: result, + } as HookExecutionResponse); + } catch (error) { + this.debugLogger.warn(`Hook execution failed: ${error}`); + this.messageBus?.publish({ + type: MessageBusType.HOOK_EXECUTION_RESPONSE, + correlationId: request.correlationId, + success: false, + error: error instanceof Error ? error : new Error(String(error)), + } as HookExecutionResponse); + } + }, + ); + + this.debugLogger.debug('MessageBus initialized with hook subscription'); + } + this.subagentManager = new SubagentManager(this); this.skillManager = new SkillManager(this); await this.skillManager.startWatching(); @@ -1384,6 +1474,66 @@ export class Config { return this.extensionManager; } + /** + * Get the hook system instance if hooks are enabled. + * Returns undefined if hooks are not enabled. + */ + getHookSystem(): HookSystem | undefined { + return this.hookSystem; + } + + /** + * Check if hooks are enabled. + */ + getEnableHooks(): boolean { + return this.enableHooks; + } + + /** + * Get the message bus instance. + * Returns undefined if not set. + */ + getMessageBus(): MessageBus | undefined { + return this.messageBus; + } + + /** + * Set the message bus instance. + * This is called by the CLI layer to inject the MessageBus. + */ + setMessageBus(messageBus: MessageBus): void { + this.messageBus = messageBus; + } + + /** + * Get the list of disabled hook names. 
+ * This is used by the HookRegistry to filter out disabled hooks. + */ + getDisabledHooks(): string[] { + const hooksConfig = this.hooksConfig; + if (!hooksConfig) return []; + const disabled = hooksConfig['disabled']; + return Array.isArray(disabled) ? (disabled as string[]) : []; + } + + /** + * Get project-level hooks configuration. + * This is used by the HookRegistry to load project-specific hooks. + */ + getProjectHooks(): Record | undefined { + // This will be populated from settings by the CLI layer + // The core Config doesn't have direct access to settings + return undefined; + } + + /** + * Get all hooks configuration (merged from all sources). + * This is used by the HookRegistry to load hooks. + */ + getHooks(): Record | undefined { + return this.hooks; + } + getExtensions(): Extension[] { const extensions = this.extensionManager.getLoadedExtensions(); if (this.overrideExtensions) { @@ -1620,6 +1770,21 @@ export class Config { return this.chatRecordingService; } + /** + * Returns the transcript file path for the current session. + * This is the path to the JSONL file where the conversation is recorded. + * Returns empty string if chat recording is disabled. + */ + getTranscriptPath(): string { + if (!this.chatRecordingEnabled) { + return ''; + } + const projectDir = this.storage.getProjectDir(); + const sessionId = this.getSessionId(); + const safeFilename = `${sessionId}.jsonl`; + return path.join(projectDir, 'chats', safeFilename); + } + /** * Gets or creates a SessionService for managing chat sessions. 
*/ diff --git a/packages/core/src/config/storage.ts b/packages/core/src/config/storage.ts index f9d0107e5..3293280a8 100644 --- a/packages/core/src/config/storage.ts +++ b/packages/core/src/config/storage.ts @@ -7,7 +7,7 @@ import * as path from 'node:path'; import * as os from 'node:os'; import * as fs from 'node:fs'; -import { getProjectHash } from '../utils/paths.js'; +import { getProjectHash, sanitizeCwd } from '../utils/paths.js'; export const QWEN_DIR = '.qwen'; export const GOOGLE_ACCOUNTS_FILENAME = 'google_accounts.json'; @@ -82,7 +82,7 @@ export class Storage { } getProjectDir(): string { - const projectId = this.sanitizeCwd(this.getProjectRoot()); + const projectId = sanitizeCwd(this.getProjectRoot()); const projectsDir = path.join(Storage.getGlobalQwenDir(), PROJECT_DIR_NAME); return path.join(projectsDir, projectId); } @@ -140,10 +140,4 @@ export class Storage { getHistoryFilePath(): string { return path.join(this.getProjectTempDir(), 'shell_history'); } - - private sanitizeCwd(cwd: string): string { - // On Windows, normalize to lowercase for case-insensitive matching - const normalizedCwd = os.platform() === 'win32' ? 
cwd.toLowerCase() : cwd; - return normalizedCwd.replace(/[^a-zA-Z0-9]/g, '-'); - } } diff --git a/packages/core/src/confirmation-bus/message-bus.ts b/packages/core/src/confirmation-bus/message-bus.ts new file mode 100644 index 000000000..fcd2caab7 --- /dev/null +++ b/packages/core/src/confirmation-bus/message-bus.ts @@ -0,0 +1,125 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { randomUUID } from 'node:crypto'; +import { EventEmitter } from 'node:events'; +import { MessageBusType, type Message } from './types.js'; +import { safeJsonStringify } from '../utils/safeJsonStringify.js'; +import { createDebugLogger } from '../utils/debugLogger.js'; + +const debugLogger = createDebugLogger('TRUSTED_HOOKS'); + +export class MessageBus extends EventEmitter { + constructor(private readonly debug = false) { + super(); + this.debug = debug; + } + + private isValidMessage(message: Message): boolean { + if (!message || !message.type) { + return false; + } + + if ( + message.type === MessageBusType.TOOL_CONFIRMATION_REQUEST && + !('correlationId' in message) + ) { + return false; + } + + return true; + } + + private emitMessage(message: Message): void { + this.emit(message.type, message); + } + + async publish(message: Message): Promise { + if (this.debug) { + debugLogger.debug(`[MESSAGE_BUS] publish: ${safeJsonStringify(message)}`); + } + try { + if (!this.isValidMessage(message)) { + throw new Error( + `Invalid message structure: ${safeJsonStringify(message)}`, + ); + } + + if (message.type === MessageBusType.TOOL_CONFIRMATION_REQUEST) { + // Allow all tool confirmations by default (policy engine removed) + this.emitMessage({ + type: MessageBusType.TOOL_CONFIRMATION_RESPONSE, + correlationId: message.correlationId, + confirmed: true, + }); + } else if (message.type === MessageBusType.HOOK_EXECUTION_REQUEST) { + // Allow all hook executions by default (policy engine removed) + this.emitMessage(message); + } else { + // 
For all other message types, just emit them + this.emitMessage(message); + } + } catch (error) { + this.emit('error', error); + } + } + + subscribe( + type: T['type'], + listener: (message: T) => void, + ): void { + this.on(type, listener); + } + + unsubscribe( + type: T['type'], + listener: (message: T) => void, + ): void { + this.off(type, listener); + } + + /** + * Request-response pattern: Publish a message and wait for a correlated response + * This enables synchronous-style communication over the async MessageBus + * The correlation ID is generated internally and added to the request + */ + async request( + request: Omit, + responseType: TResponse['type'], + timeoutMs: number = 60000, + ): Promise { + const correlationId = randomUUID(); + + return new Promise((resolve, reject) => { + const timeoutId = setTimeout(() => { + cleanup(); + reject(new Error(`Request timed out waiting for ${responseType}`)); + }, timeoutMs); + + const cleanup = () => { + clearTimeout(timeoutId); + this.unsubscribe(responseType, responseHandler); + }; + + const responseHandler = (response: TResponse) => { + // Check if this response matches our request + if ( + 'correlationId' in response && + response.correlationId === correlationId + ) { + cleanup(); + resolve(response); + } + }; + + // Subscribe to responses + this.subscribe(responseType, responseHandler); + + // Publish the request with correlation ID + this.publish({ ...request, correlationId } as TRequest); + }); + } +} diff --git a/packages/core/src/confirmation-bus/types.ts b/packages/core/src/confirmation-bus/types.ts new file mode 100644 index 000000000..7a699bacb --- /dev/null +++ b/packages/core/src/confirmation-bus/types.ts @@ -0,0 +1,128 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { type FunctionCall } from '@google/genai'; +import type { + ToolConfirmationOutcome, + ToolConfirmationPayload, +} from '../tools/tools.js'; + +export enum MessageBusType { + 
TOOL_CONFIRMATION_REQUEST = 'tool-confirmation-request', + TOOL_CONFIRMATION_RESPONSE = 'tool-confirmation-response', + TOOL_EXECUTION_SUCCESS = 'tool-execution-success', + TOOL_EXECUTION_FAILURE = 'tool-execution-failure', + HOOK_EXECUTION_REQUEST = 'hook-execution-request', + HOOK_EXECUTION_RESPONSE = 'hook-execution-response', +} + +export interface ToolConfirmationRequest { + type: MessageBusType.TOOL_CONFIRMATION_REQUEST; + toolCall: FunctionCall; + correlationId: string; + serverName?: string; + /** + * Optional rich details for the confirmation UI (diffs, counts, etc.) + */ + details?: SerializableConfirmationDetails; +} + +export interface ToolConfirmationResponse { + type: MessageBusType.TOOL_CONFIRMATION_RESPONSE; + correlationId: string; + confirmed: boolean; + /** + * The specific outcome selected by the user. + * + * TODO: Make required after migration. + */ + outcome?: ToolConfirmationOutcome; + /** + * Optional payload (e.g., modified content for 'modify_with_editor'). + */ + payload?: ToolConfirmationPayload; + /** + * When true, indicates that policy decision was ASK_USER and the tool should + * show its legacy confirmation UI instead of auto-proceeding. + */ + requiresUserConfirmation?: boolean; +} + +/** + * Data-only versions of ToolCallConfirmationDetails for bus transmission. 
+ */ +export type SerializableConfirmationDetails = + | { + type: 'info'; + title: string; + prompt: string; + urls?: string[]; + } + | { + type: 'edit'; + title: string; + fileName: string; + filePath: string; + fileDiff: string; + originalContent: string | null; + newContent: string; + isModifying?: boolean; + } + | { + type: 'exec'; + title: string; + command: string; + rootCommand: string; + rootCommands: string[]; + commands?: string[]; + } + | { + type: 'mcp'; + title: string; + serverName: string; + toolName: string; + toolDisplayName: string; + } + | { + type: 'exit_plan_mode'; + title: string; + planPath: string; + }; + +export interface ToolExecutionSuccess { + type: MessageBusType.TOOL_EXECUTION_SUCCESS; + toolCall: FunctionCall; + result: T; +} + +export interface ToolExecutionFailure { + type: MessageBusType.TOOL_EXECUTION_FAILURE; + toolCall: FunctionCall; + error: E; +} + +export interface HookExecutionRequest { + type: MessageBusType.HOOK_EXECUTION_REQUEST; + eventName: string; + input: Record; + correlationId: string; +} + +export interface HookExecutionResponse { + type: MessageBusType.HOOK_EXECUTION_RESPONSE; + correlationId: string; + success: boolean; + output?: Record; + error?: Error; +} + +export type Message = + | ToolConfirmationRequest + | ToolConfirmationResponse + | ToolExecutionSuccess + | ToolExecutionFailure + | HookExecutionRequest + | HookExecutionResponse; diff --git a/packages/core/src/core/client.test.ts b/packages/core/src/core/client.test.ts index b5234045e..8121e1464 100644 --- a/packages/core/src/core/client.test.ts +++ b/packages/core/src/core/client.test.ts @@ -1,6 +1,6 @@ /** * @license - * Copyright 2025 Google LLC + * Copyright 2025 Qwen Team * SPDX-License-Identifier: Apache-2.0 */ @@ -356,6 +356,8 @@ describe('Gemini Client (client.ts)', () => { getSkipLoopDetection: vi.fn().mockReturnValue(false), getChatRecordingService: vi.fn().mockReturnValue(undefined), getResumedSessionData: vi.fn().mockReturnValue(undefined), + 
getEnableHooks: vi.fn().mockReturnValue(false), + getMessageBus: vi.fn().mockReturnValue(undefined), } as unknown as Config; client = new GeminiClient(mockConfig); @@ -2270,7 +2272,6 @@ Other open files: // Replace loop detector with spies const ldMock = { - turnStarted: vi.fn().mockResolvedValue(false), addAndCheck: vi.fn().mockReturnValue(false), reset: vi.fn(), }; @@ -2301,7 +2302,6 @@ Other open files: } // Assert - loop detection methods should not be called when skipLoopDetection is true - expect(ldMock.turnStarted).not.toHaveBeenCalled(); expect(ldMock.addAndCheck).not.toHaveBeenCalled(); }); }); diff --git a/packages/core/src/core/client.ts b/packages/core/src/core/client.ts index 9f3625c38..5c7cfb2a8 100644 --- a/packages/core/src/core/client.ts +++ b/packages/core/src/core/client.ts @@ -1,6 +1,6 @@ /** * @license - * Copyright 2025 Google LLC + * Copyright 2026 Qwen Team * SPDX-License-Identifier: Apache-2.0 */ @@ -69,9 +69,19 @@ import { checkNextSpeaker } from '../utils/nextSpeakerChecker.js'; import { flatMapTextParts } from '../utils/partUtils.js'; import { retryWithBackoff } from '../utils/retry.js'; +// Hook types and utilities +import { + MessageBusType, + type HookExecutionRequest, + type HookExecutionResponse, +} from '../confirmation-bus/types.js'; +import { partToString } from '../utils/partUtils.js'; +import { createHookOutput } from '../hooks/types.js'; + // IDE integration import { ideContextStore } from '../ide/ideContext.js'; import { type File, type IdeContext } from '../ide/types.js'; +import type { StopHookOutput } from '../hooks/types.js'; const MAX_TURNS = 100; @@ -407,6 +417,51 @@ export class GeminiClient { options?: { isContinuation: boolean }, turns: number = MAX_TURNS, ): AsyncGenerator { + // Fire UserPromptSubmit hook through MessageBus (only if hooks are enabled) + const hooksEnabled = this.config.getEnableHooks(); + const messageBus = this.config.getMessageBus(); + if (hooksEnabled && messageBus) { + const promptText = 
partToString(request); + const response = await messageBus.request< + HookExecutionRequest, + HookExecutionResponse + >( + { + type: MessageBusType.HOOK_EXECUTION_REQUEST, + eventName: 'UserPromptSubmit', + input: { + prompt: promptText, + }, + }, + MessageBusType.HOOK_EXECUTION_RESPONSE, + ); + const hookOutput = response.output + ? createHookOutput('UserPromptSubmit', response.output) + : undefined; + + if ( + hookOutput?.isBlockingDecision() || + hookOutput?.shouldStopExecution() + ) { + yield { + type: GeminiEventType.Error, + value: { + error: new Error( + `UserPromptSubmit hook blocked processing: ${hookOutput.getEffectiveReason()}`, + ), + }, + }; + return new Turn(this.getChat(), prompt_id); + } + + // Add additional context from hooks to the request + const additionalContext = hookOutput?.getAdditionalContext(); + if (additionalContext) { + const requestArray = Array.isArray(request) ? request : [request]; + request = [...requestArray, { text: additionalContext }]; + } + } + if (!options?.isContinuation) { this.loopDetector.reset(prompt_id); this.lastPromptId = prompt_id; @@ -486,14 +541,6 @@ export class GeminiClient { const turn = new Turn(this.getChat(), prompt_id); - if (!this.config.getSkipLoopDetection()) { - const loopDetected = await this.loopDetector.turnStarted(signal); - if (loopDetected) { - yield { type: GeminiEventType.LoopDetected }; - return turn; - } - } - // append system reminders to the request let requestToSent = await flatMapTextParts(request, async (text) => [text]); if (!options?.isContinuation) { @@ -536,6 +583,65 @@ export class GeminiClient { return turn; } } + // Fire Stop hook through MessageBus (only if hooks are enabled) + // This must be done before any early returns to ensure hooks are always triggered + if (hooksEnabled && messageBus && !turn.pendingToolCalls.length) { + // Get response text from the chat history + const history = this.getHistory(); + const lastModelMessage = history + .filter((msg) => msg.role === 
'model') + .pop(); + const responseText = + lastModelMessage?.parts + ?.filter((p): p is { text: string } => 'text' in p) + .map((p) => p.text) + .join('') || '[no response text]'; + + const response = await messageBus.request< + HookExecutionRequest, + HookExecutionResponse + >( + { + type: MessageBusType.HOOK_EXECUTION_REQUEST, + eventName: 'Stop', + input: { + stop_hook_active: true, + last_assistant_message: responseText, + }, + }, + MessageBusType.HOOK_EXECUTION_RESPONSE, + ); + const hookOutput = response.output + ? createHookOutput('Stop', response.output) + : undefined; + + const stopOutput = hookOutput as StopHookOutput | undefined; + + // For Stop hooks, blocking/stop execution should force continuation + if ( + stopOutput?.isBlockingDecision() || + stopOutput?.shouldStopExecution() + ) { + // Emit system message if provided (e.g., "🔄 Ralph iteration 5") + if (stopOutput.systemMessage) { + yield { + type: GeminiEventType.HookSystemMessage, + value: stopOutput.systemMessage, + }; + } + + const continueReason = stopOutput.getEffectiveReason(); + const continueRequest = [{ text: continueReason }]; + return yield* this.sendMessageStream( + continueRequest, + signal, + prompt_id, + { isContinuation: true }, + boundedTurns - 1, + ); + } + } + if (!turn.pendingToolCalls.length && signal && !signal.aborted) { if (this.config.getSkipNextSpeakerCheck()) { return turn; @@ -557,9 +663,9 @@ export class GeminiClient { ); if (nextSpeakerCheck?.next_speaker === 'model') { const nextRequest = [{ text: 'Please continue.' }]; - // This recursive call's events will be yielded out, but the final - // turn object will be from the top-level call. - yield* this.sendMessageStream( + // This recursive call's events will be yielded out, and the final + // turn object from the recursive call will be returned. 
+ return yield* this.sendMessageStream( nextRequest, signal, prompt_id, @@ -568,6 +674,7 @@ export class GeminiClient { ); } } + return turn; } diff --git a/packages/core/src/core/geminiChat.test.ts b/packages/core/src/core/geminiChat.test.ts index 1e68344ed..4f69b62eb 100644 --- a/packages/core/src/core/geminiChat.test.ts +++ b/packages/core/src/core/geminiChat.test.ts @@ -79,7 +79,7 @@ vi.mock('../telemetry/uiTelemetry.js', () => ({ }, })); -describe('GeminiChat', () => { +describe('GeminiChat', async () => { let mockContentGenerator: ContentGenerator; let chat: GeminiChat; let mockConfig: Config; @@ -132,6 +132,44 @@ describe('GeminiChat', () => { vi.resetAllMocks(); }); + /** + * Helper: consume a stream and expect it to throw InvalidStreamError + * after all transient retries exhaust. Uses fake timers to skip delays. + * Must be called within a vi.useFakeTimers() / vi.useRealTimers() block. + */ + async function expectStreamExhaustion( + stream: AsyncGenerator, + ): Promise { + const collecting = (async () => { + for await (const _ of stream) { + /* consume */ + } + })(); + // Get assertion promise first (don't await), then advance timers. + const resultPromise = (async () => { + await expect(collecting).rejects.toThrow(InvalidStreamError); + })(); + await vi.advanceTimersByTimeAsync(0); + await vi.advanceTimersByTimeAsync(35_000); + await resultPromise; + } + + async function collectStreamWithFakeTimers( + stream: AsyncGenerator, + advanceByMs: number = 10_000, + ): Promise { + const events: StreamEvent[] = []; + const collecting = (async () => { + for await (const event of stream) { + events.push(event); + } + return events; + })(); + await vi.advanceTimersByTimeAsync(0); + await vi.advanceTimersByTimeAsync(advanceByMs); + return collecting; + } + describe('sendMessageStream', () => { it('should succeed if a tool call is followed by an empty part', async () => { // 1. Mock a stream that contains a tool call, then an invalid (empty) part. 
@@ -187,48 +225,44 @@ describe('GeminiChat', () => { }); it('should fail if the stream ends with an empty part and has no finishReason', async () => { - // 1. Mock a stream that ends with an invalid part and has no finish reason. - const streamWithNoFinish = (async function* () { - yield { - candidates: [ - { - content: { - role: 'model', - parts: [{ text: 'Initial content...' }], + vi.useFakeTimers(); + try { + const streamWithNoFinish = (async function* () { + yield { + candidates: [ + { + content: { + role: 'model', + parts: [{ text: 'Initial content...' }], + }, }, - }, - ], - } as unknown as GenerateContentResponse; - // This second chunk is invalid and has no finishReason, so it should fail. - yield { - candidates: [ - { - content: { - role: 'model', - parts: [{ text: '' }], + ], + } as unknown as GenerateContentResponse; + yield { + candidates: [ + { + content: { + role: 'model', + parts: [{ text: '' }], + }, }, - }, - ], - } as unknown as GenerateContentResponse; - })(); + ], + } as unknown as GenerateContentResponse; + })(); - vi.mocked(mockContentGenerator.generateContentStream).mockResolvedValue( - streamWithNoFinish, - ); + vi.mocked(mockContentGenerator.generateContentStream).mockResolvedValue( + streamWithNoFinish, + ); - // 2. Action & Assert: The stream should fail because there's no finish reason. 
- const stream = await chat.sendMessageStream( - 'test-model', - { message: 'test message' }, - 'prompt-id-no-finish-empty-end', - ); - await expect( - (async () => { - for await (const _ of stream) { - /* consume stream */ - } - })(), - ).rejects.toThrow(InvalidStreamError); + const stream = await chat.sendMessageStream( + 'test-model', + { message: 'test message' }, + 'prompt-id-no-finish-empty-end', + ); + await expectStreamExhaustion(stream); + } finally { + vi.useRealTimers(); + } }); it('should succeed if the stream ends with an invalid part but has a finishReason and contained a valid part', async () => { @@ -443,63 +477,62 @@ describe('GeminiChat', () => { ); }); it('should throw an error when a tool call is followed by an empty stream response', async () => { - // 1. Setup: A history where the model has just made a function call. - const initialHistory: Content[] = [ - { - role: 'user', - parts: [{ text: 'Find a good Italian restaurant for me.' }], - }, - { - role: 'model', - parts: [ - { - functionCall: { + vi.useFakeTimers(); + try { + // 1. Setup: A history where the model has just made a function call. + const initialHistory: Content[] = [ + { + role: 'user', + parts: [{ text: 'Find a good Italian restaurant for me.' }], + }, + { + role: 'model', + parts: [ + { + functionCall: { + name: 'find_restaurant', + args: { cuisine: 'Italian' }, + }, + }, + ], + }, + ]; + chat.setHistory(initialHistory); + + // 2. Mock the API to return an empty/thought-only stream. + const emptyStreamResponse = (async function* () { + yield { + candidates: [ + { + content: { role: 'model', parts: [{ thought: true }] }, + finishReason: 'STOP', + }, + ], + } as unknown as GenerateContentResponse; + })(); + vi.mocked(mockContentGenerator.generateContentStream).mockResolvedValue( + emptyStreamResponse, + ); + + // 3. Action: Send the function response back to the model and consume the stream. 
+ const stream = await chat.sendMessageStream( + 'test-model', + { + message: { + functionResponse: { name: 'find_restaurant', - args: { cuisine: 'Italian' }, + response: { name: 'Vesuvio' }, }, }, - ], - }, - ]; - chat.setHistory(initialHistory); - - // 2. Mock the API to return an empty/thought-only stream. - const emptyStreamResponse = (async function* () { - yield { - candidates: [ - { - content: { role: 'model', parts: [{ thought: true }] }, - finishReason: 'STOP', - }, - ], - } as unknown as GenerateContentResponse; - })(); - vi.mocked(mockContentGenerator.generateContentStream).mockResolvedValue( - emptyStreamResponse, - ); - - // 3. Action: Send the function response back to the model and consume the stream. - const stream = await chat.sendMessageStream( - 'test-model', - { - message: { - functionResponse: { - name: 'find_restaurant', - response: { name: 'Vesuvio' }, - }, }, - }, - 'prompt-id-stream-1', - ); + 'prompt-id-stream-1', + ); - // 4. Assert: The stream processing should throw an InvalidStreamError. - await expect( - (async () => { - for await (const _ of stream) { - // This loop consumes the stream to trigger the internal logic. - } - })(), - ).rejects.toThrow(InvalidStreamError); + // 4. Assert: The stream processing should throw an InvalidStreamError. 
+ await expectStreamExhaustion(stream); + } finally { + vi.useRealTimers(); + } }); it('should succeed when there is a tool call without finish reason', async () => { @@ -546,73 +579,69 @@ describe('GeminiChat', () => { }); it('should throw InvalidStreamError when no tool call and no finish reason', async () => { - // Setup: Stream with text but no finish reason and no tool call - const streamWithoutFinishReason = (async function* () { - yield { - candidates: [ - { - content: { - role: 'model', - parts: [{ text: 'some response' }], + vi.useFakeTimers(); + try { + // Setup: Stream with text but no finish reason and no tool call + const streamWithoutFinishReason = (async function* () { + yield { + candidates: [ + { + content: { + role: 'model', + parts: [{ text: 'some response' }], + }, + // No finishReason }, - // No finishReason - }, - ], - } as unknown as GenerateContentResponse; - })(); + ], + } as unknown as GenerateContentResponse; + })(); - vi.mocked(mockContentGenerator.generateContentStream).mockResolvedValue( - streamWithoutFinishReason, - ); + vi.mocked(mockContentGenerator.generateContentStream).mockResolvedValue( + streamWithoutFinishReason, + ); - const stream = await chat.sendMessageStream( - 'test-model', - { message: 'test' }, - 'prompt-id-1', - ); - - await expect( - (async () => { - for await (const _ of stream) { - // consume stream - } - })(), - ).rejects.toThrow(InvalidStreamError); + const stream = await chat.sendMessageStream( + 'test-model', + { message: 'test' }, + 'prompt-id-1', + ); + await expectStreamExhaustion(stream); + } finally { + vi.useRealTimers(); + } }); it('should throw InvalidStreamError when no tool call and empty response text', async () => { - // Setup: Stream with finish reason but empty response (only thoughts) - const streamWithEmptyResponse = (async function* () { - yield { - candidates: [ - { - content: { - role: 'model', - parts: [{ thought: 'thinking...' 
}], + vi.useFakeTimers(); + try { + // Setup: Stream with finish reason but empty response (only thoughts) + const streamWithEmptyResponse = (async function* () { + yield { + candidates: [ + { + content: { + role: 'model', + parts: [{ thought: 'thinking...' }], + }, + finishReason: 'STOP', }, - finishReason: 'STOP', - }, - ], - } as unknown as GenerateContentResponse; - })(); + ], + } as unknown as GenerateContentResponse; + })(); - vi.mocked(mockContentGenerator.generateContentStream).mockResolvedValue( - streamWithEmptyResponse, - ); + vi.mocked(mockContentGenerator.generateContentStream).mockResolvedValue( + streamWithEmptyResponse, + ); - const stream = await chat.sendMessageStream( - 'test-model', - { message: 'test' }, - 'prompt-id-1', - ); - - await expect( - (async () => { - for await (const _ of stream) { - // consume stream - } - })(), - ).rejects.toThrow(InvalidStreamError); + const stream = await chat.sendMessageStream( + 'test-model', + { message: 'test' }, + 'prompt-id-1', + ); + await expectStreamExhaustion(stream); + } finally { + vi.useRealTimers(); + } }); it('should succeed when there is finish reason and response text', async () => { @@ -651,6 +680,50 @@ describe('GeminiChat', () => { ).resolves.not.toThrow(); }); + it('should not lose finish reason when last chunk only has usage metadata', async () => { + const streamWithTrailingUsageOnlyChunk = (async function* () { + yield { + candidates: [ + { + content: { + role: 'model', + parts: [{ text: 'valid response' }], + }, + finishReason: 'STOP', + }, + ], + } as unknown as GenerateContentResponse; + + // Some providers emit a trailing usage-only chunk after finishReason. 
+ yield { + candidates: [], + usageMetadata: { + promptTokenCount: 11, + candidatesTokenCount: 5, + totalTokenCount: 16, + }, + } as unknown as GenerateContentResponse; + })(); + + vi.mocked(mockContentGenerator.generateContentStream).mockResolvedValue( + streamWithTrailingUsageOnlyChunk, + ); + + const stream = await chat.sendMessageStream( + 'test-model', + { message: 'test' }, + 'prompt-id-1', + ); + + await expect( + (async () => { + for await (const _ of stream) { + // consume stream + } + })(), + ).resolves.not.toThrow(); + }); + it('should call generateContentStream with the correct parameters', async () => { const response = (async function* () { yield { @@ -774,122 +847,87 @@ describe('GeminiChat', () => { }); describe('sendMessageStream with retries', () => { - it('should yield a RETRY event when an invalid stream is encountered', async () => { - // ARRANGE: Mock the stream to fail once, then succeed. - vi.mocked(mockContentGenerator.generateContentStream) - .mockImplementationOnce(async () => - // First attempt: An invalid stream with an empty text part. - (async function* () { - yield { - candidates: [{ content: { parts: [{ text: '' }] } }], - } as unknown as GenerateContentResponse; - })(), - ) - .mockImplementationOnce(async () => - // Second attempt (the retry): A minimal valid stream. - (async function* () { - yield { - candidates: [ - { - content: { parts: [{ text: 'Success' }] }, - finishReason: 'STOP', - }, - ], - } as unknown as GenerateContentResponse; - })(), - ); - - // ACT: Send a message and collect all events from the stream. - const stream = await chat.sendMessageStream( - 'test-model', - { message: 'test' }, - 'prompt-id-yield-retry', - ); - const events: StreamEvent[] = []; - for await (const event of stream) { - events.push(event); - } - - // ASSERT: Check that a RETRY event was present in the stream's output. 
- const retryEvent = events.find((e) => e.type === StreamEventType.RETRY); - - expect(retryEvent).toBeDefined(); - expect(retryEvent?.type).toBe(StreamEventType.RETRY); - }); it('should retry on invalid content, succeed, and report metrics', async () => { - // Use mockImplementationOnce to provide a fresh, promise-wrapped generator for each attempt. - vi.mocked(mockContentGenerator.generateContentStream) - .mockImplementationOnce(async () => - // First call returns an invalid stream - (async function* () { - yield { - candidates: [{ content: { parts: [{ text: '' }] } }], // Invalid empty text part - } as unknown as GenerateContentResponse; - })(), - ) - .mockImplementationOnce(async () => - // Second call returns a valid stream - (async function* () { - yield { - candidates: [ - { - content: { parts: [{ text: 'Successful response' }] }, - finishReason: 'STOP', - }, - ], - } as unknown as GenerateContentResponse; - })(), + vi.useFakeTimers(); + try { + // Use mockImplementationOnce to provide a fresh, promise-wrapped generator for each attempt. 
+ vi.mocked(mockContentGenerator.generateContentStream) + .mockImplementationOnce(async () => + // First call returns an invalid stream + (async function* () { + yield { + candidates: [{ content: { parts: [{ text: '' }] } }], // Invalid empty text part + } as unknown as GenerateContentResponse; + })(), + ) + .mockImplementationOnce(async () => + // Second call returns a valid stream + (async function* () { + yield { + candidates: [ + { + content: { parts: [{ text: 'Successful response' }] }, + finishReason: 'STOP', + }, + ], + } as unknown as GenerateContentResponse; + })(), + ); + + const stream = await chat.sendMessageStream( + 'test-model', + { message: 'test' }, + 'prompt-id-retry-success', ); + const chunks = await collectStreamWithFakeTimers(stream); - const stream = await chat.sendMessageStream( - 'test-model', - { message: 'test' }, - 'prompt-id-retry-success', - ); - const chunks: StreamEvent[] = []; - for await (const chunk of stream) { - chunks.push(chunk); + // Assertions + expect(mockLogContentRetry).toHaveBeenCalledTimes(1); + expect(mockLogContentRetryFailure).not.toHaveBeenCalled(); + expect( + mockContentGenerator.generateContentStream, + ).toHaveBeenCalledTimes(2); + + // Check for a retry event + expect(chunks.some((c) => c.type === StreamEventType.RETRY)).toBe(true); + + // Check for the successful content chunk + expect( + chunks.some( + (c) => + c.type === StreamEventType.CHUNK && + c.value.candidates?.[0]?.content?.parts?.[0]?.text === + 'Successful response', + ), + ).toBe(true); + + // Check that history was recorded correctly once, with no duplicates. 
+ const history = chat.getHistory(); + expect(history.length).toBe(2); + expect(history[0]).toEqual({ + role: 'user', + parts: [{ text: 'test' }], + }); + expect(history[1]).toEqual({ + role: 'model', + parts: [{ text: 'Successful response' }], + }); + + // Verify that token counting is not called when usageMetadata is missing + expect( + uiTelemetryService.setLastPromptTokenCount, + ).not.toHaveBeenCalled(); + } finally { + vi.useRealTimers(); } - - // Assertions - expect(mockLogContentRetry).toHaveBeenCalledTimes(1); - expect(mockLogContentRetryFailure).not.toHaveBeenCalled(); - expect(mockContentGenerator.generateContentStream).toHaveBeenCalledTimes( - 2, - ); - - // Check for a retry event - expect(chunks.some((c) => c.type === StreamEventType.RETRY)).toBe(true); - - // Check for the successful content chunk - expect( - chunks.some( - (c) => - c.type === StreamEventType.CHUNK && - c.value.candidates?.[0]?.content?.parts?.[0]?.text === - 'Successful response', - ), - ).toBe(true); - - // Check that history was recorded correctly once, with no duplicates. 
- const history = chat.getHistory(); - expect(history.length).toBe(2); - expect(history[0]).toEqual({ - role: 'user', - parts: [{ text: 'test' }], - }); - expect(history[1]).toEqual({ - role: 'model', - parts: [{ text: 'Successful response' }], - }); - - // Verify that token counting is not called when usageMetadata is missing - expect(uiTelemetryService.setLastPromptTokenCount).not.toHaveBeenCalled(); }); it('should fail after all retries on persistent invalid content and report metrics', async () => { - vi.mocked(mockContentGenerator.generateContentStream).mockImplementation( - async () => + vi.useFakeTimers(); + try { + vi.mocked( + mockContentGenerator.generateContentStream, + ).mockImplementation(async () => (async function* () { yield { candidates: [ @@ -902,33 +940,86 @@ describe('GeminiChat', () => { ], } as unknown as GenerateContentResponse; })(), - ); + ); - const stream = await chat.sendMessageStream( - 'test-model', - { message: 'test' }, - 'prompt-id-retry-fail', - ); - await expect(async () => { - for await (const _ of stream) { - // Must loop to trigger the internal logic that throws. - } - }).rejects.toThrow(InvalidStreamError); + const stream = await chat.sendMessageStream( + 'test-model', + { message: 'test' }, + 'prompt-id-retry-fail', + ); + await expectStreamExhaustion(stream); - // Should be called 2 times (initial + 1 retry) - expect(mockContentGenerator.generateContentStream).toHaveBeenCalledTimes( - 2, - ); - expect(mockLogContentRetry).toHaveBeenCalledTimes(1); - expect(mockLogContentRetryFailure).toHaveBeenCalledTimes(1); + // Should be called 3 times (1 initial + 2 transient retries) + expect( + mockContentGenerator.generateContentStream, + ).toHaveBeenCalledTimes(3); + expect(mockLogContentRetry).toHaveBeenCalledTimes(2); + expect(mockLogContentRetryFailure).toHaveBeenCalledTimes(1); - // History should still contain the user message. 
- const history = chat.getHistory(); - expect(history.length).toBe(1); - expect(history[0]).toEqual({ - role: 'user', - parts: [{ text: 'test' }], - }); + // History should still contain the user message. + const history = chat.getHistory(); + expect(history.length).toBe(1); + expect(history[0]).toEqual({ + role: 'user', + parts: [{ text: 'test' }], + }); + } finally { + vi.useRealTimers(); + } + }); + + it('should retry usage-only empty streams and succeed on a later attempt', async () => { + vi.useFakeTimers(); + try { + vi.mocked(mockContentGenerator.generateContentStream) + .mockImplementationOnce(async () => + (async function* () { + yield { + usageMetadata: { + promptTokenCount: 10, + candidatesTokenCount: 0, + totalTokenCount: 10, + }, + } as unknown as GenerateContentResponse; + })(), + ) + .mockImplementationOnce(async () => + (async function* () { + yield { + candidates: [ + { + content: { + parts: [{ text: 'Recovered after empty stream' }], + }, + finishReason: 'STOP', + }, + ], + } as unknown as GenerateContentResponse; + })(), + ); + + const stream = await chat.sendMessageStream( + 'test-model', + { message: 'test' }, + 'prompt-id-empty-usage-retry', + ); + const events = await collectStreamWithFakeTimers(stream); + + expect( + mockContentGenerator.generateContentStream, + ).toHaveBeenCalledTimes(2); + expect(mockLogContentRetry).toHaveBeenCalledTimes(1); + expect( + events.some( + (e) => + e.type === StreamEventType.CHUNK && + e.value.candidates?.[0]?.content?.parts?.[0]?.text === + 'Recovered after empty stream', + ), + ).toBe(true); + } finally { + vi.useRealTimers(); + } }); it('should retry on TPM throttling StreamContentError with fixed delay', async () => { diff --git a/packages/core/src/core/geminiChat.ts b/packages/core/src/core/geminiChat.ts index 2e1923355..f58bcdb61 100644 --- a/packages/core/src/core/geminiChat.ts +++ b/packages/core/src/core/geminiChat.ts @@ -64,6 +64,16 @@ const INVALID_CONTENT_RETRY_OPTIONS: ContentRetryOptions = { 
initialDelayMs: 500, }; +// Some providers occasionally return transient stream anomalies: either an +// empty stream (usage metadata only, no candidates), a stream that finishes +// normally but contains no usable text, or a stream cut off without a finish +// reason. All are retried with an independent budget (similar to rate-limit +// retries) so they do not consume each other's retry budgets. +const INVALID_STREAM_RETRY_CONFIG = { + maxRetries: 2, + initialDelayMs: 2000, +}; + /** * Options for retrying on rate-limit throttling errors returned as stream content. * Fixed 60s delay matches the DashScope per-minute quota window. @@ -285,6 +295,7 @@ export class GeminiChat { try { let lastError: unknown = new Error('Request failed after all retries.'); let rateLimitRetryCount = 0; + let invalidStreamRetryCount = 0; // Read per-config overrides; fall back to built-in defaults. const cgConfig = self.config.getContentGeneratorConfig(); @@ -298,7 +309,11 @@ export class GeminiChat { attempt++ ) { try { - if (attempt > 0 || rateLimitRetryCount > 0) { + if ( + attempt > 0 || + rateLimitRetryCount > 0 || + invalidStreamRetryCount > 0 + ) { yield { type: StreamEventType.RETRY }; } @@ -348,10 +363,46 @@ export class GeminiChat { continue; } - const isContentError = error instanceof InvalidStreamError; + // Transient stream anomalies (NO_FINISH_REASON / NO_RESPONSE_TEXT): + // independent retry budget, similar to rate-limit handling. + // Does NOT consume the content retry budget. + const isTransientStreamError = error instanceof InvalidStreamError; + if ( + isTransientStreamError && + invalidStreamRetryCount < INVALID_STREAM_RETRY_CONFIG.maxRetries + ) { + invalidStreamRetryCount++; + const delayMs = + INVALID_STREAM_RETRY_CONFIG.initialDelayMs * + invalidStreamRetryCount; + debugLogger.warn( + `Invalid stream [${(error as InvalidStreamError).type}] ` + + `(retry ${invalidStreamRetryCount}/${INVALID_STREAM_RETRY_CONFIG.maxRetries}). 
` +
+              `Waiting ${delayMs / 1000}s before retrying...`,
+            );
+            logContentRetry(
+              self.config,
+              new ContentRetryEvent(
+                invalidStreamRetryCount - 1,
+                (error as InvalidStreamError).type,
+                delayMs,
+                model,
+              ),
+            );
+            yield { type: StreamEventType.RETRY };
+            // Don't count transient retries against content retry limit.
+            attempt--;
+            await new Promise((res) => setTimeout(res, delayMs));
+            continue;
+          }
+          // Transient budget exhausted — stop immediately.
+          if (isTransientStreamError) {
+            break;
+          }
+          // NOTE(review): effectively unreachable for InvalidStreamError — the transient branch above retries or breaks on every instance; kept only as a defensive fallback.
+          const isContentError = error instanceof InvalidStreamError;
           if (isContentError) {
-            // Check if we have more attempts left.
             if (attempt < INVALID_CONTENT_RETRY_OPTIONS.maxAttempts - 1) {
               logContentRetry(
                 self.config,
@@ -378,11 +429,12 @@
       if (lastError) {
         if (lastError instanceof InvalidStreamError) {
+          const totalAttempts = invalidStreamRetryCount + 1;
           logContentRetryFailure(
             self.config,
             new ContentRetryFailureEvent(
-              INVALID_CONTENT_RETRY_OPTIONS.maxAttempts,
-              (lastError as InvalidStreamError).type,
+              totalAttempts,
+              lastError.type,
               model,
             ),
           );
@@ -563,8 +615,11 @@
     let hasFinishReason = false;
     for await (const chunk of streamResponse) {
-      hasFinishReason =
+      // Use ||= to avoid later usage-only chunks (no candidates) overwriting
+      // a finishReason that was already seen in an earlier chunk.
+      hasFinishReason ||=
         chunk?.candidates?.some((candidate) => candidate.finishReason) ??
false; + if (isValidResponse(chunk)) { const content = chunk.candidates?.[0]?.content; if (content?.parts) { diff --git a/packages/core/src/core/openaiContentGenerator/pipeline.test.ts b/packages/core/src/core/openaiContentGenerator/pipeline.test.ts index d71e23e91..6969a51ef 100644 --- a/packages/core/src/core/openaiContentGenerator/pipeline.test.ts +++ b/packages/core/src/core/openaiContentGenerator/pipeline.test.ts @@ -980,6 +980,147 @@ describe('ContentGenerationPipeline', () => { totalTokenCount: 30, }); }); + + it('should not duplicate function calls when trailing chunks arrive after finish+usage merge', async () => { + // Reproduces the real-world bug: some providers (e.g. bailian/glm-5) + // send trailing empty chunks AFTER the finish+usage pair. Before the + // fix, each trailing chunk re-triggered the merge logic and yielded + // the finish response again (with the same function-call parts), + // causing duplicate tool-call execution in the UI. + const request: GenerateContentParameters = { + model: 'test-model', + contents: [{ parts: [{ text: 'Hello' }], role: 'user' }], + }; + const userPromptId = 'test-prompt-id'; + + // Chunk 1: content text + const mockChunk1 = { + id: 'chunk-1', + choices: [ + { delta: { content: 'I will create a todo' }, finish_reason: null }, + ], + } as OpenAI.Chat.ChatCompletionChunk; + + // Chunk 2: finish reason (with tool calls) + const mockChunk2 = { + id: 'chunk-2', + choices: [{ delta: {}, finish_reason: 'tool_calls' }], + } as OpenAI.Chat.ChatCompletionChunk; + + // Chunk 3: usage metadata only + const mockChunk3 = { + id: 'chunk-3', + choices: [], + usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 }, + } as unknown as OpenAI.Chat.ChatCompletionChunk; + + // Chunk 4: trailing empty chunk (the problematic one) + const mockChunk4 = { + id: 'chunk-4', + choices: [], + } as unknown as OpenAI.Chat.ChatCompletionChunk; + + const mockStream = { + async *[Symbol.asyncIterator]() { + yield mockChunk1; + yield 
mockChunk2; + yield mockChunk3; + yield mockChunk4; + }, + }; + + // Converter output for chunk 1: text content + const mockContentResponse = new GenerateContentResponse(); + mockContentResponse.candidates = [ + { + content: { + parts: [{ text: 'I will create a todo' }], + role: 'model', + }, + }, + ]; + + // Converter output for chunk 2: finish + function call + const mockFinishResponse = new GenerateContentResponse(); + mockFinishResponse.candidates = [ + { + content: { + parts: [ + { + functionCall: { + name: 'todoWrite', + args: { text: 'buy milk' }, + }, + }, + ], + role: 'model', + }, + finishReason: FinishReason.STOP, + }, + ]; + + // Converter output for chunk 3: usage only + const mockUsageResponse = new GenerateContentResponse(); + mockUsageResponse.candidates = []; + mockUsageResponse.usageMetadata = { + promptTokenCount: 10, + candidatesTokenCount: 20, + totalTokenCount: 30, + }; + + // Converter output for chunk 4: trailing empty + const mockTrailingResponse = new GenerateContentResponse(); + mockTrailingResponse.candidates = []; + + (mockConverter.convertGeminiRequestToOpenAI as Mock).mockReturnValue([]); + (mockConverter.convertOpenAIChunkToGemini as Mock) + .mockReturnValueOnce(mockContentResponse) + .mockReturnValueOnce(mockFinishResponse) + .mockReturnValueOnce(mockUsageResponse) + .mockReturnValueOnce(mockTrailingResponse); + (mockClient.chat.completions.create as Mock).mockResolvedValue( + mockStream, + ); + + // Act + const resultGenerator = await pipeline.executeStream( + request, + userPromptId, + ); + const results = []; + for await (const result of resultGenerator) { + results.push(result); + } + + // Assert: exactly 2 results — content chunk + ONE merged finish chunk. + // Before the fix this was 3 (the trailing chunk triggered a duplicate). 
+ expect(results).toHaveLength(2); + expect(results[0]).toBe(mockContentResponse); + + // The merged result should have the function call and usage metadata + const mergedResult = results[1]!; + expect(mergedResult.candidates?.[0]?.finishReason).toBe( + FinishReason.STOP, + ); + expect( + mergedResult.candidates?.[0]?.content?.parts?.[0]?.functionCall?.name, + ).toBe('todoWrite'); + expect(mergedResult.usageMetadata).toEqual({ + promptTokenCount: 10, + candidatesTokenCount: 20, + totalTokenCount: 30, + }); + + // Count function-call parts across ALL yielded results — must be exactly 1 + let totalFunctionCalls = 0; + for (const result of results) { + const parts = result.candidates?.[0]?.content?.parts ?? []; + totalFunctionCalls += parts.filter( + (p: { functionCall?: unknown }) => p.functionCall, + ).length; + } + expect(totalFunctionCalls).toBe(1); + }); }); describe('buildRequest', () => { diff --git a/packages/core/src/core/openaiContentGenerator/pipeline.ts b/packages/core/src/core/openaiContentGenerator/pipeline.ts index 8d2cc9fc7..5c6cdc682 100644 --- a/packages/core/src/core/openaiContentGenerator/pipeline.ts +++ b/packages/core/src/core/openaiContentGenerator/pipeline.ts @@ -127,8 +127,15 @@ export class ContentGenerationPipeline { // Reset streaming tool calls to prevent data pollution from previous streams this.converter.resetStreamingToolCalls(); - // State for handling chunk merging + // State for handling chunk merging. + // pendingFinishResponse holds a finish chunk waiting to be merged with + // a subsequent usage-metadata chunk before yielding. + // finishYielded is set to true once the merged finish response has been + // yielded, so that any further trailing chunks are treated as normal + // chunks instead of triggering another merge (which would duplicate the + // function-call parts from the finish chunk). 
let pendingFinishResponse: GenerateContentResponse | null = null; + let finishYielded = false; try { // Stage 2a: Convert and yield each chunk while preserving original @@ -155,7 +162,29 @@ export class ContentGenerationPipeline { continue; } - // Stage 2c: Handle chunk merging for providers that send finishReason and usageMetadata separately + // Stage 2c: Handle chunk merging for providers that send + // finishReason and usageMetadata in separate chunks. + // Once the merged finish response has been yielded, skip + // further merging so trailing chunks don't duplicate the + // function-call parts carried by the finish chunk. + if (finishYielded) { + // Finish already yielded — absorb any remaining usage + // metadata but do NOT yield another response. + // Note: pendingFinishResponse is guaranteed non-null here because + // finishYielded is only set to true inside the `if (pendingFinishResponse)` + // block below. TypeScript cannot infer this through the callback + // assignment in handleChunkMerging, so an explicit cast is needed. + if (response.usageMetadata) { + const pending = + pendingFinishResponse as GenerateContentResponse | null; + if (pending) { + pending.usageMetadata = response.usageMetadata; + } + } + collectedGeminiResponses.push(response); + continue; + } + const shouldYield = this.handleChunkMerging( response, collectedGeminiResponses, @@ -168,15 +197,18 @@ export class ContentGenerationPipeline { // If we have a pending finish response, yield it instead if (pendingFinishResponse) { yield pendingFinishResponse; - pendingFinishResponse = null; + finishYielded = true; + // Keep pendingFinishResponse alive so late-arriving usage + // metadata can still be merged (see finishYielded block above). } else { yield response; } } } - // Stage 2d: If there's still a pending finish response at the end, yield it - if (pendingFinishResponse) { + // Stage 2d: If there's still a pending finish response at the end + // (e.g. 
no usage chunk arrived after the finish chunk), yield it. + if (pendingFinishResponse && !finishYielded) { yield pendingFinishResponse; } diff --git a/packages/core/src/core/openaiContentGenerator/streamingToolCallParser.test.ts b/packages/core/src/core/openaiContentGenerator/streamingToolCallParser.test.ts index dc4d696d5..1735097be 100644 --- a/packages/core/src/core/openaiContentGenerator/streamingToolCallParser.test.ts +++ b/packages/core/src/core/openaiContentGenerator/streamingToolCallParser.test.ts @@ -813,7 +813,12 @@ describe('StreamingToolCallParser', () => { it('should return true when a tool call is inside a string literal', () => { // Simulate truncation mid-string: {"file_path": "/tmp/test.txt", "content": "some text - parser.addChunk(0, '{"file_path": "/tmp/test.txt"', 'call_1', 'write_file'); + parser.addChunk( + 0, + '{"file_path": "/tmp/test.txt"', + 'call_1', + 'write_file', + ); parser.addChunk(0, ', "content": "some text'); const state = parser.getState(0); expect(state.inString).toBe(true); diff --git a/packages/core/src/core/turn.ts b/packages/core/src/core/turn.ts index 99eb983de..08f379d68 100644 --- a/packages/core/src/core/turn.ts +++ b/packages/core/src/core/turn.ts @@ -64,6 +64,7 @@ export enum GeminiEventType { LoopDetected = 'loop_detected', Citation = 'citation', Retry = 'retry', + HookSystemMessage = 'hook_system_message', } export type ServerGeminiRetryEvent = { @@ -202,6 +203,11 @@ export type ServerGeminiCitationEvent = { value: string; }; +export type ServerGeminiHookSystemMessageEvent = { + type: GeminiEventType.HookSystemMessage; + value: string; +}; + // The original union type, now composed of the individual types export type ServerGeminiStreamEvent = | ServerGeminiChatCompressedEvent @@ -209,6 +215,7 @@ export type ServerGeminiStreamEvent = | ServerGeminiContentEvent | ServerGeminiErrorEvent | ServerGeminiFinishedEvent + | ServerGeminiHookSystemMessageEvent | ServerGeminiLoopDetectedEvent | ServerGeminiMaxSessionTurnsEvent 
| ServerGeminiThoughtEvent diff --git a/packages/core/src/extension/claude-converter.test.ts b/packages/core/src/extension/claude-converter.test.ts index b4d16c8f4..502e8196e 100644 --- a/packages/core/src/extension/claude-converter.test.ts +++ b/packages/core/src/extension/claude-converter.test.ts @@ -368,4 +368,69 @@ describe('convertClaudePluginPackage', () => { // Clean up converted directory fs.rmSync(result.convertedDir, { recursive: true, force: true }); }); + + it('should successfully convert agent files with Windows CRLF endings', async () => { + // Setup: Create a plugin with a source agents folder containing a CRLF agent + const pluginSourceDir = path.join(testDir, 'plugin-crlf-agents'); + fs.mkdirSync(pluginSourceDir, { recursive: true }); + + // Create source agents directory (renamed to src-agents to avoid skip-logic bug) + const agentsDir = path.join(pluginSourceDir, 'src-agents'); + fs.mkdirSync(agentsDir, { recursive: true }); + + // Write a .md file with CRLF endings + const crlfAgentContent = `---\r\nname: cool-agent\r\ndescription: A cool agent\r\n---\r\n\r\nSystem prompt body\r\n`; + fs.writeFileSync( + path.join(agentsDir, 'agent.md'), + crlfAgentContent, + 'utf-8', + ); + + // Create marketplace.json specifying to load this agent + const marketplaceDir = path.join(pluginSourceDir, '.claude-plugin'); + fs.mkdirSync(marketplaceDir, { recursive: true }); + + const marketplaceConfig: ClaudeMarketplaceConfig = { + name: 'test-marketplace', + owner: { name: 'Test Owner', email: 'test@example.com' }, + plugins: [ + { + name: 'crlf-agents-plugin', + version: '1.0.0', + source: './', + strict: false, + agents: ['./src-agents/agent.md'], + }, + ], + }; + + fs.writeFileSync( + path.join(marketplaceDir, 'marketplace.json'), + JSON.stringify(marketplaceConfig, null, 2), + 'utf-8', + ); + + // Act: Convert + const result = await convertClaudePluginPackage( + pluginSourceDir, + 'crlf-agents-plugin', + ); + + // Verify: agent file was properly parsed and 
converted into .qwen/agents folder structure + const convertedAgentsDir = path.join(result.convertedDir, 'agents'); + expect(fs.existsSync(convertedAgentsDir)).toBe(true); + + const convertedFiles = fs.readdirSync(convertedAgentsDir); + expect(convertedFiles).toContain('agent.md'); // The filename is preserved from source + + // Verify it was actually parsed by checking the converted content format + const convertedContent = fs.readFileSync( + path.join(convertedAgentsDir, 'agent.md'), + 'utf-8', + ); + expect(convertedContent).toContain('name: cool-agent'); + + // Clean up + fs.rmSync(result.convertedDir, { recursive: true, force: true }); + }); }); diff --git a/packages/core/src/extension/claude-converter.ts b/packages/core/src/extension/claude-converter.ts index 68da9cfff..98639b197 100644 --- a/packages/core/src/extension/claude-converter.ts +++ b/packages/core/src/extension/claude-converter.ts @@ -24,6 +24,7 @@ import { stringify as stringifyYaml, } from '../utils/yaml-parser.js'; import { createDebugLogger } from '../utils/debugLogger.js'; +import { normalizeContent } from '../utils/textUtils.js'; const debugLogger = createDebugLogger('CLAUDE_CONVERTER'); @@ -226,10 +227,11 @@ async function convertAgentFiles(agentsDir: string): Promise { try { const content = await fs.promises.readFile(filePath, 'utf-8'); + const normalizedContent = normalizeContent(content); // Parse frontmatter const frontmatterRegex = /^---\n([\s\S]*?)\n---\n([\s\S]*)$/; - const match = content.match(frontmatterRegex); + const match = normalizedContent.match(frontmatterRegex); if (!match) { // No frontmatter, skip this file @@ -387,15 +389,15 @@ export async function convertClaudePluginPackage( const strict = marketplacePlugin.strict ?? 
false; let mergedConfig: ClaudePluginConfig; - if (strict) { - const pluginJsonPath = path.join( - pluginSource, - '.claude-plugin', - 'plugin.json', - ); - if (!fs.existsSync(pluginJsonPath)) { - throw new Error(`Strict mode requires plugin.json at ${pluginJsonPath}`); - } + const pluginJsonPath = path.join( + pluginSource, + '.claude-plugin', + 'plugin.json', + ); + if (strict && !fs.existsSync(pluginJsonPath)) { + throw new Error(`Strict mode requires plugin.json at ${pluginJsonPath}`); + } + if (fs.existsSync(pluginJsonPath)) { const pluginContent = fs.readFileSync(pluginJsonPath, 'utf-8'); const pluginConfig: ClaudePluginConfig = JSON.parse(pluginContent); mergedConfig = mergeClaudeConfigs(marketplacePlugin, pluginConfig); @@ -552,6 +554,18 @@ async function collectResources( const srcFile = path.join(resolvedPath, file); const destFile = path.join(finalDestDir, file); + // Check if the source is a regular file (skip sockets, FIFOs, directories behind symlinks, etc.) + try { + const fileStat = fs.statSync(srcFile); + if (!fileStat.isFile()) { + debugLogger.debug(`Skipping non-regular file: ${srcFile}`); + continue; + } + } catch { + debugLogger.debug(`Failed to stat file, skipping: ${srcFile}`); + continue; + } + // Ensure parent directory exists const destFileDir = path.dirname(destFile); if (!fs.existsSync(destFileDir)) { diff --git a/packages/core/src/extension/extensionManager.ts b/packages/core/src/extension/extensionManager.ts index 2da26995a..629de747a 100644 --- a/packages/core/src/extension/extensionManager.ts +++ b/packages/core/src/extension/extensionManager.ts @@ -1238,7 +1238,21 @@ export async function copyExtension( source: string, destination: string, ): Promise { - await fs.promises.cp(source, destination, { recursive: true }); + await fs.promises.cp(source, destination, { + recursive: true, + dereference: true, + filter: async (src: string) => { + try { + const stats = await fs.promises.stat(src); + // Only copy regular files and directories 
+ // Skip sockets, FIFOs, block devices, and character devices + return stats.isFile() || stats.isDirectory(); + } catch { + // If we can't stat the file, skip it + return false; + } + }, + }); } export function getExtensionId( diff --git a/packages/core/src/extension/gemini-converter.ts b/packages/core/src/extension/gemini-converter.ts index 7f5c2d054..b5461369e 100644 --- a/packages/core/src/extension/gemini-converter.ts +++ b/packages/core/src/extension/gemini-converter.ts @@ -130,9 +130,24 @@ export async function copyDirectory( if (entry.isDirectory()) { await copyDirectory(sourcePath, destPath); - } else { + } else if (entry.isSymbolicLink()) { + // Resolve symlink and copy the target content + try { + const realPath = fs.realpathSync(sourcePath); + const targetStat = fs.statSync(realPath); + if (targetStat.isDirectory()) { + await copyDirectory(realPath, destPath); + } else if (targetStat.isFile()) { + fs.copyFileSync(realPath, destPath); + } + // Skip sockets, FIFOs, etc. + } catch { + // Skip broken symlinks + } + } else if (entry.isFile()) { fs.copyFileSync(sourcePath, destPath); } + // Skip sockets, FIFOs, block devices, and character devices } } diff --git a/packages/core/src/extension/github.test.ts b/packages/core/src/extension/github.test.ts index e98e6498a..8c31b1284 100644 --- a/packages/core/src/extension/github.test.ts +++ b/packages/core/src/extension/github.test.ts @@ -69,6 +69,8 @@ describe('git extension helpers', () => { await cloneFromGit(installMetadata, destination); expect(mockGit.clone).toHaveBeenCalledWith('http://my-repo.com', './', [ + '-c', + 'core.symlinks=true', '--depth', '1', ]); diff --git a/packages/core/src/extension/github.ts b/packages/core/src/extension/github.ts index 9e1d46ed4..5ef49d35b 100644 --- a/packages/core/src/extension/github.ts +++ b/packages/core/src/extension/github.ts @@ -75,7 +75,12 @@ export async function cloneFromGit( // We let git handle the source as is. 
} } - await git.clone(sourceUrl, './', ['--depth', '1']); + await git.clone(sourceUrl, './', [ + '-c', + 'core.symlinks=true', + '--depth', + '1', + ]); const remotes = await git.getRemotes(true); if (remotes.length === 0) { diff --git a/packages/core/src/hooks/hookAggregator.test.ts b/packages/core/src/hooks/hookAggregator.test.ts new file mode 100644 index 000000000..129713b66 --- /dev/null +++ b/packages/core/src/hooks/hookAggregator.test.ts @@ -0,0 +1,618 @@ +/** + * @license + * Copyright 2026 Qwen Team + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect } from 'vitest'; +import { HookAggregator } from './hookAggregator.js'; +import { HookEventName, HookType, createHookOutput } from './types.js'; +import type { + HookExecutionResult, + HookOutput, + PermissionRequestHookOutput, +} from './types.js'; + +describe('HookAggregator', () => { + const aggregator = new HookAggregator(); + + describe('aggregateResults', () => { + it('should return undefined finalOutput when no results', () => { + const result = aggregator.aggregateResults([], HookEventName.PreToolUse); + expect(result.success).toBe(true); + expect(result.finalOutput).toBeUndefined(); + expect(result.allOutputs).toEqual([]); + expect(result.errors).toEqual([]); + }); + + it('should aggregate successful results', () => { + const results: HookExecutionResult[] = [ + { + hookConfig: { type: HookType.Command, command: 'echo test' }, + eventName: HookEventName.PreToolUse, + success: true, + output: { continue: true }, + duration: 100, + }, + ]; + + const result = aggregator.aggregateResults( + results, + HookEventName.PreToolUse, + ); + expect(result.success).toBe(true); + expect(result.finalOutput).toBeDefined(); + }); + + it('should set success false when there are errors', () => { + const results: HookExecutionResult[] = [ + { + hookConfig: { type: HookType.Command, command: 'echo test' }, + eventName: HookEventName.PreToolUse, + success: false, + error: new Error('Hook failed'), 
+ duration: 100, + }, + ]; + + const result = aggregator.aggregateResults( + results, + HookEventName.PreToolUse, + ); + expect(result.success).toBe(false); + expect(result.errors).toHaveLength(1); + }); + + it('should calculate total duration', () => { + const results: HookExecutionResult[] = [ + { + hookConfig: { type: HookType.Command, command: 'echo 1' }, + eventName: HookEventName.PreToolUse, + success: true, + duration: 100, + }, + { + hookConfig: { type: HookType.Command, command: 'echo 2' }, + eventName: HookEventName.PreToolUse, + success: true, + duration: 200, + }, + ]; + + const result = aggregator.aggregateResults( + results, + HookEventName.PreToolUse, + ); + expect(result.totalDuration).toBe(300); + }); + }); + + describe('mergeWithOrLogic - PreToolUse', () => { + it('should concatenate reasons', () => { + const outputs: HookOutput[] = [ + { reason: 'first reason', decision: 'allow' }, + { reason: 'second reason', decision: 'allow' }, + ]; + + const results: HookExecutionResult[] = outputs.map((output) => ({ + hookConfig: { type: HookType.Command, command: 'echo test' }, + eventName: HookEventName.PreToolUse, + success: true, + output, + duration: 100, + })); + + const result = aggregator.aggregateResults( + results, + HookEventName.PreToolUse, + ); + expect(result.finalOutput?.reason).toBe('first reason\nsecond reason'); + }); + + it('should block when any hook blocks', () => { + const outputs: HookOutput[] = [ + { reason: 'allowed', decision: 'allow' }, + { reason: 'blocked', decision: 'block' }, + ]; + + const results: HookExecutionResult[] = outputs.map((output) => ({ + hookConfig: { type: HookType.Command, command: 'echo test' }, + eventName: HookEventName.PreToolUse, + success: true, + output, + duration: 100, + })); + + const result = aggregator.aggregateResults( + results, + HookEventName.PreToolUse, + ); + expect(result.finalOutput?.decision).toBe('block'); + }); + + it('should use last stopReason', () => { + const outputs: HookOutput[] = [ 
+ { continue: false, stopReason: 'first stop' }, + { continue: false, stopReason: 'second stop' }, + ]; + + const results: HookExecutionResult[] = outputs.map((output) => ({ + hookConfig: { type: HookType.Command, command: 'echo test' }, + eventName: HookEventName.Stop, + success: true, + output, + duration: 100, + })); + + const result = aggregator.aggregateResults(results, HookEventName.Stop); + expect(result.finalOutput?.stopReason).toBe('second stop'); + }); + + it('should concatenate additionalContext', () => { + const outputs: HookOutput[] = [ + { hookSpecificOutput: { additionalContext: 'context 1' } }, + { hookSpecificOutput: { additionalContext: 'context 2' } }, + ]; + + const results: HookExecutionResult[] = outputs.map((output) => ({ + hookConfig: { type: HookType.Command, command: 'echo test' }, + eventName: HookEventName.PreToolUse, + success: true, + output, + duration: 100, + })); + + const result = aggregator.aggregateResults( + results, + HookEventName.PreToolUse, + ); + expect( + result.finalOutput?.hookSpecificOutput?.['additionalContext'], + ).toBe('context 1\ncontext 2'); + }); + + it('should preserve other hookSpecificOutput fields', () => { + const outputs: HookOutput[] = [ + { + hookSpecificOutput: { + additionalContext: 'ctx', + tailToolCallRequest: { name: 'A' }, + }, + }, + { hookSpecificOutput: { additionalContext: 'ctx2' } }, + ]; + + const results: HookExecutionResult[] = outputs.map((output) => ({ + hookConfig: { type: HookType.Command, command: 'echo test' }, + eventName: HookEventName.PostToolUse, + success: true, + output, + duration: 100, + })); + + const result = aggregator.aggregateResults( + results, + HookEventName.PostToolUse, + ); + expect( + result.finalOutput?.hookSpecificOutput?.['tailToolCallRequest'], + ).toEqual({ name: 'A' }); + expect( + result.finalOutput?.hookSpecificOutput?.['additionalContext'], + ).toBe('ctx\nctx2'); + }); + }); + + describe('mergePermissionRequestOutputs', () => { + it('should prioritize deny 
over allow', () => { + const outputs: HookOutput[] = [ + { hookSpecificOutput: { decision: { behavior: 'allow' } } }, + { hookSpecificOutput: { decision: { behavior: 'deny' } } }, + ]; + + const results: HookExecutionResult[] = outputs.map((output) => ({ + hookConfig: { type: HookType.Command, command: 'echo test' }, + eventName: HookEventName.PermissionRequest, + success: true, + output, + duration: 100, + })); + + const result = aggregator.aggregateResults( + results, + HookEventName.PermissionRequest, + ); + + // Use accessor to verify - this ensures output is consumable by PermissionRequestHookOutput + const hookOutput = createHookOutput( + HookEventName.PermissionRequest, + result.finalOutput ?? {}, + ) as PermissionRequestHookOutput; + expect(hookOutput.isPermissionDenied()).toBe(true); + }); + + it('should concatenate messages', () => { + const outputs: HookOutput[] = [ + { + hookSpecificOutput: { + decision: { message: 'msg1', behavior: 'allow' }, + }, + }, + { + hookSpecificOutput: { + decision: { message: 'msg2', behavior: 'allow' }, + }, + }, + ]; + + const results: HookExecutionResult[] = outputs.map((output) => ({ + hookConfig: { type: HookType.Command, command: 'echo test' }, + eventName: HookEventName.PermissionRequest, + success: true, + output, + duration: 100, + })); + + const result = aggregator.aggregateResults( + results, + HookEventName.PermissionRequest, + ); + + const hookOutput = createHookOutput( + HookEventName.PermissionRequest, + result.finalOutput ?? 
{}, + ) as PermissionRequestHookOutput; + expect(hookOutput.getDenyMessage()).toBe('msg1\nmsg2'); + }); + + it('should use last updatedInput', () => { + const outputs: HookOutput[] = [ + { + hookSpecificOutput: { + decision: { updatedInput: { arg: '1' }, behavior: 'allow' }, + }, + }, + { + hookSpecificOutput: { + decision: { updatedInput: { arg: '2' }, behavior: 'allow' }, + }, + }, + ]; + + const results: HookExecutionResult[] = outputs.map((output) => ({ + hookConfig: { type: HookType.Command, command: 'echo test' }, + eventName: HookEventName.PermissionRequest, + success: true, + output, + duration: 100, + })); + + const result = aggregator.aggregateResults( + results, + HookEventName.PermissionRequest, + ); + + const hookOutput = createHookOutput( + HookEventName.PermissionRequest, + result.finalOutput ?? {}, + ) as PermissionRequestHookOutput; + expect(hookOutput.getUpdatedToolInput()).toEqual({ arg: '2' }); + }); + + it('should concatenate updatedPermissions', () => { + const outputs: HookOutput[] = [ + { + hookSpecificOutput: { + decision: { + updatedPermissions: [{ type: 'read' }], + behavior: 'allow', + }, + }, + }, + { + hookSpecificOutput: { + decision: { + updatedPermissions: [{ type: 'write' }], + behavior: 'allow', + }, + }, + }, + ]; + + const results: HookExecutionResult[] = outputs.map((output) => ({ + hookConfig: { type: HookType.Command, command: 'echo test' }, + eventName: HookEventName.PermissionRequest, + success: true, + output, + duration: 100, + })); + + const result = aggregator.aggregateResults( + results, + HookEventName.PermissionRequest, + ); + + const hookOutput = createHookOutput( + HookEventName.PermissionRequest, + result.finalOutput ?? 
{}, + ) as PermissionRequestHookOutput; + expect(hookOutput.getUpdatedPermissions()).toEqual([ + { type: 'read' }, + { type: 'write' }, + ]); + }); + + it('should set interrupt true if any hook sets it', () => { + const outputs: HookOutput[] = [ + { + hookSpecificOutput: { + decision: { behavior: 'deny', interrupt: false }, + }, + }, + { + hookSpecificOutput: { + decision: { behavior: 'deny', interrupt: true }, + }, + }, + ]; + + const results: HookExecutionResult[] = outputs.map((output) => ({ + hookConfig: { type: HookType.Command, command: 'echo test' }, + eventName: HookEventName.PermissionRequest, + success: true, + output, + duration: 100, + })); + + const result = aggregator.aggregateResults( + results, + HookEventName.PermissionRequest, + ); + + const hookOutput = createHookOutput( + HookEventName.PermissionRequest, + result.finalOutput ?? {}, + ) as PermissionRequestHookOutput; + expect(hookOutput.shouldInterrupt()).toBe(true); + }); + + it('should produce output consumable by PermissionRequestHookOutput accessors', () => { + const outputs: HookOutput[] = [ + { + hookSpecificOutput: { + decision: { + behavior: 'allow', + message: 'first msg', + updatedInput: { arg: '1' }, + }, + }, + }, + { + hookSpecificOutput: { + decision: { + behavior: 'deny', + message: 'second msg', + updatedInput: { arg: '2' }, + }, + }, + }, + ]; + + const results: HookExecutionResult[] = outputs.map((output) => ({ + hookConfig: { type: HookType.Command, command: 'echo test' }, + eventName: HookEventName.PermissionRequest, + success: true, + output, + duration: 100, + })); + + const result = aggregator.aggregateResults( + results, + HookEventName.PermissionRequest, + ); + + // Verify the output can be consumed by PermissionRequestHookOutput accessors + const hookOutput = createHookOutput( + HookEventName.PermissionRequest, + result.finalOutput ?? 
{}, + ) as PermissionRequestHookOutput; + + expect(hookOutput.isPermissionDenied()).toBe(true); + expect(hookOutput.getUpdatedToolInput()).toEqual({ arg: '2' }); + expect(hookOutput.getDenyMessage()).toBe('first msg\nsecond msg'); + }); + }); + + describe('mergeSimple (default case)', () => { + it('should use later values for simple fields', () => { + const outputs: HookOutput[] = [ + { reason: 'first', continue: true }, + { reason: 'second', continue: false }, + ]; + + const results: HookExecutionResult[] = outputs.map((output) => ({ + hookConfig: { type: HookType.Command, command: 'echo test' }, + eventName: HookEventName.Notification, + success: true, + output, + duration: 100, + })); + + const result = aggregator.aggregateResults( + results, + HookEventName.Notification, + ); + expect(result.finalOutput?.reason).toBe('second'); + expect(result.finalOutput?.continue).toBe(false); + }); + + it('should concatenate additionalContext from multiple hooks', () => { + const outputs: HookOutput[] = [ + { + hookSpecificOutput: { + additionalContext: 'ctx1', + otherField: 'value1', + }, + }, + { hookSpecificOutput: { additionalContext: 'ctx2' } }, + ]; + + const results: HookExecutionResult[] = outputs.map((output) => ({ + hookConfig: { type: HookType.Command, command: 'echo test' }, + eventName: HookEventName.Notification, + success: true, + output, + duration: 100, + })); + + const result = aggregator.aggregateResults( + results, + HookEventName.Notification, + ); + // mergeSimple concatenates additionalContext with newlines + expect( + result.finalOutput?.hookSpecificOutput?.['additionalContext'], + ).toBe('ctx1\nctx2'); + // otherField is overwritten (later value wins since it's not special-cased) + expect( + result.finalOutput?.hookSpecificOutput?.['otherField'], + ).toBeUndefined(); + }); + }); + + describe('createSpecificHookOutput', () => { + it('should create PreToolUseHookOutput for PreToolUse', () => { + const output: HookOutput = { continue: true }; + const 
results: HookExecutionResult[] = [ + { + hookConfig: { type: HookType.Command, command: 'echo test' }, + eventName: HookEventName.PreToolUse, + success: true, + output, + duration: 100, + }, + ]; + + const result = aggregator.aggregateResults( + results, + HookEventName.PreToolUse, + ); + // The finalOutput should be an instance of PreToolUseHookOutput + expect(result.finalOutput).toBeDefined(); + expect((result.finalOutput as { continue?: boolean }).continue).toBe( + true, + ); + }); + + it('should create StopHookOutput for Stop', () => { + const output: HookOutput = { stopReason: 'test' }; + const results: HookExecutionResult[] = [ + { + hookConfig: { type: HookType.Command, command: 'echo test' }, + eventName: HookEventName.Stop, + success: true, + output, + duration: 100, + }, + ]; + + const result = aggregator.aggregateResults(results, HookEventName.Stop); + expect(result.finalOutput).toBeDefined(); + expect((result.finalOutput as { stopReason?: string }).stopReason).toBe( + 'test', + ); + }); + + it('should create PermissionRequestHookOutput for PermissionRequest', () => { + const output: HookOutput = { + hookSpecificOutput: { decision: { behavior: 'allow' } }, + }; + const results: HookExecutionResult[] = [ + { + hookConfig: { type: HookType.Command, command: 'echo test' }, + eventName: HookEventName.PermissionRequest, + success: true, + output, + duration: 100, + }, + ]; + + const result = aggregator.aggregateResults( + results, + HookEventName.PermissionRequest, + ); + expect(result.finalOutput).toBeDefined(); + }); + }); + + describe('edge cases', () => { + it('should handle empty outputs array', () => { + const results: HookExecutionResult[] = []; + const result = aggregator.aggregateResults( + results, + HookEventName.PreToolUse, + ); + expect(result.finalOutput).toBeUndefined(); + }); + + it('should handle single output', () => { + const output: HookOutput = { decision: 'allow', reason: 'single' }; + const results: HookExecutionResult[] = [ + { + 
hookConfig: { type: HookType.Command, command: 'echo test' }, + eventName: HookEventName.PreToolUse, + success: true, + output, + duration: 100, + }, + ]; + + const result = aggregator.aggregateResults( + results, + HookEventName.PreToolUse, + ); + expect(result.finalOutput?.decision).toBe('allow'); + expect(result.finalOutput?.reason).toBe('single'); + }); + + it('should handle outputs without hookSpecificOutput', () => { + const outputs: HookOutput[] = [{ decision: 'allow' }, { reason: 'test' }]; + + const results: HookExecutionResult[] = outputs.map((output) => ({ + hookConfig: { type: HookType.Command, command: 'echo test' }, + eventName: HookEventName.PreToolUse, + success: true, + output, + duration: 100, + })); + + const result = aggregator.aggregateResults( + results, + HookEventName.PreToolUse, + ); + expect(result.finalOutput?.decision).toBe('allow'); + expect(result.finalOutput?.reason).toBe('test'); + }); + + it('should handle decision allow when no block', () => { + const outputs: HookOutput[] = [ + { decision: 'allow' }, + { decision: 'allow' }, + ]; + + const results: HookExecutionResult[] = outputs.map((output) => ({ + hookConfig: { type: HookType.Command, command: 'echo test' }, + eventName: HookEventName.PreToolUse, + success: true, + output, + duration: 100, + })); + + const result = aggregator.aggregateResults( + results, + HookEventName.PreToolUse, + ); + expect(result.finalOutput?.decision).toBe('allow'); + }); + }); +}); diff --git a/packages/core/src/hooks/hookAggregator.ts b/packages/core/src/hooks/hookAggregator.ts new file mode 100644 index 000000000..48af7a2a9 --- /dev/null +++ b/packages/core/src/hooks/hookAggregator.ts @@ -0,0 +1,368 @@ +/** + * @license + * Copyright 2026 Qwen Team + * SPDX-License-Identifier: Apache-2.0 + */ + +import { + HookEventName, + DefaultHookOutput, + PreToolUseHookOutput, + StopHookOutput, + PermissionRequestHookOutput, +} from './types.js'; +import type { HookOutput, HookExecutionResult } from './types.js'; 
+ +/** + * Aggregated result from multiple hook executions + */ +export interface AggregatedHookResult { + success: boolean; + allOutputs: HookOutput[]; + errors: Error[]; + totalDuration: number; + finalOutput?: HookOutput; +} + +/** + * HookAggregator merges multiple hook outputs using event-specific rules. + * + * Different events have different merging strategies: + * - PreToolUse/PostToolUse: OR logic for decisions, concatenation for messages + */ +export class HookAggregator { + /** + * Aggregate results from multiple hook executions + */ + aggregateResults( + results: HookExecutionResult[], + eventName: HookEventName, + ): AggregatedHookResult { + const allOutputs: HookOutput[] = []; + const errors: Error[] = []; + let totalDuration = 0; + + for (const result of results) { + totalDuration += result.duration; + + if (!result.success && result.error) { + errors.push(result.error); + } + + if (result.output) { + allOutputs.push(result.output); + } + } + + const success = errors.length === 0; + const finalOutput = this.mergeOutputs(allOutputs, eventName); + + return { + success, + allOutputs, + errors, + totalDuration, + finalOutput, + }; + } + + /** + * Merge multiple hook outputs based on event type + */ + private mergeOutputs( + outputs: HookOutput[], + eventName: HookEventName, + ): HookOutput | undefined { + if (outputs.length === 0) { + return undefined; + } + + if (outputs.length === 1) { + return this.createSpecificHookOutput(outputs[0], eventName); + } + + let merged: HookOutput; + + switch (eventName) { + case HookEventName.PreToolUse: + case HookEventName.PostToolUse: + case HookEventName.PostToolUseFailure: + case HookEventName.Stop: + merged = this.mergeWithOrLogic(outputs); + break; + case HookEventName.PermissionRequest: + merged = this.mergePermissionRequestOutputs(outputs); + break; + default: + merged = this.mergeSimple(outputs); + } + + return this.createSpecificHookOutput(merged, eventName); + } + + /** + * Merge outputs using OR logic for 
decisions and concatenation for messages. + * + * Rules: + * - Any "block" or "deny" decision results in blocking (most restrictive wins) + * - Reasons are concatenated with newlines + * - continue=false takes precedence over continue=true + * - Additional context is concatenated + */ + private mergeWithOrLogic(outputs: HookOutput[]): HookOutput { + const merged: HookOutput = {}; + const reasons: string[] = []; + const additionalContexts: string[] = []; + let hasBlock = false; + let hasContinueFalse = false; + let stopReason: string | undefined; + const otherHookSpecificFields: Record = {}; + + for (const output of outputs) { + // Check for blocking decisions + if (output.decision === 'block' || output.decision === 'deny') { + hasBlock = true; + } + + // Collect reasons + if (output.reason) { + reasons.push(output.reason); + } + + // Check continue flag + if (output.continue === false) { + hasContinueFalse = true; + if (output.stopReason) { + stopReason = output.stopReason; + } + } + + // Extract additional context + this.extractAdditionalContext(output, additionalContexts); + + // Collect other hookSpecificOutput fields (later values win) + if (output.hookSpecificOutput) { + for (const [key, value] of Object.entries(output.hookSpecificOutput)) { + if (key !== 'additionalContext') { + otherHookSpecificFields[key] = value; + } + } + } + + // Copy other fields (later values win for simple fields) + if (output.suppressOutput !== undefined) { + merged.suppressOutput = output.suppressOutput; + } + if (output.systemMessage !== undefined) { + merged.systemMessage = output.systemMessage; + } + } + + // Set merged decision + if (hasBlock) { + merged.decision = 'block'; + } else if (outputs.some((o) => o.decision === 'allow')) { + merged.decision = 'allow'; + } + + // Set merged reason + if (reasons.length > 0) { + merged.reason = reasons.join('\n'); + } + + // Set continue flag + if (hasContinueFalse) { + merged.continue = false; + if (stopReason) { + merged.stopReason = 
stopReason; + } + } + + // Build hookSpecificOutput + const hookSpecificOutput: Record = { + ...otherHookSpecificFields, + }; + if (additionalContexts.length > 0) { + hookSpecificOutput['additionalContext'] = additionalContexts.join('\n'); + } + + if (Object.keys(hookSpecificOutput).length > 0) { + merged.hookSpecificOutput = hookSpecificOutput; + } + + return merged; + } + + /** + * Merge outputs for mergePermissionRequestOutputs events. + * + * Rules: + * - behavior: deny wins over allow (security priority) + * - message: concatenated with newlines + * - updatedInput: later values win + * - updatedPermissions: concatenated + * - interrupt: true wins over false + */ + private mergePermissionRequestOutputs(outputs: HookOutput[]): HookOutput { + const merged: HookOutput = {}; + const messages: string[] = []; + let hasDeny = false; + let hasAllow = false; + let interrupt = false; + let updatedInput: Record | undefined; + const allUpdatedPermissions: Array<{ type: string; tool?: string }> = []; + + for (const output of outputs) { + const specific = output.hookSpecificOutput; + if (!specific) continue; + + const decision = specific['decision'] as + | { + behavior?: string; + message?: string; + updatedInput?: Record; + updatedPermissions?: Array<{ type: string; tool?: string }>; + interrupt?: boolean; + } + | undefined; + + if (!decision) continue; + + // Check behavior + if (decision['behavior'] === 'deny') { + hasDeny = true; + } else if (decision['behavior'] === 'allow') { + hasAllow = true; + } + + // Collect message + if (decision['message']) { + messages.push(decision['message'] as string); + } + + // Check interrupt - true wins + if (decision['interrupt'] === true) { + interrupt = true; + } + + // Collect updatedInput - use last non-empty + if (decision['updatedInput']) { + updatedInput = decision['updatedInput'] as Record; + } + + // Collect updatedPermissions + if (decision['updatedPermissions']) { + allUpdatedPermissions.push( + 
...(decision['updatedPermissions'] as Array<{ + type: string; + tool?: string; + }>), + ); + } + + // Copy other fields + if (output.continue !== undefined) { + merged.continue = output.continue; + } + if (output.reason !== undefined) { + merged.reason = output.reason; + } + } + + // Build merged decision + const mergedDecision: Record = {}; + + if (hasDeny) { + mergedDecision['behavior'] = 'deny'; + } else if (hasAllow) { + mergedDecision['behavior'] = 'allow'; + } + + if (messages.length > 0) { + mergedDecision['message'] = messages.join('\n'); + } + + if (interrupt) { + mergedDecision['interrupt'] = true; + } + + if (updatedInput) { + mergedDecision['updatedInput'] = updatedInput; + } + + if (allUpdatedPermissions.length > 0) { + mergedDecision['updatedPermissions'] = allUpdatedPermissions; + } + + merged.hookSpecificOutput = { + ...merged.hookSpecificOutput, + decision: mergedDecision, + }; + + return merged; + } + + /** + * Simple merge for events without special logic + */ + private mergeSimple(outputs: HookOutput[]): HookOutput { + const additionalContexts: string[] = []; + let merged: HookOutput = {}; + + for (const output of outputs) { + // Collect additionalContext for concatenation + this.extractAdditionalContext(output, additionalContexts); + merged = { ...merged, ...output }; + } + + // Merge additionalContext with concatenation + if (additionalContexts.length > 0) { + merged.hookSpecificOutput = { + ...merged.hookSpecificOutput, + additionalContext: additionalContexts.join('\n'), + }; + } + + return merged; + } + + /** + * Create the appropriate specific hook output class based on event type + */ + private createSpecificHookOutput( + output: HookOutput, + eventName: HookEventName, + ): DefaultHookOutput { + switch (eventName) { + case HookEventName.PreToolUse: + return new PreToolUseHookOutput(output); + case HookEventName.Stop: + return new StopHookOutput(output); + case HookEventName.PermissionRequest: + return new 
PermissionRequestHookOutput(output); + default: + return new DefaultHookOutput(output); + } + } + + /** + * Extract additional context from hook-specific outputs + */ + private extractAdditionalContext( + output: HookOutput, + contexts: string[], + ): void { + const specific = output.hookSpecificOutput; + if (!specific) { + return; + } + + // Extract additionalContext from various hook types + if ( + 'additionalContext' in specific && + typeof specific['additionalContext'] === 'string' + ) { + contexts.push(specific['additionalContext']); + } + } +} diff --git a/packages/core/src/hooks/hookEventHandler.test.ts b/packages/core/src/hooks/hookEventHandler.test.ts new file mode 100644 index 000000000..f556a8c30 --- /dev/null +++ b/packages/core/src/hooks/hookEventHandler.test.ts @@ -0,0 +1,278 @@ +/** + * @license + * Copyright 2026 Qwen Team + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect, vi, beforeEach, type Mock } from 'vitest'; +import { HookEventHandler } from './hookEventHandler.js'; +import { HookEventName, HookType, HooksConfigSource } from './types.js'; +import type { Config } from '../config/config.js'; +import type { + HookPlanner, + HookRunner, + HookAggregator, + AggregatedHookResult, +} from './index.js'; +import type { HookConfig, HookOutput } from './types.js'; + +describe('HookEventHandler', () => { + let mockConfig: Config; + let mockHookPlanner: HookPlanner; + let mockHookRunner: HookRunner; + let mockHookAggregator: HookAggregator; + let hookEventHandler: HookEventHandler; + + beforeEach(() => { + mockConfig = { + getSessionId: vi.fn().mockReturnValue('test-session-id'), + getTranscriptPath: vi.fn().mockReturnValue('/test/transcript'), + getWorkingDir: vi.fn().mockReturnValue('/test/cwd'), + } as unknown as Config; + + mockHookPlanner = { + createExecutionPlan: vi.fn(), + } as unknown as HookPlanner; + + mockHookRunner = { + executeHooksSequential: vi.fn(), + executeHooksParallel: vi.fn(), + } as unknown as HookRunner; 
+ + mockHookAggregator = { + aggregateResults: vi.fn(), + } as unknown as HookAggregator; + + hookEventHandler = new HookEventHandler( + mockConfig, + mockHookPlanner, + mockHookRunner, + mockHookAggregator, + ); + }); + + const createMockExecutionPlan = ( + hookConfigs: HookConfig[] = [], + sequential: boolean = false, + ) => ({ + hookConfigs, + sequential, + eventName: HookEventName.PreToolUse, + }); + + const createMockAggregatedResult = ( + success: boolean = true, + finalOutput?: HookOutput, + ): AggregatedHookResult => ({ + success, + allOutputs: [], + errors: [], + totalDuration: 100, + finalOutput, + }); + + describe('fireUserPromptSubmitEvent', () => { + it('should execute hooks for UserPromptSubmit event', async () => { + const mockPlan = createMockExecutionPlan([]); + const mockAggregated = createMockAggregatedResult(true); + + vi.mocked(mockHookPlanner.createExecutionPlan).mockReturnValue(mockPlan); + vi.mocked(mockHookRunner.executeHooksParallel).mockResolvedValue([]); + vi.mocked(mockHookAggregator.aggregateResults).mockReturnValue( + mockAggregated, + ); + + const result = + await hookEventHandler.fireUserPromptSubmitEvent('test prompt'); + + expect(mockHookPlanner.createExecutionPlan).toHaveBeenCalledWith( + HookEventName.UserPromptSubmit, + undefined, + ); + expect(result.success).toBe(true); + }); + + it('should include prompt in the hook input', async () => { + const mockPlan = createMockExecutionPlan([ + { + type: HookType.Command, + command: 'echo test', + source: HooksConfigSource.Project, + }, + ]); + vi.mocked(mockHookPlanner.createExecutionPlan).mockReturnValue(mockPlan); + vi.mocked(mockHookRunner.executeHooksParallel).mockResolvedValue([]); + vi.mocked(mockHookAggregator.aggregateResults).mockReturnValue( + createMockAggregatedResult(true), + ); + + await hookEventHandler.fireUserPromptSubmitEvent('my test prompt'); + + const mockCalls = (mockHookRunner.executeHooksParallel as Mock).mock + .calls; + const input = mockCalls[0][2] as { 
prompt: string }; + expect(input.prompt).toBe('my test prompt'); + }); + }); + + describe('fireStopEvent', () => { + it('should execute hooks for Stop event', async () => { + const mockPlan = createMockExecutionPlan([]); + const mockAggregated = createMockAggregatedResult(true); + + vi.mocked(mockHookPlanner.createExecutionPlan).mockReturnValue(mockPlan); + vi.mocked(mockHookRunner.executeHooksParallel).mockResolvedValue([]); + vi.mocked(mockHookAggregator.aggregateResults).mockReturnValue( + mockAggregated, + ); + + const result = await hookEventHandler.fireStopEvent(true, 'last message'); + + expect(mockHookPlanner.createExecutionPlan).toHaveBeenCalledWith( + HookEventName.Stop, + undefined, + ); + expect(result.success).toBe(true); + }); + + it('should include stop parameters in hook input', async () => { + const mockPlan = createMockExecutionPlan([ + { + type: HookType.Command, + command: 'echo test', + source: HooksConfigSource.Project, + }, + ]); + vi.mocked(mockHookPlanner.createExecutionPlan).mockReturnValue(mockPlan); + vi.mocked(mockHookRunner.executeHooksParallel).mockResolvedValue([]); + vi.mocked(mockHookAggregator.aggregateResults).mockReturnValue( + createMockAggregatedResult(true), + ); + + await hookEventHandler.fireStopEvent(true, 'last assistant message'); + + const mockCalls = (mockHookRunner.executeHooksParallel as Mock).mock + .calls; + const input = mockCalls[0][2] as { + stop_hook_active: boolean; + last_assistant_message: string; + }; + expect(input.stop_hook_active).toBe(true); + expect(input.last_assistant_message).toBe('last assistant message'); + }); + + it('should handle continue=false in final output', async () => { + const mockPlan = createMockExecutionPlan([]); + vi.mocked(mockHookPlanner.createExecutionPlan).mockReturnValue(mockPlan); + vi.mocked(mockHookRunner.executeHooksParallel).mockResolvedValue([]); + vi.mocked(mockHookAggregator.aggregateResults).mockReturnValue( + createMockAggregatedResult(true, { + continue: false, + 
stopReason: 'test stop', + }), + ); + + await hookEventHandler.fireStopEvent(); + + expect(true).toBe(true); + }); + + it('should handle missing finalOutput gracefully', async () => { + const mockPlan = createMockExecutionPlan([]); + vi.mocked(mockHookPlanner.createExecutionPlan).mockReturnValue(mockPlan); + vi.mocked(mockHookRunner.executeHooksParallel).mockResolvedValue([]); + vi.mocked(mockHookAggregator.aggregateResults).mockReturnValue( + createMockAggregatedResult(true, undefined), + ); + + const result = await hookEventHandler.fireStopEvent(); + + expect(result.success).toBe(true); + expect(result.finalOutput).toBeUndefined(); + }); + }); + + describe('sequential vs parallel execution', () => { + it('should execute hooks sequentially when plan.sequential is true', async () => { + const mockPlan = createMockExecutionPlan( + [ + { + type: HookType.Command, + command: 'echo test', + source: HooksConfigSource.Project, + }, + ], + true, + ); + + vi.mocked(mockHookPlanner.createExecutionPlan).mockReturnValue(mockPlan); + vi.mocked(mockHookRunner.executeHooksSequential).mockResolvedValue([]); + vi.mocked(mockHookAggregator.aggregateResults).mockReturnValue( + createMockAggregatedResult(true), + ); + + await hookEventHandler.fireUserPromptSubmitEvent('test'); + + expect(mockHookRunner.executeHooksSequential).toHaveBeenCalled(); + expect(mockHookRunner.executeHooksParallel).not.toHaveBeenCalled(); + }); + + it('should execute hooks in parallel when plan.sequential is false', async () => { + const mockPlan = createMockExecutionPlan( + [ + { + type: HookType.Command, + command: 'echo test', + source: HooksConfigSource.Project, + }, + ], + false, + ); + + vi.mocked(mockHookPlanner.createExecutionPlan).mockReturnValue(mockPlan); + vi.mocked(mockHookRunner.executeHooksParallel).mockResolvedValue([]); + vi.mocked(mockHookAggregator.aggregateResults).mockReturnValue( + createMockAggregatedResult(true), + ); + + await hookEventHandler.fireUserPromptSubmitEvent('test'); + + 
expect(mockHookRunner.executeHooksParallel).toHaveBeenCalled(); + expect(mockHookRunner.executeHooksSequential).not.toHaveBeenCalled(); + }); + }); + + describe('error handling', () => { + it('should return error result when hook execution throws', async () => { + vi.mocked(mockHookPlanner.createExecutionPlan).mockImplementation(() => { + throw new Error('Planner error'); + }); + + const result = await hookEventHandler.fireUserPromptSubmitEvent('test'); + + expect(result.success).toBe(false); + expect(result.errors).toHaveLength(1); + expect(result.errors[0].message).toBe('Planner error'); + }); + + it('should return error result when hook runner throws', async () => { + const mockPlan = createMockExecutionPlan([ + { + type: HookType.Command, + command: 'echo test', + source: HooksConfigSource.Project, + }, + ]); + vi.mocked(mockHookPlanner.createExecutionPlan).mockReturnValue(mockPlan); + vi.mocked(mockHookRunner.executeHooksParallel).mockRejectedValue( + new Error('Runner error'), + ); + + const result = await hookEventHandler.fireUserPromptSubmitEvent('test'); + + expect(result.success).toBe(false); + expect(result.errors).toHaveLength(1); + expect(result.errors[0].message).toBe('Runner error'); + }); + }); +}); diff --git a/packages/core/src/hooks/hookEventHandler.ts b/packages/core/src/hooks/hookEventHandler.ts new file mode 100644 index 000000000..2fd5f2892 --- /dev/null +++ b/packages/core/src/hooks/hookEventHandler.ts @@ -0,0 +1,192 @@ +/** + * @license + * Copyright 2026 Qwen Team + * SPDX-License-Identifier: Apache-2.0 + */ + +import type { Config } from '../config/config.js'; +import type { HookPlanner, HookEventContext } from './hookPlanner.js'; +import type { HookRunner } from './hookRunner.js'; +import type { HookAggregator, AggregatedHookResult } from './hookAggregator.js'; +import { HookEventName } from './types.js'; +import type { + HookConfig, + HookInput, + HookExecutionResult, + UserPromptSubmitInput, + StopInput, +} from './types.js'; +import { 
createDebugLogger } from '../utils/debugLogger.js'; + +const debugLogger = createDebugLogger('TRUSTED_HOOKS'); + +/** + * Hook event bus that coordinates hook execution across the system + */ +export class HookEventHandler { + private readonly config: Config; + private readonly hookPlanner: HookPlanner; + private readonly hookRunner: HookRunner; + private readonly hookAggregator: HookAggregator; + + constructor( + config: Config, + hookPlanner: HookPlanner, + hookRunner: HookRunner, + hookAggregator: HookAggregator, + ) { + this.config = config; + this.hookPlanner = hookPlanner; + this.hookRunner = hookRunner; + this.hookAggregator = hookAggregator; + } + + /** + * Fire a UserPromptSubmit event + * Called by handleHookExecutionRequest - executes hooks directly + */ + async fireUserPromptSubmitEvent( + prompt: string, + ): Promise { + const input: UserPromptSubmitInput = { + ...this.createBaseInput(HookEventName.UserPromptSubmit), + prompt, + }; + + return this.executeHooks(HookEventName.UserPromptSubmit, input); + } + + /** + * Fire a Stop event + * Called by handleHookExecutionRequest - executes hooks directly + */ + async fireStopEvent( + stopHookActive: boolean = false, + lastAssistantMessage: string = '', + ): Promise { + const input: StopInput = { + ...this.createBaseInput(HookEventName.Stop), + stop_hook_active: stopHookActive, + last_assistant_message: lastAssistantMessage, + }; + + return this.executeHooks(HookEventName.Stop, input); + } + + /** + * Execute hooks for a specific event (direct execution without MessageBus) + * Used as fallback when MessageBus is not available + */ + private async executeHooks( + eventName: HookEventName, + input: HookInput, + context?: HookEventContext, + ): Promise { + try { + // Create execution plan + const plan = this.hookPlanner.createExecutionPlan(eventName, context); + + if (!plan || plan.hookConfigs.length === 0) { + return { + success: true, + allOutputs: [], + errors: [], + totalDuration: 0, + }; + } + + const 
onHookStart = (_config: HookConfig, _index: number) => { + // Hook start event (telemetry removed) + }; + + const onHookEnd = (_config: HookConfig, _result: HookExecutionResult) => { + // Hook end event (telemetry removed) + }; + + // Execute hooks according to the plan's strategy + const results = plan.sequential + ? await this.hookRunner.executeHooksSequential( + plan.hookConfigs, + eventName, + input, + onHookStart, + onHookEnd, + ) + : await this.hookRunner.executeHooksParallel( + plan.hookConfigs, + eventName, + input, + onHookStart, + onHookEnd, + ); + + // Aggregate results + const aggregated = this.hookAggregator.aggregateResults( + results, + eventName, + ); + + // Process common hook output fields centrally + this.processCommonHookOutputFields(aggregated); + + return aggregated; + } catch (error) { + debugLogger.error(`Hook event bus error for ${eventName}: ${error}`); + + return { + success: false, + allOutputs: [], + errors: [error instanceof Error ? error : new Error(String(error))], + totalDuration: 0, + }; + } + } + + /** + * Create base hook input with common fields + */ + private createBaseInput(eventName: HookEventName): HookInput { + // Get the transcript path from the Config + const transcriptPath = this.config.getTranscriptPath(); + + return { + session_id: this.config.getSessionId(), + transcript_path: transcriptPath, + cwd: this.config.getWorkingDir(), + hook_event_name: eventName, + timestamp: new Date().toISOString(), + }; + } + + /** + * Process common hook output fields centrally + */ + private processCommonHookOutputFields( + aggregated: AggregatedHookResult, + ): void { + if (!aggregated.finalOutput) { + return; + } + + // Handle systemMessage - show to user in transcript mode (not to agent) + const systemMessage = aggregated.finalOutput.systemMessage; + if (systemMessage && !aggregated.finalOutput.suppressOutput) { + debugLogger.warn(`Hook system message: ${systemMessage}`); + } + + // Handle suppressOutput - already handled by not 
logging above when true + + // Handle continue=false - this should stop the entire agent execution + if (aggregated.finalOutput.continue === false) { + const stopReason = + aggregated.finalOutput.stopReason || + aggregated.finalOutput.reason || + 'No reason provided'; + debugLogger.debug(`Hook requested to stop execution: ${stopReason}`); + + // Note: The actual stopping of execution must be handled by integration points + // as they need to interpret this signal in the context of their specific workflow + // This is just logging the request centrally + } + } +} diff --git a/packages/core/src/hooks/hookPlanner.test.ts b/packages/core/src/hooks/hookPlanner.test.ts new file mode 100644 index 000000000..e3bb99076 --- /dev/null +++ b/packages/core/src/hooks/hookPlanner.test.ts @@ -0,0 +1,366 @@ +/** + * @license + * Copyright 2026 Qwen Team + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import type { HookRegistry, HookRegistryEntry } from './hookRegistry.js'; +import { HookPlanner } from './hookPlanner.js'; +import { HookEventName, HookType, HooksConfigSource } from './types.js'; + +describe('HookPlanner', () => { + let mockRegistry: HookRegistry; + let planner: HookPlanner; + + beforeEach(() => { + mockRegistry = { + getHooksForEvent: vi.fn(), + } as unknown as HookRegistry; + planner = new HookPlanner(mockRegistry); + }); + + describe('createExecutionPlan', () => { + it('should return null when no hooks for event', () => { + vi.mocked(mockRegistry.getHooksForEvent).mockReturnValue([]); + + const result = planner.createExecutionPlan(HookEventName.PreToolUse); + + expect(result).toBeNull(); + }); + + it('should return null when no hooks match context', () => { + const entry: HookRegistryEntry = { + config: { type: HookType.Command, command: 'echo test' }, + source: HooksConfigSource.Project, + eventName: HookEventName.PreToolUse, + matcher: 'bash', + enabled: true, + }; + 
vi.mocked(mockRegistry.getHooksForEvent).mockReturnValue([entry]); + + const result = planner.createExecutionPlan(HookEventName.PreToolUse, { + toolName: 'glob', + }); + + expect(result).toBeNull(); + }); + + it('should create plan with matching hooks', () => { + const entry: HookRegistryEntry = { + config: { + type: HookType.Command, + command: 'echo test', + name: 'test-hook', + }, + source: HooksConfigSource.Project, + eventName: HookEventName.PreToolUse, + enabled: true, + }; + vi.mocked(mockRegistry.getHooksForEvent).mockReturnValue([entry]); + + const result = planner.createExecutionPlan(HookEventName.PreToolUse); + + expect(result).not.toBeNull(); + expect(result!.eventName).toBe(HookEventName.PreToolUse); + expect(result!.hookConfigs).toHaveLength(1); + expect(result!.sequential).toBe(false); + }); + + it('should set sequential to true when any hook has sequential=true', () => { + const entry: HookRegistryEntry = { + config: { type: HookType.Command, command: 'echo test' }, + source: HooksConfigSource.Project, + eventName: HookEventName.PreToolUse, + sequential: true, + enabled: true, + }; + vi.mocked(mockRegistry.getHooksForEvent).mockReturnValue([entry]); + + const result = planner.createExecutionPlan(HookEventName.PreToolUse); + + expect(result!.sequential).toBe(true); + }); + + it('should deduplicate hooks with same config', () => { + const config = { type: HookType.Command, command: 'echo test' }; + const entry1: HookRegistryEntry = { + config, + source: HooksConfigSource.Project, + eventName: HookEventName.PreToolUse, + enabled: true, + }; + const entry2: HookRegistryEntry = { + config, + source: HooksConfigSource.Project, + eventName: HookEventName.PreToolUse, + enabled: true, + }; + vi.mocked(mockRegistry.getHooksForEvent).mockReturnValue([ + entry1, + entry2, + ]); + + const result = planner.createExecutionPlan(HookEventName.PreToolUse); + + expect(result!.hookConfigs).toHaveLength(1); + }); + }); + + describe('matchesContext', () => { + it('should 
match all when no matcher', () => { + const entry: HookRegistryEntry = { + config: { type: HookType.Command, command: 'echo test' }, + source: HooksConfigSource.Project, + eventName: HookEventName.PreToolUse, + enabled: true, + }; + vi.mocked(mockRegistry.getHooksForEvent).mockReturnValue([entry]); + + const result = planner.createExecutionPlan(HookEventName.PreToolUse, { + toolName: 'bash', + }); + + expect(result).not.toBeNull(); + }); + + it('should match all when no context', () => { + const entry: HookRegistryEntry = { + config: { type: HookType.Command, command: 'echo test' }, + source: HooksConfigSource.Project, + eventName: HookEventName.PreToolUse, + matcher: 'bash', + enabled: true, + }; + vi.mocked(mockRegistry.getHooksForEvent).mockReturnValue([entry]); + + const result = planner.createExecutionPlan(HookEventName.PreToolUse); + + expect(result).not.toBeNull(); + }); + + it('should match empty string as wildcard', () => { + const entry: HookRegistryEntry = { + config: { type: HookType.Command, command: 'echo test' }, + source: HooksConfigSource.Project, + eventName: HookEventName.PreToolUse, + matcher: '', + enabled: true, + }; + vi.mocked(mockRegistry.getHooksForEvent).mockReturnValue([entry]); + + const result = planner.createExecutionPlan(HookEventName.PreToolUse, { + toolName: 'bash', + }); + + expect(result).not.toBeNull(); + }); + + it('should match asterisk as wildcard', () => { + const entry: HookRegistryEntry = { + config: { type: HookType.Command, command: 'echo test' }, + source: HooksConfigSource.Project, + eventName: HookEventName.PreToolUse, + matcher: '*', + enabled: true, + }; + vi.mocked(mockRegistry.getHooksForEvent).mockReturnValue([entry]); + + const result = planner.createExecutionPlan(HookEventName.PreToolUse, { + toolName: 'bash', + }); + + expect(result).not.toBeNull(); + }); + + it('should match tool name with exact string', () => { + const entry: HookRegistryEntry = { + config: { type: HookType.Command, command: 'echo test' }, + 
source: HooksConfigSource.Project, + eventName: HookEventName.PreToolUse, + matcher: 'bash', + enabled: true, + }; + vi.mocked(mockRegistry.getHooksForEvent).mockReturnValue([entry]); + + const result = planner.createExecutionPlan(HookEventName.PreToolUse, { + toolName: 'bash', + }); + + expect(result).not.toBeNull(); + }); + + it('should not match tool name with different exact string', () => { + const entry: HookRegistryEntry = { + config: { type: HookType.Command, command: 'echo test' }, + source: HooksConfigSource.Project, + eventName: HookEventName.PreToolUse, + matcher: 'bash', + enabled: true, + }; + vi.mocked(mockRegistry.getHooksForEvent).mockReturnValue([entry]); + + const result = planner.createExecutionPlan(HookEventName.PreToolUse, { + toolName: 'glob', + }); + + expect(result).toBeNull(); + }); + + it('should match tool name with regex', () => { + const entry: HookRegistryEntry = { + config: { type: HookType.Command, command: 'echo test' }, + source: HooksConfigSource.Project, + eventName: HookEventName.PreToolUse, + matcher: '^bash.*', + enabled: true, + }; + vi.mocked(mockRegistry.getHooksForEvent).mockReturnValue([entry]); + + const result = planner.createExecutionPlan(HookEventName.PreToolUse, { + toolName: 'bash', + }); + + expect(result).not.toBeNull(); + }); + + it('should match tool name with regex wildcard', () => { + const entry: HookRegistryEntry = { + config: { type: HookType.Command, command: 'echo test' }, + source: HooksConfigSource.Project, + eventName: HookEventName.PreToolUse, + matcher: '.*', + enabled: true, + }; + vi.mocked(mockRegistry.getHooksForEvent).mockReturnValue([entry]); + + const result = planner.createExecutionPlan(HookEventName.PreToolUse, { + toolName: 'any-tool', + }); + + expect(result).not.toBeNull(); + }); + + it('should match trigger with exact string', () => { + const entry: HookRegistryEntry = { + config: { type: HookType.Command, command: 'echo test' }, + source: HooksConfigSource.Project, + eventName: 
HookEventName.SessionStart, + matcher: 'user', + enabled: true, + }; + vi.mocked(mockRegistry.getHooksForEvent).mockReturnValue([entry]); + + const result = planner.createExecutionPlan(HookEventName.SessionStart, { + trigger: 'user', + }); + + expect(result).not.toBeNull(); + }); + + it('should not match trigger with different string', () => { + const entry: HookRegistryEntry = { + config: { type: HookType.Command, command: 'echo test' }, + source: HooksConfigSource.Project, + eventName: HookEventName.SessionStart, + matcher: 'user', + enabled: true, + }; + vi.mocked(mockRegistry.getHooksForEvent).mockReturnValue([entry]); + + const result = planner.createExecutionPlan(HookEventName.SessionStart, { + trigger: 'api', + }); + + expect(result).toBeNull(); + }); + + it('should match when context has both toolName and trigger (prefers toolName)', () => { + const entry: HookRegistryEntry = { + config: { type: HookType.Command, command: 'echo test' }, + source: HooksConfigSource.Project, + eventName: HookEventName.PreToolUse, + matcher: 'bash', + enabled: true, + }; + vi.mocked(mockRegistry.getHooksForEvent).mockReturnValue([entry]); + + const result = planner.createExecutionPlan(HookEventName.PreToolUse, { + toolName: 'bash', + trigger: 'api', + }); + + expect(result).not.toBeNull(); + }); + + it('should match with trimmed matcher', () => { + const entry: HookRegistryEntry = { + config: { type: HookType.Command, command: 'echo test' }, + source: HooksConfigSource.Project, + eventName: HookEventName.PreToolUse, + matcher: ' bash ', + enabled: true, + }; + vi.mocked(mockRegistry.getHooksForEvent).mockReturnValue([entry]); + + const result = planner.createExecutionPlan(HookEventName.PreToolUse, { + toolName: 'bash', + }); + + expect(result).not.toBeNull(); + }); + + it('should fallback to exact match when regex is invalid', () => { + const entry: HookRegistryEntry = { + config: { type: HookType.Command, command: 'echo test' }, + source: HooksConfigSource.Project, + 
eventName: HookEventName.PreToolUse, + matcher: '[invalid(regex', // Invalid regex + enabled: true, + }; + vi.mocked(mockRegistry.getHooksForEvent).mockReturnValue([entry]); + + // Should fallback to exact match - should NOT match 'bash' + const result = planner.createExecutionPlan(HookEventName.PreToolUse, { + toolName: 'bash', + }); + + expect(result).toBeNull(); + }); + + it('should match using fallback exact match when regex is invalid', () => { + const entry: HookRegistryEntry = { + config: { type: HookType.Command, command: 'echo test' }, + source: HooksConfigSource.Project, + eventName: HookEventName.PreToolUse, + matcher: '[invalid(regex', // Invalid regex + enabled: true, + }; + vi.mocked(mockRegistry.getHooksForEvent).mockReturnValue([entry]); + + // Should fallback to exact match - should match '[invalid(regex' + const result = planner.createExecutionPlan(HookEventName.PreToolUse, { + toolName: '[invalid(regex', + }); + + expect(result).not.toBeNull(); + }); + + it('should handle complex invalid regex gracefully', () => { + const entry: HookRegistryEntry = { + config: { type: HookType.Command, command: 'echo test' }, + source: HooksConfigSource.Project, + eventName: HookEventName.PreToolUse, + matcher: '(unclosed', + enabled: true, + }; + vi.mocked(mockRegistry.getHooksForEvent).mockReturnValue([entry]); + + const result = planner.createExecutionPlan(HookEventName.PreToolUse, { + toolName: 'bash', + }); + + expect(result).toBeNull(); + }); + }); +}); diff --git a/packages/core/src/hooks/hookPlanner.ts b/packages/core/src/hooks/hookPlanner.ts new file mode 100644 index 000000000..3eef01543 --- /dev/null +++ b/packages/core/src/hooks/hookPlanner.ts @@ -0,0 +1,146 @@ +/** + * @license + * Copyright 2026 Qwen Team + * SPDX-License-Identifier: Apache-2.0 + */ + +import type { HookRegistry, HookRegistryEntry } from './hookRegistry.js'; +import type { HookExecutionPlan } from './types.js'; +import { getHookKey, type HookEventName } from './types.js'; +import { 
createDebugLogger } from '../utils/debugLogger.js'; + +const debugLogger = createDebugLogger('TRUSTED_HOOKS'); + +/** + * Hook planner that selects matching hooks and creates execution plans + */ +export class HookPlanner { + private readonly hookRegistry: HookRegistry; + + constructor(hookRegistry: HookRegistry) { + this.hookRegistry = hookRegistry; + } + + /** + * Create execution plan for a hook event + */ + createExecutionPlan( + eventName: HookEventName, + context?: HookEventContext, + ): HookExecutionPlan | null { + const hookEntries = this.hookRegistry.getHooksForEvent(eventName); + + if (hookEntries.length === 0) { + return null; + } + + // Filter hooks by matcher + const matchingEntries = hookEntries.filter((entry) => + this.matchesContext(entry, context), + ); + + if (matchingEntries.length === 0) { + return null; + } + + // Deduplicate identical hooks + const deduplicatedEntries = this.deduplicateHooks(matchingEntries); + + // Extract hook configs + const hookConfigs = deduplicatedEntries.map((entry) => entry.config); + + // Determine execution strategy - if ANY hook definition has sequential=true, run all sequentially + const sequential = deduplicatedEntries.some( + (entry) => entry.sequential === true, + ); + + const plan: HookExecutionPlan = { + eventName, + hookConfigs, + sequential, + }; + + return plan; + } + + /** + * Check if a hook entry matches the given context + */ + private matchesContext( + entry: HookRegistryEntry, + context?: HookEventContext, + ): boolean { + if (!entry.matcher || !context) { + return true; // No matcher means match all + } + + const matcher = entry.matcher.trim(); + + if (matcher === '' || matcher === '*') { + return true; // Empty string or wildcard matches all + } + + // For tool events, match against tool name + if (context.toolName) { + return this.matchesToolName(matcher, context.toolName); + } + + // For other events, match against trigger/source + if (context.trigger) { + return this.matchesTrigger(matcher, 
context.trigger); + } + + return true; + } + + /** + * Match tool name against matcher pattern + */ + private matchesToolName(matcher: string, toolName: string): boolean { + try { + // Attempt to treat the matcher as a regular expression. + const regex = new RegExp(matcher); + return regex.test(toolName); + } catch (error) { + // If it's not a valid regex, treat it as a literal string for an exact match. + debugLogger.warn( + `Invalid regex in hook matcher "${matcher}" for tool "${toolName}", falling back to exact match: ${error}`, + ); + return matcher === toolName; + } + } + + /** + * Match trigger/source against matcher pattern + */ + private matchesTrigger(matcher: string, trigger: string): boolean { + return matcher === trigger; + } + + /** + * Deduplicate identical hook configurations + */ + private deduplicateHooks(entries: HookRegistryEntry[]): HookRegistryEntry[] { + const seen = new Set(); + const deduplicated: HookRegistryEntry[] = []; + + for (const entry of entries) { + const key = getHookKey(entry.config); + + if (!seen.has(key)) { + seen.add(key); + deduplicated.push(entry); + } + } + + return deduplicated; + } +} + +/** + * Context information for hook event matching + */ +export interface HookEventContext { + toolName?: string; + trigger?: string; +} diff --git a/packages/core/src/hooks/hookRegistry.test.ts b/packages/core/src/hooks/hookRegistry.test.ts new file mode 100644 index 000000000..a9e79f5fa --- /dev/null +++ b/packages/core/src/hooks/hookRegistry.test.ts @@ -0,0 +1,636 @@ +/** + * @license + * Copyright 2026 Qwen Team + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import type { HookRegistryConfig, FeedbackEmitter } from './hookRegistry.js'; +import { HookRegistry } from './hookRegistry.js'; +import { HookEventName, HooksConfigSource, HookType } from './types.js'; +import type { HookConfig } from './types.js'; + +// Mock TrustedHooksManager +vi.mock('./trustedHooks.js', () => 
({ + TrustedHooksManager: vi.fn().mockImplementation(() => ({ + getUntrustedHooks: vi.fn().mockReturnValue([]), + trustHooks: vi.fn(), + })), +})); + +describe('HookRegistry', () => { + let mockConfig: HookRegistryConfig; + let mockFeedbackEmitter: FeedbackEmitter; + + beforeEach(() => { + mockConfig = { + getProjectRoot: vi.fn().mockReturnValue('/test/project'), + isTrustedFolder: vi.fn().mockReturnValue(true), + getHooks: vi.fn().mockReturnValue(undefined), + getProjectHooks: vi.fn().mockReturnValue(undefined), + getDisabledHooks: vi.fn().mockReturnValue([]), + getExtensions: vi.fn().mockReturnValue([]), + }; + mockFeedbackEmitter = { + emitFeedback: vi.fn(), + }; + vi.clearAllMocks(); + }); + + describe('initialize', () => { + it('should initialize with empty hooks when no config provided', async () => { + const registry = new HookRegistry(mockConfig); + await registry.initialize(); + expect(registry.getAllHooks()).toHaveLength(0); + }); + + it('should process project hooks from config', async () => { + const hooksConfig = { + [HookEventName.PreToolUse]: [ + { + hooks: [ + { + type: HookType.Command, + command: 'echo test', + name: 'test-hook', + }, + ], + }, + ], + }; + mockConfig.getHooks = vi.fn().mockReturnValue(hooksConfig); + + const registry = new HookRegistry(mockConfig); + await registry.initialize(); + + const allHooks = registry.getAllHooks(); + expect(allHooks).toHaveLength(1); + expect(allHooks[0].eventName).toBe(HookEventName.PreToolUse); + expect(allHooks[0].source).toBe(HooksConfigSource.Project); + }); + + it('should not process project hooks in untrusted folder', async () => { + mockConfig.isTrustedFolder = vi.fn().mockReturnValue(false); + const hooksConfig = { + [HookEventName.PreToolUse]: [ + { + hooks: [{ type: HookType.Command, command: 'echo test' }], + }, + ], + }; + mockConfig.getHooks = vi.fn().mockReturnValue(hooksConfig); + + const registry = new HookRegistry(mockConfig); + await registry.initialize(); + + 
expect(registry.getAllHooks()).toHaveLength(0); + }); + }); + + describe('getHooksForEvent', () => { + it('should return hooks for specific event', async () => { + const hooksConfig = { + [HookEventName.PreToolUse]: [ + { + hooks: [ + { type: HookType.Command, command: 'echo pre', name: 'pre-hook' }, + ], + }, + ], + [HookEventName.PostToolUse]: [ + { + hooks: [ + { + type: HookType.Command, + command: 'echo post', + name: 'post-hook', + }, + ], + }, + ], + }; + mockConfig.getHooks = vi.fn().mockReturnValue(hooksConfig); + + const registry = new HookRegistry(mockConfig); + await registry.initialize(); + + const preHooks = registry.getHooksForEvent(HookEventName.PreToolUse); + expect(preHooks).toHaveLength(1); + expect(preHooks[0].config.name).toBe('pre-hook'); + + const postHooks = registry.getHooksForEvent(HookEventName.PostToolUse); + expect(postHooks).toHaveLength(1); + expect(postHooks[0].config.name).toBe('post-hook'); + }); + + it('should filter out disabled hooks', async () => { + mockConfig.getDisabledHooks = vi.fn().mockReturnValue(['disabled-hook']); + const hooksConfig = { + [HookEventName.PreToolUse]: [ + { + hooks: [ + { + type: HookType.Command, + command: 'echo enabled', + name: 'enabled-hook', + }, + { + type: HookType.Command, + command: 'echo disabled', + name: 'disabled-hook', + }, + ], + }, + ], + }; + mockConfig.getHooks = vi.fn().mockReturnValue(hooksConfig); + + const registry = new HookRegistry(mockConfig); + await registry.initialize(); + + const hooks = registry.getHooksForEvent(HookEventName.PreToolUse); + expect(hooks).toHaveLength(1); + expect(hooks[0].config.name).toBe('enabled-hook'); + }); + + it('should sort hooks by source priority', async () => { + // This test requires multiple sources, which would need getUserHooks + // For now, we test with extensions which are processed after project hooks + const projectHooks = { + [HookEventName.PreToolUse]: [ + { + hooks: [ + { + type: HookType.Command, + command: 'echo project', + name: 
'project-hook', + }, + ], + }, + ], + }; + mockConfig.getHooks = vi.fn().mockReturnValue(projectHooks); + + const registry = new HookRegistry(mockConfig); + await registry.initialize(); + + const hooks = registry.getHooksForEvent(HookEventName.PreToolUse); + expect(hooks).toHaveLength(1); + expect(hooks[0].source).toBe(HooksConfigSource.Project); + }); + }); + + describe('setHookEnabled', () => { + it('should enable a disabled hook', async () => { + mockConfig.getDisabledHooks = vi.fn().mockReturnValue(['test-hook']); + const hooksConfig = { + [HookEventName.PreToolUse]: [ + { + hooks: [ + { + type: HookType.Command, + command: 'echo test', + name: 'test-hook', + }, + ], + }, + ], + }; + mockConfig.getHooks = vi.fn().mockReturnValue(hooksConfig); + + const registry = new HookRegistry(mockConfig); + await registry.initialize(); + + expect(registry.getHooksForEvent(HookEventName.PreToolUse)).toHaveLength( + 0, + ); + + registry.setHookEnabled('test-hook', true); + + const hooks = registry.getHooksForEvent(HookEventName.PreToolUse); + expect(hooks).toHaveLength(1); + expect(hooks[0].enabled).toBe(true); + }); + + it('should disable an enabled hook', async () => { + const hooksConfig = { + [HookEventName.PreToolUse]: [ + { + hooks: [ + { + type: HookType.Command, + command: 'echo test', + name: 'test-hook', + }, + ], + }, + ], + }; + mockConfig.getHooks = vi.fn().mockReturnValue(hooksConfig); + + const registry = new HookRegistry(mockConfig); + await registry.initialize(); + + expect(registry.getHooksForEvent(HookEventName.PreToolUse)).toHaveLength( + 1, + ); + + registry.setHookEnabled('test-hook', false); + + expect(registry.getHooksForEvent(HookEventName.PreToolUse)).toHaveLength( + 0, + ); + }); + + it('should update all hooks with matching name', async () => { + const hooksConfig = { + [HookEventName.PreToolUse]: [ + { + hooks: [ + { type: HookType.Command, command: 'echo 1', name: 'same-name' }, + ], + }, + ], + [HookEventName.PostToolUse]: [ + { + hooks: [ + { 
type: HookType.Command, command: 'echo 2', name: 'same-name' }, + ], + }, + ], + }; + mockConfig.getHooks = vi.fn().mockReturnValue(hooksConfig); + + const registry = new HookRegistry(mockConfig); + await registry.initialize(); + + expect(registry.getAllHooks()).toHaveLength(2); + expect(registry.getHooksForEvent(HookEventName.PreToolUse)).toHaveLength( + 1, + ); + expect(registry.getHooksForEvent(HookEventName.PostToolUse)).toHaveLength( + 1, + ); + + registry.setHookEnabled('same-name', false); + + expect(registry.getHooksForEvent(HookEventName.PreToolUse)).toHaveLength( + 0, + ); + expect(registry.getHooksForEvent(HookEventName.PostToolUse)).toHaveLength( + 0, + ); + }); + }); + + describe('hook validation', () => { + it('should discard hooks with invalid type', async () => { + const hooksConfig = { + [HookEventName.PreToolUse]: [ + { + hooks: [ + { + type: 'invalid-type', + command: 'echo test', + } as unknown as HookConfig, + ], + }, + ], + }; + mockConfig.getHooks = vi.fn().mockReturnValue(hooksConfig); + + const registry = new HookRegistry(mockConfig); + await registry.initialize(); + + expect(registry.getAllHooks()).toHaveLength(0); + }); + + it('should discard command hooks without command field', async () => { + const hooksConfig = { + [HookEventName.PreToolUse]: [ + { + hooks: [{ type: HookType.Command } as HookConfig], + }, + ], + }; + mockConfig.getHooks = vi.fn().mockReturnValue(hooksConfig); + + const registry = new HookRegistry(mockConfig); + await registry.initialize(); + + expect(registry.getAllHooks()).toHaveLength(0); + }); + + it('should skip invalid event names', async () => { + const hooksConfig = { + InvalidEventName: [ + { + hooks: [{ type: HookType.Command, command: 'echo test' }], + }, + ], + }; + mockConfig.getHooks = vi.fn().mockReturnValue(hooksConfig); + + const registry = new HookRegistry(mockConfig, mockFeedbackEmitter); + await registry.initialize(); + + expect(registry.getAllHooks()).toHaveLength(0); + 
expect(mockFeedbackEmitter.emitFeedback).toHaveBeenCalledWith( + 'warning', + expect.stringContaining('Invalid hook event name'), + ); + }); + + it('should skip hooks config fields like enabled and disabled', async () => { + const hooksConfig = { + enabled: ['hook1'], + disabled: ['hook2'], + [HookEventName.PreToolUse]: [ + { + hooks: [ + { + type: HookType.Command, + command: 'echo test', + name: 'valid-hook', + }, + ], + }, + ], + }; + mockConfig.getHooks = vi.fn().mockReturnValue(hooksConfig); + + const registry = new HookRegistry(mockConfig); + await registry.initialize(); + + expect(registry.getAllHooks()).toHaveLength(1); + expect(registry.getAllHooks()[0].config.name).toBe('valid-hook'); + }); + }); + + describe('duplicate detection', () => { + it('should skip duplicate hooks with same name+source+event+matcher+sequential', async () => { + const hooksConfig = { + [HookEventName.PreToolUse]: [ + { + matcher: '*.ts', + sequential: true, + hooks: [ + { + type: HookType.Command, + command: 'echo test', + name: 'dup-hook', + }, + { + type: HookType.Command, + command: 'echo test', + name: 'dup-hook', + }, + ], + }, + ], + }; + mockConfig.getHooks = vi.fn().mockReturnValue(hooksConfig); + + const registry = new HookRegistry(mockConfig); + await registry.initialize(); + + expect(registry.getAllHooks()).toHaveLength(1); + }); + + it('should allow hooks with same name but different matcher', async () => { + const hooksConfig = { + [HookEventName.PreToolUse]: [ + { + matcher: '*.ts', + hooks: [ + { type: HookType.Command, command: 'echo ts', name: 'my-hook' }, + ], + }, + { + matcher: '*.js', + hooks: [ + { type: HookType.Command, command: 'echo js', name: 'my-hook' }, + ], + }, + ], + }; + mockConfig.getHooks = vi.fn().mockReturnValue(hooksConfig); + + const registry = new HookRegistry(mockConfig); + await registry.initialize(); + + expect(registry.getAllHooks()).toHaveLength(2); + }); + + it('should allow hooks with same name but different sequential', async () => { 
+ const hooksConfig = { + [HookEventName.PreToolUse]: [ + { + sequential: true, + hooks: [ + { type: HookType.Command, command: 'echo seq', name: 'my-hook' }, + ], + }, + { + sequential: false, + hooks: [ + { type: HookType.Command, command: 'echo par', name: 'my-hook' }, + ], + }, + ], + }; + mockConfig.getHooks = vi.fn().mockReturnValue(hooksConfig); + + const registry = new HookRegistry(mockConfig); + await registry.initialize(); + + expect(registry.getAllHooks()).toHaveLength(2); + }); + }); + + describe('extension hooks', () => { + it('should process hooks from active extensions', async () => { + const extensionHooks = { + [HookEventName.PreToolUse]: [ + { + hooks: [ + { type: HookType.Command, command: 'echo ext', name: 'ext-hook' }, + ], + }, + ], + }; + mockConfig.getExtensions = vi + .fn() + .mockReturnValue([{ isActive: true, hooks: extensionHooks }]); + + const registry = new HookRegistry(mockConfig); + await registry.initialize(); + + const allHooks = registry.getAllHooks(); + expect(allHooks).toHaveLength(1); + expect(allHooks[0].source).toBe(HooksConfigSource.Extensions); + expect(allHooks[0].config.name).toBe('ext-hook'); + }); + + it('should skip hooks from inactive extensions', async () => { + const extensionHooks = { + [HookEventName.PreToolUse]: [ + { + hooks: [{ type: HookType.Command, command: 'echo ext' }], + }, + ], + }; + mockConfig.getExtensions = vi + .fn() + .mockReturnValue([{ isActive: false, hooks: extensionHooks }]); + + const registry = new HookRegistry(mockConfig); + await registry.initialize(); + + expect(registry.getAllHooks()).toHaveLength(0); + }); + + it('should process multiple extensions', async () => { + mockConfig.getExtensions = vi.fn().mockReturnValue([ + { + isActive: true, + hooks: { + [HookEventName.PreToolUse]: [ + { + hooks: [ + { + type: HookType.Command, + command: 'echo ext1', + name: 'ext1-hook', + }, + ], + }, + ], + }, + }, + { + isActive: true, + hooks: { + [HookEventName.PreToolUse]: [ + { + hooks: [ + { + 
type: HookType.Command, + command: 'echo ext2', + name: 'ext2-hook', + }, + ], + }, + ], + }, + }, + ]); + + const registry = new HookRegistry(mockConfig); + await registry.initialize(); + + expect(registry.getAllHooks()).toHaveLength(2); + }); + }); + + describe('hook metadata', () => { + it('should preserve matcher in registry entry', async () => { + const hooksConfig = { + [HookEventName.PreToolUse]: [ + { + matcher: 'ReadFileTool', + hooks: [ + { + type: HookType.Command, + command: 'echo test', + name: 'matcher-hook', + }, + ], + }, + ], + }; + mockConfig.getHooks = vi.fn().mockReturnValue(hooksConfig); + + const registry = new HookRegistry(mockConfig); + await registry.initialize(); + + const hooks = registry.getAllHooks(); + expect(hooks[0].matcher).toBe('ReadFileTool'); + }); + + it('should preserve sequential flag in registry entry', async () => { + const hooksConfig = { + [HookEventName.PreToolUse]: [ + { + sequential: true, + hooks: [ + { + type: HookType.Command, + command: 'echo test', + name: 'seq-hook', + }, + ], + }, + ], + }; + mockConfig.getHooks = vi.fn().mockReturnValue(hooksConfig); + + const registry = new HookRegistry(mockConfig); + await registry.initialize(); + + const hooks = registry.getAllHooks(); + expect(hooks[0].sequential).toBe(true); + }); + + it('should add source to hook config', async () => { + const hooksConfig = { + [HookEventName.PreToolUse]: [ + { + hooks: [ + { + type: HookType.Command, + command: 'echo test', + name: 'source-hook', + }, + ], + }, + ], + }; + mockConfig.getHooks = vi.fn().mockReturnValue(hooksConfig); + + const registry = new HookRegistry(mockConfig); + await registry.initialize(); + + const hooks = registry.getAllHooks(); + expect(hooks[0].config.source).toBe(HooksConfigSource.Project); + }); + }); + + describe('getAllHooks', () => { + it('should return a copy of entries array', async () => { + const hooksConfig = { + [HookEventName.PreToolUse]: [ + { + hooks: [ + { + type: HookType.Command, + command: 
'echo test', + name: 'test-hook', + }, + ], + }, + ], + }; + mockConfig.getHooks = vi.fn().mockReturnValue(hooksConfig); + + const registry = new HookRegistry(mockConfig); + await registry.initialize(); + + const hooks1 = registry.getAllHooks(); + const hooks2 = registry.getAllHooks(); + + expect(hooks1).toEqual(hooks2); + expect(hooks1).not.toBe(hooks2); // Different array reference + }); + }); +}); diff --git a/packages/core/src/hooks/hookRegistry.ts b/packages/core/src/hooks/hookRegistry.ts new file mode 100644 index 000000000..54251c495 --- /dev/null +++ b/packages/core/src/hooks/hookRegistry.ts @@ -0,0 +1,353 @@ +/** + * @license + * Copyright 2026 Qwen Team + * SPDX-License-Identifier: Apache-2.0 + */ + +import type { HookDefinition, HookConfig } from './types.js'; +import { + HookEventName, + HooksConfigSource, + HOOKS_CONFIG_FIELDS, +} from './types.js'; +import { createDebugLogger } from '../utils/debugLogger.js'; +import { TrustedHooksManager } from './trustedHooks.js'; + +const debugLogger = createDebugLogger('HOOK_REGISTRY'); + +/** + * Extension with hooks support + */ +export interface ExtensionWithHooks { + isActive: boolean; + hooks?: { [K in HookEventName]?: HookDefinition[] }; +} + +/** + * Configuration interface for HookRegistry + * This abstracts the Config dependency to make the registry more flexible + */ +export interface HookRegistryConfig { + getProjectRoot(): string; + isTrustedFolder(): boolean; + getHooks(): { [K in HookEventName]?: HookDefinition[] } | undefined; + getProjectHooks(): { [K in HookEventName]?: HookDefinition[] } | undefined; + getDisabledHooks(): string[]; + getExtensions(): ExtensionWithHooks[]; +} + +/** + * Feedback emitter interface for warning/info messages + */ +export interface FeedbackEmitter { + emitFeedback(type: 'warning' | 'info' | 'error', message: string): void; +} + +/** + * Hook registry entry with source information + */ +export interface HookRegistryEntry { + config: HookConfig; + source: 
HooksConfigSource; + eventName: HookEventName; + matcher?: string; + sequential?: boolean; + enabled: boolean; +} + +/** + * Hook registry that loads and validates hook definitions from multiple sources + */ +export class HookRegistry { + private readonly config: HookRegistryConfig; + private readonly feedbackEmitter?: FeedbackEmitter; + private entries: HookRegistryEntry[] = []; + + constructor(config: HookRegistryConfig, feedbackEmitter?: FeedbackEmitter) { + this.config = config; + this.feedbackEmitter = feedbackEmitter; + } + + /** + * Initialize the registry by processing hooks from config + */ + async initialize(): Promise { + this.entries = []; + this.processHooksFromConfig(); + + debugLogger.debug( + `Hook registry initialized with ${this.entries.length} hook entries`, + ); + } + + /** + * Get all hook entries for a specific event + */ + getHooksForEvent(eventName: HookEventName): HookRegistryEntry[] { + return this.entries + .filter((entry) => entry.eventName === eventName && entry.enabled) + .sort( + (a, b) => + this.getSourcePriority(a.source) - this.getSourcePriority(b.source), + ); + } + + /** + * Get all registered hooks + */ + getAllHooks(): HookRegistryEntry[] { + return [...this.entries]; + } + + /** + * Enable or disable a specific hook + */ + setHookEnabled(hookName: string, enabled: boolean): void { + const updated = this.entries.filter((entry) => { + const name = this.getHookName(entry); + if (name === hookName) { + entry.enabled = enabled; + return true; + } + return false; + }); + + if (updated.length > 0) { + debugLogger.info( + `${enabled ? 
'Enabled' : 'Disabled'} ${updated.length} hook(s) matching "${hookName}"`, + ); + } else { + debugLogger.warn(`No hooks found matching "${hookName}"`); + } + } + + /** + * Get hook name for identification and display purposes + */ + private getHookName( + entry: HookRegistryEntry | { config: HookConfig }, + ): string { + return entry.config.name || entry.config.command || 'unknown-command'; + } + + /** + * Check for untrusted project hooks and warn the user + */ + private checkProjectHooksTrust(): void { + const projectHooks = this.config.getProjectHooks(); + if (!projectHooks) return; + + try { + const trustedHooksManager = new TrustedHooksManager(); + const untrusted = trustedHooksManager.getUntrustedHooks( + this.config.getProjectRoot(), + projectHooks, + ); + + if (untrusted.length > 0) { + const message = `WARNING: The following project-level hooks have been detected in this workspace: +${untrusted.map((h: string) => ` - ${h}`).join('\n')} + +These hooks will be executed. If you did not configure these hooks or do not trust this project, +please review the project settings (.qwen/settings.json) and remove them.`; + this.feedbackEmitter?.emitFeedback('warning', message); + + // Trust them so we don't warn again + trustedHooksManager.trustHooks( + this.config.getProjectRoot(), + projectHooks, + ); + } + } catch { + debugLogger.warn('Failed to check project hooks trust'); + } + } + + /** + * Process hooks from the config that was already loaded by the CLI + */ + private processHooksFromConfig(): void { + if (this.config.isTrustedFolder()) { + this.checkProjectHooksTrust(); + } + + // Get hooks from the main config (this comes from the merged settings) + const configHooks = this.config.getHooks(); + if (configHooks) { + if (this.config.isTrustedFolder()) { + this.processHooksConfiguration(configHooks, HooksConfigSource.Project); + } else { + debugLogger.warn( + 'Project hooks disabled because the folder is not trusted.', + ); + } + } + + // Get hooks from 
extensions + const extensions = this.config.getExtensions() || []; + for (const extension of extensions) { + if (extension.isActive && extension.hooks) { + this.processHooksConfiguration( + extension.hooks, + HooksConfigSource.Extensions, + ); + } + } + } + + /** + * Process hooks configuration and add entries + */ + private processHooksConfiguration( + hooksConfig: { [K in HookEventName]?: HookDefinition[] }, + source: HooksConfigSource, + ): void { + for (const [eventName, definitions] of Object.entries(hooksConfig)) { + if (HOOKS_CONFIG_FIELDS.includes(eventName)) { + continue; + } + + if (!this.isValidEventName(eventName)) { + this.feedbackEmitter?.emitFeedback( + 'warning', + `Invalid hook event name: "${eventName}" from ${source} config. Skipping.`, + ); + continue; + } + + const typedEventName = eventName; + + if (!Array.isArray(definitions)) { + debugLogger.warn( + `Hook definitions for event "${eventName}" from source "${source}" is not an array. Skipping.`, + ); + continue; + } + + for (const definition of definitions) { + this.processHookDefinition(definition, typedEventName, source); + } + } + } + + /** + * Process a single hook definition + */ + private processHookDefinition( + definition: HookDefinition, + eventName: HookEventName, + source: HooksConfigSource, + ): void { + if ( + !definition || + typeof definition !== 'object' || + !Array.isArray(definition.hooks) + ) { + debugLogger.warn( + `Discarding invalid hook definition for ${eventName} from ${source}:`, + definition, + ); + return; + } + + // Get disabled hooks list from settings + const disabledHooks = this.config.getDisabledHooks(); + + for (const hookConfig of definition.hooks) { + if ( + hookConfig && + typeof hookConfig === 'object' && + this.validateHookConfig(hookConfig, eventName, source) + ) { + // Check if this hook is in the disabled list + const hookName = this.getHookName({ config: hookConfig }); + const isDisabled = disabledHooks.includes(hookName); + + // Check for duplicate 
hooks (same name+source+eventName+matcher+sequential; name falls back to command) + const isDuplicate = this.entries.some( + (existing) => + existing.eventName === eventName && + existing.source === source && + this.getHookName(existing) === hookName && + existing.matcher === definition.matcher && + existing.sequential === definition.sequential, + ); + if (isDuplicate) { + debugLogger.debug( + `Skipping duplicate hook "${hookName}" for ${eventName} from ${source}`, + ); + continue; + } + + // Add source to hook config + hookConfig.source = source; + + this.entries.push({ + config: hookConfig, + source, + eventName, + matcher: definition.matcher, + sequential: definition.sequential, + enabled: !isDisabled, + }); + } else { + // Invalid hooks are logged and discarded here, they won't reach HookRunner + debugLogger.warn( + `Discarding invalid hook configuration for ${eventName} from ${source}:`, + hookConfig, + ); + } + } + } + + /** + * Validate a hook configuration + */ + private validateHookConfig( + config: HookConfig, + eventName: HookEventName, + source: HooksConfigSource, + ): boolean { + if (!config.type || !['command', 'plugin'].includes(config.type)) { + debugLogger.warn( + `Invalid hook ${eventName} from ${source} type: ${config.type}`, + ); + return false; + } + + if (config.type === 'command' && !config.command) { + debugLogger.warn( + `Command hook ${eventName} from ${source} missing command field`, + ); + return false; + } + + return true; + } + + /** + * Check if an event name is valid + */ + private isValidEventName(eventName: string): eventName is HookEventName { + const validEventNames: string[] = Object.values(HookEventName); + return validEventNames.includes(eventName); + } + + /** + * Get source priority (lower number = higher priority) + */ + private getSourcePriority(source: HooksConfigSource): number { + switch (source) { + case HooksConfigSource.Project: + return 1; + case HooksConfigSource.User: + return 2; + case HooksConfigSource.System: + return 3; + case 
HooksConfigSource.Extensions: + return 4; + default: + return 999; + } + } +} diff --git a/packages/core/src/hooks/hookRunner.test.ts b/packages/core/src/hooks/hookRunner.test.ts new file mode 100644 index 000000000..6be326ef0 --- /dev/null +++ b/packages/core/src/hooks/hookRunner.test.ts @@ -0,0 +1,684 @@ +/** + * @license + * Copyright 2026 Qwen Team + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { HookRunner } from './hookRunner.js'; +import { HookEventName, HookType, HooksConfigSource } from './types.js'; +import type { HookConfig, HookInput } from './types.js'; + +// Hoisted mock +const mockSpawn = vi.hoisted(() => vi.fn()); + +vi.mock('node:child_process', async () => { + const actual = await vi.importActual('node:child_process'); + return { + ...actual, + spawn: mockSpawn, + }; +}); + +describe('HookRunner', () => { + let hookRunner: HookRunner; + + beforeEach(() => { + hookRunner = new HookRunner(); + vi.clearAllMocks(); + }); + + const createMockInput = (overrides: Partial = {}): HookInput => ({ + session_id: 'test-session', + transcript_path: '/test/transcript', + cwd: '/test', + hook_event_name: 'test-event', + timestamp: '2024-01-01T00:00:00Z', + ...overrides, + }); + + const createMockProcess = ( + exitCode: number = 0, + stdout: string = '', + stderr: string = '', + ) => { + const mockProcess = { + stdin: { + on: vi.fn(), + write: vi.fn(), + end: vi.fn(), + }, + stdout: { + on: vi.fn((event: string, callback: (data: Buffer) => void) => { + if (event === 'data' && stdout) { + setTimeout(() => callback(Buffer.from(stdout)), 0); + } + }), + }, + stderr: { + on: vi.fn((event: string, callback: (data: Buffer) => void) => { + if (event === 'data' && stderr) { + setTimeout(() => callback(Buffer.from(stderr)), 0); + } + }), + }, + on: vi.fn((event: string, callback: (code: number) => void) => { + if (event === 'close') { + setTimeout(() => callback(exitCode), 0); + } + }), + kill: 
vi.fn(), + }; + return mockProcess; + }; + + describe('executeHook', () => { + it('should return error when hook command is missing', async () => { + const hookConfig: HookConfig = { + type: HookType.Command, + command: '', + source: HooksConfigSource.Project, + }; + const input = createMockInput(); + + const result = await hookRunner.executeHook( + hookConfig, + HookEventName.PreToolUse, + input, + ); + + expect(result.success).toBe(false); + expect(result.error?.message).toBe('Command hook missing command'); + }); + + it('should execute hook and return success for exit code 0', async () => { + const mockProcess = createMockProcess(0, 'hello'); + mockSpawn.mockImplementation(() => mockProcess); + + const hookConfig: HookConfig = { + type: HookType.Command, + command: 'echo hello', + source: HooksConfigSource.Project, + }; + const input = createMockInput(); + + const result = await hookRunner.executeHook( + hookConfig, + HookEventName.PreToolUse, + input, + ); + + expect(result.success).toBe(true); + expect(result.stdout).toBe('hello'); + expect(mockSpawn).toHaveBeenCalled(); + }); + + it('should return failure for non-zero exit code', async () => { + const mockProcess = createMockProcess(1, '', 'error'); + mockSpawn.mockImplementation(() => mockProcess); + + const hookConfig: HookConfig = { + type: HookType.Command, + command: 'exit 1', + source: HooksConfigSource.Project, + }; + const input = createMockInput(); + + const result = await hookRunner.executeHook( + hookConfig, + HookEventName.PreToolUse, + input, + ); + + expect(result.success).toBe(false); + expect(result.exitCode).toBe(1); + }); + + it('should parse JSON output from stdout', async () => { + const output = JSON.stringify({ + decision: 'allow', + systemMessage: 'test', + }); + const mockProcess = createMockProcess(0, output); + mockSpawn.mockImplementation(() => mockProcess); + + const hookConfig: HookConfig = { + type: HookType.Command, + command: 'echo json', + source: HooksConfigSource.Project, + 
}; + const input = createMockInput(); + + const result = await hookRunner.executeHook( + hookConfig, + HookEventName.PreToolUse, + input, + ); + + expect(result.success).toBe(true); + expect(result.output?.decision).toBe('allow'); + expect(result.output?.systemMessage).toBe('test'); + }); + + it('should convert plain text to allow output on success', async () => { + const mockProcess = createMockProcess(0, 'some text output'); + mockSpawn.mockImplementation(() => mockProcess); + + const hookConfig: HookConfig = { + type: HookType.Command, + command: 'echo text', + source: HooksConfigSource.Project, + }; + const input = createMockInput(); + + const result = await hookRunner.executeHook( + hookConfig, + HookEventName.PreToolUse, + input, + ); + + expect(result.success).toBe(true); + expect(result.output?.decision).toBe('allow'); + expect(result.output?.systemMessage).toBe('some text output'); + }); + + it('should convert plain text to deny output on exit code 2', async () => { + const mockProcess = createMockProcess(2, '', 'error message'); + mockSpawn.mockImplementation(() => mockProcess); + + const hookConfig: HookConfig = { + type: HookType.Command, + command: 'echo error && exit 2', + source: HooksConfigSource.Project, + }; + const input = createMockInput(); + + const result = await hookRunner.executeHook( + hookConfig, + HookEventName.PreToolUse, + input, + ); + + expect(result.success).toBe(false); + expect(result.output?.decision).toBe('deny'); + expect(result.output?.reason).toBe('error message'); + }); + + it('should ignore stdout on exit code 2 and use stderr only', async () => { + // Exit code 2 should ignore stdout and use stderr as the error message + const mockProcess = createMockProcess( + 2, + 'stdout should be ignored', + 'stderr error message', + ); + mockSpawn.mockImplementation(() => mockProcess); + + const hookConfig: HookConfig = { + type: HookType.Command, + command: 'echo stdout && echo stderr >&2 && exit 2', + source: 
HooksConfigSource.Project, + }; + const input = createMockInput(); + + const result = await hookRunner.executeHook( + hookConfig, + HookEventName.PreToolUse, + input, + ); + + expect(result.success).toBe(false); + expect(result.output?.decision).toBe('deny'); + expect(result.output?.reason).toBe('stderr error message'); + }); + + it('should not parse JSON on exit code 2', async () => { + // Exit code 2 should ignore JSON in stdout + const mockProcess = createMockProcess( + 2, + '{"decision":"allow"}', + 'blocking error', + ); + mockSpawn.mockImplementation(() => mockProcess); + + const hookConfig: HookConfig = { + type: HookType.Command, + command: 'echo json && exit 2', + source: HooksConfigSource.Project, + }; + const input = createMockInput(); + + const result = await hookRunner.executeHook( + hookConfig, + HookEventName.PreToolUse, + input, + ); + + // Should NOT parse JSON, should use stderr as reason + expect(result.success).toBe(false); + expect(result.output?.decision).toBe('deny'); + expect(result.output?.reason).toBe('blocking error'); + }); + + it('should handle exit code 1 as non-blocking warning', async () => { + const mockProcess = createMockProcess(1, '', 'warning'); + mockSpawn.mockImplementation(() => mockProcess); + + const hookConfig: HookConfig = { + type: HookType.Command, + command: 'exit 1', + source: HooksConfigSource.Project, + }; + const input = createMockInput(); + + const result = await hookRunner.executeHook( + hookConfig, + HookEventName.PreToolUse, + input, + ); + + expect(result.success).toBe(false); + expect(result.output?.decision).toBe('allow'); + expect(result.output?.systemMessage).toBe('Warning: warning'); + }); + + it('should include duration in result', async () => { + const mockProcess = createMockProcess(0, 'test'); + mockSpawn.mockImplementation(() => mockProcess); + + const hookConfig: HookConfig = { + type: HookType.Command, + command: 'echo test', + source: HooksConfigSource.Project, + }; + const input = 
createMockInput(); + + const result = await hookRunner.executeHook( + hookConfig, + HookEventName.PreToolUse, + input, + ); + + expect(result.duration).toBeGreaterThanOrEqual(0); + }); + + it('should handle process error', async () => { + const mockProcess = { + stdin: { on: vi.fn(), write: vi.fn(), end: vi.fn() }, + stdout: { on: vi.fn() }, + stderr: { on: vi.fn() }, + on: vi.fn((event: string, callback: (error: Error) => void) => { + if (event === 'error') { + callback(new Error('spawn error')); + } + }), + kill: vi.fn(), + }; + mockSpawn.mockImplementation(() => mockProcess); + + const hookConfig: HookConfig = { + type: HookType.Command, + command: 'echo test', + source: HooksConfigSource.Project, + }; + const input = createMockInput(); + + const result = await hookRunner.executeHook( + hookConfig, + HookEventName.PreToolUse, + input, + ); + + expect(result.success).toBe(false); + expect(result.error).toBeDefined(); + }); + }); + + describe('executeHooksParallel', () => { + it('should execute multiple hooks in parallel', async () => { + const mockProcess = createMockProcess(0, 'result'); + mockSpawn.mockImplementation(() => mockProcess); + + const hookConfigs: HookConfig[] = [ + { + type: HookType.Command, + command: 'echo hook1', + source: HooksConfigSource.Project, + }, + { + type: HookType.Command, + command: 'echo hook2', + source: HooksConfigSource.Project, + }, + ]; + const input = createMockInput(); + + const results = await hookRunner.executeHooksParallel( + hookConfigs, + HookEventName.PreToolUse, + input, + ); + + expect(results).toHaveLength(2); + expect(results[0].success).toBe(true); + expect(results[1].success).toBe(true); + }); + + it('should call onHookStart and onHookEnd callbacks', async () => { + const mockProcess = createMockProcess(0, 'result'); + mockSpawn.mockImplementation(() => mockProcess); + + const hookConfigs: HookConfig[] = [ + { + type: HookType.Command, + command: 'echo test', + source: HooksConfigSource.Project, + }, + ]; + const 
input = createMockInput(); + const onHookStart = vi.fn(); + const onHookEnd = vi.fn(); + + await hookRunner.executeHooksParallel( + hookConfigs, + HookEventName.PreToolUse, + input, + onHookStart, + onHookEnd, + ); + + expect(onHookStart).toHaveBeenCalledTimes(1); + expect(onHookEnd).toHaveBeenCalledTimes(1); + }); + }); + + describe('executeHooksSequential', () => { + it('should execute hooks sequentially', async () => { + const mockProcess = createMockProcess(0, 'result'); + mockSpawn.mockImplementation(() => mockProcess); + + const hookConfigs: HookConfig[] = [ + { + type: HookType.Command, + command: 'echo first', + source: HooksConfigSource.Project, + }, + { + type: HookType.Command, + command: 'echo second', + source: HooksConfigSource.Project, + }, + ]; + const input = createMockInput(); + + const results = await hookRunner.executeHooksSequential( + hookConfigs, + HookEventName.PreToolUse, + input, + ); + + expect(results).toHaveLength(2); + expect(results[0].success).toBe(true); + expect(results[1].success).toBe(true); + }); + + it('should call onHookStart and onHookEnd callbacks', async () => { + const mockProcess = createMockProcess(0, 'result'); + mockSpawn.mockImplementation(() => mockProcess); + + const hookConfigs: HookConfig[] = [ + { + type: HookType.Command, + command: 'echo test', + source: HooksConfigSource.Project, + }, + ]; + const input = createMockInput(); + const onHookStart = vi.fn(); + const onHookEnd = vi.fn(); + + await hookRunner.executeHooksSequential( + hookConfigs, + HookEventName.PreToolUse, + input, + onHookStart, + onHookEnd, + ); + + expect(onHookStart).toHaveBeenCalledTimes(1); + expect(onHookEnd).toHaveBeenCalledTimes(1); + }); + }); + + describe('output truncation', () => { + it('should truncate stdout when exceeding MAX_OUTPUT_LENGTH', async () => { + // Create a process that outputs more than 1MB of data + const largeOutput = 'x'.repeat(2 * 1024 * 1024); // 2MB + const mockProcess = createMockProcess(0, largeOutput); + 
mockSpawn.mockImplementation(() => mockProcess); + + const hookConfig: HookConfig = { + type: HookType.Command, + command: 'echo large', + source: HooksConfigSource.Project, + }; + const input = createMockInput(); + + const result = await hookRunner.executeHook( + hookConfig, + HookEventName.PreToolUse, + input, + ); + + // stdout should be truncated to MAX_OUTPUT_LENGTH (1MB) + expect(result.stdout?.length).toBeLessThanOrEqual(1024 * 1024); + }); + + it('should truncate stderr when exceeding MAX_OUTPUT_LENGTH', async () => { + const largeOutput = 'x'.repeat(2 * 1024 * 1024); // 2MB + const mockProcess = createMockProcess(0, '', largeOutput); + mockSpawn.mockImplementation(() => mockProcess); + + const hookConfig: HookConfig = { + type: HookType.Command, + command: 'echo large', + source: HooksConfigSource.Project, + }; + const input = createMockInput(); + + const result = await hookRunner.executeHook( + hookConfig, + HookEventName.PreToolUse, + input, + ); + + // stderr should be truncated to MAX_OUTPUT_LENGTH (1MB) + expect(result.stderr?.length).toBeLessThanOrEqual(1024 * 1024); + }); + + it('should handle partial truncation gracefully', async () => { + // Output exactly at the limit + const exactOutput = 'x'.repeat(1024 * 1024); // 1MB exactly + const mockProcess = createMockProcess(0, exactOutput); + mockSpawn.mockImplementation(() => mockProcess); + + const hookConfig: HookConfig = { + type: HookType.Command, + command: 'echo exact', + source: HooksConfigSource.Project, + }; + const input = createMockInput(); + + const result = await hookRunner.executeHook( + hookConfig, + HookEventName.PreToolUse, + input, + ); + + expect(result.stdout?.length).toBe(1024 * 1024); + }); + }); + + describe('expandCommand', () => { + it('should expand GEMINI_PROJECT_DIR placeholder', async () => { + const mockProcess = createMockProcess(0, 'result'); + mockSpawn.mockImplementation(() => mockProcess); + + const hookConfig: HookConfig = { + type: HookType.Command, + command: 
'echo $GEMINI_PROJECT_DIR', + source: HooksConfigSource.Project, + }; + const input = createMockInput({ cwd: '/test/project' }); + + await hookRunner.executeHook(hookConfig, HookEventName.PreToolUse, input); + + // Verify spawn was called with expanded command + const spawnCall = mockSpawn.mock.calls[0]; + const command = spawnCall[1][spawnCall[1].length - 1]; // Last arg is the command + expect(command).toContain('/test/project'); + }); + + it('should expand CLAUDE_PROJECT_DIR placeholder for compatibility', async () => { + const mockProcess = createMockProcess(0, 'result'); + mockSpawn.mockImplementation(() => mockProcess); + + const hookConfig: HookConfig = { + type: HookType.Command, + command: 'echo $CLAUDE_PROJECT_DIR', + source: HooksConfigSource.Project, + }; + const input = createMockInput({ cwd: '/test/project' }); + + await hookRunner.executeHook(hookConfig, HookEventName.PreToolUse, input); + + const spawnCall = mockSpawn.mock.calls[0]; + const command = spawnCall[1][spawnCall[1].length - 1]; // Last arg is the command + expect(command).toContain('/test/project'); + }); + + it('should not modify command without placeholders', async () => { + const mockProcess = createMockProcess(0, 'result'); + mockSpawn.mockImplementation(() => mockProcess); + + const hookConfig: HookConfig = { + type: HookType.Command, + command: 'echo hello', + source: HooksConfigSource.Project, + }; + const input = createMockInput({ cwd: '/test/project' }); + + await hookRunner.executeHook(hookConfig, HookEventName.PreToolUse, input); + + const spawnCall = mockSpawn.mock.calls[0]; + const command = spawnCall[1][spawnCall[1].length - 1]; // Last arg is the command + expect(command).toBe('echo hello'); + }); + }); + + describe('convertPlainTextToHookOutput', () => { + it('should convert plain text to allow output on success', async () => { + const mockProcess = createMockProcess(0, 'plain text response'); + mockSpawn.mockImplementation(() => mockProcess); + + const hookConfig: 
HookConfig = { + type: HookType.Command, + command: 'echo text', + source: HooksConfigSource.Project, + }; + const input = createMockInput(); + + const result = await hookRunner.executeHook( + hookConfig, + HookEventName.PreToolUse, + input, + ); + + expect(result.success).toBe(true); + expect(result.output?.decision).toBe('allow'); + expect(result.output?.systemMessage).toBe('plain text response'); + }); + + it('should convert non-zero exit code to deny output', async () => { + const mockProcess = createMockProcess(3, '', 'error message'); + mockSpawn.mockImplementation(() => mockProcess); + + const hookConfig: HookConfig = { + type: HookType.Command, + command: 'exit 3', + source: HooksConfigSource.Project, + }; + const input = createMockInput(); + + const result = await hookRunner.executeHook( + hookConfig, + HookEventName.PreToolUse, + input, + ); + + expect(result.success).toBe(false); + expect(result.output?.decision).toBe('deny'); + expect(result.output?.reason).toBe('error message'); + }); + + it('should use stderr when stdout is empty on success', async () => { + const mockProcess = createMockProcess(0, '', 'stderr output'); + mockSpawn.mockImplementation(() => mockProcess); + + const hookConfig: HookConfig = { + type: HookType.Command, + command: 'echo test', + source: HooksConfigSource.Project, + }; + const input = createMockInput(); + + const result = await hookRunner.executeHook( + hookConfig, + HookEventName.PreToolUse, + input, + ); + + expect(result.output?.systemMessage).toBe('stderr output'); + }); + + it('should handle empty output gracefully', async () => { + const mockProcess = createMockProcess(0, '', ''); + mockSpawn.mockImplementation(() => mockProcess); + + const hookConfig: HookConfig = { + type: HookType.Command, + command: 'echo test', + source: HooksConfigSource.Project, + }; + const input = createMockInput(); + + const result = await hookRunner.executeHook( + hookConfig, + HookEventName.PreToolUse, + input, + ); + + 
expect(result.output).toBeUndefined(); + }); + + it('should parse nested JSON strings', async () => { + const nestedJson = JSON.stringify(JSON.stringify({ decision: 'allow' })); + const mockProcess = createMockProcess(0, nestedJson); + mockSpawn.mockImplementation(() => mockProcess); + + const hookConfig: HookConfig = { + type: HookType.Command, + command: 'echo json', + source: HooksConfigSource.Project, + }; + const input = createMockInput(); + + const result = await hookRunner.executeHook( + hookConfig, + HookEventName.PreToolUse, + input, + ); + + expect(result.output?.decision).toBe('allow'); + }); + }); +}); diff --git a/packages/core/src/hooks/hookRunner.ts b/packages/core/src/hooks/hookRunner.ts new file mode 100644 index 000000000..c688e4324 --- /dev/null +++ b/packages/core/src/hooks/hookRunner.ts @@ -0,0 +1,427 @@ +/** + * @license + * Copyright 2026 Qwen Team + * SPDX-License-Identifier: Apache-2.0 + */ + +import { spawn } from 'node:child_process'; +import { HookEventName } from './types.js'; +import type { + HookConfig, + HookInput, + HookOutput, + HookExecutionResult, + PreToolUseInput, + UserPromptSubmitInput, +} from './types.js'; +import { createDebugLogger } from '../utils/debugLogger.js'; +import { + escapeShellArg, + getShellConfiguration, + type ShellType, +} from '../utils/shell-utils.js'; + +const debugLogger = createDebugLogger('TRUSTED_HOOKS'); + +/** + * Default timeout for hook execution (60 seconds) + */ +const DEFAULT_HOOK_TIMEOUT = 60000; + +/** + * Maximum length for stdout/stderr output (1MB) + * Prevents memory issues from unbounded output + */ +const MAX_OUTPUT_LENGTH = 1024 * 1024; + +/** + * Exit code constants for hook execution + */ +const EXIT_CODE_SUCCESS = 0; +const EXIT_CODE_NON_BLOCKING_ERROR = 1; + +/** + * Hook runner that executes command hooks + */ +export class HookRunner { + /** + * Execute a single hook + */ + async executeHook( + hookConfig: HookConfig, + eventName: HookEventName, + input: HookInput, + ): Promise 
{ + const startTime = Date.now(); + + try { + return await this.executeCommandHook( + hookConfig, + eventName, + input, + startTime, + ); + } catch (error) { + const duration = Date.now() - startTime; + const hookId = hookConfig.name || hookConfig.command || 'unknown'; + const errorMessage = `Hook execution failed for event '${eventName}' (hook: ${hookId}): ${error}`; + debugLogger.warn(`Hook execution error (non-fatal): ${errorMessage}`); + + return { + hookConfig, + eventName, + success: false, + error: error instanceof Error ? error : new Error(errorMessage), + duration, + }; + } + } + + /** + * Execute multiple hooks in parallel + */ + async executeHooksParallel( + hookConfigs: HookConfig[], + eventName: HookEventName, + input: HookInput, + onHookStart?: (config: HookConfig, index: number) => void, + onHookEnd?: (config: HookConfig, result: HookExecutionResult) => void, + ): Promise { + const promises = hookConfigs.map(async (config, index) => { + onHookStart?.(config, index); + const result = await this.executeHook(config, eventName, input); + onHookEnd?.(config, result); + return result; + }); + + return Promise.all(promises); + } + + /** + * Execute multiple hooks sequentially + */ + async executeHooksSequential( + hookConfigs: HookConfig[], + eventName: HookEventName, + input: HookInput, + onHookStart?: (config: HookConfig, index: number) => void, + onHookEnd?: (config: HookConfig, result: HookExecutionResult) => void, + ): Promise { + const results: HookExecutionResult[] = []; + let currentInput = input; + + for (let i = 0; i < hookConfigs.length; i++) { + const config = hookConfigs[i]; + onHookStart?.(config, i); + const result = await this.executeHook(config, eventName, currentInput); + onHookEnd?.(config, result); + results.push(result); + + // If the hook succeeded and has output, use it to modify the input for the next hook + if (result.success && result.output) { + currentInput = this.applyHookOutputToInput( + currentInput, + result.output, + 
eventName, + ); + } + } + + return results; + } + + /** + * Apply hook output to modify input for the next hook in sequential execution + */ + private applyHookOutputToInput( + originalInput: HookInput, + hookOutput: HookOutput, + eventName: HookEventName, + ): HookInput { + // Create a copy of the original input + const modifiedInput = { ...originalInput }; + + // Apply modifications based on hook output and event type + if (hookOutput.hookSpecificOutput) { + switch (eventName) { + case HookEventName.UserPromptSubmit: + if ('additionalContext' in hookOutput.hookSpecificOutput) { + // For UserPromptSubmit, we could modify the prompt with additional context + const additionalContext = + hookOutput.hookSpecificOutput['additionalContext']; + if ( + typeof additionalContext === 'string' && + 'prompt' in modifiedInput + ) { + (modifiedInput as UserPromptSubmitInput).prompt += + '\n\n' + additionalContext; + } + } + break; + + case HookEventName.PreToolUse: + if ('tool_input' in hookOutput.hookSpecificOutput) { + const newToolInput = hookOutput.hookSpecificOutput[ + 'tool_input' + ] as Record; + if (newToolInput && 'tool_input' in modifiedInput) { + (modifiedInput as PreToolUseInput).tool_input = { + ...(modifiedInput as PreToolUseInput).tool_input, + ...newToolInput, + }; + } + } + break; + + default: + // For other events, no special input modification is needed + break; + } + } + + return modifiedInput; + } + + /** + * Execute a command hook + */ + private async executeCommandHook( + hookConfig: HookConfig, + eventName: HookEventName, + input: HookInput, + startTime: number, + ): Promise { + const timeout = hookConfig.timeout ?? 
DEFAULT_HOOK_TIMEOUT; + + return new Promise((resolve) => { + if (!hookConfig.command) { + const errorMessage = 'Command hook missing command'; + debugLogger.warn( + `Hook configuration error (non-fatal): ${errorMessage}`, + ); + resolve({ + hookConfig, + eventName, + success: false, + error: new Error(errorMessage), + duration: Date.now() - startTime, + }); + return; + } + + let stdout = ''; + let stderr = ''; + let timedOut = false; + + const shellConfig = getShellConfiguration(); + const command = this.expandCommand( + hookConfig.command, + input, + shellConfig.shell, + ); + + const env = { + ...process.env, + GEMINI_PROJECT_DIR: input.cwd, + CLAUDE_PROJECT_DIR: input.cwd, // For compatibility + QWEN_PROJECT_DIR: input.cwd, // For Qwen Code compatibility + ...hookConfig.env, + }; + + const child = spawn( + shellConfig.executable, + [...shellConfig.argsPrefix, command], + { + env, + cwd: input.cwd, + stdio: ['pipe', 'pipe', 'pipe'], + shell: false, + }, + ); + + // Set up timeout + const timeoutHandle = setTimeout(() => { + timedOut = true; + child.kill('SIGTERM'); + + // Force kill after 5 seconds + setTimeout(() => { + if (!child.killed) { + child.kill('SIGKILL'); + } + }, 5000); + }, timeout); + + // Send input to stdin + if (child.stdin) { + child.stdin.on('error', (err: NodeJS.ErrnoException) => { + // Ignore EPIPE errors which happen when the child process closes stdin early + if (err.code !== 'EPIPE') { + debugLogger.debug(`Hook stdin error: ${err}`); + } + }); + + // Wrap write operations in try-catch to handle synchronous EPIPE errors + // that occur when the child process exits before we finish writing + try { + child.stdin.write(JSON.stringify(input)); + child.stdin.end(); + } catch (err) { + // Ignore EPIPE errors which happen when the child process closes stdin early + if (err instanceof Error && 'code' in err && err.code !== 'EPIPE') { + debugLogger.debug(`Hook stdin write error: ${err}`); + } + } + } + + // Collect stdout + child.stdout?.on('data', 
(data: Buffer) => { + if (stdout.length < MAX_OUTPUT_LENGTH) { + const remaining = MAX_OUTPUT_LENGTH - stdout.length; + stdout += data.slice(0, remaining).toString(); + if (data.length > remaining) { + debugLogger.warn( + `Hook stdout exceeded max length (${MAX_OUTPUT_LENGTH} bytes), truncating`, + ); + } + } + }); + + // Collect stderr + child.stderr?.on('data', (data: Buffer) => { + if (stderr.length < MAX_OUTPUT_LENGTH) { + const remaining = MAX_OUTPUT_LENGTH - stderr.length; + stderr += data.slice(0, remaining).toString(); + if (data.length > remaining) { + debugLogger.warn( + `Hook stderr exceeded max length (${MAX_OUTPUT_LENGTH} bytes), truncating`, + ); + } + } + }); + + // Handle process exit + child.on('close', (exitCode) => { + clearTimeout(timeoutHandle); + const duration = Date.now() - startTime; + + if (timedOut) { + resolve({ + hookConfig, + eventName, + success: false, + error: new Error(`Hook timed out after ${timeout}ms`), + stdout, + stderr, + duration, + }); + return; + } + + // Parse output + // Exit code 2 is a blocking error - ignore stdout, use stderr only + let output: HookOutput | undefined; + const isBlockingError = exitCode === 2; + + // For exit code 2, only use stderr (ignore stdout) + const textToParse = isBlockingError + ? 
stderr.trim() + : stdout.trim() || stderr.trim(); + + if (textToParse) { + // Only parse JSON on exit 0 + if (!isBlockingError) { + try { + let parsed = JSON.parse(textToParse); + if (typeof parsed === 'string') { + parsed = JSON.parse(parsed); + } + if (parsed && typeof parsed === 'object') { + output = parsed as HookOutput; + } + } catch { + // Not JSON, convert plain text to structured output + output = this.convertPlainTextToHookOutput( + textToParse, + exitCode || EXIT_CODE_SUCCESS, + ); + } + } else { + // Exit code 2: blocking error, use stderr as reason + output = this.convertPlainTextToHookOutput(textToParse, exitCode); + } + } + + resolve({ + hookConfig, + eventName, + success: exitCode === EXIT_CODE_SUCCESS, + output, + stdout, + stderr, + exitCode: exitCode || EXIT_CODE_SUCCESS, + duration, + }); + }); + + // Handle process errors + child.on('error', (error) => { + clearTimeout(timeoutHandle); + const duration = Date.now() - startTime; + + resolve({ + hookConfig, + eventName, + success: false, + error, + stdout, + stderr, + duration, + }); + }); + }); + } + + /** + * Expand command with environment variables and input context + */ + private expandCommand( + command: string, + input: HookInput, + shellType: ShellType, + ): string { + debugLogger.debug(`Expanding hook command: ${command} (cwd: ${input.cwd})`); + const escapedCwd = escapeShellArg(input.cwd, shellType); + return command + .replace(/\$GEMINI_PROJECT_DIR/g, () => escapedCwd) + .replace(/\$CLAUDE_PROJECT_DIR/g, () => escapedCwd); // For compatibility + } + + /** + * Convert plain text output to structured HookOutput + */ + private convertPlainTextToHookOutput( + text: string, + exitCode: number, + ): HookOutput { + if (exitCode === EXIT_CODE_SUCCESS) { + // Success - treat as system message or additional context + return { + decision: 'allow', + systemMessage: text, + }; + } else if (exitCode === EXIT_CODE_NON_BLOCKING_ERROR) { + // Non-blocking error (EXIT_CODE_NON_BLOCKING_ERROR = 1) + 
return { + decision: 'allow', + systemMessage: `Warning: ${text}`, + }; + } else { + // All other non-zero exit codes (including 2) are blocking + return { + decision: 'deny', + reason: text, + }; + } + } +} diff --git a/packages/core/src/hooks/hookSystem.test.ts b/packages/core/src/hooks/hookSystem.test.ts new file mode 100644 index 000000000..51f2d3050 --- /dev/null +++ b/packages/core/src/hooks/hookSystem.test.ts @@ -0,0 +1,328 @@ +/** + * @license + * Copyright 2026 Qwen Team + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { HookSystem } from './hookSystem.js'; +import { HookRegistry } from './hookRegistry.js'; +import { HookRunner } from './hookRunner.js'; +import { HookAggregator } from './hookAggregator.js'; +import { HookPlanner } from './hookPlanner.js'; +import { HookEventHandler } from './hookEventHandler.js'; +import { + HookType, + HooksConfigSource, + HookEventName, + type HookDecision, +} from './types.js'; +import type { Config } from '../config/config.js'; + +vi.mock('./hookRegistry.js'); +vi.mock('./hookRunner.js'); +vi.mock('./hookAggregator.js'); +vi.mock('./hookPlanner.js'); +vi.mock('./hookEventHandler.js'); + +describe('HookSystem', () => { + let mockConfig: Config; + let mockHookRegistry: HookRegistry; + let mockHookRunner: HookRunner; + let mockHookAggregator: HookAggregator; + let mockHookPlanner: HookPlanner; + let mockHookEventHandler: HookEventHandler; + let hookSystem: HookSystem; + + beforeEach(() => { + mockConfig = { + getSessionId: vi.fn().mockReturnValue('test-session-id'), + getTranscriptPath: vi.fn().mockReturnValue('/test/transcript'), + getWorkingDir: vi.fn().mockReturnValue('/test/cwd'), + } as unknown as Config; + + mockHookRegistry = { + initialize: vi.fn().mockResolvedValue(undefined), + setHookEnabled: vi.fn(), + getAllHooks: vi.fn().mockReturnValue([]), + } as unknown as HookRegistry; + + mockHookRunner = { + executeHooksSequential: vi.fn(), + 
executeHooksParallel: vi.fn(), + } as unknown as HookRunner; + + mockHookAggregator = { + aggregateResults: vi.fn(), + } as unknown as HookAggregator; + + mockHookPlanner = { + createExecutionPlan: vi.fn(), + } as unknown as HookPlanner; + + mockHookEventHandler = { + fireUserPromptSubmitEvent: vi.fn(), + fireStopEvent: vi.fn(), + } as unknown as HookEventHandler; + + vi.mocked(HookRegistry).mockImplementation(() => mockHookRegistry); + vi.mocked(HookRunner).mockImplementation(() => mockHookRunner); + vi.mocked(HookAggregator).mockImplementation(() => mockHookAggregator); + vi.mocked(HookPlanner).mockImplementation(() => mockHookPlanner); + vi.mocked(HookEventHandler).mockImplementation(() => mockHookEventHandler); + + hookSystem = new HookSystem(mockConfig); + }); + + describe('constructor', () => { + it('should create instance with all dependencies', () => { + expect(HookRegistry).toHaveBeenCalledWith(mockConfig); + expect(HookRunner).toHaveBeenCalled(); + expect(HookAggregator).toHaveBeenCalled(); + expect(HookPlanner).toHaveBeenCalledWith(mockHookRegistry); + expect(HookEventHandler).toHaveBeenCalledWith( + mockConfig, + mockHookPlanner, + mockHookRunner, + mockHookAggregator, + ); + }); + }); + + describe('initialize', () => { + it('should initialize hook registry', async () => { + await hookSystem.initialize(); + + expect(mockHookRegistry.initialize).toHaveBeenCalled(); + }); + }); + + describe('getEventHandler', () => { + it('should return the hook event handler', () => { + const eventHandler = hookSystem.getEventHandler(); + + expect(eventHandler).toBe(mockHookEventHandler); + }); + }); + + describe('getRegistry', () => { + it('should return the hook registry', () => { + const registry = hookSystem.getRegistry(); + + expect(registry).toBe(mockHookRegistry); + }); + }); + + describe('setHookEnabled', () => { + it('should enable a hook', () => { + hookSystem.setHookEnabled('test-hook', true); + + expect(mockHookRegistry.setHookEnabled).toHaveBeenCalledWith( + 
'test-hook', + true, + ); + }); + + it('should disable a hook', () => { + hookSystem.setHookEnabled('test-hook', false); + + expect(mockHookRegistry.setHookEnabled).toHaveBeenCalledWith( + 'test-hook', + false, + ); + }); + }); + + describe('getAllHooks', () => { + it('should return all registered hooks', () => { + const mockHooks = [ + { + config: { + type: HookType.Command, + command: 'echo test', + source: HooksConfigSource.Project, + }, + source: HooksConfigSource.Project, + eventName: HookEventName.PreToolUse, + enabled: true, + }, + ]; + vi.mocked(mockHookRegistry.getAllHooks).mockReturnValue(mockHooks); + + const hooks = hookSystem.getAllHooks(); + + expect(hooks).toEqual(mockHooks); + expect(mockHookRegistry.getAllHooks).toHaveBeenCalled(); + }); + }); + + describe('fireStopEvent', () => { + it('should fire stop event and return output', async () => { + const mockResult = { + success: true, + allOutputs: [], + errors: [], + totalDuration: 50, + finalOutput: { + continue: false, + stopReason: 'user_stop', + }, + }; + vi.mocked(mockHookEventHandler.fireStopEvent).mockResolvedValue( + mockResult, + ); + + const result = await hookSystem.fireStopEvent(true, 'last message'); + + expect(mockHookEventHandler.fireStopEvent).toHaveBeenCalledWith( + true, + 'last message', + ); + expect(result).toBeDefined(); + }); + + it('should use default parameters when not provided', async () => { + const mockResult = { + success: true, + allOutputs: [], + errors: [], + totalDuration: 0, + finalOutput: undefined, + }; + vi.mocked(mockHookEventHandler.fireStopEvent).mockResolvedValue( + mockResult, + ); + + await hookSystem.fireStopEvent(); + + expect(mockHookEventHandler.fireStopEvent).toHaveBeenCalledWith( + false, + '', + ); + }); + + it('should return undefined when no final output', async () => { + const mockResult = { + success: true, + allOutputs: [], + errors: [], + totalDuration: 0, + finalOutput: undefined, + }; + 
vi.mocked(mockHookEventHandler.fireStopEvent).mockResolvedValue( + mockResult, + ); + + const result = await hookSystem.fireStopEvent(); + + expect(result).toBeUndefined(); + }); + }); + + describe('fireUserPromptSubmitEvent', () => { + it('should fire UserPromptSubmit event and return output', async () => { + const mockResult = { + success: true, + allOutputs: [], + errors: [], + totalDuration: 50, + finalOutput: { + continue: true, + decision: 'allow' as HookDecision, + }, + }; + vi.mocked( + mockHookEventHandler.fireUserPromptSubmitEvent, + ).mockResolvedValue(mockResult); + + const result = await hookSystem.fireUserPromptSubmitEvent('test prompt'); + + expect( + mockHookEventHandler.fireUserPromptSubmitEvent, + ).toHaveBeenCalledWith('test prompt'); + expect(result).toBeDefined(); + }); + + it('should pass prompt to event handler', async () => { + const mockResult = { + success: true, + allOutputs: [], + errors: [], + totalDuration: 0, + finalOutput: { + decision: 'allow' as HookDecision, + }, + }; + vi.mocked( + mockHookEventHandler.fireUserPromptSubmitEvent, + ).mockResolvedValue(mockResult); + + await hookSystem.fireUserPromptSubmitEvent('my custom prompt'); + + expect( + mockHookEventHandler.fireUserPromptSubmitEvent, + ).toHaveBeenCalledWith('my custom prompt'); + }); + + it('should return undefined when no final output', async () => { + const mockResult = { + success: true, + allOutputs: [], + errors: [], + totalDuration: 0, + finalOutput: undefined, + }; + vi.mocked( + mockHookEventHandler.fireUserPromptSubmitEvent, + ).mockResolvedValue(mockResult); + + const result = await hookSystem.fireUserPromptSubmitEvent('test'); + + expect(result).toBeUndefined(); + }); + + it('should return DefaultHookOutput with blocking decision', async () => { + const mockResult = { + success: true, + allOutputs: [], + errors: [], + totalDuration: 50, + finalOutput: { + decision: 'block' as HookDecision, + reason: 'Blocked by policy', + }, + }; + vi.mocked( + 
mockHookEventHandler.fireUserPromptSubmitEvent, + ).mockResolvedValue(mockResult); + + const result = await hookSystem.fireUserPromptSubmitEvent('test'); + + expect(result).toBeDefined(); + expect(result?.isBlockingDecision()).toBe(true); + }); + + it('should return DefaultHookOutput with additional context', async () => { + const mockResult = { + success: true, + allOutputs: [], + errors: [], + totalDuration: 50, + finalOutput: { + decision: 'allow' as HookDecision, + hookSpecificOutput: { + additionalContext: 'Some additional context', + }, + }, + }; + vi.mocked( + mockHookEventHandler.fireUserPromptSubmitEvent, + ).mockResolvedValue(mockResult); + + const result = await hookSystem.fireUserPromptSubmitEvent('test'); + + expect(result).toBeDefined(); + expect(result?.getAdditionalContext()).toBe('Some additional context'); + }); + }); +}); diff --git a/packages/core/src/hooks/hookSystem.ts b/packages/core/src/hooks/hookSystem.ts new file mode 100644 index 000000000..8a40cbd9e --- /dev/null +++ b/packages/core/src/hooks/hookSystem.ts @@ -0,0 +1,103 @@ +/** + * @license + * Copyright 2026 Qwen Team + * SPDX-License-Identifier: Apache-2.0 + */ + +import type { Config } from '../config/config.js'; +import { HookRegistry } from './hookRegistry.js'; +import { HookRunner } from './hookRunner.js'; +import { HookAggregator } from './hookAggregator.js'; +import { HookPlanner } from './hookPlanner.js'; +import { HookEventHandler } from './hookEventHandler.js'; +import type { HookRegistryEntry } from './hookRegistry.js'; +import { createDebugLogger } from '../utils/debugLogger.js'; +import type { DefaultHookOutput } from './types.js'; +import { createHookOutput } from './types.js'; + +const debugLogger = createDebugLogger('TRUSTED_HOOKS'); + +/** + * Main hook system that coordinates all hook-related functionality + */ + +export class HookSystem { + private readonly hookRegistry: HookRegistry; + private readonly hookRunner: HookRunner; + private readonly hookAggregator: 
HookAggregator; + private readonly hookPlanner: HookPlanner; + private readonly hookEventHandler: HookEventHandler; + + constructor(config: Config) { + // Initialize components + this.hookRegistry = new HookRegistry(config); + this.hookRunner = new HookRunner(); + this.hookAggregator = new HookAggregator(); + this.hookPlanner = new HookPlanner(this.hookRegistry); + this.hookEventHandler = new HookEventHandler( + config, + this.hookPlanner, + this.hookRunner, + this.hookAggregator, + ); + } + + /** + * Initialize the hook system + */ + async initialize(): Promise { + await this.hookRegistry.initialize(); + debugLogger.debug('Hook system initialized successfully'); + } + + /** + * Get the hook event bus for firing events + */ + getEventHandler(): HookEventHandler { + return this.hookEventHandler; + } + + /** + * Get hook registry for management operations + */ + getRegistry(): HookRegistry { + return this.hookRegistry; + } + + /** + * Enable or disable a hook + */ + setHookEnabled(hookName: string, enabled: boolean): void { + this.hookRegistry.setHookEnabled(hookName, enabled); + } + + /** + * Get all registered hooks for display/management + */ + getAllHooks(): HookRegistryEntry[] { + return this.hookRegistry.getAllHooks(); + } + + async fireUserPromptSubmitEvent( + prompt: string, + ): Promise { + const result = + await this.hookEventHandler.fireUserPromptSubmitEvent(prompt); + return result.finalOutput + ? createHookOutput('UserPromptSubmit', result.finalOutput) + : undefined; + } + + async fireStopEvent( + stopHookActive: boolean = false, + lastAssistantMessage: string = '', + ): Promise { + const result = await this.hookEventHandler.fireStopEvent( + stopHookActive, + lastAssistantMessage, + ); + return result.finalOutput + ? 
createHookOutput('Stop', result.finalOutput) + : undefined; + } +} diff --git a/packages/core/src/hooks/index.ts b/packages/core/src/hooks/index.ts new file mode 100644 index 000000000..779f3b332 --- /dev/null +++ b/packages/core/src/hooks/index.ts @@ -0,0 +1,22 @@ +/** + * @license + * Copyright 2026 Qwen Team + * SPDX-License-Identifier: Apache-2.0 + */ + +// Export types +export * from './types.js'; + +// Export core components +export { HookSystem } from './hookSystem.js'; +export { HookRegistry } from './hookRegistry.js'; +export { HookRunner } from './hookRunner.js'; +export { HookAggregator } from './hookAggregator.js'; +export { HookPlanner } from './hookPlanner.js'; +export { HookEventHandler } from './hookEventHandler.js'; + +// Export interfaces and enums +export type { HookRegistryEntry } from './hookRegistry.js'; +export { HooksConfigSource as ConfigSource } from './types.js'; +export type { AggregatedHookResult } from './hookAggregator.js'; +export type { HookEventContext } from './hookPlanner.js'; diff --git a/packages/core/src/hooks/trustedHooks.ts b/packages/core/src/hooks/trustedHooks.ts new file mode 100644 index 000000000..135fcc5b2 --- /dev/null +++ b/packages/core/src/hooks/trustedHooks.ts @@ -0,0 +1,118 @@ +/** + * @license + * Copyright 2026 Qwen Team + * SPDX-License-Identifier: Apache-2.0 + */ + +import * as fs from 'node:fs'; +import * as path from 'node:path'; +import { Storage } from '../config/storage.js'; +import { + getHookKey, + type HookDefinition, + type HookEventName, +} from './types.js'; +import { createDebugLogger } from '../utils/debugLogger.js'; + +const debugLogger = createDebugLogger('TRUSTED_HOOKS'); + +interface TrustedHooksConfig { + [projectPath: string]: string[]; // Array of trusted hook keys (name:command) +} + +export class TrustedHooksManager { + private configPath: string; + private trustedHooks: TrustedHooksConfig = {}; + + constructor() { + this.configPath = path.join( + Storage.getGlobalQwenDir(), + 
'trusted_hooks.json', + ); + this.load(); + } + + private load(): void { + try { + if (fs.existsSync(this.configPath)) { + const content = fs.readFileSync(this.configPath, 'utf-8'); + this.trustedHooks = JSON.parse(content); + } + } catch (error) { + debugLogger.warn('Failed to load trusted hooks config', error); + this.trustedHooks = {}; + } + } + + private save(): void { + try { + const dir = path.dirname(this.configPath); + if (!fs.existsSync(dir)) { + fs.mkdirSync(dir, { recursive: true }); + } + fs.writeFileSync( + this.configPath, + JSON.stringify(this.trustedHooks, null, 2), + ); + } catch (error) { + debugLogger.warn('Failed to save trusted hooks config', error); + } + } + + /** + * Get untrusted hooks for a project + * @param projectPath Absolute path to the project root + * @param hooks The hooks configuration to check + * @returns List of untrusted hook commands/names + */ + getUntrustedHooks( + projectPath: string, + hooks: { [K in HookEventName]?: HookDefinition[] }, + ): string[] { + const trustedKeys = new Set(this.trustedHooks[projectPath] || []); + const untrusted: string[] = []; + + for (const eventName of Object.keys(hooks)) { + const definitions = hooks[eventName as HookEventName]; + if (!Array.isArray(definitions)) continue; + + for (const def of definitions) { + if (!def || !Array.isArray(def.hooks)) continue; + for (const hook of def.hooks) { + const key = getHookKey(hook); + if (!trustedKeys.has(key)) { + // Return friendly name or command + untrusted.push(hook.name || hook.command || 'unknown-hook'); + } + } + } + } + + return Array.from(new Set(untrusted)); // Deduplicate + } + + /** + * Trust all provided hooks for a project + */ + trustHooks( + projectPath: string, + hooks: { [K in HookEventName]?: HookDefinition[] }, + ): void { + const currentTrusted = new Set(this.trustedHooks[projectPath] || []); + + for (const eventName of Object.keys(hooks)) { + const definitions = hooks[eventName as HookEventName]; + if 
(!Array.isArray(definitions)) continue; + + for (const def of definitions) { + if (!def || !Array.isArray(def.hooks)) continue; + for (const hook of def.hooks) { + currentTrusted.add(getHookKey(hook)); + } + } + } + + this.trustedHooks[projectPath] = Array.from(currentTrusted); + this.save(); + } +} diff --git a/packages/core/src/hooks/types.ts b/packages/core/src/hooks/types.ts new file mode 100644 index 000000000..49ac7a5ef --- /dev/null +++ b/packages/core/src/hooks/types.ts @@ -0,0 +1,678 @@ +/** + * @license + * Copyright 2026 Qwen Team + * SPDX-License-Identifier: Apache-2.0 + */ + +export enum HooksConfigSource { + Project = 'project', + User = 'user', + System = 'system', + Extensions = 'extensions', +} + +/** + * Event names for the hook system + */ +export enum HookEventName { + // PreToolUse - Before tool execution + PreToolUse = 'PreToolUse', + // PostToolUse - After tool execution + PostToolUse = 'PostToolUse', + // PostToolUseFailure - After tool execution fails + PostToolUseFailure = 'PostToolUseFailure', + // Notification - When notifications are sent + Notification = 'Notification', + // UserPromptSubmit - When the user submits a prompt + UserPromptSubmit = 'UserPromptSubmit', + // SessionStart - When a new session is started + SessionStart = 'SessionStart', + // Stop - Right before Claude concludes its response + Stop = 'Stop', + // SubagentStart - When a subagent (Task tool call) is started + SubagentStart = 'SubagentStart', + // SubagentStop - Right before a subagent (Task tool call) concludes its response + SubagentStop = 'SubagentStop', + // PreCompact - Before conversation compaction + PreCompact = 'PreCompact', + // SessionEnd - When a session is ending + SessionEnd = 'SessionEnd', + // When a permission dialog is displayed + PermissionRequest = 'PermissionRequest', +} + +/** + * Fields in the hooks configuration that are not hook event names + */ +export const HOOKS_CONFIG_FIELDS = ['enabled', 'disabled', 'notifications']; + +/** + * Hook 
configuration entry + */ +export interface CommandHookConfig { + type: HookType.Command; + command: string; + name?: string; + description?: string; + timeout?: number; + source?: HooksConfigSource; + env?: Record; +} + +export type HookConfig = CommandHookConfig; + +/** + * Hook definition with matcher + */ +export interface HookDefinition { + matcher?: string; + sequential?: boolean; + hooks: HookConfig[]; +} + +/** + * Hook implementation types + */ +export enum HookType { + Command = 'command', +} + +/** + * Generate a unique key for a hook configuration + */ +export function getHookKey(hook: HookConfig): string { + const name = hook.name ?? ''; + return name ? `${name}:${hook.command}` : hook.command; +} + +/** + * Decision types for hook outputs + */ +export type HookDecision = 'ask' | 'block' | 'deny' | 'approve' | 'allow'; + +/** + * Base hook input - common fields for all events + */ +export interface HookInput { + session_id: string; + transcript_path: string; + cwd: string; + hook_event_name: string; + timestamp: string; +} + +/** + * Base hook output - common fields for all events + */ +export interface HookOutput { + continue?: boolean; + stopReason?: string; + suppressOutput?: boolean; + systemMessage?: string; + decision?: HookDecision; + reason?: string; + hookSpecificOutput?: Record; +} + +/** + * Factory function to create the appropriate hook output class based on event name + * Returns specialized HookOutput subclasses for events with specific methods + */ +export function createHookOutput( + eventName: string, + data: Partial, +): DefaultHookOutput { + switch (eventName) { + case HookEventName.PreToolUse: + return new PreToolUseHookOutput(data); + case HookEventName.Stop: + return new StopHookOutput(data); + case HookEventName.PermissionRequest: + return new PermissionRequestHookOutput(data); + default: + return new DefaultHookOutput(data); + } +} + +/** + * Default implementation of HookOutput with utility methods + */ +export class 
DefaultHookOutput implements HookOutput { + continue?: boolean; + stopReason?: string; + suppressOutput?: boolean; + systemMessage?: string; + decision?: HookDecision; + reason?: string; + hookSpecificOutput?: Record; + + constructor(data: Partial = {}) { + this.continue = data.continue; + this.stopReason = data.stopReason; + this.suppressOutput = data.suppressOutput; + this.systemMessage = data.systemMessage; + this.decision = data.decision; + this.reason = data.reason; + this.hookSpecificOutput = data.hookSpecificOutput; + } + + /** + * Check if this output represents a blocking decision + */ + isBlockingDecision(): boolean { + return this.decision === 'block' || this.decision === 'deny'; + } + + /** + * Check if this output requests to stop execution + */ + shouldStopExecution(): boolean { + return this.continue === false; + } + + /** + * Get the effective reason for blocking or stopping + */ + getEffectiveReason(): string { + return this.stopReason || this.reason || 'No reason provided'; + } + + /** + * Get sanitized additional context for adding to responses. + */ + getAdditionalContext(): string | undefined { + if ( + this.hookSpecificOutput && + 'additionalContext' in this.hookSpecificOutput + ) { + const context = this.hookSpecificOutput['additionalContext']; + if (typeof context !== 'string') { + return undefined; + } + + // Sanitize by escaping < and > to prevent tag injection + return context.replace(//g, '>'); + } + return undefined; + } + + /** + * Check if execution should be blocked and return error info + */ + getBlockingError(): { blocked: boolean; reason: string } { + if (this.isBlockingDecision()) { + return { + blocked: true, + reason: this.getEffectiveReason(), + }; + } + return { blocked: false, reason: '' }; + } + + /** + * Check if context clearing was requested by hook. + */ + shouldClearContext(): boolean { + return false; + } +} + +/** + * Specific hook output class for PreToolUse events. 
+ */ +export class PreToolUseHookOutput extends DefaultHookOutput { + /** + * Get modified tool input if provided by hook + */ + getModifiedToolInput(): Record | undefined { + if (this.hookSpecificOutput && 'tool_input' in this.hookSpecificOutput) { + const input = this.hookSpecificOutput['tool_input']; + if ( + typeof input === 'object' && + input !== null && + !Array.isArray(input) + ) { + return input as Record; + } + } + return undefined; + } +} + +/** + * Specific hook output class for Stop events. + */ +export class StopHookOutput extends DefaultHookOutput { + override stopReason?: string; + + constructor(data: Partial = {}) { + super(data); + this.stopReason = data.stopReason; + } + + /** + * Get the stop reason if provided + */ + getStopReason(): string | undefined { + if (!this.stopReason) { + return undefined; + } + return `Stop hook feedback:\n${this.stopReason}`; + } +} + +/** + * Permission suggestion type + */ +export interface PermissionSuggestion { + type: string; + tool?: string; +} + +/** + * Input for PermissionRequest hook events + */ +export interface PermissionRequestInput extends HookInput { + permission_mode: PermissionMode; + tool_name: string; + tool_input: Record; + permission_suggestions?: PermissionSuggestion[]; +} + +/** + * Decision object for PermissionRequest hooks + */ +export interface PermissionRequestDecision { + behavior: 'allow' | 'deny'; + updatedInput?: Record; + updatedPermissions?: PermissionSuggestion[]; + message?: string; + interrupt?: boolean; +} + +/** + * Specific hook output class for PermissionRequest events. 
+ */ +export class PermissionRequestHookOutput extends DefaultHookOutput { + /** + * Get the permission decision if provided by hook + */ + getPermissionDecision(): PermissionRequestDecision | undefined { + if (this.hookSpecificOutput && 'decision' in this.hookSpecificOutput) { + const decision = this.hookSpecificOutput['decision']; + if ( + typeof decision === 'object' && + decision !== null && + !Array.isArray(decision) + ) { + return decision as PermissionRequestDecision; + } + } + return undefined; + } + + /** + * Check if the permission was denied + */ + isPermissionDenied(): boolean { + const decision = this.getPermissionDecision(); + return decision?.behavior === 'deny'; + } + + /** + * Get the deny message if permission was denied + */ + getDenyMessage(): string | undefined { + const decision = this.getPermissionDecision(); + return decision?.message; + } + + /** + * Check if execution should be interrupted after denial + */ + shouldInterrupt(): boolean { + const decision = this.getPermissionDecision(); + return decision?.interrupt === true; + } + + /** + * Get updated tool input if permission was allowed with modifications + */ + getUpdatedToolInput(): Record | undefined { + const decision = this.getPermissionDecision(); + return decision?.updatedInput; + } + + /** + * Get updated permissions if permission was allowed with permission updates + */ + getUpdatedPermissions(): PermissionSuggestion[] | undefined { + const decision = this.getPermissionDecision(); + return decision?.updatedPermissions; + } +} + +/** + * Context for MCP tool executions. + * Contains non-sensitive connection information about the MCP server + * identity. Since server_name is user controlled and arbitrary, we + * also include connection information (e.g., command or url) to + * help identify the MCP server. + * + * NOTE: In the future, consider defining a shared sanitized interface + * from MCPServerConfig to avoid duplication and ensure consistency. 
+ */ +export interface McpToolContext { + server_name: string; + tool_name: string; // Original tool name from the MCP server + + // Connection info (mutually exclusive based on transport type) + command?: string; // For stdio transport + args?: string[]; // For stdio transport + cwd?: string; // For stdio transport + + url?: string; // For SSE/HTTP transport + + tcp?: string; // For WebSocket transport +} + +export interface PreToolUseInput extends HookInput { + permission_mode?: PermissionMode; + tool_name: string; + tool_input: Record; + mcp_context?: McpToolContext; + original_request_name?: string; +} + +/** + * PreToolUse hook output + */ +export interface PreToolUseOutput extends HookOutput { + hookSpecificOutput?: { + hookEventName: 'PreToolUse'; + tool_input?: Record; + }; +} + +/** + * PostToolUse hook input + */ +export interface PostToolUseInput extends HookInput { + tool_name: string; + tool_input: Record; + tool_response: Record; + mcp_context?: McpToolContext; + original_request_name?: string; +} + +/** + * PostToolUse hook output + */ +export interface PostToolUseOutput extends HookOutput { + hookSpecificOutput?: { + hookEventName: 'PostToolUse'; + additionalContext?: string; + + /** + * Optional request to execute another tool immediately after this one. + * The result of this tail call will replace the original tool's response. + */ + tailToolCallRequest?: { + name: string; + args: Record; + }; + }; +} + +/** + * PostToolUseFailure hook input + * Fired when a tool execution fails + */ +export interface PostToolUseFailureInput extends HookInput { + tool_use_id: string; // Unique identifier for the tool use + tool_name: string; + tool_input: Record; + error: string; // Error message describing the failure + error_type?: string; // Type of error (e.g., 'timeout', 'network', 'permission', etc.) 
+ is_interrupt?: boolean; // Whether the failure was caused by user interruption +} + +/** + * PostToolUseFailure hook output + * Supports all three hook types: command, prompt, and agent + */ +export interface PostToolUseFailureOutput extends HookOutput { + hookSpecificOutput?: { + hookEventName: 'PostToolUseFailure'; + additionalContext?: string; + }; +} + +/** + * UserPromptSubmit hook input + */ +export interface UserPromptSubmitInput extends HookInput { + prompt: string; +} + +/** + * UserPromptSubmit hook output + */ +export interface UserPromptSubmitOutput extends HookOutput { + hookSpecificOutput?: { + hookEventName: 'UserPromptSubmit'; + additionalContext?: string; + }; +} + +/** + * Notification types + */ +export enum NotificationType { + ToolPermission = 'ToolPermission', +} + +/** + * Notification hook input + */ +export interface NotificationInput extends HookInput { + permission_mode?: PermissionMode; + notification_type: NotificationType; + message: string; + title?: string; + details: Record; +} + +/** + * Notification hook output + */ +export interface NotificationOutput extends HookOutput { + hookSpecificOutput?: { + hookEventName: 'Notification'; + additionalContext?: string; + }; +} + +/** + * Stop hook input + */ +export interface StopInput extends HookInput { + stop_hook_active: boolean; + last_assistant_message: string; +} + +/** + * Stop hook output + */ +export interface StopOutput extends HookOutput { + hookSpecificOutput?: { + hookEventName: 'Stop'; + additionalContext?: string; + }; +} + +/** + * SessionStart source types + */ +export enum SessionStartSource { + Startup = 'startup', + Resume = 'resume', + Clear = 'clear', + Compact = 'compact', +} + +export enum PermissionMode { + Default = 'default', + Plan = 'plan', + AcceptEdit = 'accept_edit', + DontAsk = 'dont_ask', + BypassPermissions = 'bypass_permissions', +} + +/** + * SessionStart hook input + */ +export interface SessionStartInput extends HookInput { + permission_mode?: 
PermissionMode; + source: SessionStartSource; + model?: string; +} + +/** + * SessionStart hook output + */ +export interface SessionStartOutput extends HookOutput { + hookSpecificOutput?: { + hookEventName: 'SessionStart'; + additionalContext?: string; + }; +} + +/** + * SessionEnd reason types + */ +export enum SessionEndReason { + Clear = 'clear', + Logout = 'logout', + PromptInputExit = 'prompt_input_exit', + Bypass_permissions_disabled = 'bypass_permissions_disabled', + Other = 'other', +} + +/** + * SessionEnd hook input + */ +export interface SessionEndInput extends HookInput { + reason: SessionEndReason; +} + +/** + * SessionEnd hook output + */ +export interface SessionEndOutput extends HookOutput { + hookSpecificOutput?: { + hookEventName: 'SessionEnd'; + additionalContext?: string; + }; +} + +/** + * PreCompress trigger types + */ +export enum PreCompactTrigger { + Manual = 'manual', + Auto = 'auto', +} + +/** + * PreCompress hook input + */ +export interface PreCompactInput extends HookInput { + trigger: PreCompactTrigger; + custom_instructions?: string; +} + +/** + * PreCompress hook output + */ +export interface PreCompactOutput extends HookOutput { + hookSpecificOutput?: { + hookEventName: 'PreCompact'; + additionalContext?: string; + }; +} + +export enum AgentType { + Bash = 'Bash', + Explorer = 'Explorer', + Plan = 'Plan', + Custom = 'Custom', +} + +/** + * SubagentStart hook input + * Fired when a subagent (Task tool call) is started + */ +export interface SubagentStartInput extends HookInput { + permission_mode?: PermissionMode; + agent_id: string; + agent_type: AgentType; +} + +/** + * SubagentStart hook output + */ +export interface SubagentStartOutput extends HookOutput { + hookSpecificOutput?: { + hookEventName: 'SubagentStart'; + additionalContext?: string; + }; +} + +/** + * SubagentStop hook input + * Fired right before a subagent (Task tool call) concludes its response + */ +export interface SubagentStopInput extends HookInput { + 
permission_mode?: PermissionMode; + stop_hook_active: boolean; + agent_id: string; + agent_type: AgentType; + agent_transcript_path: string; + last_assistant_message: string; +} + +/** + * SubagentStop hook output + * Supports all three hook types: command, prompt, and agent + */ +export interface SubagentStopOutput extends HookOutput { + hookSpecificOutput?: { + hookEventName: 'SubagentStop'; + additionalContext?: string; + }; +} + +/** + * Hook execution result + */ +export interface HookExecutionResult { + hookConfig: HookConfig; + eventName: HookEventName; + success: boolean; + output?: HookOutput; + stdout?: string; + stderr?: string; + exitCode?: number; + duration: number; + error?: Error; +} + +/** + * Hook execution plan for an event + */ +export interface HookExecutionPlan { + eventName: HookEventName; + hookConfigs: HookConfig[]; + sequential: boolean; +} diff --git a/packages/core/src/ide/ide-client.test.ts b/packages/core/src/ide/ide-client.test.ts index 72f780896..88788fc57 100644 --- a/packages/core/src/ide/ide-client.test.ts +++ b/packages/core/src/ide/ide-client.test.ts @@ -14,8 +14,15 @@ import { type Mocked, type Mock, } from 'vitest'; -import { IdeClient, IDEConnectionStatus } from './ide-client.js'; +import { + IdeClient, + IDEConnectionStatus, + getIdeServerHost, + _resetCachedIdeServerHost, +} from './ide-client.js'; import * as fs from 'node:fs'; +import type { FileHandle } from 'node:fs/promises'; +import * as dns from 'node:dns'; import { getIdeProcessInfo } from './process-utils.js'; import { Client } from '@modelcontextprotocol/sdk/client/index.js'; import { StreamableHTTPClientTransport } from '@modelcontextprotocol/sdk/client/streamableHttp.js'; @@ -35,7 +42,17 @@ vi.mock('node:fs', async (importOriginal) => { stat: vi.fn(), }, realpathSync: (p: string) => p, - existsSync: () => false, + existsSync: vi.fn().mockReturnValue(false), + }; +}); +vi.mock('node:dns', async (importOriginal) => { + const actual = await importOriginal(); + 
return { + ...(actual as object), + promises: { + ...actual.promises, + lookup: vi.fn(), + }, }; }); vi.mock('./process-utils.js'); @@ -51,9 +68,13 @@ describe('IdeClient', () => { let mockStdioTransport: Mocked; beforeEach(async () => { - // Reset singleton instance for test isolation - (IdeClient as unknown as { instance: IdeClient | undefined }).instance = - undefined; + // Reset singleton instance and cached host for test isolation + ( + IdeClient as unknown as { + instancePromise: Promise | null; + } + ).instancePromise = null; + _resetCachedIdeServerHost(); // Mock environment variables process.env['QWEN_CODE_IDE_WORKSPACE_PATH'] = '/test/workspace'; @@ -94,6 +115,7 @@ describe('IdeClient', () => { }); afterEach(() => { + vi.useRealTimers(); vi.restoreAllMocks(); }); @@ -183,6 +205,49 @@ describe('IdeClient', () => { ); }); + it('should fall back to host.docker.internal when localhost fails in container', async () => { + process.env['QWEN_CODE_IDE_SERVER_PORT'] = '9090'; + vi.mocked(fs.promises.readFile).mockRejectedValue( + new Error('File not found'), + ); + ( + vi.mocked(fs.promises.readdir) as Mock< + (path: fs.PathLike) => Promise + > + ).mockResolvedValue([]); + vi.mocked(fs.existsSync).mockImplementation( + (filePath: fs.PathLike) => filePath === '/.dockerenv', + ); + (dns.promises.lookup as unknown as Mock).mockResolvedValue({ + address: '192.168.65.254', + family: 4, + }); + mockClient.connect + .mockRejectedValueOnce(new Error('localhost unreachable')) + .mockResolvedValueOnce(undefined); + + const ideClient = await IdeClient.getInstance(); + await ideClient.connect(); + + // Localhost is always tried first. + expect(StreamableHTTPClientTransport).toHaveBeenNthCalledWith( + 1, + new URL('http://127.0.0.1:9090/mcp'), + expect.any(Object), + ); + // In a container, host.docker.internal is used as fallback. 
+ expect(StreamableHTTPClientTransport).toHaveBeenNthCalledWith( + 2, + new URL('http://host.docker.internal:9090/mcp'), + expect.any(Object), + ); + expect(ideClient.getConnectionStatus().status).toBe( + IDEConnectionStatus.Connected, + ); + + delete process.env['QWEN_CODE_IDE_SERVER_PORT']; + }); + it('should connect using stdio when stdio config is in environment variables', async () => { vi.mocked(fs.promises.readFile).mockRejectedValue( new Error('File not found'), @@ -358,6 +423,107 @@ describe('IdeClient', () => { expect(result).toEqual(config); delete process.env['QWEN_CODE_IDE_SERVER_PORT']; }); + + it('should scan IDE lock directory when env and legacy config are unavailable', async () => { + const latestConfig = { + port: '2000', + workspacePath: '/test/workspace', + }; + + vi.mocked(fs.promises.readFile).mockImplementation( + async (filePath: fs.PathLike | FileHandle) => { + const file = String(filePath); + if (file === path.join('/tmp', 'qwen-code-ide-server-12345.json')) { + throw new Error('not found'); + } + if (file === path.join('/home/test', '.qwen', 'ide', '1000.lock')) { + return JSON.stringify({ + port: '1000', + workspacePath: '/older/workspace', + }); + } + if (file === path.join('/home/test', '.qwen', 'ide', '2000.lock')) { + return JSON.stringify(latestConfig); + } + throw new Error(`unexpected path: ${file}`); + }, + ); + ( + vi.mocked(fs.promises.readdir) as Mock< + (path: fs.PathLike) => Promise + > + ).mockResolvedValue(['1000.lock', '2000.lock']); + ( + vi.mocked(fs.promises.stat) as Mock< + (path: fs.PathLike) => Promise + > + ).mockImplementation(async (filePath: fs.PathLike) => { + const file = String(filePath); + return { + mtimeMs: file.endsWith('2000.lock') ? 
2000 : 1000, + } as fs.Stats; + }); + + const ideClient = await IdeClient.getInstance(); + const result = await ( + ideClient as unknown as { + getConnectionConfigFromFile: () => Promise; + } + ).getConnectionConfigFromFile(); + + expect(result).toEqual(latestConfig); + expect(fs.promises.readdir).toHaveBeenCalledWith( + path.join('/home/test', '.qwen', 'ide'), + ); + }); + + it('should return undefined when scanned lock files do not match current workspace', async () => { + vi.mocked(fs.promises.readFile).mockImplementation( + async (filePath: fs.PathLike | FileHandle) => { + const file = String(filePath); + if (file === path.join('/tmp', 'qwen-code-ide-server-12345.json')) { + throw new Error('not found'); + } + if (file === path.join('/home/test', '.qwen', 'ide', '1000.lock')) { + return JSON.stringify({ + port: '1000', + workspacePath: '/another/workspace', + }); + } + if (file === path.join('/home/test', '.qwen', 'ide', '2000.lock')) { + return JSON.stringify({ + port: '2000', + workspacePath: '/yet/another/workspace', + }); + } + throw new Error(`unexpected path: ${file}`); + }, + ); + ( + vi.mocked(fs.promises.readdir) as Mock< + (path: fs.PathLike) => Promise + > + ).mockResolvedValue(['1000.lock', '2000.lock']); + ( + vi.mocked(fs.promises.stat) as Mock< + (path: fs.PathLike) => Promise + > + ).mockImplementation(async (filePath: fs.PathLike) => { + const file = String(filePath); + return { + mtimeMs: file.endsWith('2000.lock') ? 
2000 : 1000, + } as fs.Stats; + }); + + const ideClient = await IdeClient.getInstance(); + const result = await ( + ideClient as unknown as { + getConnectionConfigFromFile: () => Promise; + } + ).getConnectionConfigFromFile(); + + expect(result).toBeUndefined(); + }); }); describe('isDiffingEnabled', () => { @@ -479,3 +645,120 @@ describe('IdeClient', () => { }); }); }); + +describe('getIdeServerHost', () => { + const dnsLookupMock = dns.promises.lookup as unknown as Mock; + + function mockDnsResolvable(reachable: boolean): void { + if (reachable) { + dnsLookupMock.mockResolvedValue({ address: '192.168.65.254', family: 4 }); + } else { + dnsLookupMock.mockRejectedValue(new Error('ENOTFOUND')); + } + } + + beforeEach(() => { + _resetCachedIdeServerHost(); + vi.mocked(fs.existsSync).mockReturnValue(false); + }); + + afterEach(() => { + vi.restoreAllMocks(); + }); + + it('should return 127.0.0.1 when not in a container', async () => { + const host = await getIdeServerHost(); + + expect(host).toBe('127.0.0.1'); + expect(dnsLookupMock).not.toHaveBeenCalled(); + }); + + it('should return host.docker.internal when in a container and the host is reachable', async () => { + vi.mocked(fs.existsSync).mockImplementation( + (filePath: fs.PathLike) => filePath === '/.dockerenv', + ); + mockDnsResolvable(true); + + const host = await getIdeServerHost(); + + expect(host).toBe('host.docker.internal'); + expect(dnsLookupMock).toHaveBeenCalledWith('host.docker.internal'); + }); + + it('should fall back to 127.0.0.1 when in a container but host.docker.internal is not reachable', async () => { + vi.mocked(fs.existsSync).mockImplementation( + (filePath: fs.PathLike) => filePath === '/.dockerenv', + ); + mockDnsResolvable(false); + + const host = await getIdeServerHost(); + + expect(host).toBe('127.0.0.1'); + expect(dnsLookupMock).toHaveBeenCalledWith('host.docker.internal'); + }); + + it('should detect container via /run/.containerenv', async () => { + 
vi.mocked(fs.existsSync).mockImplementation( + (filePath: fs.PathLike) => filePath === '/run/.containerenv', + ); + mockDnsResolvable(true); + + const host = await getIdeServerHost(); + + expect(host).toBe('host.docker.internal'); + }); + + it('should cache the result and not perform DNS lookup again', async () => { + vi.mocked(fs.existsSync).mockImplementation( + (filePath: fs.PathLike) => filePath === '/.dockerenv', + ); + mockDnsResolvable(true); + + const host1 = await getIdeServerHost(); + const host2 = await getIdeServerHost(); + + expect(host1).toBe('host.docker.internal'); + expect(host2).toBe('host.docker.internal'); + expect(dnsLookupMock).toHaveBeenCalledTimes(1); + }); + + it('should fall back to 127.0.0.1 when DNS lookup times out in a container', async () => { + vi.useFakeTimers(); + vi.mocked(fs.existsSync).mockImplementation( + (filePath: fs.PathLike) => filePath === '/.dockerenv', + ); + // Simulate dns.promises.lookup that never resolves + dnsLookupMock.mockReturnValue(new Promise(() => {})); + + const hostPromise = getIdeServerHost(); + await vi.advanceTimersByTimeAsync(3000); + const host = await hostPromise; + + expect(host).toBe('127.0.0.1'); + expect(dnsLookupMock).toHaveBeenCalledWith('host.docker.internal'); + }); + + it('should perform only one DNS lookup when called concurrently', async () => { + vi.useRealTimers(); + vi.mocked(fs.existsSync).mockImplementation( + (filePath: fs.PathLike) => filePath === '/.dockerenv', + ); + + // Simulate a slow DNS lookup + dnsLookupMock.mockImplementation( + () => + new Promise((resolve) => + setTimeout( + () => resolve({ address: '192.168.65.254', family: 4 }), + 50, + ), + ), + ); + + const promises = Array.from({ length: 5 }, () => getIdeServerHost()); + const results = await Promise.all(promises); + + expect(results.every((r) => r === 'host.docker.internal')).toBe(true); + expect(dnsLookupMock).toHaveBeenCalledTimes(1); + }); +}); diff --git a/packages/core/src/ide/ide-client.ts 
b/packages/core/src/ide/ide-client.ts index d839004ad..b4835e30e 100644 --- a/packages/core/src/ide/ide-client.ts +++ b/packages/core/src/ide/ide-client.ts @@ -4,6 +4,7 @@ * SPDX-License-Identifier: Apache-2.0 */ +import * as dns from 'node:dns'; import * as fs from 'node:fs'; import { isSubpath } from '../utils/paths.js'; import { detectIde, type IdeInfo } from '../ide/detect-ide.js'; @@ -585,7 +586,33 @@ export class IdeClient { } // Legacy discovery for VSCode extension < v0.5.1. - return this.getLegacyConnectionConfig(portFromEnv); + const legacyConfig = await this.getLegacyConnectionConfig(portFromEnv); + if (legacyConfig) { + return legacyConfig; + } + + // Scan lock directory as a last resort when neither env var nor legacy + // file is available (e.g. code-server where the env var is not injected). + // Configs are sorted by modification time (most recent first). Pick the + // first one whose workspace matches the current working directory. + if (!portFromEnv) { + const ideDir = Storage.getGlobalIdeDir(); + const configs = await this.getAllConnectionConfigs(ideDir); + if (configs.length > 0) { + debugLogger.debug( + `Discovered ${configs.length} IDE lock file(s) via directory scan`, + ); + const cwd = process.cwd(); + const match = configs.find( + (c) => + c.workspacePath !== undefined && + IdeClient.validateWorkspacePath(c.workspacePath, cwd).isValid, + ); + return match; + } + } + + return undefined; } // Legacy connection files were written in the global temp directory. 
@@ -671,11 +698,13 @@ export class IdeClient { .map(({ parsed }) => parsed); } - private createProxyAwareFetch() { - // ignore proxy for '127.0.0.1' by deafult to allow connecting to the ide mcp server + private createProxyAwareFetch(ideHost: string) { + // Ignore proxy for IDE server host to allow connecting to the ide mcp + // server even when HTTP_PROXY is set const existingNoProxy = process.env['NO_PROXY'] || ''; + const noProxyHosts = [existingNoProxy, ideHost]; const agent = new EnvHttpProxyAgent({ - noProxy: [existingNoProxy, '127.0.0.1'].filter(Boolean).join(','), + noProxy: noProxyHosts.filter(Boolean).join(','), }); const undiciPromise = import('undici'); return async (url: string | URL, init?: RequestInit): Promise => { @@ -778,9 +807,34 @@ export class IdeClient { } private async establishHttpConnection(port: string): Promise { + // Always try localhost first. This covers the most common scenarios: + // non-container environments, and code-server where the extension runs + // inside the same container as the CLI. + const connected = await this.tryHttpConnect(port, LOCAL_HOST); + if (connected) { + return true; + } + + // If localhost failed and we are inside a container, the IDE server may + // be running on the host machine (e.g. VS Code Dev Containers). Try + // host.docker.internal as a fallback when it is DNS-resolvable. 
+ const ideHost = await getIdeServerHost(); + if (ideHost === CONTAINER_HOST) { + debugLogger.debug( + `Connection to ${LOCAL_HOST}:${port} failed, retrying with ${CONTAINER_HOST}`, + ); + return this.tryHttpConnect(port, CONTAINER_HOST); + } + + return false; + } + + private async tryHttpConnect(port: string, host: string): Promise { let transport: StreamableHTTPClientTransport | undefined; try { - debugLogger.debug('Attempting to connect to IDE via HTTP SSE'); + debugLogger.debug( + `Attempting to connect to IDE via HTTP at ${host}:${port}`, + ); this.client = new Client({ name: 'streamable-http-client', // TODO(#3487): use the CLI version here. @@ -788,9 +842,9 @@ export class IdeClient { }); transport = new StreamableHTTPClientTransport( - new URL(`http://${getIdeServerHost()}:${port}/mcp`), + new URL(`http://${host}:${port}/mcp`), { - fetch: this.createProxyAwareFetch(), + fetch: this.createProxyAwareFetch(host), requestInit: { headers: this.authToken ? { Authorization: `Bearer ${this.authToken}` } @@ -806,7 +860,8 @@ export class IdeClient { await this.discoverTools(); this.setState(IDEConnectionStatus.Connected); return true; - } catch (_error) { + } catch (error) { + debugLogger.debug(`HTTP connection to ${host}:${port} failed:`, error); if (transport) { try { await transport.close(); @@ -853,8 +908,76 @@ export class IdeClient { } } -function getIdeServerHost() { +const CONTAINER_HOST = 'host.docker.internal'; +const LOCAL_HOST = '127.0.0.1'; +const DNS_LOOKUP_TIMEOUT_MS = 3_000; + +/** + * Cached promise for IDE server host. Caching the promise itself handles both + * result caching and concurrent-call deduplication in one mechanism: a resolved + * promise returns instantly, and a pending promise is shared across callers. + */ +let hostPromise: Promise | undefined; + +/** + * Reset the cached host promise. Exported for testing only. 
+ * @internal + */ +export function _resetCachedIdeServerHost(): void { + hostPromise = undefined; +} + +/** + * Check if a hostname is DNS-resolvable, with a timeout guard. + */ +async function isHostResolvable(hostname: string): Promise { + try { + const timeout = new Promise((_, reject) => { + const timer = setTimeout( + () => reject(new Error('DNS lookup timeout')), + DNS_LOOKUP_TIMEOUT_MS, + ); + timer.unref?.(); + }); + await Promise.race([dns.promises.lookup(hostname), timeout]); + return true; + } catch { + return false; + } +} + +/** + * Determine the IDE server host to connect to. + * + * In container environments (`/.dockerenv` or `/run/.containerenv`), verify + * `host.docker.internal` is DNS-resolvable and use it if reachable. + * Otherwise fall back to `127.0.0.1`. + * + * Results are cached; concurrent calls share a single lookup. + */ +async function resolveIdeServerHost(): Promise { const isInContainer = fs.existsSync('/.dockerenv') || fs.existsSync('/run/.containerenv'); - return isInContainer ? 
'host.docker.internal' : '127.0.0.1'; + + if (!isInContainer) { + return LOCAL_HOST; + } + + const reachable = await isHostResolvable(CONTAINER_HOST); + if (reachable) { + debugLogger.debug('Container detected, host.docker.internal is reachable'); + return CONTAINER_HOST; + } + + debugLogger.debug( + 'Container detected, but host.docker.internal is NOT reachable, falling back to 127.0.0.1', + ); + return LOCAL_HOST; +} + +export async function getIdeServerHost(): Promise { + if (!hostPromise) { + hostPromise = resolveIdeServerHost(); + } + return hostPromise; } diff --git a/packages/core/src/index.ts b/packages/core/src/index.ts index a7c58ca0b..d0dcce945 100644 --- a/packages/core/src/index.ts +++ b/packages/core/src/index.ts @@ -302,3 +302,8 @@ export * from './qwen/qwenOAuth2.js'; export { makeFakeConfig } from './test-utils/config.js'; export * from './test-utils/index.js'; + +// Export hook types and components +export * from './hooks/types.js'; +export { HookSystem, HookRegistry } from './hooks/index.js'; +export type { HookRegistryEntry } from './hooks/index.js'; diff --git a/packages/core/src/services/fileSystemService.test.ts b/packages/core/src/services/fileSystemService.test.ts index 69898f72d..fe72829e2 100644 --- a/packages/core/src/services/fileSystemService.test.ts +++ b/packages/core/src/services/fileSystemService.test.ts @@ -10,6 +10,20 @@ import { StandardFileSystemService } from './fileSystemService.js'; vi.mock('fs/promises'); +vi.mock('../utils/fileUtils.js', async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + readFileWithEncoding: vi.fn(), + readFileWithEncodingInfo: vi.fn(), + }; +}); + +import { + readFileWithEncoding, + readFileWithEncodingInfo, +} from '../utils/fileUtils.js'; + describe('StandardFileSystemService', () => { let fileSystem: StandardFileSystemService; @@ -23,19 +37,19 @@ describe('StandardFileSystemService', () => { }); describe('readTextFile', () => { - it('should read file content 
using fs', async () => { + it('should read file content using readFileWithEncoding', async () => { const testContent = 'Hello, World!'; - vi.mocked(fs.readFile).mockResolvedValue(testContent); + vi.mocked(readFileWithEncoding).mockResolvedValue(testContent); const result = await fileSystem.readTextFile('/test/file.txt'); - expect(fs.readFile).toHaveBeenCalledWith('/test/file.txt', 'utf-8'); + expect(readFileWithEncoding).toHaveBeenCalledWith('/test/file.txt'); expect(result).toBe(testContent); }); - it('should propagate fs.readFile errors', async () => { + it('should propagate readFileWithEncoding errors', async () => { const error = new Error('ENOENT: File not found'); - vi.mocked(fs.readFile).mockRejectedValue(error); + vi.mocked(readFileWithEncoding).mockRejectedValue(error); await expect(fileSystem.readTextFile('/test/file.txt')).rejects.toThrow( 'ENOENT: File not found', @@ -43,6 +57,42 @@ describe('StandardFileSystemService', () => { }); }); + describe('readTextFileWithInfo', () => { + it('should return content, encoding, and bom via readFileWithEncodingInfo', async () => { + const mockResult = { content: 'Hello', encoding: 'utf-8', bom: false }; + vi.mocked(readFileWithEncodingInfo).mockResolvedValue(mockResult); + + const result = await fileSystem.readTextFileWithInfo('/test/file.txt'); + + expect(readFileWithEncodingInfo).toHaveBeenCalledWith('/test/file.txt'); + expect(result).toEqual(mockResult); + }); + + it('should return non-UTF-8 encoding info for GBK file', async () => { + const mockResult = { + content: '你好世界', + encoding: 'gb18030', + bom: false, + }; + vi.mocked(readFileWithEncodingInfo).mockResolvedValue(mockResult); + + const result = await fileSystem.readTextFileWithInfo('/test/gbk.txt'); + + expect(result.encoding).toBe('gb18030'); + expect(result.bom).toBe(false); + expect(result.content).toBe('你好世界'); + }); + + it('should propagate readFileWithEncodingInfo errors', async () => { + const error = new Error('ENOENT: File not found'); + 
vi.mocked(readFileWithEncodingInfo).mockRejectedValue(error); + + await expect( + fileSystem.readTextFileWithInfo('/test/file.txt'), + ).rejects.toThrow('ENOENT: File not found'); + }); + }); + describe('writeTextFile', () => { it('should write file content using fs', async () => { vi.mocked(fs.writeFile).mockResolvedValue(); @@ -120,6 +170,67 @@ describe('StandardFileSystemService', () => { } expect(bomCount).toBe(1); }); + it('should write file with non-UTF-8 encoding using iconv-lite', async () => { + vi.mocked(fs.writeFile).mockResolvedValue(); + + await fileSystem.writeTextFile('/test/file.txt', '你好世界', { + encoding: 'gbk', + }); + + // Verify that fs.writeFile was called with a Buffer (iconv-encoded) + const writeCall = vi.mocked(fs.writeFile).mock.calls[0]; + expect(writeCall[0]).toBe('/test/file.txt'); + expect(writeCall[1]).toBeInstanceOf(Buffer); + }); + + it('should write file as UTF-8 when encoding is utf-8', async () => { + vi.mocked(fs.writeFile).mockResolvedValue(); + + await fileSystem.writeTextFile('/test/file.txt', 'Hello', { + encoding: 'utf-8', + }); + + expect(fs.writeFile).toHaveBeenCalledWith( + '/test/file.txt', + 'Hello', + 'utf-8', + ); + }); + + it('should preserve UTF-16LE BOM when writing back a UTF-16LE file', async () => { + vi.mocked(fs.writeFile).mockResolvedValue(); + + await fileSystem.writeTextFile('/test/file.txt', 'Hello', { + encoding: 'utf-16le', + bom: true, + }); + + // iconv-lite encodes as UTF-16LE; with bom:true the FF FE BOM is prepended + const writeCall = vi.mocked(fs.writeFile).mock.calls[0]; + expect(writeCall[0]).toBe('/test/file.txt'); + expect(writeCall[1]).toBeInstanceOf(Buffer); + const buf = writeCall[1] as Buffer; + // First two bytes must be the UTF-16LE BOM: FF FE + expect(buf[0]).toBe(0xff); + expect(buf[1]).toBe(0xfe); + }); + + it('should not add BOM when writing UTF-16LE file without bom flag', async () => { + vi.mocked(fs.writeFile).mockResolvedValue(); + + await 
fileSystem.writeTextFile('/test/file.txt', 'Hello', { + encoding: 'utf-16le', + bom: false, + }); + + // No BOM prepended — raw iconv-encoded buffer written directly + const writeCall = vi.mocked(fs.writeFile).mock.calls[0]; + expect(writeCall[0]).toBe('/test/file.txt'); + expect(writeCall[1]).toBeInstanceOf(Buffer); + const buf = writeCall[1] as Buffer; + // First two bytes should NOT be FF FE (the UTF-16LE BOM) + expect(!(buf[0] === 0xff && buf[1] === 0xfe)).toBe(true); + }); }); describe('detectFileBOM', () => { diff --git a/packages/core/src/services/fileSystemService.ts b/packages/core/src/services/fileSystemService.ts index 91f36161c..787d68929 100644 --- a/packages/core/src/services/fileSystemService.ts +++ b/packages/core/src/services/fileSystemService.ts @@ -7,6 +7,16 @@ import fs from 'node:fs/promises'; import * as path from 'node:path'; import { globSync } from 'glob'; +import { + readFileWithEncoding, + readFileWithEncodingInfo, +} from '../utils/fileUtils.js'; +import type { FileReadResult } from '../utils/fileUtils.js'; +import { + iconvEncode, + iconvEncodingExists, + isUtf8CompatibleEncoding, +} from '../utils/iconvHelper.js'; /** * Supported file encodings for new files. @@ -33,6 +43,15 @@ export interface FileSystemService { */ readTextFile(filePath: string): Promise; + /** + * Read text content from a file, returning both the content and encoding metadata. + * Combines readTextFile + detectFileBOM + detectFileEncoding into a single I/O pass. + * + * @param filePath - The path to the file to read + * @returns The file content, encoding name, and whether a UTF-8 BOM was present + */ + readTextFileWithInfo(filePath: string): Promise; + /** * Write text content to a file * @@ -74,6 +93,14 @@ export interface WriteTextFileOptions { * @default false */ bom?: boolean; + + /** + * The encoding to use when writing the file. + * If specified and not UTF-8 compatible, iconv-lite will be used to encode. 
+ * This is used to preserve the original encoding of non-UTF-8 files (e.g. GBK, Big5). + * @default undefined (writes as UTF-8) + */ + encoding?: string; } /** @@ -92,12 +119,44 @@ function hasUTF8BOM(buffer: Buffer): boolean { ); } +/** + * Return the BOM byte sequence for a given encoding name, or null if the + * encoding does not use a standard BOM. Used when writing back a file that + * originally had a BOM so the BOM is preserved. + */ +function getBOMBytesForEncoding(encoding: string): Buffer | null { + const lower = encoding.toLowerCase().replace(/[^a-z0-9]/g, ''); + switch (lower) { + case 'utf8': + return Buffer.from([0xef, 0xbb, 0xbf]); + case 'utf16le': + case 'utf16': + return Buffer.from([0xff, 0xfe]); + case 'utf16be': + return Buffer.from([0xfe, 0xff]); + case 'utf32le': + case 'utf32': + return Buffer.from([0xff, 0xfe, 0x00, 0x00]); + case 'utf32be': + return Buffer.from([0x00, 0x00, 0xfe, 0xff]); + default: + return null; + } +} + /** * Standard file system implementation */ export class StandardFileSystemService implements FileSystemService { async readTextFile(filePath: string): Promise { - return fs.readFile(filePath, FileEncoding.UTF8); + // Use encoding-aware reader that handles BOM and non-UTF-8 encodings (e.g. GBK) + return readFileWithEncoding(filePath); + } + + async readTextFileWithInfo(filePath: string): Promise { + // Single I/O pass: returns content, encoding, and BOM flag together, + // eliminating the need for separate detectFileEncoding / detectFileBOM calls. + return readFileWithEncodingInfo(filePath); } async writeTextFile( @@ -106,10 +165,32 @@ export class StandardFileSystemService implements FileSystemService { options?: WriteTextFileOptions, ): Promise { const bom = options?.bom ?? 
false; + const encoding = options?.encoding; - if (bom) { - // Prepend UTF-8 BOM (EF BB BF) - // If content already starts with BOM character, strip it first to avoid double BOM + // Check if a non-UTF-8 encoding is specified and supported by iconv-lite + const isNonUtf8Encoding = + encoding && + !isUtf8CompatibleEncoding(encoding) && + iconvEncodingExists(encoding); + + if (isNonUtf8Encoding) { + // Non-UTF-8 encoding (e.g. GBK, Big5, Shift_JIS, UTF-16LE, UTF-32BE…) + // Use iconv-lite to encode the content. When the file originally had a BOM + // (bom: true), prepend the correct BOM bytes for this encoding so the + // byte-order mark is preserved on write-back. + const encoded = iconvEncode(content, encoding); + if (bom) { + const bomBytes = getBOMBytesForEncoding(encoding); + await fs.writeFile( + filePath, + bomBytes ? Buffer.concat([bomBytes, encoded]) : encoded, + ); + } else { + await fs.writeFile(filePath, encoded); + } + } else if (bom) { + // UTF-8 BOM: prepend EF BB BF + // If content already starts with the BOM character, strip it first to avoid double BOM. const normalizedContent = content.charCodeAt(0) === 0xfeff ? 
content.slice(1) : content; const bomBuffer = Buffer.from([0xef, 0xbb, 0xbf]); diff --git a/packages/core/src/services/loopDetectionService.test.ts b/packages/core/src/services/loopDetectionService.test.ts index c7629e134..31a8699dc 100644 --- a/packages/core/src/services/loopDetectionService.test.ts +++ b/packages/core/src/services/loopDetectionService.test.ts @@ -4,10 +4,8 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'; +import { beforeEach, describe, expect, it, vi } from 'vitest'; import type { Config } from '../config/config.js'; -import type { GeminiClient } from '../core/client.js'; -import type { BaseLlmClient } from '../core/baseLlmClient.js'; import type { ServerGeminiContentEvent, ServerGeminiStreamEvent, @@ -15,7 +13,6 @@ import type { } from '../core/turn.js'; import { GeminiEventType } from '../core/turn.js'; import * as loggers from '../telemetry/loggers.js'; -import { LoopType } from '../telemetry/types.js'; import { LoopDetectionService } from './loopDetectionService.js'; vi.mock('../telemetry/loggers.js', () => ({ @@ -623,142 +620,3 @@ describe('LoopDetectionService', () => { }); }); }); - -describe('LoopDetectionService LLM Checks', () => { - let service: LoopDetectionService; - let mockConfig: Config; - let mockGeminiClient: GeminiClient; - let mockBaseLlmClient: BaseLlmClient; - let abortController: AbortController; - - beforeEach(() => { - mockGeminiClient = { - getHistory: vi.fn().mockReturnValue([]), - } as unknown as GeminiClient; - - mockBaseLlmClient = { - generateJson: vi.fn(), - } as unknown as BaseLlmClient; - - mockConfig = { - getGeminiClient: () => mockGeminiClient, - getBaseLlmClient: () => mockBaseLlmClient, - getDebugMode: () => false, - getDebugLogger: () => ({ - debug: () => {}, - info: () => {}, - warn: () => {}, - error: () => {}, - }), - getTelemetryEnabled: () => true, - getModel: () => 'test-model', - } as unknown as Config; - - service = new 
LoopDetectionService(mockConfig); - abortController = new AbortController(); - vi.clearAllMocks(); - }); - - afterEach(() => { - vi.restoreAllMocks(); - }); - - const advanceTurns = async (count: number) => { - for (let i = 0; i < count; i++) { - await service.turnStarted(abortController.signal); - } - }; - - it('should not trigger LLM check before LLM_CHECK_AFTER_TURNS', async () => { - await advanceTurns(29); - expect(mockBaseLlmClient.generateJson).not.toHaveBeenCalled(); - }); - - it('should trigger LLM check on the 30th turn', async () => { - mockBaseLlmClient.generateJson = vi - .fn() - .mockResolvedValue({ confidence: 0.1 }); - await advanceTurns(30); - expect(mockBaseLlmClient.generateJson).toHaveBeenCalledTimes(1); - expect(mockBaseLlmClient.generateJson).toHaveBeenCalledWith( - expect.objectContaining({ - systemInstruction: expect.any(String), - contents: expect.any(Array), - model: expect.any(String), - schema: expect.any(Object), - promptId: expect.any(String), - }), - ); - }); - - it('should detect a cognitive loop when confidence is high', async () => { - // First check at turn 30 - mockBaseLlmClient.generateJson = vi - .fn() - .mockResolvedValue({ confidence: 0.85, reasoning: 'Repetitive actions' }); - await advanceTurns(30); - expect(mockBaseLlmClient.generateJson).toHaveBeenCalledTimes(1); - - // The confidence of 0.85 will result in a low interval. 
- // The interval will be: 5 + (15 - 5) * (1 - 0.85) = 5 + 10 * 0.15 = 6.5 -> rounded to 7 - await advanceTurns(6); // advance to turn 36 - - mockBaseLlmClient.generateJson = vi - .fn() - .mockResolvedValue({ confidence: 0.95, reasoning: 'Repetitive actions' }); - const finalResult = await service.turnStarted(abortController.signal); // This is turn 37 - - expect(finalResult).toBe(true); - expect(loggers.logLoopDetected).toHaveBeenCalledWith( - mockConfig, - expect.objectContaining({ - 'event.name': 'loop_detected', - loop_type: LoopType.LLM_DETECTED_LOOP, - }), - ); - }); - - it('should not detect a loop when confidence is low', async () => { - mockBaseLlmClient.generateJson = vi - .fn() - .mockResolvedValue({ confidence: 0.5, reasoning: 'Looks okay' }); - await advanceTurns(30); - const result = await service.turnStarted(abortController.signal); - expect(result).toBe(false); - expect(loggers.logLoopDetected).not.toHaveBeenCalled(); - }); - - it('should adjust the check interval based on confidence', async () => { - // Confidence is 0.0, so interval should be MAX_LLM_CHECK_INTERVAL (15) - mockBaseLlmClient.generateJson = vi - .fn() - .mockResolvedValue({ confidence: 0.0 }); - await advanceTurns(30); // First check at turn 30 - expect(mockBaseLlmClient.generateJson).toHaveBeenCalledTimes(1); - - await advanceTurns(14); // Advance to turn 44 - expect(mockBaseLlmClient.generateJson).toHaveBeenCalledTimes(1); - - await service.turnStarted(abortController.signal); // Turn 45 - expect(mockBaseLlmClient.generateJson).toHaveBeenCalledTimes(2); - }); - - it('should handle errors from generateJson gracefully', async () => { - mockBaseLlmClient.generateJson = vi - .fn() - .mockRejectedValue(new Error('API error')); - await advanceTurns(30); - const result = await service.turnStarted(abortController.signal); - expect(result).toBe(false); - expect(loggers.logLoopDetected).not.toHaveBeenCalled(); - }); - - it('should not trigger LLM check when disabled for session', async () => 
{ - service.disableForSession(); - expect(loggers.logLoopDetectionDisabled).toHaveBeenCalledTimes(1); - await advanceTurns(30); - const result = await service.turnStarted(abortController.signal); - expect(result).toBe(false); - expect(mockBaseLlmClient.generateJson).not.toHaveBeenCalled(); - }); -}); diff --git a/packages/core/src/services/loopDetectionService.ts b/packages/core/src/services/loopDetectionService.ts index 9117d0120..d14e4223e 100644 --- a/packages/core/src/services/loopDetectionService.ts +++ b/packages/core/src/services/loopDetectionService.ts @@ -4,7 +4,6 @@ * SPDX-License-Identifier: Apache-2.0 */ -import type { Content } from '@google/genai'; import { createHash } from 'node:crypto'; import type { ServerGeminiStreamEvent } from '../core/turn.js'; import { GeminiEventType } from '../core/turn.js'; @@ -18,59 +17,12 @@ import { LoopType, } from '../telemetry/types.js'; import type { Config } from '../config/config.js'; -import { - isFunctionCall, - isFunctionResponse, -} from '../utils/messageInspectors.js'; -import { DEFAULT_QWEN_MODEL } from '../config/models.js'; -import { createDebugLogger } from '../utils/debugLogger.js'; - -const debugLogger = createDebugLogger('LOOP_DETECTION'); const TOOL_CALL_LOOP_THRESHOLD = 5; const CONTENT_LOOP_THRESHOLD = 10; const CONTENT_CHUNK_SIZE = 50; const MAX_HISTORY_LENGTH = 1000; -/** - * The number of recent conversation turns to include in the history when asking the LLM to check for a loop. - */ -const LLM_LOOP_CHECK_HISTORY_COUNT = 20; - -/** - * The number of turns that must pass in a single prompt before the LLM-based loop check is activated. - */ -const LLM_CHECK_AFTER_TURNS = 30; - -/** - * The default interval, in number of turns, at which the LLM-based loop check is performed. - * This value is adjusted dynamically based on the LLM's confidence. - */ -const DEFAULT_LLM_CHECK_INTERVAL = 3; - -/** - * The minimum interval for LLM-based loop checks. 
- * This is used when the confidence of a loop is high, to check more frequently. - */ -const MIN_LLM_CHECK_INTERVAL = 5; - -/** - * The maximum interval for LLM-based loop checks. - * This is used when the confidence of a loop is low, to check less frequently. - */ -const MAX_LLM_CHECK_INTERVAL = 15; - -const LOOP_DETECTION_SYSTEM_PROMPT = `You are a sophisticated AI diagnostic agent specializing in identifying when a conversational AI is stuck in an unproductive state. Your task is to analyze the provided conversation history and determine if the assistant has ceased to make meaningful progress. - -An unproductive state is characterized by one or more of the following patterns over the last 5 or more assistant turns: - -Repetitive Actions: The assistant repeats the same tool calls or conversational responses a decent number of times. This includes simple loops (e.g., tool_A, tool_A, tool_A) and alternating patterns (e.g., tool_A, tool_B, tool_A, tool_B, ...). - -Cognitive Loop: The assistant seems unable to determine the next logical step. It might express confusion, repeatedly ask the same questions, or generate responses that don't logically follow from the previous turns, indicating it's stuck and not advancing the task. - -Crucially, differentiate between a true unproductive state and legitimate, incremental progress. -For example, a series of 'tool_A' or 'tool_B' tool calls that make small, distinct changes to the same file (like adding docstrings to functions one by one) is considered forward progress and is NOT a loop. A loop would be repeatedly replacing the same text with the same content, or cycling between a small set of files with no net change.`; - /** * Service for detecting and preventing infinite loops in AI responses. * Monitors tool call repetitions and content sentence repetitions. 
@@ -90,11 +42,6 @@ export class LoopDetectionService { private loopDetected = false; private inCodeBlock = false; - // LLM loop track tracking - private turnsInCurrentPrompt = 0; - private llmCheckInterval = DEFAULT_LLM_CHECK_INTERVAL; - private lastCheckTurn = 0; - // Session-level disable flag private disabledForSession = false; @@ -145,33 +92,6 @@ export class LoopDetectionService { return this.loopDetected; } - /** - * Signals the start of a new turn in the conversation. - * - * This method increments the turn counter and, if specific conditions are met, - * triggers an LLM-based check to detect potential conversation loops. The check - * is performed periodically based on the `llmCheckInterval`. - * - * @param signal - An AbortSignal to allow for cancellation of the asynchronous LLM check. - * @returns A promise that resolves to `true` if a loop is detected, and `false` otherwise. - */ - async turnStarted(signal: AbortSignal) { - if (this.disabledForSession) { - return false; - } - this.turnsInCurrentPrompt++; - - if ( - this.turnsInCurrentPrompt >= LLM_CHECK_AFTER_TURNS && - this.turnsInCurrentPrompt - this.lastCheckTurn >= this.llmCheckInterval - ) { - this.lastCheckTurn = this.turnsInCurrentPrompt; - return await this.checkForLoopWithLLM(signal); - } - - return false; - } - private checkToolCallLoop(toolCall: { name: string; args: object }): boolean { const key = this.getToolCallKey(toolCall); if (this.lastToolCallKey === key) { @@ -371,94 +291,6 @@ export class LoopDetectionService { return originalChunk === currentChunk; } - private trimRecentHistory(recentHistory: Content[]): Content[] { - // A function response must be preceded by a function call. - // Continuously removes dangling function calls from the end of the history - // until the last turn is not a function call. 
- while ( - recentHistory.length > 0 && - isFunctionCall(recentHistory[recentHistory.length - 1]) - ) { - recentHistory.pop(); - } - - // A function response should follow a function call. - // Continuously removes leading function responses from the beginning of history - // until the first turn is not a function response. - while (recentHistory.length > 0 && isFunctionResponse(recentHistory[0])) { - recentHistory.shift(); - } - - return recentHistory; - } - - private async checkForLoopWithLLM(signal: AbortSignal) { - const recentHistory = this.config - .getGeminiClient() - .getHistory() - .slice(-LLM_LOOP_CHECK_HISTORY_COUNT); - - const trimmedHistory = this.trimRecentHistory(recentHistory); - - const taskPrompt = `Please analyze the conversation history to determine the possibility that the conversation is stuck in a repetitive, non-productive state. Provide your response in the requested JSON format.`; - - const contents = [ - ...trimmedHistory, - { role: 'user', parts: [{ text: taskPrompt }] }, - ]; - const schema: Record = { - type: 'object', - properties: { - reasoning: { - type: 'string', - description: - 'Your reasoning on if the conversation is looping without forward progress.', - }, - confidence: { - type: 'number', - description: - 'A number between 0.0 and 1.0 representing your confidence that the conversation is in an unproductive state.', - }, - }, - required: ['reasoning', 'confidence'], - }; - let result; - try { - result = await this.config.getBaseLlmClient().generateJson({ - contents, - schema, - model: this.config.getModel() || DEFAULT_QWEN_MODEL, - systemInstruction: LOOP_DETECTION_SYSTEM_PROMPT, - abortSignal: signal, - promptId: this.promptId, - }); - } catch (e) { - // Do nothing, treat it as a non-loop. 
- this.config.getDebugLogger().error(e); - return false; - } - - if (typeof result['confidence'] === 'number') { - if (result['confidence'] > 0.9) { - if (typeof result['reasoning'] === 'string' && result['reasoning']) { - debugLogger.warn(result['reasoning']); - } - logLoopDetected( - this.config, - new LoopDetectedEvent(LoopType.LLM_DETECTED_LOOP, this.promptId), - ); - return true; - } else { - this.llmCheckInterval = Math.round( - MIN_LLM_CHECK_INTERVAL + - (MAX_LLM_CHECK_INTERVAL - MIN_LLM_CHECK_INTERVAL) * - (1 - result['confidence']), - ); - } - } - return false; - } - /** * Resets all loop detection state. */ @@ -466,7 +298,6 @@ export class LoopDetectionService { this.promptId = promptId; this.resetToolCallCount(); this.resetContentTracking(); - this.resetLlmCheckTracking(); this.loopDetected = false; } @@ -482,10 +313,4 @@ export class LoopDetectionService { this.contentStats.clear(); this.lastContentIndex = 0; } - - private resetLlmCheckTracking(): void { - this.turnsInCurrentPrompt = 0; - this.llmCheckInterval = DEFAULT_LLM_CHECK_INTERVAL; - this.lastCheckTurn = 0; - } } diff --git a/packages/core/src/skills/skill-load.ts b/packages/core/src/skills/skill-load.ts index dc6f2c616..639b85071 100644 --- a/packages/core/src/skills/skill-load.ts +++ b/packages/core/src/skills/skill-load.ts @@ -3,6 +3,7 @@ import * as fs from 'fs/promises'; import * as path from 'path'; import { parse as parseYaml } from '../utils/yaml-parser.js'; import { createDebugLogger } from '../utils/debugLogger.js'; +import { normalizeContent } from '../utils/textUtils.js'; const debugLogger = createDebugLogger('SKILL_LOAD'); @@ -56,21 +57,6 @@ export async function loadSkillsFromDir( } } -/** - * Normalizes skill file content for consistent parsing across platforms. - * - Strips UTF-8 BOM to ensure frontmatter starts at the first character. - * - Normalizes line endings so skills authored on Windows (CRLF) parse correctly. 
- */ -function normalizeSkillFileContent(content: string): string { - // Strip UTF-8 BOM to ensure frontmatter starts at the first character. - let normalized = content.replace(/^\uFEFF/, ''); - - // Normalize line endings so skills authored on Windows (CRLF) parse correctly. - normalized = normalized.replace(/\r\n/g, '\n').replace(/\r/g, '\n'); - - return normalized; -} - export function parseSkillContent( content: string, filePath: string, @@ -78,7 +64,7 @@ export function parseSkillContent( debugLogger.debug(`Parsing skill content from: ${filePath}`); // Normalize content to handle BOM and CRLF line endings - const normalizedContent = normalizeSkillFileContent(content); + const normalizedContent = normalizeContent(content); // Split frontmatter and content // Use (?:\n|$) to allow frontmatter ending with or without trailing newline diff --git a/packages/core/src/skills/skill-manager.ts b/packages/core/src/skills/skill-manager.ts index 8ee69e9a0..05eabdd5a 100644 --- a/packages/core/src/skills/skill-manager.ts +++ b/packages/core/src/skills/skill-manager.ts @@ -20,6 +20,7 @@ import { SkillError, SkillErrorCode } from './types.js'; import type { Config } from '../config/config.js'; import { validateConfig } from './skill-load.js'; import { createDebugLogger } from '../utils/debugLogger.js'; +import { normalizeContent } from '../utils/textUtils.js'; const debugLogger = createDebugLogger('SKILL_MANAGER'); @@ -333,7 +334,7 @@ export class SkillManager { level: SkillLevel, ): SkillConfig { try { - const normalizedContent = normalizeSkillFileContent(content); + const normalizedContent = normalizeContent(content); // Split frontmatter and content const frontmatterRegex = /^---\n([\s\S]*?)\n---(?:\n|$)([\s\S]*)$/; @@ -649,13 +650,3 @@ export class SkillManager { } } } - -function normalizeSkillFileContent(content: string): string { - // Strip UTF-8 BOM to ensure frontmatter starts at the first character. 
- let normalized = content.replace(/^\uFEFF/, ''); - - // Normalize line endings so skills authored on Windows (CRLF) parse correctly. - normalized = normalized.replace(/\r\n/g, '\n').replace(/\r/g, '\n'); - - return normalized; -} diff --git a/packages/core/src/subagents/subagent-manager.test.ts b/packages/core/src/subagents/subagent-manager.test.ts index e04964ea1..cf3afb4c8 100644 --- a/packages/core/src/subagents/subagent-manager.test.ts +++ b/packages/core/src/subagents/subagent-manager.test.ts @@ -193,6 +193,21 @@ You are a helpful assistant. expect(config.filePath).toBe(validConfig.filePath); }); + it('should parse valid markdown content with CRLF line endings', () => { + const markdownWithCRLF = `---\r\nname: test-agent\r\ndescription: A test subagent\r\n---\r\n\r\nYou are a helpful assistant.\r\n`; + const config = manager.parseSubagentContent( + markdownWithCRLF, + validConfig.filePath!, + 'project', + ); + + expect(config.name).toBe('test-agent'); + expect(config.description).toBe('A test subagent'); + // The system prompt logic applies .trim(), so the trailing \r is removed regardless, + // but the central test is that frontmatterRegex didn't throw an error. 
+ expect(config.systemPrompt).toBe('You are a helpful assistant.'); + }); + it('should parse content with tools', () => { const markdownWithTools = `--- name: test-agent diff --git a/packages/core/src/subagents/subagent-manager.ts b/packages/core/src/subagents/subagent-manager.ts index fea33040c..0552fa60c 100644 --- a/packages/core/src/subagents/subagent-manager.ts +++ b/packages/core/src/subagents/subagent-manager.ts @@ -29,6 +29,7 @@ import { SubagentValidator } from './validation.js'; import { SubAgentScope } from './subagent.js'; import type { Config } from '../config/config.js'; import { createDebugLogger } from '../utils/debugLogger.js'; +import { normalizeContent } from '../utils/textUtils.js'; const debugLogger = createDebugLogger('SUBAGENT_MANAGER'); import { BuiltinAgentRegistry } from './builtin-agents.js'; @@ -908,9 +909,11 @@ function parseSubagentContent( validator: SubagentValidator, ): SubagentConfig { try { + const normalizedContent = normalizeContent(content); + // Split frontmatter and content const frontmatterRegex = /^---\n([\s\S]*?)\n---\n([\s\S]*)$/; - const match = content.match(frontmatterRegex); + const match = normalizedContent.match(frontmatterRegex); if (!match) { throw new Error('Invalid format: missing YAML frontmatter'); diff --git a/packages/core/src/telemetry/types.ts b/packages/core/src/telemetry/types.ts index 98c8d5cac..d9c6b535d 100644 --- a/packages/core/src/telemetry/types.ts +++ b/packages/core/src/telemetry/types.ts @@ -362,7 +362,6 @@ export class RipgrepFallbackEvent implements BaseTelemetryEvent { export enum LoopType { CONSECUTIVE_IDENTICAL_TOOL_CALLS = 'consecutive_identical_tool_calls', CHANTING_IDENTICAL_SENTENCES = 'chanting_identical_sentences', - LLM_DETECTED_LOOP = 'llm_detected_loop', } export class LoopDetectedEvent implements BaseTelemetryEvent { diff --git a/packages/core/src/tools/edit.ts b/packages/core/src/tools/edit.ts index 016eb2854..61a318190 100644 --- a/packages/core/src/tools/edit.ts +++ 
b/packages/core/src/tools/edit.ts @@ -108,6 +108,10 @@ interface CalculatedEdit { occurrences: number; error?: { display: string; raw: string; type: ToolErrorType }; isNewFile: boolean; + /** Detected encoding of the existing file (e.g. 'utf-8', 'gbk') */ + encoding: string; + /** Whether the existing file has a UTF-8 BOM */ + bom: boolean; } class EditToolInvocation implements ToolInvocation { @@ -134,17 +138,22 @@ class EditToolInvocation implements ToolInvocation { let finalNewString = params.new_string; let finalOldString = params.old_string; let occurrences = 0; + let encoding = 'utf-8'; + let bom = false; let error: | { display: string; raw: string; type: ToolErrorType } | undefined = undefined; try { - currentContent = await this.config + const fileInfo = await this.config .getFileSystemService() - .readTextFile(params.file_path); + .readTextFileWithInfo(params.file_path); // Normalize line endings to LF for consistent processing. - currentContent = currentContent.replace(/\r\n/g, '\n'); + currentContent = fileInfo.content.replace(/\r\n/g, '\n'); fileExists = true; + // Encoding and BOM are returned from the same I/O pass, avoiding redundant reads. + encoding = fileInfo.encoding; + bom = fileInfo.bom; } catch (err: unknown) { if (!isNodeError(err) || err.code !== 'ENOENT') { // Rethrow unexpected FS errors (permissions, etc.) 
@@ -238,6 +247,8 @@ class EditToolInvocation implements ToolInvocation { occurrences, error, isNewFile, + encoding, + bom, }; } @@ -373,7 +384,7 @@ class EditToolInvocation implements ToolInvocation { this.ensureParentDirectoriesExist(this.params.file_path); // For new files, apply default file encoding setting - // For existing files, keep original content as-is (including any BOM character) + // For existing files, preserve the original encoding (BOM and charset) if (editData.isNewFile) { const useBOM = this.config.getDefaultFileEncoding() === FileEncoding.UTF8_BOM; @@ -385,7 +396,10 @@ class EditToolInvocation implements ToolInvocation { } else { await this.config .getFileSystemService() - .writeTextFile(this.params.file_path, editData.newContent); + .writeTextFile(this.params.file_path, editData.newContent, { + bom: editData.bom, + encoding: editData.encoding, + }); } const fileName = path.basename(this.params.file_path); diff --git a/packages/core/src/tools/write-file.test.ts b/packages/core/src/tools/write-file.test.ts index b0d7a2b0d..e096b0a72 100644 --- a/packages/core/src/tools/write-file.test.ts +++ b/packages/core/src/tools/write-file.test.ts @@ -759,6 +759,7 @@ describe('WriteFileTool', () => { // Verify writeTextFile was called with bom: true expect(writeSpy).toHaveBeenCalledWith(filePath, newContent, { bom: true, + encoding: 'utf-8', }); // Cleanup @@ -785,6 +786,7 @@ describe('WriteFileTool', () => { // Verify writeTextFile was called with bom: false expect(writeSpy).toHaveBeenCalledWith(filePath, newContent, { bom: false, + encoding: 'utf-8', }); // Cleanup diff --git a/packages/core/src/tools/write-file.ts b/packages/core/src/tools/write-file.ts index 1ccb7bf0b..4085e3b69 100644 --- a/packages/core/src/tools/write-file.ts +++ b/packages/core/src/tools/write-file.ts @@ -243,17 +243,25 @@ class WriteFileToolInvocation extends BaseToolInvocation< // Check if file exists and has BOM to preserve encoding // For new files, use the configured default 
encoding let useBOM = false; + let detectedEncoding: string | undefined; if (!isNewFile) { - useBOM = await this.config + // Use readTextFileWithInfo for a single I/O pass that returns encoding + // and BOM metadata together, avoiding separate detectFileBOM / detectFileEncoding calls. + const fileInfo = await this.config .getFileSystemService() - .detectFileBOM(file_path); + .readTextFileWithInfo(file_path); + useBOM = fileInfo.bom; + detectedEncoding = fileInfo.encoding; } else { useBOM = this.config.getDefaultFileEncoding() === FileEncoding.UTF8_BOM; } await this.config .getFileSystemService() - .writeTextFile(file_path, fileContent, { bom: useBOM }); + .writeTextFile(file_path, fileContent, { + bom: useBOM, + encoding: detectedEncoding, + }); // Generate diff for display result const fileName = path.basename(file_path); diff --git a/packages/core/src/utils/fileUtils.test.ts b/packages/core/src/utils/fileUtils.test.ts index b21ee79e2..6dc38e4d7 100644 --- a/packages/core/src/utils/fileUtils.test.ts +++ b/packages/core/src/utils/fileUtils.test.ts @@ -28,6 +28,8 @@ import { processSingleFileContent, detectBOM, readFileWithEncoding, + readFileWithEncodingInfo, + detectFileEncoding, fileExists, } from './fileUtils.js'; import type { Config } from '../config/config.js'; @@ -407,6 +409,153 @@ describe('fileUtils', () => { const result = await readFileWithEncoding(filePath); expect(result).toBe(''); }); + + it('should read GBK-encoded file with Chinese characters correctly', async () => { + // GBK encoding of "你好世界这是中文内容用于测试编码检测" + // Needs enough content for chardet to reliably detect the encoding + const gbkBuffer = Buffer.from([ + 0xc4, 0xe3, 0xba, 0xc3, 0xca, 0xc0, 0xbd, 0xe7, 0xd5, 0xe2, 0xca, + 0xc7, 0xd6, 0xd0, 0xce, 0xc4, 0xc4, 0xda, 0xc8, 0xdd, 0xd3, 0xc3, + 0xd3, 0xda, 0xb2, 0xe2, 0xca, 0xd4, 0xb1, 0xe0, 0xc2, 0xeb, 0xbc, + 0xec, 0xb2, 0xe2, + ]); + const filePath = path.join(testDir, 'gbk-chinese.txt'); + await fsPromises.writeFile(filePath, gbkBuffer); + + 
const result = await readFileWithEncoding(filePath); + expect(result).toBe('你好世界这是中文内容用于测试编码检测'); + }); + + it('should read GBK-encoded file with mixed ASCII and Chinese correctly', async () => { + // GBK encoding of "// 这是注释内容用于测试\nhello你好世界测试中文编码检测\n函数返回值正确" + // Needs enough Chinese content for chardet to reliably detect as GB18030/GBK + const gbkBuffer = Buffer.from([ + 0x2f, 0x2f, 0x20, 0xd5, 0xe2, 0xca, 0xc7, 0xd7, 0xa2, 0xca, 0xcd, + 0xc4, 0xda, 0xc8, 0xdd, 0xd3, 0xc3, 0xd3, 0xda, 0xb2, 0xe2, 0xca, + 0xd4, 0x0a, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0xc4, 0xe3, 0xba, 0xc3, + 0xca, 0xc0, 0xbd, 0xe7, 0xb2, 0xe2, 0xca, 0xd4, 0xd6, 0xd0, 0xce, + 0xc4, 0xb1, 0xe0, 0xc2, 0xeb, 0xbc, 0xec, 0xb2, 0xe2, 0x0a, 0xba, + 0xaf, 0xca, 0xfd, 0xb7, 0xb5, 0xbb, 0xd8, 0xd6, 0xb5, 0xd5, 0xfd, + 0xc8, 0xb7, + ]); + const filePath = path.join(testDir, 'gbk-mixed.txt'); + await fsPromises.writeFile(filePath, gbkBuffer); + + const result = await readFileWithEncoding(filePath); + expect(result).toContain('hello'); + expect(result).toContain('你好世界'); + expect(result).toContain('函数返回值正确'); + }); + }); + + describe('readFileWithEncodingInfo', () => { + it('should return bom: false and encoding utf-8 for plain UTF-8 file', async () => { + const filePath = path.join(testDir, 'info-utf8.txt'); + await fsPromises.writeFile(filePath, 'Hello', 'utf8'); + + const result = await readFileWithEncodingInfo(filePath); + expect(result.content).toBe('Hello'); + expect(result.encoding).toBe('utf-8'); + expect(result.bom).toBe(false); + }); + + it('should return bom: true and encoding utf-8 for UTF-8 BOM file', async () => { + const utf8Bom = Buffer.from([0xef, 0xbb, 0xbf]); + const filePath = path.join(testDir, 'info-utf8-bom.txt'); + await fsPromises.writeFile( + filePath, + Buffer.concat([utf8Bom, Buffer.from('Hello', 'utf8')]), + ); + + const result = await readFileWithEncodingInfo(filePath); + expect(result.content).toBe('Hello'); + expect(result.encoding).toBe('utf-8'); + expect(result.bom).toBe(true); 
+ }); + + it('should return bom: true and encoding utf-16le for UTF-16LE BOM file', async () => { + const utf16leBom = Buffer.from([0xff, 0xfe]); + const utf16leContent = Buffer.from('Hi', 'utf16le'); + const filePath = path.join(testDir, 'info-utf16le.txt'); + await fsPromises.writeFile( + filePath, + Buffer.concat([utf16leBom, utf16leContent]), + ); + + const result = await readFileWithEncodingInfo(filePath); + expect(result.content).toBe('Hi'); + expect(result.encoding).toBe('utf-16le'); + // Non-UTF-8 BOM should also be flagged so it is preserved on write-back + expect(result.bom).toBe(true); + }); + + it('should return bom: false for GBK file (no BOM)', async () => { + const gbkBuffer = Buffer.from([ + 0xc4, 0xe3, 0xba, 0xc3, 0xca, 0xc0, 0xbd, 0xe7, 0xd5, 0xe2, 0xca, + 0xc7, 0xd6, 0xd0, 0xce, 0xc4, 0xc4, 0xda, 0xc8, 0xdd, 0xd3, 0xc3, + 0xd3, 0xda, 0xb2, 0xe2, 0xca, 0xd4, 0xb1, 0xe0, 0xc2, 0xeb, 0xbc, + 0xec, 0xb2, 0xe2, + ]); + const filePath = path.join(testDir, 'info-gbk.txt'); + await fsPromises.writeFile(filePath, gbkBuffer); + + const result = await readFileWithEncodingInfo(filePath); + expect(result.bom).toBe(false); + expect(result.encoding).toBe('gb18030'); + expect(result.content).toBe('你好世界这是中文内容用于测试编码检测'); + }); + }); + + describe('detectFileEncoding', () => { + it('should detect UTF-8 for plain ASCII file', async () => { + const filePath = path.join(testDir, 'ascii.txt'); + await fsPromises.writeFile(filePath, 'Hello World', 'utf8'); + + const encoding = await detectFileEncoding(filePath); + expect(encoding).toBe('utf-8'); + }); + + it('should detect UTF-8 for file with UTF-8 BOM', async () => { + const utf8Bom = Buffer.from([0xef, 0xbb, 0xbf]); + const content = Buffer.from('Hello', 'utf8'); + const filePath = path.join(testDir, 'utf8-bom-detect.txt'); + await fsPromises.writeFile(filePath, Buffer.concat([utf8Bom, content])); + + const encoding = await detectFileEncoding(filePath); + expect(encoding).toBe('utf-8'); + }); + + it('should detect GBK 
encoding for Chinese text in GBK', async () => { + // GBK encoding of "你好世界这是中文内容用于测试编码检测" + // Needs enough content for chardet to reliably detect + const gbkBuffer = Buffer.from([ + 0xc4, 0xe3, 0xba, 0xc3, 0xca, 0xc0, 0xbd, 0xe7, 0xd5, 0xe2, 0xca, + 0xc7, 0xd6, 0xd0, 0xce, 0xc4, 0xc4, 0xda, 0xc8, 0xdd, 0xd3, 0xc3, + 0xd3, 0xda, 0xb2, 0xe2, 0xca, 0xd4, 0xb1, 0xe0, 0xc2, 0xeb, 0xbc, + 0xec, 0xb2, 0xe2, + ]); + const filePath = path.join(testDir, 'gbk-detect.txt'); + await fsPromises.writeFile(filePath, gbkBuffer); + + const encoding = await detectFileEncoding(filePath); + // chardet detects GBK as 'gb18030' (its superset) + expect(encoding).toBe('gb18030'); + }); + + it('should return utf-8 for empty file', async () => { + const filePath = path.join(testDir, 'empty-detect.txt'); + await fsPromises.writeFile(filePath, ''); + + const encoding = await detectFileEncoding(filePath); + expect(encoding).toBe('utf-8'); + }); + + it('should return utf-8 for non-existent file', async () => { + const filePath = path.join(testDir, 'nonexistent-detect.txt'); + + const encoding = await detectFileEncoding(filePath); + expect(encoding).toBe('utf-8'); + }); }); describe('isBinaryFile with BOM awareness', () => { diff --git a/packages/core/src/utils/fileUtils.ts b/packages/core/src/utils/fileUtils.ts index aab6935cb..05de408ef 100644 --- a/packages/core/src/utils/fileUtils.ts +++ b/packages/core/src/utils/fileUtils.ts @@ -9,10 +9,16 @@ import fsPromises from 'node:fs/promises'; import path from 'node:path'; import type { PartUnion } from '@google/genai'; import mime from 'mime/lite'; +import { + iconvDecode, + iconvEncodingExists, + isUtf8CompatibleEncoding, +} from './iconvHelper.js'; import { ToolErrorType } from '../tools/tool-error.js'; import { BINARY_EXTENSIONS } from './ignorePatterns.js'; import type { Config } from '../config/config.js'; import { createDebugLogger } from './debugLogger.js'; +import { detectEncodingFromBuffer } from './systemEncoding.js'; const debugLogger = 
createDebugLogger('FILE_UTILS'); @@ -118,23 +124,41 @@ function decodeUTF32(buf: Buffer, littleEndian: boolean): string { } /** - * Read a file as text, honoring BOM encodings (UTF‑8/16/32) and stripping the BOM. - * Falls back to utf8 when no BOM is present. + * Check whether a buffer is valid UTF-8 by attempting a strict decode. + * If any invalid byte sequence is encountered, TextDecoder with `fatal: true` throws. */ -export async function readFileWithEncoding(filePath: string): Promise { - // Read the file once; detect BOM and decode from the single buffer. - const full = await fs.promises.readFile(filePath); - if (full.length === 0) return ''; - - const bom = detectBOM(full); - if (!bom) { - // No BOM → treat as UTF‑8 - return full.toString('utf8'); +function isValidUtf8(buffer: Buffer): boolean { + try { + new TextDecoder('utf-8', { fatal: true }).decode(buffer); + return true; + } catch { + return false; } +} - // Strip BOM and decode per encoding - const content = full.subarray(bom.bomLength); - switch (bom.encoding) { +/** + * Result of reading a file with encoding detection. + */ +export interface FileReadResult { + /** Decoded text content of the file (BOM stripped if present). */ + content: string; + /** Detected encoding name (e.g. 'utf-8', 'gb18030', 'utf-16le'). */ + encoding: string; + /** + * Whether the file had a Unicode BOM (UTF-8, UTF-16 LE/BE, or UTF-32 LE/BE). + * When true, the same BOM should be re-written on save to preserve the file's + * original byte-order mark. + */ + bom: boolean; +} + +/** + * Internal helper: decode a buffer given a BOMInfo. + * Returns the decoded string for each supported BOM encoding. 
+ */ +function decodeBOMBuffer(buf: Buffer, bomInfo: BOMInfo): string { + const content = buf.subarray(bomInfo.bomLength); + switch (bomInfo.encoding) { case 'utf8': return content.toString('utf8'); case 'utf16le': @@ -151,6 +175,153 @@ export async function readFileWithEncoding(filePath: string): Promise { } } +/** + * Map a BOMInfo encoding to a canonical encoding name string. + */ +function bomEncodingToName(bomEncoding: UnicodeEncoding): string { + switch (bomEncoding) { + case 'utf8': + return 'utf-8'; + case 'utf16le': + return 'utf-16le'; + case 'utf16be': + return 'utf-16be'; + case 'utf32le': + return 'utf-32le'; + case 'utf32be': + return 'utf-32be'; + default: + return 'utf-8'; + } +} + +/** + * Read a file as text, honoring BOM encodings (UTF‑8/16/32) and stripping the BOM. + * For files without BOM, validates UTF-8 first. If invalid UTF-8, uses chardet + * to detect encoding (e.g. GBK, Big5, Shift_JIS) and iconv-lite to decode. + * Falls back to utf8 when detection fails. + * + * Returns both the decoded content and the detected encoding/BOM information + * in a single I/O pass, avoiding redundant file reads. + */ +export async function readFileWithEncodingInfo( + filePath: string, +): Promise { + // Read the file once; detect BOM and decode from the single buffer. + const full = await fs.promises.readFile(filePath); + if (full.length === 0) return { content: '', encoding: 'utf-8', bom: false }; + + const bomInfo = detectBOM(full); + if (bomInfo) { + return { + content: decodeBOMBuffer(full, bomInfo), + encoding: bomEncodingToName(bomInfo.encoding), + // Mark bom: true for all Unicode BOM variants (UTF-8/16/32) so that + // the BOM is re-written on save and the file's original format is preserved. 
+ bom: true, + }; + } + + // No BOM — check if it's valid UTF-8 first (fast path for the common case) + if (isValidUtf8(full)) { + return { content: full.toString('utf8'), encoding: 'utf-8', bom: false }; + } + + // Not valid UTF-8 — try chardet-based encoding detection + const detected = detectEncodingFromBuffer(full); + if (detected && !isUtf8CompatibleEncoding(detected)) { + try { + if (iconvEncodingExists(detected)) { + return { + content: iconvDecode(full, detected), + encoding: detected, + bom: false, + }; + } + } catch (e) { + debugLogger.warn( + `Failed to decode file ${filePath} as ${detected}: ${e instanceof Error ? e.message : String(e)}`, + ); + } + } + + // Final fallback: UTF-8 with replacement characters + return { content: full.toString('utf8'), encoding: 'utf-8', bom: false }; +} + +/** + * Read a file as text, honoring BOM encodings (UTF‑8/16/32) and stripping the BOM. + * For files without BOM, validates UTF-8 first. If invalid UTF-8, uses chardet + * to detect encoding (e.g. GBK, Big5, Shift_JIS) and iconv-lite to decode. + * Falls back to utf8 when detection fails. + */ +export async function readFileWithEncoding(filePath: string): Promise { + const result = await readFileWithEncodingInfo(filePath); + return result.content; +} + +/** + * Detect the encoding of a file by reading a sample from its beginning. + * Returns the encoding name (e.g. 'utf-8', 'gbk', 'shift_jis'). + * Uses BOM detection first, then UTF-8 validation, then chardet as fallback. 
+ */ +export async function detectFileEncoding(filePath: string): Promise { + let fh: fs.promises.FileHandle | null = null; + try { + fh = await fs.promises.open(filePath, 'r'); + const stats = await fh.stat(); + if (stats.size === 0) return 'utf-8'; + + // Read a sample (up to 8KB) for detection + const sampleSize = Math.min(8192, stats.size); + const buf = Buffer.alloc(sampleSize); + const { bytesRead } = await fh.read(buf, 0, sampleSize, 0); + if (bytesRead === 0) return 'utf-8'; + const sample = buf.subarray(0, bytesRead); + + // 1. Check for BOM + const bom = detectBOM(sample); + if (bom) { + switch (bom.encoding) { + case 'utf8': + return 'utf-8'; + case 'utf16le': + return 'utf-16le'; + case 'utf16be': + return 'utf-16be'; + case 'utf32le': + return 'utf-32le'; + case 'utf32be': + return 'utf-32be'; + default: + return 'utf-8'; + } + } + + // 2. Validate UTF-8 + if (isValidUtf8(sample)) return 'utf-8'; + + // 3. Use chardet for detection + const detected = detectEncodingFromBuffer(sample); + if (detected && !isUtf8CompatibleEncoding(detected)) { + return detected; + } + + return 'utf-8'; + } catch { + // If file can't be read, default to UTF-8 + return 'utf-8'; + } finally { + if (fh) { + try { + await fh.close(); + } catch { + // Ignore close errors + } + } + } +} + /** * Looks up the specific MIME type for a file path. * @param filePath Path to the file. diff --git a/packages/core/src/utils/iconvHelper.ts b/packages/core/src/utils/iconvHelper.ts new file mode 100644 index 000000000..12c1a56c8 --- /dev/null +++ b/packages/core/src/utils/iconvHelper.ts @@ -0,0 +1,65 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +/** + * Helper module to bridge iconv-lite CJS module with our ESM codebase. + * iconv-lite v0.6.x uses ambient `declare module` type declarations + * that are incompatible with NodeNext module resolution. + * This module provides properly-typed wrappers. 
+ */ + +interface IconvLite { + decode(buffer: Buffer, encoding: string): string; + encode(content: string, encoding: string): Buffer; + encodingExists(encoding: string): boolean; +} + +// iconv-lite is a CJS module. Under NodeNext resolution, its ambient type +// declarations don't map correctly. We import the default export (which is +// the CJS module.exports object) and cast it to a proper interface. +import iconvModule from 'iconv-lite'; +const iconvLite: IconvLite = iconvModule as unknown as IconvLite; + +/** + * Decode a buffer using the specified encoding. + * @param buffer The buffer to decode + * @param encoding The encoding to use (e.g. 'gbk', 'big5', 'shift_jis') + * @returns The decoded string + */ +export function iconvDecode(buffer: Buffer, encoding: string): string { + return iconvLite.decode(buffer, encoding); +} + +/** + * Encode a string to a buffer using the specified encoding. + * @param content The string to encode + * @param encoding The encoding to use (e.g. 'gbk', 'big5', 'shift_jis') + * @returns The encoded buffer + */ +export function iconvEncode(content: string, encoding: string): Buffer { + return iconvLite.encode(content, encoding); +} + +/** + * Check if an encoding is supported by iconv-lite. + * @param encoding The encoding name to check + * @returns True if the encoding is supported + */ +export function iconvEncodingExists(encoding: string): boolean { + return iconvLite.encodingExists(encoding); +} + +/** + * Check whether an encoding name represents a UTF-8 compatible encoding + * that Node's Buffer can handle natively without iconv-lite. + * Normalizes encoding names (e.g. 'utf-8', 'UTF8', 'us-ascii' all match). 
+ * @param encoding The encoding name to check + * @returns True if the encoding is UTF-8 or ASCII compatible + */ +export function isUtf8CompatibleEncoding(encoding: string): boolean { + const lower = encoding.toLowerCase().replace(/[^a-z0-9]/g, ''); + return lower === 'utf8' || lower === 'ascii' || lower === 'usascii'; +} diff --git a/packages/core/src/utils/ignorePatterns.test.ts b/packages/core/src/utils/ignorePatterns.test.ts index 646c4b6bb..722f72edb 100644 --- a/packages/core/src/utils/ignorePatterns.test.ts +++ b/packages/core/src/utils/ignorePatterns.test.ts @@ -14,7 +14,7 @@ import type { Config } from '../config/config.js'; // Mock the memoryTool module vi.mock('../tools/memoryTool.js', () => ({ - getCurrentGeminiMdFilename: vi.fn(() => 'GEMINI.md'), + getAllGeminiMdFilenames: vi.fn(() => ['GEMINI.md', 'AGENTS.md']), })); describe('FileExclusions', () => { @@ -56,6 +56,7 @@ describe('FileExclusions', () => { // Should include dynamic patterns expect(patterns).toContain('**/GEMINI.md'); + expect(patterns).toContain('**/AGENTS.md'); }); it('should respect includeDefaults option', () => { @@ -68,6 +69,7 @@ describe('FileExclusions', () => { expect(patterns).not.toContain('**/node_modules/**'); expect(patterns).not.toContain('**/.git/**'); expect(patterns).not.toContain('**/GEMINI.md'); + expect(patterns).not.toContain('**/AGENTS.md'); expect(patterns).toHaveLength(0); }); @@ -101,7 +103,9 @@ describe('FileExclusions', () => { }); expect(patternsWithDynamic).toContain('**/GEMINI.md'); + expect(patternsWithDynamic).toContain('**/AGENTS.md'); expect(patternsWithoutDynamic).not.toContain('**/GEMINI.md'); + expect(patternsWithoutDynamic).not.toContain('**/AGENTS.md'); }); }); @@ -114,6 +118,7 @@ describe('FileExclusions', () => { expect(patterns).toContain('**/node_modules/**'); expect(patterns).toContain('**/.git/**'); expect(patterns).toContain('**/GEMINI.md'); + expect(patterns).toContain('**/AGENTS.md'); // Should include additional excludes 
expect(patterns).toContain('**/*.log'); diff --git a/packages/core/src/utils/ignorePatterns.ts b/packages/core/src/utils/ignorePatterns.ts index 9f9776db5..b4a4c2e40 100644 --- a/packages/core/src/utils/ignorePatterns.ts +++ b/packages/core/src/utils/ignorePatterns.ts @@ -6,7 +6,7 @@ import path from 'node:path'; import type { Config } from '../config/config.js'; -import { getCurrentGeminiMdFilename } from '../tools/memoryTool.js'; +import { getAllGeminiMdFilenames } from '../tools/memoryTool.js'; /** * Common ignore patterns used across multiple tools for basic exclusions. @@ -119,7 +119,7 @@ export interface ExcludeOptions { runtimePatterns?: string[]; /** - * Whether to include dynamic patterns like the current Gemini MD filename. Defaults to true. + * Whether to include dynamic patterns like configured context filenames. Defaults to true. */ includeDynamicPatterns?: boolean; } @@ -158,9 +158,11 @@ export class FileExclusions { patterns.push(...DEFAULT_FILE_EXCLUDES); } - // Add dynamic patterns (like current Gemini MD filename) + // Add dynamic patterns (like context filenames) if (includeDynamicPatterns) { - patterns.push(`**/${getCurrentGeminiMdFilename()}`); + for (const filename of getAllGeminiMdFilenames()) { + patterns.push(`**/${filename}`); + } } // Add custom patterns from configuration diff --git a/packages/core/src/utils/paths.ts b/packages/core/src/utils/paths.ts index 96856a5dc..dc4434ece 100644 --- a/packages/core/src/utils/paths.ts +++ b/packages/core/src/utils/paths.ts @@ -202,6 +202,25 @@ export function getProjectHash(projectRoot: string): string { return crypto.createHash('sha256').update(normalizedPath).digest('hex'); } +/** + * Sanitizes a directory path to create a safe project ID. 
+ * + * - On Windows: normalizes to lowercase for case-insensitive matching + * - Replaces all non-alphanumeric characters with hyphens + * + * This is used for: + * - Creating project-specific directories + * - Generating session IDs for debug logging during startup + * + * @param cwd - The directory path to sanitize + * @returns A sanitized string safe for use as a project identifier + */ +export function sanitizeCwd(cwd: string): string { + // On Windows, normalize to lowercase for case-insensitive matching + const normalizedCwd = os.platform() === 'win32' ? cwd.toLowerCase() : cwd; + return normalizedCwd.replace(/[^a-zA-Z0-9]/g, '-'); +} + /** * Checks if a path is a subpath of another path. * @param parentPath The parent path. diff --git a/packages/core/src/utils/textUtils.ts b/packages/core/src/utils/textUtils.ts index ab59c2d59..32c25b89f 100644 --- a/packages/core/src/utils/textUtils.ts +++ b/packages/core/src/utils/textUtils.ts @@ -55,12 +55,21 @@ export function isBinary( } /** - * Normalizes text for cross-platform parsing. - * - Strips UTF-8 BOM at start. - * - Converts CRLF and CR to LF. + * Normalizes text content by stripping the UTF-8 BOM and converting all CRLF (\r\n) + * or standalone CR (\r) line endings to LF (\n). + * + * This is crucial for cross-platform compatibility, particularly to prevent parsing + * failures on Windows where files may be saved with CRLF line endings. + * + * @param content The raw text content to normalize + * @returns The normalized string with uniform \n line endings */ export function normalizeContent(content: string): string { + // Strip UTF-8 BOM to ensure string processing starts at the first real character. let normalized = content.replace(/^\uFEFF/, ''); + + // Normalize line endings to LF (\n). 
normalized = normalized.replace(/\r\n/g, '\n').replace(/\r/g, '\n'); + return normalized; } diff --git a/packages/vscode-ide-companion/.vscodeignore b/packages/vscode-ide-companion/.vscodeignore index 18e07a04b..5d1a75d88 100644 --- a/packages/vscode-ide-companion/.vscodeignore +++ b/packages/vscode-ide-companion/.vscodeignore @@ -6,3 +6,5 @@ !LICENSE !NOTICES.txt !assets/ +!schemas/ +!schemas/** diff --git a/packages/vscode-ide-companion/package.json b/packages/vscode-ide-companion/package.json index 0f55c1248..f83d3cd86 100644 --- a/packages/vscode-ide-companion/package.json +++ b/packages/vscode-ide-companion/package.json @@ -31,6 +31,12 @@ "onStartupFinished" ], "contributes": { + "jsonValidation": [ + { + "fileMatch": "**/.qwen/settings.json", + "url": "./schemas/settings.schema.json" + } + ], "languages": [ { "id": "qwen-diff-editable" diff --git a/packages/vscode-ide-companion/schemas/settings.schema.json b/packages/vscode-ide-companion/schemas/settings.schema.json new file mode 100644 index 000000000..8b5fca2b0 --- /dev/null +++ b/packages/vscode-ide-companion/schemas/settings.schema.json @@ -0,0 +1,599 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "description": "Qwen Code settings configuration", + "properties": { + "mcpServers": { + "description": "Configuration for MCP servers.", + "type": "object", + "additionalProperties": true + }, + "modelProviders": { + "description": "Model providers configuration grouped by authType. Each authType contains an array of model configurations.", + "type": "object", + "additionalProperties": true + }, + "codingPlan": { + "description": "Coding Plan template version tracking and configuration.", + "type": "object", + "properties": { + "version": { + "description": "SHA256 hash of the Coding Plan template. Used to detect template updates.", + "type": "string" + } + } + }, + "env": { + "description": "Environment variables to set as fallback defaults. 
These are loaded with the lowest priority: system environment variables > .env files > settings.env.", + "type": "object", + "additionalProperties": true + }, + "general": { + "description": "General application settings.", + "type": "object", + "properties": { + "preferredEditor": { + "description": "The preferred editor to open files in.", + "type": "string" + }, + "vimMode": { + "description": "Enable Vim keybindings", + "type": "boolean", + "default": false + }, + "enableAutoUpdate": { + "description": "Enable automatic update checks and installations on startup.", + "type": "boolean", + "default": true + }, + "gitCoAuthor": { + "description": "Automatically add a Co-authored-by trailer to git commit messages when commits are made through Qwen Code.", + "type": "boolean", + "default": true + }, + "checkpointing": { + "description": "Session checkpointing settings.", + "type": "object", + "properties": { + "enabled": { + "description": "Enable session checkpointing for recovery", + "type": "boolean", + "default": false + } + } + }, + "debugKeystrokeLogging": { + "description": "Enable debug logging of keystrokes to the console.", + "type": "boolean", + "default": false + }, + "language": { + "description": "The language for the user interface. Use \"auto\" to detect from system settings. You can also use custom language codes (e.g., \"es\", \"fr\") by placing JS language files in ~/.qwen/locales/ (e.g., ~/.qwen/locales/es.js). Options: auto, en, zh, ru, de, ja, pt", + "enum": [ + "auto", + "en", + "zh", + "ru", + "de", + "ja", + "pt" + ], + "default": "auto" + }, + "outputLanguage": { + "description": "The language for LLM output. 
Use \"auto\" to detect from system settings, or set a specific language.", + "type": "string", + "default": "auto" + }, + "terminalBell": { + "description": "Play terminal bell sound when response completes or needs approval.", + "type": "boolean", + "default": true + }, + "chatRecording": { + "description": "Enable saving chat history to disk. Disabling this will also prevent --continue and --resume from working.", + "type": "boolean", + "default": true + }, + "defaultFileEncoding": { + "description": "Default encoding for new files. Use \"utf-8\" (default) for UTF-8 without BOM, or \"utf-8-bom\" for UTF-8 with BOM. Only change this if your project specifically requires BOM. Options: utf-8, utf-8-bom", + "enum": [ + "utf-8", + "utf-8-bom" + ], + "default": "utf-8" + } + } + }, + "output": { + "description": "Settings for the CLI output.", + "type": "object", + "properties": { + "format": { + "description": "The format of the CLI output. Options: text, json", + "enum": [ + "text", + "json" + ], + "default": "text" + } + } + }, + "ui": { + "description": "User interface settings.", + "type": "object", + "properties": { + "theme": { + "description": "The color theme for the UI.", + "type": "string", + "default": "Qwen Dark" + }, + "customThemes": { + "description": "Custom theme definitions.", + "type": "object", + "additionalProperties": true + }, + "hideWindowTitle": { + "description": "Hide the window title bar", + "type": "boolean", + "default": false + }, + "showStatusInTitle": { + "description": "Show Qwen Code status and thoughts in the terminal window title", + "type": "boolean", + "default": false + }, + "hideTips": { + "description": "Hide helpful tips in the UI", + "type": "boolean", + "default": false + }, + "showLineNumbers": { + "description": "Show line numbers in the code output.", + "type": "boolean", + "default": false + }, + "showCitations": { + "description": "Show citations for generated text in the chat.", + "type": "boolean", + "default": false 
+ }, + "customWittyPhrases": { + "description": "Custom witty phrases to display during loading.", + "type": "array", + "items": { + "type": "string" + } + }, + "enableWelcomeBack": { + "description": "Show welcome back dialog when returning to a project with conversation history.", + "type": "boolean", + "default": true + }, + "enableUserFeedback": { + "description": "Show optional feedback dialog after conversations to help improve Qwen performance.", + "type": "boolean", + "default": true + }, + "accessibility": { + "description": "Accessibility settings.", + "type": "object", + "properties": { + "enableLoadingPhrases": { + "description": "Enable loading phrases (disable for accessibility)", + "type": "boolean", + "default": true + }, + "screenReader": { + "description": "Render output in plain-text to be more screen reader accessible", + "type": "boolean" + } + } + }, + "feedbackLastShownTimestamp": { + "description": "The last time the feedback dialog was shown.", + "type": "number", + "default": 0 + } + } + }, + "ide": { + "description": "IDE integration settings.", + "type": "object", + "properties": { + "enabled": { + "description": "Enable IDE integration mode", + "type": "boolean", + "default": false + }, + "hasSeenNudge": { + "description": "Whether the user has seen the IDE integration nudge.", + "type": "boolean", + "default": false + } + } + }, + "privacy": { + "description": "Privacy-related settings.", + "type": "object", + "properties": { + "usageStatisticsEnabled": { + "description": "Enable collection of usage statistics", + "type": "boolean", + "default": true + } + } + }, + "telemetry": { + "description": "Telemetry configuration.", + "type": "object", + "additionalProperties": true + }, + "model": { + "description": "Settings related to the generative model.", + "type": "object", + "properties": { + "name": { + "description": "The model to use for conversations.", + "type": "string" + }, + "maxSessionTurns": { + "description": "Maximum number 
of user/model/tool turns to keep in a session. -1 means unlimited.", + "type": "number", + "default": -1 + }, + "summarizeToolOutput": { + "description": "Settings for summarizing tool output.", + "type": "object", + "additionalProperties": true + }, + "chatCompression": { + "description": "Chat compression settings.", + "type": "object", + "additionalProperties": true + }, + "sessionTokenLimit": { + "description": "The maximum number of tokens allowed in a session.", + "type": "number" + }, + "skipNextSpeakerCheck": { + "description": "Skip the next speaker check.", + "type": "boolean", + "default": true + }, + "skipLoopDetection": { + "description": "Disable all loop detection checks (streaming and LLM).", + "type": "boolean", + "default": false + }, + "skipStartupContext": { + "description": "Avoid sending the workspace startup context at the beginning of each session.", + "type": "boolean", + "default": false + }, + "enableOpenAILogging": { + "description": "Enable OpenAI logging.", + "type": "boolean", + "default": false + }, + "openAILoggingDir": { + "description": "Custom directory path for OpenAI API logs. If not specified, defaults to logs/openai in the current working directory.", + "type": "string" + }, + "generationConfig": { + "description": "Generation configuration settings.", + "type": "object", + "properties": { + "timeout": { + "description": "Request timeout in milliseconds.", + "type": "number" + }, + "maxRetries": { + "description": "Maximum number of retries for failed requests.", + "type": "number" + }, + "enableCacheControl": { + "description": "Enable cache control for DashScope providers.", + "type": "boolean", + "default": true + }, + "schemaCompliance": { + "description": "The compliance mode for tool schemas sent to the model. Use \"openapi_30\" for strict OpenAPI 3.0 compatibility (e.g., for Gemini). 
Options: auto, openapi_30", + "enum": [ + "auto", + "openapi_30" + ], + "default": "auto" + }, + "contextWindowSize": { + "description": "Overrides the default context window size for the selected model. Use this setting when a provider's effective context limit differs from Qwen Code's default. This value defines the model's assumed maximum context capacity, not a per-request token limit.", + "type": "number" + } + } + } + } + }, + "context": { + "description": "Settings for managing context provided to the model.", + "type": "object", + "properties": { + "fileName": { + "description": "The name of the context file.", + "type": "object", + "additionalProperties": true + }, + "importFormat": { + "description": "The format to use when importing memory.", + "type": "string" + }, + "includeDirectories": { + "description": "Additional directories to include in the workspace context. Missing directories will be skipped with a warning.", + "type": "array", + "items": { + "type": "string" + } + }, + "loadFromIncludeDirectories": { + "description": "Whether to load memory files from include directories.", + "type": "boolean", + "default": false + }, + "fileFiltering": { + "description": "Settings for git-aware file filtering.", + "type": "object", + "properties": { + "respectGitIgnore": { + "description": "Respect .gitignore files when searching", + "type": "boolean", + "default": true + }, + "respectQwenIgnore": { + "description": "Respect .qwenignore files when searching", + "type": "boolean", + "default": true + }, + "enableRecursiveFileSearch": { + "description": "Enable recursive file search functionality", + "type": "boolean", + "default": true + }, + "enableFuzzySearch": { + "description": "Enable fuzzy search when searching for files.", + "type": "boolean", + "default": true + } + } + } + } + }, + "tools": { + "description": "Settings for built-in and custom tools.", + "type": "object", + "properties": { + "sandbox": { + "description": "Sandbox execution 
environment (can be a boolean or a path string).", + "type": "object", + "additionalProperties": true + }, + "shell": { + "description": "Settings for shell execution.", + "type": "object", + "properties": { + "enableInteractiveShell": { + "description": "Use node-pty for an interactive shell experience. Fallback to child_process still applies.", + "type": "boolean", + "default": false + }, + "pager": { + "description": "The pager command to use for shell output. Defaults to `cat`.", + "type": "string", + "default": "cat" + }, + "showColor": { + "description": "Show color in shell output.", + "type": "boolean", + "default": false + } + } + }, + "core": { + "description": "Paths to core tool definitions.", + "type": "array", + "items": { + "type": "string" + } + }, + "allowed": { + "description": "A list of tool names that will bypass the confirmation dialog.", + "type": "array", + "items": { + "type": "string" + } + }, + "exclude": { + "description": "Tool names to exclude from discovery.", + "type": "array", + "items": { + "type": "string" + } + }, + "approvalMode": { + "description": "Approval mode for tool usage. Controls how tools are approved before execution. Options: plan, default, auto-edit, yolo", + "enum": [ + "plan", + "default", + "auto-edit", + "yolo" + ], + "default": "default" + }, + "autoAccept": { + "description": "Automatically accept and execute tool calls that are considered safe (e.g., read-only operations) without explicit user confirmation.", + "type": "boolean", + "default": false + }, + "discoveryCommand": { + "description": "Command to run for tool discovery.", + "type": "string" + }, + "callCommand": { + "description": "Command to run for tool calls.", + "type": "string" + }, + "useRipgrep": { + "description": "Use ripgrep for file content search instead of the fallback implementation. 
Provides faster search performance.", + "type": "boolean", + "default": true + }, + "useBuiltinRipgrep": { + "description": "Use the bundled ripgrep binary. When set to false, the system-level \"rg\" command will be used instead. This setting is only effective when useRipgrep is true.", + "type": "boolean", + "default": true + }, + "enableToolOutputTruncation": { + "description": "Enable truncation of large tool outputs.", + "type": "boolean", + "default": true + }, + "truncateToolOutputThreshold": { + "description": "Truncate tool output if it is larger than this many characters. Set to -1 to disable.", + "type": "number", + "default": 25000 + }, + "truncateToolOutputLines": { + "description": "The number of lines to keep when truncating tool output.", + "type": "number", + "default": 1000 + } + } + }, + "mcp": { + "description": "Settings for Model Context Protocol (MCP) servers.", + "type": "object", + "properties": { + "serverCommand": { + "description": "Command to start an MCP server.", + "type": "string" + }, + "allowed": { + "description": "A list of MCP servers to allow.", + "type": "array", + "items": { + "type": "string" + } + }, + "excluded": { + "description": "A list of MCP servers to exclude.", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "security": { + "description": "Security-related settings.", + "type": "object", + "properties": { + "folderTrust": { + "description": "Settings for folder trust.", + "type": "object", + "properties": { + "enabled": { + "description": "Setting to track whether Folder trust is enabled.", + "type": "boolean", + "default": false + } + } + }, + "auth": { + "description": "Authentication settings.", + "type": "object", + "properties": { + "selectedType": { + "description": "The currently selected authentication type.", + "type": "string" + }, + "enforcedType": { + "description": "The required auth type. 
If this does not match the selected auth type, the user will be prompted to re-authenticate.", + "type": "string" + }, + "useExternal": { + "description": "Whether to use an external authentication flow.", + "type": "boolean" + }, + "apiKey": { + "description": "API key for OpenAI compatible authentication.", + "type": "string" + }, + "baseUrl": { + "description": "Base URL for OpenAI compatible API.", + "type": "string" + } + } + } + } + }, + "advanced": { + "description": "Advanced settings for power users.", + "type": "object", + "properties": { + "autoConfigureMemory": { + "description": "Automatically configure Node.js memory limits", + "type": "boolean", + "default": false + }, + "dnsResolutionOrder": { + "description": "The DNS resolution order.", + "type": "string" + }, + "excludedEnvVars": { + "description": "Environment variables to exclude from project context.", + "type": "array", + "items": { + "type": "string" + }, + "default": [ + "DEBUG", + "DEBUG_MODE" + ] + }, + "bugCommand": { + "description": "Configuration for the bug report command.", + "type": "object", + "additionalProperties": true + }, + "tavilyApiKey": { + "description": "⚠️ DEPRECATED: Please use webSearch.provider configuration instead. Legacy API key for the Tavily API.", + "type": "string" + } + } + }, + "webSearch": { + "description": "Configuration for web search providers.", + "type": "object", + "additionalProperties": true + }, + "experimental": { + "description": "Setting to enable experimental features", + "type": "object", + "properties": { + "visionModelPreview": { + "description": "Enable vision model support and auto-switching functionality. When disabled, vision models like qwen-vl-max-latest will be hidden and auto-switching will not occur.", + "type": "boolean", + "default": true + }, + "vlmSwitchMode": { + "description": "Default behavior when images are detected in input. 
Values: once (one-time switch), session (switch for entire session), persist (continue with current model). If not set, user will be prompted each time. This is a temporary experimental feature.", + "type": "string" + } + } + }, + "$version": { + "type": "number", + "description": "Settings schema version for migration tracking.", + "default": 3 + } + }, + "additionalProperties": true +} diff --git a/scripts/build.js b/scripts/build.js index 68da1c6e8..0ce010b3b 100644 --- a/scripts/build.js +++ b/scripts/build.js @@ -56,6 +56,15 @@ for (const workspace of buildOrder) { stdio: 'inherit', cwd: root, }); + + // After cli is built, generate the JSON Schema for settings + // so the vscode-ide-companion extension can provide IntelliSense + if (workspace === 'packages/cli') { + execSync('npx tsx scripts/generate-settings-schema.ts', { + stdio: 'inherit', + cwd: root, + }); + } } // also build container image if sandboxing is enabled diff --git a/scripts/generate-settings-schema.ts b/scripts/generate-settings-schema.ts new file mode 100644 index 000000000..9d13e8166 --- /dev/null +++ b/scripts/generate-settings-schema.ts @@ -0,0 +1,146 @@ +/** + * @license + * Copyright 2025 Qwen team + * SPDX-License-Identifier: Apache-2.0 + */ + +/** + * Generates a JSON Schema from the internal SETTINGS_SCHEMA definition. + * + * Usage: npx tsx scripts/generate-settings-schema.ts + * + * This reads the TypeScript settings schema and converts it to a standard + * JSON Schema file that VS Code uses for IntelliSense in settings.json files. 
+ * + * Prerequisites: npm run build (core package must be built first) + */ + +import * as fs from 'node:fs'; +import * as path from 'node:path'; +import { fileURLToPath } from 'node:url'; + +import type { + SettingDefinition, + SettingsSchema, +} from '../packages/cli/src/config/settingsSchema.js'; +import { getSettingsSchema } from '../packages/cli/src/config/settingsSchema.js'; + +const __filename = fileURLToPath(import.meta.url); +const __dirname = path.dirname(__filename); + +interface JsonSchemaProperty { + $schema?: string; + type?: string | string[]; + description?: string; + properties?: Record<string, JsonSchemaProperty>; + items?: JsonSchemaProperty; + enum?: (string | number)[]; + default?: unknown; + additionalProperties?: boolean | JsonSchemaProperty; +} + +function convertSettingToJsonSchema( + setting: SettingDefinition, +): JsonSchemaProperty { + const schema: JsonSchemaProperty = {}; + + if (setting.description) { + schema.description = setting.description; + } + + switch (setting.type) { + case 'boolean': + schema.type = 'boolean'; + break; + case 'string': + schema.type = 'string'; + break; + case 'number': + schema.type = 'number'; + break; + case 'array': + schema.type = 'array'; + schema.items = { type: 'string' }; + break; + case 'enum': + if (setting.options && setting.options.length > 0) { + schema.enum = setting.options.map((o) => o.value); + schema.description += + ' Options: ' + setting.options.map((o) => `${o.value}`).join(', '); + } else { + // Enum without predefined options - accept any string + schema.type = 'string'; + } + break; + case 'object': + schema.type = 'object'; + if (setting.properties) { + schema.properties = {}; + for (const [key, childDef] of Object.entries(setting.properties)) { + schema.properties[key] = convertSettingToJsonSchema( + childDef as SettingDefinition, + ); + } + } else { + schema.additionalProperties = true; + } + break; + } + + // Add default value for simple types only + if (setting.default !== undefined && setting.default !==
null) { + const defaultVal = setting.default; + if ( + typeof defaultVal === 'boolean' || + typeof defaultVal === 'number' || + typeof defaultVal === 'string' + ) { + schema.default = defaultVal; + } else if (Array.isArray(defaultVal) && defaultVal.length > 0) { + schema.default = defaultVal; + } + } + + return schema; +} + +function generateJsonSchema( + settingsSchema: SettingsSchema, +): JsonSchemaProperty { + const jsonSchema: JsonSchemaProperty = { + $schema: 'http://json-schema.org/draft-07/schema#', + type: 'object', + description: 'Qwen Code settings configuration', + properties: {}, + additionalProperties: true, + }; + + for (const [key, setting] of Object.entries(settingsSchema)) { + jsonSchema.properties![key] = convertSettingToJsonSchema( + setting as SettingDefinition, + ); + } + + // Add $version property + jsonSchema.properties!['$version'] = { + type: 'number', + description: 'Settings schema version for migration tracking.', + default: 3, + }; + + return jsonSchema; +} + +const schema = getSettingsSchema(); +const jsonSchema = generateJsonSchema(schema as unknown as SettingsSchema); + +const outputDir = path.resolve( + __dirname, + '../packages/vscode-ide-companion/schemas', +); +const outputPath = path.join(outputDir, 'settings.schema.json'); + +fs.mkdirSync(outputDir, { recursive: true }); +fs.writeFileSync(outputPath, JSON.stringify(jsonSchema, null, 2) + '\n'); + +console.log(`Generated settings JSON Schema at: ${outputPath}`); diff --git a/scripts/installation/install-qwen-with-source.bat b/scripts/installation/install-qwen-with-source.bat index 5a919134c..fcc9d9ac3 100644 --- a/scripts/installation/install-qwen-with-source.bat +++ b/scripts/installation/install-qwen-with-source.bat @@ -2,8 +2,8 @@ REM Script to install Node.js and Qwen Code with source information REM This script handles the installation process and sets the installation source REM -REM Usage: install-qwen-with-source.bat --source [github|npm|internal|local-build] -REM 
 install-qwen-with-source.bat -s [github|npm|internal|local-build] +REM Usage: install-qwen-with-source.bat --source <github|npm|internal|local-build> +REM install-qwen-with-source.bat -s <github|npm|internal|local-build> REM setlocal enabledelayedexpansion @@ -14,21 +14,21 @@ REM Parse command line arguments :parse_args if "%~1"=="" goto end_parse if /i "%~1"=="--source" ( - set "SOURCE=%~2" - shift - shift - goto parse_args + if not "%~2"=="" ( + set "SOURCE=%~2" + shift + shift + goto parse_args + ) ) if /i "%~1"=="-s" ( - set "SOURCE=%~2" - shift - shift - goto parse_args + if not "%~2"=="" ( + set "SOURCE=%~2" + shift + shift + goto parse_args + ) ) -if /i "%~1"=="github" set "SOURCE=github" -if /i "%~1"=="npm" set "SOURCE=npm" -if /i "%~1"=="internal" set "SOURCE=internal" -if /i "%~1"=="local-build" set "SOURCE=local-build" shift goto parse_args @@ -100,8 +100,8 @@ if exist "!NODEJS_PATH!\npm.cmd" ( REM Install Qwen Code with source information echo INFO: Installing Qwen Code with source: %SOURCE% -echo INFO: Running: %NPM_CMD% install -g @qwen-code/qwen-code -call "%NPM_CMD%" install -g @qwen-code/qwen-code +echo INFO: Running: %NPM_CMD% install -g @qwen-code/qwen-code@latest --registry https://registry.npmmirror.com +call "%NPM_CMD%" install -g @qwen-code/qwen-code@latest --registry https://registry.npmmirror.com if %ERRORLEVEL% EQU 0 ( echo SUCCESS: Qwen Code installed successfully! @@ -110,21 +110,25 @@ if %ERRORLEVEL% EQU 0 ( exit /b 1 ) -REM After installation, create source.json in the .qwen directory -echo INFO: Creating source.json in %USERPROFILE%\.qwen... +REM Create source.json only if --source or -s was explicitly provided +if not "!SOURCE!"=="unknown" ( + echo INFO: Creating source.json in %USERPROFILE%\.qwen... -set "QWEN_DIR=%USERPROFILE%\.qwen" -if not exist "%QWEN_DIR%" ( - mkdir "%QWEN_DIR%" + set "QWEN_DIR=%USERPROFILE%\.qwen" + if not exist "!QWEN_DIR!" ( + mkdir "!QWEN_DIR!" + ) + + REM Create the source.json file with the installation source + ( + echo { + echo "source": "!SOURCE!"
+ echo } + ) > "!QWEN_DIR!\source.json" + + echo SUCCESS: Installation source saved to %USERPROFILE%\.qwen\source.json ) -REM Create the source.json file with the installation source -echo { > "%QWEN_DIR%\source.json" -echo "source": "%SOURCE%" >> "%QWEN_DIR%\source.json" -echo } >> "%QWEN_DIR%\source.json" - -echo SUCCESS: Installation source saved to %USERPROFILE%\.qwen\source.json - REM Verify installation call :CheckCommandExists qwen if %ERRORLEVEL% EQU 0 ( @@ -138,6 +142,7 @@ echo. echo =========================================== echo SUCCESS: Installation completed! echo The source information is stored in %USERPROFILE%\.qwen\source.json +echo Tips: Please restart your terminal and run: qwen echo. echo =========================================== diff --git a/scripts/installation/install-qwen-with-source.sh b/scripts/installation/install-qwen-with-source.sh index 0991ec485..6f67e469b 100755 --- a/scripts/installation/install-qwen-with-source.sh +++ b/scripts/installation/install-qwen-with-source.sh @@ -84,7 +84,9 @@ get_shell_profile() { echo "${HOME}/.zshrc" ;; fish) - echo "${HOME}/.config/fish/config.fish" + # Fish uses its own syntax; bash/zsh export statements are not compatible. + # Return empty string to signal callers to skip automatic profile writes. + echo "" ;; *) echo "${HOME}/.profile" @@ -163,9 +165,16 @@ ensure_download_tool() { clean_npmrc_conflict() { local npmrc="${HOME}/.npmrc" if [[ -f "${npmrc}" ]]; then - log_info "Cleaning npmrc conflicts..." - grep -Ev '^(prefix|globalconfig) *= *' "${npmrc}" > "${npmrc}.tmp" || true - mv -f "${npmrc}.tmp" "${npmrc}" || true + # Only clean if conflicting entries actually exist + if grep -Eq '^(prefix|globalconfig) *= *' "${npmrc}" 2>/dev/null; then + log_info "Cleaning npmrc conflicts..." 
+ # Backup original npmrc before modifying + cp -f "${npmrc}" "${npmrc}.bak" + log_info "Backed up original .npmrc to ${npmrc}.bak" + grep -Ev '^(prefix|globalconfig) *= *' "${npmrc}.bak" > "${npmrc}.tmp" || true + mv -f "${npmrc}.tmp" "${npmrc}" || true + log_success "Removed conflicting prefix/globalconfig entries from .npmrc" + fi fi } @@ -204,8 +213,13 @@ install_nvm() { local PROFILE_FILE PROFILE_FILE=$(get_shell_profile) + # Fish shell returns empty string from get_shell_profile because export/source + # syntax is incompatible with fish. Skip automatic profile writes for fish users. + if [[ -z "${PROFILE_FILE}" ]]; then + log_warning "Fish shell detected: automatic shell profile configuration is not supported." + log_info "Please add NVM configuration manually. See: https://github.com/nvm-sh/nvm#fish" # Check if profile file is writable - if [[ -f "${PROFILE_FILE}" ]] && [[ ! -w "${PROFILE_FILE}" ]]; then + elif [[ -f "${PROFILE_FILE}" ]] && [[ ! -w "${PROFILE_FILE}" ]]; then log_warning "Cannot write to ${PROFILE_FILE} (permission denied)" log_info "Skipping shell profile configuration" log_info "You may need to manually add NVM configuration to your shell profile" @@ -284,7 +298,13 @@ check_node_version() { local current_version current_version=$(node -v | sed 's/v//') local major_version - major_version=$(echo "${current_version}" | cut -d. -f1) + major_version=$(echo "${current_version}" | cut -d. -f1 | sed 's/[^0-9]//g') + + # Handle cases where major_version is empty or non-numeric + if [[ -z "${major_version}" ]]; then + log_warning "Unable to determine Node.js version from: $(node -v)" + return 1 + fi if [[ "${major_version}" -ge 20 ]]; then log_success "Node.js v${current_version} is already installed (>= 20)" @@ -356,55 +376,51 @@ fix_npm_permissions() { local NPM_GLOBAL_DIR NPM_GLOBAL_DIR=$(npm config get prefix 2>/dev/null) || true + + # Determine whether we need to fall back to ~/.npm-global: + # 1. 
prefix is empty or contains an error string + # 2. prefix is a system directory (would break sudo setuid binaries) + # 3. prefix directory is not writable + local use_user_dir=false + if [[ -z "${NPM_GLOBAL_DIR}" ]] || [[ "${NPM_GLOBAL_DIR}" == *"error"* ]]; then + log_info "npm prefix is unset or invalid, switching to user directory" + use_user_dir=true + else + # SAFETY CHECK: Never use system directories + case "${NPM_GLOBAL_DIR}" in + /|/usr|/usr/local|/bin|/sbin|/lib|/lib64|/opt|/snap|/var|/etc) + log_warning "npm prefix is a system directory (${NPM_GLOBAL_DIR}), switching to user directory to avoid breaking system binaries." + use_user_dir=true + ;; + esac + fi + + if [[ "${use_user_dir}" == false ]] && [[ ! -w "${NPM_GLOBAL_DIR}" ]]; then + log_warning "npm global directory is not writable: ${NPM_GLOBAL_DIR}, switching to user directory." + use_user_dir=true + fi + + if [[ "${use_user_dir}" == true ]]; then NPM_GLOBAL_DIR="${HOME}/.npm-global" + # Create the directory before setting prefix so npm config set succeeds + mkdir -p "${NPM_GLOBAL_DIR}" npm config set prefix "${NPM_GLOBAL_DIR}" - log_info "Set npm prefix to user directory: ${NPM_GLOBAL_DIR}" - return 0 - fi + log_success "npm prefix set to: ${NPM_GLOBAL_DIR}" - # SAFETY CHECK: Never modify system directories - # This prevents catastrophic failures like breaking sudo setuid binaries - case "${NPM_GLOBAL_DIR}" in - /|/usr|/usr/local|/bin|/sbin|/lib|/lib64|/opt|/snap|/var|/etc) - log_warning "npm prefix is a system directory (${NPM_GLOBAL_DIR})." - log_info "Using user directory instead to avoid breaking system binaries." 
- NPM_GLOBAL_DIR="${HOME}/.npm-global" - npm config set prefix "${NPM_GLOBAL_DIR}" - log_success "npm prefix set to: ${NPM_GLOBAL_DIR}" - return 0 - ;; - *) - # Safe to proceed with non-system directory - ;; - esac - - # Check if npm global directory is writable - if [[ -w "${NPM_GLOBAL_DIR}" ]]; then - log_info "npm global directory is writable" - return 0 - fi - - # If not writable, use user directory - log_warning "npm global directory is not writable: ${NPM_GLOBAL_DIR}" - log_info "Setting npm prefix to user directory..." - - NPM_GLOBAL_DIR="${HOME}/.npm-global" - mkdir -p "${NPM_GLOBAL_DIR}" - npm config set prefix "${NPM_GLOBAL_DIR}" - - log_success "npm prefix set to: ${NPM_GLOBAL_DIR}" - - # Add to PATH in shell profile - local PROFILE_FILE - PROFILE_FILE=$(get_shell_profile) - if ! grep -q '.npm-global/bin' "${PROFILE_FILE}" 2>/dev/null; then - { - echo "" - echo "# NPM global bin (added by Qwen Code installer)" - echo "export PATH=\"\$HOME/.npm-global/bin:\$PATH\"" - } >> "${PROFILE_FILE}" - log_info "Added npm global bin to PATH in ${PROFILE_FILE}" + # Only add ~/.npm-global/bin to PATH when we actually use it + local PROFILE_FILE + PROFILE_FILE=$(get_shell_profile) + if [[ -n "${PROFILE_FILE}" ]] && ! 
grep -q '.npm-global/bin' "${PROFILE_FILE}" 2>/dev/null; then + { + echo "" + echo "# NPM global bin (added by Qwen Code installer)" + echo "export PATH=\"\$HOME/.npm-global/bin:\$PATH\"" + } >> "${PROFILE_FILE}" 2>/dev/null || log_warning "Failed to write PATH update to ${PROFILE_FILE}" + log_info "Added npm global bin to PATH in ${PROFILE_FILE}" + fi + else + log_info "npm global directory is writable: ${NPM_GLOBAL_DIR}" fi return 0 @@ -421,14 +437,14 @@ install_qwen_code() { # Add npm global bin to PATH local NPM_GLOBAL_BIN - NPM_GLOBAL_BIN=$(npm bin -g 2>/dev/null) || true + NPM_GLOBAL_BIN=$(npm config get prefix 2>/dev/null)/bin if [[ -n "${NPM_GLOBAL_BIN}" ]]; then export PATH="${NPM_GLOBAL_BIN}:${PATH}" fi if command_exists qwen; then local QWEN_VERSION - QWEN_VERSION=$(qwen --version 2>/dev/null) || echo "unknown" + QWEN_VERSION=$(qwen --version 2>/dev/null || echo "unknown") log_success "Qwen Code is already installed: ${QWEN_VERSION}" log_info "Upgrading to the latest version..." fi @@ -439,13 +455,9 @@ install_qwen_code() { # Fix npm permissions if needed fix_npm_permissions - # Configure npm registry for faster downloads in China - npm config set registry https://registry.npmmirror.com - log_info "npm registry set to npmmirror" - # Install Qwen Code log_info "Installing Qwen Code..." - if npm install -g @qwen-code/qwen-code@latest; then + if npm install -g @qwen-code/qwen-code@latest --registry https://registry.npmmirror.com; then log_success "Qwen Code installed successfully!" # Verify installation @@ -532,7 +544,7 @@ main() { # shellcheck source=/dev/null [[ -s "${NVM_DIR}/nvm.sh" ]] && \. "${NVM_DIR}/nvm.sh" 2>/dev/null || true local NPM_GLOBAL_BIN - NPM_GLOBAL_BIN=$(npm bin -g 2>/dev/null) || true + NPM_GLOBAL_BIN=$(npm config get prefix 2>/dev/null)/bin if [[ -n "${NPM_GLOBAL_BIN}" ]]; then export PATH="${NPM_GLOBAL_BIN}:${PATH}" fi @@ -541,15 +553,16 @@ main() { if command_exists qwen; then log_success "Qwen Code is ready to use!" 
echo "" - echo "You can now run: qwen" + log_info "Tips: Please restart your terminal and run: qwen" + echo "" else - log_warning "To start using Qwen Code, please run:" + log_warning "Tips: To start using Qwen Code, please run:" echo "" local PROFILE_FILE PROFILE_FILE=$(get_shell_profile) echo " source ${PROFILE_FILE}" echo "" - echo "Or simply restart your terminal, then run: qwen" + log_info "Or simply restart your terminal, then run: qwen" fi }