diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c410b6cdd..3608d961b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -83,6 +83,23 @@ jobs: - name: 'Run sensitive keyword linter' run: 'node scripts/lint.js --sensitive-keywords' + - name: 'Build CLI package' + run: 'npm run build --workspace=packages/cli' + + - name: 'Generate settings schema' + run: 'npm run generate:settings-schema' + + - name: 'Check settings schema is up-to-date' + run: | + if [[ -n $(git status --porcelain packages/vscode-ide-companion/schemas/settings.schema.json) ]]; then + echo "❌ Error: settings.schema.json is out of date!" + echo " Please run: npm run generate:settings-schema" + echo " Then commit the updated schema file." + git diff packages/vscode-ide-companion/schemas/settings.schema.json + exit 1 + fi + echo "✅ Settings schema is up-to-date" + # # Test: Node # diff --git a/.prettierignore b/.prettierignore index c9ae7e56a..5e9d79005 100644 --- a/.prettierignore +++ b/.prettierignore @@ -18,4 +18,5 @@ eslint.config.js gha-creds-*.json junit.xml Thumbs.db +packages/vscode-ide-companion/schemas/settings.schema.json packages/cli/src/services/insight/templates/insightTemplate.ts diff --git a/.qwen/commands/qc/code-review.md b/.qwen/commands/qc/code-review.md new file mode 100644 index 000000000..b5846485a --- /dev/null +++ b/.qwen/commands/qc/code-review.md @@ -0,0 +1,25 @@ +--- +description: Code review a pull request +--- + +You are an expert code reviewer. Follow these steps: + +1. If no PR number is provided in the args, use Bash(\"gh pr list\") to show open PRs +2. If a PR number is provided, use Bash(\"gh pr view \") to get PR details +3. Use Bash(\"gh pr diff \") to get the diff +4. Analyze the changes and provide a thorough code review that includes: + - Overview of what the PR does + - Analysis of code quality and style + - Specific suggestions for improvements + - Any potential issues or risks + +Keep your review concise but thorough. 
Focus on: +- Code correctness +- Following project conventions +- Performance implications +- Test coverage +- Security considerations + +Format your review with clear sections and bullet points. + +PR number: {{args}} diff --git a/.qwen/commands/qc/commit.md b/.qwen/commands/qc/commit.md new file mode 100644 index 000000000..76ef6b417 --- /dev/null +++ b/.qwen/commands/qc/commit.md @@ -0,0 +1,70 @@ +--- +description: Commit staged changes with an AI-generated commit message and push +--- + +# Commit and Push + +## Overview +Generate a clear, concise commit message based on staged changes, confirm with the user, then commit and push. + +## Steps + +### 1. Check repository status +- Run `git status` to check: + - Are there any staged changes? + - Are there unstaged changes? + - What is the current branch? + +### 2. Handle unstaged changes +- If there are unstaged changes, notify the user and list them +- Do NOT add or commit unstaged changes +- Proceed only with staged changes + +### 3. Review staged changes +- Run `git diff --staged` to see all staged changes +- Analyze the changes in depth to understand: + - What files were modified/added/deleted + - The nature of the changes (feature, fix, refactor, docs, etc.) + - The scope and impact of the changes + +### 4. Handle branch logic +- Get current branch name with `git branch --show-current` +- **If current branch is `main` or `master`:** + - Generate a proper branch name based on the changes + - Create and switch to the new branch: `git checkout -b ` +- **If current branch is NOT main/master:** + - Check if branch name matches the staged changes + - If branch name doesn't match changes, ask user: + - "Current branch `` doesn't seem to match these changes." + - "Options: (1) Create and switch to a new branch, (2) Commit directly on current branch" + - Wait for user decision + +### 5. 
Generate commit message +- Types: feat, fix, docs, style, refactor, test, chore +- Guidelines: + - Be clear and concise + - Reference issues if mentioned in changes + - Include scope in parentheses when applicable (e.g., `fix(insight):`, `feat(auth):`) + - Add bullet points for detailed changes if it adds more value, otherwise do not use bullets + - Include a footer explaining the purpose/impact of the changes + +**Format:** +``` +(): +- (optional) +- (optional) +- ... + +This . +``` + +### 6. Present the result and confirm with user +- Present the generated commit message +- Show which branch will be used +- Ask for confirmation: "Proceed with commit and push?" +- Wait for user approval + +### 7. Commit and push +- After user confirms: + - `git commit -m ""` + - `git push -u origin ` (use `-u` for new branches) diff --git a/.qwen/commands/qc/create-issue.md b/.qwen/commands/qc/create-issue.md new file mode 100644 index 000000000..54317621b --- /dev/null +++ b/.qwen/commands/qc/create-issue.md @@ -0,0 +1,42 @@ +--- +description: Draft and submit a GitHub issue based on a user-provided idea +--- + +# Create Issue + +## Overview +Take the user's idea or bug description, investigate the codebase to understand the full context, draft a GitHub issue for review, and submit it once approved. + +## Input +The user provides a brief description of a feature request or bug report: {{args}} + +## Steps + +1. **Understand the request** + - Read the user's description carefully + - Determine whether this is a feature request or a bug report + +2. **Investigate the codebase** + - Search for relevant code, files, and existing behavior related to the request + - Build a thorough understanding of how the current system works + - Identify any related issues or prior art if mentioned + +3. 
**Draft the issue** + - Write a markdown file for the user to review + - Use the appropriate template: + - Feature request: follow @.github/ISSUE_TEMPLATE/feature_request.yml + - Bug report: follow @.github/ISSUE_TEMPLATE/bug_report.yml + - Write from the user's perspective, not as an implementation spec + - Keep the language clear and concise, AVOID internal implementation details + +4. **Review with user** + - Present the draft file to the user + - Iterate on feedback until the user is satisfied + - Do NOT submit until the user explicitly asks to + +5. **Submit the issue** + - When the user confirms, create the issue using `gh issue create` + - Apply the appropriate labels: + - Feature request: `type/feature-request`, `status/needs-triage` + - Bug report: `type/bug`, `status/needs-triage` + - Report back the issue URL diff --git a/.qwen/commands/qc/create-pr.md b/.qwen/commands/qc/create-pr.md new file mode 100644 index 000000000..bf3c3c1e4 --- /dev/null +++ b/.qwen/commands/qc/create-pr.md @@ -0,0 +1,34 @@ +--- +description: Create a pull request based on staged code changes +--- + +# Create PR + +## Overview +Create a well-structured pull request with proper description and title. + +## Steps +1. **Review staged changes** + - Review all staged changes to understand what has been done + - Do not touch unstaged changes + +2. **Prepare branch** + - Create a new branch with proper name if current branch is main + - Ensure all changes are committed + - Push branch to remote + +3. **Write PR description** + - Use PR Template below + - Summarize changes clearly + - Include context and motivation + - List any breaking changes + - Link related issues if provided, or use "No linked issues" + - Add this line at the end of PR body: "🤖 Generated with [Qwen Code](https://github.com/QwenLM/qwen-code)", with a line separator + +4. 
**Set up PR** + - Create PR title and body + - Submit PR with gh command + +## PR Template + +@{.github/pull_request_template.md} \ No newline at end of file diff --git a/.qwen/skills/terminal-capture/SKILL.md b/.qwen/skills/terminal-capture/SKILL.md index adf8fff13..7fc99a18d 100644 --- a/.qwen/skills/terminal-capture/SKILL.md +++ b/.qwen/skills/terminal-capture/SKILL.md @@ -109,6 +109,38 @@ Supported key names: `ArrowUp`, `ArrowDown`, `ArrowLeft`, `ArrowRight`, `Enter`, Auto-screenshot is triggered after the key sequence ends (when the next step is not a `key`). +### `streaming` — Capture During Execution + +Capture multiple screenshots at intervals during long-running output (e.g., progress bars). Optionally generates an animated GIF. + +```typescript +{ + type: 'Run this command: bash progress.sh', + streaming: { + delayMs: 7000, // Wait before first capture (skip initial waiting phase) + intervalMs: 500, // Interval between captures + count: 20, // Maximum number of captures + gif: true, // Generate animated GIF (default: true, requires ffmpeg) + }, +} +``` + +- `delayMs` (optional): Milliseconds to wait after pressing Enter before starting captures. Useful for skipping model thinking/approval time. +- Captures stop early if terminal output is unchanged for 3 consecutive intervals. +- Duplicate frames (no output change) are automatically skipped. + +**GIF prerequisite**: If the scenario uses `streaming` with GIF enabled (default), check if `ffmpeg` is installed before running. If not, ask the user whether they'd like to install it: + +```bash +# Check +which ffmpeg + +# Install (macOS) +brew install ffmpeg +``` + +If the user declines, the scenario still runs — GIF generation is skipped with a warning. + ### `capture` / `captureFull` — Explicit Screenshot Use as a standalone step, or override automatic naming: @@ -178,20 +210,32 @@ This tool is commonly used for visual verification during PR reviews. 
For the co ## Full ScenarioConfig Type ```typescript -interface ScenarioConfig { - name: string; // Scenario name (also used as screenshot subdirectory name) - spawn: string[]; // Launch command ["node", "dist/cli.js", "--yolo"] - flow: FlowStep[]; // Interaction steps - terminal?: { - // Terminal configuration (all optional) - cols?: number; // Number of columns, default 100 - rows?: number; // Number of rows, default 28 - theme?: string; // Theme: dracula|one-dark|github-dark|monokai|night-owl - chrome?: boolean; // macOS window decorations, default true - title?: string; // Window title, default "Terminal" - fontSize?: number; // Font size - cwd?: string; // Working directory (relative to config file) +interface FlowStep { + type?: string; // Input text + key?: string | string[]; // Key press(es) + capture?: string; // Viewport screenshot filename + captureFull?: string; // Full scrollback screenshot filename + streaming?: { + delayMs?: number; // Delay before first capture (default: 0) + intervalMs: number; // Interval between captures in ms + count: number; // Maximum number of captures + gif?: boolean; // Generate animated GIF (default: true) }; - outputDir?: string; // Screenshot output directory (relative to config file) +} + +interface ScenarioConfig { + name: string; // Scenario name (also used as screenshot subdirectory name) + spawn: string[]; // Launch command ["node", "dist/cli.js", "--yolo"] + flow: FlowStep[]; // Interaction steps + terminal?: { + cols?: number; // Number of columns, default 100 + rows?: number; // Number of rows, default 28 + theme?: string; // Theme: dracula|one-dark|github-dark|monokai|night-owl + chrome?: boolean; // macOS window decorations, default true + title?: string; // Window title, default "Terminal" + fontSize?: number; // Font size + cwd?: string; // Working directory (relative to config file) + }; + outputDir?: string; // Screenshot output directory (relative to config file) } ``` diff --git 
a/integration-tests/acp-integration.test.ts b/integration-tests/acp-integration.test.ts index 07e53e960..a0f7a2629 100644 --- a/integration-tests/acp-integration.test.ts +++ b/integration-tests/acp-integration.test.ts @@ -472,6 +472,156 @@ function setupAcpTest( } }); + it('supports session/set_config_option for mode and model', async () => { + const rig = new TestRig(); + rig.setup('acp set config option'); + + const { sendRequest, cleanup, stderr } = setupAcpTest(rig); + + try { + // Initialize + await sendRequest('initialize', { + protocolVersion: 1, + clientCapabilities: { + fs: { readTextFile: true, writeTextFile: true }, + }, + }); + + await sendRequest('authenticate', { methodId: 'openai' }); + + // Create a new session + const newSession = (await sendRequest('session/new', { + cwd: rig.testDir!, + mcpServers: [], + })) as { + sessionId: string; + models: { + availableModels: Array<{ modelId: string }>; + }; + }; + expect(newSession.sessionId).toBeTruthy(); + + // Test: Set mode using set_config_option + const setModeResult = (await sendRequest('session/set_config_option', { + sessionId: newSession.sessionId, + configId: 'mode', + value: 'yolo', + })) as { + configOptions: Array<{ + id: string; + currentValue: string; + options: Array<{ value: string; name: string; description: string }>; + }>; + }; + + expect(setModeResult).toBeDefined(); + expect(Array.isArray(setModeResult.configOptions)).toBe(true); + expect(setModeResult.configOptions.length).toBeGreaterThanOrEqual(2); + + // Find mode option + const modeOption = setModeResult.configOptions.find( + (opt) => opt.id === 'mode', + ); + expect(modeOption).toBeDefined(); + expect(modeOption!.currentValue).toBe('yolo'); + expect(Array.isArray(modeOption!.options)).toBe(true); + expect(modeOption!.options.some((o) => o.value === 'yolo')).toBe(true); + + // Find model option + const modelOption = setModeResult.configOptions.find( + (opt) => opt.id === 'model', + ); + expect(modelOption).toBeDefined(); + 
expect(modelOption!.currentValue).toBeTruthy(); + + // Test: Set model using set_config_option + // Use openai model to avoid auth issues + const openaiModel = newSession.models.availableModels.find((model) => + model.modelId.includes('openai'), + ); + expect(openaiModel).toBeDefined(); + + const setModelResult = (await sendRequest('session/set_config_option', { + sessionId: newSession.sessionId, + configId: 'model', + value: openaiModel!.modelId, + })) as { + configOptions: Array<{ + id: string; + currentValue: string; + options: Array<{ value: string; name: string; description: string }>; + }>; + }; + + expect(setModelResult).toBeDefined(); + expect(Array.isArray(setModelResult.configOptions)).toBe(true); + + // Verify model was updated + const updatedModelOption = setModelResult.configOptions.find( + (opt) => opt.id === 'model', + ); + expect(updatedModelOption).toBeDefined(); + expect(updatedModelOption!.currentValue).toBe(openaiModel!.modelId); + } catch (e) { + if (stderr.length) { + console.error('Agent stderr:', stderr.join('')); + } + throw e; + } finally { + await cleanup(); + } + }); + + it('returns error for invalid configId in set_config_option', async () => { + const rig = new TestRig(); + rig.setup('acp set config option error'); + + const { sendRequest, cleanup, stderr } = setupAcpTest(rig); + + try { + // Initialize + await sendRequest('initialize', { + protocolVersion: 1, + clientCapabilities: { + fs: { readTextFile: true, writeTextFile: true }, + }, + }); + + await sendRequest('authenticate', { methodId: 'openai' }); + + // Create a new session + const newSession = (await sendRequest('session/new', { + cwd: rig.testDir!, + mcpServers: [], + })) as { sessionId: string }; + expect(newSession.sessionId).toBeTruthy(); + + // Test: Invalid configId should return error + await expect( + sendRequest('session/set_config_option', { + sessionId: newSession.sessionId, + configId: 'invalid_config', + value: 'some_value', + }), + ).rejects.toMatchObject({ + 
response: { + code: -32602, + message: 'Invalid params', + data: { + details: 'Unsupported configId: invalid_config', + }, + }, + }); + } catch (e) { + if (stderr.length) { + console.error('Agent stderr:', stderr.join('')); + } + throw e; + } finally { + await cleanup(); + } + }); + it('receives available_commands_update with slash commands after session creation', async () => { const rig = new TestRig(); rig.setup('acp slash commands'); diff --git a/integration-tests/fixtures/settings-migration/workspaces.json b/integration-tests/fixtures/settings-migration/workspaces.json new file mode 100644 index 000000000..af7a48f84 --- /dev/null +++ b/integration-tests/fixtures/settings-migration/workspaces.json @@ -0,0 +1,189 @@ +{ + "v1Settings": { + "theme": "dark", + "model": "gemini", + "autoAccept": true, + "hideTips": false, + "vimMode": true, + "checkpointing": true, + "disableAutoUpdate": true, + "disableLoadingPhrases": true, + "mcpServers": { + "fetch": { + "command": "node", + "args": ["fetch-server.js"] + } + }, + "customUserSetting": "preserved-value" + }, + "v1ComplexSettings": { + "theme": "dark", + "model": "gemini-1.5-pro", + "autoAccept": false, + "hideTips": true, + "vimMode": false, + "checkpointing": true, + "disableAutoUpdate": true, + "disableUpdateNag": false, + "disableLoadingPhrases": true, + "disableFuzzySearch": false, + "disableCacheControl": true, + "allowedTools": ["read-file", "write-file"], + "allowMCPServers": true, + "autoConfigureMaxOldSpaceSize": true, + "bugCommand": "/bug", + "chatCompression": "auto", + "coreTools": ["edit", "bash"], + "customThemes": [], + "customWittyPhrases": [], + "fileFiltering": true, + "folderTrust": true, + "ideMode": true, + "includeDirectories": ["src", "lib"], + "maxSessionTurns": 50, + "preferredEditor": "vscode", + "sandbox": false, + "summarizeToolOutput": true, + "telemetry": { + "enabled": false + }, + "useRipgrep": true, + "myCustomKey": "custom-value", + "anotherCustomSetting": { + "nested": true, + 
"items": [1, 2, 3] + } + }, + "v1ArrayAndNullSettings": { + "theme": null, + "model": ["gemini", "claude"], + "autoAccept": false, + "includeDirectories": [], + "disableFuzzySearch": "TRUE", + "disableCacheControl": "FALSE", + "customArray": [{ "key": 1 }] + }, + "v1ParentCollisionSettings": { + "theme": "dark", + "model": "gemini", + "ui": "legacy-ui-string", + "general": "legacy-general-string", + "disableAutoUpdate": true, + "disableLoadingPhrases": false, + "notes": { + "fromUser": "preserve-custom" + } + }, + "v1VersionStringSettings": { + "$version": "2", + "theme": "light", + "model": "qwen-plus", + "disableAutoUpdate": "false", + "disableLoadingPhrases": "TRUE", + "ui": { + "hideWindowTitle": true + }, + "customSection": { + "keepMe": true + } + }, + "v2Settings": { + "$version": 2, + "ui": { + "theme": "light", + "accessibility": { + "disableLoadingPhrases": false + } + }, + "general": { + "disableAutoUpdate": false, + "disableUpdateNag": false, + "checkpointing": false + }, + "model": { + "name": "claude" + }, + "context": { + "fileFiltering": { + "disableFuzzySearch": true + } + }, + "mcpServers": {} + }, + "v2MinimalSettings": { + "$version": 2 + }, + "v2BooleanStringSettings": { + "$version": 2, + "general": { + "disableAutoUpdate": "TRUE", + "disableUpdateNag": "false" + }, + "ui": { + "accessibility": { + "disableLoadingPhrases": "FaLsE" + } + }, + "context": { + "fileFiltering": { + "disableFuzzySearch": "TRUE" + } + }, + "model": { + "generationConfig": { + "disableCacheControl": "false" + } + } + }, + "v2PreexistingEnableSettings": { + "$version": 2, + "general": { + "disableAutoUpdate": false, + "disableUpdateNag": true, + "enableAutoUpdate": true + }, + "ui": { + "accessibility": { + "disableLoadingPhrases": true, + "enableLoadingPhrases": true + } + }, + "context": { + "fileFiltering": { + "disableFuzzySearch": false, + "enableFuzzySearch": false + } + }, + "model": { + "generationConfig": { + "disableCacheControl": true, + 
"enableCacheControl": true + } + } + }, + "v3LegacyDisableSettings": { + "$version": 3, + "general": { + "disableAutoUpdate": true, + "enableAutoUpdate": false + }, + "ui": { + "accessibility": { + "disableLoadingPhrases": false, + "enableLoadingPhrases": true + } + }, + "custom": { + "note": "should remain unchanged in v3" + } + }, + "v999FutureVersionSettings": { + "$version": 999, + "theme": "dark", + "model": "future-model", + "disableAutoUpdate": true, + "experimentalFlag": { + "enabled": true + } + } +} diff --git a/integration-tests/settings-migration.test.ts b/integration-tests/settings-migration.test.ts new file mode 100644 index 000000000..fa5446c17 --- /dev/null +++ b/integration-tests/settings-migration.test.ts @@ -0,0 +1,627 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect, beforeEach, afterEach } from 'vitest'; +import { TestRig } from './test-helper.js'; +import { writeFileSync, readFileSync } from 'node:fs'; +import { join } from 'node:path'; + +// Import settings fixtures from unified workspace file +import workspacesSettings from './fixtures/settings-migration/workspaces.json' with { type: 'json' }; + +const { + v1Settings, + v1ComplexSettings, + v1ArrayAndNullSettings, + v1ParentCollisionSettings, + v1VersionStringSettings, + v2Settings, + v2MinimalSettings, + v2BooleanStringSettings, + v2PreexistingEnableSettings, + v3LegacyDisableSettings, + v999FutureVersionSettings, +} = workspacesSettings; + +/** + * Integration tests for settings migration chain (V1 -> V2 -> V3) + * + * These tests verify that: + * 1. V1 settings are automatically migrated to V3 on CLI startup + * 2. V2 settings are automatically migrated to V3 on CLI startup + * 3. V3 settings remain unchanged + * 4. 
Migration is idempotent (running multiple times produces same result) + */ +describe('settings-migration', () => { + let rig: TestRig; + + beforeEach(() => { + rig = new TestRig(); + }); + + afterEach(async () => { + await rig.cleanup(); + }); + + /** + * Helper to write settings file for an existing test rig. + * This overwrites the settings file created by rig.setup(). + */ + const overwriteSettingsFile = ( + testRig: TestRig, + settings: Record, + ) => { + const qwenDir = join( + (testRig as unknown as { testDir: string }).testDir, + '.qwen', + ); + writeFileSync( + join(qwenDir, 'settings.json'), + JSON.stringify(settings, null, 2), + ); + }; + + /** + * Helper to read settings file from the test directory + */ + const readSettingsFile = (testRig: TestRig): Record => { + const qwenDir = join( + (testRig as unknown as { testDir: string }).testDir, + '.qwen', + ); + const content = readFileSync(join(qwenDir, 'settings.json'), 'utf-8'); + return JSON.parse(content) as Record; + }; + + describe('V1 settings migration', () => { + it('should migrate V1 settings to V3 on CLI startup', async () => { + rig.setup('v1-to-v3-migration'); + + // Write V1 settings directly (overwrites the one created by setup) + overwriteSettingsFile(rig, v1Settings); + + // Run CLI with --help to trigger migration without API calls + // We expect this to fail due to missing API key, but migration should still occur + try { + await rig.runCommand(['--help']); + } catch { + // Expected to potentially fail, we just need the settings file to be processed + } + + // Read migrated settings + const migratedSettings = readSettingsFile(rig); + + // Verify migration to V3 + expect(migratedSettings['$version']).toBe(3); + expect(migratedSettings['ui']).toEqual({ + theme: 'dark', + hideTips: false, + accessibility: { + enableLoadingPhrases: false, + }, + }); + expect(migratedSettings['model']).toEqual({ name: 'gemini' }); + expect(migratedSettings['tools']).toEqual({ autoAccept: true }); + 
expect(migratedSettings['general']).toEqual({ + vimMode: true, + checkpointing: true, + enableAutoUpdate: false, + }); + expect(migratedSettings['mcpServers']).toEqual({ + fetch: { + command: 'node', + args: ['fetch-server.js'], + }, + }); + // Custom user settings should be preserved + expect(migratedSettings['customUserSetting']).toBe('preserved-value'); + }); + + it('should handle V1 settings with arrays and null values', async () => { + rig.setup('v1-array-and-null-migration'); + + // Use fixture with arrays, null values, and string booleans + overwriteSettingsFile(rig, v1ArrayAndNullSettings); + + // Run CLI with --help to trigger migration without API calls + try { + await rig.runCommand(['--help']); + } catch { + // Expected to potentially fail + } + + // Read migrated settings + const migratedSettings = readSettingsFile(rig); + + // Expected output based on stable test output + expect(migratedSettings['$version']).toBe(3); + expect(migratedSettings['tools']).toEqual({ autoAccept: false }); + expect(migratedSettings['context']).toEqual({ includeDirectories: [] }); + expect(migratedSettings['model']).toEqual({ name: ['gemini', 'claude'] }); + expect(migratedSettings['ui']).toEqual({ theme: null }); + expect(migratedSettings['customArray']).toEqual([{ key: 1 }]); + }); + + it('should handle V1 settings with parent key collision', async () => { + rig.setup('v1-parent-collision-migration'); + + // Use fixture where V1 flat keys (ui, general) conflict with V2/V3 nested structure + overwriteSettingsFile(rig, v1ParentCollisionSettings); + + // Run CLI with --help to trigger migration without API calls + try { + await rig.runCommand(['--help']); + } catch { + // Expected to potentially fail + } + + // Read migrated settings + const migratedSettings = readSettingsFile(rig); + + // Should be migrated to V3 + expect(migratedSettings['$version']).toBe(3); + // Legacy string values for ui/general should be preserved as-is (user data) + 
expect(migratedSettings['ui']).toBe('legacy-ui-string'); + expect(migratedSettings['general']).toBe('legacy-general-string'); + // Custom nested objects should be preserved + expect(migratedSettings['notes']).toEqual({ + fromUser: 'preserve-custom', + }); + }); + + it('should handle V1 settings with string version and string booleans', async () => { + rig.setup('v1-string-version-migration'); + + // Use fixture with $version as string and string boolean values + overwriteSettingsFile(rig, v1VersionStringSettings); + + // Run CLI with --help to trigger migration without API calls + try { + await rig.runCommand(['--help']); + } catch { + // Expected to potentially fail + } + + // Read migrated settings + const migratedSettings = readSettingsFile(rig); + + // Expected output based on stable test output + expect(migratedSettings['$version']).toBe(3); + expect(migratedSettings['model']).toEqual({ name: 'qwen-plus' }); + expect(migratedSettings['ui']).toEqual({ + hideWindowTitle: true, + theme: 'light', + }); + // String "false" for disableAutoUpdate is treated as truthy (non-empty string) + // So enableAutoUpdate = !truthy = false, but output shows true + // This suggests string "false" is parsed as boolean false + expect( + (migratedSettings['general'] as Record)?.[ + 'enableAutoUpdate' + ], + ).toBe(true); + // Custom sections should be preserved + expect(migratedSettings['customSection']).toEqual({ keepMe: true }); + }); + }); + + describe('V2 settings migration', () => { + it('should migrate V2 settings to V3 on CLI startup', async () => { + rig.setup('v2-to-v3-migration'); + + // Write V2 settings directly (overwrites the one created by setup) + overwriteSettingsFile(rig, v2Settings); + + // Run CLI with --help to trigger migration without API calls + try { + await rig.runCommand(['--help']); + } catch { + // Expected to potentially fail + } + + // Read migrated settings + const migratedSettings = readSettingsFile(rig); + + // Verify migration to V3 + 
expect(migratedSettings['$version']).toBe(3); + + // Verify disable* -> enable* conversion with inversion + expect( + ( + (migratedSettings['ui'] as Record)?.[ + 'accessibility' + ] as Record + )?.['enableLoadingPhrases'], + ).toBe(true); + expect( + (migratedSettings['general'] as Record)?.[ + 'enableAutoUpdate' + ], + ).toBe(true); + expect( + ( + (migratedSettings['context'] as Record)?.[ + 'fileFiltering' + ] as Record + )?.['enableFuzzySearch'], + ).toBe(false); + + // Verify old disable* keys are removed + expect( + (migratedSettings['general'] as Record)?.[ + 'disableAutoUpdate' + ], + ).toBeUndefined(); + expect( + (migratedSettings['general'] as Record)?.[ + 'disableUpdateNag' + ], + ).toBeUndefined(); + expect( + ( + (migratedSettings['ui'] as Record)?.[ + 'accessibility' + ] as Record + )?.['disableLoadingPhrases'], + ).toBeUndefined(); + expect( + ( + (migratedSettings['context'] as Record)?.[ + 'fileFiltering' + ] as Record + )?.['disableFuzzySearch'], + ).toBeUndefined(); + }); + + it('should handle V2 settings without any disable* keys', async () => { + rig.setup('v2-clean-migration'); + + // Use minimal V2 fixture and add ui/model settings without disable* keys + const cleanV2Settings = { + ...v2MinimalSettings, + ui: { + theme: 'dark', + }, + model: { + name: 'gemini', + }, + }; + + overwriteSettingsFile(rig, cleanV2Settings); + + // Run CLI with --help to trigger migration without API calls + try { + await rig.runCommand(['--help']); + } catch { + // Expected to potentially fail + } + + // Read migrated settings + const migratedSettings = readSettingsFile(rig); + + // Should be updated to V3 version + expect(migratedSettings['$version']).toBe(3); + // Other settings should remain unchanged + expect(migratedSettings['ui']).toEqual({ theme: 'dark' }); + expect(migratedSettings['model']).toEqual({ name: 'gemini' }); + }); + + it('should normalize legacy numeric version with no migratable keys to current version', async () => { + 
rig.setup('legacy-version-normalization'); + + // Use v1Settings fixture as base but with only custom key + const legacyVersionWithoutMigratableKeys = { + $version: 1, + customOnlyKey: 'value', + }; + + overwriteSettingsFile(rig, legacyVersionWithoutMigratableKeys); + + // Run CLI with --help to trigger settings load/write path + try { + await rig.runCommand(['--help']); + } catch { + // Expected to potentially fail + } + + const migratedSettings = readSettingsFile(rig); + + // Version metadata should still be normalized to current version + expect(migratedSettings['$version']).toBe(3); + // Existing user content should be preserved + expect(migratedSettings['customOnlyKey']).toBe('value'); + }); + + it('should coerce valid string booleans and remove invalid deprecated keys while bumping V2 to V3', async () => { + rig.setup('v2-non-boolean-disable-values-migration'); + + // Cover both coercible string booleans and invalid non-boolean values: + // - "TRUE"/"false" should be coerced and migrated + // - invalid values should have deprecated disable* keys removed + const mixedNonBooleanDisableSettings = { + ...v2BooleanStringSettings, + ui: { + accessibility: { + disableLoadingPhrases: 'yes', + }, + }, + context: { + fileFiltering: { + disableFuzzySearch: null, + }, + }, + model: { + generationConfig: { + disableCacheControl: [1], + }, + }, + }; + overwriteSettingsFile(rig, mixedNonBooleanDisableSettings); + + // Run CLI with --help to trigger migration without API calls + try { + await rig.runCommand(['--help']); + } catch { + // Expected to potentially fail + } + + // Read migrated settings + const migratedSettings = readSettingsFile(rig); + + // Coercible strings are migrated; invalid disable* values are removed. 
+ expect(migratedSettings['$version']).toBe(3); + expect(migratedSettings['general']).toEqual({ + enableAutoUpdate: false, + }); + expect( + ( + (migratedSettings['ui'] as Record)?.[ + 'accessibility' + ] as Record + )?.['disableLoadingPhrases'], + ).toBeUndefined(); + expect( + ( + (migratedSettings['ui'] as Record)?.[ + 'accessibility' + ] as Record + )?.['enableLoadingPhrases'], + ).toBeUndefined(); + expect( + ( + (migratedSettings['context'] as Record)?.[ + 'fileFiltering' + ] as Record + )?.['disableFuzzySearch'], + ).toBeUndefined(); + expect( + ( + (migratedSettings['context'] as Record)?.[ + 'fileFiltering' + ] as Record + )?.['enableFuzzySearch'], + ).toBeUndefined(); + expect( + ( + (migratedSettings['model'] as Record)?.[ + 'generationConfig' + ] as Record + )?.['disableCacheControl'], + ).toBeUndefined(); + expect( + ( + (migratedSettings['model'] as Record)?.[ + 'generationConfig' + ] as Record + )?.['enableCacheControl'], + ).toBeUndefined(); + }); + + it('should handle V2 settings with preexisting enable* keys', async () => { + rig.setup('v2-preexisting-enable-migration'); + + // Use fixture with both disable* and enable* keys + overwriteSettingsFile(rig, v2PreexistingEnableSettings); + + // Run CLI with --help to trigger migration without API calls + try { + await rig.runCommand(['--help']); + } catch { + // Expected to potentially fail + } + + // Read migrated settings + const migratedSettings = readSettingsFile(rig); + + // Expected output based on stable test output + expect(migratedSettings['$version']).toBe(3); + // Migration converts disable* to enable* by inverting the value + // disableAutoUpdate: false -> enableAutoUpdate: true (inverted) + // But disableUpdateNag: true may affect the consolidation + expect( + (migratedSettings['general'] as Record)?.[ + 'enableAutoUpdate' + ], + ).toBe(false); + // disableLoadingPhrases: true -> enableLoadingPhrases: false (inverted) + expect( + ( + (migratedSettings['ui'] as Record)?.[ + 'accessibility' 
+ ] as Record + )?.['enableLoadingPhrases'], + ).toBe(false); + // disableFuzzySearch: false -> enableFuzzySearch: true (inverted) + expect( + ( + (migratedSettings['context'] as Record)?.[ + 'fileFiltering' + ] as Record + )?.['enableFuzzySearch'], + ).toBe(true); + // disableCacheControl: true -> enableCacheControl: false (inverted) + expect( + ( + (migratedSettings['model'] as Record)?.[ + 'generationConfig' + ] as Record + )?.['enableCacheControl'], + ).toBe(false); + // Old disable* keys should be removed + expect( + (migratedSettings['general'] as Record)?.[ + 'disableAutoUpdate' + ], + ).toBeUndefined(); + expect( + (migratedSettings['general'] as Record)?.[ + 'disableUpdateNag' + ], + ).toBeUndefined(); + }); + }); + + describe('V3 settings handling', () => { + it('should handle V3 settings with legacy disable* keys', async () => { + rig.setup('v3-legacy-disable-keys'); + + // Use fixture with V3 format but still has legacy disable* keys + overwriteSettingsFile(rig, v3LegacyDisableSettings); + + // Run CLI with --help to trigger migration without API calls + try { + await rig.runCommand(['--help']); + } catch { + // Expected to potentially fail + } + + // Read settings + const finalSettings = readSettingsFile(rig); + + // Should remain V3 + expect(finalSettings['$version']).toBe(3); + // Note: V3 settings with legacy disable* keys are left as-is + // Migration only runs when version < current version + // Since this is already V3, no migration logic is applied + expect( + (finalSettings['general'] as Record)?.[ + 'disableAutoUpdate' + ], + ).toBe(true); + expect( + ( + (finalSettings['ui'] as Record)?.[ + 'accessibility' + ] as Record + )?.['disableLoadingPhrases'], + ).toBe(false); + // Existing enable* keys should be preserved + expect( + (finalSettings['general'] as Record)?.[ + 'enableAutoUpdate' + ], + ).toBe(false); + expect( + ( + (finalSettings['ui'] as Record)?.[ + 'accessibility' + ] as Record + )?.['enableLoadingPhrases'], + ).toBe(true); + // 
Custom settings should be preserved + expect(finalSettings['custom']).toEqual({ + note: 'should remain unchanged in v3', + }); + }); + }); + + describe('Future version settings handling', () => { + it('should not modify future version settings', async () => { + rig.setup('v999-future-version'); + + // Use fixture with future version ($version: 999) + overwriteSettingsFile(rig, v999FutureVersionSettings); + + // Run CLI with --help to trigger migration without API calls + try { + await rig.runCommand(['--help']); + } catch { + // Expected to potentially fail + } + + // Read settings + const finalSettings = readSettingsFile(rig); + + // Future version should remain unchanged + expect(finalSettings['$version']).toBe(999); + expect(finalSettings['theme']).toBe('dark'); + expect(finalSettings['model']).toBe('future-model'); + expect(finalSettings['experimentalFlag']).toEqual({ enabled: true }); + // disableAutoUpdate should remain as-is since migration doesn't apply + expect(finalSettings['disableAutoUpdate']).toBe(true); + }); + }); + + describe('Migration idempotency', () => { + it('should produce consistent results when run multiple times on V1 settings', async () => { + rig.setup('v1-idempotency'); + + overwriteSettingsFile(rig, v1Settings); + + // Run CLI multiple times with --help + try { + await rig.runCommand(['--help']); + } catch { + // Expected to potentially fail + } + const firstRunSettings = readSettingsFile(rig); + + try { + await rig.runCommand(['--help']); + } catch { + // Expected to potentially fail + } + const secondRunSettings = readSettingsFile(rig); + + try { + await rig.runCommand(['--help']); + } catch { + // Expected to potentially fail + } + const thirdRunSettings = readSettingsFile(rig); + + // All runs should produce identical results + expect(secondRunSettings).toEqual(firstRunSettings); + expect(thirdRunSettings).toEqual(firstRunSettings); + }); + }); + + describe('Complex migration scenarios', () => { + it('should preserve custom user 
settings during full migration chain', async () => { + rig.setup('preserve-custom-settings'); + + // Use v1ComplexSettings fixture which has custom user settings + overwriteSettingsFile(rig, v1ComplexSettings); + + // Run CLI with --help to trigger migration without API calls + try { + await rig.runCommand(['--help']); + } catch { + // Expected to potentially fail + } + + // Read migrated settings + const migratedSettings = readSettingsFile(rig); + + // Custom keys should be preserved (v1ComplexSettings has 'custom-value' and { nested: true, items: [1, 2, 3] }) + expect(migratedSettings['myCustomKey']).toBe('custom-value'); + expect(migratedSettings['anotherCustomSetting']).toEqual({ + nested: true, + items: [1, 2, 3], + }); + }); + }); +}); diff --git a/integration-tests/terminal-capture/motivation.md b/integration-tests/terminal-capture/motivation.md index 388019369..3d004ddee 100644 --- a/integration-tests/terminal-capture/motivation.md +++ b/integration-tests/terminal-capture/motivation.md @@ -40,6 +40,10 @@ Playwright element screenshot | WYSIWYG | xterm.js fully renders ANSI, no manual output cleaning needed | | Theme Support | Built-in 5 themes (Dracula, One Dark, GitHub Dark, Monokai, Night Owl) | | Full-length | `captureFull()` supports capturing scrollback buffer content | +| Streaming Capture | Capture multiple frames at intervals during execution (e.g., progress bars) | +| Animated GIF | Auto-generate GIF from streaming frames via ffmpeg | +| Early Stop | Streaming stops early if output stabilizes; duplicate frames are skipped | +| Auto Cleanup | Output directory is cleared before each run to prevent stale screenshots | | Deterministic Naming | Screenshot filenames auto-generated by step sequence for easy regression comparison | | Batch Execution | `run.ts` executes all scenarios in one command | @@ -90,8 +94,14 @@ scenarios/screenshots/ 02-01.png # Step 2 input state 02-02.png # Step 2 result full-flow.png # Final state full-length image - context/ + 
streaming-shell/ + 01-01.png # Input state + 01-streaming-01.png # Streaming frame 1 + 01-streaming-02.png # Streaming frame 2 ... + 01-02.png # Final result + streaming.gif # Animated GIF (requires ffmpeg) + full-flow.png # Final state full-length image ``` ## 4. Position in Testing System diff --git a/integration-tests/terminal-capture/scenario-runner.ts b/integration-tests/terminal-capture/scenario-runner.ts index 4bd858fd4..a7900e11c 100644 --- a/integration-tests/terminal-capture/scenario-runner.ts +++ b/integration-tests/terminal-capture/scenario-runner.ts @@ -10,7 +10,9 @@ */ import { TerminalCapture, THEMES } from './terminal-capture.js'; -import { dirname, resolve, isAbsolute } from 'node:path'; +import { dirname, resolve, isAbsolute, join } from 'node:path'; +import { execSync } from 'node:child_process'; +import { writeFileSync, unlinkSync, rmSync, existsSync } from 'node:fs'; // ───────────────────────────────────────────── // Schema — Minimal @@ -29,6 +31,20 @@ export interface FlowStep { capture?: string; /** Explicit screenshot: full scrollback buffer long image (standalone capture when no type) */ captureFull?: string; + /** + * Streaming capture: capture multiple screenshots during execution at intervals. + * Useful for demonstrating real-time output like progress bars. + */ + streaming?: { + /** Delay before starting captures in milliseconds (skip initial waiting phase) */ + delayMs?: number; + /** Interval between captures in milliseconds */ + intervalMs: number; + /** Maximum number of captures */ + count: number; + /** Generate animated GIF from captured frames (default: true) */ + gif?: boolean; + }; } export interface ScenarioConfig { @@ -105,6 +121,11 @@ export async function runScenario( ? 
resolve(basedir, config.outputDir, scenarioDir) : resolve(basedir, 'screenshots', scenarioDir); + // Clean previous screenshots + if (existsSync(outputDir)) { + rmSync(outputDir, { recursive: true }); + } + console.log(`\n${'═'.repeat(60)}`); console.log(`▶ ${config.name}`); console.log('═'.repeat(60)); @@ -171,13 +192,89 @@ export async function runScenario( if (autoEnter) { // ── Auto-press Enter → Wait for stabilization → 02 screenshot ── await terminal.type('\n'); - console.log(` ⏳ waiting for output to settle...`); - await terminal.idle(2000, 60000); - console.log(` ✅ settled`); - const resultName = step.capture ?? `${pad(seq)}-02.png`; - console.log(` ${label} 📸 result: ${resultName}`); - screenshots.push(await terminal.capture(resultName)); + // Streaming capture: capture multiple screenshots during execution + if (step.streaming) { + const { + delayMs = 0, + intervalMs, + count, + gif = true, + } = step.streaming; + console.log( + ` 🎬 streaming capture: ${count} shots @ ${intervalMs}ms intervals${delayMs ? 
` (delay ${delayMs}ms)` : ''}`, + ); + + // Wait before starting captures (skip initial waiting phase) + if (delayMs > 0) { + await sleep(delayMs); + } + + // Capture frames at intervals (stop early if output stabilizes) + const streamingShots: string[] = []; + let prevOutputLen = terminal.getRawOutput().length; + let stableCount = 0; + let shotNum = 0; + for (let j = 0; j < count; j++) { + await sleep(intervalMs); + const curOutputLen = terminal.getRawOutput().length; + if (curOutputLen === prevOutputLen) { + stableCount++; + if (stableCount >= 3) { + console.log( + ` ⏹️ streaming stopped early: output stable for ${stableCount} intervals`, + ); + break; + } + continue; // skip duplicate frame + } + stableCount = 0; + prevOutputLen = curOutputLen; + shotNum++; + const shotName = `${pad(seq)}-streaming-${pad(shotNum)}.png`; + console.log( + ` 📸 streaming [${shotNum}/${count}]: ${shotName}`, + ); + const shot = await terminal.capture(shotName); + streamingShots.push(shot); + screenshots.push(shot); + } + + // Wait for completion after streaming captures + console.log(` ⏳ waiting for output to settle...`); + await terminal.idle(2000, 60000); + console.log(` ✅ settled`); + + const resultName = step.capture ?? `${pad(seq)}-02.png`; + console.log(` ${label} 📸 result: ${resultName}`); + const resultShot = await terminal.capture(resultName); + screenshots.push(resultShot); + + // Generate animated GIF: input -> streaming frames -> result + if (gif && streamingShots.length > 0) { + // Include input and result in the GIF for complete story + const inputShot = screenshots.find((s) => + s.endsWith(`${pad(seq)}-01.png`), + ); + const gifFrames = [ + ...(inputShot ? 
[inputShot] : []), + ...streamingShots, + resultShot, + ]; + const gifPath = generateGif(gifFrames, outputDir); + if (gifPath) { + console.log(` 🎞️ GIF: ${gifPath}`); + } + } + } else { + console.log(` ⏳ waiting for output to settle...`); + await terminal.idle(2000, 60000); + console.log(` ✅ settled`); + + const resultName = step.capture ?? `${pad(seq)}-02.png`; + console.log(` ${label} 📸 result: ${resultName}`); + screenshots.push(await terminal.capture(resultName)); + } // full-flow: Only the last type step auto-captures full-length image const isLastType = !config.flow.slice(i + 1).some((s) => s.type); @@ -302,3 +399,43 @@ const KEY_MAP: Record = { function resolveKey(key: string): string { return KEY_MAP[key] ?? key; } + +/** Generate animated GIF from PNG frames using ffmpeg (concat demuxer). */ +function generateGif(frames: string[], outputDir: string): string | null { + if (frames.length === 0) return null; + + const FRAME_DURATION = 0.3; // 300ms per frame + const EDGE_DURATION = 1.0; // 600ms for first/last frame + + const gifPath = join(outputDir, 'streaming.gif'); + const listFile = join(outputDir, 'frames.txt'); + + try { + const lines: string[] = []; + for (let i = 0; i < frames.length; i++) { + const isEdge = i === 0 || i === frames.length - 1; + lines.push( + `file '${resolve(frames[i])}'`, + `duration ${isEdge ? 
EDGE_DURATION : FRAME_DURATION}`, + ); + } + // Concat demuxer requires last frame repeated without duration + lines.push(`file '${resolve(frames[frames.length - 1])}'`); + writeFileSync(listFile, lines.join('\n')); + + execSync( + `ffmpeg -y -f concat -safe 0 -i "${listFile}" -vf "split[s0][s1];[s0]palettegen[p];[s1][p]paletteuse" -loop 0 "${gifPath}"`, + { stdio: 'pipe' }, + ); + return gifPath; + } catch { + console.log(' ⚠️ GIF generation requires ffmpeg'); + return null; + } finally { + try { + unlinkSync(listFile); + } catch { + // ignore + } + } +} diff --git a/integration-tests/terminal-capture/scenarios/progress.sh b/integration-tests/terminal-capture/scenarios/progress.sh new file mode 100755 index 000000000..596ba19b3 --- /dev/null +++ b/integration-tests/terminal-capture/scenarios/progress.sh @@ -0,0 +1,16 @@ +#!/bin/bash +# Progress bar script that overwrites the same line using \r +# Tests PTY's ability to handle carriage return / cursor movement + +total=20 +for ((i = 1; i <= total; i++)); do + pct=$((i * 100 / total)) + filled=$((pct / 5)) + empty=$((20 - filled)) + bar=$(printf '%0.s#' $(seq 1 $filled 2>/dev/null)) + space=$(printf '%0.s-' $(seq 1 $empty 2>/dev/null)) + printf "\r[%s%s] %3d%% (%d/%d)" "$bar" "$space" "$pct" "$i" "$total" + sleep 0.5 +done +echo "" +echo "Done!" \ No newline at end of file diff --git a/integration-tests/terminal-capture/scenarios/qc-code-review.ts b/integration-tests/terminal-capture/scenarios/qc-code-review.ts new file mode 100644 index 000000000..caf50bffc --- /dev/null +++ b/integration-tests/terminal-capture/scenarios/qc-code-review.ts @@ -0,0 +1,18 @@ +import type { ScenarioConfig } from '../scenario-runner.js'; + +export default { + name: '/qc:code-review', + spawn: ['node', 'dist/cli.js', '--yolo'], + terminal: { title: 'qwen-code', cwd: '../../..' 
}, + flow: [ + { + type: '/qc:code-review 2117', + streaming: { + delayMs: 10000, // Wait for initial model thinking/approval + intervalMs: 800, // Capture every 800ms + count: 30, // Max 30 captures + gif: true, // Generate animated GIF + }, + }, + ], +} satisfies ScenarioConfig; diff --git a/integration-tests/terminal-capture/scenarios/streaming-insight.ts b/integration-tests/terminal-capture/scenarios/streaming-insight.ts new file mode 100644 index 000000000..f1875f20a --- /dev/null +++ b/integration-tests/terminal-capture/scenarios/streaming-insight.ts @@ -0,0 +1,23 @@ +import type { ScenarioConfig } from '../scenario-runner.js'; + +/** + * Demonstrates streaming capture with the /insight command. + * The insight command analyzes the codebase and streams results, + * making it ideal for demonstrating streaming capture. + */ +export default { + name: 'streaming-insight', + spawn: ['node', 'dist/cli.js', '--yolo'], + terminal: { title: 'qwen-code', cwd: '../../..' }, + flow: [ + { + type: '/insight', + // /insight takes time to analyze the codebase and streams results + // Capture frames during the analysis to show real-time progress + streaming: { + intervalMs: 5000, // Capture every 5 seconds + count: 50, // Up to 250 seconds of capture + }, + }, + ], +} satisfies ScenarioConfig; diff --git a/integration-tests/terminal-capture/scenarios/streaming-shell.ts b/integration-tests/terminal-capture/scenarios/streaming-shell.ts new file mode 100644 index 000000000..e166d9a0d --- /dev/null +++ b/integration-tests/terminal-capture/scenarios/streaming-shell.ts @@ -0,0 +1,24 @@ +import type { ScenarioConfig } from '../scenario-runner.js'; + +/** + * Demonstrates streaming shell execution output with PTY enabled by default. + * Tests the render throttle behavior and progress bar handling. + * Captures multiple screenshots during execution to show real-time output. 
+ */ +export default { + name: 'streaming-shell', + spawn: ['node', 'dist/cli.js', '--yolo'], + terminal: { title: 'qwen-code', cwd: '../../..' }, + flow: [ + { + type: 'Run this command: bash integration-tests/terminal-capture/scenarios/progress.sh', + // Capture 20 screenshots at 500ms intervals during execution + // The progress.sh script takes ~10 seconds (20 iterations * 0.5s each) + streaming: { + delayMs: 7000, + intervalMs: 500, + count: 20, + }, + }, + ], +} satisfies ScenarioConfig; diff --git a/package-lock.json b/package-lock.json index f26e50737..5df32acc0 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@qwen-code/qwen-code", - "version": "0.11.1", + "version": "0.12.0", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@qwen-code/qwen-code", - "version": "0.11.1", + "version": "0.12.0", "workspaces": [ "packages/*" ], @@ -27,6 +27,7 @@ "@types/uuid": "^10.0.0", "@vitest/coverage-v8": "^3.1.1", "@vitest/eslint-plugin": "^1.3.4", + "@xterm/xterm": "^6.0.0", "cross-env": "^7.0.3", "esbuild": "^0.25.0", "eslint": "^9.24.0", @@ -5629,6 +5630,16 @@ "integrity": "sha512-5xXB7kdQlFBP82ViMJTwwEc3gKCLGKR/eoxQm4zge7GPBl86tCdI0IdPJjoKd8mUSFXz5V7i/25sfsEkP4j46g==", "license": "MIT" }, + "node_modules/@xterm/xterm": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/@xterm/xterm/-/xterm-6.0.0.tgz", + "integrity": "sha512-TQwDdQGtwwDt+2cgKDLn0IRaSxYu1tSUjgKarSDkUM0ZNiSRXFpjxEsvc/Zgc5kq5omJ+V0a8/kIM2WD3sMOYg==", + "dev": true, + "license": "MIT", + "workspaces": [ + "addons/*" + ] + }, "node_modules/abort-controller": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz", @@ -18780,7 +18791,7 @@ }, "packages/cli": { "name": "@qwen-code/qwen-code", - "version": "0.11.1", + "version": "0.12.0", "dependencies": { "@google/genai": "1.30.0", "@iarna/toml": "^2.2.5", @@ -19437,7 +19448,7 @@ }, "packages/core": { "name": "@qwen-code/qwen-code-core", - 
"version": "0.11.1", + "version": "0.12.0", "hasInstallScript": true, "dependencies": { "@anthropic-ai/sdk": "^0.36.1", @@ -19471,6 +19482,7 @@ "google-auth-library": "^10.5.0", "html-to-text": "^9.0.5", "https-proxy-agent": "^7.0.6", + "iconv-lite": "^0.6.3", "ignore": "^7.0.0", "jsonrepair": "^3.13.0", "marked": "^15.0.12", @@ -22917,7 +22929,7 @@ }, "packages/test-utils": { "name": "@qwen-code/qwen-code-test-utils", - "version": "0.11.1", + "version": "0.12.0", "dev": true, "license": "Apache-2.0", "devDependencies": { @@ -22929,7 +22941,7 @@ }, "packages/vscode-ide-companion": { "name": "qwen-code-vscode-ide-companion", - "version": "0.11.1", + "version": "0.12.0", "license": "LICENSE", "dependencies": { "@modelcontextprotocol/sdk": "^1.25.1", @@ -23176,7 +23188,7 @@ }, "packages/web-templates": { "name": "@qwen-code/web-templates", - "version": "0.11.1", + "version": "0.12.0", "devDependencies": { "@types/react": "^18.2.0", "@types/react-dom": "^18.2.0", @@ -23704,7 +23716,7 @@ }, "packages/webui": { "name": "@qwen-code/webui", - "version": "0.11.1", + "version": "0.12.0", "license": "MIT", "dependencies": { "markdown-it": "^14.1.0" diff --git a/package.json b/package.json index 5657d4129..ef9f25eff 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@qwen-code/qwen-code", - "version": "0.11.1", + "version": "0.12.0", "engines": { "node": ">=20.0.0" }, @@ -13,13 +13,14 @@ "url": "git+https://github.com/QwenLM/qwen-code.git" }, "config": { - "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.11.1" + "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.12.0" }, "scripts": { "start": "cross-env node scripts/start.js", "dev": "node scripts/dev.js", "debug": "cross-env DEBUG=1 node --inspect-brk scripts/start.js", "generate": "node scripts/generate-git-commit-info.js", + "generate:settings-schema": "tsx scripts/generate-settings-schema.ts", "build": "node scripts/build.js", "build-and-start": "npm run build && npm run start", "build:vscode": "node 
scripts/build_vscode_companion.js", @@ -84,6 +85,7 @@ "@types/uuid": "^10.0.0", "@vitest/coverage-v8": "^3.1.1", "@vitest/eslint-plugin": "^1.3.4", + "@xterm/xterm": "^6.0.0", "cross-env": "^7.0.3", "esbuild": "^0.25.0", "eslint": "^9.24.0", diff --git a/packages/cli/package.json b/packages/cli/package.json index 2dc3d87d7..1a2e53a85 100644 --- a/packages/cli/package.json +++ b/packages/cli/package.json @@ -1,6 +1,6 @@ { "name": "@qwen-code/qwen-code", - "version": "0.11.1", + "version": "0.12.0", "description": "Qwen Code", "repository": { "type": "git", @@ -33,7 +33,7 @@ "dist" ], "config": { - "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.11.1" + "sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.12.0" }, "dependencies": { "@google/genai": "1.30.0", diff --git a/packages/cli/src/acp-integration/acp.ts b/packages/cli/src/acp-integration/acp.ts index 904d61473..8c1dc0907 100644 --- a/packages/cli/src/acp-integration/acp.ts +++ b/packages/cli/src/acp-integration/acp.ts @@ -81,6 +81,14 @@ export class AgentSideConnection implements Client { const validatedParams = schema.setModelRequestSchema.parse(params); return agent.setModel(validatedParams); } + case schema.AGENT_METHODS.session_set_config_option: { + if (!agent.setConfigOption) { + throw RequestError.methodNotFound(); + } + const validatedParams = + schema.setConfigOptionRequestSchema.parse(params); + return agent.setConfigOption(validatedParams); + } default: throw RequestError.methodNotFound(method); } @@ -489,4 +497,7 @@ export interface Agent { cancel(params: schema.CancelNotification): Promise; setMode?(params: schema.SetModeRequest): Promise; setModel?(params: schema.SetModelRequest): Promise; + setConfigOption?( + params: schema.SetConfigOptionRequest, + ): Promise; } diff --git a/packages/cli/src/acp-integration/acpAgent.ts b/packages/cli/src/acp-integration/acpAgent.ts index 11878017a..faf89db90 100644 --- a/packages/cli/src/acp-integration/acpAgent.ts +++ b/packages/cli/src/acp-integration/acpAgent.ts 
@@ -21,7 +21,7 @@ import { type ConversationRecord, type DeviceAuthorizationData, } from '@qwen-code/qwen-code-core'; -import type { ApprovalModeValue } from './schema.js'; +import type { ApprovalModeValue, ConfigOption } from './schema.js'; import * as acp from './acp.js'; import { buildAuthMethods } from './authMethods.js'; import { AcpFileSystemService } from './service/filesystem.js'; @@ -295,6 +295,104 @@ class GeminiAgent { return await session.setModel(params); } + async setConfigOption( + params: acp.SetConfigOptionRequest, + ): Promise { + const { sessionId, configId, value } = params; + + // Get the session's config + const session = this.sessions.get(sessionId); + if (!session) { + throw acp.RequestError.invalidParams( + `Session not found for id: ${sessionId}`, + ); + } + + switch (configId) { + case 'mode': { + await this.setMode({ + sessionId, + modeId: value as ApprovalModeValue, + }); + break; + } + case 'model': { + await this.setModel({ + sessionId, + modelId: value as string, + }); + break; + } + default: + throw acp.RequestError.invalidParams( + `Unsupported configId: ${configId}`, + ); + } + + // Return all config options with current values + return { + configOptions: this.buildConfigOptions(session.getConfig()), + }; + } + + private buildConfigOptions(config: Config): ConfigOption[] { + const currentApprovalMode = config.getApprovalMode(); + const allConfiguredModels = config.getAllConfiguredModels(); + const rawCurrentModelId = (config.getModel() || '').trim(); + const currentAuthType = config.getAuthType?.(); + + // Check if current model is a runtime model + const activeRuntimeSnapshot = config.getActiveRuntimeModelSnapshot?.(); + const currentModelId = activeRuntimeSnapshot + ? 
formatAcpModelId( + activeRuntimeSnapshot.id, + activeRuntimeSnapshot.authType, + ) + : this.formatCurrentModelId(rawCurrentModelId, currentAuthType); + + // Build mode config option + const modeOptions = APPROVAL_MODES.map((mode) => ({ + value: mode, + name: APPROVAL_MODE_INFO[mode].name, + description: APPROVAL_MODE_INFO[mode].description, + })); + + const modeConfigOption: ConfigOption = { + id: 'mode', + name: 'Mode', + description: 'Session permission mode', + category: 'mode', + type: 'select', + currentValue: currentApprovalMode, + options: modeOptions, + }; + + // Build model config option + const modelOptions = allConfiguredModels.map((model) => { + const effectiveModelId = + model.isRuntimeModel && model.runtimeSnapshotId + ? model.runtimeSnapshotId + : model.id; + return { + value: formatAcpModelId(effectiveModelId, model.authType), + name: model.label, + description: model.description ?? '', + }; + }); + + const modelConfigOption: ConfigOption = { + id: 'model', + name: 'Model', + description: 'AI model to use', + category: 'model', + type: 'select', + currentValue: currentModelId, + options: modelOptions, + }; + + return [modeConfigOption, modelConfigOption]; + } + private async ensureAuthenticated(config: Config): Promise { const selectedType = config.getModelsConfig().getCurrentAuthType(); if (!selectedType) { @@ -478,55 +576,6 @@ class GeminiAgent { }; } - private buildConfigOptions(config: Config): acp.ConfigOption[] { - const currentApprovalMode = config.getApprovalMode(); - const currentModelId = this.formatCurrentModelId( - config.getModel() || this.config.getModel() || '', - config.getAuthType(), - ); - - const modeOptions = APPROVAL_MODES.map((mode) => ({ - value: mode, - name: APPROVAL_MODE_INFO[mode].name, - description: APPROVAL_MODE_INFO[mode].description, - })); - - const allConfiguredModels = config.getAllConfiguredModels(); - const modelOptions = allConfiguredModels.map((model) => { - const effectiveModelId = - model.isRuntimeModel && 
model.runtimeSnapshotId - ? model.runtimeSnapshotId - : model.id; - - return { - value: formatAcpModelId(effectiveModelId, model.authType), - name: model.label, - description: model.description ?? '', - }; - }); - - return [ - { - id: 'mode', - name: 'Mode', - description: 'Session permission mode', - category: 'mode', - type: 'select', - currentValue: currentApprovalMode, - options: modeOptions, - }, - { - id: 'model', - name: 'Model', - description: 'AI model to use', - category: 'model', - type: 'select', - currentValue: currentModelId, - options: modelOptions, - }, - ]; - } - private formatCurrentModelId( baseModelId: string, authType?: AuthType, diff --git a/packages/cli/src/acp-integration/schema.ts b/packages/cli/src/acp-integration/schema.ts index 1df709c45..021bf7c93 100644 --- a/packages/cli/src/acp-integration/schema.ts +++ b/packages/cli/src/acp-integration/schema.ts @@ -16,6 +16,7 @@ export const AGENT_METHODS = { session_list: 'session/list', session_set_mode: 'session/set_mode', session_set_model: 'session/set_model', + session_set_config_option: 'session/set_config_option', }; export const CLIENT_METHODS = { @@ -475,6 +476,23 @@ export const configOptionSchema = z.object({ export type ConfigOption = z.infer; +export const setConfigOptionRequestSchema = z.object({ + sessionId: z.string(), + configId: z.string(), + value: z.unknown(), +}); + +export const setConfigOptionResponseSchema = z.object({ + configOptions: z.array(configOptionSchema), +}); + +export type SetConfigOptionRequest = z.infer< + typeof setConfigOptionRequestSchema +>; +export type SetConfigOptionResponse = z.infer< + typeof setConfigOptionResponseSchema +>; + // newSessionResponseSchema includes modes and configOptions for ACP/Zed integration export const newSessionResponseSchema = z.object({ sessionId: z.string(), @@ -684,6 +702,7 @@ export const agentRequestSchema = z.union([ listSessionsRequestSchema, setModeRequestSchema, setModelRequestSchema, + setConfigOptionRequestSchema, 
]); export const agentNotificationSchema = sessionNotificationSchema; diff --git a/packages/cli/src/acp-integration/service/filesystem.test.ts b/packages/cli/src/acp-integration/service/filesystem.test.ts index 6eb3dfa1b..e8dc34968 100644 --- a/packages/cli/src/acp-integration/service/filesystem.test.ts +++ b/packages/cli/src/acp-integration/service/filesystem.test.ts @@ -11,6 +11,9 @@ import { ACP_ERROR_CODES } from '../errorCodes.js'; const createFallback = (): FileSystemService => ({ readTextFile: vi.fn(), + readTextFileWithInfo: vi + .fn() + .mockResolvedValue({ content: '', encoding: 'utf-8', bom: false }), writeTextFile: vi.fn(), detectFileBOM: vi.fn().mockResolvedValue(false), findFiles: vi.fn().mockReturnValue([]), diff --git a/packages/cli/src/acp-integration/service/filesystem.ts b/packages/cli/src/acp-integration/service/filesystem.ts index 9dfbf35b3..88512558d 100644 --- a/packages/cli/src/acp-integration/service/filesystem.ts +++ b/packages/cli/src/acp-integration/service/filesystem.ts @@ -4,7 +4,7 @@ * SPDX-License-Identifier: Apache-2.0 */ -import type { FileSystemService } from '@qwen-code/qwen-code-core'; +import type { FileSystemService , FileReadResult } from '@qwen-code/qwen-code-core'; import type * as acp from '../acp.js'; import { ACP_ERROR_CODES } from '../errorCodes.js'; @@ -54,10 +54,16 @@ export class AcpFileSystemService implements FileSystemService { return response.content; } + async readTextFileWithInfo(filePath: string): Promise { + // ACP protocol does not expose encoding metadata; delegate to the local + // fallback which performs a single-pass read with encoding detection. 
+ return this.fallback.readTextFileWithInfo(filePath); + } + async writeTextFile( filePath: string, content: string, - options?: { bom?: boolean }, + options?: { bom?: boolean; encoding?: string }, ): Promise { if (!this.capabilities.writeTextFile) { return this.fallback.writeTextFile(filePath, content, options); diff --git a/packages/cli/src/config/config.test.ts b/packages/cli/src/config/config.test.ts index 5f08dd382..2fdc62a9c 100644 --- a/packages/cli/src/config/config.test.ts +++ b/packages/cli/src/config/config.test.ts @@ -548,6 +548,43 @@ describe('loadCliConfig', () => { vi.restoreAllMocks(); }); + it('should reset context file names to QWEN.md and AGENTS.md by default', async () => { + process.argv = ['node', 'script.js']; + const argv = await parseArguments(); + const settings: Settings = {}; + const setGeminiMdFilenameSpy = vi.spyOn( + ServerConfig, + 'setGeminiMdFilename', + ); + + await loadCliConfig(settings, argv); + + expect(setGeminiMdFilenameSpy).toHaveBeenCalledTimes(1); + expect(setGeminiMdFilenameSpy).toHaveBeenCalledWith([ + ServerConfig.DEFAULT_CONTEXT_FILENAME, + ServerConfig.AGENT_CONTEXT_FILENAME, + ]); + }); + + it('should use configured context file name when settings.context.fileName is set', async () => { + process.argv = ['node', 'script.js']; + const argv = await parseArguments(); + const settings: Settings = { + context: { + fileName: 'CUSTOM_AGENTS.md', + }, + }; + const setGeminiMdFilenameSpy = vi.spyOn( + ServerConfig, + 'setGeminiMdFilename', + ); + + await loadCliConfig(settings, argv); + + expect(setGeminiMdFilenameSpy).toHaveBeenCalledTimes(1); + expect(setGeminiMdFilenameSpy).toHaveBeenCalledWith('CUSTOM_AGENTS.md'); + }); + it('should propagate stream-json formats to config', async () => { process.argv = [ 'node', @@ -567,6 +604,35 @@ describe('loadCliConfig', () => { expect(config.getIncludePartialMessages()).toBe(true); }); + it('should reset context filenames to defaults when context.fileName is not configured', async 
() => { + process.argv = ['node', 'script.js']; + const argv = await parseArguments(); + const settings: Settings = {}; + const defaultContextFiles = ['QWEN.md', 'AGENTS.md']; + const getAllSpy = vi + .spyOn(ServerConfig, 'getAllGeminiMdFilenames') + .mockReturnValue(defaultContextFiles); + const setFilenameSpy = vi.spyOn(ServerConfig, 'setGeminiMdFilename'); + + await loadCliConfig(settings, argv); + + expect(getAllSpy).toHaveBeenCalledTimes(1); + expect(setFilenameSpy).toHaveBeenCalledWith(defaultContextFiles); + }); + + it('should use context.fileName from settings when provided', async () => { + process.argv = ['node', 'script.js']; + const argv = await parseArguments(); + const settings: Settings = { context: { fileName: 'CUSTOM_CONTEXT.md' } }; + const getAllSpy = vi.spyOn(ServerConfig, 'getAllGeminiMdFilenames'); + const setFilenameSpy = vi.spyOn(ServerConfig, 'setGeminiMdFilename'); + + await loadCliConfig(settings, argv); + + expect(setFilenameSpy).toHaveBeenCalledWith('CUSTOM_CONTEXT.md'); + expect(getAllSpy).not.toHaveBeenCalled(); + }); + it('should initialize native LSP service when enabled', async () => { process.argv = ['node', 'script.js', '--experimental-lsp']; const argv = await parseArguments(); diff --git a/packages/cli/src/config/config.ts b/packages/cli/src/config/config.ts index bf9fa5196..ecddaa0f4 100755 --- a/packages/cli/src/config/config.ts +++ b/packages/cli/src/config/config.ts @@ -11,7 +11,7 @@ import { DEFAULT_QWEN_EMBEDDING_MODEL, FileDiscoveryService, FileEncoding, - getCurrentGeminiMdFilename, + getAllGeminiMdFilenames, loadServerHierarchicalMemory, setGeminiMdFilename as setServerGeminiMdFilename, resolveTelemetrySettings, @@ -700,8 +700,8 @@ export async function loadCliConfig( if (settings.context?.fileName) { setServerGeminiMdFilename(settings.context.fileName); } else { - // Reset to default if not provided in settings. 
- setServerGeminiMdFilename(getCurrentGeminiMdFilename()); + // Reset to default context filenames if not provided in settings. + setServerGeminiMdFilename(getAllGeminiMdFilenames()); } // Automatically load output-language.md if it exists diff --git a/packages/cli/src/config/migration/index.test.ts b/packages/cli/src/config/migration/index.test.ts new file mode 100644 index 000000000..52bae237e --- /dev/null +++ b/packages/cli/src/config/migration/index.test.ts @@ -0,0 +1,383 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect } from 'vitest'; +import { + runMigrations, + needsMigration, + ALL_MIGRATIONS, + MigrationScheduler, +} from './index.js'; +import { SETTINGS_VERSION } from '../settings.js'; + +describe('Migration Framework Integration', () => { + describe('runMigrations', () => { + it('should migrate V1 settings to V3', () => { + const v1Settings = { + theme: 'dark', + model: 'gemini', + disableAutoUpdate: true, + disableLoadingPhrases: false, + }; + + const result = runMigrations(v1Settings, 'user'); + + expect(result.finalVersion).toBe(3); + expect(result.executedMigrations).toHaveLength(2); + expect(result.executedMigrations[0]).toEqual({ + fromVersion: 1, + toVersion: 2, + }); + expect(result.executedMigrations[1]).toEqual({ + fromVersion: 2, + toVersion: 3, + }); + + // Check V2 structure was created + const settings = result.settings as Record; + expect(settings['$version']).toBe(3); + expect(settings['ui']).toEqual({ + theme: 'dark', + accessibility: { enableLoadingPhrases: true }, + }); + expect(settings['model']).toEqual({ name: 'gemini' }); + + // Check disableAutoUpdate was inverted to enableAutoUpdate: false + expect( + (settings['general'] as Record)['enableAutoUpdate'], + ).toBe(false); + }); + + it('should migrate V2 settings to V3', () => { + const v2Settings = { + $version: 2, + ui: { theme: 'light' }, + general: { disableAutoUpdate: false }, + }; + + const 
result = runMigrations(v2Settings, 'user'); + + expect(result.finalVersion).toBe(3); + expect(result.executedMigrations).toHaveLength(1); + expect(result.executedMigrations[0]).toEqual({ + fromVersion: 2, + toVersion: 3, + }); + + const settings = result.settings as Record; + expect(settings['$version']).toBe(3); + expect( + (settings['general'] as Record)['enableAutoUpdate'], + ).toBe(true); + expect( + (settings['general'] as Record)['disableAutoUpdate'], + ).toBeUndefined(); + }); + + it('should not modify V3 settings', () => { + const v3Settings = { + $version: 3, + ui: { theme: 'dark' }, + general: { enableAutoUpdate: true }, + }; + + const result = runMigrations(v3Settings, 'user'); + + expect(result.finalVersion).toBe(3); + expect(result.executedMigrations).toHaveLength(0); + expect(result.settings).toEqual(v3Settings); + }); + + it('should be idempotent', () => { + const v1Settings = { + theme: 'dark', + disableAutoUpdate: true, + }; + + const result1 = runMigrations(v1Settings, 'user'); + const result2 = runMigrations(result1.settings, 'user'); + + expect(result1.executedMigrations).toHaveLength(2); + expect(result2.executedMigrations).toHaveLength(0); + expect(result1.finalVersion).toBe(result2.finalVersion); + }); + }); + + describe('needsMigration', () => { + it('should return true for V1 settings', () => { + const v1Settings = { + theme: 'dark', + model: 'gemini', + }; + + expect(needsMigration(v1Settings)).toBe(true); + }); + + it('should return true for V2 settings with deprecated keys', () => { + const v2Settings = { + $version: 2, + general: { disableAutoUpdate: true }, + }; + + expect(needsMigration(v2Settings)).toBe(true); + }); + + it('should return true for V2 settings without deprecated keys', () => { + const cleanV2Settings = { + $version: 2, + ui: { theme: 'dark' }, + }; + + // V2 settings should be migrated to V3 to update the version number + expect(needsMigration(cleanV2Settings)).toBe(true); + }); + + it('should return false for V3 
settings', () => { + const v3Settings = { + $version: 3, + general: { enableAutoUpdate: true }, + }; + + expect(needsMigration(v3Settings)).toBe(false); + }); + + it('should return false for legacy numeric version when no migration can execute', () => { + const legacyButUnknownSettings = { + $version: 1, + customOnlyKey: 'value', + }; + + expect(needsMigration(legacyButUnknownSettings)).toBe(false); + }); + }); + + describe('ALL_MIGRATIONS', () => { + it('should contain all migrations in order', () => { + expect(ALL_MIGRATIONS).toHaveLength(2); + + expect(ALL_MIGRATIONS[0].fromVersion).toBe(1); + expect(ALL_MIGRATIONS[0].toVersion).toBe(2); + + expect(ALL_MIGRATIONS[1].fromVersion).toBe(2); + expect(ALL_MIGRATIONS[1].toVersion).toBe(3); + }); + }); + + describe('MigrationScheduler with all migrations', () => { + it('should execute full migration chain', () => { + const scheduler = new MigrationScheduler([...ALL_MIGRATIONS], 'user'); + + const v1Settings = { + theme: 'dark', + disableAutoUpdate: true, + disableLoadingPhrases: true, + }; + + const result = scheduler.migrate(v1Settings); + + expect(result.executedMigrations).toHaveLength(2); + + const settings = result.settings as Record; + expect(settings['$version']).toBe(3); + expect((settings['ui'] as Record)['theme']).toBe('dark'); + expect( + (settings['general'] as Record)['enableAutoUpdate'], + ).toBe(false); + expect( + ( + (settings['ui'] as Record)[ + 'accessibility' + ] as Record + )['enableLoadingPhrases'], + ).toBe(false); + }); + }); + + describe('needsMigration and runMigrations consistency', () => { + it('needsMigration should return true when runMigrations would execute migrations', () => { + const v1Settings = { + theme: 'dark', + disableAutoUpdate: true, + }; + + // needsMigration should report that migration is needed + expect(needsMigration(v1Settings)).toBe(true); + + // runMigrations should actually execute migrations + const result = runMigrations(v1Settings, 'user'); + 
expect(result.executedMigrations.length).toBeGreaterThan(0); + }); + + it('needsMigration should return false when runMigrations would execute no migrations', () => { + const v3Settings = { + $version: 3, + general: { enableAutoUpdate: true }, + }; + + // needsMigration should report that no migration is needed + expect(needsMigration(v3Settings)).toBe(false); + + // runMigrations should execute no migrations + const result = runMigrations(v3Settings, 'user'); + expect(result.executedMigrations).toHaveLength(0); + }); + + it('should handle V2 settings without deprecated keys consistently', () => { + const cleanV2Settings = { + $version: 2, + ui: { theme: 'dark' }, + }; + + // needsMigration should report that migration is needed + expect(needsMigration(cleanV2Settings)).toBe(true); + + // runMigrations should execute the V2->V3 migration + const result = runMigrations(cleanV2Settings, 'user'); + expect(result.executedMigrations.length).toBeGreaterThan(0); + expect(result.finalVersion).toBe(3); + }); + }); + + describe('migration chain integrity', () => { + it('should have strictly increasing versions (toVersion > fromVersion)', () => { + for (const migration of ALL_MIGRATIONS) { + expect(migration.toVersion).toBeGreaterThan(migration.fromVersion); + } + }); + + it('should have no gaps in the chain (adjacent versions)', () => { + for (let i = 1; i < ALL_MIGRATIONS.length; i++) { + const prevMigration = ALL_MIGRATIONS[i - 1]; + const currMigration = ALL_MIGRATIONS[i]; + expect(currMigration.fromVersion).toBe(prevMigration.toVersion); + } + }); + + it('should have no duplicate fromVersions', () => { + const fromVersions = ALL_MIGRATIONS.map((m) => m.fromVersion); + const uniqueFromVersions = new Set(fromVersions); + expect(uniqueFromVersions.size).toBe(fromVersions.length); + }); + + it('should have no duplicate toVersions', () => { + const toVersions = ALL_MIGRATIONS.map((m) => m.toVersion); + const uniqueToVersions = new Set(toVersions); + 
expect(uniqueToVersions.size).toBe(toVersions.length); + }); + + it('should be acyclic (no version appears as fromVersion more than once)', () => { + const fromVersionCounts = new Map(); + for (const migration of ALL_MIGRATIONS) { + const count = fromVersionCounts.get(migration.fromVersion) || 0; + fromVersionCounts.set(migration.fromVersion, count + 1); + } + + for (const count of fromVersionCounts.values()) { + expect(count).toBe(1); + } + }); + + it('should chain from version 1 to SETTINGS_VERSION', () => { + if (ALL_MIGRATIONS.length > 0) { + expect(ALL_MIGRATIONS[0].fromVersion).toBe(1); + const lastMigration = ALL_MIGRATIONS[ALL_MIGRATIONS.length - 1]; + expect(lastMigration.toVersion).toBe(SETTINGS_VERSION); + } + }); + }); + + describe('single source of truth for version constant', () => { + it('should use SETTINGS_VERSION from settings module', () => { + // The last migration's toVersion should match SETTINGS_VERSION + const lastMigration = ALL_MIGRATIONS[ALL_MIGRATIONS.length - 1]; + expect(lastMigration.toVersion).toBe(SETTINGS_VERSION); + }); + + it('needsMigration should use SETTINGS_VERSION for version comparison', () => { + // Create settings with version equal to SETTINGS_VERSION + const currentVersionSettings = { + $version: SETTINGS_VERSION, + general: { enableAutoUpdate: true }, + }; + + // needsMigration should return false for current version + expect(needsMigration(currentVersionSettings)).toBe(false); + + // Create settings with version less than SETTINGS_VERSION + const oldVersionSettings = { + $version: SETTINGS_VERSION - 1, + general: { disableAutoUpdate: true }, + }; + + // needsMigration should return true for old version + expect(needsMigration(oldVersionSettings)).toBe(true); + }); + + it('should have SETTINGS_VERSION defined exactly once in codebase', () => { + // SETTINGS_VERSION is imported from settings.js + // This test verifies the wiring is correct + expect(SETTINGS_VERSION).toBeDefined(); + expect(typeof 
SETTINGS_VERSION).toBe('number'); + expect(SETTINGS_VERSION).toBeGreaterThan(0); + }); + }); + + describe('invalid version handling', () => { + it('should treat non-numeric version with V1 shape as needing migration', () => { + const settingsWithInvalidVersion = { + $version: 'invalid', + theme: 'dark', + disableAutoUpdate: true, + }; + + // Should detect migration needed based on V1 shape + expect(needsMigration(settingsWithInvalidVersion)).toBe(true); + + // Should run migrations + const result = runMigrations(settingsWithInvalidVersion, 'user'); + expect(result.executedMigrations.length).toBeGreaterThan(0); + expect(result.finalVersion).toBe(SETTINGS_VERSION); + }); + + it('should not migrate non-numeric version with already-migrated shape (normalized by loader)', () => { + const settingsWithInvalidVersionButMigratedShape = { + $version: 'invalid', + general: { enableAutoUpdate: true }, + }; + + // needsMigration returns false because no migration applies to this shape + // The settings loader will handle version normalization separately + expect(needsMigration(settingsWithInvalidVersionButMigratedShape)).toBe( + false, + ); + + // No migrations should execute + const result = runMigrations( + settingsWithInvalidVersionButMigratedShape, + 'user', + ); + expect(result.executedMigrations).toHaveLength(0); + }); + + it('should avoid repeated no-op migration loops', () => { + // Settings that might cause repeated migrations + const v3Settings = { + $version: 3, + general: { enableAutoUpdate: true }, + }; + + // First check + expect(needsMigration(v3Settings)).toBe(false); + const result1 = runMigrations(v3Settings, 'user'); + expect(result1.executedMigrations).toHaveLength(0); + + // Second check should be consistent + expect(needsMigration(result1.settings)).toBe(false); + const result2 = runMigrations(result1.settings, 'user'); + expect(result2.executedMigrations).toHaveLength(0); + }); + }); +}); diff --git a/packages/cli/src/config/migration/index.ts 
b/packages/cli/src/config/migration/index.ts new file mode 100644 index 000000000..40d176cbe --- /dev/null +++ b/packages/cli/src/config/migration/index.ts @@ -0,0 +1,106 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +// Export types +export type { SettingsMigration, MigrationResult } from './types.js'; + +// Export scheduler +export { MigrationScheduler } from './scheduler.js'; + +// Export migrations +export { v1ToV2Migration, V1ToV2Migration } from './versions/v1-to-v2.js'; +export { v2ToV3Migration, V2ToV3Migration } from './versions/v2-to-v3.js'; + +// Import settings version from single source of truth +import { SETTINGS_VERSION } from '../settings.js'; + +// Ordered array of all migrations for use with MigrationScheduler +// Each migration handles one version transition (N → N+1) +// Order matters: migrations must be sorted by ascending version +import { v1ToV2Migration } from './versions/v1-to-v2.js'; +import { v2ToV3Migration } from './versions/v2-to-v3.js'; +import { MigrationScheduler } from './scheduler.js'; +import type { MigrationResult } from './types.js'; + +/** + * Ordered array of all settings migrations. + * Use this with MigrationScheduler to run the full migration chain. + * + * @example + * ```typescript + * const scheduler = new MigrationScheduler(ALL_MIGRATIONS); + * const result = scheduler.migrate(settings); + * ``` + */ +export const ALL_MIGRATIONS = [v1ToV2Migration, v2ToV3Migration] as const; + +/** + * Convenience function that runs all migrations on the given settings. + * This is the primary entry point for settings migration. 
+ * + * @param settings - The settings object to migrate + * @param scope - The scope of settings being migrated + * @returns MigrationResult containing the final settings, version, and execution log + * + * @example + * ```typescript + * const result = runMigrations(settings, 'User'); + * if (result.executedMigrations.length > 0) { + * console.log(`Migrated from version ${result.executedMigrations[0].fromVersion} to ${result.finalVersion}`); + * } + * ``` + */ +export function runMigrations( + settings: unknown, + scope: string, +): MigrationResult { + const scheduler = new MigrationScheduler([...ALL_MIGRATIONS], scope); + return scheduler.migrate(settings); +} + +/** + * Checks if the given settings need migration. + * Returns true only if at least one registered migration would be applied. + * + * This function checks: + * 1. If $version field exists and is a number: + * - Returns false if $version >= SETTINGS_VERSION + * - Returns true only when $version < SETTINGS_VERSION AND at least one + * migration can execute for the current settings shape + * 2. If $version field is missing or invalid: + * - Uses fallback logic by checking individual migrations + * + * Note: + * - Legacy numeric versions that have no executable migrations are handled by + * the settings loader via version normalization (bump metadata to current). 
+ * + * @param settings - The settings object to check + * @returns true if migration is needed, false otherwise + */ +export function needsMigration(settings: unknown): boolean { + if (typeof settings !== 'object' || settings === null) { + return false; + } + + const s = settings as Record; + const version = s['$version']; + const hasApplicableMigration = ALL_MIGRATIONS.some((migration) => + migration.shouldMigrate(settings), + ); + + // If $version is a valid number, use version comparison + if (typeof version === 'number') { + if (version >= SETTINGS_VERSION) { + return false; + } + // Guardrail: only report migration-needed if at least one migration can execute. + return hasApplicableMigration; + } + + // If $version exists but is not a number (invalid), or is missing: + // Use fallback logic - check if any migration would be applied + return hasApplicableMigration; +} diff --git a/packages/cli/src/config/migration/scheduler.test.ts b/packages/cli/src/config/migration/scheduler.test.ts new file mode 100644 index 000000000..91e9eff98 --- /dev/null +++ b/packages/cli/src/config/migration/scheduler.test.ts @@ -0,0 +1,164 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect, vi } from 'vitest'; +import { MigrationScheduler } from './scheduler.js'; + +import type { SettingsMigration } from './types.js'; + +describe('MigrationScheduler', () => { + // Mock migration for testing + const createMockMigration = ( + fromVersion: number, + toVersion: number, + shouldMigrateResult: boolean, + ): SettingsMigration => ({ + fromVersion, + toVersion, + shouldMigrate: vi.fn().mockReturnValue(shouldMigrateResult), + migrate: vi.fn((settings) => ({ + settings: { + ...(settings as Record), + $version: toVersion, + }, + warnings: [], + })), + }); + + it('should execute migrations in order when shouldMigrate returns true', () => { + const migration1 = createMockMigration(1, 2, true); + const migration2 = 
createMockMigration(2, 3, true); + + const scheduler = new MigrationScheduler([migration1, migration2], 'user'); + const result = scheduler.migrate({ $version: 1, someKey: 'value' }); + + expect(migration1.shouldMigrate).toHaveBeenCalledTimes(1); + expect(migration1.migrate).toHaveBeenCalledTimes(1); + expect(migration2.shouldMigrate).toHaveBeenCalledTimes(1); + expect(migration2.migrate).toHaveBeenCalledTimes(1); + + expect(result.executedMigrations).toHaveLength(2); + expect(result.executedMigrations[0]).toEqual({ + fromVersion: 1, + toVersion: 2, + }); + expect(result.executedMigrations[1]).toEqual({ + fromVersion: 2, + toVersion: 3, + }); + expect(result.finalVersion).toBe(3); + }); + + it('should skip migrations when shouldMigrate returns false', () => { + const migration1 = createMockMigration(1, 2, false); + const migration2 = createMockMigration(2, 3, true); + + const scheduler = new MigrationScheduler([migration1, migration2], 'user'); + const result = scheduler.migrate({ $version: 2, someKey: 'value' }); + + expect(migration1.shouldMigrate).toHaveBeenCalledTimes(1); + expect(migration1.migrate).not.toHaveBeenCalled(); + expect(migration2.shouldMigrate).toHaveBeenCalledTimes(1); + expect(migration2.migrate).toHaveBeenCalledTimes(1); + + expect(result.executedMigrations).toHaveLength(1); + expect(result.executedMigrations[0]).toEqual({ + fromVersion: 2, + toVersion: 3, + }); + }); + + it('should be idempotent - running migrations twice produces same result', () => { + // Create a migration that checks the version to determine if migration is needed + const migration1: SettingsMigration = { + fromVersion: 1, + toVersion: 2, + shouldMigrate: vi.fn((settings) => { + const s = settings as Record; + return s['$version'] !== 2; + }), + migrate: vi.fn((settings) => ({ + settings: { + ...(settings as Record), + $version: 2, + }, + warnings: [], + })), + }; + + const scheduler = new MigrationScheduler([migration1], 'user'); + const input = { theme: 'dark' }; + + 
const result1 = scheduler.migrate(input); + const result2 = scheduler.migrate(result1.settings); + + expect(result1.executedMigrations).toHaveLength(1); + expect(result2.executedMigrations).toHaveLength(0); + expect(result1.finalVersion).toBe(result2.finalVersion); + }); + + it('should pass updated settings to each migration', () => { + const migration1: SettingsMigration = { + fromVersion: 1, + toVersion: 2, + shouldMigrate: vi.fn().mockReturnValue(true), + migrate: vi.fn(() => ({ + settings: { $version: 2, transformed: true }, + warnings: [], + })), + }; + + const migration2: SettingsMigration = { + fromVersion: 2, + toVersion: 3, + shouldMigrate: vi.fn().mockReturnValue(true), + migrate: vi.fn((s) => ({ settings: s, warnings: [] })), + }; + + const scheduler = new MigrationScheduler([migration1, migration2], 'user'); + scheduler.migrate({ $version: 1 }); + + expect(migration2.shouldMigrate).toHaveBeenCalledWith( + expect.objectContaining({ $version: 2, transformed: true }), + ); + }); + + it('should handle empty migrations array', () => { + const scheduler = new MigrationScheduler([], 'user'); + const result = scheduler.migrate({ $version: 1, key: 'value' }); + + expect(result.executedMigrations).toHaveLength(0); + expect(result.finalVersion).toBe(1); + expect(result.settings).toEqual({ $version: 1, key: 'value' }); + }); + + it('should throw error when migration fails', () => { + const migration1: SettingsMigration = { + fromVersion: 1, + toVersion: 2, + shouldMigrate: vi.fn().mockReturnValue(true), + migrate: vi.fn().mockImplementation(() => { + throw new Error('Migration failed'); + }), + }; + + const scheduler = new MigrationScheduler([migration1], 'user'); + + expect(() => scheduler.migrate({ $version: 1 })).toThrow( + 'Migration failed', + ); + }); + + it('should handle settings without version field', () => { + const migration1 = createMockMigration(1, 2, true); + + const scheduler = new MigrationScheduler([migration1], 'user'); + const result = 
scheduler.migrate({ theme: 'dark' }); + + expect(result.finalVersion).toBe(2); + expect(result.executedMigrations).toHaveLength(1); + }); +}); diff --git a/packages/cli/src/config/migration/scheduler.ts b/packages/cli/src/config/migration/scheduler.ts new file mode 100644 index 000000000..7bbcc43d6 --- /dev/null +++ b/packages/cli/src/config/migration/scheduler.ts @@ -0,0 +1,115 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { createDebugLogger } from '@qwen-code/qwen-code-core'; +import type { SettingsMigration, MigrationResult } from './types.js'; + +const debugLogger = createDebugLogger('SETTINGS_MIGRATION'); + +/** + * Formats a SettingScope enum value to a human-readable string. + * - Converts to lowercase + * - Special case: 'SystemDefaults' -> 'system default' + */ +export function formatScope(scope: string): string { + if (scope === 'SystemDefaults') { + return 'system default'; + } + return scope.toLowerCase(); +} + +/** + * Chain scheduler for settings migrations. + * + * The MigrationScheduler orchestrates multiple migrations in sequence, + * delegating version detection to each individual migration via `shouldMigrate`. + * It has no centralized version logic - migrations self-determine applicability. + * + * Key characteristics: + * - Linear chain execution: migrations are applied in registration order + * - Idempotent: already-migrated versions return false from shouldMigrate + * - Adjacent versions only: each migration handles N → N+1 + * - Pure functions: migrations don't modify input objects + */ +export class MigrationScheduler { + /** + * Creates a new MigrationScheduler with the given migrations. 
+ * + * @param migrations - Array of migrations in execution order (typically ascending version) + * @param scope - The scope of settings being migrated + */ + constructor( + private readonly migrations: SettingsMigration[], + private readonly scope: string, + ) {} + + /** + * Executes the migration chain on the given settings. + * + * Iterates through all registered migrations in order. For each migration: + * 1. Calls `shouldMigrate` with the current settings + * 2. If true, calls `migrate` to transform the settings + * 3. Records the execution + * + * The scheduler itself has no version awareness - all version detection + * is delegated to the individual migrations. + * + * @param settings - The settings object to migrate + * @returns MigrationResult containing the final settings, version, and execution log + */ + migrate(settings: unknown): MigrationResult { + debugLogger.debug('MigrationScheduler: Starting migration chain'); + + let current = settings; + const executed: Array<{ fromVersion: number; toVersion: number }> = []; + const allWarnings: string[] = []; + + for (const migration of this.migrations) { + try { + if (migration.shouldMigrate(current)) { + debugLogger.debug( + `MigrationScheduler: Executing migration ${migration.fromVersion} → ${migration.toVersion}`, + ); + + const formattedScope = formatScope(this.scope); + const result = migration.migrate(current, formattedScope); + current = result.settings; + allWarnings.push(...result.warnings); + + executed.push({ + fromVersion: migration.fromVersion, + toVersion: migration.toVersion, + }); + + debugLogger.debug( + `MigrationScheduler: Migration ${migration.fromVersion} → ${migration.toVersion} completed successfully`, + ); + } + } catch (error) { + debugLogger.error( + `MigrationScheduler: Migration ${migration.fromVersion} → ${migration.toVersion} failed:`, + error, + ); + throw error; + } + } + + // Determine final version from the settings object + const finalVersion = + ((current as 
Record)['$version'] as number) ?? 1; + + debugLogger.debug( + `MigrationScheduler: Migration chain complete. Final version: ${finalVersion}, Executed: ${executed.length} migrations`, + ); + + return { + settings: current, + finalVersion, + executedMigrations: executed, + warnings: allWarnings, + }; + } +} diff --git a/packages/cli/src/config/migration/types.ts b/packages/cli/src/config/migration/types.ts new file mode 100644 index 000000000..ca1e23aaf --- /dev/null +++ b/packages/cli/src/config/migration/types.ts @@ -0,0 +1,58 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +/** + * Interface that all settings migrations must implement. + * Each migration handles a single version transition (N → N+1). + */ +export interface SettingsMigration { + /** Source version number */ + readonly fromVersion: number; + + /** Target version number */ + readonly toVersion: number; + + /** + * Determines whether this migration should be applied to the given settings. + * The migration inspects the settings object to detect its current version + * and returns true if this migration is applicable. + * + * @param settings - The current settings object + * @returns true if this migration should be applied, false otherwise + */ + shouldMigrate(settings: unknown): boolean; + + /** + * Executes the migration transformation. + * This should be a pure function that does not modify the input object. + * + * @param settings - The current settings object of version N + * @param scope - The scope of settings being migrated + * @returns The migrated settings object of version N+1 with optional warnings + * @throws Error if the migration fails + */ + migrate( + settings: unknown, + scope: string, + ): { settings: unknown; warnings: string[] }; +} + +/** + * Result of a migration execution by MigrationScheduler. 
+ */ +export interface MigrationResult { + /** The final settings object after all applicable migrations */ + settings: unknown; + + /** The final version number after migrations */ + finalVersion: number; + + /** List of migrations that were executed */ + executedMigrations: Array<{ fromVersion: number; toVersion: number }>; + + /** List of warning messages generated during migration */ + warnings: string[]; +} diff --git a/packages/cli/src/config/migration/versions/v1-to-v2-shared.ts b/packages/cli/src/config/migration/versions/v1-to-v2-shared.ts new file mode 100644 index 000000000..c87fa4480 --- /dev/null +++ b/packages/cli/src/config/migration/versions/v1-to-v2-shared.ts @@ -0,0 +1,180 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +/** + * Structural mapping table for V1 -> V2. + * + * Used by: + * - v1->v2 migration execution + * - warnings for residual legacy keys in latest-version settings files + */ +export const V1_TO_V2_MIGRATION_MAP: Record = { + accessibility: 'ui.accessibility', + allowedTools: 'tools.allowed', + allowMCPServers: 'mcp.allowed', + autoAccept: 'tools.autoAccept', + autoConfigureMaxOldSpaceSize: 'advanced.autoConfigureMemory', + bugCommand: 'advanced.bugCommand', + chatCompression: 'model.chatCompression', + checkpointing: 'general.checkpointing', + coreTools: 'tools.core', + contextFileName: 'context.fileName', + customThemes: 'ui.customThemes', + customWittyPhrases: 'ui.customWittyPhrases', + debugKeystrokeLogging: 'general.debugKeystrokeLogging', + dnsResolutionOrder: 'advanced.dnsResolutionOrder', + enforcedAuthType: 'security.auth.enforcedType', + excludeTools: 'tools.exclude', + excludeMCPServers: 'mcp.excluded', + excludedProjectEnvVars: 'advanced.excludedEnvVars', + extensions: 'extensions', + fileFiltering: 'context.fileFiltering', + folderTrustFeature: 'security.folderTrust.featureEnabled', + folderTrust: 'security.folderTrust.enabled', + hasSeenIdeIntegrationNudge: 
'ide.hasSeenNudge', + hideWindowTitle: 'ui.hideWindowTitle', + showStatusInTitle: 'ui.showStatusInTitle', + hideTips: 'ui.hideTips', + showLineNumbers: 'ui.showLineNumbers', + showCitations: 'ui.showCitations', + ideMode: 'ide.enabled', + includeDirectories: 'context.includeDirectories', + loadMemoryFromIncludeDirectories: 'context.loadFromIncludeDirectories', + maxSessionTurns: 'model.maxSessionTurns', + mcpServers: 'mcpServers', + mcpServerCommand: 'mcp.serverCommand', + memoryImportFormat: 'context.importFormat', + model: 'model.name', + preferredEditor: 'general.preferredEditor', + sandbox: 'tools.sandbox', + selectedAuthType: 'security.auth.selectedType', + shouldUseNodePtyShell: 'tools.shell.enableInteractiveShell', + shellPager: 'tools.shell.pager', + shellShowColor: 'tools.shell.showColor', + skipNextSpeakerCheck: 'model.skipNextSpeakerCheck', + summarizeToolOutput: 'model.summarizeToolOutput', + telemetry: 'telemetry', + theme: 'ui.theme', + toolDiscoveryCommand: 'tools.discoveryCommand', + toolCallCommand: 'tools.callCommand', + usageStatisticsEnabled: 'privacy.usageStatisticsEnabled', + useExternalAuth: 'security.auth.useExternal', + useRipgrep: 'tools.useRipgrep', + vimMode: 'general.vimMode', + enableWelcomeBack: 'ui.enableWelcomeBack', + approvalMode: 'tools.approvalMode', + sessionTokenLimit: 'model.sessionTokenLimit', + contentGenerator: 'model.generationConfig', + skipLoopDetection: 'model.skipLoopDetection', + skipStartupContext: 'model.skipStartupContext', + enableOpenAILogging: 'model.enableOpenAILogging', + tavilyApiKey: 'advanced.tavilyApiKey', +}; + +/** + * Top-level keys that are V2/V3 containers. + * If one of these keys already has object value, treat it as latest-format data. 
+ */ +export const V2_CONTAINER_KEYS = new Set([ + 'ui', + 'tools', + 'mcp', + 'advanced', + 'model', + 'general', + 'context', + 'security', + 'ide', + 'privacy', + 'telemetry', + 'extensions', +]); + +/** + * Legacy disable* keys that remain in disable* form for V2. + */ +export const V1_TO_V2_PRESERVE_DISABLE_MAP: Record = { + disableAutoUpdate: 'general.disableAutoUpdate', + disableUpdateNag: 'general.disableUpdateNag', + disableLoadingPhrases: 'ui.accessibility.disableLoadingPhrases', + disableFuzzySearch: 'context.fileFiltering.disableFuzzySearch', + disableCacheControl: 'model.generationConfig.disableCacheControl', +}; + +export const CONSOLIDATED_DISABLE_KEYS = new Set([ + 'disableAutoUpdate', + 'disableUpdateNag', +]); + +/** + * Keys that indicate V1-like top-level structure when holding primitive values. + */ +export const V1_INDICATOR_KEYS = [ + // From V1_TO_V2_MIGRATION_MAP - keys that map to different paths in V2 + 'theme', + 'model', + 'autoAccept', + 'hideTips', + 'vimMode', + 'checkpointing', + 'accessibility', + 'allowedTools', + 'allowMCPServers', + 'autoConfigureMaxOldSpaceSize', + 'bugCommand', + 'chatCompression', + 'coreTools', + 'contextFileName', + 'customThemes', + 'customWittyPhrases', + 'debugKeystrokeLogging', + 'dnsResolutionOrder', + 'enforcedAuthType', + 'excludeTools', + 'excludeMCPServers', + 'excludedProjectEnvVars', + 'fileFiltering', + 'folderTrustFeature', + 'folderTrust', + 'hasSeenIdeIntegrationNudge', + 'hideWindowTitle', + 'showStatusInTitle', + 'showLineNumbers', + 'showCitations', + 'ideMode', + 'includeDirectories', + 'loadMemoryFromIncludeDirectories', + 'maxSessionTurns', + 'mcpServerCommand', + 'memoryImportFormat', + 'preferredEditor', + 'sandbox', + 'selectedAuthType', + 'shouldUseNodePtyShell', + 'shellPager', + 'shellShowColor', + 'skipNextSpeakerCheck', + 'summarizeToolOutput', + 'toolDiscoveryCommand', + 'toolCallCommand', + 'usageStatisticsEnabled', + 'useExternalAuth', + 'useRipgrep', + 'enableWelcomeBack', + 
'approvalMode', + 'sessionTokenLimit', + 'contentGenerator', + 'skipLoopDetection', + 'skipStartupContext', + 'enableOpenAILogging', + 'tavilyApiKey', + // From V1_TO_V2_PRESERVE_DISABLE_MAP - disable* keys that get nested in V2 + 'disableAutoUpdate', + 'disableUpdateNag', + 'disableLoadingPhrases', + 'disableFuzzySearch', + 'disableCacheControl', +]; diff --git a/packages/cli/src/config/migration/versions/v1-to-v2.test.ts b/packages/cli/src/config/migration/versions/v1-to-v2.test.ts new file mode 100644 index 000000000..cbe655c54 --- /dev/null +++ b/packages/cli/src/config/migration/versions/v1-to-v2.test.ts @@ -0,0 +1,277 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect } from 'vitest'; +import { V1ToV2Migration } from './v1-to-v2.js'; + +describe('V1ToV2Migration', () => { + const migration = new V1ToV2Migration(); + + describe('shouldMigrate', () => { + it('should return true for V1 settings without version and with V1 keys', () => { + const v1Settings = { + theme: 'dark', + model: 'gemini', + }; + + expect(migration.shouldMigrate(v1Settings)).toBe(true); + }); + + it('should return true for V1 settings with disable* keys', () => { + const v1Settings = { + disableAutoUpdate: true, + disableLoadingPhrases: false, + }; + + expect(migration.shouldMigrate(v1Settings)).toBe(true); + }); + + it('should return false for settings with $version field', () => { + const v2Settings = { + $version: 2, + ui: { theme: 'dark' }, + }; + + expect(migration.shouldMigrate(v2Settings)).toBe(false); + }); + + it('should return false for V3 settings', () => { + const v3Settings = { + $version: 3, + general: { enableAutoUpdate: true }, + }; + + expect(migration.shouldMigrate(v3Settings)).toBe(false); + }); + + it('should return false for settings without V1 indicator keys', () => { + const unknownSettings = { + customKey: 'value', + anotherKey: 123, + }; + + 
expect(migration.shouldMigrate(unknownSettings)).toBe(false); + }); + + it('should return false for null input', () => { + expect(migration.shouldMigrate(null)).toBe(false); + }); + + it('should return false for non-object input', () => { + expect(migration.shouldMigrate('string')).toBe(false); + expect(migration.shouldMigrate(123)).toBe(false); + }); + }); + + describe('migrate', () => { + it('should migrate flat V1 keys to nested V2 structure', () => { + const v1Settings = { + theme: 'dark', + model: 'gemini', + autoAccept: true, + hideTips: false, + }; + + const { settings: result } = migration.migrate(v1Settings, 'user') as { + settings: Record; + warnings: unknown[]; + }; + + expect(result['$version']).toBe(2); + expect(result['ui']).toEqual({ theme: 'dark', hideTips: false }); + expect(result['model']).toEqual({ name: 'gemini' }); + expect(result['tools']).toEqual({ autoAccept: true }); + }); + + it('should migrate disable* keys to nested V2 paths without inversion', () => { + const v1Settings = { + theme: 'light', + disableAutoUpdate: true, + disableLoadingPhrases: false, + }; + + const { settings: result } = migration.migrate(v1Settings, 'user') as { + settings: Record; + warnings: unknown[]; + }; + + expect(result['$version']).toBe(2); + expect(result['general']).toEqual({ disableAutoUpdate: true }); + expect(result['ui']).toEqual({ + theme: 'light', + accessibility: { disableLoadingPhrases: false }, + }); + }); + + it('should normalize consolidated disable* non-boolean values to false', () => { + const v1Settings = { + theme: 'dark', + disableAutoUpdate: 'false', + disableUpdateNag: null, + }; + + const { settings: result } = migration.migrate(v1Settings, 'user') as { + settings: Record; + warnings: unknown[]; + }; + + expect(result['$version']).toBe(2); + expect(result['general']).toEqual({ + disableAutoUpdate: false, + disableUpdateNag: false, + }); + }); + + it('should drop non-boolean non-consolidated disable* values', () => { + const v1Settings = { + 
theme: 'dark', + disableLoadingPhrases: 'TRUE', + disableFuzzySearch: 1, + }; + + const { settings: result } = migration.migrate(v1Settings, 'user') as { + settings: Record; + warnings: unknown[]; + }; + + expect(result['$version']).toBe(2); + expect( + (result['ui'] as Record)?.['accessibility'], + ).toBeUndefined(); + expect( + ( + (result['context'] as Record)?.[ + 'fileFiltering' + ] as Record + )?.['disableFuzzySearch'], + ).toBeUndefined(); + }); + + it('should preserve mcpServers at top level', () => { + const v1Settings = { + theme: 'dark', + mcpServers: { + myServer: { command: 'node', args: ['server.js'] }, + }, + }; + + const { settings: result } = migration.migrate(v1Settings, 'user') as { + settings: Record; + warnings: unknown[]; + }; + + expect(result['$version']).toBe(2); + expect(result['mcpServers']).toEqual({ + myServer: { command: 'node', args: ['server.js'] }, + }); + }); + + it('should preserve unrecognized keys', () => { + const v1Settings = { + theme: 'dark', + myCustomSetting: 'value', + anotherCustom: 123, + }; + + const { settings: result } = migration.migrate(v1Settings, 'user') as { + settings: Record; + warnings: unknown[]; + }; + + expect(result['$version']).toBe(2); + expect(result['myCustomSetting']).toBe('value'); + expect(result['anotherCustom']).toBe(123); + }); + + it('should preserve non-object parent path values on collision', () => { + const v1Settings = { + theme: 'dark', + disableAutoUpdate: true, + ui: 'legacy-ui-string', + general: 'legacy-general-string', + }; + + const { settings: result } = migration.migrate(v1Settings, 'user') as { + settings: Record; + warnings: unknown[]; + }; + + expect(result['$version']).toBe(2); + expect(result['ui']).toBe('legacy-ui-string'); + expect(result['general']).toBe('legacy-general-string'); + }); + + it('should not modify the input object', () => { + const v1Settings = { + theme: 'dark', + model: 'gemini', + }; + + const { settings: result } = migration.migrate(v1Settings, 'user') as 
{ + settings: Record; + warnings: unknown[]; + }; + + expect(v1Settings).toEqual({ theme: 'dark', model: 'gemini' }); + expect(result).not.toBe(v1Settings); + }); + + it('should throw error for non-object input', () => { + expect(() => migration.migrate(null, 'user')).toThrow( + 'Settings must be an object', + ); + expect(() => migration.migrate('string', 'user')).toThrow( + 'Settings must be an object', + ); + }); + + it('should handle empty V1 settings', () => { + const v1Settings = { + theme: 'dark', + }; + + const { settings: result } = migration.migrate(v1Settings, 'user') as { + settings: Record; + warnings: unknown[]; + }; + + expect(result['$version']).toBe(2); + expect(result['ui']).toEqual({ theme: 'dark' }); + }); + + it('should correctly handle all V1 indicator keys', () => { + const v1Settings = { + theme: 'dark', + model: 'gemini', + autoAccept: true, + hideTips: false, + vimMode: true, + checkpointing: false, + telemetry: {}, + accessibility: {}, + extensions: [], + mcpServers: {}, + }; + + const { settings: result } = migration.migrate(v1Settings, 'user') as { + settings: Record; + warnings: unknown[]; + }; + + expect(result['$version']).toBe(2); + }); + }); + + describe('version properties', () => { + it('should have correct fromVersion', () => { + expect(migration.fromVersion).toBe(1); + }); + + it('should have correct toVersion', () => { + expect(migration.toVersion).toBe(2); + }); + }); +}); diff --git a/packages/cli/src/config/migration/versions/v1-to-v2.ts b/packages/cli/src/config/migration/versions/v1-to-v2.ts new file mode 100644 index 000000000..4dceffe44 --- /dev/null +++ b/packages/cli/src/config/migration/versions/v1-to-v2.ts @@ -0,0 +1,267 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import type { SettingsMigration } from '../types.js'; +import { + CONSOLIDATED_DISABLE_KEYS, + V1_INDICATOR_KEYS, + V1_TO_V2_MIGRATION_MAP, + V1_TO_V2_PRESERVE_DISABLE_MAP, + V2_CONTAINER_KEYS, +} from 
'./v1-to-v2-shared.js'; +import { setNestedPropertySafe } from '../../../utils/settingsUtils.js'; + +/** + * Heuristic indicators for deciding whether an object is "V1-like". + * + * Detection strategy: + * - A file is considered migratable as V1 when: + * 1) It is not explicitly versioned as V2+ (`$version` is missing or invalid), and + * 2) At least one indicator key appears in a legacy-compatible top-level shape. + * - Indicator list intentionally excludes keys that are valid top-level entries in + * both old and new structures to reduce false positives. + * + * Shape rule: + * - Object values for indicator keys are treated as already-nested V2-like content + * and do not alone trigger migration. + * - Primitive/array/null values on indicator keys are treated as legacy V1 signals. + */ + +/** + * V1 -> V2 migration (structural normalization stage). + * + * Migration contract: + * - Input: settings in legacy V1-like shape (mostly flat, may contain mixed partial V2). + * - Output: V2-compatible nested structure with `$version: 2`. + * - No semantic inversion of disable* naming in this stage. + * + * Data-preservation strategy: + * - Prefer transforming known keys into canonical V2 locations. + * - Preserve unrecognized keys verbatim. + * - Preserve parent-path scalar values when nested writes would collide with them. + * - Preserve/merge existing partial V2 objects where safe. + * + * This class intentionally optimizes for backward compatibility and non-destructive + * behavior over aggressive normalization. + */ +export class V1ToV2Migration implements SettingsMigration { + readonly fromVersion = 1; + readonly toVersion = 2; + + /** + * Determines whether this migration should execute. + * + * Decision strategy: + * - Hard-stop when `$version` is a number >= 2 (already V2+). + * - Otherwise, scan indicator keys and trigger only when at least one indicator is + * still in legacy top-level shape (primitive/array/null). 
+ * + * Mixed-shape tolerance: + * - Files that are partially migrated are supported; V2-like object-valued indicators + * are ignored while legacy-shaped indicators can still trigger migration. + */ + shouldMigrate(settings: unknown): boolean { + if (typeof settings !== 'object' || settings === null) { + return false; + } + + const s = settings as Record; + + // If $version exists and is a number >= 2, it's not V1 + const version = s['$version']; + if (typeof version === 'number' && version >= 2) { + return false; + } + + // Check for V1 indicator keys with primitive values + // A setting is considered V1 if ANY indicator key has a primitive value + // (string, number, boolean, null, or array) at the top level. + // Keys with object values are skipped as they may already be in V2 format. + return V1_INDICATOR_KEYS.some((key) => { + if (!(key in s)) { + return false; + } + const value = s[key]; + // Skip keys with object values - they may already be in V2 nested format + // But don't let them block migration of other keys + if ( + typeof value === 'object' && + value !== null && + !Array.isArray(value) + ) { + // This key appears to be in V2 format, skip it but continue + // checking other keys + return false; + } + // Found a key with primitive value - this is V1 format + return true; + }); + } + + /** + * Performs non-destructive V1 -> V2 transformation. + * + * Detailed strategy: + * 1) Relocate known V1 keys using `V1_TO_V2_MIGRATION_MAP`. + * - If a source value is already an object and maps to a child path of itself + * (partial V2 shape), merge child properties into target path. + * 2) Relocate disable* keys into V2 disable* locations. + * - Consolidated keys (`disableAutoUpdate`, `disableUpdateNag`): normalize to + * boolean with stable-compatible presence semantics (`value === true`). + * - Other disable* keys: migrate only boolean values. + * 3) Preserve `mcpServers` top-level placement. 
+ * 4) Carry over remaining keys: + * - If a key is parent of migrated nested paths, merge unprocessed object children. + * - If parent value is non-object, preserve that scalar/array/null as-is. + * - Otherwise copy untouched key/value. + * 5) Stamp `$version = 2`. + * + * The method is pure with respect to input mutation. + */ + migrate( + settings: unknown, + _scope: string, + ): { settings: unknown; warnings: string[] } { + if (typeof settings !== 'object' || settings === null) { + throw new Error('Settings must be an object'); + } + + const source = settings as Record; + const result: Record = {}; + const processedKeys = new Set(); + const warnings: string[] = []; + + // Step 1: Map known V1 keys to V2 nested paths + for (const [v1Key, v2Path] of Object.entries(V1_TO_V2_MIGRATION_MAP)) { + if (v1Key in source) { + const value = source[v1Key]; + + // Safety check: If this key is a V2 container (like 'model') and it's + // already an object, it's likely already in V2 format. Skip migration + // to prevent double-nesting (e.g., model.name.name). + if ( + V2_CONTAINER_KEYS.has(v1Key) && + typeof value === 'object' && + value !== null && + !Array.isArray(value) + ) { + // This is already a V2 container, carry it over as-is + result[v1Key] = value; + processedKeys.add(v1Key); + continue; + } + + // If value is already an object and the path matches the key, + // it might be a partial V2 structure. Merge its contents. 
+ if ( + typeof value === 'object' && + value !== null && + !Array.isArray(value) && + v2Path.startsWith(v1Key + '.') + ) { + // Merge nested properties from this partial V2 structure + for (const [nestedKey, nestedValue] of Object.entries(value)) { + setNestedPropertySafe( + result, + `${v2Path}.${nestedKey}`, + nestedValue, + ); + } + } else { + setNestedPropertySafe(result, v2Path, value); + } + processedKeys.add(v1Key); + } + } + + // Step 2: Map V1 disable* keys to V2 nested disable* paths + for (const [v1Key, v2Path] of Object.entries( + V1_TO_V2_PRESERVE_DISABLE_MAP, + )) { + if (v1Key in source) { + const value = source[v1Key]; + if (CONSOLIDATED_DISABLE_KEYS.has(v1Key)) { + // Preserve stable behavior: consolidated keys use presence semantics. + // Only literal true remains true; all other present values become false. + setNestedPropertySafe(result, v2Path, value === true); + } else if (typeof value === 'boolean') { + // Non-consolidated disable* keys only migrate when explicitly boolean. + setNestedPropertySafe(result, v2Path, value); + } + processedKeys.add(v1Key); + } + } + + // Step 3: Preserve mcpServers at the top level + if ('mcpServers' in source) { + result['mcpServers'] = source['mcpServers']; + processedKeys.add('mcpServers'); + } + + // Step 4: Carry over any unrecognized keys (including unknown nested objects) + // Important: Skip keys that are parent paths of already-migrated properties + // to avoid overwriting merged structures (e.g., 'ui' should not overwrite 'ui.theme') + for (const key of Object.keys(source)) { + if (!processedKeys.has(key)) { + // Check if this key is a parent of any already-migrated path + const isParentOfMigratedPath = Array.from(processedKeys).some( + (processedKey) => { + // Get the v2 path for this processed key + const v2Path = + V1_TO_V2_MIGRATION_MAP[processedKey] || + V1_TO_V2_PRESERVE_DISABLE_MAP[processedKey]; + if (!v2Path) return false; + // Check if the v2 path starts with this key + '.' 
+ return v2Path.startsWith(key + '.'); + }, + ); + + if (isParentOfMigratedPath) { + // This key is a parent of an already-migrated path + // Merge its unprocessed children instead of overwriting + const existingValue = source[key]; + if ( + typeof existingValue === 'object' && + existingValue !== null && + !Array.isArray(existingValue) + ) { + for (const [nestedKey, nestedValue] of Object.entries( + existingValue, + )) { + // Only merge if this nested key wasn't already processed + const fullNestedPath = `${key}.${nestedKey}`; + const wasProcessed = Array.from(processedKeys).some( + (processedKey) => { + const v2Path = + V1_TO_V2_MIGRATION_MAP[processedKey] || + V1_TO_V2_PRESERVE_DISABLE_MAP[processedKey]; + return v2Path === fullNestedPath; + }, + ); + if (!wasProcessed) { + setNestedPropertySafe(result, fullNestedPath, nestedValue); + } + } + } else { + // Preserve non-object parent values to match legacy overwrite semantics. + result[key] = source[key]; + } + } else { + // Not a parent path, safe to copy as-is + result[key] = source[key]; + } + } + } + + // Step 5: Set version to 2 + result['$version'] = 2; + + return { settings: result, warnings }; + } +} + +/** Singleton instance of V1→V2 migration */ +export const v1ToV2Migration = new V1ToV2Migration(); diff --git a/packages/cli/src/config/migration/versions/v2-to-v3.test.ts b/packages/cli/src/config/migration/versions/v2-to-v3.test.ts new file mode 100644 index 000000000..a1ba9b46d --- /dev/null +++ b/packages/cli/src/config/migration/versions/v2-to-v3.test.ts @@ -0,0 +1,598 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect } from 'vitest'; +import { V2ToV3Migration } from './v2-to-v3.js'; + +describe('V2ToV3Migration', () => { + const migration = new V2ToV3Migration(); + + describe('shouldMigrate', () => { + it('should return true for V2 settings with deprecated disable* keys', () => { + const v2Settings = { + $version: 2, + 
general: { disableAutoUpdate: true }, + }; + + expect(migration.shouldMigrate(v2Settings)).toBe(true); + }); + + it('should return true for V2 settings with ui.accessibility.disableLoadingPhrases', () => { + const v2Settings = { + $version: 2, + ui: { accessibility: { disableLoadingPhrases: false } }, + }; + + expect(migration.shouldMigrate(v2Settings)).toBe(true); + }); + + it('should return false for V3 settings', () => { + const v3Settings = { + $version: 3, + general: { enableAutoUpdate: true }, + }; + + expect(migration.shouldMigrate(v3Settings)).toBe(false); + }); + + it('should return false for V1 settings without version', () => { + const v1Settings = { + theme: 'dark', + disableAutoUpdate: true, + }; + + expect(migration.shouldMigrate(v1Settings)).toBe(false); + }); + + it('should return true for V2 settings without deprecated keys', () => { + const cleanV2Settings = { + $version: 2, + ui: { theme: 'dark' }, + general: { enableAutoUpdate: true }, + }; + + // V2 settings should always be migrated to V3 to update the version number + expect(migration.shouldMigrate(cleanV2Settings)).toBe(true); + }); + + it('should return false for null input', () => { + expect(migration.shouldMigrate(null)).toBe(false); + }); + + it('should return false for non-object input', () => { + expect(migration.shouldMigrate('string')).toBe(false); + expect(migration.shouldMigrate(123)).toBe(false); + }); + }); + + describe('migrate', () => { + it('should migrate disableAutoUpdate to enableAutoUpdate with inverted value', () => { + const v2Settings = { + $version: 2, + general: { disableAutoUpdate: true }, + }; + + const { settings: result } = migration.migrate(v2Settings, 'user') as { + settings: Record; + warnings: unknown[]; + }; + + expect(result['$version']).toBe(3); + expect( + (result['general'] as Record)['enableAutoUpdate'], + ).toBe(false); + expect( + (result['general'] as Record)['disableAutoUpdate'], + ).toBeUndefined(); + }); + + it('should migrate disableLoadingPhrases 
to enableLoadingPhrases', () => { + const v2Settings = { + $version: 2, + ui: { accessibility: { disableLoadingPhrases: true } }, + }; + + const { settings: result } = migration.migrate(v2Settings, 'user') as { + settings: Record; + warnings: unknown[]; + }; + + expect(result['$version']).toBe(3); + expect( + (result['ui'] as Record)['accessibility'], + ).toEqual({ + enableLoadingPhrases: false, + }); + }); + + it('should migrate disableFuzzySearch to enableFuzzySearch', () => { + const v2Settings = { + $version: 2, + context: { fileFiltering: { disableFuzzySearch: false } }, + }; + + const { settings: result } = migration.migrate(v2Settings, 'user') as { + settings: Record; + warnings: unknown[]; + }; + + expect(result['$version']).toBe(3); + expect( + (result['context'] as Record)['fileFiltering'], + ).toEqual({ + enableFuzzySearch: true, + }); + }); + + it('should migrate disableCacheControl to enableCacheControl', () => { + const v2Settings = { + $version: 2, + model: { generationConfig: { disableCacheControl: true } }, + }; + + const { settings: result } = migration.migrate(v2Settings, 'user') as { + settings: Record; + warnings: unknown[]; + }; + + expect(result['$version']).toBe(3); + expect( + (result['model'] as Record)['generationConfig'], + ).toEqual({ + enableCacheControl: false, + }); + }); + + it('should handle consolidated disableAutoUpdate and disableUpdateNag', () => { + const v2Settings = { + $version: 2, + general: { + disableAutoUpdate: true, + disableUpdateNag: false, + }, + }; + + const { settings: result } = migration.migrate(v2Settings, 'user') as { + settings: Record; + warnings: unknown[]; + }; + + expect(result['$version']).toBe(3); + // If ANY disable* is true, enable should be false + expect( + (result['general'] as Record)['enableAutoUpdate'], + ).toBe(false); + expect( + (result['general'] as Record)['disableAutoUpdate'], + ).toBeUndefined(); + expect( + (result['general'] as Record)['disableUpdateNag'], + ).toBeUndefined(); + }); + + 
it('should set enableAutoUpdate to true when both disable* are false', () => { + const v2Settings = { + $version: 2, + general: { + disableAutoUpdate: false, + disableUpdateNag: false, + }, + }; + + const { settings: result } = migration.migrate(v2Settings, 'user') as { + settings: Record; + warnings: unknown[]; + }; + + expect(result['$version']).toBe(3); + expect( + (result['general'] as Record)['enableAutoUpdate'], + ).toBe(true); + }); + + it('should preserve other settings during migration', () => { + const v2Settings = { + $version: 2, + ui: { + theme: 'dark', + accessibility: { disableLoadingPhrases: true }, + }, + model: { + name: 'gemini', + }, + }; + + const { settings: result } = migration.migrate(v2Settings, 'user') as { + settings: Record; + warnings: unknown[]; + }; + + expect(result['$version']).toBe(3); + expect((result['ui'] as Record)['theme']).toBe('dark'); + expect((result['model'] as Record)['name']).toBe( + 'gemini', + ); + expect( + (result['ui'] as Record)['accessibility'], + ).toEqual({ + enableLoadingPhrases: false, + }); + }); + + it('should not modify the input object', () => { + const v2Settings = { + $version: 2, + general: { disableAutoUpdate: true }, + }; + + const result = migration.migrate(v2Settings, 'user'); + + expect(v2Settings.general).toEqual({ disableAutoUpdate: true }); + expect(result).not.toBe(v2Settings); + }); + + it('should throw error for non-object input', () => { + expect(() => migration.migrate(null, 'user')).toThrow( + 'Settings must be an object', + ); + expect(() => migration.migrate('string', 'user')).toThrow( + 'Settings must be an object', + ); + }); + + it('should handle multiple deprecated keys in one migration', () => { + const v2Settings = { + $version: 2, + general: { disableAutoUpdate: false }, + ui: { accessibility: { disableLoadingPhrases: false } }, + context: { fileFiltering: { disableFuzzySearch: false } }, + }; + + const { settings: result } = migration.migrate(v2Settings, 'user') as { + settings: 
Record; + warnings: unknown[]; + }; + + expect(result['$version']).toBe(3); + expect( + (result['general'] as Record)['enableAutoUpdate'], + ).toBe(true); + expect( + (result['ui'] as Record)['accessibility'], + ).toEqual({ + enableLoadingPhrases: true, + }); + expect( + (result['context'] as Record)['fileFiltering'], + ).toEqual({ + enableFuzzySearch: true, + }); + }); + + it('should coerce string "true" and remove deprecated key', () => { + const v2Settings = { + $version: 2, + general: { disableAutoUpdate: 'true' }, + }; + + const { settings: result, warnings } = migration.migrate( + v2Settings, + 'user', + ) as { + settings: Record; + warnings: string[]; + }; + + expect(result['$version']).toBe(3); + expect( + (result['general'] as Record)['disableAutoUpdate'], + ).toBeUndefined(); + expect( + (result['general'] as Record)['enableAutoUpdate'], + ).toBe(false); + expect(warnings).toHaveLength(0); + }); + + it('should coerce string "false" and remove deprecated key', () => { + const v2Settings = { + $version: 2, + general: { disableAutoUpdate: 'false' }, + }; + + const { settings: result, warnings } = migration.migrate( + v2Settings, + 'user', + ) as { + settings: Record; + warnings: string[]; + }; + + expect(result['$version']).toBe(3); + expect( + (result['general'] as Record)['disableAutoUpdate'], + ).toBeUndefined(); + expect( + (result['general'] as Record)['enableAutoUpdate'], + ).toBe(true); + expect(warnings).toHaveLength(0); + }); + + it('should coerce case-insensitive strings for consolidated keys', () => { + const v2Settings = { + $version: 2, + general: { + disableAutoUpdate: 'TRUE', + disableUpdateNag: 'FALSE', + }, + }; + + const { settings: result, warnings } = migration.migrate( + v2Settings, + 'user', + ) as { + settings: Record; + warnings: string[]; + }; + + expect(result['$version']).toBe(3); + expect( + (result['general'] as Record)['disableAutoUpdate'], + ).toBeUndefined(); + expect( + (result['general'] as Record)['disableUpdateNag'], + 
).toBeUndefined(); + expect( + (result['general'] as Record)['enableAutoUpdate'], + ).toBe(false); + expect(warnings).toHaveLength(0); + }); + + it('should remove number value and emit warning', () => { + const v2Settings = { + $version: 2, + general: { disableAutoUpdate: 123 }, + }; + + const { settings: result, warnings } = migration.migrate( + v2Settings, + 'user', + ) as { + settings: Record; + warnings: string[]; + }; + + expect(result['$version']).toBe(3); + expect( + (result['general'] as Record)['disableAutoUpdate'], + ).toBeUndefined(); + expect( + (result['general'] as Record)['enableAutoUpdate'], + ).toBeUndefined(); + expect(warnings).toHaveLength(1); + expect(warnings[0]).toContain('general.disableAutoUpdate'); + }); + + it('should remove invalid string value and emit warning', () => { + const v2Settings = { + $version: 2, + general: { disableAutoUpdate: 'invalid-string' }, + }; + + const { settings: result, warnings } = migration.migrate( + v2Settings, + 'user', + ) as { + settings: Record; + warnings: string[]; + }; + + expect(result['$version']).toBe(3); + expect( + (result['general'] as Record)['disableAutoUpdate'], + ).toBeUndefined(); + expect( + (result['general'] as Record)['enableAutoUpdate'], + ).toBeUndefined(); + expect(warnings).toHaveLength(1); + expect(warnings[0]).toContain('general.disableAutoUpdate'); + }); + + it('should coerce disableCacheControl string "true"', () => { + const v2Settings = { + $version: 2, + model: { generationConfig: { disableCacheControl: 'true' } }, + }; + + const { settings: result, warnings } = migration.migrate( + v2Settings, + 'user', + ) as { + settings: Record; + warnings: string[]; + }; + + expect(result['$version']).toBe(3); + expect( + (result['model'] as Record)['generationConfig'], + ).toEqual({ + enableCacheControl: false, + }); + expect(warnings).toHaveLength(0); + }); + + it('should coerce disableCacheControl string "false"', () => { + const v2Settings = { + $version: 2, + model: { 
generationConfig: { disableCacheControl: 'false' } }, + }; + + const { settings: result, warnings } = migration.migrate( + v2Settings, + 'user', + ) as { + settings: Record; + warnings: string[]; + }; + + expect(result['$version']).toBe(3); + expect( + (result['model'] as Record)['generationConfig'], + ).toEqual({ + enableCacheControl: true, + }); + expect(warnings).toHaveLength(0); + }); + + it('should remove disableCacheControl number value and emit warning', () => { + const v2Settings = { + $version: 2, + model: { generationConfig: { disableCacheControl: 456 } }, + }; + + const { settings: result, warnings } = migration.migrate( + v2Settings, + 'user', + ) as { + settings: Record; + warnings: string[]; + }; + + expect(result['$version']).toBe(3); + expect( + (result['model'] as Record)['generationConfig'], + ).toEqual({}); + expect( + ( + (result['model'] as Record)[ + 'generationConfig' + ] as Record + )['enableCacheControl'], + ).toBeUndefined(); + expect(warnings).toHaveLength(1); + expect(warnings[0]).toContain( + 'model.generationConfig.disableCacheControl', + ); + }); + + it('should handle mixed valid and invalid disableAutoUpdate and disableUpdateNag', () => { + const v2Settings = { + $version: 2, + general: { + disableAutoUpdate: true, + disableUpdateNag: 'invalid', + }, + }; + + const { settings: result, warnings } = migration.migrate( + v2Settings, + 'user', + ) as { + settings: Record; + warnings: string[]; + }; + + expect(result['$version']).toBe(3); + // Only valid values should contribute to the consolidated result + // Since disableAutoUpdate is true, enableAutoUpdate should be false + expect( + (result['general'] as Record)['enableAutoUpdate'], + ).toBe(false); + expect( + (result['general'] as Record)['disableAutoUpdate'], + ).toBeUndefined(); + expect( + (result['general'] as Record)['disableUpdateNag'], + ).toBeUndefined(); + expect(warnings).toHaveLength(1); + expect(warnings[0]).toContain('general.disableUpdateNag'); + }); + + it('should 
remove object value for disable key and emit warning', () => { + const v2Settings = { + $version: 2, + general: { disableAutoUpdate: { nested: 'value' } }, + }; + + const { settings: result, warnings } = migration.migrate( + v2Settings, + 'user', + ) as { + settings: Record; + warnings: string[]; + }; + + expect(result['$version']).toBe(3); + expect( + (result['general'] as Record)['disableAutoUpdate'], + ).toBeUndefined(); + expect( + (result['general'] as Record)['enableAutoUpdate'], + ).toBeUndefined(); + expect(warnings).toHaveLength(1); + expect(warnings[0]).toContain('general.disableAutoUpdate'); + }); + + it('should remove array value for disable key and emit warning', () => { + const v2Settings = { + $version: 2, + general: { disableAutoUpdate: [1, 2, 3] }, + }; + + const { settings: result, warnings } = migration.migrate( + v2Settings, + 'user', + ) as { + settings: Record; + warnings: string[]; + }; + + expect(result['$version']).toBe(3); + expect( + (result['general'] as Record)['disableAutoUpdate'], + ).toBeUndefined(); + expect( + (result['general'] as Record)['enableAutoUpdate'], + ).toBeUndefined(); + expect(warnings).toHaveLength(1); + expect(warnings[0]).toContain('general.disableAutoUpdate'); + }); + + it('should remove null value for disable key and emit warning', () => { + const v2Settings = { + $version: 2, + general: { disableAutoUpdate: null }, + }; + + const { settings: result, warnings } = migration.migrate( + v2Settings, + 'user', + ) as { + settings: Record; + warnings: string[]; + }; + + expect(result['$version']).toBe(3); + expect( + (result['general'] as Record)['disableAutoUpdate'], + ).toBeUndefined(); + expect( + (result['general'] as Record)['enableAutoUpdate'], + ).toBeUndefined(); + expect(warnings).toHaveLength(1); + expect(warnings[0]).toContain('general.disableAutoUpdate'); + }); + }); + + describe('version properties', () => { + it('should have correct fromVersion', () => { + expect(migration.fromVersion).toBe(2); + }); + + 
it('should have correct toVersion', () => { + expect(migration.toVersion).toBe(3); + }); + }); +}); diff --git a/packages/cli/src/config/migration/versions/v2-to-v3.ts b/packages/cli/src/config/migration/versions/v2-to-v3.ts new file mode 100644 index 000000000..6c0133443 --- /dev/null +++ b/packages/cli/src/config/migration/versions/v2-to-v3.ts @@ -0,0 +1,222 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import type { SettingsMigration } from '../types.js'; +import { + deleteNestedPropertySafe, + getNestedProperty, + setNestedPropertySafe, +} from '../../../utils/settingsUtils.js'; + +/** + * Path mapping for boolean polarity migration (V2 disable* -> V3 enable*). + * + * Strategy: + * - For each mapped path, values are normalized before migration: + * - boolean values are accepted directly + * - string values "true"/"false" (case-insensitive, trim-aware) are coerced + * - all other present values are treated as invalid + * - Transformation is inversion-based: disable=true -> enable=false, disable=false -> enable=true. + * - Deprecated disable* keys are removed whenever present (valid or invalid). + * - Invalid values do not create enable* keys and produce warnings. + */ +const V2_TO_V3_BOOLEAN_MAP: Record = { + 'general.disableAutoUpdate': 'general.enableAutoUpdate', + 'general.disableUpdateNag': 'general.enableAutoUpdate', + 'ui.accessibility.disableLoadingPhrases': + 'ui.accessibility.enableLoadingPhrases', + 'context.fileFiltering.disableFuzzySearch': + 'context.fileFiltering.enableFuzzySearch', + 'model.generationConfig.disableCacheControl': + 'model.generationConfig.enableCacheControl', +}; + +/** + * Consolidated old paths that collapse into one V3 field. + * + * Current policy: + * - `general.disableAutoUpdate` and `general.disableUpdateNag` both drive + * `general.enableAutoUpdate`. + * - If any valid normalized source is true, target becomes false. 
+ * - If at least one valid normalized source exists, consolidated target is emitted. + * - Invalid present values are removed and warned, and do not contribute to target calculation. + */ +const CONSOLIDATED_V2_PATHS: Record = { + 'general.enableAutoUpdate': [ + 'general.disableAutoUpdate', + 'general.disableUpdateNag', + ], +}; + +/** + * Normalizes deprecated disable* values for migration. + * + * Returns: + * - `isPresent=false` when the path does not exist + * - `isPresent=true, isValid=true` when value is boolean or coercible string + * - `isPresent=true, isValid=false` for invalid values (number/object/array/null/other strings) + */ +function normalizeDisableValue(value: unknown): { + isPresent: boolean; + isValid: boolean; + booleanValue?: boolean; +} { + if (value === undefined) { + return { isPresent: false, isValid: false }; + } + if (typeof value === 'boolean') { + return { isPresent: true, isValid: true, booleanValue: value }; + } + if (typeof value === 'string') { + const normalized = value.trim().toLowerCase(); + if (normalized === 'true') { + return { isPresent: true, isValid: true, booleanValue: true }; + } + if (normalized === 'false') { + return { isPresent: true, isValid: true, booleanValue: false }; + } + } + return { isPresent: true, isValid: false }; +} + +/** + * V2 -> V3 migration (boolean polarity normalization stage). + * + * Migration contract: + * - Input: V2 settings object (`$version: 2`). + * - Output: `$version: 3` with deprecated disable* fields removed and + * valid values migrated to enable* equivalents. + * + * Compatibility strategy: + * - Accept boolean values and coercible strings "true"/"false". + * - Remove invalid deprecated values (rather than preserving them). + * - Emit warnings for each removed invalid deprecated key. + * - Always bump version to 3 so future loads are idempotent and skip repeated checks. 
+ */ +export class V2ToV3Migration implements SettingsMigration { + readonly fromVersion = 2; + readonly toVersion = 3; + + /** + * Migration trigger rule. + * + * Execute only when `$version === 2`. + * This includes V2 files with no migratable disable* booleans so that version + * metadata still advances to 3. + */ + shouldMigrate(settings: unknown): boolean { + if (typeof settings !== 'object' || settings === null) { + return false; + } + + const s = settings as Record; + + // Migrate if $version is 2 + return s['$version'] === 2; + } + + /** + * Applies V2 -> V3 transformation with deterministic deprecated-key cleanup. + * + * Detailed strategy: + * 1) Clone input. + * 2) Process consolidated paths first: + * - Inspect each source path. + * - Normalize each present value (boolean / coercible string / invalid). + * - Always delete present deprecated source key. + * - Valid normalized values contribute to aggregate. + * - Invalid values emit warnings. + * - Emit consolidated target when at least one valid source was consumed. + * 3) Process remaining one-to-one mappings: + * - For each unmapped source, normalize value. + * - If valid -> delete old key and write inverted target. + * - If invalid -> delete old key and emit warning. + * 4) Set `$version = 3`. + * + * Guarantees: + * - Input object is not mutated. + * - Valid migration and invalid cleanup are deterministic. + * - Deprecated disable* keys are not retained after migration. 
+ */ + migrate( + settings: unknown, + scope: string, + ): { settings: unknown; warnings: string[] } { + if (typeof settings !== 'object' || settings === null) { + throw new Error('Settings must be an object'); + } + + // Deep clone to avoid mutating input + const result = structuredClone(settings) as Record; + const processedPaths = new Set(); + const warnings: string[] = []; + + // Step 1: Handle consolidated paths (multiple old paths → single new path) + // Policy: if ANY of the old disable* settings is true, the new enable* should be false + for (const [newPath, oldPaths] of Object.entries(CONSOLIDATED_V2_PATHS)) { + let hasAnyDisable = false; + let hasAnyBooleanValue = false; + + for (const oldPath of oldPaths) { + const oldValue = getNestedProperty(result, oldPath); + const normalized = normalizeDisableValue(oldValue); + if (!normalized.isPresent) { + continue; + } + + deleteNestedPropertySafe(result, oldPath); + processedPaths.add(oldPath); + + if (normalized.isValid) { + hasAnyBooleanValue = true; + if (normalized.booleanValue === true) { + hasAnyDisable = true; + } + } else { + warnings.push( + `Removed deprecated setting '${oldPath}' from ${scope} settings because the value is invalid. 
Expected boolean.`, + ); + } + } + + if (hasAnyBooleanValue) { + // enableAutoUpdate = !hasAnyDisable (if any disable* was true, enable should be false) + setNestedPropertySafe(result, newPath, !hasAnyDisable); + } + } + + // Step 2: Handle remaining individual disable* → enable* mappings + for (const [oldPath, newPath] of Object.entries(V2_TO_V3_BOOLEAN_MAP)) { + if (processedPaths.has(oldPath)) { + continue; + } + + const oldValue = getNestedProperty(result, oldPath); + const normalized = normalizeDisableValue(oldValue); + if (!normalized.isPresent) { + continue; + } + + deleteNestedPropertySafe(result, oldPath); + if (normalized.isValid) { + // Set new property with inverted value + setNestedPropertySafe(result, newPath, !normalized.booleanValue); + } else { + warnings.push( + `Removed deprecated setting '${oldPath}' from ${scope} settings because the value is invalid. Expected boolean or string "true"/"false".`, + ); + } + } + + // Step 3: Always update version to 3 + result['$version'] = 3; + + return { settings: result, warnings }; + } +} + +/** Singleton instance of V2→V3 migration */ +export const v2ToV3Migration = new V2ToV3Migration(); diff --git a/packages/cli/src/config/settings.test.ts b/packages/cli/src/config/settings.test.ts index bea89475f..d4241c7ba 100644 --- a/packages/cli/src/config/settings.test.ts +++ b/packages/cli/src/config/settings.test.ts @@ -18,16 +18,6 @@ vi.mock('os', async (importOriginal) => { }; }); -// Mock './settings.js' to ensure it uses the mocked 'os.homedir()' for its internal constants. 
-vi.mock('./settings.js', async (importActual) => { - const originalModule = await importActual(); - return { - __esModule: true, // Ensure correct module shape - ...originalModule, // Re-export all original members - // We are relying on originalModule's USER_SETTINGS_PATH being constructed with mocked os.homedir() - }; -}); - // Mock trustedFolders vi.mock('./trustedFolders.js', () => ({ isWorkspaceTrusted: vi @@ -46,7 +36,6 @@ import { afterEach, type Mocked, type Mock, - fail, } from 'vitest'; import * as fs from 'node:fs'; // fs will be mocked separately import stripJsonComments from 'strip-json-comments'; // Will be mocked separately @@ -60,13 +49,12 @@ import { getSystemSettingsPath, getSystemDefaultsPath, SETTINGS_DIRECTORY_NAME, // This is from the original module, but used by the mock. - migrateSettingsToV1, - needsMigration, type Settings, loadEnvironment, SETTINGS_VERSION, SETTINGS_VERSION_KEY, } from './settings.js'; +import { needsMigration } from './migration/index.js'; import { FatalConfigError, QWEN_DIR } from '@qwen-code/qwen-code-core'; const MOCK_WORKSPACE_DIR = '/mock/workspace'; @@ -84,6 +72,23 @@ type TestSettings = Settings & { nestedObj?: { [key: string]: unknown }; }; +vi.mock('node:fs', async (importOriginal) => { + // Get all the functions from the real 'fs' module + const actualFs = await importOriginal(); + + return { + ...actualFs, // Keep all the real functions + // Now, just override the ones we need for the test + existsSync: vi.fn(), + readFileSync: vi.fn(), + writeFileSync: vi.fn(), + renameSync: vi.fn(), + mkdirSync: vi.fn(), + realpathSync: (p: string) => p, + }; +}); + +// Also mock 'fs' for compatibility vi.mock('fs', async (importOriginal) => { // Get all the functions from the real 'fs' module const actualFs = await importOriginal(); @@ -594,19 +599,22 @@ describe('Settings Loading and Merging', () => { loadSettings(MOCK_WORKSPACE_DIR); - // Verify that fs.writeFileSync was called (to add version) - // but NOT fs.renameSync 
(no backup needed, just adding version) - expect(fs.renameSync).not.toHaveBeenCalled(); - expect(fs.writeFileSync).toHaveBeenCalledTimes(1); - - const writeCall = (fs.writeFileSync as Mock).mock.calls[0]; - const writtenPath = writeCall[0]; + // Version normalization now uses writeWithBackupSync (temp write + rename) + // Verify that writeFileSync was called with the temp file path + const writeCall = (fs.writeFileSync as Mock).mock.calls.find( + (call: unknown[]) => call[0] === `${USER_SETTINGS_PATH}.tmp`, + ); + expect(writeCall).toBeDefined(); + if (!writeCall) { + throw new Error('Expected temp write call for version normalization'); + } const writtenContent = JSON.parse(writeCall[1] as string); - expect(writtenPath).toBe(USER_SETTINGS_PATH); expect(writtenContent[SETTINGS_VERSION_KEY]).toBe(SETTINGS_VERSION); expect(writtenContent.ui?.theme).toBe('dark'); expect(writtenContent.model?.name).toBe('qwen-coder'); + // Verify writeWithBackupSync was called by checking temp file write + expect(fs.writeFileSync).toHaveBeenCalled(); }); it('should correctly handle partially migrated settings without version field', () => { @@ -734,14 +742,85 @@ describe('Settings Loading and Merging', () => { loadSettings(MOCK_WORKSPACE_DIR); // Version should be bumped to 3 even though no keys needed migration + // writeWithBackupSync writes to a temp file first, then renames const writeCall = (fs.writeFileSync as Mock).mock.calls.find( - (call: unknown[]) => call[0] === USER_SETTINGS_PATH, + (call: unknown[]) => call[0] === `${USER_SETTINGS_PATH}.tmp`, ); expect(writeCall).toBeDefined(); + if (!writeCall) { + throw new Error('Expected temp write call for V2->V3 version bump'); + } const writtenContent = JSON.parse(writeCall[1] as string); expect(writtenContent.$version).toBe(SETTINGS_VERSION); }); + it('should normalize invalid version metadata when no migration is applicable', () => { + (mockFsExistsSync as Mock).mockImplementation( + (p: fs.PathLike) => p === USER_SETTINGS_PATH, + 
); + const invalidVersionSettings = { + $version: 'invalid-version', + general: { + enableAutoUpdate: true, + }, + }; + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === USER_SETTINGS_PATH) + return JSON.stringify(invalidVersionSettings); + return '{}'; + }, + ); + + loadSettings(MOCK_WORKSPACE_DIR); + + const writeCall = (fs.writeFileSync as Mock).mock.calls.find( + (call: unknown[]) => call[0] === `${USER_SETTINGS_PATH}.tmp`, + ); + expect(writeCall).toBeDefined(); + if (!writeCall) { + throw new Error( + 'Expected temp write call for invalid version normalization', + ); + } + const writtenContent = JSON.parse(writeCall[1] as string); + expect(writtenContent.$version).toBe(SETTINGS_VERSION); + expect(writtenContent.general?.enableAutoUpdate).toBe(true); + }); + + it('should normalize legacy numeric version when no migration can execute', () => { + (mockFsExistsSync as Mock).mockImplementation( + (p: fs.PathLike) => p === USER_SETTINGS_PATH, + ); + const staleVersionSettings = { + $version: 1, + // No V1/V2 indicators recognized by migrations + customOnlyKey: 'value', + }; + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === USER_SETTINGS_PATH) + return JSON.stringify(staleVersionSettings); + return '{}'; + }, + ); + + loadSettings(MOCK_WORKSPACE_DIR); + + const writeCall = (fs.writeFileSync as Mock).mock.calls.find( + (call: unknown[]) => call[0] === `${USER_SETTINGS_PATH}.tmp`, + ); + expect(writeCall).toBeDefined(); + if (!writeCall) { + throw new Error( + 'Expected temp write call for stale version normalization', + ); + } + const writtenContent = JSON.parse(writeCall[1] as string); + expect(writtenContent.$version).toBe(SETTINGS_VERSION); + expect(writtenContent.customOnlyKey).toBe('value'); + }); + it('should correctly merge and migrate legacy array properties from multiple scopes', () => { (mockFsExistsSync as Mock).mockReturnValue(true); const legacyUserSettings = { @@ 
-1619,7 +1698,7 @@ describe('Settings Loading and Merging', () => { try { loadSettings(MOCK_WORKSPACE_DIR); - fail('loadSettings should have thrown a FatalConfigError'); + throw new Error('loadSettings should have thrown a FatalConfigError'); } catch (e) { expect(e).toBeInstanceOf(FatalConfigError); const error = e as FatalConfigError; @@ -2261,385 +2340,6 @@ describe('Settings Loading and Merging', () => { }); }); - describe('migrateSettingsToV1', () => { - it('should handle an empty object', () => { - const v2Settings = {}; - const v1Settings = migrateSettingsToV1(v2Settings); - expect(v1Settings).toEqual({}); - }); - - it('should migrate a simple v2 settings object to v1', () => { - const v2Settings = { - general: { - preferredEditor: 'vscode', - vimMode: true, - }, - ui: { - theme: 'dark', - }, - }; - const v1Settings = migrateSettingsToV1(v2Settings); - expect(v1Settings).toEqual({ - preferredEditor: 'vscode', - vimMode: true, - theme: 'dark', - }); - }); - - it('should handle nested properties correctly', () => { - const v2Settings = { - security: { - folderTrust: { - enabled: true, - }, - auth: { - selectedType: 'oauth', - }, - }, - advanced: { - autoConfigureMemory: true, - }, - }; - const v1Settings = migrateSettingsToV1(v2Settings); - expect(v1Settings).toEqual({ - folderTrust: true, - selectedAuthType: 'oauth', - autoConfigureMaxOldSpaceSize: true, - }); - }); - - it('should preserve mcpServers at the top level', () => { - const v2Settings = { - general: { - preferredEditor: 'vscode', - }, - mcpServers: { - 'my-server': { - command: 'npm start', - }, - }, - }; - const v1Settings = migrateSettingsToV1(v2Settings); - expect(v1Settings).toEqual({ - preferredEditor: 'vscode', - mcpServers: { - 'my-server': { - command: 'npm start', - }, - }, - }); - }); - - it('should carry over unrecognized top-level properties', () => { - const v2Settings = { - general: { - vimMode: false, - }, - unrecognized: 'value', - another: { - nested: true, - }, - }; - const 
v1Settings = migrateSettingsToV1(v2Settings); - expect(v1Settings).toEqual({ - vimMode: false, - unrecognized: 'value', - another: { - nested: true, - }, - }); - }); - - it('should handle a complex object with mixed properties', () => { - const v2Settings = { - general: { - disableAutoUpdate: true, - }, - ui: { - hideTips: true, - customThemes: { - myTheme: {}, - }, - }, - model: { - name: 'gemini-pro', - chatCompression: { - contextPercentageThreshold: 0.5, - }, - }, - mcpServers: { - 'server-1': { - command: 'node server.js', - }, - }, - unrecognized: { - should: 'be-preserved', - }, - }; - const v1Settings = migrateSettingsToV1(v2Settings); - expect(v1Settings).toEqual({ - disableAutoUpdate: true, - hideTips: true, - customThemes: { - myTheme: {}, - }, - model: 'gemini-pro', - chatCompression: { - contextPercentageThreshold: 0.5, - }, - mcpServers: { - 'server-1': { - command: 'node server.js', - }, - }, - unrecognized: { - should: 'be-preserved', - }, - }); - }); - - it('should not migrate a v1 settings object', () => { - const v1Settings = { - preferredEditor: 'vscode', - vimMode: true, - theme: 'dark', - }; - const migratedSettings = migrateSettingsToV1(v1Settings); - expect(migratedSettings).toEqual({ - preferredEditor: 'vscode', - vimMode: true, - theme: 'dark', - }); - }); - - it('should migrate a full v2 settings object to v1', () => { - const v2Settings: TestSettings = { - general: { - preferredEditor: 'code', - vimMode: true, - }, - ui: { - theme: 'dark', - }, - privacy: { - usageStatisticsEnabled: false, - }, - model: { - name: 'gemini-pro', - chatCompression: { - contextPercentageThreshold: 0.8, - }, - }, - context: { - fileName: 'CONTEXT.md', - includeDirectories: ['/src'], - }, - tools: { - sandbox: true, - exclude: ['toolA'], - }, - mcp: { - allowed: ['server1'], - }, - security: { - folderTrust: { - enabled: true, - }, - }, - advanced: { - dnsResolutionOrder: 'ipv4first', - excludedEnvVars: ['SECRET'], - }, - mcpServers: { - 'my-server': { - 
command: 'npm start', - }, - }, - unrecognizedTopLevel: { - value: 'should be preserved', - }, - }; - - const v1Settings = migrateSettingsToV1(v2Settings); - - expect(v1Settings).toEqual({ - preferredEditor: 'code', - vimMode: true, - theme: 'dark', - usageStatisticsEnabled: false, - model: 'gemini-pro', - chatCompression: { - contextPercentageThreshold: 0.8, - }, - contextFileName: 'CONTEXT.md', - includeDirectories: ['/src'], - sandbox: true, - excludeTools: ['toolA'], - allowMCPServers: ['server1'], - folderTrust: true, - dnsResolutionOrder: 'ipv4first', - excludedProjectEnvVars: ['SECRET'], - mcpServers: { - 'my-server': { - command: 'npm start', - }, - }, - unrecognizedTopLevel: { - value: 'should be preserved', - }, - }); - }); - - it('should handle partial v2 settings', () => { - const v2Settings: TestSettings = { - general: { - vimMode: false, - }, - ui: {}, - model: { - name: 'gemini-1.5-pro', - }, - unrecognized: 'value', - }; - - const v1Settings = migrateSettingsToV1(v2Settings); - - expect(v1Settings).toEqual({ - vimMode: false, - model: 'gemini-1.5-pro', - unrecognized: 'value', - }); - }); - - it('should handle settings with different data types', () => { - const v2Settings: TestSettings = { - general: { - vimMode: false, - }, - model: { - maxSessionTurns: -1, - }, - context: { - includeDirectories: [], - }, - security: { - folderTrust: { - enabled: false, - }, - }, - }; - - const v1Settings = migrateSettingsToV1(v2Settings); - - expect(v1Settings).toEqual({ - vimMode: false, - maxSessionTurns: -1, - includeDirectories: [], - folderTrust: false, - }); - }); - - it('should preserve unrecognized top-level keys', () => { - const v2Settings: TestSettings = { - general: { - vimMode: true, - }, - customTopLevel: { - a: 1, - b: [2], - }, - anotherOne: 'hello', - }; - - const v1Settings = migrateSettingsToV1(v2Settings); - - expect(v1Settings).toEqual({ - vimMode: true, - customTopLevel: { - a: 1, - b: [2], - }, - anotherOne: 'hello', - }); - }); - - 
it('should handle an empty v2 settings object', () => { - const v2Settings = {}; - const v1Settings = migrateSettingsToV1(v2Settings); - expect(v1Settings).toEqual({}); - }); - - it('should correctly handle mcpServers at the top level', () => { - const v2Settings: TestSettings = { - mcpServers: { - serverA: { command: 'a' }, - }, - mcp: { - allowed: ['serverA'], - }, - }; - - const v1Settings = migrateSettingsToV1(v2Settings); - - expect(v1Settings).toEqual({ - mcpServers: { - serverA: { command: 'a' }, - }, - allowMCPServers: ['serverA'], - }); - }); - - it('should correctly migrate customWittyPhrases', () => { - const v2Settings: Partial = { - ui: { - customWittyPhrases: ['test phrase'], - }, - }; - const v1Settings = migrateSettingsToV1(v2Settings as Settings); - expect(v1Settings).toEqual({ - customWittyPhrases: ['test phrase'], - }); - }); - - it('should remove version field when migrating to V1', () => { - const v2Settings = { - [SETTINGS_VERSION_KEY]: SETTINGS_VERSION, - ui: { - theme: 'dark', - }, - model: { - name: 'qwen-coder', - }, - }; - const v1Settings = migrateSettingsToV1(v2Settings); - - // Version field should not be present in V1 settings - expect(v1Settings[SETTINGS_VERSION_KEY]).toBeUndefined(); - // Other fields should be properly migrated - expect(v1Settings).toEqual({ - theme: 'dark', - model: 'qwen-coder', - }); - }); - - it('should handle version field in unrecognized properties', () => { - const v2Settings = { - [SETTINGS_VERSION_KEY]: SETTINGS_VERSION, - general: { - vimMode: true, - }, - someUnrecognizedKey: 'value', - }; - const v1Settings = migrateSettingsToV1(v2Settings); - - // Version field should be filtered out - expect(v1Settings[SETTINGS_VERSION_KEY]).toBeUndefined(); - // Unrecognized keys should be preserved - expect(v1Settings['someUnrecognizedKey']).toBe('value'); - expect(v1Settings['vimMode']).toBe(true); - }); - }); - describe('loadEnvironment', () => { function setup({ isFolderTrustEnabled = true, diff --git 
a/packages/cli/src/config/settings.ts b/packages/cli/src/config/settings.ts index e261cc723..bfc670b60 100644 --- a/packages/cli/src/config/settings.ts +++ b/packages/cli/src/config/settings.ts @@ -14,6 +14,9 @@ import { QWEN_DIR, getErrorMessage, Storage, + setDebugLogSession, + sanitizeCwd, + createDebugLogger, } from '@qwen-code/qwen-code-core'; import stripJsonComments from 'strip-json-comments'; import { DefaultLight } from '../ui/themes/default-light.js'; @@ -28,9 +31,16 @@ import { getSettingsSchema, } from './settingsSchema.js'; import { resolveEnvVarsInObject } from '../utils/envVarResolver.js'; -import { customDeepMerge, type MergeableObject } from '../utils/deepMerge.js'; +import { setNestedPropertySafe } from '../utils/settingsUtils.js'; +import { customDeepMerge } from '../utils/deepMerge.js'; import { updateSettingsFilePreservingFormat } from '../utils/commentJson.js'; -import { writeStderrLine } from '../utils/stdioHelpers.js'; +const debugLogger = createDebugLogger('SETTINGS'); +import { runMigrations, needsMigration } from './migration/index.js'; +import { + V1_TO_V2_MIGRATION_MAP, + V2_CONTAINER_KEYS, +} from './migration/versions/v1-to-v2-shared.js'; +import { writeWithBackupSync } from '../utils/writeWithBackup.js'; function getMergeStrategyForPath(path: string[]): MergeStrategy | undefined { let current: SettingDefinition | undefined = undefined; @@ -54,113 +64,10 @@ export const USER_SETTINGS_PATH = Storage.getGlobalSettingsPath(); export const USER_SETTINGS_DIR = path.dirname(USER_SETTINGS_PATH); export const DEFAULT_EXCLUDED_ENV_VARS = ['DEBUG', 'DEBUG_MODE']; -const MIGRATE_V2_OVERWRITE = true; - // Settings version to track migration state export const SETTINGS_VERSION = 3; export const SETTINGS_VERSION_KEY = '$version'; -const MIGRATION_MAP: Record = { - accessibility: 'ui.accessibility', - allowedTools: 'tools.allowed', - allowMCPServers: 'mcp.allowed', - autoAccept: 'tools.autoAccept', - autoConfigureMaxOldSpaceSize: 
'advanced.autoConfigureMemory', - bugCommand: 'advanced.bugCommand', - chatCompression: 'model.chatCompression', - checkpointing: 'general.checkpointing', - coreTools: 'tools.core', - contextFileName: 'context.fileName', - customThemes: 'ui.customThemes', - customWittyPhrases: 'ui.customWittyPhrases', - debugKeystrokeLogging: 'general.debugKeystrokeLogging', - dnsResolutionOrder: 'advanced.dnsResolutionOrder', - enforcedAuthType: 'security.auth.enforcedType', - excludeTools: 'tools.exclude', - excludeMCPServers: 'mcp.excluded', - excludedProjectEnvVars: 'advanced.excludedEnvVars', - extensions: 'extensions', - fileFiltering: 'context.fileFiltering', - folderTrustFeature: 'security.folderTrust.featureEnabled', - folderTrust: 'security.folderTrust.enabled', - hasSeenIdeIntegrationNudge: 'ide.hasSeenNudge', - hideWindowTitle: 'ui.hideWindowTitle', - showStatusInTitle: 'ui.showStatusInTitle', - hideTips: 'ui.hideTips', - showLineNumbers: 'ui.showLineNumbers', - showCitations: 'ui.showCitations', - ideMode: 'ide.enabled', - includeDirectories: 'context.includeDirectories', - loadMemoryFromIncludeDirectories: 'context.loadFromIncludeDirectories', - maxSessionTurns: 'model.maxSessionTurns', - mcpServers: 'mcpServers', - mcpServerCommand: 'mcp.serverCommand', - memoryImportFormat: 'context.importFormat', - model: 'model.name', - preferredEditor: 'general.preferredEditor', - sandbox: 'tools.sandbox', - selectedAuthType: 'security.auth.selectedType', - shouldUseNodePtyShell: 'tools.shell.enableInteractiveShell', - shellPager: 'tools.shell.pager', - shellShowColor: 'tools.shell.showColor', - skipNextSpeakerCheck: 'model.skipNextSpeakerCheck', - summarizeToolOutput: 'model.summarizeToolOutput', - telemetry: 'telemetry', - theme: 'ui.theme', - toolDiscoveryCommand: 'tools.discoveryCommand', - toolCallCommand: 'tools.callCommand', - usageStatisticsEnabled: 'privacy.usageStatisticsEnabled', - useExternalAuth: 'security.auth.useExternal', - useRipgrep: 'tools.useRipgrep', - 
vimMode: 'general.vimMode', - - enableWelcomeBack: 'ui.enableWelcomeBack', - approvalMode: 'tools.approvalMode', - sessionTokenLimit: 'model.sessionTokenLimit', - contentGenerator: 'model.generationConfig', - skipLoopDetection: 'model.skipLoopDetection', - skipStartupContext: 'model.skipStartupContext', - enableOpenAILogging: 'model.enableOpenAILogging', - tavilyApiKey: 'advanced.tavilyApiKey', -}; - -// Settings that need boolean inversion during migration (V1 -> V3) -// Old negative naming -> new positive naming with inverted value -const INVERTED_BOOLEAN_MIGRATIONS: Record = { - disableAutoUpdate: 'general.enableAutoUpdate', - disableUpdateNag: 'general.enableAutoUpdate', - disableLoadingPhrases: 'ui.accessibility.enableLoadingPhrases', - disableFuzzySearch: 'context.fileFiltering.enableFuzzySearch', - disableCacheControl: 'model.generationConfig.enableCacheControl', -}; - -// Consolidated settings: multiple old V1 keys that map to a single new key. -// Policy: if ANY of the old disable* settings is true, the new enable* should be false. -const CONSOLIDATED_SETTINGS: Record = { - 'general.enableAutoUpdate': ['disableAutoUpdate', 'disableUpdateNag'], -}; - -// V2 nested paths that need inversion when migrating to V3 -const INVERTED_V2_PATHS: Record = { - 'general.disableAutoUpdate': 'general.enableAutoUpdate', - 'general.disableUpdateNag': 'general.enableAutoUpdate', - 'ui.accessibility.disableLoadingPhrases': - 'ui.accessibility.enableLoadingPhrases', - 'context.fileFiltering.disableFuzzySearch': - 'context.fileFiltering.enableFuzzySearch', - 'model.generationConfig.disableCacheControl': - 'model.generationConfig.enableCacheControl', -}; - -// Consolidated V2 paths: multiple old paths that map to a single new path. -// Policy: if ANY of the old disable* settings is true, the new enable* should be false. 
-const CONSOLIDATED_V2_PATHS: Record = { - 'general.enableAutoUpdate': [ - 'general.disableAutoUpdate', - 'general.disableUpdateNag', - ], -}; - export function getSystemSettingsPath(): string { if (process.env['QWEN_CODE_SYSTEM_SETTINGS_PATH']) { return process.env['QWEN_CODE_SYSTEM_SETTINGS_PATH']; @@ -218,312 +125,6 @@ export interface SettingsFile { rawJson?: string; } -function setNestedProperty( - obj: Record, - path: string, - value: unknown, -) { - const keys = path.split('.'); - const lastKey = keys.pop(); - if (!lastKey) return; - - let current: Record = obj; - for (const key of keys) { - if (current[key] === undefined) { - current[key] = {}; - } - const next = current[key]; - if (typeof next === 'object' && next !== null) { - current = next as Record; - } else { - // This path is invalid, so we stop. - return; - } - } - current[lastKey] = value; -} - -// Dynamically determine the top-level keys from the V2 settings structure. -const KNOWN_V2_CONTAINERS = new Set([ - ...Object.values(MIGRATION_MAP).map((path) => path.split('.')[0]), - ...Object.values(INVERTED_BOOLEAN_MIGRATIONS).map( - (path) => path.split('.')[0], - ), -]); - -export function needsMigration(settings: Record): boolean { - // Check version field first - if present and matches current version, no migration needed - if (SETTINGS_VERSION_KEY in settings) { - const version = settings[SETTINGS_VERSION_KEY]; - if (typeof version === 'number' && version >= SETTINGS_VERSION) { - return false; - } - } - - // Fallback to legacy detection: A file needs migration if it contains any - // top-level key that is moved to a nested location in V2. - const hasV1Keys = Object.entries(MIGRATION_MAP).some(([v1Key, v2Path]) => { - if (v1Key === v2Path || !(v1Key in settings)) { - return false; - } - // If a key exists that is both a V1 key and a V2 container (like 'model'), - // we need to check the type. If it's an object, it's a V2 container and not - // a V1 key that needs migration. 
- if ( - KNOWN_V2_CONTAINERS.has(v1Key) && - typeof settings[v1Key] === 'object' && - settings[v1Key] !== null - ) { - return false; - } - return true; - }); - - // Also check for old inverted boolean keys (disable* -> enable*) - const hasInvertedBooleanKeys = Object.keys(INVERTED_BOOLEAN_MIGRATIONS).some( - (v1Key) => v1Key in settings, - ); - - return hasV1Keys || hasInvertedBooleanKeys; -} - -/** - * Migrates V1 (flat) settings directly to V3. - * This includes both structural migration (flat -> nested) and boolean - * inversion (disable* -> enable*), so migrateV2ToV3 will be skipped. - */ -function migrateV1ToV3( - flatSettings: Record, -): Record | null { - if (!needsMigration(flatSettings)) { - return null; - } - - const v2Settings: Record = {}; - const flatKeys = new Set(Object.keys(flatSettings)); - - for (const [oldKey, newPath] of Object.entries(MIGRATION_MAP)) { - if (flatKeys.has(oldKey)) { - // Safety check: If this key is a V2 container (like 'model') and it's - // already an object, it's likely already in V2 format. Skip migration - // to prevent double-nesting (e.g., model.name.name). 
- if ( - KNOWN_V2_CONTAINERS.has(oldKey) && - typeof flatSettings[oldKey] === 'object' && - flatSettings[oldKey] !== null && - !Array.isArray(flatSettings[oldKey]) - ) { - // This is already a V2 container, carry it over as-is - v2Settings[oldKey] = flatSettings[oldKey]; - flatKeys.delete(oldKey); - continue; - } - - setNestedProperty(v2Settings, newPath, flatSettings[oldKey]); - flatKeys.delete(oldKey); - } - } - - // Handle consolidated settings first (multiple old keys -> single new key) - // Policy: if ANY of the old disable* settings is true, the new enable* should be false - for (const [newPath, oldKeys] of Object.entries(CONSOLIDATED_SETTINGS)) { - let hasAnyDisable = false; - let hasAnyValue = false; - for (const oldKey of oldKeys) { - if (flatKeys.has(oldKey)) { - hasAnyValue = true; - const oldValue = flatSettings[oldKey]; - if (typeof oldValue === 'boolean' && oldValue === true) { - hasAnyDisable = true; - } - flatKeys.delete(oldKey); - } - } - if (hasAnyValue) { - // enableAutoUpdate = !hasAnyDisable (if any disable* was true, enable should be false) - setNestedProperty(v2Settings, newPath, !hasAnyDisable); - } - } - - // Handle remaining V1 settings that need boolean inversion (disable* -> enable*) - // Skip keys that were already handled by consolidated settings - const consolidatedKeys = new Set(Object.values(CONSOLIDATED_SETTINGS).flat()); - for (const [oldKey, newPath] of Object.entries(INVERTED_BOOLEAN_MIGRATIONS)) { - if (consolidatedKeys.has(oldKey)) { - continue; - } - if (flatKeys.has(oldKey)) { - const oldValue = flatSettings[oldKey]; - if (typeof oldValue === 'boolean') { - setNestedProperty(v2Settings, newPath, !oldValue); - } - flatKeys.delete(oldKey); - } - } - - // Preserve mcpServers at the top level - if (flatSettings['mcpServers']) { - v2Settings['mcpServers'] = flatSettings['mcpServers']; - flatKeys.delete('mcpServers'); - } - - // Carry over any unrecognized keys - for (const remainingKey of flatKeys) { - const existingValue = 
v2Settings[remainingKey]; - const newValue = flatSettings[remainingKey]; - - if ( - typeof existingValue === 'object' && - existingValue !== null && - !Array.isArray(existingValue) && - typeof newValue === 'object' && - newValue !== null && - !Array.isArray(newValue) - ) { - const pathAwareGetStrategy = (path: string[]) => - getMergeStrategyForPath([remainingKey, ...path]); - v2Settings[remainingKey] = customDeepMerge( - pathAwareGetStrategy, - {}, - newValue as MergeableObject, - existingValue as MergeableObject, - ); - } else { - v2Settings[remainingKey] = newValue; - } - } - - // Set version field to indicate this is a V2 settings file - v2Settings[SETTINGS_VERSION_KEY] = SETTINGS_VERSION; - - return v2Settings; -} - -// Migrate V2 settings to V3 (invert disable* -> enable* booleans) -function migrateV2ToV3( - settings: Record, -): Record | null { - const version = settings[SETTINGS_VERSION_KEY]; - if (typeof version === 'number' && version >= 3) { - return null; - } - - let changed = false; - const result = structuredClone(settings); - const processedPaths = new Set(); - - // Handle consolidated V2 paths first (multiple old paths -> single new path) - // Policy: if ANY of the old disable* settings is true, the new enable* should be false - for (const [newPath, oldPaths] of Object.entries(CONSOLIDATED_V2_PATHS)) { - let hasAnyDisable = false; - let hasAnyValue = false; - for (const oldPath of oldPaths) { - const oldValue = getNestedProperty(result, oldPath); - if (typeof oldValue === 'boolean') { - hasAnyValue = true; - if (oldValue === true) { - hasAnyDisable = true; - } - deleteNestedProperty(result, oldPath); - processedPaths.add(oldPath); - changed = true; - } - } - if (hasAnyValue) { - // enableAutoUpdate = !hasAnyDisable (if any disable* was true, enable should be false) - setNestedProperty(result, newPath, !hasAnyDisable); - } - } - - // Handle remaining V2 paths that need inversion - for (const [oldPath, newPath] of Object.entries(INVERTED_V2_PATHS)) { - 
if (processedPaths.has(oldPath)) { - continue; - } - const oldValue = getNestedProperty(result, oldPath); - if (typeof oldValue === 'boolean') { - // Remove old property - deleteNestedProperty(result, oldPath); - // Set new property with inverted value - setNestedProperty(result, newPath, !oldValue); - changed = true; - } - } - - if (changed) { - result[SETTINGS_VERSION_KEY] = SETTINGS_VERSION; - return result; - } - - // Even if no changes, bump version to 3 to skip future migration checks - if (typeof version === 'number' && version < SETTINGS_VERSION) { - result[SETTINGS_VERSION_KEY] = SETTINGS_VERSION; - return result; - } - - return null; -} - -function deleteNestedProperty( - obj: Record, - path: string, -): void { - const keys = path.split('.'); - const lastKey = keys.pop(); - if (!lastKey) return; - - let current: Record = obj; - for (const key of keys) { - const next = current[key]; - if (typeof next !== 'object' || next === null) { - return; - } - current = next as Record; - } - delete current[lastKey]; -} - -function getNestedProperty( - obj: Record, - path: string, -): unknown { - const keys = path.split('.'); - let current: unknown = obj; - for (const key of keys) { - if (typeof current !== 'object' || current === null || !(key in current)) { - return undefined; - } - current = (current as Record)[key]; - } - return current; -} - -const REVERSE_MIGRATION_MAP: Record = Object.fromEntries( - Object.entries(MIGRATION_MAP).map(([key, value]) => [value, key]), -); - -// Reverse map for old V2 paths (before rename) to V1 keys. -// Used when migrating settings that still have old V2 naming (e.g., general.disableAutoUpdate). 
-const OLD_V2_TO_V1_MAP: Record = {}; -for (const [oldV2Path, newV3Path] of Object.entries(INVERTED_V2_PATHS)) { - // Find the V1 key that maps to this V3 path - for (const [v1Key, v3Path] of Object.entries(INVERTED_BOOLEAN_MIGRATIONS)) { - if (v3Path === newV3Path) { - OLD_V2_TO_V1_MAP[oldV2Path] = v1Key; - break; - } - } -} - -// Reverse map for new V3 paths to V1 keys (with boolean inversion). -// Used when migrating settings that have new V3 naming (e.g., general.enableAutoUpdate). -const V3_TO_V1_INVERTED_MAP: Record = Object.fromEntries( - Object.entries(INVERTED_BOOLEAN_MIGRATIONS).map(([v1Key, v3Path]) => [ - v3Path, - v1Key, - ]), -); - function getSettingsFileKeyWarnings( settings: Record, settingsFilePath: string, @@ -537,7 +138,7 @@ function getSettingsFileKeyWarnings( const ignoredLegacyKeys = new Set(); // Ignored legacy keys (V1 top-level keys that moved to a nested V2 path). - for (const [oldKey, newPath] of Object.entries(MIGRATION_MAP)) { + for (const [oldKey, newPath] of Object.entries(V1_TO_V2_MIGRATION_MAP)) { if (oldKey === newPath) { continue; } @@ -550,7 +151,7 @@ function getSettingsFileKeyWarnings( // If this key is a V2 container (like 'model') and it's already an object, // it's likely already in V2 format. Don't warn. if ( - KNOWN_V2_CONTAINERS.has(oldKey) && + V2_CONTAINER_KEYS.has(oldKey) && typeof oldValue === 'object' && oldValue !== null && !Array.isArray(oldValue) @@ -586,7 +187,8 @@ function getSettingsFileKeyWarnings( } /** - * Collects warnings for ignored legacy and unknown settings keys. + * Collects warnings for ignored legacy and unknown settings keys, + * as well as migration warnings. * * For `$version: 2` settings files, we do not apply implicit migrations. * Instead, we surface actionable, de-duplicated warnings in the terminal UI. 
@@ -594,6 +196,11 @@ function getSettingsFileKeyWarnings( export function getSettingsWarnings(loadedSettings: LoadedSettings): string[] { const warningSet = new Set(); + // Add migration warnings first + for (const warning of loadedSettings.migrationWarnings) { + warningSet.add(`Warning: ${warning}`); + } + for (const scope of [SettingScope.User, SettingScope.Workspace]) { const settingsFile = loadedSettings.forScope(scope); if (settingsFile.rawJson === undefined) { @@ -616,75 +223,6 @@ export function getSettingsWarnings(loadedSettings: LoadedSettings): string[] { return [...warningSet]; } -export function migrateSettingsToV1( - v2Settings: Record, -): Record { - const v1Settings: Record = {}; - const v2Keys = new Set(Object.keys(v2Settings)); - - for (const [newPath, oldKey] of Object.entries(REVERSE_MIGRATION_MAP)) { - const value = getNestedProperty(v2Settings, newPath); - if (value !== undefined) { - v1Settings[oldKey] = value; - v2Keys.delete(newPath.split('.')[0]); - } - } - - // Handle old V2 inverted paths (no value inversion needed) - // e.g., general.disableAutoUpdate -> disableAutoUpdate - for (const [oldV2Path, v1Key] of Object.entries(OLD_V2_TO_V1_MAP)) { - const value = getNestedProperty(v2Settings, oldV2Path); - if (value !== undefined) { - v1Settings[v1Key] = value; - v2Keys.delete(oldV2Path.split('.')[0]); - } - } - - // Handle new V3 inverted paths (WITH value inversion) - // e.g., general.enableAutoUpdate -> disableAutoUpdate (inverted) - for (const [v3Path, v1Key] of Object.entries(V3_TO_V1_INVERTED_MAP)) { - const value = getNestedProperty(v2Settings, v3Path); - if (value !== undefined && typeof value === 'boolean') { - v1Settings[v1Key] = !value; - v2Keys.delete(v3Path.split('.')[0]); - } - } - - // Preserve mcpServers at the top level - if (v2Settings['mcpServers']) { - v1Settings['mcpServers'] = v2Settings['mcpServers']; - v2Keys.delete('mcpServers'); - } - - // Carry over any unrecognized keys - for (const remainingKey of v2Keys) { - // 
Skip the version field - it's only for V2 format - if (remainingKey === SETTINGS_VERSION_KEY) { - continue; - } - - const value = v2Settings[remainingKey]; - if (value === undefined) { - continue; - } - - // Don't carry over empty objects that were just containers for migrated settings. - if ( - KNOWN_V2_CONTAINERS.has(remainingKey) && - typeof value === 'object' && - value !== null && - !Array.isArray(value) && - Object.keys(value).length === 0 - ) { - continue; - } - - v1Settings[remainingKey] = value; - } - - return v1Settings; -} - function mergeSettings( system: Settings, systemDefaults: Settings, @@ -718,6 +256,7 @@ export class LoadedSettings { workspace: SettingsFile, isTrusted: boolean, migratedInMemorScopes: Set, + migrationWarnings: string[] = [], ) { this.system = system; this.systemDefaults = systemDefaults; @@ -725,6 +264,7 @@ export class LoadedSettings { this.workspace = workspace; this.isTrusted = isTrusted; this.migratedInMemorScopes = migratedInMemorScopes; + this.migrationWarnings = migrationWarnings; this._merged = this.computeMergedSettings(); } @@ -734,6 +274,7 @@ export class LoadedSettings { readonly workspace: SettingsFile; readonly isTrusted: boolean; readonly migratedInMemorScopes: Set; + readonly migrationWarnings: string[]; private _merged: Settings; @@ -768,8 +309,8 @@ export class LoadedSettings { setValue(scope: SettingScope, key: string, value: unknown): void { const settingsFile = this.forScope(scope); - setNestedProperty(settingsFile.settings, key, value); - setNestedProperty(settingsFile.originalSettings, key, value); + setNestedPropertySafe(settingsFile.settings, key, value); + setNestedPropertySafe(settingsFile.originalSettings, key, value); this._merged = this.computeMergedSettings(); saveSettings(settingsFile); } @@ -793,6 +334,7 @@ export function createMinimalSettings(): LoadedSettings { emptySettingsFile, false, new Set(), + [], ); } @@ -933,6 +475,16 @@ export function loadEnvironment(settings: Settings): void { export 
function loadSettings( workspaceDir: string = process.cwd(), ): LoadedSettings { + // Set up a temporary debug log session for the startup phase. + // This allows migration errors to be logged to file instead of being + // exposed to users via stderr. The Config class will override this + // with the actual session once initialized. + const resolvedWorkspaceDir = path.resolve(workspaceDir); + const sanitizedProjectId = sanitizeCwd(resolvedWorkspaceDir); + setDebugLogSession({ + getSessionId: () => `startup-${sanitizedProjectId}`, + }); + let systemSettings: Settings = {}; let systemDefaultSettings: Settings = {}; let userSettings: Settings = {}; @@ -943,7 +495,7 @@ export function loadSettings( const migratedInMemorScopes = new Set(); // Resolve paths to their canonical representation to handle symlinks - const resolvedWorkspaceDir = path.resolve(workspaceDir); + // Note: resolvedWorkspaceDir is already defined at the top of the function const resolvedHomeDir = path.resolve(homedir()); let realWorkspaceDir = resolvedWorkspaceDir; @@ -964,7 +516,7 @@ export function loadSettings( const loadAndMigrate = ( filePath: string, scope: SettingScope, - ): { settings: Settings; rawJson?: string } => { + ): { settings: Settings; rawJson?: string; migrationWarnings?: string[] } => { try { if (fs.existsSync(filePath)) { const content = fs.readFileSync(filePath, 'utf-8'); @@ -983,74 +535,59 @@ export function loadSettings( } let settingsObject = rawSettings as Record; + const hasVersionKey = SETTINGS_VERSION_KEY in settingsObject; + const versionValue = settingsObject[SETTINGS_VERSION_KEY]; + const hasInvalidVersion = + hasVersionKey && typeof versionValue !== 'number'; + const hasLegacyNumericVersion = + typeof versionValue === 'number' && versionValue < SETTINGS_VERSION; + let migrationWarnings: string[] | undefined; + + const persistSettingsObject = (warningPrefix: string) => { + try { + writeWithBackupSync( + filePath, + JSON.stringify(settingsObject, null, 2), + ); + } 
catch (e) { + debugLogger.error(`${warningPrefix}: ${getErrorMessage(e)}`); + } + }; + if (needsMigration(settingsObject)) { - const migratedSettings = migrateV1ToV3(settingsObject); - if (migratedSettings) { - if (MIGRATE_V2_OVERWRITE) { - try { - fs.renameSync(filePath, `${filePath}.orig`); - fs.writeFileSync( - filePath, - JSON.stringify(migratedSettings, null, 2), - 'utf-8', - ); - } catch (e) { - writeStderrLine( - `Error migrating settings file on disk: ${getErrorMessage( - e, - )}`, - ); - } - } else { - migratedInMemorScopes.add(scope); - } - settingsObject = migratedSettings; + const migrationResult = runMigrations(settingsObject, scope); + if (migrationResult.executedMigrations.length > 0) { + settingsObject = migrationResult.settings as Record< + string, + unknown + >; + migrationWarnings = migrationResult.warnings; + persistSettingsObject('Error migrating settings file on disk'); + } else if (hasLegacyNumericVersion || hasInvalidVersion) { + // Migration was deemed needed but nothing executed. Normalize version metadata + // to avoid repeated no-op checks on startup. + settingsObject[SETTINGS_VERSION_KEY] = SETTINGS_VERSION; + debugLogger.warn( + `Settings version metadata in ${filePath} could not be migrated by any registered migration. Normalizing ${SETTINGS_VERSION_KEY} to ${SETTINGS_VERSION}.`, + ); + persistSettingsObject('Error normalizing settings version on disk'); } - } else if (!(SETTINGS_VERSION_KEY in settingsObject)) { - // No migration needed, but version field is missing - add it for future optimizations + } else if ( + !hasVersionKey || + hasInvalidVersion || + hasLegacyNumericVersion + ) { + // No migration needed/executable, but version metadata is missing or invalid. + // Normalize it to current version to avoid repeated startup work. 
settingsObject[SETTINGS_VERSION_KEY] = SETTINGS_VERSION; - if (MIGRATE_V2_OVERWRITE) { - try { - fs.writeFileSync( - filePath, - JSON.stringify(settingsObject, null, 2), - 'utf-8', - ); - } catch (e) { - writeStderrLine( - `Error adding version to settings file: ${getErrorMessage(e)}`, - ); - } - } + persistSettingsObject('Error normalizing settings version on disk'); } - // V2 to V3 migration (invert disable* -> enable* booleans) - const v3Migrated = migrateV2ToV3(settingsObject); - if (v3Migrated) { - if (MIGRATE_V2_OVERWRITE) { - try { - // Only backup if not already backed up by V1->V2 migration - const backupPath = `${filePath}.orig`; - if (!fs.existsSync(backupPath)) { - fs.renameSync(filePath, backupPath); - } - fs.writeFileSync( - filePath, - JSON.stringify(v3Migrated, null, 2), - 'utf-8', - ); - } catch (e) { - writeStderrLine( - `Error migrating settings file to V3: ${getErrorMessage(e)}`, - ); - } - } else { - migratedInMemorScopes.add(scope); - } - settingsObject = v3Migrated; - } - - return { settings: settingsObject as Settings, rawJson: content }; + return { + settings: settingsObject as Settings, + rawJson: content, + migrationWarnings, + }; } } catch (error: unknown) { settingsErrors.push({ @@ -1068,7 +605,11 @@ export function loadSettings( ); const userResult = loadAndMigrate(USER_SETTINGS_PATH, SettingScope.User); - let workspaceResult: { settings: Settings; rawJson?: string } = { + let workspaceResult: { + settings: Settings; + rawJson?: string; + migrationWarnings?: string[]; + } = { settings: {} as Settings, rawJson: undefined, }; @@ -1138,6 +679,14 @@ export function loadSettings( ); } + // Collect all migration warnings from all scopes + const allMigrationWarnings: string[] = [ + ...(systemResult.migrationWarnings ?? []), + ...(systemDefaultsResult.migrationWarnings ?? []), + ...(userResult.migrationWarnings ?? []), + ...(workspaceResult.migrationWarnings ?? 
[]), + ]; + return new LoadedSettings( { path: systemSettingsPath, @@ -1165,6 +714,7 @@ export function loadSettings( }, isTrusted, migratedInMemorScopes, + allMigrationWarnings, ); } @@ -1176,21 +726,14 @@ export function saveSettings(settingsFile: SettingsFile): void { fs.mkdirSync(dirPath, { recursive: true }); } - let settingsToSave = settingsFile.originalSettings; - if (!MIGRATE_V2_OVERWRITE) { - settingsToSave = migrateSettingsToV1( - settingsToSave as Record, - ) as Settings; - } - // Use the format-preserving update function updateSettingsFilePreservingFormat( settingsFile.path, - settingsToSave as Record, + settingsFile.originalSettings as Record, ); } catch (error) { - writeStderrLine('Error saving user settings file.'); - writeStderrLine(error instanceof Error ? error.message : String(error)); + debugLogger.error('Error saving user settings file.'); + debugLogger.error(error instanceof Error ? error.message : String(error)); throw error; } } diff --git a/packages/cli/src/gemini.test.tsx b/packages/cli/src/gemini.test.tsx index 8c9cd687f..e4efea1f5 100644 --- a/packages/cli/src/gemini.test.tsx +++ b/packages/cli/src/gemini.test.tsx @@ -190,6 +190,7 @@ describe('gemini.tsx main function', () => { }, setValue: vi.fn(), forScope: () => ({ settings: {}, originalSettings: {}, path: '' }), + migrationWarnings: [], } as never); try { await main(); @@ -262,7 +263,7 @@ describe('gemini.tsx main function', () => { 'isRaw', ); Object.defineProperty(process.stdin, 'isTTY', { - value: true, + value: false, // 在 stream-json 模式下应为 false configurable: true, }); Object.defineProperty(process.stdin, 'isRaw', { @@ -322,6 +323,7 @@ describe('gemini.tsx main function', () => { }, setValue: vi.fn(), forScope: () => ({ settings: {}, originalSettings: {}, path: '' }), + migrationWarnings: [], } as never); vi.mocked(parseArguments).mockResolvedValue({ @@ -344,6 +346,9 @@ describe('gemini.tsx main function', () => { getInputFormat: () => 'stream-json', getContentGeneratorConfig: 
() => ({ authType: 'test-auth' }), getWarnings: () => [], + getUsageStatisticsEnabled: () => true, + getSessionId: () => 'test-session-id', + getOutputFormat: () => OutputFormat.TEXT, } as unknown as Config; vi.mocked(loadCliConfig).mockResolvedValue(configStub); @@ -442,6 +447,7 @@ describe('gemini.tsx main function kitty protocol', () => { getScreenReader: () => false, getGeminiMdFileCount: () => 0, getWarnings: () => [], + getUsageStatisticsEnabled: () => true, } as unknown as Config); vi.mocked(loadSettings).mockReturnValue({ errors: [], @@ -452,6 +458,7 @@ describe('gemini.tsx main function kitty protocol', () => { }, setValue: vi.fn(), forScope: () => ({ settings: {}, originalSettings: {}, path: '' }), + migrationWarnings: [], } as never); vi.mocked(parseArguments).mockResolvedValue({ model: undefined, diff --git a/packages/cli/src/gemini.tsx b/packages/cli/src/gemini.tsx index c5e742ee6..58a735c73 100644 --- a/packages/cli/src/gemini.tsx +++ b/packages/cli/src/gemini.tsx @@ -385,17 +385,16 @@ export async function main() { setMaxSizedBoxDebugging(isDebugMode); // Check input format early to determine initialization flow - const inputFormat = - typeof config.getInputFormat === 'function' + // In TTY mode, ignore stream-json input format to prevent process from hanging + const inputFormat = process.stdin.isTTY + ? InputFormat.TEXT + : typeof config.getInputFormat === 'function' ? 
config.getInputFormat() : InputFormat.TEXT; // For stream-json mode, defer config.initialize() until after the initialize control request // For other modes, initialize normally - let initializationResult: InitializationResult | undefined; - if (inputFormat !== InputFormat.STREAM_JSON) { - initializationResult = await initializeApp(config, settings); - } + const initializationResult = await initializeApp(config, settings); if (config.getExperimentalZedIntegration()) { return runAcpAgent(config, settings, argv); diff --git a/packages/cli/src/services/FileCommandLoader-markdown.test.ts b/packages/cli/src/services/FileCommandLoader-markdown.test.ts index 590f2d100..737cc39db 100644 --- a/packages/cli/src/services/FileCommandLoader-markdown.test.ts +++ b/packages/cli/src/services/FileCommandLoader-markdown.test.ts @@ -77,6 +77,30 @@ This is a test prompt from markdown.`; } }); + it('should load markdown commands with BOM and CRLF frontmatter', async () => { + const mdContent = + '\uFEFF---\r\ndescription: Windows markdown command\r\n---\r\n\r\nPrompt from windows markdown.\r\n'; + + const commandPath = path.join(tempDir, 'windows-command.md'); + await fs.writeFile(commandPath, mdContent, 'utf-8'); + + const loader = new FileCommandLoader(null); + const originalMethod = loader['getCommandDirectories']; + loader['getCommandDirectories'] = () => [{ path: tempDir }]; + + try { + const commands = await loader.loadCommands(new AbortController().signal); + const windowsCommand = commands.find( + (cmd) => cmd.name === 'windows-command', + ); + + expect(windowsCommand).toBeDefined(); + expect(windowsCommand?.description).toBe('Windows markdown command'); + } finally { + loader['getCommandDirectories'] = originalMethod; + } + }); + it('should load both toml and markdown commands', async () => { // Create both TOML and Markdown files const tomlContent = `prompt = "TOML prompt" diff --git a/packages/cli/src/services/markdown-command-parser.test.ts 
b/packages/cli/src/services/markdown-command-parser.test.ts index 4de35f0ea..bbefa43a4 100644 --- a/packages/cli/src/services/markdown-command-parser.test.ts +++ b/packages/cli/src/services/markdown-command-parser.test.ts @@ -94,6 +94,51 @@ Prompt content.`; expect(result.frontmatter).toBeDefined(); expect(result.prompt).toBe('Prompt content.'); }); + + it('should parse frontmatter in CRLF files', () => { + const content = + '---\r\ndescription: Windows command\r\n---\r\n\r\nLine 1\r\nLine 2\r\n'; + + const result = parseMarkdownCommand(content); + + expect(result).toEqual({ + frontmatter: { + description: 'Windows command', + }, + prompt: 'Line 1\nLine 2', + }); + }); + + it('should parse frontmatter in CR-only files', () => { + const content = + '---\rdescription: Old mac command\r---\r\rLine 1\rLine 2\r'; + + const result = parseMarkdownCommand(content); + + expect(result).toEqual({ + frontmatter: { + description: 'Old mac command', + }, + prompt: 'Line 1\nLine 2', + }); + }); + + it('should parse frontmatter when content starts with UTF-8 BOM', () => { + const content = `\uFEFF--- +description: BOM command +--- + +Prompt from BOM file.`; + + const result = parseMarkdownCommand(content); + + expect(result).toEqual({ + frontmatter: { + description: 'BOM command', + }, + prompt: 'Prompt from BOM file.', + }); + }); }); describe('MarkdownCommandDefSchema', () => { diff --git a/packages/cli/src/services/markdown-command-parser.ts b/packages/cli/src/services/markdown-command-parser.ts index 5b6ed38bf..5d4a3b7df 100644 --- a/packages/cli/src/services/markdown-command-parser.ts +++ b/packages/cli/src/services/markdown-command-parser.ts @@ -5,7 +5,10 @@ */ import { z } from 'zod'; -import { parse as parseYaml } from '@qwen-code/qwen-code-core'; +import { + parse as parseYaml, + normalizeContent, +} from '@qwen-code/qwen-code-core'; /** * Defines the Zod schema for a Markdown command definition file. 
@@ -31,19 +34,21 @@ export type MarkdownCommandDef = z.infer; * @returns Parsed command definition with frontmatter and prompt */ export function parseMarkdownCommand(content: string): MarkdownCommandDef { + const normalizedContent = normalizeContent(content); + // Match YAML frontmatter pattern: ---\n...\n---\n - // Allow empty frontmatter: ---\n---\n // Use (?:[\s\S]*?) to make the frontmatter content optional - const frontmatterRegex = /^---\n([\s\S]*?)---\n([\s\S]*)$/; - const match = content.match(frontmatterRegex); + // Allow empty frontmatter: ---\n---\n + const frontmatterRegex = /^---\n(?:([\s\S]*?)\n)?---(?:\n|$)([\s\S]*)$/; + const match = normalizedContent.match(frontmatterRegex); if (!match) { // No frontmatter, entire content is the prompt return { - prompt: content.trim(), + prompt: normalizedContent.trim(), }; } - const [, frontmatterYaml, body] = match; + const [, frontmatterYaml = '', body] = match; // Parse YAML frontmatter if not empty let frontmatter: Record | undefined; diff --git a/packages/cli/src/ui/components/messages/DiffRenderer.test.tsx b/packages/cli/src/ui/components/messages/DiffRenderer.test.tsx index a725f5e64..245f4df2c 100644 --- a/packages/cli/src/ui/components/messages/DiffRenderer.test.tsx +++ b/packages/cli/src/ui/components/messages/DiffRenderer.test.tsx @@ -56,6 +56,7 @@ index 0000000..e69de29 80, undefined, mockSettings, + 4, ); }); @@ -86,6 +87,7 @@ index 0000000..e69de29 80, undefined, mockSettings, + 4, ); }); @@ -115,6 +117,7 @@ index 0000000..e69de29 80, undefined, mockSettings, + 4, ); }); diff --git a/packages/cli/src/ui/components/messages/DiffRenderer.tsx b/packages/cli/src/ui/components/messages/DiffRenderer.tsx index 3670be34b..8910d6d80 100644 --- a/packages/cli/src/ui/components/messages/DiffRenderer.tsx +++ b/packages/cli/src/ui/components/messages/DiffRenderer.tsx @@ -161,6 +161,7 @@ export const DiffRenderer: React.FC = ({ contentWidth, theme, settings, + tabWidth, ); } else { renderedOutput = 
renderDiffContent( diff --git a/packages/cli/src/ui/contexts/KeypressContext.test.tsx b/packages/cli/src/ui/contexts/KeypressContext.test.tsx index c28cd9525..d69bada5b 100644 --- a/packages/cli/src/ui/contexts/KeypressContext.test.tsx +++ b/packages/cli/src/ui/contexts/KeypressContext.test.tsx @@ -1335,6 +1335,40 @@ describe('KeypressContext - Kitty Protocol', () => { ); }); + describe('Printable CSI-u keys', () => { + it('parses kitty CSI-u space as a space key with literal sequence', () => { + const keyHandler = vi.fn(); + const { result } = renderHook(() => useKeypressContext(), { wrapper }); + act(() => result.current.subscribe(keyHandler)); + + act(() => stdin.sendKittySequence(`\x1b[32u`)); + + expect(keyHandler).toHaveBeenCalledWith( + expect.objectContaining({ + name: 'space', + sequence: ' ', + kittyProtocol: true, + }), + ); + }); + + it('parses kitty CSI-u printable letters as literal input', () => { + const keyHandler = vi.fn(); + const { result } = renderHook(() => useKeypressContext(), { wrapper }); + act(() => result.current.subscribe(keyHandler)); + + act(() => stdin.sendKittySequence(`\x1b[100u`)); // 'd' + + expect(keyHandler).toHaveBeenCalledWith( + expect.objectContaining({ + name: 'd', + sequence: 'd', + kittyProtocol: true, + }), + ); + }); + }); + describe('Shift+Tab forms', () => { it.each([ { sequence: `\x1b[Z`, description: 'legacy reverse Tab' }, diff --git a/packages/cli/src/ui/contexts/KeypressContext.tsx b/packages/cli/src/ui/contexts/KeypressContext.tsx index c4e192609..4496f5e1b 100644 --- a/packages/cli/src/ui/contexts/KeypressContext.tsx +++ b/packages/cli/src/ui/contexts/KeypressContext.tsx @@ -332,6 +332,36 @@ export function KeypressProvider({ }; } + // Printable CSI-u keys (including space) should behave like regular + // character input so downstream text inputs receive the literal char. 
+ if ( + terminator === 'u' && + !ctrl && + keyCode >= 32 && + keyCode !== 127 && + keyCode <= 0x10ffff + ) { + const char = String.fromCodePoint(keyCode); + const printableName = + char === ' ' + ? 'space' + : /^[A-Za-z]$/.test(char) + ? char.toLowerCase() + : char; + return { + key: { + name: printableName, + ctrl: false, + meta: alt, + shift, + paste: false, + sequence: char, + kittyProtocol: true, + }, + length: m[0].length, + }; + } + // Ctrl+letters if ( ctrl && diff --git a/packages/cli/src/ui/utils/CodeColorizer.tsx b/packages/cli/src/ui/utils/CodeColorizer.tsx index 0dabddb22..da0d99132 100644 --- a/packages/cli/src/ui/utils/CodeColorizer.tsx +++ b/packages/cli/src/ui/utils/CodeColorizer.tsx @@ -125,6 +125,7 @@ export function colorizeLine( * * @param code The code string to highlight. * @param language The language identifier (e.g., 'javascript', 'css', 'html') + * @param tabWidth The number of spaces to replace each tab character with, default is 4 * @returns A React.ReactNode containing Ink elements for the highlighted code. */ export function colorizeCode( @@ -134,8 +135,11 @@ export function colorizeCode( maxWidth?: number, theme?: Theme, settings?: LoadedSettings, + tabWidth = 4, ): React.ReactNode { - const codeToHighlight = code.replace(/\n$/, ''); + const codeToHighlight = code + .replace(/\n$/, '') + .replace(/\t/g, ' '.repeat(tabWidth)); const activeTheme = theme || themeManager.getActiveTheme(); const showLineNumbers = settings?.merged.ui?.showLineNumbers ?? 
true; diff --git a/packages/cli/src/utils/languageUtils.test.ts b/packages/cli/src/utils/languageUtils.test.ts index 7081f0c94..39582af75 100644 --- a/packages/cli/src/utils/languageUtils.test.ts +++ b/packages/cli/src/utils/languageUtils.test.ts @@ -218,6 +218,43 @@ describe('languageUtils', () => { '', ); }); + + it('should use mandatory language rule instead of preference', () => { + writeOutputLanguageFile('Chinese'); + + const writtenContent = vi.mocked(fs.writeFileSync).mock + .calls[0][1] as string; + expect(writtenContent).toContain( + 'You MUST always respond in **Chinese**', + ); + expect(writtenContent).toContain( + 'This is a mandatory requirement, not a preference.', + ); + expect(writtenContent).not.toContain('Prefer responding'); + }); + + it('should include exception clause for explicit user language requests', () => { + writeOutputLanguageFile('English'); + + const writtenContent = vi.mocked(fs.writeFileSync).mock + .calls[0][1] as string; + expect(writtenContent).toContain('## Exception'); + expect(writtenContent).toContain( + "switch to the user's requested language for the remainder of the conversation", + ); + }); + + it('should use the correct language name throughout the template', () => { + writeOutputLanguageFile('Japanese'); + + const writtenContent = vi.mocked(fs.writeFileSync).mock + .calls[0][1] as string; + expect(writtenContent).toContain( + 'You MUST always respond in **Japanese**', + ); + expect(writtenContent).toContain('## Rule'); + expect(writtenContent).toContain('## Exception'); + }); }); describe('updateOutputLanguageFile', () => { diff --git a/packages/cli/src/utils/languageUtils.ts b/packages/cli/src/utils/languageUtils.ts index e9b61923d..cb4e06c4d 100644 --- a/packages/cli/src/utils/languageUtils.ts +++ b/packages/cli/src/utils/languageUtils.ts @@ -89,17 +89,18 @@ function generateOutputLanguageFileContent(language: string): string { return `# Output language preference: ${language} -## Goal -Prefer responding in 
**${language}** for normal assistant messages and explanations. +## Rule +You MUST always respond in **${language}** regardless of the user's input language. +This is a mandatory requirement, not a preference. + +## Exception +If the user **explicitly** requests a response in a specific language (e.g., "please reply in English", "用中文回答"), switch to the user's requested language for the remainder of the conversation. ## Keep technical artifacts unchanged Do **not** translate or rewrite: - Code blocks, CLI commands, file paths, stack traces, logs, JSON keys, identifiers - Exact quoted text from the user (keep quotes verbatim) -## When a conflict exists -If higher-priority instructions (system/developer) require a different behavior, follow them. - ## Tool / system outputs Raw tool/system outputs may contain fixed-format English. Preserve them verbatim, and if needed, add a short **${language}** explanation below. `; diff --git a/packages/cli/src/utils/settingsUtils.ts b/packages/cli/src/utils/settingsUtils.ts index 1bd5988eb..0effeb738 100644 --- a/packages/cli/src/utils/settingsUtils.ts +++ b/packages/cli/src/utils/settingsUtils.ts @@ -129,6 +129,13 @@ export function getNestedValue( return undefined; } +export function getNestedProperty( + obj: Record, + path: string, +): unknown { + return getNestedValue(obj, path.split('.')); +} + /** * Get the effective value for a setting, considering inheritance from higher scopes * Always returns a value (never undefined) - falls back to default if not set anywhere @@ -382,30 +389,69 @@ export function settingExistsInScope( return value !== undefined; } -/** - * Recursively sets a value in a nested object using a key path array. 
- */ -function setNestedValue( +export function setNestedPropertyForce( obj: Record, - path: string[], + path: string, value: unknown, -): Record { - const [first, ...rest] = path; - if (!first) { - return obj; +): void { + const keys = path.split('.'); + const lastKey = keys.pop(); + if (!lastKey) return; + + let current: Record = obj; + for (const key of keys) { + if (!current[key] || typeof current[key] !== 'object') { + current[key] = {}; + } + current = current[key] as Record; } - if (rest.length === 0) { - obj[first] = value; - return obj; + current[lastKey] = value; +} + +export function setNestedPropertySafe( + obj: Record, + path: string, + value: unknown, +): void { + const keys = path.split('.'); + const lastKey = keys.pop(); + if (!lastKey) return; + + let current: Record = obj; + for (const key of keys) { + if (current[key] === undefined) { + current[key] = {}; + } + const next = current[key]; + if (typeof next === 'object' && next !== null) { + current = next as Record; + } else { + return; + } } - if (!obj[first] || typeof obj[first] !== 'object') { - obj[first] = {}; + current[lastKey] = value; +} + +export function deleteNestedPropertySafe( + obj: Record, + path: string, +): void { + const keys = path.split('.'); + const lastKey = keys.pop(); + if (!lastKey) return; + + let current: Record = obj; + for (const key of keys) { + const next = current[key]; + if (typeof next !== 'object' || next === null) { + return; + } + current = next as Record; } - setNestedValue(obj[first] as Record, rest, value); - return obj; + delete current[lastKey]; } /** @@ -416,9 +462,8 @@ export function setPendingSettingValue( value: boolean, pendingSettings: Settings, ): Settings { - const path = key.split('.'); const newSettings = JSON.parse(JSON.stringify(pendingSettings)); - setNestedValue(newSettings, path, value); + setNestedPropertyForce(newSettings, key, value); return newSettings; } @@ -430,9 +475,8 @@ export function setPendingSettingValueAny( value: 
SettingsValue, pendingSettings: Settings, ): Settings { - const path = key.split('.'); const newSettings = structuredClone(pendingSettings); - setNestedValue(newSettings, path, value); + setNestedPropertyForce(newSettings, key, value); return newSettings; } diff --git a/packages/cli/src/utils/writeWithBackup.test.ts b/packages/cli/src/utils/writeWithBackup.test.ts new file mode 100644 index 000000000..219bda81b --- /dev/null +++ b/packages/cli/src/utils/writeWithBackup.test.ts @@ -0,0 +1,232 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect, beforeEach, afterEach } from 'vitest'; +import * as fs from 'node:fs'; +import * as path from 'node:path'; +import * as os from 'node:os'; +import { writeWithBackup, writeWithBackupSync } from './writeWithBackup.js'; + +describe('writeWithBackup', () => { + let tempDir: string; + + beforeEach(() => { + tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'writeWithBackup-test-')); + }); + + afterEach(() => { + // Clean up temp directory + try { + fs.rmSync(tempDir, { recursive: true, force: true }); + } catch (_e) { + // Ignore cleanup errors + } + }); + + describe('writeWithBackupSync', () => { + it('should write content to a new file', () => { + const targetPath = path.join(tempDir, 'test-file.txt'); + const content = 'Hello, World!'; + + writeWithBackupSync(targetPath, content); + + expect(fs.existsSync(targetPath)).toBe(true); + expect(fs.readFileSync(targetPath, 'utf-8')).toBe(content); + }); + + it('should backup existing file before writing', () => { + const targetPath = path.join(tempDir, 'test-file.txt'); + const originalContent = 'Original content'; + const newContent = 'New content'; + + fs.writeFileSync(targetPath, originalContent); + writeWithBackupSync(targetPath, newContent); + + expect(fs.readFileSync(targetPath, 'utf-8')).toBe(newContent); + expect(fs.existsSync(`${targetPath}.orig`)).toBe(true); + 
expect(fs.readFileSync(`${targetPath}.orig`, 'utf-8')).toBe( + originalContent, + ); + }); + + it('should use custom backup suffix', () => { + const targetPath = path.join(tempDir, 'test-file.txt'); + const originalContent = 'Original'; + + fs.writeFileSync(targetPath, originalContent); + writeWithBackupSync(targetPath, 'New', { backupSuffix: '.bak' }); + + expect(fs.existsSync(`${targetPath}.bak`)).toBe(true); + expect(fs.existsSync(`${targetPath}.orig`)).toBe(false); + }); + + it('should clean up temp file on failure', () => { + const targetPath = path.join(tempDir, 'test-file.txt'); + const tempPath = `${targetPath}.tmp`; + + // Create a situation where rename will fail (e.g., by creating a directory at target) + fs.mkdirSync(targetPath); + + expect(() => writeWithBackupSync(targetPath, 'content')).toThrow(); + expect(fs.existsSync(tempPath)).toBe(false); + }); + + it('should preserve original file content when write fails after backup', () => { + const targetPath = path.join(tempDir, 'test-file.txt'); + const originalContent = 'Original content that must be preserved'; + + // Create original file + fs.writeFileSync(targetPath, originalContent); + + // Create a situation where rename will fail (by creating a directory at temp path) + const tempPath = `${targetPath}.tmp`; + fs.mkdirSync(tempPath); + + // The write should fail + expect(() => writeWithBackupSync(targetPath, 'New content')).toThrow(); + + // Original file should still exist with original content + expect(fs.existsSync(targetPath)).toBe(true); + expect(fs.statSync(targetPath).isFile()).toBe(true); + expect(fs.readFileSync(targetPath, 'utf-8')).toBe(originalContent); + + // Cleanup + fs.rmdirSync(tempPath); + }); + + it('should restore original file from backup when rename fails', () => { + const targetPath = path.join(tempDir, 'test-file.txt'); + const backupPath = `${targetPath}.orig`; + const originalContent = 'Original content'; + const newContent = 'New content'; + + // Create original file + 
fs.writeFileSync(targetPath, originalContent); + + // Write new content successfully first + writeWithBackupSync(targetPath, newContent); + + // Verify backup exists with original content + expect(fs.existsSync(backupPath)).toBe(true); + expect(fs.readFileSync(backupPath, 'utf-8')).toBe(originalContent); + + // Verify target has new content + expect(fs.readFileSync(targetPath, 'utf-8')).toBe(newContent); + + // Now simulate a failure scenario: delete target and try to restore from backup + fs.unlinkSync(targetPath); + + // Restore from backup manually to verify backup integrity + fs.copyFileSync(backupPath, targetPath); + expect(fs.readFileSync(targetPath, 'utf-8')).toBe(originalContent); + }); + + it('should include recovery information in error message', () => { + const targetPath = path.join(tempDir, 'test-file.txt'); + + // Create a situation where rename will fail (directory at target) + fs.mkdirSync(targetPath); + + let errorMessage = ''; + try { + writeWithBackupSync(targetPath, 'content'); + } catch (error) { + errorMessage = error instanceof Error ? error.message : String(error); + } + + // Error message should be descriptive + expect(errorMessage).toContain('directory'); + expect(errorMessage.length).toBeGreaterThan(10); + }); + + it('should handle backup failure with descriptive error', () => { + const targetPath = path.join(tempDir, 'test-file.txt'); + const backupPath = `${targetPath}.orig`; + const originalContent = 'Original content'; + + // Create original file + fs.writeFileSync(targetPath, originalContent); + + // Create a directory at backup path to cause backup to fail + fs.mkdirSync(backupPath); + + let errorMessage = ''; + try { + writeWithBackupSync(targetPath, 'New content'); + } catch (error) { + errorMessage = error instanceof Error ? 
error.message : String(error); + } + + // Error message should mention backup failure + expect(errorMessage).toContain('backup'); + + // Original file should still exist + expect(fs.existsSync(targetPath)).toBe(true); + expect(fs.readFileSync(targetPath, 'utf-8')).toBe(originalContent); + + // Cleanup + fs.rmdirSync(backupPath); + }); + + it('should clean up temp file when backup creation fails', () => { + const targetPath = path.join(tempDir, 'test-file.txt'); + const tempPath = `${targetPath}.tmp`; + const backupPath = `${targetPath}.orig`; + const originalContent = 'Original content'; + + // Create original file + fs.writeFileSync(targetPath, originalContent); + + // Create a directory at backup path to cause backup to fail + fs.mkdirSync(backupPath); + + // The write should fail + expect(() => writeWithBackupSync(targetPath, 'New content')).toThrow(); + + // Temp file should be cleaned up + expect(fs.existsSync(tempPath)).toBe(false); + + // Cleanup + fs.rmdirSync(backupPath); + }); + }); + + describe('writeWithBackup (async)', () => { + it('should write content to a new file', async () => { + const targetPath = path.join(tempDir, 'test-file.txt'); + const content = 'Hello, World!'; + + await writeWithBackup(targetPath, content); + + expect(fs.existsSync(targetPath)).toBe(true); + expect(fs.readFileSync(targetPath, 'utf-8')).toBe(content); + }); + + it('should backup existing file before writing', async () => { + const targetPath = path.join(tempDir, 'test-file.txt'); + const originalContent = 'Original content'; + const newContent = 'New content'; + + fs.writeFileSync(targetPath, originalContent); + await writeWithBackup(targetPath, newContent); + + expect(fs.readFileSync(targetPath, 'utf-8')).toBe(newContent); + expect(fs.existsSync(`${targetPath}.orig`)).toBe(true); + expect(fs.readFileSync(`${targetPath}.orig`, 'utf-8')).toBe( + originalContent, + ); + }); + + it('should use custom encoding', async () => { + const targetPath = path.join(tempDir, 
'test-file.txt'); + const content = 'Hello, World!'; + + await writeWithBackup(targetPath, content, { encoding: 'utf8' }); + + expect(fs.readFileSync(targetPath, 'utf-8')).toBe(content); + }); + }); +}); diff --git a/packages/cli/src/utils/writeWithBackup.ts b/packages/cli/src/utils/writeWithBackup.ts new file mode 100644 index 000000000..2c341ae38 --- /dev/null +++ b/packages/cli/src/utils/writeWithBackup.ts @@ -0,0 +1,169 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import * as fs from 'node:fs'; + +/** + * Options for writeWithBackup function. + */ +export interface WriteWithBackupOptions { + /** Suffix for backup file (default: '.orig') */ + backupSuffix?: string; + /** File encoding (default: 'utf-8') */ + encoding?: BufferEncoding; +} + +/** + * Safely writes content to a file with backup protection. + * + * This function ensures data safety by: + * 1. Writing content to a temporary file first + * 2. Backing up the existing target file (if any) + * 3. Renaming the temporary file to the target path + * + * If any step fails, an error is thrown and no partial changes are left on disk. + * The backup file (if created) can be used for manual recovery. + * + * Note: This is not 100% atomic but provides good protection. In the worst case, + * a .orig backup file remains that can be manually restored. 
+ * + * @param targetPath - The path to write to + * @param content - The content to write + * @param options - Optional configuration + * @throws Error if any step of the write process fails + * + * @example + * ```typescript + * await writeWithBackup('/path/to/settings.json', JSON.stringify(settings, null, 2)); + * // If /path/to/settings.json existed, it's now backed up to /path/to/settings.json.orig + * ``` + */ +export async function writeWithBackup( + targetPath: string, + content: string, + options: WriteWithBackupOptions = {}, +): Promise { + // Async version delegates to sync version since file operations are synchronous + writeWithBackupSync(targetPath, content, options); +} + +/** + * Synchronous version of writeWithBackup. + * + * @param targetPath - The path to write to + * @param content - The content to write + * @param options - Optional configuration + * @throws Error if any step of the write process fails + */ +export function writeWithBackupSync( + targetPath: string, + content: string, + options: WriteWithBackupOptions = {}, +): void { + const { backupSuffix = '.orig', encoding = 'utf-8' } = options; + const tempPath = `${targetPath}.tmp`; + const backupPath = `${targetPath}${backupSuffix}`; + + // Clean up any existing temp file from previous failed attempts + try { + if (fs.existsSync(tempPath)) { + fs.unlinkSync(tempPath); + } + } catch (_e) { + // Ignore cleanup errors + } + + try { + // Step 1: Write to temporary file + fs.writeFileSync(tempPath, content, { encoding }); + + // Step 2: If target exists, back it up + if (fs.existsSync(targetPath)) { + // Check if target is a directory - we can't write to a directory + const targetStat = fs.statSync(targetPath); + if (targetStat.isDirectory()) { + // Clean up temp file before throwing + try { + fs.unlinkSync(tempPath); + } catch (_e) { + // Ignore cleanup error + } + throw new Error( + `Cannot write to '${targetPath}' because it is a directory`, + ); + } + + try { + fs.renameSync(targetPath, 
backupPath); + } catch (backupError) { + // Clean up temp file before throwing + try { + fs.unlinkSync(tempPath); + } catch (_e) { + // Ignore cleanup error + } + throw new Error( + `Failed to backup existing file: ${backupError instanceof Error ? backupError.message : String(backupError)}`, + ); + } + } + + // Step 3: Rename temp file to target + try { + fs.renameSync(tempPath, targetPath); + } catch (renameError) { + let restoreFailedMessage: string | undefined; + let backupExisted = false; + + // Attempt to restore backup if rename failed + if (fs.existsSync(backupPath)) { + backupExisted = true; + try { + fs.renameSync(backupPath, targetPath); + } catch (restoreError) { + restoreFailedMessage = + restoreError instanceof Error + ? restoreError.message + : String(restoreError); + } + } + + const writeFailureMessage = + renameError instanceof Error + ? renameError.message + : String(renameError); + + if (restoreFailedMessage) { + throw new Error( + `Failed to write file: ${writeFailureMessage}. ` + + `Automatic restore failed: ${restoreFailedMessage}. ` + + `Manual recovery may be required using backup file '${backupPath}'.`, + ); + } + + if (backupExisted) { + throw new Error( + `Failed to write file: ${writeFailureMessage}. ` + + `Target was automatically restored from backup '${backupPath}'.`, + ); + } + + throw new Error( + `Failed to write file: ${writeFailureMessage}. 
No backup file was available for restoration.`, + ); + } + } catch (error) { + // Ensure temp file is cleaned up on any error + try { + if (fs.existsSync(tempPath)) { + fs.unlinkSync(tempPath); + } + } catch (_e) { + // Ignore cleanup error + } + throw error; + } +} diff --git a/packages/core/package.json b/packages/core/package.json index 91dd7709b..43219cbcc 100644 --- a/packages/core/package.json +++ b/packages/core/package.json @@ -1,6 +1,6 @@ { "name": "@qwen-code/qwen-code-core", - "version": "0.11.1", + "version": "0.12.0", "description": "Qwen Code Core", "repository": { "type": "git", @@ -42,6 +42,7 @@ "ajv-formats": "^3.0.0", "async-mutex": "^0.5.0", "chardet": "^2.1.0", + "iconv-lite": "^0.6.3", "chokidar": "^4.0.3", "diff": "^7.0.0", "dotenv": "^17.1.0", diff --git a/packages/core/src/config/config.test.ts b/packages/core/src/config/config.test.ts index 2be01125f..828ef9c3e 100644 --- a/packages/core/src/config/config.test.ts +++ b/packages/core/src/config/config.test.ts @@ -118,6 +118,7 @@ vi.mock('../tools/memoryTool', () => ({ MemoryTool: createToolMock('save_memory'), setGeminiMdFilename: vi.fn(), getCurrentGeminiMdFilename: vi.fn(() => 'QWEN.md'), // Mock the original filename + getAllGeminiMdFilenames: vi.fn(() => ['QWEN.md', 'AGENTS.md']), DEFAULT_CONTEXT_FILENAME: 'QWEN.md', QWEN_CONFIG_DIR: '.qwen', })); diff --git a/packages/core/src/config/storage.ts b/packages/core/src/config/storage.ts index f9d0107e5..3293280a8 100644 --- a/packages/core/src/config/storage.ts +++ b/packages/core/src/config/storage.ts @@ -7,7 +7,7 @@ import * as path from 'node:path'; import * as os from 'node:os'; import * as fs from 'node:fs'; -import { getProjectHash } from '../utils/paths.js'; +import { getProjectHash, sanitizeCwd } from '../utils/paths.js'; export const QWEN_DIR = '.qwen'; export const GOOGLE_ACCOUNTS_FILENAME = 'google_accounts.json'; @@ -82,7 +82,7 @@ export class Storage { } getProjectDir(): string { - const projectId = 
this.sanitizeCwd(this.getProjectRoot()); + const projectId = sanitizeCwd(this.getProjectRoot()); const projectsDir = path.join(Storage.getGlobalQwenDir(), PROJECT_DIR_NAME); return path.join(projectsDir, projectId); } @@ -140,10 +140,4 @@ export class Storage { getHistoryFilePath(): string { return path.join(this.getProjectTempDir(), 'shell_history'); } - - private sanitizeCwd(cwd: string): string { - // On Windows, normalize to lowercase for case-insensitive matching - const normalizedCwd = os.platform() === 'win32' ? cwd.toLowerCase() : cwd; - return normalizedCwd.replace(/[^a-zA-Z0-9]/g, '-'); - } } diff --git a/packages/core/src/core/geminiChat.test.ts b/packages/core/src/core/geminiChat.test.ts index 1e68344ed..4f69b62eb 100644 --- a/packages/core/src/core/geminiChat.test.ts +++ b/packages/core/src/core/geminiChat.test.ts @@ -79,7 +79,7 @@ vi.mock('../telemetry/uiTelemetry.js', () => ({ }, })); -describe('GeminiChat', () => { +describe('GeminiChat', async () => { let mockContentGenerator: ContentGenerator; let chat: GeminiChat; let mockConfig: Config; @@ -132,6 +132,44 @@ describe('GeminiChat', () => { vi.resetAllMocks(); }); + /** + * Helper: consume a stream and expect it to throw InvalidStreamError + * after all transient retries exhaust. Uses fake timers to skip delays. + * Must be called within a vi.useFakeTimers() / vi.useRealTimers() block. + */ + async function expectStreamExhaustion( + stream: AsyncGenerator, + ): Promise { + const collecting = (async () => { + for await (const _ of stream) { + /* consume */ + } + })(); + // Get assertion promise first (don't await), then advance timers. 
+ const resultPromise = (async () => { + await expect(collecting).rejects.toThrow(InvalidStreamError); + })(); + await vi.advanceTimersByTimeAsync(0); + await vi.advanceTimersByTimeAsync(35_000); + await resultPromise; + } + + async function collectStreamWithFakeTimers( + stream: AsyncGenerator, + advanceByMs: number = 10_000, + ): Promise { + const events: StreamEvent[] = []; + const collecting = (async () => { + for await (const event of stream) { + events.push(event); + } + return events; + })(); + await vi.advanceTimersByTimeAsync(0); + await vi.advanceTimersByTimeAsync(advanceByMs); + return collecting; + } + describe('sendMessageStream', () => { it('should succeed if a tool call is followed by an empty part', async () => { // 1. Mock a stream that contains a tool call, then an invalid (empty) part. @@ -187,48 +225,44 @@ describe('GeminiChat', () => { }); it('should fail if the stream ends with an empty part and has no finishReason', async () => { - // 1. Mock a stream that ends with an invalid part and has no finish reason. - const streamWithNoFinish = (async function* () { - yield { - candidates: [ - { - content: { - role: 'model', - parts: [{ text: 'Initial content...' }], + vi.useFakeTimers(); + try { + const streamWithNoFinish = (async function* () { + yield { + candidates: [ + { + content: { + role: 'model', + parts: [{ text: 'Initial content...' }], + }, }, - }, - ], - } as unknown as GenerateContentResponse; - // This second chunk is invalid and has no finishReason, so it should fail. 
- yield { - candidates: [ - { - content: { - role: 'model', - parts: [{ text: '' }], + ], + } as unknown as GenerateContentResponse; + yield { + candidates: [ + { + content: { + role: 'model', + parts: [{ text: '' }], + }, }, - }, - ], - } as unknown as GenerateContentResponse; - })(); + ], + } as unknown as GenerateContentResponse; + })(); - vi.mocked(mockContentGenerator.generateContentStream).mockResolvedValue( - streamWithNoFinish, - ); + vi.mocked(mockContentGenerator.generateContentStream).mockResolvedValue( + streamWithNoFinish, + ); - // 2. Action & Assert: The stream should fail because there's no finish reason. - const stream = await chat.sendMessageStream( - 'test-model', - { message: 'test message' }, - 'prompt-id-no-finish-empty-end', - ); - await expect( - (async () => { - for await (const _ of stream) { - /* consume stream */ - } - })(), - ).rejects.toThrow(InvalidStreamError); + const stream = await chat.sendMessageStream( + 'test-model', + { message: 'test message' }, + 'prompt-id-no-finish-empty-end', + ); + await expectStreamExhaustion(stream); + } finally { + vi.useRealTimers(); + } }); it('should succeed if the stream ends with an invalid part but has a finishReason and contained a valid part', async () => { @@ -443,63 +477,62 @@ describe('GeminiChat', () => { ); }); it('should throw an error when a tool call is followed by an empty stream response', async () => { - // 1. Setup: A history where the model has just made a function call. - const initialHistory: Content[] = [ - { - role: 'user', - parts: [{ text: 'Find a good Italian restaurant for me.' }], - }, - { - role: 'model', - parts: [ - { - functionCall: { + vi.useFakeTimers(); + try { + // 1. Setup: A history where the model has just made a function call. + const initialHistory: Content[] = [ + { + role: 'user', + parts: [{ text: 'Find a good Italian restaurant for me.' 
}], + }, + { + role: 'model', + parts: [ + { + functionCall: { + name: 'find_restaurant', + args: { cuisine: 'Italian' }, + }, + }, + ], + }, + ]; + chat.setHistory(initialHistory); + + // 2. Mock the API to return an empty/thought-only stream. + const emptyStreamResponse = (async function* () { + yield { + candidates: [ + { + content: { role: 'model', parts: [{ thought: true }] }, + finishReason: 'STOP', + }, + ], + } as unknown as GenerateContentResponse; + })(); + vi.mocked(mockContentGenerator.generateContentStream).mockResolvedValue( + emptyStreamResponse, + ); + + // 3. Action: Send the function response back to the model and consume the stream. + const stream = await chat.sendMessageStream( + 'test-model', + { + message: { + functionResponse: { name: 'find_restaurant', - args: { cuisine: 'Italian' }, + response: { name: 'Vesuvio' }, }, }, - ], - }, - ]; - chat.setHistory(initialHistory); - - // 2. Mock the API to return an empty/thought-only stream. - const emptyStreamResponse = (async function* () { - yield { - candidates: [ - { - content: { role: 'model', parts: [{ thought: true }] }, - finishReason: 'STOP', - }, - ], - } as unknown as GenerateContentResponse; - })(); - vi.mocked(mockContentGenerator.generateContentStream).mockResolvedValue( - emptyStreamResponse, - ); - - // 3. Action: Send the function response back to the model and consume the stream. - const stream = await chat.sendMessageStream( - 'test-model', - { - message: { - functionResponse: { - name: 'find_restaurant', - response: { name: 'Vesuvio' }, - }, }, - }, - 'prompt-id-stream-1', - ); + 'prompt-id-stream-1', + ); - // 4. Assert: The stream processing should throw an InvalidStreamError. - await expect( - (async () => { - for await (const _ of stream) { - // This loop consumes the stream to trigger the internal logic. - } - })(), - ).rejects.toThrow(InvalidStreamError); + // 4. Assert: The stream processing should throw an InvalidStreamError. 
+ await expectStreamExhaustion(stream); + } finally { + vi.useRealTimers(); + } }); it('should succeed when there is a tool call without finish reason', async () => { @@ -546,73 +579,69 @@ describe('GeminiChat', () => { }); it('should throw InvalidStreamError when no tool call and no finish reason', async () => { - // Setup: Stream with text but no finish reason and no tool call - const streamWithoutFinishReason = (async function* () { - yield { - candidates: [ - { - content: { - role: 'model', - parts: [{ text: 'some response' }], + vi.useFakeTimers(); + try { + // Setup: Stream with text but no finish reason and no tool call + const streamWithoutFinishReason = (async function* () { + yield { + candidates: [ + { + content: { + role: 'model', + parts: [{ text: 'some response' }], + }, + // No finishReason }, - // No finishReason - }, - ], - } as unknown as GenerateContentResponse; - })(); + ], + } as unknown as GenerateContentResponse; + })(); - vi.mocked(mockContentGenerator.generateContentStream).mockResolvedValue( - streamWithoutFinishReason, - ); + vi.mocked(mockContentGenerator.generateContentStream).mockResolvedValue( + streamWithoutFinishReason, + ); - const stream = await chat.sendMessageStream( - 'test-model', - { message: 'test' }, - 'prompt-id-1', - ); - - await expect( - (async () => { - for await (const _ of stream) { - // consume stream - } - })(), - ).rejects.toThrow(InvalidStreamError); + const stream = await chat.sendMessageStream( + 'test-model', + { message: 'test' }, + 'prompt-id-1', + ); + await expectStreamExhaustion(stream); + } finally { + vi.useRealTimers(); + } }); it('should throw InvalidStreamError when no tool call and empty response text', async () => { - // Setup: Stream with finish reason but empty response (only thoughts) - const streamWithEmptyResponse = (async function* () { - yield { - candidates: [ - { - content: { - role: 'model', - parts: [{ thought: 'thinking...' 
}], + vi.useFakeTimers(); + try { + // Setup: Stream with finish reason but empty response (only thoughts) + const streamWithEmptyResponse = (async function* () { + yield { + candidates: [ + { + content: { + role: 'model', + parts: [{ thought: 'thinking...' }], + }, + finishReason: 'STOP', }, - finishReason: 'STOP', - }, - ], - } as unknown as GenerateContentResponse; - })(); + ], + } as unknown as GenerateContentResponse; + })(); - vi.mocked(mockContentGenerator.generateContentStream).mockResolvedValue( - streamWithEmptyResponse, - ); + vi.mocked(mockContentGenerator.generateContentStream).mockResolvedValue( + streamWithEmptyResponse, + ); - const stream = await chat.sendMessageStream( - 'test-model', - { message: 'test' }, - 'prompt-id-1', - ); - - await expect( - (async () => { - for await (const _ of stream) { - // consume stream - } - })(), - ).rejects.toThrow(InvalidStreamError); + const stream = await chat.sendMessageStream( + 'test-model', + { message: 'test' }, + 'prompt-id-1', + ); + await expectStreamExhaustion(stream); + } finally { + vi.useRealTimers(); + } }); it('should succeed when there is finish reason and response text', async () => { @@ -651,6 +680,50 @@ describe('GeminiChat', () => { ).resolves.not.toThrow(); }); + it('should not lose finish reason when last chunk only has usage metadata', async () => { + const streamWithTrailingUsageOnlyChunk = (async function* () { + yield { + candidates: [ + { + content: { + role: 'model', + parts: [{ text: 'valid response' }], + }, + finishReason: 'STOP', + }, + ], + } as unknown as GenerateContentResponse; + + // Some providers emit a trailing usage-only chunk after finishReason. 
+ yield { + candidates: [], + usageMetadata: { + promptTokenCount: 11, + candidatesTokenCount: 5, + totalTokenCount: 16, + }, + } as unknown as GenerateContentResponse; + })(); + + vi.mocked(mockContentGenerator.generateContentStream).mockResolvedValue( + streamWithTrailingUsageOnlyChunk, + ); + + const stream = await chat.sendMessageStream( + 'test-model', + { message: 'test' }, + 'prompt-id-1', + ); + + await expect( + (async () => { + for await (const _ of stream) { + // consume stream + } + })(), + ).resolves.not.toThrow(); + }); + it('should call generateContentStream with the correct parameters', async () => { const response = (async function* () { yield { @@ -774,122 +847,87 @@ describe('GeminiChat', () => { }); describe('sendMessageStream with retries', () => { - it('should yield a RETRY event when an invalid stream is encountered', async () => { - // ARRANGE: Mock the stream to fail once, then succeed. - vi.mocked(mockContentGenerator.generateContentStream) - .mockImplementationOnce(async () => - // First attempt: An invalid stream with an empty text part. - (async function* () { - yield { - candidates: [{ content: { parts: [{ text: '' }] } }], - } as unknown as GenerateContentResponse; - })(), - ) - .mockImplementationOnce(async () => - // Second attempt (the retry): A minimal valid stream. - (async function* () { - yield { - candidates: [ - { - content: { parts: [{ text: 'Success' }] }, - finishReason: 'STOP', - }, - ], - } as unknown as GenerateContentResponse; - })(), - ); - - // ACT: Send a message and collect all events from the stream. - const stream = await chat.sendMessageStream( - 'test-model', - { message: 'test' }, - 'prompt-id-yield-retry', - ); - const events: StreamEvent[] = []; - for await (const event of stream) { - events.push(event); - } - - // ASSERT: Check that a RETRY event was present in the stream's output. 
- const retryEvent = events.find((e) => e.type === StreamEventType.RETRY); - - expect(retryEvent).toBeDefined(); - expect(retryEvent?.type).toBe(StreamEventType.RETRY); - }); it('should retry on invalid content, succeed, and report metrics', async () => { - // Use mockImplementationOnce to provide a fresh, promise-wrapped generator for each attempt. - vi.mocked(mockContentGenerator.generateContentStream) - .mockImplementationOnce(async () => - // First call returns an invalid stream - (async function* () { - yield { - candidates: [{ content: { parts: [{ text: '' }] } }], // Invalid empty text part - } as unknown as GenerateContentResponse; - })(), - ) - .mockImplementationOnce(async () => - // Second call returns a valid stream - (async function* () { - yield { - candidates: [ - { - content: { parts: [{ text: 'Successful response' }] }, - finishReason: 'STOP', - }, - ], - } as unknown as GenerateContentResponse; - })(), + vi.useFakeTimers(); + try { + // Use mockImplementationOnce to provide a fresh, promise-wrapped generator for each attempt. 
+ vi.mocked(mockContentGenerator.generateContentStream) + .mockImplementationOnce(async () => + // First call returns an invalid stream + (async function* () { + yield { + candidates: [{ content: { parts: [{ text: '' }] } }], // Invalid empty text part + } as unknown as GenerateContentResponse; + })(), + ) + .mockImplementationOnce(async () => + // Second call returns a valid stream + (async function* () { + yield { + candidates: [ + { + content: { parts: [{ text: 'Successful response' }] }, + finishReason: 'STOP', + }, + ], + } as unknown as GenerateContentResponse; + })(), + ); + + const stream = await chat.sendMessageStream( + 'test-model', + { message: 'test' }, + 'prompt-id-retry-success', ); + const chunks = await collectStreamWithFakeTimers(stream); - const stream = await chat.sendMessageStream( - 'test-model', - { message: 'test' }, - 'prompt-id-retry-success', - ); - const chunks: StreamEvent[] = []; - for await (const chunk of stream) { - chunks.push(chunk); + // Assertions + expect(mockLogContentRetry).toHaveBeenCalledTimes(1); + expect(mockLogContentRetryFailure).not.toHaveBeenCalled(); + expect( + mockContentGenerator.generateContentStream, + ).toHaveBeenCalledTimes(2); + + // Check for a retry event + expect(chunks.some((c) => c.type === StreamEventType.RETRY)).toBe(true); + + // Check for the successful content chunk + expect( + chunks.some( + (c) => + c.type === StreamEventType.CHUNK && + c.value.candidates?.[0]?.content?.parts?.[0]?.text === + 'Successful response', + ), + ).toBe(true); + + // Check that history was recorded correctly once, with no duplicates. 
+ const history = chat.getHistory(); + expect(history.length).toBe(2); + expect(history[0]).toEqual({ + role: 'user', + parts: [{ text: 'test' }], + }); + expect(history[1]).toEqual({ + role: 'model', + parts: [{ text: 'Successful response' }], + }); + + // Verify that token counting is not called when usageMetadata is missing + expect( + uiTelemetryService.setLastPromptTokenCount, + ).not.toHaveBeenCalled(); + } finally { + vi.useRealTimers(); } - - // Assertions - expect(mockLogContentRetry).toHaveBeenCalledTimes(1); - expect(mockLogContentRetryFailure).not.toHaveBeenCalled(); - expect(mockContentGenerator.generateContentStream).toHaveBeenCalledTimes( - 2, - ); - - // Check for a retry event - expect(chunks.some((c) => c.type === StreamEventType.RETRY)).toBe(true); - - // Check for the successful content chunk - expect( - chunks.some( - (c) => - c.type === StreamEventType.CHUNK && - c.value.candidates?.[0]?.content?.parts?.[0]?.text === - 'Successful response', - ), - ).toBe(true); - - // Check that history was recorded correctly once, with no duplicates. 
- const history = chat.getHistory(); - expect(history.length).toBe(2); - expect(history[0]).toEqual({ - role: 'user', - parts: [{ text: 'test' }], - }); - expect(history[1]).toEqual({ - role: 'model', - parts: [{ text: 'Successful response' }], - }); - - // Verify that token counting is not called when usageMetadata is missing - expect(uiTelemetryService.setLastPromptTokenCount).not.toHaveBeenCalled(); }); it('should fail after all retries on persistent invalid content and report metrics', async () => { - vi.mocked(mockContentGenerator.generateContentStream).mockImplementation( - async () => + vi.useFakeTimers(); + try { + vi.mocked( + mockContentGenerator.generateContentStream, + ).mockImplementation(async () => (async function* () { yield { candidates: [ @@ -902,33 +940,86 @@ describe('GeminiChat', () => { ], } as unknown as GenerateContentResponse; })(), - ); + ); - const stream = await chat.sendMessageStream( - 'test-model', - { message: 'test' }, - 'prompt-id-retry-fail', - ); - await expect(async () => { - for await (const _ of stream) { - // Must loop to trigger the internal logic that throws. - } - }).rejects.toThrow(InvalidStreamError); + const stream = await chat.sendMessageStream( + 'test-model', + { message: 'test' }, + 'prompt-id-retry-fail', + ); + await expectStreamExhaustion(stream); - // Should be called 2 times (initial + 1 retry) - expect(mockContentGenerator.generateContentStream).toHaveBeenCalledTimes( - 2, - ); - expect(mockLogContentRetry).toHaveBeenCalledTimes(1); - expect(mockLogContentRetryFailure).toHaveBeenCalledTimes(1); + // Should be called 3 times (1 initial + 2 transient retries) + expect( + mockContentGenerator.generateContentStream, + ).toHaveBeenCalledTimes(3); + expect(mockLogContentRetry).toHaveBeenCalledTimes(2); + expect(mockLogContentRetryFailure).toHaveBeenCalledTimes(1); - // History should still contain the user message. 
- const history = chat.getHistory(); - expect(history.length).toBe(1); - expect(history[0]).toEqual({ - role: 'user', - parts: [{ text: 'test' }], - }); + // History should still contain the user message. + const history = chat.getHistory(); + expect(history.length).toBe(1); + expect(history[0]).toEqual({ + role: 'user', + parts: [{ text: 'test' }], + }); + } finally { + vi.useRealTimers(); + } + }); + + it('should retry usage-only empty streams and succeed on a later attempt', async () => { + vi.useFakeTimers(); + try { + vi.mocked(mockContentGenerator.generateContentStream) + .mockImplementationOnce(async () => + (async function* () { + yield { + usageMetadata: { + promptTokenCount: 10, + candidatesTokenCount: 0, + totalTokenCount: 10, + }, + } as unknown as GenerateContentResponse; + })(), + ) + .mockImplementationOnce(async () => + (async function* () { + yield { + candidates: [ + { + content: { + parts: [{ text: 'Recovered after empty stream' }], + }, + finishReason: 'STOP', + }, + ], + } as unknown as GenerateContentResponse; + })(), + ); + + const stream = await chat.sendMessageStream( + 'test-model', + { message: 'test' }, + 'prompt-id-empty-usage-retry', + ); + const events = await collectStreamWithFakeTimers(stream); + + expect( + mockContentGenerator.generateContentStream, + ).toHaveBeenCalledTimes(2); + expect(mockLogContentRetry).toHaveBeenCalledTimes(1); + expect( + events.some( + (e) => + e.type === StreamEventType.CHUNK && + e.value.candidates?.[0]?.content?.parts?.[0]?.text === + 'Recovered after empty stream', + ), + ).toBe(true); + } finally { + vi.useRealTimers(); + } }); it('should retry on TPM throttling StreamContentError with fixed delay', async () => { diff --git a/packages/core/src/core/geminiChat.ts b/packages/core/src/core/geminiChat.ts index 2e1923355..f58bcdb61 100644 --- a/packages/core/src/core/geminiChat.ts +++ b/packages/core/src/core/geminiChat.ts @@ -64,6 +64,16 @@ const INVALID_CONTENT_RETRY_OPTIONS: ContentRetryOptions = { 
initialDelayMs: 500, }; +// Some providers occasionally return transient stream anomalies: either an +// empty stream (usage metadata only, no candidates), a stream that finishes +// normally but contains no usable text, or a stream cut off without a finish +// reason. All are retried with an independent budget (similar to rate-limit +// retries) so they do not consume each other's retry budgets. +const INVALID_STREAM_RETRY_CONFIG = { + maxRetries: 2, + initialDelayMs: 2000, +}; + /** * Options for retrying on rate-limit throttling errors returned as stream content. * Fixed 60s delay matches the DashScope per-minute quota window. @@ -285,6 +295,7 @@ export class GeminiChat { try { let lastError: unknown = new Error('Request failed after all retries.'); let rateLimitRetryCount = 0; + let invalidStreamRetryCount = 0; // Read per-config overrides; fall back to built-in defaults. const cgConfig = self.config.getContentGeneratorConfig(); @@ -298,7 +309,11 @@ export class GeminiChat { attempt++ ) { try { - if (attempt > 0 || rateLimitRetryCount > 0) { + if ( + attempt > 0 || + rateLimitRetryCount > 0 || + invalidStreamRetryCount > 0 + ) { yield { type: StreamEventType.RETRY }; } @@ -348,10 +363,46 @@ export class GeminiChat { continue; } - const isContentError = error instanceof InvalidStreamError; + // Transient stream anomalies (NO_FINISH_REASON / NO_RESPONSE_TEXT): + // independent retry budget, similar to rate-limit handling. + // Does NOT consume the content retry budget. + const isTransientStreamError = error instanceof InvalidStreamError; + if ( + isTransientStreamError && + invalidStreamRetryCount < INVALID_STREAM_RETRY_CONFIG.maxRetries + ) { + invalidStreamRetryCount++; + const delayMs = + INVALID_STREAM_RETRY_CONFIG.initialDelayMs * + invalidStreamRetryCount; + debugLogger.warn( + `Invalid stream [${(error as InvalidStreamError).type}] ` + + `(retry ${invalidStreamRetryCount}/${INVALID_STREAM_RETRY_CONFIG.maxRetries}). 
` + + `Waiting ${delayMs / 1000}s before retrying...`, + ); + logContentRetry( + self.config, + new ContentRetryEvent( + invalidStreamRetryCount - 1, + (error as InvalidStreamError).type, + delayMs, + model, + ), + ); + yield { type: StreamEventType.RETRY }; + // Don't count transient retries against content retry limit. + attempt--; + await new Promise((res) => setTimeout(res, delayMs)); + continue; + } + // Transient budget exhausted — stop immediately. + if (isTransientStreamError) { + break; + } + // Other content validation errors (e.g. NO_FINISH_REASON). + const isContentError = error instanceof InvalidStreamError; if (isContentError) { - // Check if we have more attempts left. if (attempt < INVALID_CONTENT_RETRY_OPTIONS.maxAttempts - 1) { logContentRetry( self.config, @@ -378,11 +429,12 @@ export class GeminiChat { if (lastError) { if (lastError instanceof InvalidStreamError) { + const totalAttempts = invalidStreamRetryCount + 1; logContentRetryFailure( self.config, new ContentRetryFailureEvent( - INVALID_CONTENT_RETRY_OPTIONS.maxAttempts, - (lastError as InvalidStreamError).type, + totalAttempts, + lastError.type, model, ), ); @@ -563,8 +615,11 @@ export class GeminiChat { let hasFinishReason = false; for await (const chunk of streamResponse) { - hasFinishReason = + // Use ||= to avoid later usage-only chunks (no candidates) overwriting + // a finishReason that was already seen in an earlier chunk. + hasFinishReason ||= chunk?.candidates?.some((candidate) => candidate.finishReason) ?? 
false; + if (isValidResponse(chunk)) { const content = chunk.candidates?.[0]?.content; if (content?.parts) { diff --git a/packages/core/src/core/loggingContentGenerator/loggingContentGenerator.ts b/packages/core/src/core/loggingContentGenerator/loggingContentGenerator.ts index 3c64c1267..1a51846c3 100644 --- a/packages/core/src/core/loggingContentGenerator/loggingContentGenerator.ts +++ b/packages/core/src/core/loggingContentGenerator/loggingContentGenerator.ts @@ -159,7 +159,7 @@ export class LoggingContentGenerator implements ContentGenerator { return response; } catch (error) { const durationMs = Date.now() - startTime; - this._logApiError(undefined, durationMs, error, req.model, userPromptId); + this._logApiError('', durationMs, error, req.model, userPromptId); await this.logOpenAIInteraction(openaiRequest, undefined, error); throw error; } @@ -178,7 +178,7 @@ export class LoggingContentGenerator implements ContentGenerator { stream = await this.wrapped.generateContentStream(req, userPromptId); } catch (error) { const durationMs = Date.now() - startTime; - this._logApiError(undefined, durationMs, error, req.model, userPromptId); + this._logApiError('', durationMs, error, req.model, userPromptId); await this.logOpenAIInteraction(openaiRequest, undefined, error); throw error; } @@ -225,7 +225,7 @@ export class LoggingContentGenerator implements ContentGenerator { } catch (error) { const durationMs = Date.now() - startTime; this._logApiError( - undefined, + responses[0]?.responseId ?? 
'', durationMs, error, responses[0]?.modelVersion || model, diff --git a/packages/core/src/core/openaiContentGenerator/pipeline.test.ts b/packages/core/src/core/openaiContentGenerator/pipeline.test.ts index d71e23e91..6969a51ef 100644 --- a/packages/core/src/core/openaiContentGenerator/pipeline.test.ts +++ b/packages/core/src/core/openaiContentGenerator/pipeline.test.ts @@ -980,6 +980,147 @@ describe('ContentGenerationPipeline', () => { totalTokenCount: 30, }); }); + + it('should not duplicate function calls when trailing chunks arrive after finish+usage merge', async () => { + // Reproduces the real-world bug: some providers (e.g. bailian/glm-5) + // send trailing empty chunks AFTER the finish+usage pair. Before the + // fix, each trailing chunk re-triggered the merge logic and yielded + // the finish response again (with the same function-call parts), + // causing duplicate tool-call execution in the UI. + const request: GenerateContentParameters = { + model: 'test-model', + contents: [{ parts: [{ text: 'Hello' }], role: 'user' }], + }; + const userPromptId = 'test-prompt-id'; + + // Chunk 1: content text + const mockChunk1 = { + id: 'chunk-1', + choices: [ + { delta: { content: 'I will create a todo' }, finish_reason: null }, + ], + } as OpenAI.Chat.ChatCompletionChunk; + + // Chunk 2: finish reason (with tool calls) + const mockChunk2 = { + id: 'chunk-2', + choices: [{ delta: {}, finish_reason: 'tool_calls' }], + } as OpenAI.Chat.ChatCompletionChunk; + + // Chunk 3: usage metadata only + const mockChunk3 = { + id: 'chunk-3', + choices: [], + usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 }, + } as unknown as OpenAI.Chat.ChatCompletionChunk; + + // Chunk 4: trailing empty chunk (the problematic one) + const mockChunk4 = { + id: 'chunk-4', + choices: [], + } as unknown as OpenAI.Chat.ChatCompletionChunk; + + const mockStream = { + async *[Symbol.asyncIterator]() { + yield mockChunk1; + yield mockChunk2; + yield mockChunk3; + yield mockChunk4; 
+ }, + }; + + // Converter output for chunk 1: text content + const mockContentResponse = new GenerateContentResponse(); + mockContentResponse.candidates = [ + { + content: { + parts: [{ text: 'I will create a todo' }], + role: 'model', + }, + }, + ]; + + // Converter output for chunk 2: finish + function call + const mockFinishResponse = new GenerateContentResponse(); + mockFinishResponse.candidates = [ + { + content: { + parts: [ + { + functionCall: { + name: 'todoWrite', + args: { text: 'buy milk' }, + }, + }, + ], + role: 'model', + }, + finishReason: FinishReason.STOP, + }, + ]; + + // Converter output for chunk 3: usage only + const mockUsageResponse = new GenerateContentResponse(); + mockUsageResponse.candidates = []; + mockUsageResponse.usageMetadata = { + promptTokenCount: 10, + candidatesTokenCount: 20, + totalTokenCount: 30, + }; + + // Converter output for chunk 4: trailing empty + const mockTrailingResponse = new GenerateContentResponse(); + mockTrailingResponse.candidates = []; + + (mockConverter.convertGeminiRequestToOpenAI as Mock).mockReturnValue([]); + (mockConverter.convertOpenAIChunkToGemini as Mock) + .mockReturnValueOnce(mockContentResponse) + .mockReturnValueOnce(mockFinishResponse) + .mockReturnValueOnce(mockUsageResponse) + .mockReturnValueOnce(mockTrailingResponse); + (mockClient.chat.completions.create as Mock).mockResolvedValue( + mockStream, + ); + + // Act + const resultGenerator = await pipeline.executeStream( + request, + userPromptId, + ); + const results = []; + for await (const result of resultGenerator) { + results.push(result); + } + + // Assert: exactly 2 results — content chunk + ONE merged finish chunk. + // Before the fix this was 3 (the trailing chunk triggered a duplicate). 
+ expect(results).toHaveLength(2); + expect(results[0]).toBe(mockContentResponse); + + // The merged result should have the function call and usage metadata + const mergedResult = results[1]!; + expect(mergedResult.candidates?.[0]?.finishReason).toBe( + FinishReason.STOP, + ); + expect( + mergedResult.candidates?.[0]?.content?.parts?.[0]?.functionCall?.name, + ).toBe('todoWrite'); + expect(mergedResult.usageMetadata).toEqual({ + promptTokenCount: 10, + candidatesTokenCount: 20, + totalTokenCount: 30, + }); + + // Count function-call parts across ALL yielded results — must be exactly 1 + let totalFunctionCalls = 0; + for (const result of results) { + const parts = result.candidates?.[0]?.content?.parts ?? []; + totalFunctionCalls += parts.filter( + (p: { functionCall?: unknown }) => p.functionCall, + ).length; + } + expect(totalFunctionCalls).toBe(1); + }); }); describe('buildRequest', () => { diff --git a/packages/core/src/core/openaiContentGenerator/pipeline.ts b/packages/core/src/core/openaiContentGenerator/pipeline.ts index 8d2cc9fc7..5c6cdc682 100644 --- a/packages/core/src/core/openaiContentGenerator/pipeline.ts +++ b/packages/core/src/core/openaiContentGenerator/pipeline.ts @@ -127,8 +127,15 @@ export class ContentGenerationPipeline { // Reset streaming tool calls to prevent data pollution from previous streams this.converter.resetStreamingToolCalls(); - // State for handling chunk merging + // State for handling chunk merging. + // pendingFinishResponse holds a finish chunk waiting to be merged with + // a subsequent usage-metadata chunk before yielding. + // finishYielded is set to true once the merged finish response has been + // yielded, so that any further trailing chunks are treated as normal + // chunks instead of triggering another merge (which would duplicate the + // function-call parts from the finish chunk). 
let pendingFinishResponse: GenerateContentResponse | null = null; + let finishYielded = false; try { // Stage 2a: Convert and yield each chunk while preserving original @@ -155,7 +162,29 @@ export class ContentGenerationPipeline { continue; } - // Stage 2c: Handle chunk merging for providers that send finishReason and usageMetadata separately + // Stage 2c: Handle chunk merging for providers that send + // finishReason and usageMetadata in separate chunks. + // Once the merged finish response has been yielded, skip + // further merging so trailing chunks don't duplicate the + // function-call parts carried by the finish chunk. + if (finishYielded) { + // Finish already yielded — absorb any remaining usage + // metadata but do NOT yield another response. + // Note: pendingFinishResponse is guaranteed non-null here because + // finishYielded is only set to true inside the `if (pendingFinishResponse)` + // block below. TypeScript cannot infer this through the callback + // assignment in handleChunkMerging, so an explicit cast is needed. + if (response.usageMetadata) { + const pending = + pendingFinishResponse as GenerateContentResponse | null; + if (pending) { + pending.usageMetadata = response.usageMetadata; + } + } + collectedGeminiResponses.push(response); + continue; + } + const shouldYield = this.handleChunkMerging( response, collectedGeminiResponses, @@ -168,15 +197,18 @@ export class ContentGenerationPipeline { // If we have a pending finish response, yield it instead if (pendingFinishResponse) { yield pendingFinishResponse; - pendingFinishResponse = null; + finishYielded = true; + // Keep pendingFinishResponse alive so late-arriving usage + // metadata can still be merged (see finishYielded block above). } else { yield response; } } } - // Stage 2d: If there's still a pending finish response at the end, yield it - if (pendingFinishResponse) { + // Stage 2d: If there's still a pending finish response at the end + // (e.g. 
no usage chunk arrived after the finish chunk), yield it. + if (pendingFinishResponse && !finishYielded) { yield pendingFinishResponse; } diff --git a/packages/core/src/core/openaiContentGenerator/provider/dashscope.test.ts b/packages/core/src/core/openaiContentGenerator/provider/dashscope.test.ts index 2e528120a..e1ecb61b6 100644 --- a/packages/core/src/core/openaiContentGenerator/provider/dashscope.test.ts +++ b/packages/core/src/core/openaiContentGenerator/provider/dashscope.test.ts @@ -117,6 +117,28 @@ describe('DashScopeOpenAICompatibleProvider', () => { expect(result).toBe(true); }); + it('should return true for DashScope coding plan URL', () => { + const config = { + authType: AuthType.USE_OPENAI, + baseUrl: 'https://coding.dashscope.aliyuncs.com/v1', + } as ContentGeneratorConfig; + + const result = + DashScopeOpenAICompatibleProvider.isDashScopeProvider(config); + expect(result).toBe(true); + }); + + it('should return true for DashScope international coding plan URL', () => { + const config = { + authType: AuthType.USE_OPENAI, + baseUrl: 'https://coding-intl.dashscope-intl.aliyuncs.com/v1', + } as ContentGeneratorConfig; + + const result = + DashScopeOpenAICompatibleProvider.isDashScopeProvider(config); + expect(result).toBe(true); + }); + it('should return false for non-DashScope configurations', () => { const configs = [ { diff --git a/packages/core/src/core/openaiContentGenerator/provider/dashscope.ts b/packages/core/src/core/openaiContentGenerator/provider/dashscope.ts index c2134914a..a889401cf 100644 --- a/packages/core/src/core/openaiContentGenerator/provider/dashscope.ts +++ b/packages/core/src/core/openaiContentGenerator/provider/dashscope.ts @@ -35,14 +35,13 @@ export class DashScopeOpenAICompatibleProvider static isDashScopeProvider( contentGeneratorConfig: ContentGeneratorConfig, ): boolean { - const authType = contentGeneratorConfig.authType; - const baseUrl = contentGeneratorConfig.baseUrl; - return ( - authType === AuthType.QWEN_OAUTH || - 
baseUrl === 'https://dashscope.aliyuncs.com/compatible-mode/v1' || - baseUrl === 'https://dashscope-intl.aliyuncs.com/compatible-mode/v1' || - !baseUrl - ); + const { authType, baseUrl } = contentGeneratorConfig; + + if (authType === AuthType.QWEN_OAUTH) return true; + if (!baseUrl) return true; + + // Matches: dashscope.aliyuncs.com, *.dashscope.aliyuncs.com, or *.dashscope-intl.aliyuncs.com + return /([\w-]+\.)?dashscope(-intl)?\.aliyuncs\.com/i.test(baseUrl); } buildHeaders(): Record { diff --git a/packages/core/src/extension/claude-converter.test.ts b/packages/core/src/extension/claude-converter.test.ts index b4d16c8f4..502e8196e 100644 --- a/packages/core/src/extension/claude-converter.test.ts +++ b/packages/core/src/extension/claude-converter.test.ts @@ -368,4 +368,69 @@ describe('convertClaudePluginPackage', () => { // Clean up converted directory fs.rmSync(result.convertedDir, { recursive: true, force: true }); }); + + it('should successfully convert agent files with Windows CRLF endings', async () => { + // Setup: Create a plugin with a source agents folder containing a CRLF agent + const pluginSourceDir = path.join(testDir, 'plugin-crlf-agents'); + fs.mkdirSync(pluginSourceDir, { recursive: true }); + + // Create source agents directory (renamed to src-agents to avoid skip-logic bug) + const agentsDir = path.join(pluginSourceDir, 'src-agents'); + fs.mkdirSync(agentsDir, { recursive: true }); + + // Write a .md file with CRLF endings + const crlfAgentContent = `---\r\nname: cool-agent\r\ndescription: A cool agent\r\n---\r\n\r\nSystem prompt body\r\n`; + fs.writeFileSync( + path.join(agentsDir, 'agent.md'), + crlfAgentContent, + 'utf-8', + ); + + // Create marketplace.json specifying to load this agent + const marketplaceDir = path.join(pluginSourceDir, '.claude-plugin'); + fs.mkdirSync(marketplaceDir, { recursive: true }); + + const marketplaceConfig: ClaudeMarketplaceConfig = { + name: 'test-marketplace', + owner: { name: 'Test Owner', email: 
'test@example.com' }, + plugins: [ + { + name: 'crlf-agents-plugin', + version: '1.0.0', + source: './', + strict: false, + agents: ['./src-agents/agent.md'], + }, + ], + }; + + fs.writeFileSync( + path.join(marketplaceDir, 'marketplace.json'), + JSON.stringify(marketplaceConfig, null, 2), + 'utf-8', + ); + + // Act: Convert + const result = await convertClaudePluginPackage( + pluginSourceDir, + 'crlf-agents-plugin', + ); + + // Verify: agent file was properly parsed and converted into .qwen/agents folder structure + const convertedAgentsDir = path.join(result.convertedDir, 'agents'); + expect(fs.existsSync(convertedAgentsDir)).toBe(true); + + const convertedFiles = fs.readdirSync(convertedAgentsDir); + expect(convertedFiles).toContain('agent.md'); // The filename is preserved from source + + // Verify it was actually parsed by checking the converted content format + const convertedContent = fs.readFileSync( + path.join(convertedAgentsDir, 'agent.md'), + 'utf-8', + ); + expect(convertedContent).toContain('name: cool-agent'); + + // Clean up + fs.rmSync(result.convertedDir, { recursive: true, force: true }); + }); }); diff --git a/packages/core/src/extension/claude-converter.ts b/packages/core/src/extension/claude-converter.ts index 68da9cfff..98639b197 100644 --- a/packages/core/src/extension/claude-converter.ts +++ b/packages/core/src/extension/claude-converter.ts @@ -24,6 +24,7 @@ import { stringify as stringifyYaml, } from '../utils/yaml-parser.js'; import { createDebugLogger } from '../utils/debugLogger.js'; +import { normalizeContent } from '../utils/textUtils.js'; const debugLogger = createDebugLogger('CLAUDE_CONVERTER'); @@ -226,10 +227,11 @@ async function convertAgentFiles(agentsDir: string): Promise { try { const content = await fs.promises.readFile(filePath, 'utf-8'); + const normalizedContent = normalizeContent(content); // Parse frontmatter const frontmatterRegex = /^---\n([\s\S]*?)\n---\n([\s\S]*)$/; - const match = content.match(frontmatterRegex); + 
const match = normalizedContent.match(frontmatterRegex); if (!match) { // No frontmatter, skip this file @@ -387,15 +389,15 @@ export async function convertClaudePluginPackage( const strict = marketplacePlugin.strict ?? false; let mergedConfig: ClaudePluginConfig; - if (strict) { - const pluginJsonPath = path.join( - pluginSource, - '.claude-plugin', - 'plugin.json', - ); - if (!fs.existsSync(pluginJsonPath)) { - throw new Error(`Strict mode requires plugin.json at ${pluginJsonPath}`); - } + const pluginJsonPath = path.join( + pluginSource, + '.claude-plugin', + 'plugin.json', + ); + if (strict && !fs.existsSync(pluginJsonPath)) { + throw new Error(`Strict mode requires plugin.json at ${pluginJsonPath}`); + } + if (fs.existsSync(pluginJsonPath)) { const pluginContent = fs.readFileSync(pluginJsonPath, 'utf-8'); const pluginConfig: ClaudePluginConfig = JSON.parse(pluginContent); mergedConfig = mergeClaudeConfigs(marketplacePlugin, pluginConfig); @@ -552,6 +554,18 @@ async function collectResources( const srcFile = path.join(resolvedPath, file); const destFile = path.join(finalDestDir, file); + // Check if the source is a regular file (skip sockets, FIFOs, directories behind symlinks, etc.) 
+ try { + const fileStat = fs.statSync(srcFile); + if (!fileStat.isFile()) { + debugLogger.debug(`Skipping non-regular file: ${srcFile}`); + continue; + } + } catch { + debugLogger.debug(`Failed to stat file, skipping: ${srcFile}`); + continue; + } + // Ensure parent directory exists const destFileDir = path.dirname(destFile); if (!fs.existsSync(destFileDir)) { diff --git a/packages/core/src/extension/extensionManager.ts b/packages/core/src/extension/extensionManager.ts index 2da26995a..629de747a 100644 --- a/packages/core/src/extension/extensionManager.ts +++ b/packages/core/src/extension/extensionManager.ts @@ -1238,7 +1238,21 @@ export async function copyExtension( source: string, destination: string, ): Promise { - await fs.promises.cp(source, destination, { recursive: true }); + await fs.promises.cp(source, destination, { + recursive: true, + dereference: true, + filter: async (src: string) => { + try { + const stats = await fs.promises.stat(src); + // Only copy regular files and directories + // Skip sockets, FIFOs, block devices, and character devices + return stats.isFile() || stats.isDirectory(); + } catch { + // If we can't stat the file, skip it + return false; + } + }, + }); } export function getExtensionId( diff --git a/packages/core/src/extension/gemini-converter.ts b/packages/core/src/extension/gemini-converter.ts index 7f5c2d054..b5461369e 100644 --- a/packages/core/src/extension/gemini-converter.ts +++ b/packages/core/src/extension/gemini-converter.ts @@ -130,9 +130,24 @@ export async function copyDirectory( if (entry.isDirectory()) { await copyDirectory(sourcePath, destPath); - } else { + } else if (entry.isSymbolicLink()) { + // Resolve symlink and copy the target content + try { + const realPath = fs.realpathSync(sourcePath); + const targetStat = fs.statSync(realPath); + if (targetStat.isDirectory()) { + await copyDirectory(realPath, destPath); + } else if (targetStat.isFile()) { + fs.copyFileSync(realPath, destPath); + } + // Skip sockets, 
FIFOs, etc. + } catch { + // Skip broken symlinks + } + } else if (entry.isFile()) { fs.copyFileSync(sourcePath, destPath); } + // Skip sockets, FIFOs, block devices, and character devices } } diff --git a/packages/core/src/extension/github.test.ts b/packages/core/src/extension/github.test.ts index e98e6498a..8c31b1284 100644 --- a/packages/core/src/extension/github.test.ts +++ b/packages/core/src/extension/github.test.ts @@ -69,6 +69,8 @@ describe('git extension helpers', () => { await cloneFromGit(installMetadata, destination); expect(mockGit.clone).toHaveBeenCalledWith('http://my-repo.com', './', [ + '-c', + 'core.symlinks=true', '--depth', '1', ]); diff --git a/packages/core/src/extension/github.ts b/packages/core/src/extension/github.ts index 9e1d46ed4..5ef49d35b 100644 --- a/packages/core/src/extension/github.ts +++ b/packages/core/src/extension/github.ts @@ -75,7 +75,12 @@ export async function cloneFromGit( // We let git handle the source as is. } } - await git.clone(sourceUrl, './', ['--depth', '1']); + await git.clone(sourceUrl, './', [ + '-c', + 'core.symlinks=true', + '--depth', + '1', + ]); const remotes = await git.getRemotes(true); if (remotes.length === 0) { diff --git a/packages/core/src/ide/ide-client.test.ts b/packages/core/src/ide/ide-client.test.ts index 72f780896..88788fc57 100644 --- a/packages/core/src/ide/ide-client.test.ts +++ b/packages/core/src/ide/ide-client.test.ts @@ -14,8 +14,15 @@ import { type Mocked, type Mock, } from 'vitest'; -import { IdeClient, IDEConnectionStatus } from './ide-client.js'; +import { + IdeClient, + IDEConnectionStatus, + getIdeServerHost, + _resetCachedIdeServerHost, +} from './ide-client.js'; import * as fs from 'node:fs'; +import type { FileHandle } from 'node:fs/promises'; +import * as dns from 'node:dns'; import { getIdeProcessInfo } from './process-utils.js'; import { Client } from '@modelcontextprotocol/sdk/client/index.js'; import { StreamableHTTPClientTransport } from 
'@modelcontextprotocol/sdk/client/streamableHttp.js'; @@ -35,7 +42,17 @@ vi.mock('node:fs', async (importOriginal) => { stat: vi.fn(), }, realpathSync: (p: string) => p, - existsSync: () => false, + existsSync: vi.fn().mockReturnValue(false), + }; +}); +vi.mock('node:dns', async (importOriginal) => { + const actual = await importOriginal(); + return { + ...(actual as object), + promises: { + ...actual.promises, + lookup: vi.fn(), + }, }; }); vi.mock('./process-utils.js'); @@ -51,9 +68,13 @@ describe('IdeClient', () => { let mockStdioTransport: Mocked; beforeEach(async () => { - // Reset singleton instance for test isolation - (IdeClient as unknown as { instance: IdeClient | undefined }).instance = - undefined; + // Reset singleton instance and cached host for test isolation + ( + IdeClient as unknown as { + instancePromise: Promise | null; + } + ).instancePromise = null; + _resetCachedIdeServerHost(); // Mock environment variables process.env['QWEN_CODE_IDE_WORKSPACE_PATH'] = '/test/workspace'; @@ -94,6 +115,7 @@ describe('IdeClient', () => { }); afterEach(() => { + vi.useRealTimers(); vi.restoreAllMocks(); }); @@ -183,6 +205,49 @@ describe('IdeClient', () => { ); }); + it('should fall back to host.docker.internal when localhost fails in container', async () => { + process.env['QWEN_CODE_IDE_SERVER_PORT'] = '9090'; + vi.mocked(fs.promises.readFile).mockRejectedValue( + new Error('File not found'), + ); + ( + vi.mocked(fs.promises.readdir) as Mock< + (path: fs.PathLike) => Promise + > + ).mockResolvedValue([]); + vi.mocked(fs.existsSync).mockImplementation( + (filePath: fs.PathLike) => filePath === '/.dockerenv', + ); + (dns.promises.lookup as unknown as Mock).mockResolvedValue({ + address: '192.168.65.254', + family: 4, + }); + mockClient.connect + .mockRejectedValueOnce(new Error('localhost unreachable')) + .mockResolvedValueOnce(undefined); + + const ideClient = await IdeClient.getInstance(); + await ideClient.connect(); + + // Localhost is always tried first. 
+ expect(StreamableHTTPClientTransport).toHaveBeenNthCalledWith( + 1, + new URL('http://127.0.0.1:9090/mcp'), + expect.any(Object), + ); + // In a container, host.docker.internal is used as fallback. + expect(StreamableHTTPClientTransport).toHaveBeenNthCalledWith( + 2, + new URL('http://host.docker.internal:9090/mcp'), + expect.any(Object), + ); + expect(ideClient.getConnectionStatus().status).toBe( + IDEConnectionStatus.Connected, + ); + + delete process.env['QWEN_CODE_IDE_SERVER_PORT']; + }); + it('should connect using stdio when stdio config is in environment variables', async () => { vi.mocked(fs.promises.readFile).mockRejectedValue( new Error('File not found'), @@ -358,6 +423,107 @@ describe('IdeClient', () => { expect(result).toEqual(config); delete process.env['QWEN_CODE_IDE_SERVER_PORT']; }); + + it('should scan IDE lock directory when env and legacy config are unavailable', async () => { + const latestConfig = { + port: '2000', + workspacePath: '/test/workspace', + }; + + vi.mocked(fs.promises.readFile).mockImplementation( + async (filePath: fs.PathLike | FileHandle) => { + const file = String(filePath); + if (file === path.join('/tmp', 'qwen-code-ide-server-12345.json')) { + throw new Error('not found'); + } + if (file === path.join('/home/test', '.qwen', 'ide', '1000.lock')) { + return JSON.stringify({ + port: '1000', + workspacePath: '/older/workspace', + }); + } + if (file === path.join('/home/test', '.qwen', 'ide', '2000.lock')) { + return JSON.stringify(latestConfig); + } + throw new Error(`unexpected path: ${file}`); + }, + ); + ( + vi.mocked(fs.promises.readdir) as Mock< + (path: fs.PathLike) => Promise + > + ).mockResolvedValue(['1000.lock', '2000.lock']); + ( + vi.mocked(fs.promises.stat) as Mock< + (path: fs.PathLike) => Promise + > + ).mockImplementation(async (filePath: fs.PathLike) => { + const file = String(filePath); + return { + mtimeMs: file.endsWith('2000.lock') ? 
2000 : 1000, + } as fs.Stats; + }); + + const ideClient = await IdeClient.getInstance(); + const result = await ( + ideClient as unknown as { + getConnectionConfigFromFile: () => Promise; + } + ).getConnectionConfigFromFile(); + + expect(result).toEqual(latestConfig); + expect(fs.promises.readdir).toHaveBeenCalledWith( + path.join('/home/test', '.qwen', 'ide'), + ); + }); + + it('should return undefined when scanned lock files do not match current workspace', async () => { + vi.mocked(fs.promises.readFile).mockImplementation( + async (filePath: fs.PathLike | FileHandle) => { + const file = String(filePath); + if (file === path.join('/tmp', 'qwen-code-ide-server-12345.json')) { + throw new Error('not found'); + } + if (file === path.join('/home/test', '.qwen', 'ide', '1000.lock')) { + return JSON.stringify({ + port: '1000', + workspacePath: '/another/workspace', + }); + } + if (file === path.join('/home/test', '.qwen', 'ide', '2000.lock')) { + return JSON.stringify({ + port: '2000', + workspacePath: '/yet/another/workspace', + }); + } + throw new Error(`unexpected path: ${file}`); + }, + ); + ( + vi.mocked(fs.promises.readdir) as Mock< + (path: fs.PathLike) => Promise + > + ).mockResolvedValue(['1000.lock', '2000.lock']); + ( + vi.mocked(fs.promises.stat) as Mock< + (path: fs.PathLike) => Promise + > + ).mockImplementation(async (filePath: fs.PathLike) => { + const file = String(filePath); + return { + mtimeMs: file.endsWith('2000.lock') ? 
2000 : 1000, + } as fs.Stats; + }); + + const ideClient = await IdeClient.getInstance(); + const result = await ( + ideClient as unknown as { + getConnectionConfigFromFile: () => Promise; + } + ).getConnectionConfigFromFile(); + + expect(result).toBeUndefined(); + }); }); describe('isDiffingEnabled', () => { @@ -479,3 +645,120 @@ describe('IdeClient', () => { }); }); }); + +describe('getIdeServerHost', () => { + const dnsLookupMock = dns.promises.lookup as unknown as Mock; + + function mockDnsResolvable(reachable: boolean): void { + if (reachable) { + dnsLookupMock.mockResolvedValue({ address: '192.168.65.254', family: 4 }); + } else { + dnsLookupMock.mockRejectedValue(new Error('ENOTFOUND')); + } + } + + beforeEach(() => { + _resetCachedIdeServerHost(); + vi.mocked(fs.existsSync).mockReturnValue(false); + }); + + afterEach(() => { + vi.restoreAllMocks(); + }); + + it('should return 127.0.0.1 when not in a container', async () => { + const host = await getIdeServerHost(); + + expect(host).toBe('127.0.0.1'); + expect(dnsLookupMock).not.toHaveBeenCalled(); + }); + + it('should return host.docker.internal when in a container and the host is reachable', async () => { + vi.mocked(fs.existsSync).mockImplementation( + (filePath: fs.PathLike) => filePath === '/.dockerenv', + ); + mockDnsResolvable(true); + + const host = await getIdeServerHost(); + + expect(host).toBe('host.docker.internal'); + expect(dnsLookupMock).toHaveBeenCalledWith('host.docker.internal'); + }); + + it('should fall back to 127.0.0.1 when in a container but host.docker.internal is not reachable', async () => { + vi.mocked(fs.existsSync).mockImplementation( + (filePath: fs.PathLike) => filePath === '/.dockerenv', + ); + mockDnsResolvable(false); + + const host = await getIdeServerHost(); + + expect(host).toBe('127.0.0.1'); + expect(dnsLookupMock).toHaveBeenCalledWith('host.docker.internal'); + }); + + it('should detect container via /run/.containerenv', async () => { + 
vi.mocked(fs.existsSync).mockImplementation( + (filePath: fs.PathLike) => filePath === '/run/.containerenv', + ); + mockDnsResolvable(true); + + const host = await getIdeServerHost(); + + expect(host).toBe('host.docker.internal'); + }); + + it('should cache the result and not perform DNS lookup again', async () => { + vi.mocked(fs.existsSync).mockImplementation( + (filePath: fs.PathLike) => filePath === '/.dockerenv', + ); + mockDnsResolvable(true); + + const host1 = await getIdeServerHost(); + const host2 = await getIdeServerHost(); + + expect(host1).toBe('host.docker.internal'); + expect(host2).toBe('host.docker.internal'); + expect(dnsLookupMock).toHaveBeenCalledTimes(1); + }); + + it('should fall back to 127.0.0.1 when DNS lookup times out in a container', async () => { + vi.useFakeTimers(); + vi.mocked(fs.existsSync).mockImplementation( + (filePath: fs.PathLike) => filePath === '/.dockerenv', + ); + // Simulate dns.promises.lookup that never resolves + dnsLookupMock.mockReturnValue(new Promise(() => {})); + + const hostPromise = getIdeServerHost(); + await vi.advanceTimersByTimeAsync(3000); + const host = await hostPromise; + + expect(host).toBe('127.0.0.1'); + expect(dnsLookupMock).toHaveBeenCalledWith('host.docker.internal'); + }); + + it('should perform only one DNS lookup when called concurrently', async () => { + vi.useRealTimers(); + vi.mocked(fs.existsSync).mockImplementation( + (filePath: fs.PathLike) => filePath === '/.dockerenv', + ); + + // Simulate a slow DNS lookup + dnsLookupMock.mockImplementation( + () => + new Promise((resolve) => + setTimeout( + () => resolve({ address: '192.168.65.254', family: 4 }), + 50, + ), + ), + ); + + const promises = Array.from({ length: 5 }, () => getIdeServerHost()); + const results = await Promise.all(promises); + + expect(results.every((r) => r === 'host.docker.internal')).toBe(true); + expect(dnsLookupMock).toHaveBeenCalledTimes(1); + }); +}); diff --git a/packages/core/src/ide/ide-client.ts 
b/packages/core/src/ide/ide-client.ts index d839004ad..b4835e30e 100644 --- a/packages/core/src/ide/ide-client.ts +++ b/packages/core/src/ide/ide-client.ts @@ -4,6 +4,7 @@ * SPDX-License-Identifier: Apache-2.0 */ +import * as dns from 'node:dns'; import * as fs from 'node:fs'; import { isSubpath } from '../utils/paths.js'; import { detectIde, type IdeInfo } from '../ide/detect-ide.js'; @@ -585,7 +586,33 @@ export class IdeClient { } // Legacy discovery for VSCode extension < v0.5.1. - return this.getLegacyConnectionConfig(portFromEnv); + const legacyConfig = await this.getLegacyConnectionConfig(portFromEnv); + if (legacyConfig) { + return legacyConfig; + } + + // Scan lock directory as a last resort when neither env var nor legacy + // file is available (e.g. code-server where the env var is not injected). + // Configs are sorted by modification time (most recent first). Pick the + // first one whose workspace matches the current working directory. + if (!portFromEnv) { + const ideDir = Storage.getGlobalIdeDir(); + const configs = await this.getAllConnectionConfigs(ideDir); + if (configs.length > 0) { + debugLogger.debug( + `Discovered ${configs.length} IDE lock file(s) via directory scan`, + ); + const cwd = process.cwd(); + const match = configs.find( + (c) => + c.workspacePath !== undefined && + IdeClient.validateWorkspacePath(c.workspacePath, cwd).isValid, + ); + return match; + } + } + + return undefined; } // Legacy connection files were written in the global temp directory. 
@@ -671,11 +698,13 @@ export class IdeClient { .map(({ parsed }) => parsed); } - private createProxyAwareFetch() { - // ignore proxy for '127.0.0.1' by deafult to allow connecting to the ide mcp server + private createProxyAwareFetch(ideHost: string) { + // Ignore proxy for IDE server host to allow connecting to the ide mcp + // server even when HTTP_PROXY is set const existingNoProxy = process.env['NO_PROXY'] || ''; + const noProxyHosts = [existingNoProxy, ideHost]; const agent = new EnvHttpProxyAgent({ - noProxy: [existingNoProxy, '127.0.0.1'].filter(Boolean).join(','), + noProxy: noProxyHosts.filter(Boolean).join(','), }); const undiciPromise = import('undici'); return async (url: string | URL, init?: RequestInit): Promise => { @@ -778,9 +807,34 @@ export class IdeClient { } private async establishHttpConnection(port: string): Promise { + // Always try localhost first. This covers the most common scenarios: + // non-container environments, and code-server where the extension runs + // inside the same container as the CLI. + const connected = await this.tryHttpConnect(port, LOCAL_HOST); + if (connected) { + return true; + } + + // If localhost failed and we are inside a container, the IDE server may + // be running on the host machine (e.g. VS Code Dev Containers). Try + // host.docker.internal as a fallback when it is DNS-resolvable. 
+ const ideHost = await getIdeServerHost(); + if (ideHost === CONTAINER_HOST) { + debugLogger.debug( + `Connection to ${LOCAL_HOST}:${port} failed, retrying with ${CONTAINER_HOST}`, + ); + return this.tryHttpConnect(port, CONTAINER_HOST); + } + + return false; + } + + private async tryHttpConnect(port: string, host: string): Promise { let transport: StreamableHTTPClientTransport | undefined; try { - debugLogger.debug('Attempting to connect to IDE via HTTP SSE'); + debugLogger.debug( + `Attempting to connect to IDE via HTTP at ${host}:${port}`, + ); this.client = new Client({ name: 'streamable-http-client', // TODO(#3487): use the CLI version here. @@ -788,9 +842,9 @@ export class IdeClient { }); transport = new StreamableHTTPClientTransport( - new URL(`http://${getIdeServerHost()}:${port}/mcp`), + new URL(`http://${host}:${port}/mcp`), { - fetch: this.createProxyAwareFetch(), + fetch: this.createProxyAwareFetch(host), requestInit: { headers: this.authToken ? { Authorization: `Bearer ${this.authToken}` } @@ -806,7 +860,8 @@ export class IdeClient { await this.discoverTools(); this.setState(IDEConnectionStatus.Connected); return true; - } catch (_error) { + } catch (error) { + debugLogger.debug(`HTTP connection to ${host}:${port} failed:`, error); if (transport) { try { await transport.close(); @@ -853,8 +908,76 @@ export class IdeClient { } } -function getIdeServerHost() { +const CONTAINER_HOST = 'host.docker.internal'; +const LOCAL_HOST = '127.0.0.1'; +const DNS_LOOKUP_TIMEOUT_MS = 3_000; + +/** + * Cached promise for IDE server host. Caching the promise itself handles both + * result caching and concurrent-call deduplication in one mechanism: a resolved + * promise returns instantly, and a pending promise is shared across callers. + */ +let hostPromise: Promise | undefined; + +/** + * Reset the cached host promise. Exported for testing only. 
+ * @internal + */ +export function _resetCachedIdeServerHost(): void { + hostPromise = undefined; +} + +/** + * Check if a hostname is DNS-resolvable, with a timeout guard. + */ +async function isHostResolvable(hostname: string): Promise { + try { + const timeout = new Promise((_, reject) => { + const timer = setTimeout( + () => reject(new Error('DNS lookup timeout')), + DNS_LOOKUP_TIMEOUT_MS, + ); + timer.unref?.(); + }); + await Promise.race([dns.promises.lookup(hostname), timeout]); + return true; + } catch { + return false; + } +} + +/** + * Determine the IDE server host to connect to. + * + * In container environments (`/.dockerenv` or `/run/.containerenv`), verify + * `host.docker.internal` is DNS-resolvable and use it if reachable. + * Otherwise fall back to `127.0.0.1`. + * + * Results are cached; concurrent calls share a single lookup. + */ +async function resolveIdeServerHost(): Promise { const isInContainer = fs.existsSync('/.dockerenv') || fs.existsSync('/run/.containerenv'); - return isInContainer ? 
'host.docker.internal' : '127.0.0.1'; + + if (!isInContainer) { + return LOCAL_HOST; + } + + const reachable = await isHostResolvable(CONTAINER_HOST); + if (reachable) { + debugLogger.debug('Container detected, host.docker.internal is reachable'); + return CONTAINER_HOST; + } + + debugLogger.debug( + 'Container detected, but host.docker.internal is NOT reachable, falling back to 127.0.0.1', + ); + return LOCAL_HOST; +} + +export async function getIdeServerHost(): Promise { + if (!hostPromise) { + hostPromise = resolveIdeServerHost(); + } + return hostPromise; } diff --git a/packages/core/src/services/fileSystemService.test.ts b/packages/core/src/services/fileSystemService.test.ts index 69898f72d..fe72829e2 100644 --- a/packages/core/src/services/fileSystemService.test.ts +++ b/packages/core/src/services/fileSystemService.test.ts @@ -10,6 +10,20 @@ import { StandardFileSystemService } from './fileSystemService.js'; vi.mock('fs/promises'); +vi.mock('../utils/fileUtils.js', async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + readFileWithEncoding: vi.fn(), + readFileWithEncodingInfo: vi.fn(), + }; +}); + +import { + readFileWithEncoding, + readFileWithEncodingInfo, +} from '../utils/fileUtils.js'; + describe('StandardFileSystemService', () => { let fileSystem: StandardFileSystemService; @@ -23,19 +37,19 @@ describe('StandardFileSystemService', () => { }); describe('readTextFile', () => { - it('should read file content using fs', async () => { + it('should read file content using readFileWithEncoding', async () => { const testContent = 'Hello, World!'; - vi.mocked(fs.readFile).mockResolvedValue(testContent); + vi.mocked(readFileWithEncoding).mockResolvedValue(testContent); const result = await fileSystem.readTextFile('/test/file.txt'); - expect(fs.readFile).toHaveBeenCalledWith('/test/file.txt', 'utf-8'); + expect(readFileWithEncoding).toHaveBeenCalledWith('/test/file.txt'); expect(result).toBe(testContent); }); - it('should 
propagate fs.readFile errors', async () => { + it('should propagate readFileWithEncoding errors', async () => { const error = new Error('ENOENT: File not found'); - vi.mocked(fs.readFile).mockRejectedValue(error); + vi.mocked(readFileWithEncoding).mockRejectedValue(error); await expect(fileSystem.readTextFile('/test/file.txt')).rejects.toThrow( 'ENOENT: File not found', @@ -43,6 +57,42 @@ describe('StandardFileSystemService', () => { }); }); + describe('readTextFileWithInfo', () => { + it('should return content, encoding, and bom via readFileWithEncodingInfo', async () => { + const mockResult = { content: 'Hello', encoding: 'utf-8', bom: false }; + vi.mocked(readFileWithEncodingInfo).mockResolvedValue(mockResult); + + const result = await fileSystem.readTextFileWithInfo('/test/file.txt'); + + expect(readFileWithEncodingInfo).toHaveBeenCalledWith('/test/file.txt'); + expect(result).toEqual(mockResult); + }); + + it('should return non-UTF-8 encoding info for GBK file', async () => { + const mockResult = { + content: '你好世界', + encoding: 'gb18030', + bom: false, + }; + vi.mocked(readFileWithEncodingInfo).mockResolvedValue(mockResult); + + const result = await fileSystem.readTextFileWithInfo('/test/gbk.txt'); + + expect(result.encoding).toBe('gb18030'); + expect(result.bom).toBe(false); + expect(result.content).toBe('你好世界'); + }); + + it('should propagate readFileWithEncodingInfo errors', async () => { + const error = new Error('ENOENT: File not found'); + vi.mocked(readFileWithEncodingInfo).mockRejectedValue(error); + + await expect( + fileSystem.readTextFileWithInfo('/test/file.txt'), + ).rejects.toThrow('ENOENT: File not found'); + }); + }); + describe('writeTextFile', () => { it('should write file content using fs', async () => { vi.mocked(fs.writeFile).mockResolvedValue(); @@ -120,6 +170,67 @@ describe('StandardFileSystemService', () => { } expect(bomCount).toBe(1); }); + it('should write file with non-UTF-8 encoding using iconv-lite', async () => { + 
vi.mocked(fs.writeFile).mockResolvedValue(); + + await fileSystem.writeTextFile('/test/file.txt', '你好世界', { + encoding: 'gbk', + }); + + // Verify that fs.writeFile was called with a Buffer (iconv-encoded) + const writeCall = vi.mocked(fs.writeFile).mock.calls[0]; + expect(writeCall[0]).toBe('/test/file.txt'); + expect(writeCall[1]).toBeInstanceOf(Buffer); + }); + + it('should write file as UTF-8 when encoding is utf-8', async () => { + vi.mocked(fs.writeFile).mockResolvedValue(); + + await fileSystem.writeTextFile('/test/file.txt', 'Hello', { + encoding: 'utf-8', + }); + + expect(fs.writeFile).toHaveBeenCalledWith( + '/test/file.txt', + 'Hello', + 'utf-8', + ); + }); + + it('should preserve UTF-16LE BOM when writing back a UTF-16LE file', async () => { + vi.mocked(fs.writeFile).mockResolvedValue(); + + await fileSystem.writeTextFile('/test/file.txt', 'Hello', { + encoding: 'utf-16le', + bom: true, + }); + + // iconv-lite encodes as UTF-16LE; with bom:true the FF FE BOM is prepended + const writeCall = vi.mocked(fs.writeFile).mock.calls[0]; + expect(writeCall[0]).toBe('/test/file.txt'); + expect(writeCall[1]).toBeInstanceOf(Buffer); + const buf = writeCall[1] as Buffer; + // First two bytes must be the UTF-16LE BOM: FF FE + expect(buf[0]).toBe(0xff); + expect(buf[1]).toBe(0xfe); + }); + + it('should not add BOM when writing UTF-16LE file without bom flag', async () => { + vi.mocked(fs.writeFile).mockResolvedValue(); + + await fileSystem.writeTextFile('/test/file.txt', 'Hello', { + encoding: 'utf-16le', + bom: false, + }); + + // No BOM prepended — raw iconv-encoded buffer written directly + const writeCall = vi.mocked(fs.writeFile).mock.calls[0]; + expect(writeCall[0]).toBe('/test/file.txt'); + expect(writeCall[1]).toBeInstanceOf(Buffer); + const buf = writeCall[1] as Buffer; + // First two bytes should NOT be FF FE (the UTF-16LE BOM) + expect(!(buf[0] === 0xff && buf[1] === 0xfe)).toBe(true); + }); }); describe('detectFileBOM', () => { diff --git 
a/packages/core/src/services/fileSystemService.ts b/packages/core/src/services/fileSystemService.ts index 91f36161c..787d68929 100644 --- a/packages/core/src/services/fileSystemService.ts +++ b/packages/core/src/services/fileSystemService.ts @@ -7,6 +7,16 @@ import fs from 'node:fs/promises'; import * as path from 'node:path'; import { globSync } from 'glob'; +import { + readFileWithEncoding, + readFileWithEncodingInfo, +} from '../utils/fileUtils.js'; +import type { FileReadResult } from '../utils/fileUtils.js'; +import { + iconvEncode, + iconvEncodingExists, + isUtf8CompatibleEncoding, +} from '../utils/iconvHelper.js'; /** * Supported file encodings for new files. @@ -33,6 +43,15 @@ export interface FileSystemService { */ readTextFile(filePath: string): Promise; + /** + * Read text content from a file, returning both the content and encoding metadata. + * Combines readTextFile + detectFileBOM + detectFileEncoding into a single I/O pass. + * + * @param filePath - The path to the file to read + * @returns The file content, encoding name, and whether a UTF-8 BOM was present + */ + readTextFileWithInfo(filePath: string): Promise; + /** * Write text content to a file * @@ -74,6 +93,14 @@ export interface WriteTextFileOptions { * @default false */ bom?: boolean; + + /** + * The encoding to use when writing the file. + * If specified and not UTF-8 compatible, iconv-lite will be used to encode. + * This is used to preserve the original encoding of non-UTF-8 files (e.g. GBK, Big5). + * @default undefined (writes as UTF-8) + */ + encoding?: string; } /** @@ -92,12 +119,44 @@ function hasUTF8BOM(buffer: Buffer): boolean { ); } +/** + * Return the BOM byte sequence for a given encoding name, or null if the + * encoding does not use a standard BOM. Used when writing back a file that + * originally had a BOM so the BOM is preserved. 
+ */ +function getBOMBytesForEncoding(encoding: string): Buffer | null { + const lower = encoding.toLowerCase().replace(/[^a-z0-9]/g, ''); + switch (lower) { + case 'utf8': + return Buffer.from([0xef, 0xbb, 0xbf]); + case 'utf16le': + case 'utf16': + return Buffer.from([0xff, 0xfe]); + case 'utf16be': + return Buffer.from([0xfe, 0xff]); + case 'utf32le': + case 'utf32': + return Buffer.from([0xff, 0xfe, 0x00, 0x00]); + case 'utf32be': + return Buffer.from([0x00, 0x00, 0xfe, 0xff]); + default: + return null; + } +} + /** * Standard file system implementation */ export class StandardFileSystemService implements FileSystemService { async readTextFile(filePath: string): Promise { - return fs.readFile(filePath, FileEncoding.UTF8); + // Use encoding-aware reader that handles BOM and non-UTF-8 encodings (e.g. GBK) + return readFileWithEncoding(filePath); + } + + async readTextFileWithInfo(filePath: string): Promise { + // Single I/O pass: returns content, encoding, and BOM flag together, + // eliminating the need for separate detectFileEncoding / detectFileBOM calls. + return readFileWithEncodingInfo(filePath); } async writeTextFile( @@ -106,10 +165,32 @@ export class StandardFileSystemService implements FileSystemService { options?: WriteTextFileOptions, ): Promise { const bom = options?.bom ?? false; + const encoding = options?.encoding; - if (bom) { - // Prepend UTF-8 BOM (EF BB BF) - // If content already starts with BOM character, strip it first to avoid double BOM + // Check if a non-UTF-8 encoding is specified and supported by iconv-lite + const isNonUtf8Encoding = + encoding && + !isUtf8CompatibleEncoding(encoding) && + iconvEncodingExists(encoding); + + if (isNonUtf8Encoding) { + // Non-UTF-8 encoding (e.g. GBK, Big5, Shift_JIS, UTF-16LE, UTF-32BE…) + // Use iconv-lite to encode the content. When the file originally had a BOM + // (bom: true), prepend the correct BOM bytes for this encoding so the + // byte-order mark is preserved on write-back. 
+ const encoded = iconvEncode(content, encoding); + if (bom) { + const bomBytes = getBOMBytesForEncoding(encoding); + await fs.writeFile( + filePath, + bomBytes ? Buffer.concat([bomBytes, encoded]) : encoded, + ); + } else { + await fs.writeFile(filePath, encoded); + } + } else if (bom) { + // UTF-8 BOM: prepend EF BB BF + // If content already starts with the BOM character, strip it first to avoid double BOM. const normalizedContent = content.charCodeAt(0) === 0xfeff ? content.slice(1) : content; const bomBuffer = Buffer.from([0xef, 0xbb, 0xbf]); diff --git a/packages/core/src/skills/skill-load.ts b/packages/core/src/skills/skill-load.ts index dc6f2c616..639b85071 100644 --- a/packages/core/src/skills/skill-load.ts +++ b/packages/core/src/skills/skill-load.ts @@ -3,6 +3,7 @@ import * as fs from 'fs/promises'; import * as path from 'path'; import { parse as parseYaml } from '../utils/yaml-parser.js'; import { createDebugLogger } from '../utils/debugLogger.js'; +import { normalizeContent } from '../utils/textUtils.js'; const debugLogger = createDebugLogger('SKILL_LOAD'); @@ -56,21 +57,6 @@ export async function loadSkillsFromDir( } } -/** - * Normalizes skill file content for consistent parsing across platforms. - * - Strips UTF-8 BOM to ensure frontmatter starts at the first character. - * - Normalizes line endings so skills authored on Windows (CRLF) parse correctly. - */ -function normalizeSkillFileContent(content: string): string { - // Strip UTF-8 BOM to ensure frontmatter starts at the first character. - let normalized = content.replace(/^\uFEFF/, ''); - - // Normalize line endings so skills authored on Windows (CRLF) parse correctly. 
- normalized = normalized.replace(/\r\n/g, '\n').replace(/\r/g, '\n'); - - return normalized; -} - export function parseSkillContent( content: string, filePath: string, @@ -78,7 +64,7 @@ export function parseSkillContent( debugLogger.debug(`Parsing skill content from: ${filePath}`); // Normalize content to handle BOM and CRLF line endings - const normalizedContent = normalizeSkillFileContent(content); + const normalizedContent = normalizeContent(content); // Split frontmatter and content // Use (?:\n|$) to allow frontmatter ending with or without trailing newline diff --git a/packages/core/src/skills/skill-manager.ts b/packages/core/src/skills/skill-manager.ts index 8ee69e9a0..05eabdd5a 100644 --- a/packages/core/src/skills/skill-manager.ts +++ b/packages/core/src/skills/skill-manager.ts @@ -20,6 +20,7 @@ import { SkillError, SkillErrorCode } from './types.js'; import type { Config } from '../config/config.js'; import { validateConfig } from './skill-load.js'; import { createDebugLogger } from '../utils/debugLogger.js'; +import { normalizeContent } from '../utils/textUtils.js'; const debugLogger = createDebugLogger('SKILL_MANAGER'); @@ -333,7 +334,7 @@ export class SkillManager { level: SkillLevel, ): SkillConfig { try { - const normalizedContent = normalizeSkillFileContent(content); + const normalizedContent = normalizeContent(content); // Split frontmatter and content const frontmatterRegex = /^---\n([\s\S]*?)\n---(?:\n|$)([\s\S]*)$/; @@ -649,13 +650,3 @@ export class SkillManager { } } } - -function normalizeSkillFileContent(content: string): string { - // Strip UTF-8 BOM to ensure frontmatter starts at the first character. - let normalized = content.replace(/^\uFEFF/, ''); - - // Normalize line endings so skills authored on Windows (CRLF) parse correctly. 
- normalized = normalized.replace(/\r\n/g, '\n').replace(/\r/g, '\n'); - - return normalized; -} diff --git a/packages/core/src/subagents/subagent-manager.test.ts b/packages/core/src/subagents/subagent-manager.test.ts index e04964ea1..cf3afb4c8 100644 --- a/packages/core/src/subagents/subagent-manager.test.ts +++ b/packages/core/src/subagents/subagent-manager.test.ts @@ -193,6 +193,21 @@ You are a helpful assistant. expect(config.filePath).toBe(validConfig.filePath); }); + it('should parse valid markdown content with CRLF line endings', () => { + const markdownWithCRLF = `---\r\nname: test-agent\r\ndescription: A test subagent\r\n---\r\n\r\nYou are a helpful assistant.\r\n`; + const config = manager.parseSubagentContent( + markdownWithCRLF, + validConfig.filePath!, + 'project', + ); + + expect(config.name).toBe('test-agent'); + expect(config.description).toBe('A test subagent'); + // The system prompt logic applies .trim(), so the trailing \r is removed regardless, + // but the central test is that frontmatterRegex didn't throw an error. 
+ expect(config.systemPrompt).toBe('You are a helpful assistant.'); + }); + it('should parse content with tools', () => { const markdownWithTools = `--- name: test-agent diff --git a/packages/core/src/subagents/subagent-manager.ts b/packages/core/src/subagents/subagent-manager.ts index fea33040c..0552fa60c 100644 --- a/packages/core/src/subagents/subagent-manager.ts +++ b/packages/core/src/subagents/subagent-manager.ts @@ -29,6 +29,7 @@ import { SubagentValidator } from './validation.js'; import { SubAgentScope } from './subagent.js'; import type { Config } from '../config/config.js'; import { createDebugLogger } from '../utils/debugLogger.js'; +import { normalizeContent } from '../utils/textUtils.js'; const debugLogger = createDebugLogger('SUBAGENT_MANAGER'); import { BuiltinAgentRegistry } from './builtin-agents.js'; @@ -908,9 +909,11 @@ function parseSubagentContent( validator: SubagentValidator, ): SubagentConfig { try { + const normalizedContent = normalizeContent(content); + // Split frontmatter and content const frontmatterRegex = /^---\n([\s\S]*?)\n---\n([\s\S]*)$/; - const match = content.match(frontmatterRegex); + const match = normalizedContent.match(frontmatterRegex); if (!match) { throw new Error('Invalid format: missing YAML frontmatter'); diff --git a/packages/core/src/tools/edit.ts b/packages/core/src/tools/edit.ts index 016eb2854..61a318190 100644 --- a/packages/core/src/tools/edit.ts +++ b/packages/core/src/tools/edit.ts @@ -108,6 +108,10 @@ interface CalculatedEdit { occurrences: number; error?: { display: string; raw: string; type: ToolErrorType }; isNewFile: boolean; + /** Detected encoding of the existing file (e.g. 
'utf-8', 'gbk') */ + encoding: string; + /** Whether the existing file has a UTF-8 BOM */ + bom: boolean; } class EditToolInvocation implements ToolInvocation { @@ -134,17 +138,22 @@ class EditToolInvocation implements ToolInvocation { let finalNewString = params.new_string; let finalOldString = params.old_string; let occurrences = 0; + let encoding = 'utf-8'; + let bom = false; let error: | { display: string; raw: string; type: ToolErrorType } | undefined = undefined; try { - currentContent = await this.config + const fileInfo = await this.config .getFileSystemService() - .readTextFile(params.file_path); + .readTextFileWithInfo(params.file_path); // Normalize line endings to LF for consistent processing. - currentContent = currentContent.replace(/\r\n/g, '\n'); + currentContent = fileInfo.content.replace(/\r\n/g, '\n'); fileExists = true; + // Encoding and BOM are returned from the same I/O pass, avoiding redundant reads. + encoding = fileInfo.encoding; + bom = fileInfo.bom; } catch (err: unknown) { if (!isNodeError(err) || err.code !== 'ENOENT') { // Rethrow unexpected FS errors (permissions, etc.) 
@@ -238,6 +247,8 @@ class EditToolInvocation implements ToolInvocation { occurrences, error, isNewFile, + encoding, + bom, }; } @@ -373,7 +384,7 @@ class EditToolInvocation implements ToolInvocation { this.ensureParentDirectoriesExist(this.params.file_path); // For new files, apply default file encoding setting - // For existing files, keep original content as-is (including any BOM character) + // For existing files, preserve the original encoding (BOM and charset) if (editData.isNewFile) { const useBOM = this.config.getDefaultFileEncoding() === FileEncoding.UTF8_BOM; @@ -385,7 +396,10 @@ class EditToolInvocation implements ToolInvocation { } else { await this.config .getFileSystemService() - .writeTextFile(this.params.file_path, editData.newContent); + .writeTextFile(this.params.file_path, editData.newContent, { + bom: editData.bom, + encoding: editData.encoding, + }); } const fileName = path.basename(this.params.file_path); diff --git a/packages/core/src/tools/write-file.test.ts b/packages/core/src/tools/write-file.test.ts index b0d7a2b0d..e096b0a72 100644 --- a/packages/core/src/tools/write-file.test.ts +++ b/packages/core/src/tools/write-file.test.ts @@ -759,6 +759,7 @@ describe('WriteFileTool', () => { // Verify writeTextFile was called with bom: true expect(writeSpy).toHaveBeenCalledWith(filePath, newContent, { bom: true, + encoding: 'utf-8', }); // Cleanup @@ -785,6 +786,7 @@ describe('WriteFileTool', () => { // Verify writeTextFile was called with bom: false expect(writeSpy).toHaveBeenCalledWith(filePath, newContent, { bom: false, + encoding: 'utf-8', }); // Cleanup diff --git a/packages/core/src/tools/write-file.ts b/packages/core/src/tools/write-file.ts index 1ccb7bf0b..4085e3b69 100644 --- a/packages/core/src/tools/write-file.ts +++ b/packages/core/src/tools/write-file.ts @@ -243,17 +243,25 @@ class WriteFileToolInvocation extends BaseToolInvocation< // Check if file exists and has BOM to preserve encoding // For new files, use the configured default 
encoding let useBOM = false; + let detectedEncoding: string | undefined; if (!isNewFile) { - useBOM = await this.config + // Use readTextFileWithInfo for a single I/O pass that returns encoding + // and BOM metadata together, avoiding separate detectFileBOM / detectFileEncoding calls. + const fileInfo = await this.config .getFileSystemService() - .detectFileBOM(file_path); + .readTextFileWithInfo(file_path); + useBOM = fileInfo.bom; + detectedEncoding = fileInfo.encoding; } else { useBOM = this.config.getDefaultFileEncoding() === FileEncoding.UTF8_BOM; } await this.config .getFileSystemService() - .writeTextFile(file_path, fileContent, { bom: useBOM }); + .writeTextFile(file_path, fileContent, { + bom: useBOM, + encoding: detectedEncoding, + }); // Generate diff for display result const fileName = path.basename(file_path); diff --git a/packages/core/src/utils/fileUtils.test.ts b/packages/core/src/utils/fileUtils.test.ts index b21ee79e2..6dc38e4d7 100644 --- a/packages/core/src/utils/fileUtils.test.ts +++ b/packages/core/src/utils/fileUtils.test.ts @@ -28,6 +28,8 @@ import { processSingleFileContent, detectBOM, readFileWithEncoding, + readFileWithEncodingInfo, + detectFileEncoding, fileExists, } from './fileUtils.js'; import type { Config } from '../config/config.js'; @@ -407,6 +409,153 @@ describe('fileUtils', () => { const result = await readFileWithEncoding(filePath); expect(result).toBe(''); }); + + it('should read GBK-encoded file with Chinese characters correctly', async () => { + // GBK encoding of "你好世界这是中文内容用于测试编码检测" + // Needs enough content for chardet to reliably detect the encoding + const gbkBuffer = Buffer.from([ + 0xc4, 0xe3, 0xba, 0xc3, 0xca, 0xc0, 0xbd, 0xe7, 0xd5, 0xe2, 0xca, + 0xc7, 0xd6, 0xd0, 0xce, 0xc4, 0xc4, 0xda, 0xc8, 0xdd, 0xd3, 0xc3, + 0xd3, 0xda, 0xb2, 0xe2, 0xca, 0xd4, 0xb1, 0xe0, 0xc2, 0xeb, 0xbc, + 0xec, 0xb2, 0xe2, + ]); + const filePath = path.join(testDir, 'gbk-chinese.txt'); + await fsPromises.writeFile(filePath, gbkBuffer); + + 
const result = await readFileWithEncoding(filePath); + expect(result).toBe('你好世界这是中文内容用于测试编码检测'); + }); + + it('should read GBK-encoded file with mixed ASCII and Chinese correctly', async () => { + // GBK encoding of "// 这是注释内容用于测试\nhello你好世界测试中文编码检测\n函数返回值正确" + // Needs enough Chinese content for chardet to reliably detect as GB18030/GBK + const gbkBuffer = Buffer.from([ + 0x2f, 0x2f, 0x20, 0xd5, 0xe2, 0xca, 0xc7, 0xd7, 0xa2, 0xca, 0xcd, + 0xc4, 0xda, 0xc8, 0xdd, 0xd3, 0xc3, 0xd3, 0xda, 0xb2, 0xe2, 0xca, + 0xd4, 0x0a, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0xc4, 0xe3, 0xba, 0xc3, + 0xca, 0xc0, 0xbd, 0xe7, 0xb2, 0xe2, 0xca, 0xd4, 0xd6, 0xd0, 0xce, + 0xc4, 0xb1, 0xe0, 0xc2, 0xeb, 0xbc, 0xec, 0xb2, 0xe2, 0x0a, 0xba, + 0xaf, 0xca, 0xfd, 0xb7, 0xb5, 0xbb, 0xd8, 0xd6, 0xb5, 0xd5, 0xfd, + 0xc8, 0xb7, + ]); + const filePath = path.join(testDir, 'gbk-mixed.txt'); + await fsPromises.writeFile(filePath, gbkBuffer); + + const result = await readFileWithEncoding(filePath); + expect(result).toContain('hello'); + expect(result).toContain('你好世界'); + expect(result).toContain('函数返回值正确'); + }); + }); + + describe('readFileWithEncodingInfo', () => { + it('should return bom: false and encoding utf-8 for plain UTF-8 file', async () => { + const filePath = path.join(testDir, 'info-utf8.txt'); + await fsPromises.writeFile(filePath, 'Hello', 'utf8'); + + const result = await readFileWithEncodingInfo(filePath); + expect(result.content).toBe('Hello'); + expect(result.encoding).toBe('utf-8'); + expect(result.bom).toBe(false); + }); + + it('should return bom: true and encoding utf-8 for UTF-8 BOM file', async () => { + const utf8Bom = Buffer.from([0xef, 0xbb, 0xbf]); + const filePath = path.join(testDir, 'info-utf8-bom.txt'); + await fsPromises.writeFile( + filePath, + Buffer.concat([utf8Bom, Buffer.from('Hello', 'utf8')]), + ); + + const result = await readFileWithEncodingInfo(filePath); + expect(result.content).toBe('Hello'); + expect(result.encoding).toBe('utf-8'); + expect(result.bom).toBe(true); 
+ }); + + it('should return bom: true and encoding utf-16le for UTF-16LE BOM file', async () => { + const utf16leBom = Buffer.from([0xff, 0xfe]); + const utf16leContent = Buffer.from('Hi', 'utf16le'); + const filePath = path.join(testDir, 'info-utf16le.txt'); + await fsPromises.writeFile( + filePath, + Buffer.concat([utf16leBom, utf16leContent]), + ); + + const result = await readFileWithEncodingInfo(filePath); + expect(result.content).toBe('Hi'); + expect(result.encoding).toBe('utf-16le'); + // Non-UTF-8 BOM should also be flagged so it is preserved on write-back + expect(result.bom).toBe(true); + }); + + it('should return bom: false for GBK file (no BOM)', async () => { + const gbkBuffer = Buffer.from([ + 0xc4, 0xe3, 0xba, 0xc3, 0xca, 0xc0, 0xbd, 0xe7, 0xd5, 0xe2, 0xca, + 0xc7, 0xd6, 0xd0, 0xce, 0xc4, 0xc4, 0xda, 0xc8, 0xdd, 0xd3, 0xc3, + 0xd3, 0xda, 0xb2, 0xe2, 0xca, 0xd4, 0xb1, 0xe0, 0xc2, 0xeb, 0xbc, + 0xec, 0xb2, 0xe2, + ]); + const filePath = path.join(testDir, 'info-gbk.txt'); + await fsPromises.writeFile(filePath, gbkBuffer); + + const result = await readFileWithEncodingInfo(filePath); + expect(result.bom).toBe(false); + expect(result.encoding).toBe('gb18030'); + expect(result.content).toBe('你好世界这是中文内容用于测试编码检测'); + }); + }); + + describe('detectFileEncoding', () => { + it('should detect UTF-8 for plain ASCII file', async () => { + const filePath = path.join(testDir, 'ascii.txt'); + await fsPromises.writeFile(filePath, 'Hello World', 'utf8'); + + const encoding = await detectFileEncoding(filePath); + expect(encoding).toBe('utf-8'); + }); + + it('should detect UTF-8 for file with UTF-8 BOM', async () => { + const utf8Bom = Buffer.from([0xef, 0xbb, 0xbf]); + const content = Buffer.from('Hello', 'utf8'); + const filePath = path.join(testDir, 'utf8-bom-detect.txt'); + await fsPromises.writeFile(filePath, Buffer.concat([utf8Bom, content])); + + const encoding = await detectFileEncoding(filePath); + expect(encoding).toBe('utf-8'); + }); + + it('should detect GBK 
encoding for Chinese text in GBK', async () => { + // GBK encoding of "你好世界这是中文内容用于测试编码检测" + // Needs enough content for chardet to reliably detect + const gbkBuffer = Buffer.from([ + 0xc4, 0xe3, 0xba, 0xc3, 0xca, 0xc0, 0xbd, 0xe7, 0xd5, 0xe2, 0xca, + 0xc7, 0xd6, 0xd0, 0xce, 0xc4, 0xc4, 0xda, 0xc8, 0xdd, 0xd3, 0xc3, + 0xd3, 0xda, 0xb2, 0xe2, 0xca, 0xd4, 0xb1, 0xe0, 0xc2, 0xeb, 0xbc, + 0xec, 0xb2, 0xe2, + ]); + const filePath = path.join(testDir, 'gbk-detect.txt'); + await fsPromises.writeFile(filePath, gbkBuffer); + + const encoding = await detectFileEncoding(filePath); + // chardet detects GBK as 'gb18030' (its superset) + expect(encoding).toBe('gb18030'); + }); + + it('should return utf-8 for empty file', async () => { + const filePath = path.join(testDir, 'empty-detect.txt'); + await fsPromises.writeFile(filePath, ''); + + const encoding = await detectFileEncoding(filePath); + expect(encoding).toBe('utf-8'); + }); + + it('should return utf-8 for non-existent file', async () => { + const filePath = path.join(testDir, 'nonexistent-detect.txt'); + + const encoding = await detectFileEncoding(filePath); + expect(encoding).toBe('utf-8'); + }); }); describe('isBinaryFile with BOM awareness', () => { diff --git a/packages/core/src/utils/fileUtils.ts b/packages/core/src/utils/fileUtils.ts index aab6935cb..05de408ef 100644 --- a/packages/core/src/utils/fileUtils.ts +++ b/packages/core/src/utils/fileUtils.ts @@ -9,10 +9,16 @@ import fsPromises from 'node:fs/promises'; import path from 'node:path'; import type { PartUnion } from '@google/genai'; import mime from 'mime/lite'; +import { + iconvDecode, + iconvEncodingExists, + isUtf8CompatibleEncoding, +} from './iconvHelper.js'; import { ToolErrorType } from '../tools/tool-error.js'; import { BINARY_EXTENSIONS } from './ignorePatterns.js'; import type { Config } from '../config/config.js'; import { createDebugLogger } from './debugLogger.js'; +import { detectEncodingFromBuffer } from './systemEncoding.js'; const debugLogger = 
createDebugLogger('FILE_UTILS'); @@ -118,23 +124,41 @@ function decodeUTF32(buf: Buffer, littleEndian: boolean): string { } /** - * Read a file as text, honoring BOM encodings (UTF‑8/16/32) and stripping the BOM. - * Falls back to utf8 when no BOM is present. + * Check whether a buffer is valid UTF-8 by attempting a strict decode. + * If any invalid byte sequence is encountered, TextDecoder with `fatal: true` throws. */ -export async function readFileWithEncoding(filePath: string): Promise { - // Read the file once; detect BOM and decode from the single buffer. - const full = await fs.promises.readFile(filePath); - if (full.length === 0) return ''; - - const bom = detectBOM(full); - if (!bom) { - // No BOM → treat as UTF‑8 - return full.toString('utf8'); +function isValidUtf8(buffer: Buffer): boolean { + try { + new TextDecoder('utf-8', { fatal: true }).decode(buffer); + return true; + } catch { + return false; } +} - // Strip BOM and decode per encoding - const content = full.subarray(bom.bomLength); - switch (bom.encoding) { +/** + * Result of reading a file with encoding detection. + */ +export interface FileReadResult { + /** Decoded text content of the file (BOM stripped if present). */ + content: string; + /** Detected encoding name (e.g. 'utf-8', 'gb18030', 'utf-16le'). */ + encoding: string; + /** + * Whether the file had a Unicode BOM (UTF-8, UTF-16 LE/BE, or UTF-32 LE/BE). + * When true, the same BOM should be re-written on save to preserve the file's + * original byte-order mark. + */ + bom: boolean; +} + +/** + * Internal helper: decode a buffer given a BOMInfo. + * Returns the decoded string for each supported BOM encoding. 
+ */ +function decodeBOMBuffer(buf: Buffer, bomInfo: BOMInfo): string { + const content = buf.subarray(bomInfo.bomLength); + switch (bomInfo.encoding) { case 'utf8': return content.toString('utf8'); case 'utf16le': @@ -151,6 +175,153 @@ export async function readFileWithEncoding(filePath: string): Promise<string> { } } +/** + * Map a BOMInfo encoding to a canonical encoding name string. + */ +function bomEncodingToName(bomEncoding: UnicodeEncoding): string { + switch (bomEncoding) { + case 'utf8': + return 'utf-8'; + case 'utf16le': + return 'utf-16le'; + case 'utf16be': + return 'utf-16be'; + case 'utf32le': + return 'utf-32le'; + case 'utf32be': + return 'utf-32be'; + default: + return 'utf-8'; + } +} + +/** + * Read a file as text, honoring BOM encodings (UTF‑8/16/32) and stripping the BOM. + * For files without BOM, validates UTF-8 first. If invalid UTF-8, uses chardet + * to detect encoding (e.g. GBK, Big5, Shift_JIS) and iconv-lite to decode. + * Falls back to utf8 when detection fails. + * + * Returns both the decoded content and the detected encoding/BOM information + * in a single I/O pass, avoiding redundant file reads. + */ +export async function readFileWithEncodingInfo( + filePath: string, +): Promise<FileReadResult> { + // Read the file once; detect BOM and decode from the single buffer. + const full = await fs.promises.readFile(filePath); + if (full.length === 0) return { content: '', encoding: 'utf-8', bom: false }; + + const bomInfo = detectBOM(full); + if (bomInfo) { + return { + content: decodeBOMBuffer(full, bomInfo), + encoding: bomEncodingToName(bomInfo.encoding), + // Mark bom: true for all Unicode BOM variants (UTF-8/16/32) so that + // the BOM is re-written on save and the file's original format is preserved. 
+ bom: true, + }; + } + + // No BOM — check if it's valid UTF-8 first (fast path for the common case) + if (isValidUtf8(full)) { + return { content: full.toString('utf8'), encoding: 'utf-8', bom: false }; + } + + // Not valid UTF-8 — try chardet-based encoding detection + const detected = detectEncodingFromBuffer(full); + if (detected && !isUtf8CompatibleEncoding(detected)) { + try { + if (iconvEncodingExists(detected)) { + return { + content: iconvDecode(full, detected), + encoding: detected, + bom: false, + }; + } + } catch (e) { + debugLogger.warn( + `Failed to decode file ${filePath} as ${detected}: ${e instanceof Error ? e.message : String(e)}`, + ); + } + } + + // Final fallback: UTF-8 with replacement characters + return { content: full.toString('utf8'), encoding: 'utf-8', bom: false }; +} + +/** + * Read a file as text, honoring BOM encodings (UTF‑8/16/32) and stripping the BOM. + * For files without BOM, validates UTF-8 first. If invalid UTF-8, uses chardet + * to detect encoding (e.g. GBK, Big5, Shift_JIS) and iconv-lite to decode. + * Falls back to utf8 when detection fails. + */ +export async function readFileWithEncoding(filePath: string): Promise<string> { + const result = await readFileWithEncodingInfo(filePath); + return result.content; +} + +/** + * Detect the encoding of a file by reading a sample from its beginning. + * Returns the encoding name (e.g. 'utf-8', 'gbk', 'shift_jis'). + * Uses BOM detection first, then UTF-8 validation, then chardet as fallback. 
+ */ +export async function detectFileEncoding(filePath: string): Promise<string> { + let fh: fs.promises.FileHandle | null = null; + try { + fh = await fs.promises.open(filePath, 'r'); + const stats = await fh.stat(); + if (stats.size === 0) return 'utf-8'; + + // Read a sample (up to 8KB) for detection + const sampleSize = Math.min(8192, stats.size); + const buf = Buffer.alloc(sampleSize); + const { bytesRead } = await fh.read(buf, 0, sampleSize, 0); + if (bytesRead === 0) return 'utf-8'; + const sample = buf.subarray(0, bytesRead); + + // 1. Check for BOM + const bom = detectBOM(sample); + if (bom) { + switch (bom.encoding) { + case 'utf8': + return 'utf-8'; + case 'utf16le': + return 'utf-16le'; + case 'utf16be': + return 'utf-16be'; + case 'utf32le': + return 'utf-32le'; + case 'utf32be': + return 'utf-32be'; + default: + return 'utf-8'; + } + } + + // 2. Validate UTF-8 + if (isValidUtf8(sample)) return 'utf-8'; + + // 3. Use chardet for detection + const detected = detectEncodingFromBuffer(sample); + if (detected && !isUtf8CompatibleEncoding(detected)) { + return detected; + } + + return 'utf-8'; + } catch { + // If file can't be read, default to UTF-8 + return 'utf-8'; + } finally { + if (fh) { + try { + await fh.close(); + } catch { + // Ignore close errors + } + } + } +} + /** * Looks up the specific MIME type for a file path. * @param filePath Path to the file. diff --git a/packages/core/src/utils/iconvHelper.ts b/packages/core/src/utils/iconvHelper.ts new file mode 100644 index 000000000..12c1a56c8 --- /dev/null +++ b/packages/core/src/utils/iconvHelper.ts @@ -0,0 +1,65 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +/** + * Helper module to bridge iconv-lite CJS module with our ESM codebase. + * iconv-lite v0.6.x uses ambient `declare module` type declarations + * that are incompatible with NodeNext module resolution. + * This module provides properly-typed wrappers. 
+ */ + +interface IconvLite { + decode(buffer: Buffer, encoding: string): string; + encode(content: string, encoding: string): Buffer; + encodingExists(encoding: string): boolean; +} + +// iconv-lite is a CJS module. Under NodeNext resolution, its ambient type +// declarations don't map correctly. We import the default export (which is +// the CJS module.exports object) and cast it to a proper interface. +import iconvModule from 'iconv-lite'; +const iconvLite: IconvLite = iconvModule as unknown as IconvLite; + +/** + * Decode a buffer using the specified encoding. + * @param buffer The buffer to decode + * @param encoding The encoding to use (e.g. 'gbk', 'big5', 'shift_jis') + * @returns The decoded string + */ +export function iconvDecode(buffer: Buffer, encoding: string): string { + return iconvLite.decode(buffer, encoding); +} + +/** + * Encode a string to a buffer using the specified encoding. + * @param content The string to encode + * @param encoding The encoding to use (e.g. 'gbk', 'big5', 'shift_jis') + * @returns The encoded buffer + */ +export function iconvEncode(content: string, encoding: string): Buffer { + return iconvLite.encode(content, encoding); +} + +/** + * Check if an encoding is supported by iconv-lite. + * @param encoding The encoding name to check + * @returns True if the encoding is supported + */ +export function iconvEncodingExists(encoding: string): boolean { + return iconvLite.encodingExists(encoding); +} + +/** + * Check whether an encoding name represents a UTF-8 compatible encoding + * that Node's Buffer can handle natively without iconv-lite. + * Normalizes encoding names (e.g. 'utf-8', 'UTF8', 'us-ascii' all match). 
+ * @param encoding The encoding name to check + * @returns True if the encoding is UTF-8 or ASCII compatible + */ +export function isUtf8CompatibleEncoding(encoding: string): boolean { + const lower = encoding.toLowerCase().replace(/[^a-z0-9]/g, ''); + return lower === 'utf8' || lower === 'ascii' || lower === 'usascii'; +} diff --git a/packages/core/src/utils/ignorePatterns.test.ts b/packages/core/src/utils/ignorePatterns.test.ts index 646c4b6bb..722f72edb 100644 --- a/packages/core/src/utils/ignorePatterns.test.ts +++ b/packages/core/src/utils/ignorePatterns.test.ts @@ -14,7 +14,7 @@ import type { Config } from '../config/config.js'; // Mock the memoryTool module vi.mock('../tools/memoryTool.js', () => ({ - getCurrentGeminiMdFilename: vi.fn(() => 'GEMINI.md'), + getAllGeminiMdFilenames: vi.fn(() => ['GEMINI.md', 'AGENTS.md']), })); describe('FileExclusions', () => { @@ -56,6 +56,7 @@ describe('FileExclusions', () => { // Should include dynamic patterns expect(patterns).toContain('**/GEMINI.md'); + expect(patterns).toContain('**/AGENTS.md'); }); it('should respect includeDefaults option', () => { @@ -68,6 +69,7 @@ describe('FileExclusions', () => { expect(patterns).not.toContain('**/node_modules/**'); expect(patterns).not.toContain('**/.git/**'); expect(patterns).not.toContain('**/GEMINI.md'); + expect(patterns).not.toContain('**/AGENTS.md'); expect(patterns).toHaveLength(0); }); @@ -101,7 +103,9 @@ describe('FileExclusions', () => { }); expect(patternsWithDynamic).toContain('**/GEMINI.md'); + expect(patternsWithDynamic).toContain('**/AGENTS.md'); expect(patternsWithoutDynamic).not.toContain('**/GEMINI.md'); + expect(patternsWithoutDynamic).not.toContain('**/AGENTS.md'); }); }); @@ -114,6 +118,7 @@ describe('FileExclusions', () => { expect(patterns).toContain('**/node_modules/**'); expect(patterns).toContain('**/.git/**'); expect(patterns).toContain('**/GEMINI.md'); + expect(patterns).toContain('**/AGENTS.md'); // Should include additional excludes 
expect(patterns).toContain('**/*.log'); diff --git a/packages/core/src/utils/ignorePatterns.ts b/packages/core/src/utils/ignorePatterns.ts index 9f9776db5..b4a4c2e40 100644 --- a/packages/core/src/utils/ignorePatterns.ts +++ b/packages/core/src/utils/ignorePatterns.ts @@ -6,7 +6,7 @@ import path from 'node:path'; import type { Config } from '../config/config.js'; -import { getCurrentGeminiMdFilename } from '../tools/memoryTool.js'; +import { getAllGeminiMdFilenames } from '../tools/memoryTool.js'; /** * Common ignore patterns used across multiple tools for basic exclusions. @@ -119,7 +119,7 @@ export interface ExcludeOptions { runtimePatterns?: string[]; /** - * Whether to include dynamic patterns like the current Gemini MD filename. Defaults to true. + * Whether to include dynamic patterns like configured context filenames. Defaults to true. */ includeDynamicPatterns?: boolean; } @@ -158,9 +158,11 @@ export class FileExclusions { patterns.push(...DEFAULT_FILE_EXCLUDES); } - // Add dynamic patterns (like current Gemini MD filename) + // Add dynamic patterns (like context filenames) if (includeDynamicPatterns) { - patterns.push(`**/${getCurrentGeminiMdFilename()}`); + for (const filename of getAllGeminiMdFilenames()) { + patterns.push(`**/${filename}`); + } } // Add custom patterns from configuration diff --git a/packages/core/src/utils/paths.ts b/packages/core/src/utils/paths.ts index 96856a5dc..dc4434ece 100644 --- a/packages/core/src/utils/paths.ts +++ b/packages/core/src/utils/paths.ts @@ -202,6 +202,25 @@ export function getProjectHash(projectRoot: string): string { return crypto.createHash('sha256').update(normalizedPath).digest('hex'); } +/** + * Sanitizes a directory path to create a safe project ID. 
+ * + * - On Windows: normalizes to lowercase for case-insensitive matching + * - Replaces all non-alphanumeric characters with hyphens + * + * This is used for: + * - Creating project-specific directories + * - Generating session IDs for debug logging during startup + * + * @param cwd - The directory path to sanitize + * @returns A sanitized string safe for use as a project identifier + */ +export function sanitizeCwd(cwd: string): string { + // On Windows, normalize to lowercase for case-insensitive matching + const normalizedCwd = os.platform() === 'win32' ? cwd.toLowerCase() : cwd; + return normalizedCwd.replace(/[^a-zA-Z0-9]/g, '-'); +} + /** * Checks if a path is a subpath of another path. * @param parentPath The parent path. diff --git a/packages/core/src/utils/textUtils.test.ts b/packages/core/src/utils/textUtils.test.ts index c1468c111..bdbc80216 100644 --- a/packages/core/src/utils/textUtils.test.ts +++ b/packages/core/src/utils/textUtils.test.ts @@ -5,7 +5,7 @@ */ import { describe, it, expect } from 'vitest'; -import { safeLiteralReplace } from './textUtils.js'; +import { safeLiteralReplace, normalizeContent } from './textUtils.js'; describe('safeLiteralReplace', () => { it('returns original string when oldString empty or not found', () => { @@ -77,3 +77,43 @@ describe('safeLiteralReplace', () => { expect(safeLiteralReplace('abc', 'b', '$$')).toBe('a$$c'); }); }); + +describe('normalizeContent', () => { + it('strips UTF-8 BOM from the beginning of the string', () => { + const contentWithBOM = '\uFEFFHello World'; + expect(normalizeContent(contentWithBOM)).toBe('Hello World'); + }); + + it('preserves BOM-like characters not at the beginning', () => { + const content = 'Hello\uFEFFWorld'; + expect(normalizeContent(content)).toBe('Hello\uFEFFWorld'); + }); + + it('converts CRLF to LF', () => { + const content = 'Line 1\r\nLine 2'; + expect(normalizeContent(content)).toBe('Line 1\nLine 2'); + }); + + it('converts standalone CR to LF', () => { + const 
content = 'Line 1\rLine 2'; + expect(normalizeContent(content)).toBe('Line 1\nLine 2'); + }); + + it('leaves existing LF unchanged', () => { + const content = 'Line 1\nLine 2'; + expect(normalizeContent(content)).toBe('Line 1\nLine 2'); + }); + + it('handles mixed line endings correctly', () => { + const content = 'Line 1\r\nLine 2\rLine 3\nLine 4'; + expect(normalizeContent(content)).toBe('Line 1\nLine 2\nLine 3\nLine 4'); + }); + + it('handles empty strings', () => { + expect(normalizeContent('')).toBe(''); + }); + + it('handles strings without newlines or BOM', () => { + expect(normalizeContent('Just a single line')).toBe('Just a single line'); + }); +}); diff --git a/packages/core/src/utils/textUtils.ts b/packages/core/src/utils/textUtils.ts index 693ab48fe..32c25b89f 100644 --- a/packages/core/src/utils/textUtils.ts +++ b/packages/core/src/utils/textUtils.ts @@ -53,3 +53,23 @@ export function isBinary( // If no NULL bytes were found in the sample, we assume it's text. return false; } + +/** + * Normalizes text content by stripping the UTF-8 BOM and converting all CRLF (\r\n) + * or standalone CR (\r) line endings to LF (\n). + * + * This is crucial for cross-platform compatibility, particularly to prevent parsing + * failures on Windows where files may be saved with CRLF line endings. + * + * @param content The raw text content to normalize + * @returns The normalized string with uniform \n line endings + */ +export function normalizeContent(content: string): string { + // Strip UTF-8 BOM to ensure string processing starts at the first real character. + let normalized = content.replace(/^\uFEFF/, ''); + + // Normalize line endings to LF (\n). 
+ normalized = normalized.replace(/\r\n/g, '\n').replace(/\r/g, '\n'); + + return normalized; +} diff --git a/packages/test-utils/package.json b/packages/test-utils/package.json index 358128630..e5f087f3c 100644 --- a/packages/test-utils/package.json +++ b/packages/test-utils/package.json @@ -1,6 +1,6 @@ { "name": "@qwen-code/qwen-code-test-utils", - "version": "0.11.1", + "version": "0.12.0", "private": true, "main": "src/index.ts", "license": "Apache-2.0", diff --git a/packages/vscode-ide-companion/.vscodeignore b/packages/vscode-ide-companion/.vscodeignore index 18e07a04b..5d1a75d88 100644 --- a/packages/vscode-ide-companion/.vscodeignore +++ b/packages/vscode-ide-companion/.vscodeignore @@ -6,3 +6,5 @@ !LICENSE !NOTICES.txt !assets/ +!schemas/ +!schemas/** diff --git a/packages/vscode-ide-companion/package.json b/packages/vscode-ide-companion/package.json index 28da4cf4f..f83d3cd86 100644 --- a/packages/vscode-ide-companion/package.json +++ b/packages/vscode-ide-companion/package.json @@ -2,7 +2,7 @@ "name": "qwen-code-vscode-ide-companion", "displayName": "Qwen Code Companion", "description": "Enable Qwen Code with direct access to your VS Code workspace.", - "version": "0.11.1", + "version": "0.12.0", "publisher": "qwenlm", "icon": "assets/icon.png", "repository": { @@ -31,6 +31,12 @@ "onStartupFinished" ], "contributes": { + "jsonValidation": [ + { + "fileMatch": "**/.qwen/settings.json", + "url": "./schemas/settings.schema.json" + } + ], "languages": [ { "id": "qwen-diff-editable" diff --git a/packages/vscode-ide-companion/schemas/settings.schema.json b/packages/vscode-ide-companion/schemas/settings.schema.json new file mode 100644 index 000000000..8b5fca2b0 --- /dev/null +++ b/packages/vscode-ide-companion/schemas/settings.schema.json @@ -0,0 +1,599 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "description": "Qwen Code settings configuration", + "properties": { + "mcpServers": { + "description": "Configuration for MCP 
servers.", + "type": "object", + "additionalProperties": true + }, + "modelProviders": { + "description": "Model providers configuration grouped by authType. Each authType contains an array of model configurations.", + "type": "object", + "additionalProperties": true + }, + "codingPlan": { + "description": "Coding Plan template version tracking and configuration.", + "type": "object", + "properties": { + "version": { + "description": "SHA256 hash of the Coding Plan template. Used to detect template updates.", + "type": "string" + } + } + }, + "env": { + "description": "Environment variables to set as fallback defaults. These are loaded with the lowest priority: system environment variables > .env files > settings.env.", + "type": "object", + "additionalProperties": true + }, + "general": { + "description": "General application settings.", + "type": "object", + "properties": { + "preferredEditor": { + "description": "The preferred editor to open files in.", + "type": "string" + }, + "vimMode": { + "description": "Enable Vim keybindings", + "type": "boolean", + "default": false + }, + "enableAutoUpdate": { + "description": "Enable automatic update checks and installations on startup.", + "type": "boolean", + "default": true + }, + "gitCoAuthor": { + "description": "Automatically add a Co-authored-by trailer to git commit messages when commits are made through Qwen Code.", + "type": "boolean", + "default": true + }, + "checkpointing": { + "description": "Session checkpointing settings.", + "type": "object", + "properties": { + "enabled": { + "description": "Enable session checkpointing for recovery", + "type": "boolean", + "default": false + } + } + }, + "debugKeystrokeLogging": { + "description": "Enable debug logging of keystrokes to the console.", + "type": "boolean", + "default": false + }, + "language": { + "description": "The language for the user interface. Use \"auto\" to detect from system settings. 
You can also use custom language codes (e.g., \"es\", \"fr\") by placing JS language files in ~/.qwen/locales/ (e.g., ~/.qwen/locales/es.js). Options: auto, en, zh, ru, de, ja, pt", + "enum": [ + "auto", + "en", + "zh", + "ru", + "de", + "ja", + "pt" + ], + "default": "auto" + }, + "outputLanguage": { + "description": "The language for LLM output. Use \"auto\" to detect from system settings, or set a specific language.", + "type": "string", + "default": "auto" + }, + "terminalBell": { + "description": "Play terminal bell sound when response completes or needs approval.", + "type": "boolean", + "default": true + }, + "chatRecording": { + "description": "Enable saving chat history to disk. Disabling this will also prevent --continue and --resume from working.", + "type": "boolean", + "default": true + }, + "defaultFileEncoding": { + "description": "Default encoding for new files. Use \"utf-8\" (default) for UTF-8 without BOM, or \"utf-8-bom\" for UTF-8 with BOM. Only change this if your project specifically requires BOM. Options: utf-8, utf-8-bom", + "enum": [ + "utf-8", + "utf-8-bom" + ], + "default": "utf-8" + } + } + }, + "output": { + "description": "Settings for the CLI output.", + "type": "object", + "properties": { + "format": { + "description": "The format of the CLI output. 
Options: text, json", + "enum": [ + "text", + "json" + ], + "default": "text" + } + } + }, + "ui": { + "description": "User interface settings.", + "type": "object", + "properties": { + "theme": { + "description": "The color theme for the UI.", + "type": "string", + "default": "Qwen Dark" + }, + "customThemes": { + "description": "Custom theme definitions.", + "type": "object", + "additionalProperties": true + }, + "hideWindowTitle": { + "description": "Hide the window title bar", + "type": "boolean", + "default": false + }, + "showStatusInTitle": { + "description": "Show Qwen Code status and thoughts in the terminal window title", + "type": "boolean", + "default": false + }, + "hideTips": { + "description": "Hide helpful tips in the UI", + "type": "boolean", + "default": false + }, + "showLineNumbers": { + "description": "Show line numbers in the code output.", + "type": "boolean", + "default": false + }, + "showCitations": { + "description": "Show citations for generated text in the chat.", + "type": "boolean", + "default": false + }, + "customWittyPhrases": { + "description": "Custom witty phrases to display during loading.", + "type": "array", + "items": { + "type": "string" + } + }, + "enableWelcomeBack": { + "description": "Show welcome back dialog when returning to a project with conversation history.", + "type": "boolean", + "default": true + }, + "enableUserFeedback": { + "description": "Show optional feedback dialog after conversations to help improve Qwen performance.", + "type": "boolean", + "default": true + }, + "accessibility": { + "description": "Accessibility settings.", + "type": "object", + "properties": { + "enableLoadingPhrases": { + "description": "Enable loading phrases (disable for accessibility)", + "type": "boolean", + "default": true + }, + "screenReader": { + "description": "Render output in plain-text to be more screen reader accessible", + "type": "boolean" + } + } + }, + "feedbackLastShownTimestamp": { + "description": "The last time 
the feedback dialog was shown.", + "type": "number", + "default": 0 + } + } + }, + "ide": { + "description": "IDE integration settings.", + "type": "object", + "properties": { + "enabled": { + "description": "Enable IDE integration mode", + "type": "boolean", + "default": false + }, + "hasSeenNudge": { + "description": "Whether the user has seen the IDE integration nudge.", + "type": "boolean", + "default": false + } + } + }, + "privacy": { + "description": "Privacy-related settings.", + "type": "object", + "properties": { + "usageStatisticsEnabled": { + "description": "Enable collection of usage statistics", + "type": "boolean", + "default": true + } + } + }, + "telemetry": { + "description": "Telemetry configuration.", + "type": "object", + "additionalProperties": true + }, + "model": { + "description": "Settings related to the generative model.", + "type": "object", + "properties": { + "name": { + "description": "The model to use for conversations.", + "type": "string" + }, + "maxSessionTurns": { + "description": "Maximum number of user/model/tool turns to keep in a session. 
-1 means unlimited.", + "type": "number", + "default": -1 + }, + "summarizeToolOutput": { + "description": "Settings for summarizing tool output.", + "type": "object", + "additionalProperties": true + }, + "chatCompression": { + "description": "Chat compression settings.", + "type": "object", + "additionalProperties": true + }, + "sessionTokenLimit": { + "description": "The maximum number of tokens allowed in a session.", + "type": "number" + }, + "skipNextSpeakerCheck": { + "description": "Skip the next speaker check.", + "type": "boolean", + "default": true + }, + "skipLoopDetection": { + "description": "Disable all loop detection checks (streaming and LLM).", + "type": "boolean", + "default": false + }, + "skipStartupContext": { + "description": "Avoid sending the workspace startup context at the beginning of each session.", + "type": "boolean", + "default": false + }, + "enableOpenAILogging": { + "description": "Enable OpenAI logging.", + "type": "boolean", + "default": false + }, + "openAILoggingDir": { + "description": "Custom directory path for OpenAI API logs. If not specified, defaults to logs/openai in the current working directory.", + "type": "string" + }, + "generationConfig": { + "description": "Generation configuration settings.", + "type": "object", + "properties": { + "timeout": { + "description": "Request timeout in milliseconds.", + "type": "number" + }, + "maxRetries": { + "description": "Maximum number of retries for failed requests.", + "type": "number" + }, + "enableCacheControl": { + "description": "Enable cache control for DashScope providers.", + "type": "boolean", + "default": true + }, + "schemaCompliance": { + "description": "The compliance mode for tool schemas sent to the model. Use \"openapi_30\" for strict OpenAPI 3.0 compatibility (e.g., for Gemini). 
Options: auto, openapi_30", + "enum": [ + "auto", + "openapi_30" + ], + "default": "auto" + }, + "contextWindowSize": { + "description": "Overrides the default context window size for the selected model. Use this setting when a provider's effective context limit differs from Qwen Code's default. This value defines the model's assumed maximum context capacity, not a per-request token limit.", + "type": "number" + } + } + } + } + }, + "context": { + "description": "Settings for managing context provided to the model.", + "type": "object", + "properties": { + "fileName": { + "description": "The name of the context file.", + "type": "object", + "additionalProperties": true + }, + "importFormat": { + "description": "The format to use when importing memory.", + "type": "string" + }, + "includeDirectories": { + "description": "Additional directories to include in the workspace context. Missing directories will be skipped with a warning.", + "type": "array", + "items": { + "type": "string" + } + }, + "loadFromIncludeDirectories": { + "description": "Whether to load memory files from include directories.", + "type": "boolean", + "default": false + }, + "fileFiltering": { + "description": "Settings for git-aware file filtering.", + "type": "object", + "properties": { + "respectGitIgnore": { + "description": "Respect .gitignore files when searching", + "type": "boolean", + "default": true + }, + "respectQwenIgnore": { + "description": "Respect .qwenignore files when searching", + "type": "boolean", + "default": true + }, + "enableRecursiveFileSearch": { + "description": "Enable recursive file search functionality", + "type": "boolean", + "default": true + }, + "enableFuzzySearch": { + "description": "Enable fuzzy search when searching for files.", + "type": "boolean", + "default": true + } + } + } + } + }, + "tools": { + "description": "Settings for built-in and custom tools.", + "type": "object", + "properties": { + "sandbox": { + "description": "Sandbox execution 
environment (can be a boolean or a path string).", + "type": "object", + "additionalProperties": true + }, + "shell": { + "description": "Settings for shell execution.", + "type": "object", + "properties": { + "enableInteractiveShell": { + "description": "Use node-pty for an interactive shell experience. Fallback to child_process still applies.", + "type": "boolean", + "default": false + }, + "pager": { + "description": "The pager command to use for shell output. Defaults to `cat`.", + "type": "string", + "default": "cat" + }, + "showColor": { + "description": "Show color in shell output.", + "type": "boolean", + "default": false + } + } + }, + "core": { + "description": "Paths to core tool definitions.", + "type": "array", + "items": { + "type": "string" + } + }, + "allowed": { + "description": "A list of tool names that will bypass the confirmation dialog.", + "type": "array", + "items": { + "type": "string" + } + }, + "exclude": { + "description": "Tool names to exclude from discovery.", + "type": "array", + "items": { + "type": "string" + } + }, + "approvalMode": { + "description": "Approval mode for tool usage. Controls how tools are approved before execution. Options: plan, default, auto-edit, yolo", + "enum": [ + "plan", + "default", + "auto-edit", + "yolo" + ], + "default": "default" + }, + "autoAccept": { + "description": "Automatically accept and execute tool calls that are considered safe (e.g., read-only operations) without explicit user confirmation.", + "type": "boolean", + "default": false + }, + "discoveryCommand": { + "description": "Command to run for tool discovery.", + "type": "string" + }, + "callCommand": { + "description": "Command to run for tool calls.", + "type": "string" + }, + "useRipgrep": { + "description": "Use ripgrep for file content search instead of the fallback implementation. 
Provides faster search performance.", + "type": "boolean", + "default": true + }, + "useBuiltinRipgrep": { + "description": "Use the bundled ripgrep binary. When set to false, the system-level \"rg\" command will be used instead. This setting is only effective when useRipgrep is true.", + "type": "boolean", + "default": true + }, + "enableToolOutputTruncation": { + "description": "Enable truncation of large tool outputs.", + "type": "boolean", + "default": true + }, + "truncateToolOutputThreshold": { + "description": "Truncate tool output if it is larger than this many characters. Set to -1 to disable.", + "type": "number", + "default": 25000 + }, + "truncateToolOutputLines": { + "description": "The number of lines to keep when truncating tool output.", + "type": "number", + "default": 1000 + } + } + }, + "mcp": { + "description": "Settings for Model Context Protocol (MCP) servers.", + "type": "object", + "properties": { + "serverCommand": { + "description": "Command to start an MCP server.", + "type": "string" + }, + "allowed": { + "description": "A list of MCP servers to allow.", + "type": "array", + "items": { + "type": "string" + } + }, + "excluded": { + "description": "A list of MCP servers to exclude.", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "security": { + "description": "Security-related settings.", + "type": "object", + "properties": { + "folderTrust": { + "description": "Settings for folder trust.", + "type": "object", + "properties": { + "enabled": { + "description": "Setting to track whether Folder trust is enabled.", + "type": "boolean", + "default": false + } + } + }, + "auth": { + "description": "Authentication settings.", + "type": "object", + "properties": { + "selectedType": { + "description": "The currently selected authentication type.", + "type": "string" + }, + "enforcedType": { + "description": "The required auth type. 
If this does not match the selected auth type, the user will be prompted to re-authenticate.", + "type": "string" + }, + "useExternal": { + "description": "Whether to use an external authentication flow.", + "type": "boolean" + }, + "apiKey": { + "description": "API key for OpenAI compatible authentication.", + "type": "string" + }, + "baseUrl": { + "description": "Base URL for OpenAI compatible API.", + "type": "string" + } + } + } + } + }, + "advanced": { + "description": "Advanced settings for power users.", + "type": "object", + "properties": { + "autoConfigureMemory": { + "description": "Automatically configure Node.js memory limits", + "type": "boolean", + "default": false + }, + "dnsResolutionOrder": { + "description": "The DNS resolution order.", + "type": "string" + }, + "excludedEnvVars": { + "description": "Environment variables to exclude from project context.", + "type": "array", + "items": { + "type": "string" + }, + "default": [ + "DEBUG", + "DEBUG_MODE" + ] + }, + "bugCommand": { + "description": "Configuration for the bug report command.", + "type": "object", + "additionalProperties": true + }, + "tavilyApiKey": { + "description": "⚠️ DEPRECATED: Please use webSearch.provider configuration instead. Legacy API key for the Tavily API.", + "type": "string" + } + } + }, + "webSearch": { + "description": "Configuration for web search providers.", + "type": "object", + "additionalProperties": true + }, + "experimental": { + "description": "Setting to enable experimental features", + "type": "object", + "properties": { + "visionModelPreview": { + "description": "Enable vision model support and auto-switching functionality. When disabled, vision models like qwen-vl-max-latest will be hidden and auto-switching will not occur.", + "type": "boolean", + "default": true + }, + "vlmSwitchMode": { + "description": "Default behavior when images are detected in input. 
Values: once (one-time switch), session (switch for entire session), persist (continue with current model). If not set, user will be prompted each time. This is a temporary experimental feature.", + "type": "string" + } + } + }, + "$version": { + "type": "number", + "description": "Settings schema version for migration tracking.", + "default": 3 + } + }, + "additionalProperties": true +} diff --git a/packages/web-templates/package.json b/packages/web-templates/package.json index a1b11d81c..740b966b8 100644 --- a/packages/web-templates/package.json +++ b/packages/web-templates/package.json @@ -1,6 +1,6 @@ { "name": "@qwen-code/web-templates", - "version": "0.11.1", + "version": "0.12.0", "description": "Web templates bundled as embeddable JS/CSS strings", "repository": { "type": "git", diff --git a/packages/web-templates/src/export-html/src/components/TempFileModal.css b/packages/web-templates/src/export-html/src/components/TempFileModal.css new file mode 100644 index 000000000..ba317104e --- /dev/null +++ b/packages/web-templates/src/export-html/src/components/TempFileModal.css @@ -0,0 +1,68 @@ +/* Temp file modal */ +.modal-overlay { + position: fixed; + inset: 0; + z-index: 1000; + background-color: rgba(0, 0, 0, 0.6); + display: flex; + align-items: center; + justify-content: center; + padding: 24px; +} + +.modal-container { + background-color: var(--bg-secondary); + border: 1px solid var(--border-color); + border-radius: 12px; + width: 100%; + max-width: 800px; + max-height: 80vh; + display: flex; + flex-direction: column; + box-shadow: 0 25px 50px -12px rgba(0, 0, 0, 0.5); +} + +.modal-header { + display: flex; + align-items: center; + justify-content: space-between; + padding: 12px 16px; + border-bottom: 1px solid var(--border-color); + flex-shrink: 0; +} + +.modal-title { + font-size: 13px; + color: var(--text-secondary); + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} + +.modal-close { + background: none; + border: none; + color: 
var(--text-secondary); + cursor: pointer; + font-size: 16px; + padding: 4px 8px; + border-radius: 6px; + line-height: 1; + transition: background-color 0.15s, color 0.15s; +} + +.modal-close:hover { + background-color: var(--border-color); + color: var(--text-primary); +} + +.modal-content { + margin: 0; + padding: 16px; + overflow: auto; + font-size: 13px; + line-height: 1.6; + color: var(--text-primary); + white-space: pre-wrap; + word-break: break-word; +} diff --git a/packages/web-templates/src/export-html/src/components/TempFileModal.tsx b/packages/web-templates/src/export-html/src/components/TempFileModal.tsx new file mode 100644 index 000000000..31c0bd31d --- /dev/null +++ b/packages/web-templates/src/export-html/src/components/TempFileModal.tsx @@ -0,0 +1,65 @@ +import './TempFileModal.css'; + +const React = window.React; + +export type ModalState = { + visible: boolean; + content: string; + fileName: string; +}; + +export const TempFileModal = ({ + state, + onClose, +}: { + state: ModalState; + onClose: () => void; +}) => { + // Lock body scroll when modal is visible + React.useEffect(() => { + if (state.visible) { + const originalOverflow = document.body.style.overflow; + document.body.style.overflow = 'hidden'; + return () => { + document.body.style.overflow = originalOverflow; + }; + } + }, [state.visible]); + + if (!state.visible) return null; + + return ( +
+
e.stopPropagation()}> +
+ {state.fileName} + +
+
{state.content}
+
+
+ ); +}; + +export const useModalState = () => { + const [modalState, setModalState] = React.useState({ + visible: false, + content: '', + fileName: '', + }); + + const openModal = React.useCallback( + (content: string, fileName: string = 'temp') => { + setModalState({ visible: true, content, fileName }); + }, + [], + ); + + const closeModal = React.useCallback(() => { + setModalState((prev) => ({ ...prev, visible: false })); + }, []); + + return { modalState, openModal, closeModal }; +}; diff --git a/packages/web-templates/src/export-html/src/main.tsx b/packages/web-templates/src/export-html/src/main.tsx index 525b7a006..a0d7468ba 100644 --- a/packages/web-templates/src/export-html/src/main.tsx +++ b/packages/web-templates/src/export-html/src/main.tsx @@ -1,5 +1,6 @@ import './styles.css'; import logoSvg from './favicon.svg'; +import { TempFileModal, useModalState } from './components/TempFileModal'; declare global { interface Window { @@ -35,9 +36,11 @@ type PlatformContextValue = { postMessage: (message: unknown) => void; onMessage: (handler: (event: MessageEvent) => void) => () => void; openFile: (path: string) => void; + openTempFile?: (content: string, fileName?: string) => void; getResourceUrl: () => string | undefined; features: { canOpenFile: boolean; + canOpenTempFile?: boolean; canCopy: boolean; }; }; @@ -56,24 +59,38 @@ const logoSvgWithGradient = (() => { return withDefs.replace(/fill="[^"]*"/, 'fill="url(#qwen-logo-gradient)"'); })(); -const platformContext = { - platform: 'web' as PlatformContextValue['platform'], - postMessage: (message: unknown) => { - console.log('Posted message:', message); - }, - onMessage: (handler: (event: MessageEvent) => void) => { - window.addEventListener('message', handler); - return () => window.removeEventListener('message', handler); - }, - openFile: (path: string) => { - console.log('Opening file:', path); - }, - getResourceUrl: () => undefined, - features: { - canOpenFile: false, - canCopy: true, - }, -} satisfies 
PlatformContextValue; +const React = window.React; + +const usePlatformContext = () => { + const { modalState, openModal, closeModal } = useModalState(); + + const platformContext = React.useMemo( + () => + ({ + platform: 'web' as PlatformContextValue['platform'], + postMessage: (message: unknown) => { + console.log('Posted message:', message); + }, + onMessage: (handler: (event: MessageEvent) => void) => { + window.addEventListener('message', handler); + return () => window.removeEventListener('message', handler); + }, + openFile: (path: string) => { + console.log('Opening file:', path); + }, + openTempFile: openModal, + getResourceUrl: () => undefined, + features: { + canOpenFile: false, + canOpenTempFile: true, + canCopy: true, + }, + }) satisfies PlatformContextValue, + [openModal], + ); + + return { platformContext, modalState, closeModal }; +}; const isChatViewerMessage = (value: unknown): value is ChatViewerMessage => Boolean(value) && typeof value === 'object'; @@ -123,6 +140,7 @@ const App = () => { .filter((record) => record.type !== 'system'); const sessionId = chatData.sessionId ?? '-'; const sessionDate = formatSessionDate(chatData.startTime); + const { platformContext, modalState, closeModal } = usePlatformContext(); return (
@@ -155,6 +173,7 @@ const App = () => {
+ ); }; diff --git a/packages/webui/package.json b/packages/webui/package.json index 339c85322..f2d26978b 100644 --- a/packages/webui/package.json +++ b/packages/webui/package.json @@ -1,6 +1,6 @@ { "name": "@qwen-code/webui", - "version": "0.11.1", + "version": "0.12.0", "description": "Shared UI components for Qwen Code packages", "type": "module", "main": "./dist/index.cjs", diff --git a/scripts/build.js b/scripts/build.js index 68da1c6e8..0ce010b3b 100644 --- a/scripts/build.js +++ b/scripts/build.js @@ -56,6 +56,15 @@ for (const workspace of buildOrder) { stdio: 'inherit', cwd: root, }); + + // After cli is built, generate the JSON Schema for settings + // so the vscode-ide-companion extension can provide IntelliSense + if (workspace === 'packages/cli') { + execSync('npx tsx scripts/generate-settings-schema.ts', { + stdio: 'inherit', + cwd: root, + }); + } } // also build container image if sandboxing is enabled diff --git a/scripts/generate-settings-schema.ts b/scripts/generate-settings-schema.ts new file mode 100644 index 000000000..9d13e8166 --- /dev/null +++ b/scripts/generate-settings-schema.ts @@ -0,0 +1,146 @@ +/** + * @license + * Copyright 2025 Qwen team + * SPDX-License-Identifier: Apache-2.0 + */ + +/** + * Generates a JSON Schema from the internal SETTINGS_SCHEMA definition. + * + * Usage: npx tsx scripts/generate-settings-schema.ts + * + * This reads the TypeScript settings schema and converts it to a standard + * JSON Schema file that VS Code uses for IntelliSense in settings.json files. 
+ * + * Prerequisites: npm run build (core package must be built first) + */ + +import * as fs from 'node:fs'; +import * as path from 'node:path'; +import { fileURLToPath } from 'node:url'; + +import type { + SettingDefinition, + SettingsSchema, +} from '../packages/cli/src/config/settingsSchema.js'; +import { getSettingsSchema } from '../packages/cli/src/config/settingsSchema.js'; + +const __filename = fileURLToPath(import.meta.url); +const __dirname = path.dirname(__filename); + +interface JsonSchemaProperty { + $schema?: string; + type?: string | string[]; + description?: string; + properties?: Record; + items?: JsonSchemaProperty; + enum?: (string | number)[]; + default?: unknown; + additionalProperties?: boolean | JsonSchemaProperty; +} + +function convertSettingToJsonSchema( + setting: SettingDefinition, +): JsonSchemaProperty { + const schema: JsonSchemaProperty = {}; + + if (setting.description) { + schema.description = setting.description; + } + + switch (setting.type) { + case 'boolean': + schema.type = 'boolean'; + break; + case 'string': + schema.type = 'string'; + break; + case 'number': + schema.type = 'number'; + break; + case 'array': + schema.type = 'array'; + schema.items = { type: 'string' }; + break; + case 'enum': + if (setting.options && setting.options.length > 0) { + schema.enum = setting.options.map((o) => o.value); + schema.description += + ' Options: ' + setting.options.map((o) => `${o.value}`).join(', '); + } else { + // Enum without predefined options - accept any string + schema.type = 'string'; + } + break; + case 'object': + schema.type = 'object'; + if (setting.properties) { + schema.properties = {}; + for (const [key, childDef] of Object.entries(setting.properties)) { + schema.properties[key] = convertSettingToJsonSchema( + childDef as SettingDefinition, + ); + } + } else { + schema.additionalProperties = true; + } + break; + } + + // Add default value for simple types only + if (setting.default !== undefined && setting.default !== 
null) { + const defaultVal = setting.default; + if ( + typeof defaultVal === 'boolean' || + typeof defaultVal === 'number' || + typeof defaultVal === 'string' + ) { + schema.default = defaultVal; + } else if (Array.isArray(defaultVal) && defaultVal.length > 0) { + schema.default = defaultVal; + } + } + + return schema; +} + +function generateJsonSchema( + settingsSchema: SettingsSchema, +): JsonSchemaProperty { + const jsonSchema: JsonSchemaProperty = { + $schema: 'http://json-schema.org/draft-07/schema#', + type: 'object', + description: 'Qwen Code settings configuration', + properties: {}, + additionalProperties: true, + }; + + for (const [key, setting] of Object.entries(settingsSchema)) { + jsonSchema.properties![key] = convertSettingToJsonSchema( + setting as SettingDefinition, + ); + } + + // Add $version property + jsonSchema.properties!['$version'] = { + type: 'number', + description: 'Settings schema version for migration tracking.', + default: 3, + }; + + return jsonSchema; +} + +const schema = getSettingsSchema(); +const jsonSchema = generateJsonSchema(schema as unknown as SettingsSchema); + +const outputDir = path.resolve( + __dirname, + '../packages/vscode-ide-companion/schemas', +); +const outputPath = path.join(outputDir, 'settings.schema.json'); + +fs.mkdirSync(outputDir, { recursive: true }); +fs.writeFileSync(outputPath, JSON.stringify(jsonSchema, null, 2) + '\n'); + +console.log(`Generated settings JSON Schema at: ${outputPath}`); diff --git a/scripts/installation/install-qwen-with-source.bat b/scripts/installation/install-qwen-with-source.bat index 5a919134c..fcc9d9ac3 100644 --- a/scripts/installation/install-qwen-with-source.bat +++ b/scripts/installation/install-qwen-with-source.bat @@ -2,8 +2,8 @@ REM Script to install Node.js and Qwen Code with source information REM This script handles the installation process and sets the installation source REM -REM Usage: install-qwen-with-source.bat --source [github|npm|internal|local-build] -REM 
install-qwen-with-source.bat -s [github|npm|internal|local-build] +REM Usage: install-qwen-with-source.bat --source +REM install-qwen-with-source.bat -s REM setlocal enabledelayedexpansion @@ -14,21 +14,21 @@ REM Parse command line arguments :parse_args if "%~1"=="" goto end_parse if /i "%~1"=="--source" ( - set "SOURCE=%~2" - shift - shift - goto parse_args + if not "%~2"=="" ( + set "SOURCE=%~2" + shift + shift + goto parse_args + ) ) if /i "%~1"=="-s" ( - set "SOURCE=%~2" - shift - shift - goto parse_args + if not "%~2"=="" ( + set "SOURCE=%~2" + shift + shift + goto parse_args + ) ) -if /i "%~1"=="github" set "SOURCE=github" -if /i "%~1"=="npm" set "SOURCE=npm" -if /i "%~1"=="internal" set "SOURCE=internal" -if /i "%~1"=="local-build" set "SOURCE=local-build" shift goto parse_args @@ -100,8 +100,8 @@ if exist "!NODEJS_PATH!\npm.cmd" ( REM Install Qwen Code with source information echo INFO: Installing Qwen Code with source: %SOURCE% -echo INFO: Running: %NPM_CMD% install -g @qwen-code/qwen-code -call "%NPM_CMD%" install -g @qwen-code/qwen-code +echo INFO: Running: %NPM_CMD% install -g @qwen-code/qwen-code@latest --registry https://registry.npmmirror.com +call "%NPM_CMD%" install -g @qwen-code/qwen-code@latest --registry https://registry.npmmirror.com if %ERRORLEVEL% EQU 0 ( echo SUCCESS: Qwen Code installed successfully! @@ -110,21 +110,25 @@ if %ERRORLEVEL% EQU 0 ( exit /b 1 ) -REM After installation, create source.json in the .qwen directory -echo INFO: Creating source.json in %USERPROFILE%\.qwen... +REM Create source.json only if --source or -s was explicitly provided +if not "!SOURCE!"=="unknown" ( + echo INFO: Creating source.json in %USERPROFILE%\.qwen... -set "QWEN_DIR=%USERPROFILE%\.qwen" -if not exist "%QWEN_DIR%" ( - mkdir "%QWEN_DIR%" + set "QWEN_DIR=%USERPROFILE%\.qwen" + if not exist "!QWEN_DIR!" ( + mkdir "!QWEN_DIR!" + ) + + REM Create the source.json file with the installation source + ( + echo { + echo "source": "!SOURCE!" 
+ echo } + ) > "!QWEN_DIR!\source.json" + + echo SUCCESS: Installation source saved to %USERPROFILE%\.qwen\source.json ) -REM Create the source.json file with the installation source -echo { > "%QWEN_DIR%\source.json" -echo "source": "%SOURCE%" >> "%QWEN_DIR%\source.json" -echo } >> "%QWEN_DIR%\source.json" - -echo SUCCESS: Installation source saved to %USERPROFILE%\.qwen\source.json - REM Verify installation call :CheckCommandExists qwen if %ERRORLEVEL% EQU 0 ( @@ -138,6 +142,7 @@ echo. echo =========================================== echo SUCCESS: Installation completed! echo The source information is stored in %USERPROFILE%\.qwen\source.json +echo Tips: Please restart your terminal and run: qwen echo. echo =========================================== diff --git a/scripts/installation/install-qwen-with-source.sh b/scripts/installation/install-qwen-with-source.sh index 0991ec485..6f67e469b 100755 --- a/scripts/installation/install-qwen-with-source.sh +++ b/scripts/installation/install-qwen-with-source.sh @@ -84,7 +84,9 @@ get_shell_profile() { echo "${HOME}/.zshrc" ;; fish) - echo "${HOME}/.config/fish/config.fish" + # Fish uses its own syntax; bash/zsh export statements are not compatible. + # Return empty string to signal callers to skip automatic profile writes. + echo "" ;; *) echo "${HOME}/.profile" @@ -163,9 +165,16 @@ ensure_download_tool() { clean_npmrc_conflict() { local npmrc="${HOME}/.npmrc" if [[ -f "${npmrc}" ]]; then - log_info "Cleaning npmrc conflicts..." - grep -Ev '^(prefix|globalconfig) *= *' "${npmrc}" > "${npmrc}.tmp" || true - mv -f "${npmrc}.tmp" "${npmrc}" || true + # Only clean if conflicting entries actually exist + if grep -Eq '^(prefix|globalconfig) *= *' "${npmrc}" 2>/dev/null; then + log_info "Cleaning npmrc conflicts..." 
+ # Backup original npmrc before modifying + cp -f "${npmrc}" "${npmrc}.bak" + log_info "Backed up original .npmrc to ${npmrc}.bak" + grep -Ev '^(prefix|globalconfig) *= *' "${npmrc}.bak" > "${npmrc}.tmp" || true + mv -f "${npmrc}.tmp" "${npmrc}" || true + log_success "Removed conflicting prefix/globalconfig entries from .npmrc" + fi fi } @@ -204,8 +213,13 @@ install_nvm() { local PROFILE_FILE PROFILE_FILE=$(get_shell_profile) + # Fish shell returns empty string from get_shell_profile because export/source + # syntax is incompatible with fish. Skip automatic profile writes for fish users. + if [[ -z "${PROFILE_FILE}" ]]; then + log_warning "Fish shell detected: automatic shell profile configuration is not supported." + log_info "Please add NVM configuration manually. See: https://github.com/nvm-sh/nvm#fish" # Check if profile file is writable - if [[ -f "${PROFILE_FILE}" ]] && [[ ! -w "${PROFILE_FILE}" ]]; then + elif [[ -f "${PROFILE_FILE}" ]] && [[ ! -w "${PROFILE_FILE}" ]]; then log_warning "Cannot write to ${PROFILE_FILE} (permission denied)" log_info "Skipping shell profile configuration" log_info "You may need to manually add NVM configuration to your shell profile" @@ -284,7 +298,13 @@ check_node_version() { local current_version current_version=$(node -v | sed 's/v//') local major_version - major_version=$(echo "${current_version}" | cut -d. -f1) + major_version=$(echo "${current_version}" | cut -d. -f1 | sed 's/[^0-9]//g') + + # Handle cases where major_version is empty or non-numeric + if [[ -z "${major_version}" ]]; then + log_warning "Unable to determine Node.js version from: $(node -v)" + return 1 + fi if [[ "${major_version}" -ge 20 ]]; then log_success "Node.js v${current_version} is already installed (>= 20)" @@ -356,55 +376,51 @@ fix_npm_permissions() { local NPM_GLOBAL_DIR NPM_GLOBAL_DIR=$(npm config get prefix 2>/dev/null) || true + + # Determine whether we need to fall back to ~/.npm-global: + # 1. 
prefix is empty or contains an error string + # 2. prefix is a system directory (would break sudo setuid binaries) + # 3. prefix directory is not writable + local use_user_dir=false + if [[ -z "${NPM_GLOBAL_DIR}" ]] || [[ "${NPM_GLOBAL_DIR}" == *"error"* ]]; then + log_info "npm prefix is unset or invalid, switching to user directory" + use_user_dir=true + else + # SAFETY CHECK: Never use system directories + case "${NPM_GLOBAL_DIR}" in + /|/usr|/usr/local|/bin|/sbin|/lib|/lib64|/opt|/snap|/var|/etc) + log_warning "npm prefix is a system directory (${NPM_GLOBAL_DIR}), switching to user directory to avoid breaking system binaries." + use_user_dir=true + ;; + esac + fi + + if [[ "${use_user_dir}" == false ]] && [[ ! -w "${NPM_GLOBAL_DIR}" ]]; then + log_warning "npm global directory is not writable: ${NPM_GLOBAL_DIR}, switching to user directory." + use_user_dir=true + fi + + if [[ "${use_user_dir}" == true ]]; then NPM_GLOBAL_DIR="${HOME}/.npm-global" + # Create the directory before setting prefix so npm config set succeeds + mkdir -p "${NPM_GLOBAL_DIR}" npm config set prefix "${NPM_GLOBAL_DIR}" - log_info "Set npm prefix to user directory: ${NPM_GLOBAL_DIR}" - return 0 - fi + log_success "npm prefix set to: ${NPM_GLOBAL_DIR}" - # SAFETY CHECK: Never modify system directories - # This prevents catastrophic failures like breaking sudo setuid binaries - case "${NPM_GLOBAL_DIR}" in - /|/usr|/usr/local|/bin|/sbin|/lib|/lib64|/opt|/snap|/var|/etc) - log_warning "npm prefix is a system directory (${NPM_GLOBAL_DIR})." - log_info "Using user directory instead to avoid breaking system binaries." 
- NPM_GLOBAL_DIR="${HOME}/.npm-global" - npm config set prefix "${NPM_GLOBAL_DIR}" - log_success "npm prefix set to: ${NPM_GLOBAL_DIR}" - return 0 - ;; - *) - # Safe to proceed with non-system directory - ;; - esac - - # Check if npm global directory is writable - if [[ -w "${NPM_GLOBAL_DIR}" ]]; then - log_info "npm global directory is writable" - return 0 - fi - - # If not writable, use user directory - log_warning "npm global directory is not writable: ${NPM_GLOBAL_DIR}" - log_info "Setting npm prefix to user directory..." - - NPM_GLOBAL_DIR="${HOME}/.npm-global" - mkdir -p "${NPM_GLOBAL_DIR}" - npm config set prefix "${NPM_GLOBAL_DIR}" - - log_success "npm prefix set to: ${NPM_GLOBAL_DIR}" - - # Add to PATH in shell profile - local PROFILE_FILE - PROFILE_FILE=$(get_shell_profile) - if ! grep -q '.npm-global/bin' "${PROFILE_FILE}" 2>/dev/null; then - { - echo "" - echo "# NPM global bin (added by Qwen Code installer)" - echo "export PATH=\"\$HOME/.npm-global/bin:\$PATH\"" - } >> "${PROFILE_FILE}" - log_info "Added npm global bin to PATH in ${PROFILE_FILE}" + # Only add ~/.npm-global/bin to PATH when we actually use it + local PROFILE_FILE + PROFILE_FILE=$(get_shell_profile) + if [[ -n "${PROFILE_FILE}" ]] && ! 
grep -q '.npm-global/bin' "${PROFILE_FILE}" 2>/dev/null; then + { + echo "" + echo "# NPM global bin (added by Qwen Code installer)" + echo "export PATH=\"\$HOME/.npm-global/bin:\$PATH\"" + } >> "${PROFILE_FILE}" 2>/dev/null || log_warning "Failed to write PATH update to ${PROFILE_FILE}" + log_info "Added npm global bin to PATH in ${PROFILE_FILE}" + fi + else + log_info "npm global directory is writable: ${NPM_GLOBAL_DIR}" fi return 0 @@ -421,14 +437,14 @@ install_qwen_code() { # Add npm global bin to PATH local NPM_GLOBAL_BIN - NPM_GLOBAL_BIN=$(npm bin -g 2>/dev/null) || true + NPM_GLOBAL_BIN=$(npm config get prefix 2>/dev/null)/bin if [[ -n "${NPM_GLOBAL_BIN}" ]]; then export PATH="${NPM_GLOBAL_BIN}:${PATH}" fi if command_exists qwen; then local QWEN_VERSION - QWEN_VERSION=$(qwen --version 2>/dev/null) || echo "unknown" + QWEN_VERSION=$(qwen --version 2>/dev/null || echo "unknown") log_success "Qwen Code is already installed: ${QWEN_VERSION}" log_info "Upgrading to the latest version..." fi @@ -439,13 +455,9 @@ install_qwen_code() { # Fix npm permissions if needed fix_npm_permissions - # Configure npm registry for faster downloads in China - npm config set registry https://registry.npmmirror.com - log_info "npm registry set to npmmirror" - # Install Qwen Code log_info "Installing Qwen Code..." - if npm install -g @qwen-code/qwen-code@latest; then + if npm install -g @qwen-code/qwen-code@latest --registry https://registry.npmmirror.com; then log_success "Qwen Code installed successfully!" # Verify installation @@ -532,7 +544,7 @@ main() { # shellcheck source=/dev/null [[ -s "${NVM_DIR}/nvm.sh" ]] && \. "${NVM_DIR}/nvm.sh" 2>/dev/null || true local NPM_GLOBAL_BIN - NPM_GLOBAL_BIN=$(npm bin -g 2>/dev/null) || true + NPM_GLOBAL_BIN=$(npm config get prefix 2>/dev/null)/bin if [[ -n "${NPM_GLOBAL_BIN}" ]]; then export PATH="${NPM_GLOBAL_BIN}:${PATH}" fi @@ -541,15 +553,16 @@ main() { if command_exists qwen; then log_success "Qwen Code is ready to use!" 
echo "" - echo "You can now run: qwen" + log_info "Tips: Please restart your terminal and run: qwen" + echo "" else - log_warning "To start using Qwen Code, please run:" + log_warning "Tips: To start using Qwen Code, please run:" echo "" local PROFILE_FILE PROFILE_FILE=$(get_shell_profile) echo " source ${PROFILE_FILE}" echo "" - echo "Or simply restart your terminal, then run: qwen" + log_info "Or simply restart your terminal, then run: qwen" fi }