Merge branch 'main' into feat/mcp-tui

This commit is contained in:
LaZzyMan 2026-02-25 16:31:42 +08:00
commit 1542a2bdc4
114 changed files with 6943 additions and 1324 deletions

View file

@ -348,15 +348,32 @@ jobs:
CLI_SOURCE_DESC="CLI built from source (same branch/ref as SDK)"
fi
# Create release notes with CLI version info
NOTES="## Bundled CLI Version\n\nThis SDK release bundles CLI version: \`${CLI_VERSION}\`\n\nSource: ${CLI_SOURCE_DESC}\n\n---\n\n"
# Create release notes file
NOTES_FILE=$(mktemp)
{
echo "## Bundled CLI Version"
echo ""
echo "This SDK release bundles CLI version: ${CLI_VERSION}"
echo ""
echo "Source: ${CLI_SOURCE_DESC}"
echo ""
echo "---"
echo ""
} > "${NOTES_FILE}"
# Get previous release notes if available
PREVIOUS_NOTES=$(gh release view "sdk-typescript-${PREVIOUS_RELEASE_TAG}" --json body -q '.body' 2>/dev/null || echo 'See commit history for changes.')
printf '%s\n' "${PREVIOUS_NOTES}" >> "${NOTES_FILE}"
# Create GitHub release
gh release create "sdk-typescript-${RELEASE_TAG}" \
--target "${TARGET}" \
--title "SDK TypeScript Release ${RELEASE_TAG}" \
--notes-start-tag "sdk-typescript-${PREVIOUS_RELEASE_TAG}" \
--notes "${NOTES}$(gh release view "sdk-typescript-${PREVIOUS_RELEASE_TAG}" --json body -q '.body' 2>/dev/null || echo 'See commit history for changes.')" \
"${PRERELEASE_FLAG}"
--notes-file "${NOTES_FILE}" \
${PRERELEASE_FLAG}
# Cleanup
rm -f "${NOTES_FILE}"
- name: 'Create PR to merge release branch into main'
if: |-

View file

@ -206,13 +206,22 @@ jobs:
RELEASE_BRANCH: '${{ steps.release_branch.outputs.BRANCH_NAME }}'
RELEASE_TAG: '${{ steps.version.outputs.RELEASE_TAG }}'
PREVIOUS_RELEASE_TAG: '${{ steps.version.outputs.PREVIOUS_RELEASE_TAG }}'
IS_NIGHTLY: '${{ steps.vars.outputs.is_nightly }}'
IS_PREVIEW: '${{ steps.vars.outputs.is_preview }}'
run: |-
# Set prerelease flag for nightly and preview releases
PRERELEASE_FLAG=""
if [[ "${IS_NIGHTLY}" == "true" || "${IS_PREVIEW}" == "true" ]]; then
PRERELEASE_FLAG="--prerelease"
fi
gh release create "${RELEASE_TAG}" \
dist/cli.js \
--target "$RELEASE_BRANCH" \
--title "Release ${RELEASE_TAG}" \
--notes-start-tag "$PREVIOUS_RELEASE_TAG" \
--generate-notes
--generate-notes \
${PRERELEASE_FLAG}
- name: 'Create Issue on Failure'
if: |-

5
.gitignore vendored
View file

@ -51,7 +51,10 @@ packages/core/src/generated/
packages/vscode-ide-companion/*.vsix
# Qwen Code Configs
.qwen/
!.qwen/commands/
!.qwen/skills/
logs/
# GHA credentials
gha-creds-*.json
@ -70,6 +73,8 @@ __pycache__/
integration-tests/concurrent-runner/output/
integration-tests/concurrent-runner/task-*
integration-tests/terminal-capture/scenarios/screenshots/
# storybook
*storybook.log
storybook-static

View file

@ -0,0 +1,104 @@
---
name: pr-review
description: Reviews pull requests with code analysis and terminal smoke testing. Applies when examining code changes, running CLI tests, or when 'PR review', 'code review', 'terminal screenshot', or 'visual test' is mentioned.
---
# PR Review — Code Review + Terminal Smoke Testing
## Workflow
### 1. Fetch PR Information
```bash
# List open PRs
gh pr list
# View PR details
gh pr view <number>
# Get diff
gh pr diff <number>
```
### 2. Code Review
Analyze changes across the following dimensions:
- **Correctness** — Is the logic correct? Are edge cases handled?
- **Code Style** — Does it follow existing code style and conventions?
- **Performance** — Are there any performance concerns?
- **Test Coverage** — Are there corresponding tests for the changes?
- **Security** — Does it introduce any security risks?
Output format:
- 🔴 **Critical** — Must fix
- 🟡 **Suggestion** — Suggested improvement
- 🟢 **Nice to have** — Optional optimization
### 3. Terminal Smoke Testing (Run for Every PR)
**Run terminal-capture for every PR review**, not just UI changes. Reasons:
- **Smoke Test** — Verify the CLI starts correctly and responds to user input, ensuring the PR didn't break anything
- **Visual Verification** — If there are UI changes, screenshots provide the most intuitive review evidence
- **Documentation** — Attach screenshots to the PR comments so reviewers can see the results without building locally
```bash
# Checkout branch & build
gh pr checkout <number>
npm run build
```
#### Scenario Selection Strategy
Choose appropriate scenarios based on the PR's scope of changes:
| PR Type | Recommended Scenarios | Description |
| ------------------------------------- | ------------------------------------------------------------ | --------------------------------- |
| **Any PR** (default) | smoke test: send `hi`, verify startup & response | Minimal-cost smoke validation |
| Slash command changes | Corresponding command scenarios (`/about`, `/context`, etc.) | Verify command output correctness |
| Ink component / layout changes | Multiple scenarios + full-flow long screenshot | Verify visual effects |
| Large refactors / dependency upgrades | Run `scenarios/all.ts` fully | Full regression |
#### Running Screenshots
```bash
# Write scenario config to integration-tests/terminal-capture/scenarios/
# See terminal-capture skill for FlowStep API reference
# Single scenario
npx tsx integration-tests/terminal-capture/run.ts integration-tests/terminal-capture/scenarios/<scenario>.ts
# Check output in screenshots/ directory
```
#### Minimal Smoke Test Example
No need to write a new scenario file — just use the existing `about.ts`. It sends "hi" then runs `/about`, covering startup + input + command response:
```bash
npx tsx integration-tests/terminal-capture/run.ts integration-tests/terminal-capture/scenarios/about.ts
```
### 4. Upload Screenshots to PR
Use Playwright MCP browser to upload screenshots to the PR comments (images hosted at `github.com/user-attachments/assets/`, zero side effects):
1. Open the PR page with Playwright: `https://github.com/<repo>/pull/<number>`
2. Click the comment text box and enter a comment title (e.g., `## 📷 Terminal Smoke Test Screenshots`)
3. Click the "Paste, drop, or click to add files" button to trigger the file picker
4. Upload screenshot PNG files via `browser_file_upload` (can upload multiple one by one)
5. Wait for GitHub to process (about 2-3 seconds) — image links auto-insert into the comment box
6. Click the "Comment" button to submit
> **Prerequisite**: Playwright MCP needs `--user-data-dir` configured to persist GitHub login session. First time use requires manually logging into GitHub in the Playwright browser.
### 5. Submit Review
Submit code review comments via `gh pr review`:
```bash
gh pr review <number> --comment --body "review content"
```

View file

@ -0,0 +1,197 @@
---
name: terminal-capture
description: Automates terminal UI screenshot testing for CLI commands. Applies when reviewing PRs that affect CLI output, testing slash commands (/about, /context, /auth, /export), generating visual documentation, or when 'terminal screenshot', 'CLI test', 'visual test', or 'terminal-capture' is mentioned.
---
# Terminal Capture — CLI Terminal Screenshot Automation
Drive terminal interactions and screenshots via TypeScript configuration, used for visual verification during PR reviews.
## Prerequisites
Ensure the following dependencies are installed before running:
```bash
npm install # Install project dependencies (including node-pty, xterm, playwright, etc.)
npx playwright install chromium # Install Playwright browser
```
## Architecture
```
node-pty (pseudo-terminal) → ANSI byte stream → xterm.js (Playwright headless) → Screenshot
```
Core files:
| File | Purpose |
| -------------------------------------------------------- | ------------------------------------------------------------------------ |
| `integration-tests/terminal-capture/terminal-capture.ts` | Low-level engine (PTY + xterm.js + Playwright) |
| `integration-tests/terminal-capture/scenario-runner.ts` | Scenario executor (parses config, drives interactions, auto-screenshots) |
| `integration-tests/terminal-capture/run.ts` | CLI entry point (batch run scenarios) |
| `integration-tests/terminal-capture/scenarios/*.ts` | Scenario configuration files |
## Quick Start
### 1. Write Scenario Configuration
Create a `.ts` file under `integration-tests/terminal-capture/scenarios/`:
```typescript
import type { ScenarioConfig } from '../scenario-runner.js';
export default {
name: '/about',
spawn: ['node', 'dist/cli.js', '--yolo'],
terminal: { title: 'qwen-code', cwd: '../../..' }, // Relative to this config file's location
flow: [
{ type: 'Hi, can you help me understand this codebase?' },
{ type: '/about' },
],
} satisfies ScenarioConfig;
```
### 2. Run
```bash
# Single scenario
npx tsx integration-tests/terminal-capture/run.ts integration-tests/terminal-capture/scenarios/about.ts
# Batch (entire directory)
npx tsx integration-tests/terminal-capture/run.ts integration-tests/terminal-capture/scenarios/
```
### 3. Output
Screenshots are saved to `integration-tests/terminal-capture/scenarios/screenshots/{name}/`:
| File | Description |
| --------------- | ---------------------------------- |
| `01-01.png` | Step 1 input state |
| `01-02.png` | Step 1 execution result |
| `02-01.png` | Step 2 input state |
| `02-02.png` | Step 2 execution result |
| `full-flow.png` | Final state full-length screenshot |
## FlowStep API
Each flow step can contain the following fields:
### `type: string` — Input Text
Automatic behavior: Input text → Screenshot (01) → Press Enter → Wait for output to stabilize → Screenshot (02).
```typescript
{
type: 'Hello';
} // Plain text
{
type: '/about';
} // Slash command (auto-completion handled automatically)
```
**Special rule**: If the next step is `key`, do not auto-press Enter (hand over control to the key sequence).
### `key: string | string[]` — Send Key Press
Used for menu selection, Tab completion, and other interactions. Does not auto-press Enter or auto-screenshot.
Supported key names: `ArrowUp`, `ArrowDown`, `ArrowLeft`, `ArrowRight`, `Enter`, `Tab`, `Escape`, `Backspace`, `Space`, `Home`, `End`, `PageUp`, `PageDown`, `Delete`
```typescript
{
key: 'ArrowDown';
} // Single key
{
key: ['ArrowDown', 'ArrowDown', 'Enter'];
} // Multiple keys
```
Auto-screenshot is triggered after the key sequence ends (when the next step is not a `key`).
### `capture` / `captureFull` — Explicit Screenshot
Use as a standalone step, or override automatic naming:
```typescript
{
capture: 'initial.png';
} // Screenshot current viewport only
{
captureFull: 'all-output.png';
} // Screenshot full scrollback buffer
```
## Scenario Examples
### Basic: Input + Command
```typescript
flow: [{ type: 'explain this project' }, { type: '/about' }];
```
### Secondary Menu Selection (/auth)
```typescript
flow: [
{ type: '/auth' },
{ key: 'ArrowDown' }, // Select API Key option
{ key: 'Enter' }, // Confirm
{ type: 'sk-xxx' }, // Input API key
];
```
### Tab Completion Selection (/export)
```typescript
flow: [
{ type: 'Tell me about yourself' },
{ type: '/export' }, // No auto-Enter (next step is key)
{ key: 'Tab' }, // Pop format selection
{ key: 'ArrowDown' }, // Select format
{ key: 'Enter' }, // Confirm → auto-screenshot
];
```
### Array Batch (Multiple Scenarios in One File)
```typescript
export default [
{ name: '/about', spawn: [...], flow: [...] },
{ name: '/context', spawn: [...], flow: [...] },
] satisfies ScenarioConfig[];
```
## Integration with PR Review
This tool is commonly used for visual verification during PR reviews. For the complete code review + screenshot workflow, see the [pr-review](../pr-review/SKILL.md) skill.
## Troubleshooting
| Issue | Cause | Solution |
| ------------------------------------ | ------------------------------------- | ---------------------------------------------------- |
| Playwright error `browser not found` | Browser not installed | `npx playwright install chromium` |
| Blank screenshot | Process starts slowly or build failed | Ensure `npm run build` succeeds, check spawn command |
| PTY-related errors | node-pty native module not compiled | `npm rebuild node-pty` |
| Unstable screenshot output | Terminal output not fully rendered | Check if the scenario needs additional wait time |
## Full ScenarioConfig Type
```typescript
interface ScenarioConfig {
name: string; // Scenario name (also used as screenshot subdirectory name)
spawn: string[]; // Launch command ["node", "dist/cli.js", "--yolo"]
flow: FlowStep[]; // Interaction steps
terminal?: {
// Terminal configuration (all optional)
cols?: number; // Number of columns, default 100
rows?: number; // Number of rows, default 28
theme?: string; // Theme: dracula|one-dark|github-dark|monokai|night-owl
chrome?: boolean; // macOS window decorations, default true
title?: string; // Window title, default "Terminal"
fontSize?: number; // Font size
cwd?: string; // Working directory (relative to config file)
};
outputDir?: string; // Screenshot output directory (relative to config file)
}
```

13
.vscode/launch.json vendored
View file

@ -127,6 +127,19 @@
"cwd": "${workspaceFolder}",
"console": "integratedTerminal",
"env": {}
},
{
"type": "node",
"request": "launch",
"name": "Dev Launch CLI",
"runtimeExecutable": "npm",
"runtimeArgs": ["run", "dev"],
"skipFiles": ["<node_internals>/**"],
"cwd": "${workspaceFolder}",
"console": "integratedTerminal",
"env": {
"GEMINI_SANDBOX": "false"
}
}
],
"inputs": [

245
README.md
View file

@ -18,6 +18,8 @@
</div>
> 🎉 **News (2026-02-16)**: Qwen3.5-Plus is now live! Sign in via Qwen OAuth to use it directly, or get an API key from [Alibaba Cloud ModelStudio](https://modelstudio.console.alibabacloud.com?tab=doc#/doc/?type=model&url=2840914_2&modelId=group-qwen3.5-plus) to access it through the OpenAI-compatible API.
Qwen Code is an open-source AI agent for the terminal, optimized for [Qwen3-Coder](https://github.com/QwenLM/Qwen3-Coder). It helps you understand large codebases, automate tedious work, and ship faster.
![](https://gw.alicdn.com/imgextra/i1/O1CN01D2DviS1wwtEtMwIzJ_!!6000000006373-2-tps-1600-900.png)
@ -123,7 +125,231 @@ Use this if you want more flexibility over which provider and model to use. Supp
- **Anthropic**: Claude models
- **Google GenAI**: Gemini models
For full details (including `modelProviders` configuration, `.env` file loading, environment variable priorities, and security notes), see the [authentication guide](https://qwenlm.github.io/qwen-code-docs/en/users/configuration/auth/).
The **recommended** way to configure models and providers is by editing `~/.qwen/settings.json` (create it if it doesn't exist). This file lets you define all available models, API keys, and default settings in one place.
##### Quick Setup in 3 Steps
**Step 1:** Create or edit `~/.qwen/settings.json`
Here is a complete example:
```json
{
"modelProviders": {
"openai": [
{
"id": "qwen3-coder-plus",
"name": "qwen3-coder-plus",
"baseUrl": "https://dashscope.aliyuncs.com/compatible-mode/v1",
"description": "Qwen3-Coder via Dashscope",
"envKey": "DASHSCOPE_API_KEY"
}
]
},
"env": {
"DASHSCOPE_API_KEY": "sk-xxxxxxxxxxxxx"
},
"security": {
"auth": {
"selectedType": "openai"
}
},
"model": {
"name": "qwen3-coder-plus"
}
}
```
**Step 2:** Understand each field
| Field | What it does |
| ---------------------------- | ------------------------------------------------------------------------------------------------------------------------------------- |
| `modelProviders` | Declares which models are available and how to connect to them. Keys like `openai`, `anthropic`, `gemini` represent the API protocol. |
| `modelProviders[].id` | The model ID sent to the API (e.g. `qwen3-coder-plus`, `gpt-4o`). |
| `modelProviders[].envKey` | The name of the environment variable that holds your API key. |
| `modelProviders[].baseUrl` | The API endpoint URL (required for non-default endpoints). |
| `env` | A fallback place to store API keys (lowest priority; prefer `.env` files or `export` for sensitive keys). |
| `security.auth.selectedType` | The protocol to use on startup (`openai`, `anthropic`, `gemini`, `vertex-ai`). |
| `model.name` | The default model to use when Qwen Code starts. |
**Step 3:** Start Qwen Code — your configuration takes effect automatically:
```bash
qwen
```
Use the `/model` command at any time to switch between all configured models.
##### More Examples
<details>
<summary>Coding Plan (Alibaba Cloud Bailian) — fixed monthly fee, higher quotas</summary>
```json
{
"modelProviders": {
"openai": [
{
"id": "qwen3.5-plus",
"name": "qwen3.5-plus (Coding Plan)",
"baseUrl": "https://coding.dashscope.aliyuncs.com/v1",
"description": "qwen3.5-plus with thinking enabled from Bailian Coding Plan",
"envKey": "BAILIAN_CODING_PLAN_API_KEY",
"generationConfig": {
"extra_body": {
"enable_thinking": true
}
}
},
{
"id": "qwen3-coder-plus",
"name": "qwen3-coder-plus (Coding Plan)",
"baseUrl": "https://coding.dashscope.aliyuncs.com/v1",
"description": "qwen3-coder-plus from Bailian Coding Plan",
"envKey": "BAILIAN_CODING_PLAN_API_KEY"
},
{
"id": "qwen3-coder-next",
"name": "qwen3-coder-next (Coding Plan)",
"baseUrl": "https://coding.dashscope.aliyuncs.com/v1",
"description": "qwen3-coder-next with thinking enabled from Bailian Coding Plan",
"envKey": "BAILIAN_CODING_PLAN_API_KEY",
"generationConfig": {
"extra_body": {
"enable_thinking": true
}
}
},
{
"id": "glm-4.7",
"name": "glm-4.7 (Coding Plan)",
"baseUrl": "https://coding.dashscope.aliyuncs.com/v1",
"description": "glm-4.7 with thinking enabled from Bailian Coding Plan",
"envKey": "BAILIAN_CODING_PLAN_API_KEY",
"generationConfig": {
"extra_body": {
"enable_thinking": true
}
}
},
{
"id": "kimi-k2.5",
"name": "kimi-k2.5 (Coding Plan)",
"baseUrl": "https://coding.dashscope.aliyuncs.com/v1",
"description": "kimi-k2.5 with thinking enabled from Bailian Coding Plan",
"envKey": "BAILIAN_CODING_PLAN_API_KEY",
"generationConfig": {
"extra_body": {
"enable_thinking": true
}
}
}
]
},
"env": {
"BAILIAN_CODING_PLAN_API_KEY": "sk-xxxxxxxxxxxxx"
},
"security": {
"auth": {
"selectedType": "openai"
}
},
"model": {
"name": "qwen3-coder-plus"
}
}
```
> Subscribe to the Coding Plan and get your API key at [Alibaba Cloud Bailian](https://modelstudio.console.aliyun.com/?tab=dashboard#/efm/coding_plan).
</details>
<details>
<summary>Multiple providers (OpenAI + Anthropic + Gemini)</summary>
```json
{
"modelProviders": {
"openai": [
{
"id": "gpt-4o",
"name": "GPT-4o",
"envKey": "OPENAI_API_KEY",
"baseUrl": "https://api.openai.com/v1"
}
],
"anthropic": [
{
"id": "claude-sonnet-4-20250514",
"name": "Claude Sonnet 4",
"envKey": "ANTHROPIC_API_KEY"
}
],
"gemini": [
{
"id": "gemini-2.5-pro",
"name": "Gemini 2.5 Pro",
"envKey": "GEMINI_API_KEY"
}
]
},
"env": {
"OPENAI_API_KEY": "sk-xxxxxxxxxxxxx",
"ANTHROPIC_API_KEY": "sk-ant-xxxxxxxxxxxxx",
"GEMINI_API_KEY": "AIzaxxxxxxxxxxxxx"
},
"security": {
"auth": {
"selectedType": "openai"
}
},
"model": {
"name": "gpt-4o"
}
}
```
</details>
<details>
<summary>Enable thinking mode (for supported models like qwen3.5-plus)</summary>
```json
{
"modelProviders": {
"openai": [
{
"id": "qwen3.5-plus",
"name": "qwen3.5-plus (thinking)",
"envKey": "DASHSCOPE_API_KEY",
"baseUrl": "https://dashscope.aliyuncs.com/compatible-mode/v1",
"generationConfig": {
"extra_body": {
"enable_thinking": true
}
}
}
]
},
"env": {
"DASHSCOPE_API_KEY": "sk-xxxxxxxxxxxxx"
},
"security": {
"auth": {
"selectedType": "openai"
}
},
"model": {
"name": "qwen3.5-plus"
}
}
```
</details>
> **Tip:** You can also set API keys via `export` in your shell or `.env` files, which take higher priority than the `env` field in `settings.json`. See the [authentication guide](https://qwenlm.github.io/qwen-code-docs/en/users/configuration/auth/) for full details.
> **Security note:** Never commit API keys to version control. The `~/.qwen/settings.json` file is in your home directory and should stay private.
## Usage
@ -191,10 +417,21 @@ Build on top of Qwen Code with the TypeScript SDK:
Qwen Code can be configured via `settings.json`, environment variables, and CLI flags.
- **User settings**: `~/.qwen/settings.json`
- **Project settings**: `.qwen/settings.json`
| File | Scope | Description |
| ----------------------- | ------------- | --------------------------------------------------------------------------------------- |
| `~/.qwen/settings.json` | User (global) | Applies to all your Qwen Code sessions. **Recommended for `modelProviders` and `env`.** |
| `.qwen/settings.json` | Project | Applies only when running Qwen Code in this project. Overrides user settings. |
See [settings](https://qwenlm.github.io/qwen-code-docs/en/users/configuration/settings/) for available options and precedence.
The most commonly used top-level fields in `settings.json`:
| Field | Description |
| ---------------------------- | ---------------------------------------------------------------------------------------------------- |
| `modelProviders` | Define available models per protocol (`openai`, `anthropic`, `gemini`, `vertex-ai`). |
| `env` | Fallback environment variables (e.g. API keys). Lower priority than shell `export` and `.env` files. |
| `security.auth.selectedType` | The protocol to use on startup (e.g. `openai`). |
| `model.name` | The default model to use when Qwen Code starts. |
> See the [Authentication](#api-key-flexible) section above for complete `settings.json` examples, and the [settings reference](https://qwenlm.github.io/qwen-code-docs/en/users/configuration/settings/) for all available options.
## Benchmark Results

View file

@ -1,5 +1,9 @@
# Reporting Security Issues
# Security Policy
Please report any security issue or crash report to [ASRC](https://security.alibaba.com/) (Alibaba Security Response Center) where the issue will be triaged appropriately.
## Reporting a Vulnerability
Thank you for helping keep our project secure.
If you believe you have discovered a security vulnerability, please report it to us through the following portal: [Report Security Issue](https://yundun.console.aliyun.com/?p=xznew#/taskmanagement/tasks/detail/151)
> **Note:** This channel is strictly for reporting security-related issues. Non-security vulnerabilities or general bug reports will not be addressed here.
We sincerely appreciate your responsible disclosure and your contribution to helping us keep our project secure.

View file

@ -2,13 +2,13 @@
> **Objective**: Catch up with Claude Code's product functionality, continuously refine details, and enhance user experience.
| Category | Phase 1 | Phase 2 |
| ------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------- |
| User Experience | ✅ Terminal UI<br>✅ Support OpenAI Protocol<br>✅ Settings<br>✅ OAuth<br>✅ Cache Control<br>✅ Memory<br>✅ Compress<br>✅ Theme | Better UI<br>OnBoarding<br>LogView<br>✅ Session<br>Permission<br>🔄 Cross-platform Compatibility |
| Coding Workflow | ✅ Slash Commands<br>✅ MCP<br>✅ PlanMode<br>✅ TodoWrite<br>✅ SubAgent<br>✅ Multi Model<br>✅ Chat Management<br>✅ Tools (WebFetch, Bash, TextSearch, FileReadFile, EditFile) | 🔄 Hooks<br>SubAgent (enhanced)<br>✅ Skill<br>✅ Headless Mode<br>✅ Tools (WebSearch) |
| Building Open Capabilities | ✅ Custom Commands | ✅ QwenCode SDK<br> Extension |
| Integrating Community Ecosystem | | ✅ VSCode Plugin<br>🔄 ACP/Zed<br>✅ GHA |
| Administrative Capabilities | ✅ Stats<br>✅ Feedback | Costs<br>Dashboard |
| Category | Phase 1 | Phase 2 |
| ------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| User Experience | ✅ Terminal UI<br>✅ Support OpenAI Protocol<br>✅ Settings<br>✅ OAuth<br>✅ Cache Control<br>✅ Memory<br>✅ Compress<br>✅ Theme | Better UI<br>OnBoarding<br>LogView<br>✅ Session<br>Permission<br>🔄 Cross-platform Compatibility<br>✅ Coding Plan<br>✅ Anthropic Provider<br>✅ Multimodal Input<br>✅ Unified WebUI |
| Coding Workflow | ✅ Slash Commands<br>✅ MCP<br>✅ PlanMode<br>✅ TodoWrite<br>✅ SubAgent<br>✅ Multi Model<br>✅ Chat Management<br>✅ Tools (WebFetch, Bash, TextSearch, FileReadFile, EditFile) | 🔄 Hooks<br>✅ Skill<br>✅ Headless Mode<br>✅ Tools (WebSearch)<br>✅ LSP Support<br>✅ Concurrent Runner |
| Building Open Capabilities | ✅ Custom Commands | ✅ QwenCode SDK<br> Extension System |
| Integrating Community Ecosystem | | ✅ VSCode Plugin<br> ACP/Zed<br>✅ GHA |
| Administrative Capabilities | ✅ Stats<br>✅ Feedback | Costs<br>Dashboard<br>✅ User Feedback Dialog |
> For more details, please see the list below.
@ -16,39 +16,48 @@
#### Completed Features
| Feature | Version | Description | Category |
| ----------------------- | --------- | ------------------------------------------------------- | ------------------------------- |
| Skill | `V0.6.0` | Extensible custom AI skills | Coding Workflow |
| Github Actions | `V0.5.0` | qwen-code-action and automation | Integrating Community Ecosystem |
| VSCode Plugin | `V0.5.0` | VSCode extension plugin | Integrating Community Ecosystem |
| QwenCode SDK | `V0.4.0` | Open SDK for third-party integration | Building Open Capabilities |
| Session | `V0.4.0` | Enhanced session management | User Experience |
| i18n | `V0.3.0` | Internationalization and multilingual support | User Experience |
| Headless Mode | `V0.3.0` | Headless mode (non-interactive) | Coding Workflow |
| ACP/Zed | `V0.2.0` | ACP and Zed editor integration | Integrating Community Ecosystem |
| Terminal UI | `V0.1.0+` | Interactive terminal user interface | User Experience |
| Settings | `V0.1.0+` | Configuration management system | User Experience |
| Theme | `V0.1.0+` | Multi-theme support | User Experience |
| Support OpenAI Protocol | `V0.1.0+` | Support for OpenAI API protocol | User Experience |
| Chat Management | `V0.1.0+` | Session management (save, restore, browse) | Coding Workflow |
| MCP | `V0.1.0+` | Model Context Protocol integration | Coding Workflow |
| Multi Model | `V0.1.0+` | Multi-model support and switching | Coding Workflow |
| Slash Commands | `V0.1.0+` | Slash command system | Coding Workflow |
| Tool: Bash | `V0.1.0+` | Shell command execution tool (with is_background param) | Coding Workflow |
| Tool: FileRead/EditFile | `V0.1.0+` | File read/write and edit tools | Coding Workflow |
| Custom Commands | `V0.1.0+` | Custom command loading | Building Open Capabilities |
| Feedback | `V0.1.0+` | Feedback mechanism (/bug command) | Administrative Capabilities |
| Stats | `V0.1.0+` | Usage statistics and quota display | Administrative Capabilities |
| Memory | `V0.0.9+` | Project-level and global memory management | User Experience |
| Cache Control | `V0.0.9+` | Prompt caching control (Anthropic, DashScope) | User Experience |
| PlanMode | `V0.0.14` | Task planning mode | Coding Workflow |
| Compress | `V0.0.11` | Chat compression mechanism | User Experience |
| SubAgent | `V0.0.11` | Dedicated sub-agent system | Coding Workflow |
| TodoWrite | `V0.0.10` | Task management and progress tracking | Coding Workflow |
| Tool: TextSearch | `V0.0.8+` | Text search tool (grep, supports .qwenignore) | Coding Workflow |
| Tool: WebFetch | `V0.0.7+` | Web content fetching tool | Coding Workflow |
| Tool: WebSearch | `V0.0.7+` | Web search tool (using Tavily API) | Coding Workflow |
| OAuth | `V0.0.5+` | OAuth login authentication (Qwen OAuth) | User Experience |
| Feature | Version | Description | Category | Phase |
| ----------------------- | --------- | ------------------------------------------------------- | ------------------------------- | ----- |
| **Coding Plan** | `V0.10.0` | Bailian Coding Plan authentication & models | User Experience | 2 |
| Unified WebUI | `V0.9.0` | Shared WebUI component library for VSCode/CLI | User Experience | 2 |
| Export Chat | `V0.8.0` | Export sessions to Markdown/HTML/JSON/JSONL | User Experience | 2 |
| Extension System | `V0.8.0` | Full extension management with slash commands | Building Open Capabilities | 2 |
| LSP Support | `V0.7.0` | Experimental LSP service (`--experimental-lsp`) | Coding Workflow | 2 |
| Anthropic Provider | `V0.7.0` | Anthropic API provider support | User Experience | 2 |
| User Feedback Dialog | `V0.7.0` | In-app feedback collection with fatigue mechanism | Administrative Capabilities | 2 |
| Concurrent Runner | `V0.6.0` | Batch CLI execution with Git integration | Coding Workflow | 2 |
| Multimodal Input | `V0.6.0` | Image, PDF, audio, video input support | User Experience | 2 |
| Skill | `V0.6.0` | Extensible custom AI skills (experimental) | Coding Workflow | 2 |
| Github Actions | `V0.5.0` | qwen-code-action and automation | Integrating Community Ecosystem | 1 |
| VSCode Plugin | `V0.5.0` | VSCode extension plugin | Integrating Community Ecosystem | 1 |
| QwenCode SDK | `V0.4.0` | Open SDK for third-party integration | Building Open Capabilities | 1 |
| Session | `V0.4.0` | Enhanced session management | User Experience | 1 |
| i18n | `V0.3.0` | Internationalization and multilingual support | User Experience | 1 |
| Headless Mode | `V0.3.0` | Headless mode (non-interactive) | Coding Workflow | 1 |
| ACP/Zed | `V0.2.0` | ACP and Zed editor integration | Integrating Community Ecosystem | 1 |
| Terminal UI | `V0.1.0+` | Interactive terminal user interface | User Experience | 1 |
| Settings | `V0.1.0+` | Configuration management system | User Experience | 1 |
| Theme | `V0.1.0+` | Multi-theme support | User Experience | 1 |
| Support OpenAI Protocol | `V0.1.0+` | Support for OpenAI API protocol | User Experience | 1 |
| Chat Management | `V0.1.0+` | Session management (save, restore, browse) | Coding Workflow | 1 |
| MCP | `V0.1.0+` | Model Context Protocol integration | Coding Workflow | 1 |
| Multi Model | `V0.1.0+` | Multi-model support and switching | Coding Workflow | 1 |
| Slash Commands | `V0.1.0+` | Slash command system | Coding Workflow | 1 |
| Tool: Bash | `V0.1.0+` | Shell command execution tool (with is_background param) | Coding Workflow | 1 |
| Tool: FileRead/EditFile | `V0.1.0+` | File read/write and edit tools | Coding Workflow | 1 |
| Custom Commands | `V0.1.0+` | Custom command loading | Building Open Capabilities | 1 |
| Feedback | `V0.1.0+` | Feedback mechanism (/bug command) | Administrative Capabilities | 1 |
| Stats | `V0.1.0+` | Usage statistics and quota display | Administrative Capabilities | 1 |
| Memory | `V0.0.9+` | Project-level and global memory management | User Experience | 1 |
| Cache Control | `V0.0.9+` | Prompt caching control (Anthropic, DashScope) | User Experience | 1 |
| PlanMode | `V0.0.14` | Task planning mode | Coding Workflow | 1 |
| Compress | `V0.0.11` | Chat compression mechanism | User Experience | 1 |
| SubAgent | `V0.0.11` | Dedicated sub-agent system | Coding Workflow | 1 |
| TodoWrite | `V0.0.10` | Task management and progress tracking | Coding Workflow | 1 |
| Tool: TextSearch | `V0.0.8+` | Text search tool (grep, supports .qwenignore) | Coding Workflow | 1 |
| Tool: WebFetch | `V0.0.7+` | Web content fetching tool | Coding Workflow | 1 |
| Tool: WebSearch | `V0.0.7+` | Web search tool (using Tavily API) | Coding Workflow | 1 |
| OAuth | `V0.0.5+` | OAuth login authentication (Qwen OAuth) | User Experience | 1 |
#### Features to Develop
@ -60,7 +69,6 @@
| Cross-platform Compatibility | P1 | In Progress | Windows/Linux/macOS compatibility | User Experience |
| LogView | P2 | Planned | Log viewing and debugging feature | User Experience |
| Hooks | P2 | In Progress | Extension hooks system | Coding Workflow |
| Extension | P2 | Planned | Extension system | Building Open Capabilities |
| Costs | P2 | Planned | Cost tracking and analysis | Administrative Capabilities |
| Dashboard | P2 | Planned | Management dashboard | Administrative Capabilities |

View file

@ -31,6 +31,52 @@ qwen
Use this if you want more flexibility over which provider and model to use. Supports multiple protocols and providers, including OpenAI, Anthropic, Google GenAI, Alibaba Cloud Bailian, Azure OpenAI, OpenRouter, ModelScope, or a self-hosted compatible endpoint.
### Recommended: One-file setup via `settings.json`
The simplest way to get started with API-KEY authentication is to put everything in a single `~/.qwen/settings.json` file. Here's a complete, ready-to-use example:
```json
{
"modelProviders": {
"openai": [
{
"id": "qwen3-coder-plus",
"name": "qwen3-coder-plus",
"baseUrl": "https://dashscope.aliyuncs.com/compatible-mode/v1",
"description": "Qwen3-Coder via Dashscope",
"envKey": "DASHSCOPE_API_KEY"
}
]
},
"env": {
"DASHSCOPE_API_KEY": "sk-xxxxxxxxxxxxx"
},
"security": {
"auth": {
"selectedType": "openai"
}
},
"model": {
"name": "qwen3-coder-plus"
}
}
```
What each field does:
| Field | Description |
| ---------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------- |
| `modelProviders` | Declares which models are available and how to connect to them. Keys (`openai`, `anthropic`, `gemini`, `vertex-ai`) represent the API protocol. |
| `env` | Stores API keys directly in `settings.json` as a fallback (lowest priority — shell `export` and `.env` files take precedence). |
| `security.auth.selectedType` | Tells Qwen Code which protocol to use on startup (e.g. `openai`, `anthropic`, `gemini`). Without this, you'd need to run `/auth` interactively. |
| `model.name` | The default model to activate when Qwen Code starts. Must match one of the `id` values in your `modelProviders`. |
After saving the file, just run `qwen` — no interactive `/auth` setup needed.
> [!tip]
>
> The sections below explain each part in more detail. If the quick example above works for you, feel free to skip ahead to [Security notes](#security-notes).
### Option 1: Coding Plan (Aliyun Bailian)
Use this if you want predictable costs with higher usage quotas for the qwen3-coder-plus model.
@ -48,10 +94,45 @@ After entering, select `Coding Plan`:
![](https://gw.alicdn.com/imgextra/i4/O1CN01Irk0AD1ebfop69o0r_!!6000000003890-2-tps-2308-830.png)
Enter your `sk-sp-xxxxxxxxx` key, then use the `/model` command to switch between all Bailian `Coding Plan` supported models:
Enter your `sk-sp-xxxxxxxxx` key, then use the `/model` command to switch between all Bailian `Coding Plan` supported models (including qwen3.5-plus, qwen3-coder-plus, qwen3-coder-next, qwen3-max, glm-4.7, and kimi-k2.5):
![](https://gw.alicdn.com/imgextra/i4/O1CN01fWArmf1kaCEgSmPln_!!6000000004699-2-tps-2304-1374.png)
**Alternative: configure Coding Plan via `settings.json`**
If you prefer to skip the interactive `/auth` flow, add the following to `~/.qwen/settings.json`:
```json
{
"modelProviders": {
"openai": [
{
"id": "qwen3-coder-plus",
"name": "qwen3-coder-plus (Coding Plan)",
"baseUrl": "https://coding.dashscope.aliyuncs.com/v1",
"description": "qwen3-coder-plus from Bailian Coding Plan",
"envKey": "BAILIAN_CODING_PLAN_API_KEY"
}
]
},
"env": {
"BAILIAN_CODING_PLAN_API_KEY": "sk-sp-xxxxxxxxx"
},
"security": {
"auth": {
"selectedType": "openai"
}
},
"model": {
"name": "qwen3-coder-plus"
}
}
```
> [!note]
>
> The Coding Plan uses a dedicated endpoint (`https://coding.dashscope.aliyuncs.com/v1`) that is different from the standard Dashscope endpoint. Make sure to use the correct `baseUrl`.
### Option 2: Third-party API-KEY
Use this if you want to connect to third-party providers such as OpenAI, Anthropic, Google, Azure OpenAI, OpenRouter, ModelScope, or a self-hosted endpoint.
@ -67,7 +148,7 @@ The key concept is **Model Providers** (`modelProviders`): Qwen Code supports mu
| Google GenAI | `gemini` | `GEMINI_API_KEY`, `GEMINI_MODEL` | Google Gemini |
| Google Vertex AI | `vertex-ai` | `GOOGLE_API_KEY`, `GOOGLE_MODEL` | Google Vertex AI |
#### Step 1: Configure `modelProviders` in `~/.qwen/settings.json`
#### Step 1: Configure models and providers in `~/.qwen/settings.json`
Define which models are available for each protocol. Each model entry requires at minimum an `id` and an `envKey` (the environment variable name that holds your API key).
@ -75,7 +156,7 @@ Define which models are available for each protocol. Each model entry requires a
>
> It is recommended to define `modelProviders` in the user-scope `~/.qwen/settings.json` to avoid merge conflicts between project and user settings.
Edit `~/.qwen/settings.json` (create it if it doesn't exist):
Edit `~/.qwen/settings.json` (create it if it doesn't exist). You can mix multiple protocols in a single file — here is a multi-provider example showing just the `modelProviders` section:
```json
{
@ -106,7 +187,11 @@ Edit `~/.qwen/settings.json` (create it if it doesn't exist):
}
```
You can mix multiple protocols and models in a single configuration. The `ModelConfig` fields are:
> [!tip]
>
> Don't forget to also set `env`, `security.auth.selectedType`, and `model.name` alongside `modelProviders` — see the [complete example above](#recommended-one-file-setup-via-settingsjson) for reference.
**`ModelConfig` fields (each entry inside `modelProviders`):**
| Field | Required | Description |
| ------------------ | -------- | -------------------------------------------------------------------- |
@ -118,9 +203,9 @@ You can mix multiple protocols and models in a single configuration. The `ModelC
> [!note]
>
> Credentials are **never** stored in `settings.json`. The runtime reads them from the environment variable specified in `envKey`.
> When using the `env` field in `settings.json`, credentials are stored in plain text. For better security, prefer `.env` files or shell `export` — see [Step 2](#step-2-set-environment-variables).
For the full `modelProviders` schema and advanced options like `generationConfig`, `customHeaders`, and `extra_body`, see [Settings Reference → modelProviders](settings.md#modelproviders).
For the full `modelProviders` schema and advanced options like `generationConfig`, `customHeaders`, and `extra_body`, see [Model Providers Reference](model-providers.md).
#### Step 2: Set environment variables
@ -165,25 +250,19 @@ If nothing is found, it falls back to your **home directory**:
**3. `settings.json` `env` field (lowest priority)**
You can also define environment variables directly in `~/.qwen/settings.json` under the `env` key. These are loaded as the **lowest-priority fallback** — only applied when a variable is not already set by the system environment or `.env` files.
You can also define API keys directly in `~/.qwen/settings.json` under the `env` key. These are loaded as the **lowest-priority fallback** — only applied when a variable is not already set by the system environment or `.env` files.
```json
{
"env": {
"DASHSCOPE_API_KEY":"sk-...",
"DASHSCOPE_API_KEY": "sk-...",
"OPENAI_API_KEY": "sk-...",
"ANTHROPIC_API_KEY": "sk-ant-...",
"GEMINI_API_KEY": "AIza..."
},
"modelProviders": {
...
"ANTHROPIC_API_KEY": "sk-ant-..."
}
}
```
> [!note]
>
> This is useful when you want to keep all configuration (providers + credentials) in a single file. However, be mindful that `settings.json` may be shared or synced — prefer `.env` files for sensitive secrets.
This is the approach used in the [one-file setup example](#recommended-one-file-setup-via-settingsjson) above. It's convenient for keeping everything in one place, but be mindful that `settings.json` may be shared or synced — prefer `.env` files for sensitive secrets.
**Priority summary:**

View file

@ -0,0 +1,521 @@
# Model Providers
Qwen Code allows you to configure multiple model providers through the `modelProviders` setting in your `settings.json`. This enables you to switch between different AI models and providers using the `/model` command.
## Overview
Use `modelProviders` to declare curated model lists per auth type that the `/model` picker can switch between. Keys must be valid auth types (`openai`, `anthropic`, `gemini`, `vertex-ai`, etc.). Each entry requires an `id` and **must include `envKey`**, with optional `name`, `description`, `baseUrl`, and `generationConfig`. Credentials are never persisted in settings; the runtime reads them from `process.env[envKey]`. Qwen OAuth models remain hard-coded and cannot be overridden.
> [!note]
> Only the `/model` command exposes non-default auth types. Anthropic, Gemini, Vertex AI, etc., must be defined via `modelProviders`. The `/auth` command intentionally lists only the built-in Qwen OAuth and OpenAI flows.
> [!warning]
> **Duplicate model IDs within the same authType:** Defining multiple models with the same `id` under a single `authType` (e.g., two entries with `"id": "gpt-4o"` in `openai`) is currently not supported. If duplicates exist, **the first occurrence wins** and subsequent duplicates are skipped with a warning. Note that the `id` field is used both as the configuration identifier and as the actual model name sent to the API, so using unique IDs (e.g., `gpt-4o-creative`, `gpt-4o-balanced`) is not a viable workaround. This is a known limitation that we plan to address in a future release.
## Configuration Examples by Auth Type
Below are comprehensive configuration examples for different authentication types, showing the available parameters and their combinations.
### Supported Auth Types
The `modelProviders` object keys must be valid `authType` values. Currently supported auth types are:
| Auth Type | Description |
| ------------ | --------------------------------------------------------------------------------------- |
| `openai` | OpenAI-compatible APIs (OpenAI, Azure OpenAI, local inference servers like vLLM/Ollama) |
| `anthropic` | Anthropic Claude API |
| `gemini` | Google Gemini API |
| `vertex-ai` | Google Vertex AI |
| `qwen-oauth` | Qwen OAuth (hard-coded, cannot be overridden in `modelProviders`) |
> [!warning]
> If an invalid auth type key is used (e.g., a typo like `"openai-custom"`), the configuration will be **silently skipped** and the models will not appear in the `/model` picker. Always use one of the supported auth type values listed above.
### SDKs Used for API Requests
Qwen Code uses the following official SDKs to send requests to each provider:
| Auth Type | SDK Package |
| ---------------------- | ----------------------------------------------------------------------------------------------- |
| `openai` | [`openai`](https://www.npmjs.com/package/openai) - Official OpenAI Node.js SDK |
| `anthropic` | [`@anthropic-ai/sdk`](https://www.npmjs.com/package/@anthropic-ai/sdk) - Official Anthropic SDK |
| `gemini` / `vertex-ai` | [`@google/genai`](https://www.npmjs.com/package/@google/genai) - Official Google GenAI SDK |
| `qwen-oauth` | [`openai`](https://www.npmjs.com/package/openai) with custom provider (DashScope-compatible) |
This means the `baseUrl` you configure should be compatible with the corresponding SDK's expected API format. For example, when using `openai` auth type, the endpoint must accept OpenAI API format requests.
### OpenAI-compatible providers (`openai`)
This auth type supports not only OpenAI's official API but also any OpenAI-compatible endpoint, including aggregated model providers like OpenRouter.
```json
{
"modelProviders": {
"openai": [
{
"id": "gpt-4o",
"name": "GPT-4o",
"envKey": "OPENAI_API_KEY",
"baseUrl": "https://api.openai.com/v1",
"generationConfig": {
"timeout": 60000,
"maxRetries": 3,
"enableCacheControl": true,
"contextWindowSize": 128000,
"customHeaders": {
"X-Client-Request-ID": "req-123"
},
"extra_body": {
"enable_thinking": true,
"service_tier": "priority"
},
"samplingParams": {
"temperature": 0.2,
"top_p": 0.8,
"max_tokens": 4096,
"presence_penalty": 0.1,
"frequency_penalty": 0.1
}
}
},
{
"id": "gpt-4o-mini",
"name": "GPT-4o Mini",
"envKey": "OPENAI_API_KEY",
"baseUrl": "https://api.openai.com/v1",
"generationConfig": {
"timeout": 30000,
"samplingParams": {
"temperature": 0.5,
"max_tokens": 2048
}
}
},
{
"id": "openai/gpt-4o",
"name": "GPT-4o (via OpenRouter)",
"envKey": "OPENROUTER_API_KEY",
"baseUrl": "https://openrouter.ai/api/v1",
"generationConfig": {
"timeout": 120000,
"maxRetries": 3,
"samplingParams": {
"temperature": 0.7
}
}
}
]
}
}
```
### Anthropic (`anthropic`)
```json
{
"modelProviders": {
"anthropic": [
{
"id": "claude-3-5-sonnet",
"name": "Claude 3.5 Sonnet",
"envKey": "ANTHROPIC_API_KEY",
"baseUrl": "https://api.anthropic.com/v1",
"generationConfig": {
"timeout": 120000,
"maxRetries": 3,
"contextWindowSize": 200000,
"samplingParams": {
"temperature": 0.7,
"max_tokens": 8192,
"top_p": 0.9
}
}
},
{
"id": "claude-3-opus",
"name": "Claude 3 Opus",
"envKey": "ANTHROPIC_API_KEY",
"baseUrl": "https://api.anthropic.com/v1",
"generationConfig": {
"timeout": 180000,
"samplingParams": {
"temperature": 0.3,
"max_tokens": 4096
}
}
}
]
}
}
```
### Google Gemini (`gemini`)
```json
{
"modelProviders": {
"gemini": [
{
"id": "gemini-2.0-flash",
"name": "Gemini 2.0 Flash",
"envKey": "GEMINI_API_KEY",
"baseUrl": "https://generativelanguage.googleapis.com",
"capabilities": {
"vision": true
},
"generationConfig": {
"timeout": 60000,
"maxRetries": 2,
"contextWindowSize": 1000000,
"schemaCompliance": "auto",
"samplingParams": {
"temperature": 0.4,
"top_p": 0.95,
"max_tokens": 8192,
"top_k": 40
}
}
}
]
}
}
```
### Google Vertex AI (`vertex-ai`)
```json
{
"modelProviders": {
"vertex-ai": [
{
"id": "gemini-1.5-pro-vertex",
"name": "Gemini 1.5 Pro (Vertex AI)",
"envKey": "GOOGLE_API_KEY",
"baseUrl": "https://generativelanguage.googleapis.com",
"generationConfig": {
"timeout": 90000,
"contextWindowSize": 2000000,
"samplingParams": {
"temperature": 0.2,
"max_tokens": 8192
}
}
}
]
}
}
```
### Local Self-Hosted Models (via OpenAI-compatible API)
Most local inference servers (vLLM, Ollama, LM Studio, etc.) provide an OpenAI-compatible API endpoint. Configure them using the `openai` auth type with a local `baseUrl`:
```json
{
"modelProviders": {
"openai": [
{
"id": "qwen2.5-7b",
"name": "Qwen2.5 7B (Ollama)",
"envKey": "OLLAMA_API_KEY",
"baseUrl": "http://localhost:11434/v1",
"generationConfig": {
"timeout": 300000,
"maxRetries": 1,
"contextWindowSize": 32768,
"samplingParams": {
"temperature": 0.7,
"top_p": 0.9,
"max_tokens": 4096
}
}
},
{
"id": "llama-3.1-8b",
"name": "Llama 3.1 8B (vLLM)",
"envKey": "VLLM_API_KEY",
"baseUrl": "http://localhost:8000/v1",
"generationConfig": {
"timeout": 120000,
"maxRetries": 2,
"contextWindowSize": 128000,
"samplingParams": {
"temperature": 0.6,
"max_tokens": 8192
}
}
},
{
"id": "local-model",
"name": "Local Model (LM Studio)",
"envKey": "LMSTUDIO_API_KEY",
"baseUrl": "http://localhost:1234/v1",
"generationConfig": {
"timeout": 60000,
"samplingParams": {
"temperature": 0.5
}
}
}
]
}
}
```
For local servers that don't require authentication, you can use any placeholder value for the API key:
```bash
# For Ollama (no auth required)
export OLLAMA_API_KEY="ollama"
# For vLLM (if no auth is configured)
export VLLM_API_KEY="not-needed"
```
> [!note]
> The `extra_body` parameter is **only supported for OpenAI-compatible providers** (`openai`, `qwen-oauth`). It is ignored for Anthropic, Gemini, and Vertex AI providers.
## Bailian Coding Plan
Bailian Coding Plan provides a pre-configured set of Qwen models optimized for coding tasks. This feature is available for users with Bailian API access and offers a simplified setup experience with automatic model configuration updates.
### Overview
When you authenticate with a Bailian Coding Plan API key using the `/auth` command, Qwen Code automatically configures the following models:
| Model ID | Name | Description |
| ---------------------- | -------------------- | -------------------------------------- |
| `qwen3.5-plus` | qwen3.5-plus | Advanced model with thinking enabled |
| `qwen3-coder-plus` | qwen3-coder-plus | Optimized for coding tasks |
| `qwen3-max-2026-01-23` | qwen3-max-2026-01-23 | Latest max model with thinking enabled |
### Setup
1. Obtain a Bailian Coding Plan API key:
- **China**: <https://bailian.console.aliyun.com/?tab=model#/efm/coding_plan>
- **International**: <https://modelstudio.console.alibabacloud.com/?tab=dashboard#/efm/coding_plan>
2. Run the `/auth` command in Qwen Code
3. Select the API-KEY authentication method
4. Select your region (China or Global/International)
5. Enter your API key when prompted
The models will be automatically configured and added to your `/model` picker.
### Regions
Bailian Coding Plan supports two regions:
| Region | Endpoint | Description |
| -------------------- | ----------------------------------------------- | ----------------------- |
| China | `https://coding.dashscope.aliyuncs.com/v1` | Mainland China endpoint |
| Global/International | `https://coding-intl.dashscope.aliyuncs.com/v1` | International endpoint |
The region is selected during authentication and stored in `settings.json` under `codingPlan.region`. To switch regions, re-run the `/auth` command and select a different region.
### API Key Storage
When you configure Coding Plan through the `/auth` command, the API key is stored using the reserved environment variable name `BAILIAN_CODING_PLAN_API_KEY`. By default, it is stored in the `env` field of your `settings.json` file.
> [!warning]
> **Security Recommendation**: For better security, it is recommended to move the API key from `settings.json` to a separate `.env` file and load it as an environment variable. For example:
>
> ```bash
> # ~/.qwen/.env
> BAILIAN_CODING_PLAN_API_KEY=your-api-key-here
> ```
>
> Then ensure this file is added to your `.gitignore` if you're using project-level settings.
### Automatic Updates
Coding Plan model configurations are versioned. When Qwen Code detects a newer version of the model template, you will be prompted to update. Accepting the update will:
- Replace the existing Coding Plan model configurations with the latest versions
- Preserve any custom model configurations you've added manually
- Automatically switch to the first model in the updated configuration
The update process ensures you always have access to the latest model configurations and features without manual intervention.
### Manual Configuration (Advanced)
If you prefer to manually configure Coding Plan models, you can add them to your `settings.json` like any OpenAI-compatible provider:
```json
{
"modelProviders": {
"openai": [
{
"id": "qwen3-coder-plus",
"name": "qwen3-coder-plus",
"description": "Qwen3-Coder via Bailian Coding Plan",
"envKey": "YOUR_CUSTOM_ENV_KEY",
"baseUrl": "https://coding.dashscope.aliyuncs.com/v1"
}
]
}
}
```
> [!note]
> When using manual configuration:
> - You can use any environment variable name for `envKey`
> - You do not need to configure `codingPlan.*`
> - **Automatic updates will not apply** to manually configured Coding Plan models
> [!warning]
> If you also use automatic Coding Plan configuration, automatic updates may overwrite your manual configurations if they use the same `envKey` and `baseUrl` as the automatic configuration. To avoid this, ensure your manual configuration uses a different `envKey` if possible.
## Resolution Layers and Atomicity
The effective auth/model/credential values are chosen per field using the following precedence (first present wins). You can combine `--auth-type` with `--model` to point directly at a provider entry; these CLI flags run before other layers.
| Layer (highest → lowest) | authType | model | apiKey | baseUrl | apiKeyEnvKey | proxy |
| -------------------------- | ----------------------------------- | ----------------------------------------------- | --------------------------------------------------- | ---------------------------------------------------- | ---------------------- | --------------------------------- |
| Programmatic overrides | `/auth` | `/auth` input | `/auth` input | `/auth` input | — | — |
| Model provider selection | — | `modelProvider.id` | `env[modelProvider.envKey]` | `modelProvider.baseUrl` | `modelProvider.envKey` | — |
| CLI arguments | `--auth-type` | `--model` | `--openaiApiKey` (or provider-specific equivalents) | `--openaiBaseUrl` (or provider-specific equivalents) | — | — |
| Environment variables | — | Provider-specific mapping (e.g. `OPENAI_MODEL`) | Provider-specific mapping (e.g. `OPENAI_API_KEY`) | Provider-specific mapping (e.g. `OPENAI_BASE_URL`) | — | — |
| Settings (`settings.json`) | `security.auth.selectedType` | `model.name` | `security.auth.apiKey` | `security.auth.baseUrl` | — | — |
| Default / computed | Falls back to `AuthType.QWEN_OAUTH` | Built-in default (OpenAI ⇒ `qwen3-coder-plus`) | — | — | — | `Config.getProxy()` if configured |
\*When present, CLI auth flags override settings. Otherwise, `security.auth.selectedType` or the implicit default determine the auth type. Qwen OAuth and OpenAI are the only auth types surfaced without extra configuration.
> [!warning]
> **Deprecation of `security.auth.apiKey` and `security.auth.baseUrl`:** Directly configuring API credentials via `security.auth.apiKey` and `security.auth.baseUrl` in `settings.json` is deprecated. These settings were used in historical versions for credentials entered through the UI, but the credential input flow was removed in version 0.10.1. These fields will be fully removed in a future release. **It is strongly recommended to migrate to `modelProviders`** for all model and credential configurations. Use `envKey` in `modelProviders` to reference environment variables for secure credential management instead of hardcoding credentials in settings files.
## Generation Config Layering: The Impermeable Provider Layer
The configuration resolution follows a strict layering model with one crucial rule: **the modelProvider layer is impermeable**.
### How it works
1. **When a modelProvider model IS selected** (e.g., via `/model` command choosing a provider-configured model):
- The entire `generationConfig` from the provider is applied **atomically**
- **The provider layer is completely impermeable** — lower layers (CLI, env, settings) do not participate in generationConfig resolution at all
- All fields defined in `modelProviders[].generationConfig` use the provider's values
- All fields **not defined** by the provider are set to `undefined` (not inherited from settings)
- This ensures provider configurations act as a complete, self-contained "sealed package"
2. **When NO modelProvider model is selected** (e.g., using `--model` with a raw model ID, or using CLI/env/settings directly):
- The resolution falls through to lower layers
- Fields are populated from CLI → env → settings → defaults
- This creates a **Runtime Model** (see next section)
### Per-field precedence for `generationConfig`
| Priority | Source | Behavior |
| -------- | --------------------------------------------- | -------------------------------------------------------------------------------------------------------- |
| 1 | Programmatic overrides | Runtime `/model`, `/auth` changes |
| 2 | `modelProviders[authType][].generationConfig` | **Impermeable layer** - completely replaces all generationConfig fields; lower layers do not participate |
| 3 | `settings.model.generationConfig` | Only used for **Runtime Models** (when no provider model is selected) |
| 4 | Content-generator defaults | Provider-specific defaults (e.g., OpenAI vs Gemini) - only for Runtime Models |
### Atomic field treatment
The following fields are treated as atomic objects - provider values completely replace the entire object, no merging occurs:
- `samplingParams` - Temperature, top_p, max_tokens, etc.
- `customHeaders` - Custom HTTP headers
- `extra_body` - Extra request body parameters
### Example
```json
// User settings (~/.qwen/settings.json)
{
"model": {
"generationConfig": {
"timeout": 30000,
"samplingParams": { "temperature": 0.5, "max_tokens": 1000 }
}
}
}
// modelProviders configuration
{
"modelProviders": {
"openai": [{
"id": "gpt-4o",
"envKey": "OPENAI_API_KEY",
"generationConfig": {
"timeout": 60000,
"samplingParams": { "temperature": 0.2 }
}
}]
}
}
```
When `gpt-4o` is selected from modelProviders:
- `timeout` = 60000 (from provider, overrides settings)
- `samplingParams.temperature` = 0.2 (from provider, completely replaces settings object)
- `samplingParams.max_tokens` = **undefined** (not defined in provider, and provider layer does not inherit from settings — fields are explicitly set to undefined if not provided)
When using a raw model via `--model gpt-4` (not from modelProviders, creates a Runtime Model):
- `timeout` = 30000 (from settings)
- `samplingParams.temperature` = 0.5 (from settings)
- `samplingParams.max_tokens` = 1000 (from settings)
The merge strategy for `modelProviders` itself is REPLACE: the entire `modelProviders` from project settings will override the corresponding section in user settings, rather than merging the two.
## Provider Models vs Runtime Models
Qwen Code distinguishes between two types of model configurations:
### Provider Model
- Defined in `modelProviders` configuration
- Has a complete, atomic configuration package
- When selected, its configuration is applied as an impermeable layer
- Appears in `/model` command list with full metadata (name, description, capabilities)
- Recommended for multi-model workflows and team consistency
### Runtime Model
- Created dynamically when using raw model IDs via CLI (`--model`), environment variables, or settings
- Not defined in `modelProviders`
- Configuration is built by "projecting" through resolution layers (CLI → env → settings → defaults)
- Automatically captured as a **RuntimeModelSnapshot** when a complete configuration is detected
- Allows reuse without re-entering credentials
### RuntimeModelSnapshot lifecycle
When you configure a model without using `modelProviders`, Qwen Code automatically creates a RuntimeModelSnapshot to preserve your configuration:
```bash
# This creates a RuntimeModelSnapshot with ID: $runtime|openai|my-custom-model
qwen --auth-type openai --model my-custom-model --openaiApiKey $KEY --openaiBaseUrl https://api.example.com/v1
```
The snapshot:
- Captures model ID, API key, base URL, and generation config
- Persists across sessions (stored in memory during runtime)
- Appears in the `/model` command list as a runtime option
- Can be switched to using `/model $runtime|openai|my-custom-model`
### Key differences
| Aspect | Provider Model | Runtime Model |
| ----------------------- | --------------------------------- | ------------------------------------------ |
| Configuration source | `modelProviders` in settings | CLI, env, settings layers |
| Configuration atomicity | Complete, impermeable package | Layered, each field resolved independently |
| Reusability | Always available in `/model` list | Captured as snapshot, appears if complete |
| Team sharing | Yes (via committed settings) | No (user-local) |
| Credential storage | Reference via `envKey` only | May capture actual key in snapshot |
### When to use each
- **Use Provider Models** when: You have standard models shared across a team, need consistent configurations, or want to prevent accidental overrides
- **Use Runtime Models** when: Quickly testing a new model, using temporary credentials, or working with ad-hoc endpoints
## Selection Persistence and Recommendations
> [!important]
> Define `modelProviders` in the user-scope `~/.qwen/settings.json` whenever possible and avoid persisting credential overrides in any scope. Keeping the provider catalog in user settings prevents merge/override conflicts between project and user scopes and ensures `/auth` and `/model` updates always write back to a consistent scope.
- `/model` and `/auth` persist `model.name` (where applicable) and `security.auth.selectedType` to the closest writable scope that already defines `modelProviders`; otherwise they fall back to the user scope. This keeps workspace/user files in sync with the active provider catalog.
- Without `modelProviders`, the resolver mixes CLI/env/settings layers, creating Runtime Models. This is fine for single-provider setups but cumbersome when frequently switching. Define provider catalogs whenever multi-model workflows are common so that switches stay atomic, source-attributed, and debuggable.

View file

@ -148,8 +148,7 @@ Settings are organized into categories. All settings should be placed within the
"contextWindowSize": 128000,
"enableCacheControl": true,
"customHeaders": {
"X-Request-ID": "req-123",
"X-User-ID": "user-456"
"X-Client-Request-ID": "req-123"
},
"extra_body": {
"enable_thinking": true
@ -180,102 +179,6 @@ The `extra_body` field allows you to add custom parameters to the request body s
- `"./custom-logs"` - Logs to `./custom-logs` relative to current directory
- `"/tmp/openai-logs"` - Logs to absolute path `/tmp/openai-logs`
#### modelProviders
Use `modelProviders` to declare curated model lists per auth type that the `/model` picker can switch between. Keys must be valid auth types (`openai`, `anthropic`, `gemini`, `vertex-ai`, etc.). Each entry requires an `id` and **must include `envKey`**, with optional `name`, `description`, `baseUrl`, and `generationConfig`. Credentials are never persisted in settings; the runtime reads them from `process.env[envKey]`. Qwen OAuth models remain hard-coded and cannot be overridden.
##### Example
```json
{
"modelProviders": {
"openai": [
{
"id": "gpt-4o",
"name": "GPT-4o",
"envKey": "OPENAI_API_KEY",
"baseUrl": "https://api.openai.com/v1",
"generationConfig": {
"timeout": 60000,
"maxRetries": 3,
"customHeaders": {
"X-Model-Version": "v1.0",
"X-Request-Priority": "high"
},
"extra_body": {
"enable_thinking": true
},
"samplingParams": { "temperature": 0.2 }
}
}
],
"anthropic": [
{
"id": "claude-3-5-sonnet",
"envKey": "ANTHROPIC_API_KEY",
"baseUrl": "https://api.anthropic.com/v1"
}
],
"gemini": [
{
"id": "gemini-2.0-flash",
"name": "Gemini 2.0 Flash",
"envKey": "GEMINI_API_KEY",
"baseUrl": "https://generativelanguage.googleapis.com"
}
],
"vertex-ai": [
{
"id": "gemini-1.5-pro-vertex",
"envKey": "GOOGLE_API_KEY",
"baseUrl": "https://generativelanguage.googleapis.com"
}
]
}
}
```
> [!note]
> Only the `/model` command exposes non-default auth types. Anthropic, Gemini, Vertex AI, etc., must be defined via `modelProviders`. The `/auth` command intentionally lists only the built-in Qwen OAuth and OpenAI flows.
##### Resolution layers and atomicity
The effective auth/model/credential values are chosen per field using the following precedence (first present wins). You can combine `--auth-type` with `--model` to point directly at a provider entry; these CLI flags run before other layers.
| Layer (highest → lowest) | authType | model | apiKey | baseUrl | apiKeyEnvKey | proxy |
| -------------------------- | ----------------------------------- | ----------------------------------------------- | --------------------------------------------------- | ---------------------------------------------------- | ---------------------- | --------------------------------- |
| Programmatic overrides | `/auth` | `/auth` input | `/auth` input | `/auth` input | — | — |
| Model provider selection | — | `modelProvider.id` | `env[modelProvider.envKey]` | `modelProvider.baseUrl` | `modelProvider.envKey` | — |
| CLI arguments | `--auth-type` | `--model` | `--openaiApiKey` (or provider-specific equivalents) | `--openaiBaseUrl` (or provider-specific equivalents) | — | — |
| Environment variables | — | Provider-specific mapping (e.g. `OPENAI_MODEL`) | Provider-specific mapping (e.g. `OPENAI_API_KEY`) | Provider-specific mapping (e.g. `OPENAI_BASE_URL`) | — | — |
| Settings (`settings.json`) | `security.auth.selectedType` | `model.name` | `security.auth.apiKey` | `security.auth.baseUrl` | — | — |
| Default / computed | Falls back to `AuthType.QWEN_OAUTH` | Built-in default (OpenAI ⇒ `qwen3-coder-plus`) | — | — | — | `Config.getProxy()` if configured |
\*When present, CLI auth flags override settings. Otherwise, `security.auth.selectedType` or the implicit default determine the auth type. Qwen OAuth and OpenAI are the only auth types surfaced without extra configuration.
Model-provider sourced values are applied atomically: once a provider model is active, every field it defines is protected from lower layers until you manually clear credentials via `/auth`. The final `generationConfig` is the projection across all layers—lower layers only fill gaps left by higher ones, and the provider layer remains impermeable.
The merge strategy for `modelProviders` is REPLACE: the entire `modelProviders` from project settings will override the corresponding section in user settings, rather than merging the two.
##### Generation config layering
Per-field precedence for `generationConfig`:
1. Programmatic overrides (e.g. runtime `/model`, `/auth` changes)
2. `modelProviders[authType][].generationConfig`
3. `settings.model.generationConfig`
4. Content-generator defaults (`getDefaultGenerationConfig` for OpenAI, `getParameterValue` for Gemini, etc.)
`samplingParams`, `customHeaders`, and `extra_body` are all treated atomically; provider values replace the entire object. If `modelProviders[].generationConfig` defines these fields, they are used directly; otherwise, values from `model.generationConfig` are used. No merging occurs between provider and global configuration levels. Defaults from the content generator apply last so each provider retains its tuned baseline.
##### Selection persistence and recommendations
> [!important]
> Define `modelProviders` in the user-scope `~/.qwen/settings.json` whenever possible and avoid persisting credential overrides in any scope. Keeping the provider catalog in user settings prevents merge/override conflicts between project and user scopes and ensures `/auth` and `/model` updates always write back to a consistent scope.
- `/model` and `/auth` persist `model.name` (where applicable) and `security.auth.selectedType` to the closest writable scope that already defines `modelProviders`; otherwise they fall back to the user scope. This keeps workspace/user files in sync with the active provider catalog.
- Without `modelProviders`, the resolver mixes CLI/env/settings layers, which is fine for single-provider setups but cumbersome when frequently switching. Define provider catalogs whenever multi-model workflows are common so that switches stay atomic, source-attributed, and debuggable.
#### context
| Setting | Type | Description | Default |

View file

@ -42,7 +42,7 @@ This document lists the available keyboard shortcuts in Qwen Code.
| `Ctrl+R` | Reverse search through input/shell history. |
| `Ctrl+Right Arrow` / `Meta+Right Arrow` / `Meta+F` | Move the cursor one word to the right. |
| `Ctrl+U` | Delete from the cursor to the beginning of the line. |
| `Ctrl+V` | Paste clipboard content. If the clipboard contains an image, it will be saved and a reference to it will be inserted in the prompt. |
| `Ctrl+V` (Windows: `Alt+V`) | Paste clipboard content. If the clipboard contains an image, it will be saved and a reference to it will be inserted in the prompt. |
| `Ctrl+W` / `Meta+Backspace` / `Ctrl+Backspace` | Delete the word to the left of the cursor. |
| `Ctrl+X` / `Meta+Enter` | Open the current input in an external editor. |

View file

@ -33,6 +33,13 @@ const external = [
'@lydell/node-pty-linux-x64',
'@lydell/node-pty-win32-arm64',
'@lydell/node-pty-win32-x64',
'@teddyzhu/clipboard',
'@teddyzhu/clipboard-darwin-arm64',
'@teddyzhu/clipboard-darwin-x64',
'@teddyzhu/clipboard-linux-x64-gnu',
'@teddyzhu/clipboard-linux-arm64-gnu',
'@teddyzhu/clipboard-win32-x64-msvc',
'@teddyzhu/clipboard-win32-arm64-msvc',
];
esbuild

View file

@ -648,6 +648,101 @@ function setupAcpTest(
}
});
// Regression test for issue #1806: while the session is in 'plan' mode the
// agent must refuse mutating tools (write_file) and leave the workspace
// untouched — even though the permission handler below would approve the call,
// so plan mode itself is the only thing that can block it.
it('blocks write tools in plan mode (issue #1806)', async () => {
  const rig = new TestRig();
  rig.setup('acp plan mode enforcement');
  // Accumulates 'tool_call_update' notifications for inspection after the prompt.
  const toolCallEvents: Array<{
    toolName: string;
    status: string;
    error?: string;
  }> = [];
  // Auto-approve every permission request so a denial cannot mask the result.
  const { sendRequest, cleanup, stderr, sessionUpdates } = setupAcpTest(rig, {
    permissionHandler: () => ({ optionId: 'proceed_once' }),
  });
  try {
    // Standard ACP handshake: initialize -> authenticate -> session/new.
    await sendRequest('initialize', {
      protocolVersion: 1,
      clientCapabilities: { fs: { readTextFile: true, writeTextFile: true } },
    });
    await sendRequest('authenticate', { methodId: 'openai' });
    const newSession = (await sendRequest('session/new', {
      cwd: rig.testDir!,
      mcpServers: [],
    })) as { sessionId: string };
    // Set mode to 'plan' and confirm the agent acknowledged the switch.
    const setModeResult = (await sendRequest('session/set_mode', {
      sessionId: newSession.sessionId,
      modeId: 'plan',
    })) as { modeId: string };
    expect(setModeResult.modeId).toBe('plan');
    // Try to create a file - this should be blocked by plan mode
    const promptResult = await sendRequest('session/prompt', {
      sessionId: newSession.sessionId,
      prompt: [
        {
          type: 'text',
          text: 'Create a file called test.txt with content "Hello World"',
        },
      ],
    });
    expect(promptResult).toBeDefined();
    // Give time for tool calls to be processed
    // NOTE(review): fixed 2s sleep assumes tool-call updates arrive within that
    // window — confirm this is long enough on slow CI runners.
    await delay(2000);
    // Collect tool call events from session updates
    sessionUpdates.forEach((update) => {
      if (update.update?.sessionUpdate === 'tool_call_update') {
        const toolUpdate = update.update as {
          sessionUpdate: string;
          toolName?: string;
          status?: string;
          error?: { message?: string };
        };
        if (toolUpdate.toolName) {
          toolCallEvents.push({
            toolName: toolUpdate.toolName,
            status: toolUpdate.status ?? 'unknown',
            error: toolUpdate.error?.message,
          });
        }
      }
    });
    // Verify that if write_file was attempted, it was blocked
    const writeFileEvents = toolCallEvents.filter(
      (e) => e.toolName === 'write_file',
    );
    // If the LLM tried to call write_file in plan mode, it should have been blocked.
    // This branch is conditional because the model may answer without calling the
    // tool at all; the filesystem check below is the unconditional guarantee.
    if (writeFileEvents.length > 0) {
      const blockedEvent = writeFileEvents.find(
        (e) => e.status === 'error' && e.error?.includes('Plan mode'),
      );
      expect(blockedEvent).toBeDefined();
      expect(blockedEvent?.error).toContain('Plan mode is active');
    }
    // Verify the file was NOT created
    const fs = await import('fs');
    const path = await import('path');
    const testFilePath = path.join(rig.testDir!, 'test.txt');
    const fileExists = fs.existsSync(testFilePath);
    expect(fileExists).toBe(false);
  } catch (e) {
    // Surface agent diagnostics before failing, to make CI failures debuggable.
    if (stderr.length) console.error('Agent stderr:', stderr.join(''));
    throw e;
  } finally {
    await cleanup();
  }
});
it('receives usage metadata in agent_message_chunk updates', async () => {
const rig = new TestRig();
rig.setup('acp usage metadata');

View file

@ -31,5 +31,9 @@
]
}
],
"models": ["claude-3-5-sonnet-20241022", "qwen3-coder-plus"]
"models": [
"qwen3-coder-plus",
{ "name": "glm-4.7", "auth_type": "anthropic" },
{ "name": "claude-4-5-sonnet-20260219", "auth_type": "anthropic" }
]
}

View file

@ -50,11 +50,18 @@ class Task:
prompts: List[str]
@dataclass
class ModelSpec:
    """One model to run: name and optional auth_type (e.g. anthropic)."""

    # Model identifier, forwarded to the CLI as --model.
    name: str
    # Optional auth scheme, forwarded as --auth-type when set (e.g. "anthropic");
    # None means the CLI's default (OpenAI-compatible) protocol is used.
    auth_type: Optional[str] = None
@dataclass
class RunConfig:
"""Configuration for the concurrent execution."""
tasks: List[Task]
models: List[str]
models: List[ModelSpec] # name + optional auth_type per model
concurrency: int = 4
yolo: bool = True
source_repo: Path = field(default_factory=lambda: Path.cwd())
@ -84,6 +91,7 @@ class RunRecord:
task_name: str
model: str
status: RunStatus
auth_type: Optional[str] = None # e.g. "anthropic" for qwen --auth-type
worktree_path: Optional[str] = None
output_dir: Optional[str] = None
logs_dir: Optional[str] = None
@ -104,6 +112,7 @@ class RunRecord:
"task_name": self.task_name,
"model": self.model,
"status": self.status.value,
"auth_type": self.auth_type,
"worktree_path": self.worktree_path,
"output_dir": self.output_dir,
"logs_dir": self.logs_dir,
@ -136,6 +145,7 @@ class RunRecord:
task_name=data["task_name"],
model=data["model"],
status=RunStatus(data["status"]),
auth_type=data.get("auth_type"),
worktree_path=data.get("worktree_path"),
output_dir=data.get("output_dir"),
logs_dir=data.get("logs_dir"),
@ -806,6 +816,10 @@ class QwenRunner:
# Add model
cmd.extend(["--model", run.model])
# Add auth-type when model uses non-OpenAI protocol (e.g. anthropic for glm-4.7)
if run.auth_type:
cmd.extend(["--auth-type", run.auth_type])
# Add yolo if enabled
if self.config.yolo:
cmd.append("--yolo")
@ -829,27 +843,41 @@ def generate_run_matrix(config: RunConfig) -> List[RunRecord]:
runs = []
for task in config.tasks:
for model in config.models:
run_id = str(uuid.uuid4())[:8]
runs.append(RunRecord(
run_id=run_id,
run_id=str(uuid.uuid4())[:8],
task_id=task.id,
task_name=task.name,
model=model,
model=model.name,
status=RunStatus.QUEUED,
auth_type=model.auth_type,
))
return runs
def _parse_models(data_models: List[Any]) -> List[ModelSpec]:
    """Normalize raw config entries into ModelSpec objects.

    Each entry may be a bare model-name string, or a mapping carrying a
    "name" key plus an optional "auth_type"/"authType" key. Entries of any
    other shape (including mappings without a truthy "name") are silently
    dropped.
    """
    parsed: List[ModelSpec] = []
    for entry in data_models or []:
        if isinstance(entry, str):
            parsed.append(ModelSpec(name=entry, auth_type=None))
        elif isinstance(entry, dict) and entry.get("name"):
            # Accept both snake_case and camelCase spellings of the auth key.
            auth_type = entry.get("auth_type") or entry.get("authType")
            parsed.append(ModelSpec(name=entry["name"], auth_type=auth_type))
    return parsed
def load_config(config_path: Path) -> RunConfig:
"""Load configuration from JSON file."""
with open(config_path, 'r') as f:
data = json.load(f)
tasks = [Task(**t) for t in data.get("tasks", [])]
models = _parse_models(data.get("models", []))
return RunConfig(
tasks=tasks,
models=data.get("models", []),
models=models,
concurrency=data.get("concurrency", 4),
yolo=data.get("yolo", True),
source_repo=Path(data.get("source_repo", ".")).resolve(),

View file

@ -0,0 +1,582 @@
/**
* @license
* Copyright 2025 Qwen Team
* SPDX-License-Identifier: Apache-2.0
*/
/**
* E2E tests for SDK session-id functionality:
* - sessionId option: Allows users to specify a custom session ID
* - Validation: Session ID must be a valid UUID
* - Integration: Session ID is passed to CLI via --session-id flag
* - Behavior: sessionId cannot be used with resume or continue
*/
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import { query, isSDKSystemMessage, type SDKMessage } from '@qwen-code/sdk';
import {
SDKTestHelper,
createSharedTestOptions,
assertSuccessfulCompletion,
} from './test-helper.js';
const SHARED_TEST_OPTIONS = createSharedTestOptions();
describe('Session ID Support (E2E)', () => {
let helper: SDKTestHelper;
let testDir: string;
beforeEach(async () => {
helper = new SDKTestHelper();
// Enable chat recording for session-id tests to allow duplicate session detection
testDir = await helper.setup('session-id', { chatRecording: true });
});
afterEach(async () => {
await helper.cleanup();
});
describe('sessionId Option', () => {
it('should accept a valid UUID as sessionId', async () => {
  // A syntactically valid v4 UUID: version nibble '4' at position 14,
  // variant nibble '8' at position 19.
  const customSessionId = '12345678-1234-4234-8234-123456789abc';
  const session = query({
    prompt: 'What is 1 + 1? Just the number.',
    options: {
      ...SHARED_TEST_OPTIONS,
      cwd: testDir,
      sessionId: customSessionId,
      debug: false,
    },
  });
  const received: SDKMessage[] = [];
  try {
    for await (const msg of session) {
      received.push(msg);
    }
    assertSuccessfulCompletion(received);
    // The query must report the caller-supplied ID, not a generated one.
    expect(session.getSessionId()).toBe(customSessionId);
  } finally {
    await session.close();
  }
});
it('should use sessionId in system init message', async () => {
// Valid UUID v4: 4 in position 14, 8/9/a/b in position 19
const customSessionId = 'abcdef12-3456-4234-abcd-ef1234567890';
const q = query({
prompt: 'Say hello',
options: {
...SHARED_TEST_OPTIONS,
cwd: testDir,
sessionId: customSessionId,
debug: false,
},
});
const messages: SDKMessage[] = [];
try {
for await (const message of q) {
messages.push(message);
// Stop after we get the system init message
if (isSDKSystemMessage(message) && message.subtype === 'init') {
expect(message.session_id).toBe(customSessionId);
break;
}
}
} finally {
await q.close();
}
});
it('should pass sessionId to CLI via arguments', async () => {
// Valid UUID v4: 4 in position 14, 8/9/a/b in position 19
const customSessionId = 'a1b2c3d4-e5f6-4234-abcd-ef1234567890';
const stderrMessages: string[] = [];
const q = query({
prompt: 'What is 2 + 2? Just the number.',
options: {
...SHARED_TEST_OPTIONS,
cwd: testDir,
sessionId: customSessionId,
debug: true,
logLevel: 'debug',
stderr: (msg: string) => {
stderrMessages.push(msg);
},
},
});
try {
for await (const _message of q) {
// Consume all messages
}
// Verify that CLI was spawned with --session-id argument
const hasSessionIdArg = stderrMessages.some((msg) =>
msg.includes('--session-id'),
);
expect(hasSessionIdArg).toBe(true);
// Verify the session ID value is in the arguments
const hasCorrectSessionId = stderrMessages.some((msg) =>
msg.includes(customSessionId),
);
expect(hasCorrectSessionId).toBe(true);
} finally {
await q.close();
}
});
it('should auto-generate sessionId when not provided', async () => {
const q = query({
prompt: 'What is 3 + 3? Just the number.',
options: {
...SHARED_TEST_OPTIONS,
cwd: testDir,
debug: false,
},
});
const messages: SDKMessage[] = [];
try {
for await (const message of q) {
messages.push(message);
}
assertSuccessfulCompletion(messages);
// Verify the query has a valid auto-generated session ID
const sessionId = q.getSessionId();
expect(sessionId).toBeDefined();
expect(sessionId).toMatch(
/^[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$/i,
);
} finally {
await q.close();
}
});
it('should reject using sessionId with resume', async () => {
// Valid UUIDs: 4 in position 14, 8/9/a/b in position 19
const customSessionId = '11111111-2222-4333-a444-555555555555';
const resumeSessionId = '66666666-7777-4888-b999-000000000000';
// CLI rejects using --session-id with --resume
const q = query({
prompt: 'Say hello',
options: {
...SHARED_TEST_OPTIONS,
cwd: testDir,
sessionId: customSessionId,
resume: resumeSessionId,
debug: false,
},
});
try {
for await (const _message of q) {
// Consume messages
}
// Should not reach here - CLI should reject this combination
throw new Error(
'Expected query to fail when using sessionId with resume',
);
} catch (error) {
// Expected to fail - CLI rejects --session-id with --resume
expect(error).toBeDefined();
} finally {
await q.close();
}
});
});
describe('Session ID Validation', () => {
it('should reject invalid sessionId format', async () => {
const invalidSessionId = 'not-a-valid-uuid';
expect(() => {
query({
prompt: 'Say hello',
options: {
...SHARED_TEST_OPTIONS,
cwd: testDir,
sessionId: invalidSessionId,
},
});
}).toThrow(/Invalid sessionId/);
});
it('should reject sessionId with wrong UUID version', async () => {
// UUID version 6 (not valid - must be 1-5)
const invalidVersionSessionId = '12345678-1234-6789-8234-123456789abc';
expect(() => {
query({
prompt: 'Say hello',
options: {
...SHARED_TEST_OPTIONS,
cwd: testDir,
sessionId: invalidVersionSessionId,
},
});
}).toThrow(/Invalid sessionId/);
});
it('should reject sessionId with invalid variant', async () => {
// Invalid variant (must be 8, 9, a, or b in position 19)
const invalidVariantSessionId = '12345678-1234-1234-c234-823456789abc';
expect(() => {
query({
prompt: 'Say hello',
options: {
...SHARED_TEST_OPTIONS,
cwd: testDir,
sessionId: invalidVariantSessionId,
},
});
}).toThrow(/Invalid sessionId/);
});
it('should handle empty sessionId gracefully', async () => {
// Note: Empty string behavior - validation skips it but Query constructor may use it
// This test documents the current behavior
const q = query({
prompt: 'Say hello',
options: {
...SHARED_TEST_OPTIONS,
cwd: testDir,
sessionId: '',
},
});
try {
// When empty string is provided, the query should still be created
// The actual session ID behavior depends on implementation details
const sessionId = q.getSessionId();
expect(sessionId).toBeDefined();
// If empty string is used, it's passed through; otherwise a UUID is generated
// Either way, the query should function
for await (const _message of q) {
// Consume messages
}
} finally {
await q.close();
}
});
it('should accept various valid UUID formats', async () => {
const validUUIDs = [
'12345678-1234-1234-8234-123456789abc', // version 1, variant 8
'12345678-1234-1234-9234-123456789abc', // version 1, variant 9
'12345678-1234-1234-a234-123456789abc', // version 1, variant a
'12345678-1234-1234-b234-123456789abc', // version 1, variant b
'12345678-1234-2234-8234-123456789abc', // version 2, variant 8
'12345678-1234-3234-8234-123456789abc', // version 3, variant 8
'12345678-1234-4234-8234-123456789abc', // version 4, variant 8
'12345678-1234-5234-8234-123456789abc', // version 5, variant 8
];
for (const uuid of validUUIDs) {
const q = query({
prompt: 'Say hi',
options: {
...SHARED_TEST_OPTIONS,
cwd: testDir,
sessionId: uuid,
debug: false,
},
});
try {
// Just verify the query is created without throwing
expect(q.getSessionId()).toBe(uuid);
} finally {
await q.close();
}
}
});
});
describe('Multi-turn with Custom Session ID', () => {
it('should maintain custom sessionId across multiple turns', async () => {
// Valid UUID v4: 4 in position 14, 8/9/a/b in position 19
const customSessionId = '99999999-8888-4777-a666-555555555555';
async function* createConversation(): AsyncIterable<{
type: 'user';
session_id: string;
message: { role: 'user'; content: string };
parent_tool_use_id: null;
}> {
yield {
type: 'user',
session_id: customSessionId,
message: {
role: 'user',
content: 'What is 1 + 1?',
},
parent_tool_use_id: null,
};
await new Promise((resolve) => setTimeout(resolve, 100));
yield {
type: 'user',
session_id: customSessionId,
message: {
role: 'user',
content: 'What is 2 + 2?',
},
parent_tool_use_id: null,
};
}
const q = query({
prompt: createConversation(),
options: {
...SHARED_TEST_OPTIONS,
cwd: testDir,
sessionId: customSessionId,
debug: false,
},
});
const messages: SDKMessage[] = [];
try {
for await (const message of q) {
messages.push(message);
}
assertSuccessfulCompletion(messages);
// Verify all system messages use the custom session ID
const systemMessages = messages.filter(isSDKSystemMessage);
for (const sysMsg of systemMessages) {
expect(sysMsg.session_id).toBe(customSessionId);
}
} finally {
await q.close();
}
});
});
describe('Session ID Duplicate Detection', () => {
// Duplicate-session detection requires the first session to have been
// persisted, which is why this suite's beforeEach enables chat recording.
it('should reject duplicate sessionId with error', async () => {
  // Generate a unique UUID for this test
  const customSessionId = crypto.randomUUID();
  // First query: create a session with the custom session ID
  const q1 = query({
    prompt: 'Say hello',
    options: {
      ...SHARED_TEST_OPTIONS,
      cwd: testDir,
      sessionId: customSessionId,
      env: {
        // NOTE(review): purpose of SANDBOX_SET_UID_GID is not visible from this
        // file — presumably required for sandboxed CLI runs; confirm.
        SANDBOX_SET_UID_GID: 'true',
      },
    },
  });
  // Consume the first query to completion and close it
  try {
    for await (const _msg of q1) {
      // consume
    }
  } finally {
    await q1.close();
  }
  // Second query: try to use the same session ID
  // This should fail because the session ID is already in use
  // CLI will exit with code 1 when detecting duplicate session ID
  const q2 = query({
    prompt: 'Say hello again',
    options: {
      ...SHARED_TEST_OPTIONS,
      cwd: testDir,
      sessionId: customSessionId,
      env: {
        SANDBOX_SET_UID_GID: 'true',
      },
    },
  });
  // The error should be propagated and the iteration should throw
  // When iterating over messages, if CLI exits with code 1 (duplicate session ID),
  // the error should be thrown during iteration
  await expect(async () => {
    for await (const _msg of q2) {
      // consume
    }
  }).rejects.toThrow(/CLI process exited with code 1/);
  await q2.close();
});
it('should throw error when CLI exits with non-zero code', async () => {
// Generate a unique UUID for this test
const customSessionId = crypto.randomUUID();
// First query: create a session and properly close it after completion
const q1 = query({
prompt: 'Say hello',
options: {
...SHARED_TEST_OPTIONS,
cwd: testDir,
sessionId: customSessionId,
env: {
SANDBOX_SET_UID_GID: 'true',
},
},
});
try {
for await (const _msg of q1) {
// consume
}
} finally {
await q1.close();
}
// Second query with same session ID
// When using the same session ID, CLI will detect the duplicate and exit with code 1
const q2 = query({
prompt: 'Say hello again',
options: {
...SHARED_TEST_OPTIONS,
cwd: testDir,
sessionId: customSessionId,
env: {
SANDBOX_SET_UID_GID: 'true',
},
},
});
let errorCaught = false;
let errorMessage = '';
try {
// Iterate over messages - the error should be thrown during iteration
// because CLI exits with code 1 when detecting duplicate session ID
for await (const _msg of q2) {
// consume
}
} catch (error) {
errorCaught = true;
// CLI errors are written directly to console (stderr inherit mode)
// SDK only reports the exit status, not the error message
expect(error instanceof Error).toBe(true);
errorMessage = error instanceof Error ? error.message : String(error);
// Verify the error message contains the expected exit code
expect(errorMessage).toContain('CLI process exited with code 1');
} finally {
await q2.close();
}
// Verify that an error was actually caught during message iteration
expect(errorCaught).toBe(true);
});
});
describe('Session ID Consistency', () => {
it('should expose same sessionId via getSessionId() and messages', async () => {
// Valid UUID v4: 4 in position 14, 8/9/a/b in position 19
const customSessionId = 'aaaaaaaa-bbbb-4ccc-adde-eeeeeeeeeeee';
const q = query({
prompt: 'Say hello',
options: {
...SHARED_TEST_OPTIONS,
cwd: testDir,
sessionId: customSessionId,
debug: false,
},
});
const messages: SDKMessage[] = [];
try {
for await (const message of q) {
messages.push(message);
}
// Verify getSessionId() matches the option
expect(q.getSessionId()).toBe(customSessionId);
// Verify system messages have the same session ID
const systemMessages = messages.filter(isSDKSystemMessage);
expect(systemMessages.length).toBeGreaterThan(0);
for (const sysMsg of systemMessages) {
expect(sysMsg.session_id).toBe(customSessionId);
}
} finally {
await q.close();
}
});
it('should generate different session IDs for different queries', async () => {
const q1 = query({
prompt: 'Say one',
options: {
...SHARED_TEST_OPTIONS,
cwd: testDir,
debug: false,
},
});
const q2 = query({
prompt: 'Say two',
options: {
...SHARED_TEST_OPTIONS,
cwd: testDir,
debug: false,
},
});
try {
// Consume messages from both queries
for await (const _msg of q1) {
// consume
}
for await (const _msg of q2) {
// consume
}
const sessionId1 = q1.getSessionId();
const sessionId2 = q2.getSessionId();
// Session IDs should be different
expect(sessionId1).toBeDefined();
expect(sessionId2).toBeDefined();
expect(sessionId1).not.toBe(sessionId2);
// Both should be valid UUIDs
expect(sessionId1).toMatch(
/^[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$/i,
);
expect(sessionId2).toMatch(
/^[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$/i,
);
} finally {
await q1.close();
await q2.close();
}
});
});
});

View file

@ -41,6 +41,13 @@ export interface SDKTestHelperOptions {
* Whether to create .qwen/settings.json
*/
createQwenConfig?: boolean;
/**
* Whether to enable chat recording for this test.
* - Set to `true` to enable recording (needed for session-id duplicate detection tests)
* - Set to `false` or leave undefined to disable recording (default for most tests)
* This sets chatRecording in general settings.
*/
chatRecording?: boolean;
}
/**
@ -91,7 +98,8 @@ export class SDKTestHelper {
},
general: {
...generalSettings,
chatRecording: false, // SDK tests don't need chat recording
// Default to disabling chat recording unless explicitly enabled
...(options.chatRecording !== true ? { chatRecording: false } : {}),
},
};

View file

@ -0,0 +1,117 @@
# terminal-capture — Motivation and Positioning
## 1. Overview of Existing Testing System
| Layer | Tools | Coverage | Status |
| ---------------------- | ----------------------------------------- | --------------------------------------- | --------------------------------------------------------- |
| Unit Tests | Vitest + ink-testing-library | Ink components, Core logic, utilities | Mature, extensive `.test.ts` / `.test.tsx` |
| Integration Tests | Vitest + TestRig / SDKTestHelper | CLI E2E, SDK multi-turn, MCP, auth | Mature, supports none/docker/podman sandboxes |
| Terminal UI Snapshots | `toMatchSnapshot()` + ink-testing-library | Ink component render output (ANSI) | Exists, covers Footer, InputPrompt, MarkdownDisplay, etc. |
| Web UI Regression | Chromatic + Storybook | `packages/webui` components | Exists, but only covers Web UI |
| **Terminal UI Visual** | **terminal-capture** | CLI terminal real rendering screenshots | ✅ Implemented |
## 2. Problems Solved by terminal-capture
### Limitations of Existing Ink Text Snapshots
The project uses `toMatchSnapshot()` to compare Ink component ANSI text output, which validates **text content**, but cannot verify:
- Whether colors are correct (red separators? green highlights? Logo gradients?)
- Whether layout is aligned (table borders? multi-column layout?)
- Overall visual feel (component spacing? blank areas? overflow?)
These can only be seen by **actually rendering to a terminal emulator**.
### Core Architecture
```
node-pty (pseudo-terminal)
↓ raw ANSI byte stream
xterm.js (running inside Playwright headless Chromium)
↓ perfect rendering: colors, bold, cursor, scrolling
Playwright element screenshot
↓ pixel-perfect screenshots (optional macOS window decorations)
```
### Core Features
| Feature | Description |
| -------------------- | ----------------------------------------------------------------------------------- |
| WYSIWYG | xterm.js fully renders ANSI, no manual output cleaning needed |
| Theme Support | Built-in 5 themes (Dracula, One Dark, GitHub Dark, Monokai, Night Owl) |
| Full-length | `captureFull()` supports capturing scrollback buffer content |
| Deterministic Naming | Screenshot filenames auto-generated by step sequence for easy regression comparison |
| Batch Execution | `run.ts` executes all scenarios in one command |
## 3. Usage
### TypeScript Configuration-Driven
Scenario config files (`scenarios/*.ts`) only need to declare `type` (input) and `key` (keypress), Runner handles automatically:
- Wait for CLI readiness
- Auto-complete interference handling (/ commands auto-send Escape)
- Auto-screenshot before/after input (01 = input state, 02 = result)
- Auto-capture full-length image at last step (full-flow.png)
- Special key interactions (Arrow keys / Tab / Enter, etc.)
```typescript
// integration-tests/terminal-capture/scenarios/about.ts
import type { ScenarioConfig } from '../scenario-runner.js';
export default {
name: '/about',
spawn: ['node', 'dist/cli.js', '--yolo'],
terminal: { title: 'qwen-code', cwd: '../../..' },
flow: [
{ type: 'Hi, can you help me understand this codebase?' },
{ type: '/about' },
],
} satisfies ScenarioConfig;
```
### Running
```bash
# From project root
npx tsx integration-tests/terminal-capture/run.ts integration-tests/terminal-capture/scenarios/
# Or inside terminal-capture directory
npm run capture
```
### Screenshot Output
```
scenarios/screenshots/
about/
01-01.png # Step 1 input state
01-02.png # Step 1 result
02-01.png # Step 2 input state
02-02.png # Step 2 result
full-flow.png # Final state full-length image
context/
...
```
## 4. Position in Testing System
```
┌─────────────────────────────────────┐
│ Existing Testing System │
├─────────────────────────────────────┤
│ Unit Tests (Vitest) │ ← Function/Component level
│ Text Snapshots (ink-testing-lib) │ ← ANSI string comparison
│ Integration Tests (TestRig/SDK) │ ← E2E functionality
│ Web UI Regression (Chromatic) │ ← Only covers webui
├─────────────────────────────────────┤
│ terminal-capture │ ← Terminal UI visual layer
│ (xterm.js + Playwright) │ Fills the gap
└─────────────────────────────────────┘
```
## 5. Future Directions
1. **Visual Regression** — Integrate Playwright `toHaveScreenshot()` for pixel-level baseline comparison, CI auto-detects terminal UI changes
2. **PR Workflow Integration** — Drive Agent via Cursor Skill to auto-checkout branch → build → screenshot → attach to review comment
3. **Complement to Chromatic** — Chromatic covers Web UI, terminal-capture covers CLI terminal UI

View file

@ -0,0 +1,18 @@
{
"name": "@qwen-code/terminal-capture",
"version": "0.1.0",
"private": true,
"description": "Terminal UI screenshot automation for CLI visual testing",
"type": "module",
"scripts": {
"capture": "npx tsx run.ts scenarios/",
"capture:about": "npx tsx run.ts scenarios/about.ts",
"capture:all": "npx tsx run.ts scenarios/all.ts"
},
"dependencies": {
"@lydell/node-pty": "1.1.0",
"@xterm/xterm": "^5.5.0",
"playwright": "^1.50.0",
"strip-ansi": "^7.1.2"
}
}

View file

@ -0,0 +1,105 @@
#!/usr/bin/env npx tsx
/**
* Batch run terminal screenshot scenarios
*
* Usage:
* npx tsx integration-tests/terminal-capture/run.ts integration-tests/terminal-capture/scenarios/about.ts
* npx tsx integration-tests/terminal-capture/run.ts integration-tests/terminal-capture/scenarios/ # batch
* npx tsx integration-tests/terminal-capture/run.ts integration-tests/terminal-capture/scenarios/*.ts # glob
*/
import {
loadScenarios,
runScenario,
type RunResult,
} from './scenario-runner.js';
import { readdirSync, statSync } from 'node:fs';
import { resolve, extname, join } from 'node:path';
/**
 * CLI entry point: collect scenario files from argv, run each scenario
 * sequentially, print a per-scenario and aggregate summary, and exit
 * non-zero if any scenario failed.
 */
async function main() {
  const args = process.argv.slice(2);
  if (args.length === 0) {
    console.log(
      `
Usage: npx tsx integration-tests/terminal-capture/run.ts <scenario.ts | directory>...

Examples:
  npx tsx integration-tests/terminal-capture/run.ts integration-tests/terminal-capture/scenarios/about.ts
  npx tsx integration-tests/terminal-capture/run.ts integration-tests/terminal-capture/scenarios/
`.trim(),
    );
    process.exit(1);
  }
  // Collect all .ts scenario files from arguments.
  // Directories are scanned one level deep (non-recursive) and sorted for a
  // deterministic execution order.
  const scenarioFiles: string[] = [];
  for (const arg of args) {
    const abs = resolve(arg);
    try {
      const stat = statSync(abs);
      if (stat.isDirectory()) {
        const files = readdirSync(abs)
          .filter((f) => extname(f) === '.ts')
          .sort()
          .map((f) => join(abs, f));
        scenarioFiles.push(...files);
      } else {
        scenarioFiles.push(abs);
      }
    } catch {
      // statSync throws when the path does not exist.
      console.error(`❌ Not found: ${arg}`);
      process.exit(1);
    }
  }
  if (scenarioFiles.length === 0) {
    console.error('❌ No .ts scenario files found');
    process.exit(1);
  }
  console.log(`🎬 Running ${scenarioFiles.length} scenario(s)...\n`);
  // Run scenarios sequentially (single file can export an array)
  const results: RunResult[] = [];
  for (const file of scenarioFiles) {
    const { configs, basedir } = await loadScenarios(file);
    for (const config of configs) {
      const result = await runScenario(config, basedir);
      results.push(result);
    }
  }
  // Summary
  console.log(`\n${'═'.repeat(60)}`);
  console.log('📊 Summary');
  console.log('═'.repeat(60));
  const passed = results.filter((r) => r.success);
  const failed = results.filter((r) => !r.success);
  const totalScreenshots = results.reduce(
    (sum, r) => sum + r.screenshots.length,
    0,
  );
  const totalTime = results.reduce((sum, r) => sum + r.durationMs, 0);
  for (const r of results) {
    const icon = r.success ? '✅' : '❌';
    const time = (r.durationMs / 1000).toFixed(1);
    console.log(
      `  ${icon} ${r.name}${r.screenshots.length} screenshots, ${time}s`,
    );
    if (r.error) console.log(`     ${r.error}`);
  }
  console.log(
    `\n  Total: ${passed.length} passed, ${failed.length} failed, ${totalScreenshots} screenshots, ${(totalTime / 1000).toFixed(1)}s`,
  );
  // Non-zero exit signals CI that at least one scenario failed.
  if (failed.length > 0) process.exit(1);
}

main().catch((err) => {
  console.error(err);
  process.exit(1);
});

View file

@ -0,0 +1,304 @@
/**
* Scenario Runner v3 TypeScript Configuration-Driven Terminal Screenshots
*
* Configuration has only two core concepts: type (input) and capture (screenshot).
* All intelligent waiting is handled automatically by the Runner.
*
* Usage:
* npx tsx integration-tests/terminal-capture/run.ts integration-tests/terminal-capture/scenarios/about.ts
* npx tsx integration-tests/terminal-capture/run.ts integration-tests/terminal-capture/scenarios/
*/
import { TerminalCapture, THEMES } from './terminal-capture.js';
import { dirname, resolve, isAbsolute } from 'node:path';
// ─────────────────────────────────────────────
// Schema — Minimal
// ─────────────────────────────────────────────
export interface FlowStep {
/** Input text (auto-press Enter, auto-wait for output to stabilize, auto-screenshot before/after) */
type?: string;
/**
* Send special key presses (no auto-Enter, no auto-screenshot)
* Supported: ArrowUp, ArrowDown, ArrowLeft, ArrowRight, Enter, Tab, Escape, Backspace, Space
* Can also pass ANSI escape sequence strings
*/
key?: string | string[];
/** Explicit screenshot: current viewport (standalone capture when no type) */
capture?: string;
/** Explicit screenshot: full scrollback buffer long image (standalone capture when no type) */
captureFull?: string;
}
export interface ScenarioConfig {
/** Scenario name */
name: string;
/** Launch command, e.g., ["node", "dist/cli.js", "--yolo"] */
spawn: string[];
/** Execution flow: array, each item can contain type / capture / captureFull */
flow: FlowStep[];
/** Terminal configuration (all optional) */
terminal?: {
cols?: number;
rows?: number;
theme?: string;
chrome?: boolean;
title?: string;
fontSize?: number;
cwd?: string;
};
/** Screenshot output directory (relative to config file) */
outputDir?: string;
}
// ─────────────────────────────────────────────
// Runner
// ─────────────────────────────────────────────
/** Outcome of executing one scenario via runScenario(). */
export interface RunResult {
  /** Scenario name, copied from ScenarioConfig.name. */
  name: string;
  /** Screenshot artifacts captured during the run (one entry per capture). */
  screenshots: string[];
  /** True when the whole flow completed without throwing. */
  success: boolean;
  /** Failure description; only set when success is false. */
  error?: string;
  /** Wall-clock duration of the scenario in milliseconds. */
  durationMs: number;
}
/** Dynamically load configuration from .ts file (supports single object or array) */
export async function loadScenarios(
tsPath: string,
): Promise<{ configs: ScenarioConfig[]; basedir: string }> {
const absPath = isAbsolute(tsPath) ? tsPath : resolve(tsPath);
const mod = (await import(absPath)) as {
default: ScenarioConfig | ScenarioConfig[];
};
const raw = mod.default;
const configs = Array.isArray(raw) ? raw : [raw];
for (const config of configs) {
if (!config?.name) throw new Error(`Missing 'name': ${absPath}`);
if (!config.spawn?.length) throw new Error(`Missing 'spawn': ${absPath}`);
if (!config.flow?.length) throw new Error(`Missing 'flow': ${absPath}`);
}
return { configs, basedir: dirname(absPath) };
}
/**
 * Execute a single scenario: spawn the CLI inside a captured terminal,
 * replay each flow step (typed text, key presses, screenshots), and return
 * a summary. Never throws — failures are reported via `success: false`.
 *
 * @param config  Scenario definition (expected to be validated by loadScenarios).
 * @param basedir Directory of the config file; `outputDir` and
 *                `terminal.cwd` are resolved relative to it.
 * @returns Run summary with screenshot paths in capture order.
 */
export async function runScenario(
  config: ScenarioConfig,
  basedir: string,
): Promise<RunResult> {
  const startTime = Date.now();
  const screenshots: string[] = [];
  const t = config.terminal ?? {};
  // Default working directory is the config file's parent directory.
  const cwd = t.cwd ? resolve(basedir, t.cwd) : resolve(basedir, '..');
  // Use scenario name as subdirectory to isolate screenshot outputs from different scenarios
  // (strips a leading '/', replaces anything outside [a-zA-Z0-9, CJK, _-] with '-').
  const scenarioDir =
    config.name
      .replace(/^\//, '')
      .replace(/[^a-zA-Z0-9\u4e00-\u9fff_-]/g, '-')
      .replace(/-+/g, '-')
      .replace(/^-|-$/g, '') || 'unnamed';
  const outputDir = config.outputDir
    ? resolve(basedir, config.outputDir, scenarioDir)
    : resolve(basedir, 'screenshots', scenarioDir);
  console.log(`\n${'═'.repeat(60)}`);
  console.log(`${config.name}`);
  console.log('═'.repeat(60));
  const terminal = await TerminalCapture.create({
    cols: t.cols ?? 100,
    rows: t.rows ?? 28,
    theme: (t.theme ?? 'dracula') as keyof typeof THEMES,
    chrome: t.chrome ?? true,
    title: t.title ?? 'Terminal',
    fontSize: t.fontSize,
    cwd,
    outputDir,
  });
  try {
    // ── Spawn ──
    const [command, ...args] = config.spawn;
    console.log(` spawn: ${config.spawn.join(' ')}`);
    await terminal.spawn(command, args);
    // ── Auto-wait for CLI readiness ──
    // "Ready" = no new PTY output for 1.5s (up to a 30s cap).
    console.log(' ⏳ waiting for ready...');
    await terminal.idle(1500, 30000);
    console.log(' ✅ ready');
    // ── Execute flow ──
    let seq = 0; // Global screenshot sequence number
    for (let i = 0; i < config.flow.length; i++) {
      const step = config.flow[i];
      const label = `[${i + 1}/${config.flow.length}]`;
      if (step.type) {
        const display =
          step.type.length > 60 ? step.type.slice(0, 60) + '...' : step.type;
        // If next step is key, there's more interaction to do, so don't auto-press Enter
        const nextStep = config.flow[i + 1];
        const autoEnter = !nextStep?.key;
        console.log(
          ` ${label} type: "${display}"${autoEnter ? '' : ' (no auto-enter)'}`,
        );
        const text = step.type.replace(/\n$/, '');
        await terminal.type(text);
        await sleep(300);
        // Only send Escape for / commands to close auto-complete, not for regular text
        if (text.startsWith('/') && autoEnter) {
          await terminal.type('\x1b');
          await sleep(100);
        }
        // ── 01: Text input complete ──
        seq++;
        const inputName = step.capture
          ? step.capture.replace(/\.png$/, '-01.png')
          : `${pad(seq)}-01.png`;
        console.log(` ${label} 📸 input: ${inputName}`);
        screenshots.push(await terminal.capture(inputName));
        if (autoEnter) {
          // ── Auto-press Enter → Wait for stabilization → 02 screenshot ──
          await terminal.type('\n');
          console.log(` ⏳ waiting for output to settle...`);
          await terminal.idle(2000, 60000);
          console.log(` ✅ settled`);
          const resultName = step.capture ?? `${pad(seq)}-02.png`;
          console.log(` ${label} 📸 result: ${resultName}`);
          screenshots.push(await terminal.capture(resultName));
          // full-flow: Only the last type step auto-captures full-length image
          const isLastType = !config.flow.slice(i + 1).some((s) => s.type);
          if (isLastType || step.captureFull) {
            const fullName = step.captureFull ?? 'full-flow.png';
            console.log(` ${label} 📸 full: ${fullName}`);
            screenshots.push(await terminal.captureFull(fullName));
          }
        }
        // When not autoEnter, only captured before state, subsequent key steps take over interaction
      } else if (step.key) {
        // ── key: Send special key presses (arrow keys, Tab, Enter, etc.) ──
        const keys = Array.isArray(step.key) ? step.key : [step.key];
        console.log(` ${label} key: ${keys.join(', ')}`);
        for (const k of keys) {
          await terminal.type(resolveKey(k));
          await sleep(150);
        }
        // Wait for UI response to key press
        await terminal.idle(500, 5000);
        // If key step has explicit capture/captureFull
        if (step.capture || step.captureFull) {
          seq++;
          if (step.capture) {
            console.log(` ${label} 📸 capture: ${step.capture}`);
            screenshots.push(await terminal.capture(step.capture));
          }
          if (step.captureFull) {
            console.log(` ${label} 📸 captureFull: ${step.captureFull}`);
            screenshots.push(await terminal.captureFull(step.captureFull));
          }
        }
        // After key sequence ends (next step is not key), auto-add result + full screenshots
        const nextStep = config.flow[i + 1];
        if (!nextStep?.key) {
          console.log(` ⏳ waiting for output to settle...`);
          await terminal.idle(2000, 60000);
          console.log(` ✅ settled`);
          const resultName = `${pad(seq)}-02.png`;
          console.log(` ${label} 📸 result: ${resultName}`);
          screenshots.push(await terminal.capture(resultName));
          // If this is the last interaction step, add full-length image
          const isLastType = !config.flow.slice(i + 1).some((s) => s.type);
          if (isLastType) {
            console.log(` ${label} 📸 full: full-flow.png`);
            screenshots.push(await terminal.captureFull('full-flow.png'));
          }
        }
      } else {
        // ── Standalone screenshot step (no type/key) ──
        seq++;
        if (step.capture) {
          console.log(` ${label} 📸 capture: ${step.capture}`);
          screenshots.push(await terminal.capture(step.capture));
        }
        if (step.captureFull) {
          console.log(` ${label} 📸 captureFull: ${step.captureFull}`);
          screenshots.push(await terminal.captureFull(step.captureFull));
        }
      }
    }
    const duration = Date.now() - startTime;
    console.log(
      `\n ✅ ${config.name}${screenshots.length} screenshots, ${(duration / 1000).toFixed(1)}s`,
    );
    return {
      name: config.name,
      screenshots,
      success: true,
      durationMs: duration,
    };
  } catch (err) {
    const duration = Date.now() - startTime;
    const msg = err instanceof Error ? err.message : String(err);
    console.error(`\n ❌ ${config.name}${msg}`);
    return {
      name: config.name,
      screenshots,
      success: false,
      error: msg,
      durationMs: duration,
    };
  } finally {
    // Always release PTY + browser, even when a step failed.
    await terminal.close();
  }
}
/** Promise-based delay helper: resolves after `ms` milliseconds. */
function sleep(ms: number): Promise<void> {
  return new Promise<void>((resolve) => {
    setTimeout(resolve, ms);
  });
}
/** Zero-pad a sequence number to at least two characters: 1 → "01". */
function pad(n: number): string {
  const digits = `${n}`;
  return digits.length >= 2 ? digits : `0${digits}`;
}
/** Key name → PTY escape sequence */
const KEY_MAP: Record<string, string> = {
  // Cursor keys (CSI final bytes A–D)
  ArrowUp: '\x1b[A',
  ArrowDown: '\x1b[B',
  ArrowRight: '\x1b[C',
  ArrowLeft: '\x1b[D',
  // Editing / control keys
  Enter: '\r',
  Tab: '\t',
  Escape: '\x1b',
  Backspace: '\x7f',
  Space: ' ',
  // Navigation keys
  Home: '\x1b[H',
  End: '\x1b[F',
  PageUp: '\x1b[5~',
  PageDown: '\x1b[6~',
  Delete: '\x1b[3~',
};
/** Translate a key name into the byte sequence the PTY understands; unknown names pass through unchanged. */
function resolveKey(key: string): string {
  const sequence = KEY_MAP[key];
  return sequence === undefined ? key : sequence;
}

View file

@ -0,0 +1,8 @@
import type { ScenarioConfig } from '../scenario-runner.js';
// Scenario: greet the CLI, then run the /about slash command and capture its output.
// `terminal.cwd` is resolved relative to this config file's directory by runScenario.
export default {
  name: '/about command',
  spawn: ['node', 'dist/cli.js', '--yolo'],
  terminal: { title: 'qwen-code', cwd: '../../..' },
  flow: [{ type: 'hi' }, { type: '/about' }],
} satisfies ScenarioConfig;

View file

@ -0,0 +1,46 @@
import type { ScenarioConfig } from '../scenario-runner.js';
// Batch of slash-command scenarios. Each entry gets its own screenshot
// subdirectory (derived from `name` by runScenario); `terminal.cwd` is
// resolved relative to this config file's directory.
export default [
  {
    // Chat first, then show the /about panel.
    name: '/about',
    spawn: ['node', 'dist/cli.js', '--yolo'],
    terminal: { title: 'qwen-code', cwd: '../../..' },
    flow: [
      { type: 'Hi, can you help me understand this codebase?' },
      { type: '/about' },
    ],
  },
  {
    // Chat first, then inspect token usage with /context.
    name: '/context',
    spawn: ['node', 'dist/cli.js', '--yolo'],
    terminal: { title: 'qwen-code', cwd: '../../..' },
    flow: [
      { type: 'How do you understand this project?' },
      { type: '/context' },
    ],
  },
  {
    // Exercises keyboard-driven selection after a slash command.
    name: '/export (tab select)',
    spawn: ['node', 'dist/cli.js', '--yolo'],
    terminal: { title: 'qwen-code', cwd: '../../..' },
    flow: [
      { type: 'Please give me a brief introduction about yourself.' },
      { type: '/export' },
      { key: 'Tab' }, // Tab to open format selection
      { key: 'ArrowDown' }, // Down arrow to switch options
      { key: 'Enter' }, // Confirm selection
    ],
  },
  {
    // Auth dialog flow: pick an option, then type a (fake) API key.
    name: '/auth',
    spawn: ['node', 'dist/cli.js', '--yolo'],
    terminal: { title: 'qwen-code', cwd: '../../..' },
    flow: [
      { type: '/auth' },
      { key: 'ArrowDown' }, // Select API Key
      { key: 'Enter' }, // Confirm
      { type: 'sk-test-key-123' },
    ],
  },
] satisfies ScenarioConfig[];

View file

@ -0,0 +1,856 @@
/**
* TerminalCapture - Terminal Screenshot Tool
*
* Terminal screenshot solution based on xterm.js + Playwright + node-pty.
* Core philosophy: WYSIWYG let xterm.js complete terminal simulation and rendering
* inside the browser. Screenshots always capture the terminal's current real state,
* no manual output cleaning needed.
*
* Architecture:
* node-pty (pseudo-terminal)
* raw ANSI byte stream
* xterm.js (running inside Playwright headless Chromium)
* perfect rendering: colors, bold, cursor, scrolling
* Playwright element screenshot
* pixel-perfect screenshots (optional macOS window decorations)
*/
import { chromium, type Browser, type Page } from 'playwright';
import * as pty from '@lydell/node-pty';
import stripAnsi from 'strip-ansi';
import { mkdirSync } from 'node:fs';
import { join, dirname } from 'node:path';
import { createRequire } from 'node:module';
const _require = createRequire(import.meta.url);
// ─────────────────────────────────────────────
// Theme definitions
// ─────────────────────────────────────────────
/**
 * Color palette passed to the xterm.js `theme` option.
 * All values are CSS color strings (the built-in THEMES use '#rrggbb' hex).
 */
export interface XtermTheme {
  background: string;
  foreground: string;
  cursor: string;
  cursorAccent?: string;
  selectionBackground?: string;
  selectionForeground?: string;
  // Standard ANSI colors 0–7
  black: string;
  red: string;
  green: string;
  yellow: string;
  blue: string;
  magenta: string;
  cyan: string;
  white: string;
  // Bright ANSI colors 8–15
  brightBlack: string;
  brightRed: string;
  brightGreen: string;
  brightYellow: string;
  brightBlue: string;
  brightMagenta: string;
  brightCyan: string;
  brightWhite: string;
}
/**
 * Built-in terminal themes, keyed by name.
 * 'dracula' is the fallback used by TerminalCapture when no theme (or an
 * unknown theme name) is supplied.
 */
export const THEMES: Record<string, XtermTheme> = {
  // Dracula — https://draculatheme.com palette
  dracula: {
    background: '#282a36',
    foreground: '#f8f8f2',
    cursor: '#f8f8f2',
    selectionBackground: '#44475a',
    black: '#21222c',
    red: '#ff5555',
    green: '#50fa7b',
    yellow: '#f1fa8c',
    blue: '#bd93f9',
    magenta: '#ff79c6',
    cyan: '#8be9fd',
    white: '#f8f8f2',
    brightBlack: '#6272a4',
    brightRed: '#ff6e6e',
    brightGreen: '#69ff94',
    brightYellow: '#ffffa5',
    brightBlue: '#d6acff',
    brightMagenta: '#ff92df',
    brightCyan: '#a4ffff',
    brightWhite: '#ffffff',
  },
  // Atom One Dark
  'one-dark': {
    background: '#282c34',
    foreground: '#abb2bf',
    cursor: '#528bff',
    selectionBackground: '#3e4451',
    black: '#545862',
    red: '#e06c75',
    green: '#98c379',
    yellow: '#e5c07b',
    blue: '#61afef',
    magenta: '#c678dd',
    cyan: '#56b6c2',
    white: '#abb2bf',
    brightBlack: '#545862',
    brightRed: '#e06c75',
    brightGreen: '#98c379',
    brightYellow: '#e5c07b',
    brightBlue: '#61afef',
    brightMagenta: '#c678dd',
    brightCyan: '#56b6c2',
    brightWhite: '#c8ccd4',
  },
  // GitHub dark mode
  'github-dark': {
    background: '#0d1117',
    foreground: '#c9d1d9',
    cursor: '#c9d1d9',
    selectionBackground: '#264f78',
    black: '#484f58',
    red: '#ff7b72',
    green: '#3fb950',
    yellow: '#d29922',
    blue: '#58a6ff',
    magenta: '#bc8cff',
    cyan: '#39c5cf',
    white: '#b1bac4',
    brightBlack: '#6e7681',
    brightRed: '#ffa198',
    brightGreen: '#56d364',
    brightYellow: '#e3b341',
    brightBlue: '#79c0ff',
    brightMagenta: '#d2a8ff',
    brightCyan: '#56d4dd',
    brightWhite: '#f0f6fc',
  },
  // Classic Monokai
  monokai: {
    background: '#272822',
    foreground: '#f8f8f2',
    cursor: '#f8f8f0',
    selectionBackground: '#49483e',
    black: '#272822',
    red: '#f92672',
    green: '#a6e22e',
    yellow: '#f4bf75',
    blue: '#66d9ef',
    magenta: '#ae81ff',
    cyan: '#a1efe4',
    white: '#f8f8f2',
    brightBlack: '#75715e',
    brightRed: '#f92672',
    brightGreen: '#a6e22e',
    brightYellow: '#f4bf75',
    brightBlue: '#66d9ef',
    brightMagenta: '#ae81ff',
    brightCyan: '#a1efe4',
    brightWhite: '#f9f8f5',
  },
  // Night Owl (Sarah Drasner)
  'night-owl': {
    background: '#011627',
    foreground: '#d6deeb',
    cursor: '#80a4c2',
    selectionBackground: '#1d3b53',
    black: '#011627',
    red: '#ef5350',
    green: '#22da6e',
    yellow: '#addb67',
    blue: '#82aaff',
    magenta: '#c792ea',
    cyan: '#21c7a8',
    white: '#d6deeb',
    brightBlack: '#575656',
    brightRed: '#ef5350',
    brightGreen: '#22da6e',
    brightYellow: '#ffeb95',
    brightBlue: '#82aaff',
    brightMagenta: '#c792ea',
    brightCyan: '#7fdbca',
    brightWhite: '#ffffff',
  },
};
// ─────────────────────────────────────────────
// Options
// ─────────────────────────────────────────────
/** Construction options for TerminalCapture.create(). */
export interface TerminalCaptureOptions {
  /** Number of terminal columns, default 120 */
  cols?: number;
  /** Number of terminal rows, default 40 */
  rows?: number;
  /** Working directory */
  cwd?: string;
  /**
   * Environment variables. When provided, this REPLACES the default
   * environment entirely (FORCE_COLOR/TERM/NODE_NO_WARNINGS are NOT merged in).
   */
  env?: NodeJS.ProcessEnv;
  /** Theme name or custom theme object, default 'dracula' */
  theme?: keyof typeof THEMES | XtermTheme;
  /** Whether to show macOS window decorations (traffic lights + title bar), default true */
  chrome?: boolean;
  /** Window title (only effective when chrome=true), default 'Terminal' */
  title?: string;
  /** Font size, default 14 */
  fontSize?: number;
  /** Font family, default system monospace font */
  fontFamily?: string;
  /** Default screenshot output directory */
  outputDir?: string;
}
// ─────────────────────────────────────────────
// Main class
// ─────────────────────────────────────────────
/**
 * Headless terminal screenshot driver: feeds a node-pty child process's raw
 * ANSI output into an xterm.js instance running inside Playwright's headless
 * Chromium, then screenshots the rendered terminal element.
 *
 * Typical lifecycle:
 *   const t = await TerminalCapture.create(opts);
 *   await t.spawn(cmd, args);
 *   await t.type(...); await t.idle();
 *   await t.capture('shot.png');
 *   await t.close();
 */
export class TerminalCapture {
  private browser: Browser | null = null;
  private page: Page | null = null;
  private ptyProcess: pty.IPty | null = null;
  // Raw ANSI byte stream accumulated from the PTY since spawn().
  private rawOutput = '';
  // How many chars of rawOutput have already been written into xterm.js.
  private lastFlushedLength = 0;
  private readonly cols: number;
  private readonly rows: number;
  private readonly cwd: string;
  private readonly env: NodeJS.ProcessEnv;
  private readonly theme: XtermTheme;
  private readonly showChrome: boolean;
  private readonly windowTitle: string;
  private readonly fontSize: number;
  private readonly fontFamily: string;
  private readonly outputDir: string;
  // ── Factory ──────────────────────────────
  /**
   * Create and initialize a TerminalCapture instance
   *
   * @example
   * ```ts
   * const t = await TerminalCapture.create({
   *   theme: 'dracula',
   *   chrome: true,
   *   title: 'qwen-code',
   * });
   * ```
   */
  static async create(
    options?: TerminalCaptureOptions,
  ): Promise<TerminalCapture> {
    const instance = new TerminalCapture(options);
    await instance.init();
    return instance;
  }
  private constructor(options?: TerminalCaptureOptions) {
    this.cols = options?.cols ?? 120;
    this.rows = options?.rows ?? 40;
    this.cwd = options?.cwd ?? process.cwd();
    // Build a clean env for optimal terminal rendering:
    // - Remove NO_COLOR (conflicts with FORCE_COLOR, can crash gradient components)
    // - Suppress Node.js warnings (noisy in screenshots)
    // - Force color output and 256-color terminal
    // Note: a caller-supplied options.env replaces this default env entirely.
    const baseEnv = { ...process.env };
    delete baseEnv['NO_COLOR'];
    this.env = options?.env ?? {
      ...baseEnv,
      FORCE_COLOR: '1',
      TERM: 'xterm-256color',
      NODE_NO_WARNINGS: '1',
    };
    this.showChrome = options?.chrome ?? true;
    this.windowTitle = options?.title ?? 'Terminal';
    this.fontSize = options?.fontSize ?? 14;
    this.fontFamily =
      options?.fontFamily ??
      "'Menlo', 'Monaco', 'Consolas', 'Courier New', monospace";
    this.outputDir = options?.outputDir ?? join(process.cwd(), 'screenshots');
    // Resolve theme (unknown theme names silently fall back to dracula)
    if (typeof options?.theme === 'string') {
      this.theme = THEMES[options.theme] ?? THEMES['dracula'];
    } else if (options?.theme && typeof options.theme === 'object') {
      this.theme = options.theme;
    } else {
      this.theme = THEMES['dracula'];
    }
  }
  // ── Lifecycle ────────────────────────────
  /** Launch headless Chromium, load xterm.js, and create the in-page Terminal. */
  private async init(): Promise<void> {
    // 1. Launch browser
    this.browser = await chromium.launch({ headless: true });
    this.page = await this.browser.newPage({
      viewport: { width: 1600, height: 1000 },
    });
    // 2. Set base HTML (with chrome decoration, container, etc.)
    await this.page.setContent(this.buildHTML());
    // 3. Load xterm.js from node_modules
    const xtermDir = this.resolveXtermDir();
    await this.page.addStyleTag({ path: join(xtermDir, 'css', 'xterm.css') });
    await this.page.addScriptTag({ path: join(xtermDir, 'lib', 'xterm.js') });
    // 4. Create xterm Terminal instance inside the page
    await this.page.evaluate(
      ({ cols, rows, theme, fontSize, fontFamily }) => {
        const W = window as unknown as Record<string, unknown>;
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        const Terminal = W['Terminal'] as new (opts: unknown) => any;
        const term = new Terminal({
          cols,
          rows,
          theme,
          fontFamily,
          fontSize,
          lineHeight: 1.2,
          cursorBlink: false,
          allowProposedApi: true,
          scrollback: 1000,
        });
        const container = document.getElementById('xterm-container')!;
        term.open(container);
        // Expose to outer scope
        W['term'] = term;
        W['termReady'] = true;
      },
      {
        cols: this.cols,
        rows: this.rows,
        theme: this.theme as unknown as Record<string, string>,
        fontSize: this.fontSize,
        fontFamily: this.fontFamily,
      },
    );
    // 5. Wait until terminal is ready
    await this.page.waitForFunction(
      () =>
        (window as unknown as Record<string, unknown>)['termReady'] === true,
    );
  }
  /**
   * Spawn a command (via pseudo-terminal)
   *
   * @example
   * ```ts
   * await terminal.spawn('node', ['dist/cli.js', '--yolo']);
   * ```
   */
  async spawn(command: string, args: string[] = []): Promise<void> {
    if (!this.page) {
      throw new Error(
        'Not initialized. Use TerminalCapture.create() factory method.',
      );
    }
    this.ptyProcess = pty.spawn(command, args, {
      name: 'xterm-256color',
      cols: this.cols,
      rows: this.rows,
      cwd: this.cwd,
      env: this.env,
    });
    // Output is only accumulated here; it is pushed to xterm.js lazily by flush().
    this.ptyProcess.onData((data) => {
      this.rawOutput += data;
    });
  }
  // ── Input ────────────────────────────────
  /**
   * Input text. Supports `\n` as Enter.
   *
   * @param text Text to input
   * @param options.delay Delay in ms — applied once after the write in fast
   *   mode (default 10), or after EACH character in slow mode (default 50)
   * @param options.slow Type character by character (simulate real typing), default false
   *
   * @example
   * ```ts
   * await terminal.type('Hello world\n'); // Input + Enter
   * await terminal.type('ls -la\n', { slow: true, delay: 80 });
   * ```
   */
  async type(
    text: string,
    options?: { delay?: number; slow?: boolean },
  ): Promise<void> {
    if (!this.ptyProcess) {
      throw new Error('No process running. Call spawn() first.');
    }
    // Convert \n to \r for PTY
    const translated = text.replace(/\n/g, '\r');
    if (options?.slow) {
      for (const char of translated) {
        this.ptyProcess.write(char);
        await this.sleep(options.delay ?? 50);
      }
    } else {
      this.ptyProcess.write(translated);
      await this.sleep(options?.delay ?? 10);
    }
  }
  // ── Wait ─────────────────────────────────
  /**
   * Wait for specific text to appear in terminal output.
   * Matching is case-insensitive and performed on ANSI-stripped output.
   *
   * @throws Error on timeout
   *
   * @example
   * ```ts
   * await terminal.waitFor('Type your message');
   * await terminal.waitFor('tokens', { timeout: 30000 });
   * ```
   */
  async waitFor(text: string, options?: { timeout?: number }): Promise<void> {
    const timeout = options?.timeout ?? 15000;
    const start = Date.now();
    while (Date.now() - start < timeout) {
      if (
        stripAnsi(this.rawOutput).toLowerCase().includes(text.toLowerCase())
      ) {
        return;
      }
      await this.sleep(200);
    }
    throw new Error(
      `Timeout (${timeout}ms) waiting for text: "${text}"\n` +
        `Last 500 chars of output: ${stripAnsi(this.rawOutput).slice(-500)}`,
    );
  }
  /**
   * Wait for output to stabilize (no new output within specified time)
   *
   * @param stableMs Stability detection duration (ms), default 500
   * @param timeout Maximum wait time (ms), default 30000
   *
   * @example
   * ```ts
   * await terminal.idle(); // Default: 500ms with no new output considered stable
   * await terminal.idle(2000); // 2s with no new output
   * ```
   */
  async idle(stableMs: number = 500, timeout: number = 30000): Promise<void> {
    const start = Date.now();
    let lastLength = this.rawOutput.length;
    let lastChangeTime = Date.now();
    while (Date.now() - start < timeout) {
      await this.sleep(100);
      if (this.rawOutput.length !== lastLength) {
        lastLength = this.rawOutput.length;
        lastChangeTime = Date.now();
      } else if (Date.now() - lastChangeTime >= stableMs) {
        return;
      }
    }
    // Timeout for idle() is not an error — just means output kept coming
  }
  /**
   * Wait for text to appear, then wait for output to stabilize (common combination)
   */
  async waitForAndIdle(
    text: string,
    options?: { timeout?: number; stableMs?: number },
  ): Promise<void> {
    await this.waitFor(text, { timeout: options?.timeout });
    await this.idle(options?.stableMs ?? 300, 5000);
  }
  // ── Capture ──────────────────────────────
  /**
   * Capture and save a screenshot. Filenames are deterministic (no timestamps) for easy regression comparison.
   *
   * @param filename Filename, e.g., 'initial.png'
   * @param outputDir Output directory, defaults to the outputDir from construction
   * @returns Full path to the screenshot file
   *
   * @example
   * ```ts
   * await terminal.capture('01-initial.png');
   * await terminal.capture('02-output.png', '/tmp/screenshots');
   * ```
   */
  async capture(filename: string, outputDir?: string): Promise<string> {
    if (!this.page) {
      throw new Error('Not initialized');
    }
    // 1. Flush all accumulated PTY data to xterm.js
    await this.flush();
    // 2. Wait for xterm.js rendering to complete
    await this.sleep(150);
    // 3. Prepare output directory
    const dir = outputDir ?? this.outputDir;
    mkdirSync(dir, { recursive: true });
    const filepath = join(dir, filename);
    // 4. Screenshot the capture root (terminal + optional chrome)
    const element = await this.page.$('#capture-root');
    if (element) {
      await element.screenshot({ path: filepath });
    } else {
      await this.page.screenshot({ path: filepath });
    }
    console.log(`📸 Captured: ${filepath}`);
    return filepath;
  }
  /**
   * Capture full terminal output (including scrollback buffer) as a long image.
   * Suitable for scenarios where output exceeds the visible area, e.g., detailed token lists from /context.
   *
   * Principle: Temporarily expand xterm.js rows to show complete scrollback, then restore original dimensions after screenshot.
   * Note: Only resizes xterm.js inside the browser, not the PTY dimensions, so it won't trigger CLI re-render.
   *
   * @param filename Filename
   * @param outputDir Output directory
   * @returns Full path to the screenshot file
   *
   * @example
   * ```ts
   * // Regular screenshot (only current viewport)
   * await terminal.capture('output.png');
   * // Full-length image (including scrollback buffer)
   * await terminal.captureFull('output-full.png');
   * ```
   */
  async captureFull(filename: string, outputDir?: string): Promise<string> {
    if (!this.page) {
      throw new Error('Not initialized');
    }
    // 1. Flush all accumulated PTY data to xterm.js
    await this.flush();
    await this.sleep(150);
    // 2. Query xterm.js for the actual content height (skip trailing empty lines)
    const contentLines = await this.page.evaluate(() => {
      const W = window as unknown as Record<string, unknown>;
      const term = W['term'] as {
        buffer: {
          active: {
            length: number;
            getLine: (i: number) =>
              | {
                  translateToString: (trimRight?: boolean) => string;
                }
              | undefined;
          };
        };
      };
      const buf = term.buffer.active;
      let lastNonEmpty = 0;
      for (let i = buf.length - 1; i >= 0; i--) {
        const line = buf.getLine(i);
        if (line && line.translateToString(true).trim().length > 0) {
          lastNonEmpty = i;
          break;
        }
      }
      return lastNonEmpty + 1;
    });
    const expandedRows = Math.max(contentLines + 2, this.rows);
    // 3. Temporarily resize xterm.js only (NOT the PTY) to show all content
    //    This avoids sending SIGWINCH to the child process, so the CLI won't re-render
    await this.page.evaluate(
      ({ cols, rows }: { cols: number; rows: number }) => {
        const W = window as unknown as Record<string, unknown>;
        const term = W['term'] as {
          resize: (c: number, r: number) => void;
          scrollToTop: () => void;
        };
        term.resize(cols, rows);
        // Scroll to top to ensure rendering starts from scrollback beginning position
        term.scrollToTop();
      },
      { cols: this.cols, rows: expandedRows },
    );
    // 4. Expand viewport to accommodate the taller terminal
    await this.page.setViewportSize({
      width: 1600,
      height: Math.max(expandedRows * 22, 1000), // ~22px per row (fontSize 14 * lineHeight 1.2 + padding)
    });
    await this.sleep(300);
    // 5. Screenshot the full content
    const dir = outputDir ?? this.outputDir;
    mkdirSync(dir, { recursive: true });
    const filepath = join(dir, filename);
    const element = await this.page.$('#capture-root');
    if (element) {
      await element.screenshot({ path: filepath });
    } else {
      await this.page.screenshot({ path: filepath, fullPage: true });
    }
    // 6. Restore original xterm.js dimensions and viewport
    await this.page.evaluate(
      ({ cols, rows }: { cols: number; rows: number }) => {
        const W = window as unknown as Record<string, unknown>;
        const term = W['term'] as { resize: (c: number, r: number) => void };
        term.resize(cols, rows);
      },
      { cols: this.cols, rows: this.rows },
    );
    await this.page.setViewportSize({ width: 1600, height: 1000 });
    console.log(`📸 Captured (full): ${filepath}`);
    return filepath;
  }
  // ── Output access ────────────────────────
  /**
   * Get cleaned terminal output (without ANSI escape sequences)
   */
  getOutput(): string {
    return stripAnsi(this.rawOutput);
  }
  /**
   * Get raw terminal output (with ANSI escape sequences)
   */
  getRawOutput(): string {
    return this.rawOutput;
  }
  // ── Cleanup ──────────────────────────────
  /**
   * Release all resources (PTY process, browser)
   */
  async close(): Promise<void> {
    if (this.ptyProcess) {
      try {
        this.ptyProcess.kill();
      } catch {
        // Process may have already exited
      }
      this.ptyProcess = null;
    }
    if (this.browser) {
      await this.browser.close();
      this.browser = null;
      this.page = null;
    }
  }
  // ── Internal: flush PTY → xterm.js ──────
  /**
   * Flush accumulated PTY raw output to xterm.js inside the browser.
   * Uses xterm.js's write callback to ensure data is fully parsed,
   * then waits one requestAnimationFrame to ensure rendering is complete.
   */
  private async flush(): Promise<void> {
    if (!this.page || this.rawOutput.length <= this.lastFlushedLength) {
      return;
    }
    const newData = this.rawOutput.slice(this.lastFlushedLength);
    this.lastFlushedLength = this.rawOutput.length;
    // Send data in chunks to avoid hitting string size limits
    const CHUNK_SIZE = 64 * 1024;
    for (let i = 0; i < newData.length; i += CHUNK_SIZE) {
      const chunk = newData.slice(i, i + CHUNK_SIZE);
      await this.page.evaluate((data: string) => {
        return new Promise<void>((resolve) => {
          const W = window as unknown as Record<string, unknown>;
          const term = W['term'] as {
            write: (d: string, cb: () => void) => void;
          };
          term.write(data, () => {
            // Data parsed → wait one frame for rendering
            requestAnimationFrame(() => resolve());
          });
        });
      }, chunk);
    }
  }
  // ── Internal: resolve xterm.js path ─────
  /** Locate the installed @xterm/xterm package directory via require.resolve. */
  private resolveXtermDir(): string {
    try {
      const pkgJsonPath = _require.resolve('@xterm/xterm/package.json');
      return dirname(pkgJsonPath);
    } catch {
      throw new Error(
        '@xterm/xterm is not installed.\n' +
          'Run: npm install --save-dev @xterm/xterm',
      );
    }
  }
  // ── Internal: build HTML ────────────────
  /** Build the page scaffold: backdrop, capture root, and optional macOS chrome. */
  private buildHTML(): string {
    const bg = this.theme.background;
    // Title bar color: slightly lighter than background
    // Use a manual approximation instead of color-mix for compatibility
    const titleBarBg = this.lighten(bg, 0.08);
    const chromeHTML = this.showChrome
      ? `
    <div class="title-bar" style="background: ${titleBarBg};">
      <div class="traffic-lights">
        <span class="tl tl-close"></span>
        <span class="tl tl-minimize"></span>
        <span class="tl tl-maximize"></span>
      </div>
      <span class="title-text">${this.escapeHtml(this.windowTitle)}</span>
      <div class="traffic-lights-spacer"></div>
    </div>`
      : '';
    return `<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<style>
* { margin: 0; padding: 0; box-sizing: border-box; }
body {
background: #0e0e1a;
display: flex;
justify-content: center;
align-items: flex-start;
padding: 40px;
min-height: 100vh;
}
#capture-root {
display: inline-block;
border-radius: ${this.showChrome ? '10px' : '6px'};
overflow: hidden;
background: ${bg};
box-shadow:
0 25px 70px rgba(0, 0, 0, 0.6),
0 0 0 1px rgba(255, 255, 255, 0.08);
}
/* ── Title bar (macOS chrome) ── */
.title-bar {
height: 40px;
display: flex;
align-items: center;
padding: 0 16px;
user-select: none;
}
.traffic-lights {
display: flex;
gap: 8px;
width: 56px;
}
.traffic-lights-spacer {
width: 56px;
}
.tl {
width: 12px;
height: 12px;
border-radius: 50%;
display: block;
}
.tl-close { background: #ff5f57; }
.tl-minimize { background: #ffbd2e; }
.tl-maximize { background: #28c840; }
.title-text {
flex: 1;
text-align: center;
color: rgba(255, 255, 255, 0.45);
font-size: 13px;
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', system-ui, sans-serif;
font-weight: 500;
}
/* ── Terminal container ── */
#xterm-container {
padding: 4px 8px 8px 8px;
}
/* Hide scrollbar in xterm */
.xterm-viewport::-webkit-scrollbar { display: none; }
.xterm-viewport { scrollbar-width: none; }
/* Ensure xterm canvas renders sharply */
.xterm canvas { image-rendering: pixelated; }
</style>
</head>
<body>
<div id="capture-root">
${chromeHTML}
<div id="xterm-container"></div>
</div>
</body>
</html>`;
  }
  // ── Internal: utils ─────────────────────
  /** Escape text for safe interpolation into the page HTML (title bar text). */
  private escapeHtml(text: string): string {
    return text
      .replace(/&/g, '&amp;')
      .replace(/</g, '&lt;')
      .replace(/>/g, '&gt;')
      .replace(/"/g, '&quot;')
      .replace(/'/g, '&#039;');
  }
  /**
   * Lighten a hex color by a factor (0-1).
   * Assumes a '#rrggbb' 6-digit hex input (true for all built-in THEMES).
   */
  private lighten(hex: string, factor: number): string {
    const h = hex.replace('#', '');
    const r = Math.min(
      255,
      parseInt(h.slice(0, 2), 16) + Math.round(255 * factor),
    );
    const g = Math.min(
      255,
      parseInt(h.slice(2, 4), 16) + Math.round(255 * factor),
    );
    const b = Math.min(
      255,
      parseInt(h.slice(4, 6), 16) + Math.round(255 * factor),
    );
    return `#${r.toString(16).padStart(2, '0')}${g.toString(16).padStart(2, '0')}${b.toString(16).padStart(2, '0')}`;
  }
  /** Promise-based delay helper. */
  private sleep(ms: number): Promise<void> {
    return new Promise((resolve) => setTimeout(resolve, ms));
  }
}

128
package-lock.json generated
View file

@ -1,12 +1,12 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.10.1",
"version": "0.10.5",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@qwen-code/qwen-code",
"version": "0.10.1",
"version": "0.10.5",
"workspaces": [
"packages/*"
],
@ -3834,6 +3834,119 @@
"node": ">=6"
}
},
"node_modules/@teddyzhu/clipboard": {
"version": "0.0.5",
"resolved": "https://registry.npmjs.org/@teddyzhu/clipboard/-/clipboard-0.0.5.tgz",
"integrity": "sha512-XA6MG7nLPZzj51agCwDYaVnVVrt0ByJ3G9rl3ar6N4GETAjUKKup6u76SLp2C5yHRWYV9hwMYDn04OGLar0MVg==",
"license": "MIT",
"engines": {
"node": ">= 10.16.0 < 11 || >= 11.8.0 < 12 || >= 12.0.0"
},
"optionalDependencies": {
"@teddyzhu/clipboard-darwin-arm64": "0.0.5",
"@teddyzhu/clipboard-darwin-x64": "0.0.5",
"@teddyzhu/clipboard-linux-arm64-gnu": "0.0.5",
"@teddyzhu/clipboard-linux-x64-gnu": "0.0.5",
"@teddyzhu/clipboard-win32-arm64-msvc": "0.0.5",
"@teddyzhu/clipboard-win32-x64-msvc": "0.0.5"
}
},
"node_modules/@teddyzhu/clipboard-darwin-arm64": {
"version": "0.0.5",
"resolved": "https://registry.npmjs.org/@teddyzhu/clipboard-darwin-arm64/-/clipboard-darwin-arm64-0.0.5.tgz",
"integrity": "sha512-FB3yykRAcw0VLmSjIGFddgew2t20UnLp80NZvi5e/lbsy/3mruHibMHkxHWqzCncuZsHdRsRXS/FmR/ggepW9A==",
"cpu": [
"arm64"
],
"license": "MIT",
"optional": true,
"os": [
"darwin"
],
"engines": {
"node": ">= 10.16.0 < 11 || >= 11.8.0 < 12 || >= 12.0.0"
}
},
"node_modules/@teddyzhu/clipboard-darwin-x64": {
"version": "0.0.5",
"resolved": "https://registry.npmjs.org/@teddyzhu/clipboard-darwin-x64/-/clipboard-darwin-x64-0.0.5.tgz",
"integrity": "sha512-tiDazMpLf2dS7BZUif3da3DLJima8E/CnexB3CNgjQf12CFJ+D1cPcj/CgfvMYZgFQSsYyACpQNfXn4hmVbymA==",
"cpu": [
"x64"
],
"license": "MIT",
"optional": true,
"os": [
"darwin"
],
"engines": {
"node": ">= 10.16.0 < 11 || >= 11.8.0 < 12 || >= 12.0.0"
}
},
"node_modules/@teddyzhu/clipboard-linux-arm64-gnu": {
"version": "0.0.5",
"resolved": "https://registry.npmjs.org/@teddyzhu/clipboard-linux-arm64-gnu/-/clipboard-linux-arm64-gnu-0.0.5.tgz",
"integrity": "sha512-qcokM+BaXn4iG4o4nYGHdfC04pr54S2F7x2o5osFhG3hMVYHZLR/8NKcYDKELnebpH612nW2bNRoWWy14lM45g==",
"cpu": [
"arm64"
],
"license": "MIT",
"optional": true,
"os": [
"linux"
],
"engines": {
"node": ">= 10.16.0 < 11 || >= 11.8.0 < 12 || >= 12.0.0"
}
},
"node_modules/@teddyzhu/clipboard-linux-x64-gnu": {
"version": "0.0.5",
"resolved": "https://registry.npmjs.org/@teddyzhu/clipboard-linux-x64-gnu/-/clipboard-linux-x64-gnu-0.0.5.tgz",
"integrity": "sha512-Ogh4zYM9s537WJszSvKrPAoKQZ2grnY7Xy6szyJp2+84uQKWNbvZkATODAsRUn48zr9gqL3PZeUqkIBaz8sCpQ==",
"cpu": [
"x64"
],
"license": "MIT",
"optional": true,
"os": [
"linux"
],
"engines": {
"node": ">= 10.16.0 < 11 || >= 11.8.0 < 12 || >= 12.0.0"
}
},
"node_modules/@teddyzhu/clipboard-win32-arm64-msvc": {
"version": "0.0.5",
"resolved": "https://registry.npmjs.org/@teddyzhu/clipboard-win32-arm64-msvc/-/clipboard-win32-arm64-msvc-0.0.5.tgz",
"integrity": "sha512-TuU+7e8qYc0T++sIArHTmqr+nfqiTfJ6gdrb1e8yDJb6MM3EFxCd2VonTqLQL1YpUdfcH+/rdMarG2rvCwvEhQ==",
"cpu": [
"arm64"
],
"license": "MIT",
"optional": true,
"os": [
"win32"
],
"engines": {
"node": ">= 10.16.0 < 11 || >= 11.8.0 < 12 || >= 12.0.0"
}
},
"node_modules/@teddyzhu/clipboard-win32-x64-msvc": {
"version": "0.0.5",
"resolved": "https://registry.npmjs.org/@teddyzhu/clipboard-win32-x64-msvc/-/clipboard-win32-x64-msvc-0.0.5.tgz",
"integrity": "sha512-f1Br5bI+INNDifjkOI1woZsIxsoW0rRej/4kaaJvZcMxxkSG9TMT2LYOjTF2g+DtXw32lsGvWICN6c3JiHeG7Q==",
"cpu": [
"x64"
],
"license": "MIT",
"optional": true,
"os": [
"win32"
],
"engines": {
"node": ">= 10.16.0 < 11 || >= 11.8.0 < 12 || >= 12.0.0"
}
},
"node_modules/@testing-library/dom": {
"version": "10.4.1",
"resolved": "https://registry.npmjs.org/@testing-library/dom/-/dom-10.4.1.tgz",
@ -18655,12 +18768,13 @@
},
"packages/cli": {
"name": "@qwen-code/qwen-code",
"version": "0.10.1",
"version": "0.10.5",
"dependencies": {
"@google/genai": "1.30.0",
"@iarna/toml": "^2.2.5",
"@modelcontextprotocol/sdk": "^1.25.1",
"@qwen-code/qwen-code-core": "file:../core",
"@teddyzhu/clipboard": "^0.0.5",
"@types/update-notifier": "^6.0.8",
"ansi-regex": "^6.2.2",
"command-exists": "^1.2.9",
@ -19274,7 +19388,7 @@
},
"packages/core": {
"name": "@qwen-code/qwen-code-core",
"version": "0.10.1",
"version": "0.10.5",
"hasInstallScript": true,
"dependencies": {
"@anthropic-ai/sdk": "^0.36.1",
@ -22754,7 +22868,7 @@
},
"packages/test-utils": {
"name": "@qwen-code/qwen-code-test-utils",
"version": "0.10.1",
"version": "0.10.5",
"dev": true,
"license": "Apache-2.0",
"devDependencies": {
@ -22766,7 +22880,7 @@
},
"packages/vscode-ide-companion": {
"name": "qwen-code-vscode-ide-companion",
"version": "0.10.1",
"version": "0.10.5",
"license": "LICENSE",
"dependencies": {
"@modelcontextprotocol/sdk": "^1.25.1",
@ -23013,7 +23127,7 @@
},
"packages/webui": {
"name": "@qwen-code/webui",
"version": "0.10.1",
"version": "0.10.5",
"license": "MIT",
"dependencies": {
"markdown-it": "^14.1.0"

View file

@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.10.1",
"version": "0.10.5",
"engines": {
"node": ">=20.0.0"
},
@ -13,7 +13,7 @@
"url": "git+https://github.com/QwenLM/qwen-code.git"
},
"config": {
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.10.1"
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.10.5"
},
"scripts": {
"start": "cross-env node scripts/start.js",

View file

@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code",
"version": "0.10.1",
"version": "0.10.5",
"description": "Qwen Code",
"repository": {
"type": "git",
@ -34,7 +34,7 @@
"dist"
],
"config": {
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.10.1"
"sandboxImageUri": "ghcr.io/qwenlm/qwen-code:0.10.5"
},
"dependencies": {
"@google/genai": "1.30.0",
@ -81,12 +81,12 @@
"@types/diff": "^7.0.2",
"@types/dotenv": "^6.1.1",
"@types/node": "^20.11.24",
"@types/prompts": "^2.4.9",
"@types/react": "^19.1.8",
"@types/react-dom": "^19.1.6",
"@types/semver": "^7.7.0",
"@types/shell-quote": "^1.7.5",
"@types/yargs": "^17.0.32",
"@types/prompts": "^2.4.9",
"archiver": "^7.0.1",
"ink-testing-library": "^4.0.0",
"jsdom": "^26.1.0",
@ -95,6 +95,15 @@
"typescript": "^5.3.3",
"vitest": "^3.1.1"
},
"optionalDependencies": {
"@teddyzhu/clipboard": "^0.0.5",
"@teddyzhu/clipboard-darwin-arm64": "0.0.5",
"@teddyzhu/clipboard-darwin-x64": "0.0.5",
"@teddyzhu/clipboard-linux-x64-gnu": "0.0.5",
"@teddyzhu/clipboard-linux-arm64-gnu": "0.0.5",
"@teddyzhu/clipboard-win32-x64-msvc": "0.0.5",
"@teddyzhu/clipboard-win32-arm64-msvc": "0.0.5"
},
"engines": {
"node": ">=20"
}

View file

@ -84,7 +84,8 @@ export class AcpFileSystemService implements FileSystemService {
limit: 1,
});
// Check if content starts with BOM character (U+FEFF)
return response.content.charCodeAt(0) === 0xfeff;
// Use codePointAt for better Unicode support and check content length first
return response.content.length > 0 && response.content.codePointAt(0) === 0xfeff;
} catch {
// Fall through to fallback if ACP read fails
}

View file

@ -516,6 +516,18 @@ export class Session implements SessionContext {
? await invocation.shouldConfirmExecute(abortSignal)
: false;
// Check for plan mode enforcement - block non-read-only tools
const isPlanMode = this.config.getApprovalMode() === ApprovalMode.PLAN;
if (isPlanMode && !isExitPlanModeTool && confirmationDetails) {
// In plan mode, block any tool that requires confirmation (write operations)
return errorResponse(
new Error(
`Plan mode is active. The tool "${fc.name}" cannot be executed because it modifies the system. ` +
'Please use the exit_plan_mode tool to present your plan and exit plan mode before making changes.',
),
);
}
if (confirmationDetails) {
const content: acp.ToolCallContent[] = [];

View file

@ -242,9 +242,14 @@ describe('parseArguments', () => {
});
it('should allow -r flag as alias for --resume', async () => {
process.argv = ['node', 'script.js', '-r', 'session-123'];
process.argv = [
'node',
'script.js',
'-r',
'123e4567-e89b-12d3-a456-426614174000',
];
const argv = await parseArguments();
expect(argv.resume).toBe('session-123');
expect(argv.resume).toBe('123e4567-e89b-12d3-a456-426614174000');
});
it('should allow -c flag as alias for --continue', async () => {

View file

@ -50,6 +50,19 @@ import { loadSandboxConfig } from './sandboxConfig.js';
import { appEvents } from '../utils/events.js';
import { mcpCommand } from '../commands/mcp.js';
// RFC 4122 UUID pattern (case-insensitive). NOTE(review): a previous comment
// called this a "v4" pattern, but the [1-5] version digit deliberately
// accepts any standard UUID version 1-5.
const UUID_REGEX =
  /^[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$/i;
/**
 * Validates if a string is a well-formed RFC 4122 UUID (versions 1-5).
 * Used to validate the --session-id and --resume CLI arguments.
 * @param value - The string to validate
 * @returns True if the string is a valid UUID, false otherwise
 */
function isValidUUID(value: string): boolean {
  return UUID_REGEX.test(value);
}
import { isWorkspaceTrusted } from './trustedFolders.js';
import { buildWebSearchConfig } from './webSearch.js';
import { writeStderrLine } from '../utils/stdioHelpers.js';
@ -137,6 +150,8 @@ export interface CliArgs {
continue: boolean | undefined;
/** Resume a specific session by its ID */
resume: string | undefined;
/** Specify a session ID without session resumption */
sessionId: string | undefined;
maxSessionTurns: number | undefined;
coreTools: string[] | undefined;
excludeTools: string[] | undefined;
@ -449,6 +464,10 @@ export async function parseArguments(): Promise<CliArgs> {
description:
'Resume a specific session by its ID. Use without an ID to show session picker.',
})
.option('session-id', {
type: 'string',
description: 'Specify a session ID for this run.',
})
.option('max-session-turns', {
type: 'number',
description: 'Maximum number of session turns',
@ -535,6 +554,15 @@ export async function parseArguments(): Promise<CliArgs> {
if (argv['continue'] && argv['resume']) {
return 'Cannot use both --continue and --resume together. Use --continue to resume the latest session, or --resume <sessionId> to resume a specific session.';
}
if (argv['sessionId'] && (argv['continue'] || argv['resume'])) {
return 'Cannot use --session-id with --continue or --resume. Use --session-id to start a new session with a specific ID, or use --continue/--resume to resume an existing session.';
}
if (argv['sessionId'] && !isValidUUID(argv['sessionId'] as string)) {
return `Invalid --session-id: "${argv['sessionId']}". Must be a valid UUID (e.g., "123e4567-e89b-12d3-a456-426614174000").`;
}
if (argv['resume'] && !isValidUUID(argv['resume'] as string)) {
return `Invalid --resume: "${argv['resume']}". Must be a valid UUID (e.g., "123e4567-e89b-12d3-a456-426614174000").`;
}
return true;
}),
)
@ -899,6 +927,17 @@ export async function loadCliConfig(
process.exit(1);
}
}
} else if (argv['sessionId']) {
// Use provided session ID without session resumption
// Check if session ID is already in use
const sessionService = new SessionService(cwd);
const exists = await sessionService.sessionExists(argv['sessionId']);
if (exists) {
const message = `Error: Session Id ${argv['sessionId']} is already in use.`;
writeStderrLine(message);
process.exit(1);
}
sessionId = argv['sessionId'];
}
const modelProvidersConfig = settings.modelProviders;

View file

@ -78,6 +78,7 @@ export interface KeyBinding {
command?: boolean;
/** Paste operation requirement: true=must be paste, false=must not be paste, undefined=ignore */
paste?: boolean;
meta?: boolean;
}
/**
@ -152,7 +153,16 @@ export const defaultKeyBindings: KeyBindingConfig = {
{ key: 'x', ctrl: true },
{ sequence: '\x18', ctrl: true },
],
[Command.PASTE_CLIPBOARD_IMAGE]: [{ key: 'v', ctrl: true }],
[Command.PASTE_CLIPBOARD_IMAGE]:
process.platform === 'win32'
? [
{ key: 'v', command: true },
{ key: 'v', meta: true },
]
: [
{ key: 'v', ctrl: true },
{ key: 'v', command: true },
],
// App level bindings
[Command.TOGGLE_TOOL_DESCRIPTIONS]: [{ key: 't', ctrl: true }],

View file

@ -373,7 +373,7 @@ const SETTINGS_SCHEMA = {
label: 'Show Line Numbers in Code',
category: 'UI',
requiresRestart: false,
default: false,
default: true,
description: 'Show line numbers in the code output.',
showInDialog: true,
},

View file

@ -7,6 +7,14 @@
import { createHash } from 'node:crypto';
import type { ProviderModelConfig as ModelConfig } from '@qwen-code/qwen-code-core';
/**
 * Deployment regions for the Bailian Coding Plan service.
 * The two regions use different API endpoints and are mutually exclusive
 * for a given user, so a single unified API-key env var is shared.
 */
export enum CodingPlanRegion {
  // Mainland China endpoint (coding.dashscope.aliyuncs.com)
  CHINA = 'china',
  // Global / international endpoint (coding-intl.dashscope.aliyuncs.com)
  GLOBAL = 'global',
}
/**
* Coding plan template - array of model configurations
* When user provides an api-key, these configs will be cloned with envKey pointing to the stored api-key
@ -14,48 +22,282 @@ import type { ProviderModelConfig as ModelConfig } from '@qwen-code/qwen-code-co
export type CodingPlanTemplate = ModelConfig[];
/**
 * Environment variable key for storing the coding plan API key.
 * A single unified key is used for both regions since they are
 * mutually exclusive for a given user.
 */
export const CODING_PLAN_ENV_KEY = 'BAILIAN_CODING_PLAN_API_KEY';
/**
 * CODING_PLAN_MODELS defines the model configurations for coding-plan mode.
 * Every entry targets the Bailian Coding Plan endpoint and resolves its
 * API key from the unified CODING_PLAN_ENV_KEY environment variable.
 */
export const CODING_PLAN_MODELS: CodingPlanTemplate = [
  {
    id: 'qwen3-coder-plus',
    name: 'qwen3-coder-plus',
    baseUrl: 'https://coding.dashscope.aliyuncs.com/v1',
    description: 'qwen3-coder-plus model from Bailian Coding Plan',
    envKey: CODING_PLAN_ENV_KEY,
  },
  {
    id: 'qwen3-max-2026-01-23',
    name: 'qwen3-max-2026-01-23',
    description:
      'qwen3-max model with thinking enabled from Bailian Coding Plan',
    baseUrl: 'https://coding.dashscope.aliyuncs.com/v1',
    envKey: CODING_PLAN_ENV_KEY,
    // Requests the model's "thinking" mode via provider extra_body options.
    generationConfig: {
      extra_body: {
        enable_thinking: true,
      },
    },
  },
];
/**
* Computes the version hash for the coding plan template.
* Uses SHA256 of the JSON-serialized template for deterministic versioning.
* @param template - The template to compute version for
* @returns Hexadecimal string representing the template version
*/
export function computeCodingPlanVersion(): string {
const templateString = JSON.stringify(CODING_PLAN_MODELS);
export function computeCodingPlanVersion(template: CodingPlanTemplate): string {
const templateString = JSON.stringify(template);
return createHash('sha256').update(templateString).digest('hex');
}
/**
* Current version of the coding plan template.
* Computed at runtime from the template content.
* Generate the complete coding plan template for a specific region.
* China region uses legacy description to maintain backward compatibility.
* Global region uses new description with region indicator.
* @param region - The region to generate template for
* @returns Complete model configuration array for the region
*/
export const CODING_PLAN_VERSION = computeCodingPlanVersion();
/**
 * Generate the complete coding plan template for a specific region.
 *
 * Both regions expose the same model line-up; only the display-name prefix
 * and the API base URL differ. The list is therefore built from one shared
 * roster instead of two hand-maintained copies that could drift apart.
 *
 * IMPORTANT: computeCodingPlanVersion() hashes JSON.stringify(template), so
 * the property insertion order below (id, name, baseUrl, envKey,
 * generationConfig) must not change — otherwise existing users would be
 * prompted for an unnecessary configuration update.
 *
 * @param region - The region to generate template for
 * @returns Complete model configuration array for the region
 */
export function generateCodingPlanTemplate(
  region: CodingPlanRegion,
): CodingPlanTemplate {
  const isChina = region === CodingPlanRegion.CHINA;
  // China keeps the legacy branding and endpoint for backward compatibility.
  const namePrefix = isChina
    ? '[Bailian Coding Plan]'
    : '[Bailian Coding Plan for Global/Intl]';
  const baseUrl = isChina
    ? 'https://coding.dashscope.aliyuncs.com/v1'
    : 'https://coding-intl.dashscope.aliyuncs.com/v1';

  // Shared model roster; `thinking` marks models that request the provider's
  // enable_thinking mode via extra_body.
  const models: Array<{ id: string; thinking?: boolean }> = [
    { id: 'qwen3.5-plus', thinking: true },
    { id: 'qwen3-coder-plus' },
    { id: 'qwen3-coder-next' },
    { id: 'qwen3-max-2026-01-23', thinking: true },
    { id: 'glm-4.7', thinking: true },
    { id: 'glm-5', thinking: true },
    { id: 'MiniMax-M2.5', thinking: true },
    { id: 'kimi-k2.5', thinking: true },
  ];

  return models.map(({ id, thinking }) => ({
    id,
    name: `${namePrefix} ${id}`,
    baseUrl,
    envKey: CODING_PLAN_ENV_KEY,
    // Spread last so generationConfig keeps its original trailing position
    // in the serialized JSON (version-hash compatibility).
    ...(thinking
      ? { generationConfig: { extra_body: { enable_thinking: true } } }
      : {}),
  }));
}
/**
 * Build the full Coding Plan configuration bundle for a region.
 * @param region - The region to use
 * @returns Object containing template, baseUrl, regionName, and version
 */
export function getCodingPlanConfig(region: CodingPlanRegion) {
  const isChina = region === CodingPlanRegion.CHINA;
  const template = generateCodingPlanTemplate(region);
  return {
    template,
    baseUrl: isChina
      ? 'https://coding.dashscope.aliyuncs.com/v1'
      : 'https://coding-intl.dashscope.aliyuncs.com/v1',
    regionName: isChina
      ? 'Coding Plan (Bailian, China)'
      : 'Coding Plan (Bailian, Global/Intl)',
    version: computeCodingPlanVersion(template),
  };
}
/**
 * Get all unique base URLs for coding plan (used for filtering/config detection).
 * @returns Array of base URLs, one per supported region
 */
export function getCodingPlanBaseUrls(): string[] {
  const chinaBaseUrl = 'https://coding.dashscope.aliyuncs.com/v1';
  const globalBaseUrl = 'https://coding-intl.dashscope.aliyuncs.com/v1';
  return [chinaBaseUrl, globalBaseUrl];
}
/**
 * Check if a config belongs to Coding Plan (any region).
 * A Coding Plan config must use the unified env key and one of the two
 * known region endpoints.
 * @param baseUrl - The baseUrl to check
 * @param envKey - The envKey to check
 * @returns The region if matched, false otherwise
 */
export function isCodingPlanConfig(
  baseUrl: string | undefined,
  envKey: string | undefined,
): CodingPlanRegion | false {
  // Reject missing baseUrl and anything not using the unified env key
  // (this also covers a missing envKey).
  if (!baseUrl || envKey !== CODING_PLAN_ENV_KEY) {
    return false;
  }
  switch (baseUrl) {
    case 'https://coding.dashscope.aliyuncs.com/v1':
      return CodingPlanRegion.CHINA;
    case 'https://coding-intl.dashscope.aliyuncs.com/v1':
      return CodingPlanRegion.GLOBAL;
    default:
      return false;
  }
}
/**
 * Map a Coding Plan baseUrl to its region.
 * @param baseUrl - The baseUrl to inspect
 * @returns The matching region, or null when the URL is not a known endpoint
 */
export function getRegionFromBaseUrl(
  baseUrl: string | undefined,
): CodingPlanRegion | null {
  switch (baseUrl) {
    case 'https://coding.dashscope.aliyuncs.com/v1':
      return CodingPlanRegion.CHINA;
    case 'https://coding-intl.dashscope.aliyuncs.com/v1':
      return CodingPlanRegion.GLOBAL;
    default:
      // undefined and every unrecognized URL fall through to null.
      return null;
  }
}

View file

@ -496,6 +496,7 @@ describe('gemini.tsx main function kitty protocol', () => {
experimentalLsp: undefined,
channel: undefined,
chatRecording: undefined,
sessionId: undefined,
});
await main();

View file

@ -11,6 +11,12 @@ export default {
// ============================================================================
// Help / UI Components
// ============================================================================
// Attachment hints
'↑ to manage attachments': '↑ Anhänge verwalten',
'← → select, Delete to remove, ↓ to exit':
'← → auswählen, Entf zum Löschen, ↓ beenden',
'Attachments: ': 'Anhänge: ',
'Basics:': 'Grundlagen:',
'Add context': 'Kontext hinzufügen',
'Use {{symbol}} to specify files for context (e.g., {{example}}) to target specific files or folders.':
@ -1032,8 +1038,8 @@ export default {
'(not set)': '(nicht gesetzt)',
"Failed to switch model to '{{modelId}}'.\n\n{{error}}":
"Modell konnte nicht auf '{{modelId}}' umgestellt werden.\n\n{{error}}",
'The latest Qwen Coder model from Alibaba Cloud ModelStudio (version: qwen3-coder-plus-2025-09-23)':
'Das neueste Qwen Coder Modell von Alibaba Cloud ModelStudio (Version: qwen3-coder-plus-2025-09-23)',
'Qwen 3.5 Plus — efficient hybrid model with leading coding performance':
'Qwen 3.5 Plus — effizientes Hybridmodell mit führender Programmierleistung',
'The latest Qwen Vision model from Alibaba Cloud ModelStudio (version: qwen3-vl-plus-2025-09-23)':
'Das neueste Qwen Vision Modell von Alibaba Cloud ModelStudio (Version: qwen3-vl-plus-2025-09-23)',
@ -1419,8 +1425,12 @@ export default {
// Auth Dialog - View Titles and Labels
// ============================================================================
'Coding Plan': 'Coding Plan',
'Coding Plan (Bailian, China)': 'Coding Plan (Bailian, China)',
'Coding Plan (Bailian, Global/Intl)': 'Coding Plan (Bailian, Global/Intl)',
"Paste your api key of Bailian Coding Plan and you're all set!":
'Fügen Sie Ihren Bailian Coding Plan API-Schlüssel ein und Sie sind bereit!',
"Paste your api key of Coding Plan (Bailian, Global/Intl) and you're all set!":
'Fügen Sie Ihren Coding Plan (Bailian, Global/Intl) API-Schlüssel ein und Sie sind bereit!',
Custom: 'Benutzerdefiniert',
'More instructions about configuring `modelProviders` manually.':
'Weitere Anweisungen zur manuellen Konfiguration von `modelProviders`.',
@ -1430,4 +1440,18 @@ export default {
'(Press Enter to submit, Escape to cancel)':
'(Enter zum Absenden, Escape zum Abbrechen)',
'More instructions please check:': 'Weitere Anweisungen finden Sie unter:',
// ============================================================================
// Coding Plan International Updates
// ============================================================================
'New model configurations are available for {{region}}. Update now?':
'Neue Modellkonfigurationen sind für {{region}} verfügbar. Jetzt aktualisieren?',
'New model configurations are available for Bailian Coding Plan (China). Update now?':
'Neue Modellkonfigurationen sind für Bailian Coding Plan (China) verfügbar. Jetzt aktualisieren?',
'New model configurations are available for Coding Plan (Bailian, Global/Intl). Update now?':
'Neue Modellkonfigurationen sind für Coding Plan (Bailian, Global/Intl) verfügbar. Jetzt aktualisieren?',
'{{region}} configuration updated successfully. Model switched to "{{model}}".':
'{{region}}-Konfiguration erfolgreich aktualisiert. Modell auf "{{model}}" umgeschaltet.',
'Authenticated successfully with {{region}}. API key is stored in settings.env.':
'Erfolgreich mit {{region}} authentifiziert. API-Schlüssel ist in settings.env gespeichert.',
};

View file

@ -11,6 +11,12 @@ export default {
// ============================================================================
// Help / UI Components
// ============================================================================
// Attachment hints
'↑ to manage attachments': '↑ to manage attachments',
'← → select, Delete to remove, ↓ to exit':
'← → select, Delete to remove, ↓ to exit',
'Attachments: ': 'Attachments: ',
'Basics:': 'Basics:',
'Add context': 'Add context',
'Use {{symbol}} to specify files for context (e.g., {{example}}) to target specific files or folders.':
@ -1057,8 +1063,8 @@ export default {
'(not set)': '(not set)',
"Failed to switch model to '{{modelId}}'.\n\n{{error}}":
"Failed to switch model to '{{modelId}}'.\n\n{{error}}",
'The latest Qwen Coder model from Alibaba Cloud ModelStudio (version: qwen3-coder-plus-2025-09-23)':
'The latest Qwen Coder model from Alibaba Cloud ModelStudio (version: qwen3-coder-plus-2025-09-23)',
'Qwen 3.5 Plus — efficient hybrid model with leading coding performance':
'Qwen 3.5 Plus — efficient hybrid model with leading coding performance',
'The latest Qwen Vision model from Alibaba Cloud ModelStudio (version: qwen3-vl-plus-2025-09-23)':
'The latest Qwen Vision model from Alibaba Cloud ModelStudio (version: qwen3-vl-plus-2025-09-23)',
@ -1410,6 +1416,13 @@ export default {
'Failed to open browser. Check out the extensions gallery at {{url}}':
'Failed to open browser. Check out the extensions gallery at {{url}}',
// ============================================================================
// Retry / Rate Limit
// ============================================================================
'Rate limit error: {{reason}}': 'Rate limit error: {{reason}}',
'Retrying in {{seconds}} seconds… (attempt {{attempt}}/{{maxRetries}})':
'Retrying in {{seconds}} seconds… (attempt {{attempt}}/{{maxRetries}})',
// ============================================================================
// Coding Plan Authentication
// ============================================================================
@ -1451,8 +1464,12 @@ export default {
// Auth Dialog - View Titles and Labels
// ============================================================================
'Coding Plan': 'Coding Plan',
'Coding Plan (Bailian, China)': 'Coding Plan (Bailian, China)',
'Coding Plan (Bailian, Global/Intl)': 'Coding Plan (Bailian, Global/Intl)',
"Paste your api key of Bailian Coding Plan and you're all set!":
"Paste your api key of Bailian Coding Plan and you're all set!",
"Paste your api key of Coding Plan (Bailian, Global/Intl) and you're all set!":
"Paste your api key of Coding Plan (Bailian, Global/Intl) and you're all set!",
Custom: 'Custom',
'More instructions about configuring `modelProviders` manually.':
'More instructions about configuring `modelProviders` manually.',
@ -1460,4 +1477,18 @@ export default {
'(Press Escape to go back)': '(Press Escape to go back)',
'(Press Enter to submit, Escape to cancel)':
'(Press Enter to submit, Escape to cancel)',
// ============================================================================
// Coding Plan International Updates
// ============================================================================
'New model configurations are available for {{region}}. Update now?':
'New model configurations are available for {{region}}. Update now?',
'New model configurations are available for Bailian Coding Plan (China). Update now?':
'New model configurations are available for Bailian Coding Plan (China). Update now?',
'New model configurations are available for Coding Plan (Bailian, Global/Intl). Update now?':
'New model configurations are available for Coding Plan (Bailian, Global/Intl). Update now?',
'{{region}} configuration updated successfully. Model switched to "{{model}}".':
'{{region}} configuration updated successfully. Model switched to "{{model}}".',
'Authenticated successfully with {{region}}. API key is stored in settings.env.':
'Authenticated successfully with {{region}}. API key is stored in settings.env.',
};

View file

@ -733,8 +733,8 @@ export default {
// Dialogs - Model
'Select Model': 'モデルを選択',
'(Press Esc to close)': '(Esc で閉じる)',
'The latest Qwen Coder model from Alibaba Cloud ModelStudio (version: qwen3-coder-plus-2025-09-23)':
'Alibaba Cloud ModelStudioの最新Qwen Coderモデル(バージョン: qwen3-coder-plus-2025-09-23)',
'Qwen 3.5 Plus — efficient hybrid model with leading coding performance':
'Qwen 3.5 Plus — 効率的なハイブリッドモデル、業界トップクラスのコーディング性能',
'The latest Qwen Vision model from Alibaba Cloud ModelStudio (version: qwen3-vl-plus-2025-09-23)':
'Alibaba Cloud ModelStudioの最新Qwen Visionモデル(バージョン: qwen3-vl-plus-2025-09-23)',
// Dialogs - Permissions
@ -930,8 +930,13 @@ export default {
// Auth Dialog - View Titles and Labels
// ============================================================================
'Coding Plan': 'Coding Plan',
'Coding Plan (Bailian, China)': 'Coding Plan (Bailian, 中国)',
'Coding Plan (Bailian, Global/Intl)':
'Coding Plan (Bailian, グローバル/国際)',
"Paste your api key of Bailian Coding Plan and you're all set!":
'Bailian Coding PlanのAPIキーを貼り付けるだけで準備完了です',
"Paste your api key of Coding Plan (Bailian, Global/Intl) and you're all set!":
'Coding Plan (Bailian, グローバル/国際) のAPIキーを貼り付けるだけで準備完了です',
Custom: 'カスタム',
'More instructions about configuring `modelProviders` manually.':
'`modelProviders`を手動で設定する方法の詳細はこちら。',
@ -940,4 +945,18 @@ export default {
'(Press Enter to submit, Escape to cancel)':
'(Enterで送信、Escapeでキャンセル)',
'More instructions please check:': '詳細な手順はこちらをご確認ください:',
// ============================================================================
// Coding Plan International Updates
// ============================================================================
'New model configurations are available for {{region}}. Update now?':
'{{region}} の新しいモデル設定が利用可能です。今すぐ更新しますか?',
'New model configurations are available for Bailian Coding Plan (China). Update now?':
'Bailian Coding Plan (中国) の新しいモデル設定が利用可能です。今すぐ更新しますか?',
'New model configurations are available for Coding Plan (Bailian, Global/Intl). Update now?':
'Coding Plan (Bailian, グローバル/国際) の新しいモデル設定が利用可能です。今すぐ更新しますか?',
'{{region}} configuration updated successfully. Model switched to "{{model}}".':
'{{region}} の設定が正常に更新されました。モデルが "{{model}}" に切り替わりました。',
'Authenticated successfully with {{region}}. API key is stored in settings.env.':
'{{region}} での認証に成功しました。APIキーは settings.env に保存されています。',
};

View file

@ -1041,8 +1041,8 @@ export default {
'(not set)': '(não definido)',
"Failed to switch model to '{{modelId}}'.\n\n{{error}}":
"Falha ao trocar o modelo para '{{modelId}}'.\n\n{{error}}",
'The latest Qwen Coder model from Alibaba Cloud ModelStudio (version: qwen3-coder-plus-2025-09-23)':
'O modelo Qwen Coder mais recente do Alibaba Cloud ModelStudio (versão: qwen3-coder-plus-2025-09-23)',
'Qwen 3.5 Plus — efficient hybrid model with leading coding performance':
'Qwen 3.5 Plus — modelo híbrido eficiente com desempenho líder em programação',
'The latest Qwen Vision model from Alibaba Cloud ModelStudio (version: qwen3-vl-plus-2025-09-23)':
'O modelo Qwen Vision mais recente do Alibaba Cloud ModelStudio (versão: qwen3-vl-plus-2025-09-23)',
@ -1433,8 +1433,12 @@ export default {
// Auth Dialog - View Titles and Labels
// ============================================================================
'Coding Plan': 'Coding Plan',
'Coding Plan (Bailian, China)': 'Coding Plan (Bailian, China)',
'Coding Plan (Bailian, Global/Intl)': 'Coding Plan (Bailian, Global/Intl)',
"Paste your api key of Bailian Coding Plan and you're all set!":
'Cole sua chave de API do Bailian Coding Plan e pronto!',
"Paste your api key of Coding Plan (Bailian, Global/Intl) and you're all set!":
'Cole sua chave de API do Coding Plan (Bailian, Global/Intl) e pronto!',
Custom: 'Personalizado',
'More instructions about configuring `modelProviders` manually.':
'Mais instruções sobre como configurar `modelProviders` manualmente.',
@ -1444,4 +1448,18 @@ export default {
'(Press Enter to submit, Escape to cancel)':
'(Pressione Enter para enviar, Escape para cancelar)',
'More instructions please check:': 'Mais instruções, consulte:',
// ============================================================================
// Coding Plan International Updates
// ============================================================================
'New model configurations are available for {{region}}. Update now?':
'Novas configurações de modelo estão disponíveis para o {{region}}. Atualizar agora?',
'New model configurations are available for Bailian Coding Plan (China). Update now?':
'Novas configurações de modelo estão disponíveis para o Bailian Coding Plan (China). Atualizar agora?',
'New model configurations are available for Coding Plan (Bailian, Global/Intl). Update now?':
'Novas configurações de modelo estão disponíveis para o Coding Plan (Bailian, Global/Intl). Atualizar agora?',
'{{region}} configuration updated successfully. Model switched to "{{model}}".':
'Configuração do {{region}} atualizada com sucesso. Modelo alterado para "{{model}}".',
'Authenticated successfully with {{region}}. API key is stored in settings.env.':
'Autenticado com sucesso com {{region}}. A chave de API está armazenada em settings.env.',
};

View file

@ -11,6 +11,12 @@ export default {
// ============================================================================
// Справка / Компоненты интерфейса
// ============================================================================
// Attachment hints
'↑ to manage attachments': '↑ управление вложениями',
'← → select, Delete to remove, ↓ to exit':
'← → выбрать, Delete удалить, ↓ выйти',
'Attachments: ': 'Вложения: ',
'Basics:': 'Основы:',
'Add context': 'Добавить контекст',
'Use {{symbol}} to specify files for context (e.g., {{example}}) to target specific files or folders.':
@ -1034,8 +1040,8 @@ export default {
'(not set)': '(не задано)',
"Failed to switch model to '{{modelId}}'.\n\n{{error}}":
"Не удалось переключиться на модель '{{modelId}}'.\n\n{{error}}",
'The latest Qwen Coder model from Alibaba Cloud ModelStudio (version: qwen3-coder-plus-2025-09-23)':
'Последняя модель Qwen Coder от Alibaba Cloud ModelStudio (версия: qwen3-coder-plus-2025-09-23)',
'Qwen 3.5 Plus — efficient hybrid model with leading coding performance':
'Qwen 3.5 Plus — эффективная гибридная модель с лидирующей производительностью в программировании',
'The latest Qwen Vision model from Alibaba Cloud ModelStudio (version: qwen3-vl-plus-2025-09-23)':
'Последняя модель Qwen Vision от Alibaba Cloud ModelStudio (версия: qwen3-vl-plus-2025-09-23)',
@ -1423,8 +1429,13 @@ export default {
// Auth Dialog - View Titles and Labels
// ============================================================================
'Coding Plan': 'Coding Plan',
'Coding Plan (Bailian, China)': 'Coding Plan (Bailian, Китай)',
'Coding Plan (Bailian, Global/Intl)':
'Coding Plan (Bailian, Глобальный/Международный)',
"Paste your api key of Bailian Coding Plan and you're all set!":
'Вставьте ваш API-ключ Bailian Coding Plan и всё готово!',
"Paste your api key of Coding Plan (Bailian, Global/Intl) and you're all set!":
'Вставьте ваш API-ключ Coding Plan (Bailian, Глобальный/Международный) и всё готово!',
Custom: 'Пользовательский',
'More instructions about configuring `modelProviders` manually.':
'Дополнительные инструкции по ручной настройке `modelProviders`.',
@ -1433,4 +1444,18 @@ export default {
'(Press Enter to submit, Escape to cancel)':
'(Нажмите Enter для отправки, Escape для отмены)',
'More instructions please check:': 'Дополнительные инструкции см.:',
// ============================================================================
// Coding Plan International Updates
// ============================================================================
'New model configurations are available for {{region}}. Update now?':
'Доступны новые конфигурации моделей для {{region}}. Обновить сейчас?',
'New model configurations are available for Bailian Coding Plan (China). Update now?':
'Доступны новые конфигурации моделей для Bailian Coding Plan (Китай). Обновить сейчас?',
'New model configurations are available for Coding Plan (Bailian, Global/Intl). Update now?':
'Доступны новые конфигурации моделей для Coding Plan (Bailian, Глобальный/Международный). Обновить сейчас?',
'{{region}} configuration updated successfully. Model switched to "{{model}}".':
'Конфигурация {{region}} успешно обновлена. Модель переключена на "{{model}}".',
'Authenticated successfully with {{region}}. API key is stored in settings.env.':
'Успешная аутентификация с {{region}}. API-ключ сохранён в settings.env.',
};

View file

@ -10,6 +10,11 @@ export default {
// ============================================================================
// Help / UI Components
// ============================================================================
// Attachment hints
'↑ to manage attachments': '↑ 管理附件',
'← → select, Delete to remove, ↓ to exit': '← → 选择Delete 删除,↓ 退出',
'Attachments: ': '附件:',
'Basics:': '基础功能:',
'Add context': '添加上下文',
'Use {{symbol}} to specify files for context (e.g., {{example}}) to target specific files or folders.':
@ -991,8 +996,8 @@ export default {
'(not set)': '(未设置)',
"Failed to switch model to '{{modelId}}'.\n\n{{error}}":
"无法切换到模型 '{{modelId}}'.\n\n{{error}}",
'The latest Qwen Coder model from Alibaba Cloud ModelStudio (version: qwen3-coder-plus-2025-09-23)':
'来自阿里云 ModelStudio 的最新 Qwen Coder 模型版本qwen3-coder-plus-2025-09-23',
'Qwen 3.5 Plus — efficient hybrid model with leading coding performance':
'Qwen 3.5 Plus — 高效混合架构,编程性能业界领先',
'The latest Qwen Vision model from Alibaba Cloud ModelStudio (version: qwen3-vl-plus-2025-09-23)':
'来自阿里云 ModelStudio 的最新 Qwen Vision 模型版本qwen3-vl-plus-2025-09-23',
@ -1237,6 +1242,13 @@ export default {
'Failed to open browser. Check out the extensions gallery at {{url}}':
'打开浏览器失败。请访问扩展市场:{{url}}',
// ============================================================================
// Retry / Rate Limit
// ============================================================================
'Rate limit error: {{reason}}': '触发限流:{{reason}}',
'Retrying in {{seconds}} seconds… (attempt {{attempt}}/{{maxRetries}})':
'将于 {{seconds}} 秒后重试…(第 {{attempt}}/{{maxRetries}} 次)',
// ============================================================================
// Coding Plan Authentication
// ============================================================================
@ -1279,12 +1291,30 @@ export default {
// ============================================================================
'API-KEY': 'API-KEY',
'Coding Plan': 'Coding Plan',
'Coding Plan (Bailian, China)': 'Coding Plan (百炼, 中国)',
'Coding Plan (Bailian, Global/Intl)': 'Coding Plan (百炼, 全球/国际)',
"Paste your api key of Bailian Coding Plan and you're all set!":
'粘贴您的百炼 Coding Plan API Key即可完成设置',
"Paste your api key of Coding Plan (Bailian, Global/Intl) and you're all set!":
'粘贴您的 Coding Plan (百炼, 全球/国际) API Key即可完成设置',
Custom: '自定义',
'More instructions about configuring `modelProviders` manually.':
'关于手动配置 `modelProviders` 的更多说明。',
'Select API-KEY configuration mode:': '选择 API-KEY 配置模式:',
'(Press Escape to go back)': '(按 Escape 键返回)',
'(Press Enter to submit, Escape to cancel)': '(按 Enter 提交Escape 取消)',
// ============================================================================
// Coding Plan International Updates
// ============================================================================
'New model configurations are available for {{region}}. Update now?':
'{{region}} 有新的模型配置可用。是否立即更新?',
'New model configurations are available for Bailian Coding Plan (China). Update now?':
'百炼 Coding Plan (中国) 有新的模型配置可用。是否立即更新?',
'New model configurations are available for Coding Plan (Bailian, Global/Intl). Update now?':
'Coding Plan (百炼, 全球/国际) 有新的模型配置可用。是否立即更新?',
'{{region}} configuration updated successfully. Model switched to "{{model}}".':
'{{region}} 配置更新成功。模型已切换至 "{{model}}"。',
'Authenticated successfully with {{region}}. API key is stored in settings.env.':
'成功通过 {{region}} 认证。API Key 已存储在 settings.env 中。',
};

View file

@ -80,6 +80,8 @@ export class ControlDispatcher implements IPendingRequestRegistry {
private pendingOutgoingRequests: Map<string, PendingOutgoingRequest> =
new Map();
private abortHandler: (() => void) | null = null;
constructor(context: IControlContext) {
this.context = context;
@ -102,9 +104,10 @@ export class ControlDispatcher implements IPendingRequestRegistry {
// this.hookController = new HookController(context, this, 'HookController');
// Listen for main abort signal
this.context.abortSignal.addEventListener('abort', () => {
this.abortHandler = () => {
this.shutdown();
});
};
this.context.abortSignal.addEventListener('abort', this.abortHandler);
}
/**
@ -240,6 +243,12 @@ export class ControlDispatcher implements IPendingRequestRegistry {
shutdown(): void {
debugLogger.debug('[ControlDispatcher] Shutting down');
// Remove abort listener to prevent memory leak
if (this.abortHandler) {
this.context.abortSignal.removeEventListener('abort', this.abortHandler);
this.abortHandler = null;
}
// Cancel all incoming requests
for (const [
_requestId,

View file

@ -193,6 +193,7 @@ export class SystemController extends BaseController {
return {
subtype: 'initialize',
session_id: this.context.config.getSessionId(),
capabilities,
};
}

View file

@ -408,7 +408,8 @@ class Session {
private handleInterrupt(): void {
debugLogger.info('[Session] Interrupt requested');
this.abortController.abort();
this.abortController = new AbortController();
// Do not create a new AbortController to prevent listener leaks.
// Subsequent queries will check signal.aborted and fail immediately.
}
private setupSignalHandlers(): void {

View file

@ -696,7 +696,6 @@ export const AppContainer = (props: AppContainerProps) => {
terminalWidth,
terminalHeight,
handleVisionSwitchRequired, // onVisionSwitchRequired
embeddedShellFocused,
);
// Track whether suggestions are visible for Tab key handling
@ -904,6 +903,8 @@ export const AppContainer = (props: AppContainerProps) => {
const ctrlCTimerRef = useRef<NodeJS.Timeout | null>(null);
const [ctrlDPressedOnce, setCtrlDPressedOnce] = useState(false);
const ctrlDTimerRef = useRef<NodeJS.Timeout | null>(null);
const [escapePressedOnce, setEscapePressedOnce] = useState(false);
const escapeTimerRef = useRef<NodeJS.Timeout | null>(null);
const [constrainHeight, setConstrainHeight] = useState<boolean>(true);
const [ideContextState, setIdeContextState] = useState<
IdeContext | undefined
@ -1180,6 +1181,47 @@ export const AppContainer = (props: AppContainerProps) => {
}
handleExit(ctrlDPressedOnce, setCtrlDPressedOnce, ctrlDTimerRef);
return;
} else if (keyMatchers[Command.ESCAPE](key)) {
// Escape key handling
// Skip if shell is focused (to allow shell's own escape handling)
if (embeddedShellFocused) {
return;
}
// If input has content, use double-press to clear
if (buffer.text.length > 0) {
if (escapePressedOnce) {
// Second press: clear input, keep the flag to allow immediate cancel
buffer.setText('');
return;
}
// First press: set flag and show prompt
setEscapePressedOnce(true);
escapeTimerRef.current = setTimeout(() => {
setEscapePressedOnce(false);
escapeTimerRef.current = null;
}, CTRL_EXIT_PROMPT_DURATION_MS);
return;
}
// Input is empty, cancel request immediately (no double-press needed)
if (streamingState === StreamingState.Responding) {
if (escapeTimerRef.current) {
clearTimeout(escapeTimerRef.current);
escapeTimerRef.current = null;
}
cancelOngoingRequest?.();
setEscapePressedOnce(false);
return;
}
// No action available, reset the flag
if (escapeTimerRef.current) {
clearTimeout(escapeTimerRef.current);
escapeTimerRef.current = null;
}
setEscapePressedOnce(false);
return;
}
let enteringConstrainHeightMode = false;
@ -1224,10 +1266,15 @@ export const AppContainer = (props: AppContainerProps) => {
ctrlCPressedOnce,
setCtrlCPressedOnce,
ctrlCTimerRef,
buffer.text.length,
ctrlDPressedOnce,
setCtrlDPressedOnce,
ctrlDTimerRef,
escapePressedOnce,
setEscapePressedOnce,
escapeTimerRef,
streamingState,
cancelOngoingRequest,
buffer,
handleSlashCommand,
activePtyId,
embeddedShellFocused,

View file

@ -10,7 +10,6 @@ import { AuthType } from '@qwen-code/qwen-code-core';
import { Box, Text } from 'ink';
import Link from 'ink-link';
import { theme } from '../semantic-colors.js';
import { Colors } from '../colors.js';
import { useKeypress } from '../hooks/useKeypress.js';
import { RadioButtonSelect } from '../components/shared/RadioButtonSelect.js';
import { ApiKeyInput } from '../components/ApiKeyInput.js';
@ -18,6 +17,7 @@ import { useUIState } from '../contexts/UIStateContext.js';
import { useUIActions } from '../contexts/UIActionsContext.js';
import { useConfig } from '../contexts/ConfigContext.js';
import { t } from '../../i18n/index.js';
import { CodingPlanRegion } from '../../constants/codingPlan.js';
const MODEL_PROVIDERS_DOCUMENTATION_URL =
'https://qwenlm.github.io/qwen-code-docs/en/users/configuration/settings/#modelproviders';
@ -35,7 +35,7 @@ function parseDefaultAuthType(
}
// Sub-mode types for API-KEY authentication
type ApiKeySubMode = 'coding-plan' | 'custom';
type ApiKeySubMode = 'coding-plan' | 'coding-plan-intl' | 'custom';
// View level for navigation
type ViewLevel = 'main' | 'api-key-sub' | 'api-key-input' | 'custom-info';
@ -53,6 +53,9 @@ export function AuthDialog(): React.JSX.Element {
const [selectedIndex, setSelectedIndex] = useState<number | null>(null);
const [viewLevel, setViewLevel] = useState<ViewLevel>('main');
const [apiKeySubModeIndex, setApiKeySubModeIndex] = useState<number>(0);
const [region, setRegion] = useState<CodingPlanRegion>(
CodingPlanRegion.CHINA,
);
// Main authentication entries
const mainItems = [
@ -72,9 +75,14 @@ export function AuthDialog(): React.JSX.Element {
const apiKeySubItems = [
{
key: 'coding-plan',
label: t('Coding Plan (Bailian)'),
label: t('Coding Plan (Bailian, China)'),
value: 'coding-plan' as ApiKeySubMode,
},
{
key: 'coding-plan-intl',
label: t('Coding Plan (Bailian, Global/Intl)'),
value: 'coding-plan-intl' as ApiKeySubMode,
},
{
key: 'custom',
label: t('Custom'),
@ -136,6 +144,10 @@ export function AuthDialog(): React.JSX.Element {
onAuthError(null);
if (subMode === 'coding-plan') {
setRegion(CodingPlanRegion.CHINA);
setViewLevel('api-key-input');
} else if (subMode === 'coding-plan-intl') {
setRegion(CodingPlanRegion.GLOBAL);
setViewLevel('api-key-input');
} else {
setViewLevel('custom-info');
@ -150,8 +162,8 @@ export function AuthDialog(): React.JSX.Element {
return;
}
// Submit to parent for processing
await handleCodingPlanSubmit(apiKey);
// Submit to parent for processing with region info
await handleCodingPlanSubmit(apiKey, region);
};
const handleGoBack = () => {
@ -160,6 +172,8 @@ export function AuthDialog(): React.JSX.Element {
if (viewLevel === 'api-key-sub') {
setViewLevel('main');
// Reset selectedIndex to ensure UI syncs with initialAuthIndex
setSelectedIndex(null);
} else if (viewLevel === 'api-key-input' || viewLevel === 'custom-info') {
setViewLevel('api-key-sub');
}
@ -215,7 +229,7 @@ export function AuthDialog(): React.JSX.Element {
/>
</Box>
<Box marginTop={1} paddingLeft={2}>
<Text color={Colors.Gray}>
<Text color={theme.text.secondary}>
{currentSelectedAuthType === AuthType.QWEN_OAUTH
? t('Login with QwenChat account to use daily free quota.')
: t('Use coding plan credentials or your own api-keys/providers.')}
@ -244,11 +258,13 @@ export function AuthDialog(): React.JSX.Element {
/>
</Box>
<Box marginTop={1} paddingLeft={2}>
<Text color={Colors.Gray}>
{apiKeySubItems[apiKeySubModeIndex]?.value === 'coding-plan'
? t("Paste your api key of Bailian Coding Plan and you're all set!")
: t(
<Text color={theme.text.secondary}>
{apiKeySubItems[apiKeySubModeIndex]?.value === 'custom'
? t(
'More instructions about configuring `modelProviders` manually.',
)
: t(
"Paste your api key of Bailian Coding Plan and you're all set!",
)}
</Text>
</Box>
@ -263,7 +279,11 @@ export function AuthDialog(): React.JSX.Element {
// Render API key input for coding-plan mode
const renderApiKeyInputView = () => (
<Box marginTop={1}>
<ApiKeyInput onSubmit={handleApiKeyInputSubmit} onCancel={handleGoBack} />
<ApiKeyInput
onSubmit={handleApiKeyInputSubmit}
onCancel={handleGoBack}
region={region}
/>
</Box>
);
@ -282,12 +302,12 @@ export function AuthDialog(): React.JSX.Element {
<Text>{t('Please configure your models in settings.json:')}</Text>
</Box>
<Box marginTop={1} paddingLeft={2}>
<Text color={Colors.AccentYellow}>
<Text color={theme.status.warning}>
1. {t('Set API key via environment variable (e.g., OPENAI_API_KEY)')}
</Text>
</Box>
<Box marginTop={0} paddingLeft={2}>
<Text color={Colors.AccentYellow}>
<Text color={theme.status.warning}>
2.{' '}
{t(
"Add model configuration to modelProviders['openai'] (or other auth types)",
@ -295,7 +315,7 @@ export function AuthDialog(): React.JSX.Element {
</Text>
</Box>
<Box marginTop={0} paddingLeft={2}>
<Text color={Colors.AccentYellow}>
<Text color={theme.status.warning}>
3.{' '}
{t(
'Each provider needs: id, envKey (required), plus optional baseUrl, generationConfig',
@ -303,7 +323,7 @@ export function AuthDialog(): React.JSX.Element {
</Text>
</Box>
<Box marginTop={0} paddingLeft={2}>
<Text color={Colors.AccentYellow}>
<Text color={theme.status.warning}>
4.{' '}
{t(
'Use /model command to select your preferred model from the configured list',
@ -324,7 +344,7 @@ export function AuthDialog(): React.JSX.Element {
</Box>
<Box marginTop={0}>
<Link url={MODEL_PROVIDERS_DOCUMENTATION_URL} fallback={false}>
<Text color={Colors.AccentGreen} underline>
<Text color={theme.status.success} underline>
{MODEL_PROVIDERS_DOCUMENTATION_URL}
</Text>
</Link>
@ -369,14 +389,14 @@ export function AuthDialog(): React.JSX.Element {
{(authError || errorMessage) && (
<Box marginTop={1}>
<Text color={Colors.AccentRed}>{authError || errorMessage}</Text>
<Text color={theme.status.error}>{authError || errorMessage}</Text>
</Box>
)}
{viewLevel === 'main' && (
<>
<Box marginTop={1}>
<Text color={Colors.AccentPurple}>
<Text color={theme.text.accent}>
{t('(Use Enter to Set Auth)')}
</Text>
</Box>
@ -395,7 +415,7 @@ export function AuthDialog(): React.JSX.Element {
</Text>
</Box>
<Box marginTop={1}>
<Text color={Colors.AccentBlue}>
<Text color={theme.text.link}>
{
'https://qwenlm.github.io/qwen-code-docs/en/users/support/tos-privacy/'
}

View file

@ -30,9 +30,10 @@ import { AuthState, MessageType } from '../types.js';
import type { HistoryItem } from '../types.js';
import { t } from '../../i18n/index.js';
import {
CODING_PLAN_MODELS,
getCodingPlanConfig,
isCodingPlanConfig,
CodingPlanRegion,
CODING_PLAN_ENV_KEY,
CODING_PLAN_VERSION,
} from '../../constants/codingPlan.js';
export type { QwenAuthState } from '../hooks/useQwenAuth.js';
@ -285,29 +286,35 @@ export const useAuthCommand = (
/**
* Handle coding plan submission - generates configs from template and stores api-key
* @param apiKey - The API key to store
* @param region - The region to use (default: CHINA)
*/
const handleCodingPlanSubmit = useCallback(
async (apiKey: string) => {
async (
apiKey: string,
region: CodingPlanRegion = CodingPlanRegion.CHINA,
) => {
try {
setIsAuthenticating(true);
setAuthError(null);
const envKeyName = CODING_PLAN_ENV_KEY;
// Get configuration based on region
const { template, version, regionName } = getCodingPlanConfig(region);
// Get persist scope
const persistScope = getPersistScopeForModelSelection(settings);
// Store api-key in settings.env
settings.setValue(persistScope, `env.${envKeyName}`, apiKey);
// Store api-key in settings.env (unified env key)
settings.setValue(persistScope, `env.${CODING_PLAN_ENV_KEY}`, apiKey);
// Sync to process.env immediately so refreshAuth can read the apiKey
process.env[envKeyName] = apiKey;
process.env[CODING_PLAN_ENV_KEY] = apiKey;
// Generate model configs from template
const newConfigs: ProviderModelConfig[] = CODING_PLAN_MODELS.map(
const newConfigs: ProviderModelConfig[] = template.map(
(templateConfig) => ({
...templateConfig,
envKey: envKeyName,
envKey: CODING_PLAN_ENV_KEY,
}),
);
@ -317,17 +324,9 @@ export const useAuthCommand = (
settings.merged.modelProviders as ModelProvidersConfig | undefined
)?.[AuthType.USE_OPENAI] || [];
// Identify Coding Plan configs by baseUrl + envKey
// Remove existing Coding Plan configs to ensure template changes are applied
const isCodingPlanConfig = (config: ProviderModelConfig) =>
config.envKey === envKeyName &&
CODING_PLAN_MODELS.some(
(template) => template.baseUrl === config.baseUrl,
);
// Filter out existing Coding Plan configs, keep user custom configs
// Filter out all existing Coding Plan configs (mutually exclusive)
const nonCodingPlanConfigs = existingConfigs.filter(
(existing) => !isCodingPlanConfig(existing),
(existing) => !isCodingPlanConfig(existing.baseUrl, existing.envKey),
);
// Add new Coding Plan configs at the beginning
@ -347,12 +346,11 @@ export const useAuthCommand = (
AuthType.USE_OPENAI,
);
// Persist coding plan version for future update detection
settings.setValue(
persistScope,
'codingPlan.version',
CODING_PLAN_VERSION,
);
// Persist coding plan region
settings.setValue(persistScope, 'codingPlan.region', region);
// Persist coding plan version (single field for backward compatibility)
settings.setValue(persistScope, 'codingPlan.version', version);
// If there are configs, use the first one as the model
if (updatedConfigs.length > 0 && updatedConfigs[0]?.id) {
@ -386,7 +384,8 @@ export const useAuthCommand = (
{
type: MessageType.INFO,
text: t(
'Authenticated successfully with Coding Plan. API key is stored in settings.env.',
'Authenticated successfully with {{region}}. API key is stored in settings.env.',
{ region: regionName },
),
},
Date.now(),

View file

@ -8,26 +8,37 @@ import type React from 'react';
import { useState } from 'react';
import { Box, Text } from 'ink';
import { TextInput } from './shared/TextInput.js';
import { Colors } from '../colors.js';
import { theme } from '../semantic-colors.js';
import { useKeypress } from '../hooks/useKeypress.js';
import { t } from '../../i18n/index.js';
import { CodingPlanRegion } from '../../constants/codingPlan.js';
import Link from 'ink-link';
interface ApiKeyInputProps {
onSubmit: (apiKey: string) => void;
onCancel: () => void;
region?: CodingPlanRegion;
}
const CODING_PLAN_API_KEY_URL =
'https://bailian.console.aliyun.com/?tab=model#/efm/coding_plan';
const CODING_PLAN_INTL_API_KEY_URL =
'https://modelstudio.console.alibabacloud.com/?tab=dashboard#/efm/coding_plan';
export function ApiKeyInput({
onSubmit,
onCancel,
region = CodingPlanRegion.CHINA,
}: ApiKeyInputProps): React.JSX.Element {
const [apiKey, setApiKey] = useState('');
const [error, setError] = useState<string | null>(null);
const apiKeyUrl =
region === CodingPlanRegion.GLOBAL
? CODING_PLAN_INTL_API_KEY_URL
: CODING_PLAN_API_KEY_URL;
useKeypress(
(key) => {
if (key.name === 'escape') {
@ -52,21 +63,21 @@ export function ApiKeyInput({
<TextInput value={apiKey} onChange={setApiKey} placeholder="sk-sp-..." />
{error && (
<Box marginTop={1}>
<Text color={Colors.AccentRed}>{error}</Text>
<Text color={theme.status.error}>{error}</Text>
</Box>
)}
<Box marginTop={1}>
<Text>{t('You can get your exclusive Coding Plan API-KEY here:')}</Text>
</Box>
<Box marginTop={0}>
<Link url={CODING_PLAN_API_KEY_URL} fallback={false}>
<Text color={Colors.AccentGreen} underline>
{CODING_PLAN_API_KEY_URL}
<Link url={apiKeyUrl} fallback={false}>
<Text color={theme.status.success} underline>
{apiKeyUrl}
</Text>
</Link>
</Box>
<Box marginTop={1}>
<Text color={Colors.Gray}>
<Text color={theme.text.secondary}>
{t('(Press Enter to submit, Escape to cancel)')}
</Text>
</Box>

View file

@ -20,6 +20,7 @@ import { GeminiThoughtMessageContent } from './messages/GeminiThoughtMessageCont
import { CompressionMessage } from './messages/CompressionMessage.js';
import { SummaryMessage } from './messages/SummaryMessage.js';
import { WarningMessage } from './messages/WarningMessage.js';
import { RetryCountdownMessage } from './messages/RetryCountdownMessage.js';
import { Box } from 'ink';
import { AboutBox } from './AboutBox.js';
import { StatsDisplay } from './StatsDisplay.js';
@ -126,6 +127,9 @@ const HistoryItemDisplayComponent: React.FC<HistoryItemDisplayProps> = ({
{itemForDisplay.type === 'error' && (
<ErrorMessage text={itemForDisplay.text} />
)}
{itemForDisplay.type === 'retry_countdown' && (
<RetryCountdownMessage text={itemForDisplay.text} />
)}
{itemForDisplay.type === 'about' && (
<AboutBox {...itemForDisplay.systemInfo} width={boxWidth} />
)}

View file

@ -370,6 +370,8 @@ describe('InputPrompt', () => {
});
describe('clipboard image paste', () => {
const isWindows = process.platform === 'win32';
beforeEach(() => {
vi.mocked(clipboardUtils.clipboardHasImage).mockResolvedValue(false);
vi.mocked(clipboardUtils.saveClipboardImage).mockResolvedValue(null);
@ -378,10 +380,37 @@ describe('InputPrompt', () => {
);
});
it('should handle Ctrl+V when clipboard has an image', async () => {
// Windows uses Alt+V (\x1Bv), non-Windows uses Ctrl+V (\x16)
const describeConditional = isWindows ? it.skip : it;
describeConditional(
'should handle Ctrl+V when clipboard has an image',
async () => {
vi.mocked(clipboardUtils.clipboardHasImage).mockResolvedValue(true);
vi.mocked(clipboardUtils.saveClipboardImage).mockResolvedValue(
'/Users/mochi/.qwen/tmp/clipboard-123.png',
);
const { stdin, unmount } = renderWithProviders(
<InputPrompt {...props} />,
);
await wait();
// Send Ctrl+V
stdin.write('\x16'); // Ctrl+V
await wait();
expect(clipboardUtils.clipboardHasImage).toHaveBeenCalled();
expect(clipboardUtils.saveClipboardImage).toHaveBeenCalled();
expect(clipboardUtils.cleanupOldClipboardImages).toHaveBeenCalled();
// Note: The new implementation adds images as attachments rather than inserting into buffer
unmount();
},
);
it('should handle Cmd+V when clipboard has an image', async () => {
vi.mocked(clipboardUtils.clipboardHasImage).mockResolvedValue(true);
vi.mocked(clipboardUtils.saveClipboardImage).mockResolvedValue(
'/test/.qwen-clipboard/clipboard-123.png',
'/Users/mochi/.qwen/tmp/clipboard-456.png',
);
const { stdin, unmount } = renderWithProviders(
@ -389,18 +418,15 @@ describe('InputPrompt', () => {
);
await wait();
// Send Ctrl+V
stdin.write('\x16'); // Ctrl+V
// Send Cmd+V (meta key) / Alt+V on Windows
// In terminals, Cmd+V or Alt+V is typically sent as ESC followed by 'v'
stdin.write('\x1Bv');
await wait();
expect(clipboardUtils.clipboardHasImage).toHaveBeenCalled();
expect(clipboardUtils.saveClipboardImage).toHaveBeenCalledWith(
props.config.getTargetDir(),
);
expect(clipboardUtils.cleanupOldClipboardImages).toHaveBeenCalledWith(
props.config.getTargetDir(),
);
expect(mockBuffer.replaceRangeByOffset).toHaveBeenCalled();
expect(clipboardUtils.saveClipboardImage).toHaveBeenCalled();
expect(clipboardUtils.cleanupOldClipboardImages).toHaveBeenCalled();
// Note: The new implementation adds images as attachments rather than inserting into buffer
unmount();
});
@ -412,7 +438,8 @@ describe('InputPrompt', () => {
);
await wait();
stdin.write('\x16'); // Ctrl+V
// Use platform-appropriate key combination
stdin.write(isWindows ? '\x1Bv' : '\x16');
await wait();
expect(clipboardUtils.clipboardHasImage).toHaveBeenCalled();
@ -430,7 +457,8 @@ describe('InputPrompt', () => {
);
await wait();
stdin.write('\x16'); // Ctrl+V
// Use platform-appropriate key combination
stdin.write(isWindows ? '\x1Bv' : '\x16');
await wait();
expect(clipboardUtils.saveClipboardImage).toHaveBeenCalled();
@ -439,11 +467,7 @@ describe('InputPrompt', () => {
});
it('should insert image path at cursor position with proper spacing', async () => {
const imagePath = path.join(
'test',
'.qwen-clipboard',
'clipboard-456.png',
);
const imagePath = '/Users/mochi/.qwen/tmp/clipboard-456.png';
vi.mocked(clipboardUtils.clipboardHasImage).mockResolvedValue(true);
vi.mocked(clipboardUtils.saveClipboardImage).mockResolvedValue(imagePath);
@ -451,27 +475,20 @@ describe('InputPrompt', () => {
mockBuffer.text = 'Hello world';
mockBuffer.cursor = [0, 5]; // Cursor after "Hello"
mockBuffer.lines = ['Hello world'];
mockBuffer.replaceRangeByOffset = vi.fn();
const { stdin, unmount } = renderWithProviders(
<InputPrompt {...props} />,
);
await wait();
stdin.write('\x16'); // Ctrl+V
// Use platform-appropriate key combination
stdin.write(isWindows ? '\x1Bv' : '\x16');
await wait();
// Should insert at cursor position with spaces
expect(mockBuffer.replaceRangeByOffset).toHaveBeenCalled();
// Get the actual call to see what path was used
const actualCall = vi.mocked(mockBuffer.replaceRangeByOffset).mock
.calls[0];
expect(actualCall[0]).toBe(5); // start offset
expect(actualCall[1]).toBe(5); // end offset
expect(actualCall[2]).toBe(
' @' + path.relative(path.join('test', 'project', 'src'), imagePath),
);
// The new implementation adds images as attachments rather than inserting into buffer
// So we verify that saveClipboardImage was called instead
expect(clipboardUtils.saveClipboardImage).toHaveBeenCalled();
expect(clipboardUtils.clipboardHasImage).toHaveBeenCalled();
unmount();
});
@ -485,7 +502,8 @@ describe('InputPrompt', () => {
);
await wait();
stdin.write('\x16'); // Ctrl+V
// Use platform-appropriate key combination
stdin.write(isWindows ? '\x1Bv' : '\x16');
await wait();
// Should not throw and should not set buffer text on error

View file

@ -22,7 +22,11 @@ import { useKeypress } from '../hooks/useKeypress.js';
import { keyMatchers, Command } from '../keyMatchers.js';
import type { CommandContext, SlashCommand } from '../commands/types.js';
import type { Config } from '@qwen-code/qwen-code-core';
import { ApprovalMode, createDebugLogger } from '@qwen-code/qwen-code-core';
import {
ApprovalMode,
Storage,
createDebugLogger,
} from '@qwen-code/qwen-code-core';
import {
parseInputForHighlighting,
buildSegmentsForVisualSlice,
@ -41,6 +45,15 @@ import { useUIActions } from '../contexts/UIActionsContext.js';
import { useKeypressContext } from '../contexts/KeypressContext.js';
import { FEEDBACK_DIALOG_KEYS } from '../FeedbackDialog.js';
/**
* Represents an attachment (e.g., pasted image) displayed above the input prompt
*/
export interface Attachment {
id: string; // Unique identifier (timestamp)
path: string; // Full file path
filename: string; // Filename only (for display)
}
const debugLogger = createDebugLogger('INPUT_PROMPT');
export interface InputPromptProps {
buffer: TextBuffer;
@ -126,6 +139,10 @@ export const InputPrompt: React.FC<InputPromptProps> = ({
const [recentPasteTime, setRecentPasteTime] = useState<number | null>(null);
const pasteTimeoutRef = useRef<NodeJS.Timeout | null>(null);
// Attachment state for clipboard images
const [attachments, setAttachments] = useState<Attachment[]>([]);
const [isAttachmentMode, setIsAttachmentMode] = useState(false);
const [selectedAttachmentIndex, setSelectedAttachmentIndex] = useState(-1);
// Large paste placeholder handling
const [pendingPastes, setPendingPastes] = useState<Map<string, string>>(
new Map(),
@ -281,10 +298,25 @@ export const InputPrompt: React.FC<InputPromptProps> = ({
if (shellModeActive) {
shellHistory.addCommandToHistory(finalValue);
}
// Convert attachments to @references and prepend to the message
if (attachments.length > 0) {
const attachmentRefs = attachments
.map((att) => `@${path.relative(config.getTargetDir(), att.path)}`)
.join(' ');
finalValue = `${attachmentRefs}\n\n${finalValue.trim()}`;
}
// Clear the buffer *before* calling onSubmit to prevent potential re-submission
// if onSubmit triggers a re-render while the buffer still holds the old value.
buffer.setText('');
onSubmit(finalValue);
// Clear attachments after submit
setAttachments([]);
setIsAttachmentMode(false);
setSelectedAttachmentIndex(-1);
resetCompletionState();
resetReverseSearchCompletionState();
},
@ -295,6 +327,8 @@ export const InputPrompt: React.FC<InputPromptProps> = ({
shellModeActive,
shellHistory,
resetReverseSearchCompletionState,
attachments,
config,
pendingPastes,
],
);
@ -336,52 +370,45 @@ export const InputPrompt: React.FC<InputPromptProps> = ({
]);
// Handle clipboard image pasting with Ctrl+V
const handleClipboardImage = useCallback(async () => {
const handleClipboardImage = useCallback(async (validated = false) => {
try {
if (await clipboardHasImage()) {
const imagePath = await saveClipboardImage(config.getTargetDir());
const hasImage = validated || (await clipboardHasImage());
if (hasImage) {
const imagePath = await saveClipboardImage(Storage.getGlobalTempDir());
if (imagePath) {
// Clean up old images
cleanupOldClipboardImages(config.getTargetDir()).catch(() => {
cleanupOldClipboardImages(Storage.getGlobalTempDir()).catch(() => {
// Ignore cleanup errors
});
// Get relative path from current directory
const relativePath = path.relative(config.getTargetDir(), imagePath);
// Insert @path reference at cursor position
const insertText = `@${relativePath}`;
const currentText = buffer.text;
const [row, col] = buffer.cursor;
// Calculate offset from row/col
let offset = 0;
for (let i = 0; i < row; i++) {
offset += buffer.lines[i].length + 1; // +1 for newline
}
offset += col;
// Add spaces around the path if needed
let textToInsert = insertText;
const charBefore = offset > 0 ? currentText[offset - 1] : '';
const charAfter =
offset < currentText.length ? currentText[offset] : '';
if (charBefore && charBefore !== ' ' && charBefore !== '\n') {
textToInsert = ' ' + textToInsert;
}
if (!charAfter || (charAfter !== ' ' && charAfter !== '\n')) {
textToInsert = textToInsert + ' ';
}
// Insert at cursor position
buffer.replaceRangeByOffset(offset, offset, textToInsert);
// Add as attachment instead of inserting @reference into text
const filename = path.basename(imagePath);
const newAttachment: Attachment = {
id: String(Date.now()),
path: imagePath,
filename,
};
setAttachments((prev) => [...prev, newAttachment]);
}
}
} catch (error) {
debugLogger.error('Error handling clipboard image:', error);
}
}, [buffer, config]);
}, []);
// Handle deletion of an attachment from the list
const handleAttachmentDelete = useCallback((index: number) => {
setAttachments((prev) => {
const newList = prev.filter((_, i) => i !== index);
if (newList.length === 0) {
setIsAttachmentMode(false);
setSelectedAttachmentIndex(-1);
} else {
setSelectedAttachmentIndex(Math.min(index, newList.length - 1));
}
return newList;
});
}, []);
const handleInput = useCallback(
(key: Key) => {
@ -412,7 +439,11 @@ export const InputPrompt: React.FC<InputPromptProps> = ({
const pasted = key.sequence.replace(/\r\n/g, '\n').replace(/\r/g, '\n');
const charCount = [...pasted].length; // Proper Unicode char count
const lineCount = pasted.split('\n').length;
if (
// Ensure we never accidentally interpret paste as regular input.
if (key.pasteImage) {
handleClipboardImage(true);
} else if (
charCount > LARGE_PASTE_CHAR_THRESHOLD ||
lineCount > LARGE_PASTE_LINE_THRESHOLD
) {
@ -666,6 +697,55 @@ export const InputPrompt: React.FC<InputPromptProps> = ({
}
}
// Attachment mode handling - process before history navigation
if (isAttachmentMode && attachments.length > 0) {
if (key.name === 'left') {
setSelectedAttachmentIndex((i) => Math.max(0, i - 1));
return;
}
if (key.name === 'right') {
setSelectedAttachmentIndex((i) =>
Math.min(attachments.length - 1, i + 1),
);
return;
}
if (keyMatchers[Command.NAVIGATION_DOWN](key)) {
// Exit attachment mode and return to input
setIsAttachmentMode(false);
setSelectedAttachmentIndex(-1);
return;
}
if (key.name === 'backspace' || key.name === 'delete') {
handleAttachmentDelete(selectedAttachmentIndex);
return;
}
if (key.name === 'return' || key.name === 'escape') {
setIsAttachmentMode(false);
setSelectedAttachmentIndex(-1);
return;
}
// For other keys, exit attachment mode and let input handle them
setIsAttachmentMode(false);
setSelectedAttachmentIndex(-1);
// Continue to process the key in input
}
// Enter attachment mode when pressing up at the first line with attachments
if (
!isAttachmentMode &&
attachments.length > 0 &&
!shellModeActive &&
!reverseSearchActive &&
!commandSearchActive &&
buffer.visualCursor[0] === 0 &&
buffer.visualScrollRow === 0 &&
keyMatchers[Command.NAVIGATION_UP](key)
) {
setIsAttachmentMode(true);
setSelectedAttachmentIndex(attachments.length - 1);
return;
}
if (!shellModeActive) {
if (keyMatchers[Command.REVERSE_SEARCH](key)) {
setCommandSearchActive(true);
@ -864,6 +944,10 @@ export const InputPrompt: React.FC<InputPromptProps> = ({
onToggleShortcuts,
showShortcuts,
uiState,
isAttachmentMode,
attachments,
selectedAttachmentIndex,
handleAttachmentDelete,
uiActions,
pasteWorkaround,
nextLargePastePlaceholder,
@ -921,6 +1005,23 @@ export const InputPrompt: React.FC<InputPromptProps> = ({
return (
<>
{attachments.length > 0 && (
<Box marginLeft={2} marginBottom={0}>
<Text color={theme.text.secondary}>{t('Attachments: ')}</Text>
{attachments.map((att, idx) => (
<Text
key={att.id}
color={
isAttachmentMode && idx === selectedAttachmentIndex
? theme.status.success
: theme.text.secondary
}
>
[{att.filename}]{idx < attachments.length - 1 ? ' ' : ''}
</Text>
))}
</Box>
)}
<Box
borderStyle="single"
borderTop={true}
@ -1077,6 +1178,16 @@ export const InputPrompt: React.FC<InputPromptProps> = ({
/>
</Box>
)}
{/* Attachment hints - show when there are attachments and no suggestions visible */}
{attachments.length > 0 && !shouldShowSuggestions && (
<Box marginLeft={2} marginRight={2}>
<Text color={theme.text.secondary}>
{isAttachmentMode
? t('← → select, Delete to remove, ↓ to exit')
: t('↑ to manage attachments')}
</Text>
</Box>
)}
</>
);
};

View file

@ -18,7 +18,10 @@ interface Shortcut {
// Platform-specific key mappings
/** Key chord that inserts a newline without submitting (platform-dependent). */
const getNewlineKey = () => {
  if (process.platform === 'win32') {
    return 'ctrl+enter';
  }
  return 'ctrl+j';
};
const getPasteKey = () => (process.platform === 'darwin' ? 'cmd+v' : 'ctrl+v');
/** Key chord used to trigger clipboard paste, per platform. */
const getPasteKey = () => {
  switch (process.platform) {
    case 'win32':
      return 'alt+v';
    case 'darwin':
      return 'cmd+v';
    default:
      return 'ctrl+v';
  }
};
/**
 * Key chord that opens the external editor.
 *
 * The previous platform ternary resolved to 'ctrl+x' on both branches,
 * so the platform check was redundant; return the constant directly.
 */
const getExternalEditorKey = () => 'ctrl+x';

View file

@ -0,0 +1,41 @@
/**
* @license
* Copyright 2025 Qwen
* SPDX-License-Identifier: Apache-2.0
*/
import type React from 'react';
import { Text, Box } from 'ink';
import { theme } from '../../semantic-colors.js';
/** Props for the RetryCountdownMessage component. */
interface RetryCountdownMessageProps {
  // Countdown text to render; empty or whitespace-only text renders nothing.
  text: string;
}
/**
 * Renders a retry countdown line in the secondary (dimmed) text color so it
 * reads as transient status information rather than an error.
 *
 * Renders nothing when `text` is empty or whitespace-only.
 */
export const RetryCountdownMessage: React.FC<RetryCountdownMessageProps> = ({
  text,
}) => {
  // Guard: skip rendering for empty / whitespace-only messages.
  if (!text?.trim()) {
    return null;
  }

  const prefix = '↻ ';

  return (
    <Box flexDirection="row">
      {/* Fixed-width column keeps the prefix from wrapping with the text. */}
      <Box width={prefix.length}>
        <Text color={theme.text.secondary}>{prefix}</Text>
      </Box>
      <Box flexGrow={1}>
        <Text wrap="wrap" color={theme.text.secondary}>
          {text}
        </Text>
      </Box>
    </Box>
  );
};

View file

@ -36,6 +36,7 @@ import {
MODIFIER_ALT_BIT,
MODIFIER_CTRL_BIT,
} from '../utils/platformConstants.js';
import { clipboardHasImage } from '../utils/clipboardUtils.js';
import { FOCUS_IN, FOCUS_OUT } from '../hooks/useFocus.js';
@ -54,6 +55,7 @@ export interface Key {
paste: boolean;
sequence: string;
kittyProtocol?: boolean;
pasteImage?: boolean;
}
export type KeypressHandler = (key: Key) => void;
@ -390,7 +392,7 @@ export function KeypressProvider({
}
};
const handleKeypress = (_: unknown, key: Key) => {
const handleKeypress = async (_: unknown, key: Key) => {
if (key.sequence === FOCUS_IN || key.sequence === FOCUS_OUT) {
return;
}
@ -400,14 +402,28 @@ export function KeypressProvider({
}
if (key.name === 'paste-end') {
isPaste = false;
broadcast({
name: '',
ctrl: false,
meta: false,
shift: false,
paste: true,
sequence: pasteBuffer.toString(),
});
if (pasteBuffer.toString().length > 0) {
broadcast({
name: '',
ctrl: false,
meta: false,
shift: false,
paste: true,
sequence: pasteBuffer.toString(),
});
} else {
const hasImage = await clipboardHasImage();
broadcast({
name: '',
ctrl: false,
meta: false,
shift: false,
paste: true,
pasteImage: hasImage,
sequence: pasteBuffer.toString(),
});
}
pasteBuffer = Buffer.alloc(0);
return;
}
@ -722,6 +738,7 @@ export function KeypressProvider({
};
let rl: readline.Interface;
if (usePassthrough) {
rl = readline.createInterface({
input: keypressStream,

View file

@ -15,6 +15,7 @@ import {
type ApprovalMode,
} from '@qwen-code/qwen-code-core';
import { type SettingScope } from '../../config/settings.js';
import { type CodingPlanRegion } from '../../constants/codingPlan.js';
import type { AuthState } from '../types.js';
import { type VisionSwitchOutcome } from '../components/ModelSwitchDialog.js';
// OpenAICredentials type (previously imported from OpenAIKeyPrompt)
@ -40,7 +41,10 @@ export interface UIActions {
authType: AuthType | undefined,
credentials?: OpenAICredentials,
) => Promise<void>;
handleCodingPlanSubmit: (apiKey: string) => Promise<void>;
handleCodingPlanSubmit: (
apiKey: string,
region?: CodingPlanRegion,
) => Promise<void>;
setAuthState: (state: AuthState) => void;
onAuthError: (error: string | null) => void;
cancelAuthentication: () => void;

View file

@ -11,6 +11,7 @@ import type { Config } from '@qwen-code/qwen-code-core';
import {
getErrorMessage,
isNodeError,
Storage,
unescapePath,
readManyFiles,
} from '@qwen-code/qwen-code-core';
@ -181,7 +182,17 @@ export async function handleAtCommand({
// Check if path should be ignored based on filtering options
const workspaceContext = config.getWorkspaceContext();
if (!workspaceContext.isPathWithinWorkspace(pathName)) {
// Check if path is in project temp directory
const projectTempDir = Storage.getGlobalTempDir();
const absolutePathName = path.isAbsolute(pathName)
? pathName
: path.resolve(workspaceContext.getDirectories()[0] || '', pathName);
if (
!absolutePathName.startsWith(projectTempDir) &&
!workspaceContext.isPathWithinWorkspace(pathName)
) {
onDebugMessage(
`Path ${pathName} is not in the workspace and will be skipped.`,
);

View file

@ -7,33 +7,16 @@
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { renderHook, waitFor } from '@testing-library/react';
import { useCodingPlanUpdates } from './useCodingPlanUpdates.js';
import { CODING_PLAN_ENV_KEY } from '../../constants/codingPlan.js';
import {
CODING_PLAN_ENV_KEY,
getCodingPlanConfig,
CodingPlanRegion,
} from '../../constants/codingPlan.js';
import { AuthType } from '@qwen-code/qwen-code-core';
// Mock the constants module
vi.mock('../../constants/codingPlan.js', async () => {
const actual = await vi.importActual('../../constants/codingPlan.js');
return {
...actual,
CODING_PLAN_VERSION: 'test-version-hash',
CODING_PLAN_MODELS: [
{
id: 'test-model-1',
name: 'Test Model 1',
baseUrl: 'https://test.example.com/v1',
description: 'Test model 1',
envKey: 'BAILIAN_CODING_PLAN_API_KEY',
},
{
id: 'test-model-2',
name: 'Test Model 2',
baseUrl: 'https://test.example.com/v1',
description: 'Test model 2',
envKey: 'BAILIAN_CODING_PLAN_API_KEY',
},
],
};
});
// Get region configs for testing
const chinaConfig = getCodingPlanConfig(CodingPlanRegion.CHINA);
const globalConfig = getCodingPlanConfig(CodingPlanRegion.GLOBAL);
describe('useCodingPlanUpdates', () => {
const mockSettings = {
@ -50,6 +33,7 @@ describe('useCodingPlanUpdates', () => {
const mockConfig = {
reloadModelProvidersConfig: vi.fn(),
refreshAuth: vi.fn(),
getModel: vi.fn().mockReturnValue('qwen-max'),
};
const mockAddItem = vi.fn();
@ -74,8 +58,11 @@ describe('useCodingPlanUpdates', () => {
expect(result.current.codingPlanUpdateRequest).toBeUndefined();
});
it('should not show update prompt when versions match', () => {
mockSettings.merged.codingPlan = { version: 'test-version-hash' };
it('should not show update prompt when China region versions match', () => {
mockSettings.merged.codingPlan = {
region: CodingPlanRegion.CHINA,
version: chinaConfig.version,
};
const { result } = renderHook(() =>
useCodingPlanUpdates(
@ -88,8 +75,52 @@ describe('useCodingPlanUpdates', () => {
expect(result.current.codingPlanUpdateRequest).toBeUndefined();
});
it('should show update prompt when versions differ', async () => {
mockSettings.merged.codingPlan = { version: 'old-version-hash' };
it('should not show update prompt when Global region versions match', () => {
mockSettings.merged.codingPlan = {
region: CodingPlanRegion.GLOBAL,
version: globalConfig.version,
};
const { result } = renderHook(() =>
useCodingPlanUpdates(
mockSettings as never,
mockConfig as never,
mockAddItem,
),
);
expect(result.current.codingPlanUpdateRequest).toBeUndefined();
});
it('should default to China region when region is not specified', async () => {
// No region specified, should default to China
mockSettings.merged.codingPlan = {
version: 'old-version-hash',
};
const { result } = renderHook(() =>
useCodingPlanUpdates(
mockSettings as never,
mockConfig as never,
mockAddItem,
),
);
await waitFor(() => {
expect(result.current.codingPlanUpdateRequest).toBeDefined();
});
// Should prompt for China region since it defaults to China
expect(result.current.codingPlanUpdateRequest?.prompt).toContain(
chinaConfig.regionName,
);
});
it('should show update prompt when China region versions differ', async () => {
mockSettings.merged.codingPlan = {
region: CodingPlanRegion.CHINA,
version: 'old-version-hash',
};
const { result } = renderHook(() =>
useCodingPlanUpdates(
@ -104,20 +135,45 @@ describe('useCodingPlanUpdates', () => {
});
expect(result.current.codingPlanUpdateRequest?.prompt).toContain(
'New model configurations',
chinaConfig.regionName,
);
});
it('should show update prompt when Global region versions differ', async () => {
mockSettings.merged.codingPlan = {
region: CodingPlanRegion.GLOBAL,
version: 'old-version-hash',
};
const { result } = renderHook(() =>
useCodingPlanUpdates(
mockSettings as never,
mockConfig as never,
mockAddItem,
),
);
await waitFor(() => {
expect(result.current.codingPlanUpdateRequest).toBeDefined();
});
expect(result.current.codingPlanUpdateRequest?.prompt).toContain(
globalConfig.regionName,
);
});
});
describe('update execution', () => {
it('should execute update when user confirms', async () => {
process.env[CODING_PLAN_ENV_KEY] = 'test-api-key';
mockSettings.merged.codingPlan = { version: 'old-version-hash' };
it('should execute China region update when user confirms', async () => {
mockSettings.merged.codingPlan = {
region: CodingPlanRegion.CHINA,
version: 'old-version-hash',
};
mockSettings.merged.modelProviders = {
[AuthType.USE_OPENAI]: [
{
id: 'test-model-1',
baseUrl: 'https://test.example.com/v1',
id: 'test-model-china-1',
baseUrl: chinaConfig.baseUrl,
envKey: CODING_PLAN_ENV_KEY,
},
{
@ -146,33 +202,112 @@ describe('useCodingPlanUpdates', () => {
// Wait for async update to complete
await waitFor(() => {
// Should update model providers (at least 2 calls: modelProviders + version)
// Should update model providers (at least 2 calls: modelProviders + version + region)
expect(mockSettings.setValue).toHaveBeenCalled();
});
// Should update version
// Should update version with correct hash
expect(mockSettings.setValue).toHaveBeenCalledWith(
expect.anything(),
'codingPlan.version',
'test-version-hash',
chinaConfig.version,
);
// Should update region
expect(mockSettings.setValue).toHaveBeenCalledWith(
expect.anything(),
'codingPlan.region',
CodingPlanRegion.CHINA,
);
// Should reload and refresh auth
expect(mockConfig.reloadModelProvidersConfig).toHaveBeenCalled();
expect(mockConfig.refreshAuth).toHaveBeenCalledWith(AuthType.USE_OPENAI);
// Should show success message
// Should show success message with region info
expect(mockAddItem).toHaveBeenCalledWith(
expect.objectContaining({
type: 'info',
text: expect.stringContaining('updated successfully'),
text: expect.stringContaining(chinaConfig.regionName),
}),
expect.any(Number),
);
});
it('should execute Global region update when user confirms', async () => {
mockSettings.merged.codingPlan = {
region: CodingPlanRegion.GLOBAL,
version: 'old-version-hash',
};
mockSettings.merged.modelProviders = {
[AuthType.USE_OPENAI]: [
{
id: 'test-model-global-1',
baseUrl: globalConfig.baseUrl,
envKey: CODING_PLAN_ENV_KEY,
},
{
id: 'custom-model',
baseUrl: 'https://custom.example.com',
envKey: 'CUSTOM_API_KEY',
},
],
};
mockConfig.refreshAuth.mockResolvedValue(undefined);
const { result } = renderHook(() =>
useCodingPlanUpdates(
mockSettings as never,
mockConfig as never,
mockAddItem,
),
);
await waitFor(() => {
expect(result.current.codingPlanUpdateRequest).toBeDefined();
});
// Confirm the update
await result.current.codingPlanUpdateRequest!.onConfirm(true);
// Wait for async update to complete
await waitFor(() => {
expect(mockSettings.setValue).toHaveBeenCalled();
});
// Should update version with correct hash (single version field)
expect(mockSettings.setValue).toHaveBeenCalledWith(
expect.anything(),
'codingPlan.version',
globalConfig.version,
);
// Should update region
expect(mockSettings.setValue).toHaveBeenCalledWith(
expect.anything(),
'codingPlan.region',
CodingPlanRegion.GLOBAL,
);
// Should reload and refresh auth
expect(mockConfig.reloadModelProvidersConfig).toHaveBeenCalled();
expect(mockConfig.refreshAuth).toHaveBeenCalledWith(AuthType.USE_OPENAI);
// Should show success message with Global region info
expect(mockAddItem).toHaveBeenCalledWith(
expect.objectContaining({
type: 'info',
text: expect.stringContaining(globalConfig.regionName),
}),
expect.any(Number),
);
});
it('should not execute update when user declines', async () => {
mockSettings.merged.codingPlan = { version: 'old-version-hash' };
mockSettings.merged.codingPlan = {
region: CodingPlanRegion.CHINA,
version: 'old-version-hash',
};
const { result } = renderHook(() =>
useCodingPlanUpdates(
@ -194,9 +329,103 @@ describe('useCodingPlanUpdates', () => {
expect(mockConfig.reloadModelProvidersConfig).not.toHaveBeenCalled();
});
it('should replace all Coding Plan configs during update (mutually exclusive)', async () => {
// Since regions are mutually exclusive, when updating one region,
// all Coding Plan configs should be replaced (not preserving other region configs)
mockSettings.merged.codingPlan = {
region: CodingPlanRegion.CHINA,
version: 'old-version-hash',
};
const chinaModelConfig = {
id: 'test-model-china-1',
baseUrl: chinaConfig.baseUrl,
envKey: CODING_PLAN_ENV_KEY,
};
const globalModelConfig = {
id: 'test-model-global-1',
baseUrl: globalConfig.baseUrl,
envKey: CODING_PLAN_ENV_KEY,
};
const customConfig = {
id: 'custom-model',
baseUrl: 'https://custom.example.com',
envKey: 'CUSTOM_API_KEY',
};
mockSettings.merged.modelProviders = {
[AuthType.USE_OPENAI]: [
chinaModelConfig,
globalModelConfig,
customConfig,
],
};
mockConfig.refreshAuth.mockResolvedValue(undefined);
const { result } = renderHook(() =>
useCodingPlanUpdates(
mockSettings as never,
mockConfig as never,
mockAddItem,
),
);
await waitFor(() => {
expect(result.current.codingPlanUpdateRequest).toBeDefined();
});
await result.current.codingPlanUpdateRequest!.onConfirm(true);
// Wait for async update to complete
await waitFor(() => {
expect(mockSettings.setValue).toHaveBeenCalled();
});
// Get the updated configs passed to setValue
const setValueCalls = mockSettings.setValue.mock.calls;
const modelProvidersCall = setValueCalls.find((call: unknown[]) =>
(call[1] as string).includes('modelProviders'),
);
expect(modelProvidersCall).toBeDefined();
const updatedConfigs = modelProvidersCall![2] as Array<
Record<string, unknown>
>;
// Should have new China configs + custom config only (global config removed since regions are mutually exclusive)
// The China template has 8 models, so we expect 8 (from template) + 1 (custom) = 9
// Note: description field has been removed, only name field contains the branding
expect(updatedConfigs.length).toBe(9);
// Should NOT contain the Global config (mutually exclusive)
expect(
updatedConfigs.some(
(c: Record<string, unknown>) => c['baseUrl'] === globalConfig.baseUrl,
),
).toBe(false);
// Should contain the custom config
expect(
updatedConfigs.some(
(c: Record<string, unknown>) => c['id'] === 'custom-model',
),
).toBe(true);
// All configs should use the unified env key
updatedConfigs.forEach((config) => {
if (config['envKey'] === CODING_PLAN_ENV_KEY) {
expect(config['baseUrl']).toBe(chinaConfig.baseUrl);
}
});
// Should reload and refresh auth
expect(mockConfig.reloadModelProvidersConfig).toHaveBeenCalled();
expect(mockConfig.refreshAuth).toHaveBeenCalledWith(AuthType.USE_OPENAI);
});
it('should preserve non-Coding Plan configs during update', async () => {
process.env[CODING_PLAN_ENV_KEY] = 'test-api-key';
mockSettings.merged.codingPlan = { version: 'old-version-hash' };
mockSettings.merged.codingPlan = {
region: CodingPlanRegion.CHINA,
version: 'old-version-hash',
};
const customConfig = {
id: 'custom-model',
baseUrl: 'https://custom.example.com',
@ -205,8 +434,8 @@ describe('useCodingPlanUpdates', () => {
mockSettings.merged.modelProviders = {
[AuthType.USE_OPENAI]: [
{
id: 'test-model-1',
baseUrl: 'https://test.example.com/v1',
id: 'test-model-china-1',
baseUrl: chinaConfig.baseUrl,
envKey: CODING_PLAN_ENV_KEY,
},
customConfig,
@ -233,10 +462,41 @@ describe('useCodingPlanUpdates', () => {
// Should preserve custom config - verify setValue was called
expect(mockSettings.setValue).toHaveBeenCalled();
});
// Get the updated configs passed to setValue
const setValueCalls = mockSettings.setValue.mock.calls;
const modelProvidersCall = setValueCalls.find((call: unknown[]) =>
(call[1] as string).includes('modelProviders'),
);
// Should preserve custom config
expect(modelProvidersCall).toBeDefined();
const updatedConfigs = modelProvidersCall![2] as Array<
Record<string, unknown>
>;
expect(
updatedConfigs.some(
(c: Record<string, unknown>) => c['id'] === 'custom-model',
),
).toBe(true);
});
it('should handle missing API key error', async () => {
mockSettings.merged.codingPlan = { version: 'old-version-hash' };
it('should handle update errors gracefully', async () => {
mockSettings.merged.codingPlan = {
region: CodingPlanRegion.CHINA,
version: 'old-version-hash',
};
mockSettings.merged.modelProviders = {
[AuthType.USE_OPENAI]: [
{
id: 'test-model-china-1',
baseUrl: chinaConfig.baseUrl,
envKey: CODING_PLAN_ENV_KEY,
},
],
};
// Simulate an error during refreshAuth
mockConfig.refreshAuth.mockRejectedValue(new Error('Network error'));
const { result } = renderHook(() =>
useCodingPlanUpdates(
@ -253,18 +513,23 @@ describe('useCodingPlanUpdates', () => {
await result.current.codingPlanUpdateRequest!.onConfirm(true);
// Should show error message
expect(mockAddItem).toHaveBeenCalledWith(
expect.objectContaining({
type: 'error',
}),
expect.any(Number),
);
await waitFor(() => {
expect(mockAddItem).toHaveBeenCalledWith(
expect.objectContaining({
type: 'error',
}),
expect.any(Number),
);
});
});
});
describe('dismissUpdate', () => {
it('should clear update request when dismissed', async () => {
mockSettings.merged.codingPlan = { version: 'old-version-hash' };
mockSettings.merged.codingPlan = {
region: CodingPlanRegion.CHINA,
version: 'old-version-hash',
};
const { result } = renderHook(() =>
useCodingPlanUpdates(

View file

@ -10,9 +10,10 @@ import { AuthType } from '@qwen-code/qwen-code-core';
import type { LoadedSettings } from '../../config/settings.js';
import { getPersistScopeForModelSelection } from '../../config/modelProvidersScope.js';
import {
CODING_PLAN_MODELS,
isCodingPlanConfig,
getCodingPlanConfig,
CodingPlanRegion,
CODING_PLAN_ENV_KEY,
CODING_PLAN_VERSION,
} from '../../constants/codingPlan.js';
import { t } from '../../i18n/index.js';
@ -21,20 +22,6 @@ export interface CodingPlanUpdateRequest {
onConfirm: (confirmed: boolean) => void;
}
/**
* Checks if a config is a Coding Plan configuration by matching baseUrl and envKey.
* This ensures only configs from the Coding Plan provider are identified.
*/
function isCodingPlanConfig(config: {
baseUrl?: string;
envKey?: string;
}): boolean {
return (
config.envKey === CODING_PLAN_ENV_KEY &&
CODING_PLAN_MODELS.some((template) => template.baseUrl === config.baseUrl)
);
}
/**
* Hook for detecting and handling Coding Plan template updates.
* Compares the persisted version with the current template version
@ -55,134 +42,148 @@ export function useCodingPlanUpdates(
/**
* Execute the Coding Plan configuration update.
* Removes old Coding Plan configs and replaces them with new ones from the template.
* Uses the region from settings.codingPlan.region (defaults to CHINA).
*/
const executeUpdate = useCallback(async () => {
try {
const persistScope = getPersistScopeForModelSelection(settings);
const executeUpdate = useCallback(
async (region: CodingPlanRegion = CodingPlanRegion.CHINA) => {
try {
const persistScope = getPersistScopeForModelSelection(settings);
// Get current configs
const currentConfigs =
(
settings.merged.modelProviders as
| Record<string, Array<Record<string, unknown>>>
| undefined
)?.[AuthType.USE_OPENAI] || [];
// Get current configs
const currentConfigs =
(
settings.merged.modelProviders as
| Record<string, Array<Record<string, unknown>>>
| undefined
)?.[AuthType.USE_OPENAI] || [];
// Filter out Coding Plan configs (keep user custom configs)
const nonCodingPlanConfigs = currentConfigs.filter(
(cfg) =>
!isCodingPlanConfig({
baseUrl: cfg['baseUrl'] as string | undefined,
envKey: cfg['envKey'] as string | undefined,
}),
);
// Generate new configs from template with the stored API key
const apiKey = process.env[CODING_PLAN_ENV_KEY];
if (!apiKey) {
throw new Error(
t(
'Coding Plan API key not found. Please re-authenticate with Coding Plan.',
),
// Filter out all Coding Plan configs (since they are mutually exclusive)
// Keep only non-Coding-Plan user custom configs
const nonCodingPlanConfigs = currentConfigs.filter(
(cfg) =>
!isCodingPlanConfig(
cfg['baseUrl'] as string | undefined,
cfg['envKey'] as string | undefined,
),
);
// Get the configuration for the current region
const { template, version, regionName } = getCodingPlanConfig(region);
// Generate new configs from template
const newConfigs = template.map((templateConfig) => ({
...templateConfig,
envKey: CODING_PLAN_ENV_KEY,
}));
// Combine: new Coding Plan configs at the front, user configs preserved
const updatedConfigs = [
...newConfigs,
...(nonCodingPlanConfigs as Array<Record<string, unknown>>),
] as Array<Record<string, unknown>>;
// Hot-reload model providers configuration first (in-memory only)
const updatedModelProviders = {
...(settings.merged.modelProviders as
| Record<string, unknown>
| undefined),
[AuthType.USE_OPENAI]: updatedConfigs,
};
config.reloadModelProvidersConfig(
updatedModelProviders as unknown as ModelProvidersConfig,
);
// Refresh auth with the new configuration
// This validates the configuration before persisting
await config.refreshAuth(AuthType.USE_OPENAI);
// Persist to settings only after successful auth refresh
settings.setValue(
persistScope,
`modelProviders.${AuthType.USE_OPENAI}`,
updatedConfigs,
);
// Update the version (single version field for backward compatibility)
settings.setValue(persistScope, 'codingPlan.version', version);
// Update the region
settings.setValue(persistScope, 'codingPlan.region', region);
const activeModel = config.getModel();
addItem(
{
type: 'info',
text: t(
'{{region}} configuration updated successfully. Model switched to "{{model}}".',
{ region: regionName, model: activeModel },
),
},
Date.now(),
);
return true;
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : String(error);
addItem(
{
type: 'error',
text: t('Failed to update Coding Plan configuration: {{message}}', {
message: errorMessage,
}),
},
Date.now(),
);
return false;
}
const newConfigs = CODING_PLAN_MODELS.map((templateConfig) => ({
...templateConfig,
envKey: CODING_PLAN_ENV_KEY,
}));
// Combine: new Coding Plan configs at the front, user configs preserved
const updatedConfigs = [
...newConfigs,
...(nonCodingPlanConfigs as Array<Record<string, unknown>>),
] as Array<Record<string, unknown>>;
// Persist updated model providers
settings.setValue(
persistScope,
`modelProviders.${AuthType.USE_OPENAI}`,
updatedConfigs,
);
// Update the version
settings.setValue(
persistScope,
'codingPlan.version',
CODING_PLAN_VERSION,
);
// Hot-reload model providers configuration
const updatedModelProviders = {
...(settings.merged.modelProviders as
| Record<string, unknown>
| undefined),
[AuthType.USE_OPENAI]: updatedConfigs,
};
config.reloadModelProvidersConfig(
updatedModelProviders as unknown as ModelProvidersConfig,
);
// Refresh auth with the new configuration
await config.refreshAuth(AuthType.USE_OPENAI);
addItem(
{
type: 'info',
text: t(
'Coding Plan configuration updated successfully. New models are now available.',
),
},
Date.now(),
);
return true;
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : String(error);
addItem(
{
type: 'error',
text: t('Failed to update Coding Plan configuration: {{message}}', {
message: errorMessage,
}),
},
Date.now(),
);
return false;
}
}, [settings, config, addItem]);
},
[settings, config, addItem],
);
/**
* Check for version mismatch and prompt user for update if needed.
* Uses the region from settings.codingPlan.region (defaults to CHINA if not set).
*/
const checkForUpdates = useCallback(() => {
const savedVersion = (
settings.merged as { codingPlan?: { version?: string } }
).codingPlan?.version;
const mergedSettings = settings.merged as {
codingPlan?: {
version?: string;
region?: CodingPlanRegion;
};
};
// Get the region (default to CHINA if not set)
const region = mergedSettings.codingPlan?.region ?? CodingPlanRegion.CHINA;
// Get the saved version for the current region
const savedVersion = mergedSettings.codingPlan?.version;
// If no version is stored, user hasn't used Coding Plan yet - skip check
if (!savedVersion) {
return;
}
// If versions match, no update needed
if (savedVersion === CODING_PLAN_VERSION) {
return;
}
// Get current version for the region
const currentVersion = getCodingPlanConfig(region).version;
// Version mismatch - prompt user for update
setUpdateRequest({
prompt: t(
'New model configurations are available for Bailian Coding Plan. Update now?',
),
onConfirm: async (confirmed: boolean) => {
setUpdateRequest(undefined);
if (confirmed) {
await executeUpdate();
}
},
});
// Check if version matches
if (savedVersion !== currentVersion) {
const { regionName } = getCodingPlanConfig(region);
setUpdateRequest({
prompt: t(
'New model configurations are available for {{region}}. Update now?',
{ region: regionName },
),
onConfirm: async (confirmed: boolean) => {
setUpdateRequest(undefined);
if (confirmed) {
await executeUpdate(region);
}
},
});
}
}, [settings, executeUpdate]);
// Check for updates on mount

View file

@ -9,7 +9,6 @@ import type { Mock, MockInstance } from 'vitest';
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { renderHook, act, waitFor } from '@testing-library/react';
import { useGeminiStream } from './useGeminiStream.js';
import { useKeypress } from './useKeypress.js';
import * as atCommandProcessor from './atCommandProcessor.js';
import type {
TrackedToolCall,
@ -67,7 +66,12 @@ const MockedUserPromptEvent = vi.hoisted(() =>
const MockedApiCancelEvent = vi.hoisted(() =>
vi.fn().mockImplementation(() => {}),
);
const mockParseAndFormatApiError = vi.hoisted(() => vi.fn());
// Hoisted mock: mirrors parseAndFormatApiError's wrapped-message shape so
// error assertions in these tests can match on the "[API Error: …]" text.
const mockParseAndFormatApiError = vi.hoisted(() =>
  vi.fn((msg: unknown) => {
    const detail =
      typeof msg === 'string' ? msg : 'An unknown error occurred.';
    return `[API Error: ${detail}]`;
  }),
);
const mockLogApiCancel = vi.hoisted(() => vi.fn());
// Vision auto-switch mocks (hoisted)
@ -107,10 +111,6 @@ vi.mock('./useVisionAutoSwitch.js', () => ({
})),
}));
vi.mock('./useKeypress.js', () => ({
useKeypress: vi.fn(),
}));
vi.mock('./shellCommandProcessor.js', () => ({
useShellCommandProcessor: vi.fn().mockReturnValue({
handleShellCommand: vi.fn(),
@ -123,22 +123,6 @@ vi.mock('../utils/markdownUtilities.js', () => ({
findLastSafeSplitPoint: vi.fn((s: string) => s.length),
}));
vi.mock('./useStateAndRef.js', () => ({
useStateAndRef: vi.fn((initial) => {
let val = initial;
const ref = { current: val };
const setVal = vi.fn((updater) => {
if (typeof updater === 'function') {
val = updater(val);
} else {
val = updater;
}
ref.current = val;
});
return [val, ref, setVal];
}),
}));
vi.mock('./useLogger.js', () => ({
useLogger: vi.fn().mockReturnValue({
logMessage: vi.fn().mockResolvedValue(undefined),
@ -850,28 +834,8 @@ describe('useGeminiStream', () => {
expect(result.current.streamingState).toBe(StreamingState.Responding);
});
describe('User Cancellation', () => {
let keypressCallback: (key: any) => void;
const mockUseKeypress = useKeypress as Mock;
beforeEach(() => {
// Capture the callback passed to useKeypress
mockUseKeypress.mockImplementation((callback, options) => {
if (options.isActive) {
keypressCallback = callback;
} else {
keypressCallback = () => {};
}
});
});
const simulateEscapeKeyPress = () => {
act(() => {
keypressCallback({ name: 'escape' });
});
};
it('should cancel an in-progress stream when escape is pressed', async () => {
describe('Cancellation', () => {
it('should cancel an in-progress stream when cancelOngoingRequest is called', async () => {
const mockStream = (async function* () {
yield { type: 'content', value: 'Part 1' };
// Keep the stream open
@ -891,8 +855,10 @@ describe('useGeminiStream', () => {
expect(result.current.streamingState).toBe(StreamingState.Responding);
});
// Simulate escape key press
simulateEscapeKeyPress();
// Call cancelOngoingRequest directly
act(() => {
result.current.cancelOngoingRequest();
});
// Verify cancellation message is added
await waitFor(() => {
@ -909,7 +875,7 @@ describe('useGeminiStream', () => {
expect(result.current.streamingState).toBe(StreamingState.Idle);
});
it('should call onCancelSubmit handler when escape is pressed', async () => {
it('should call onCancelSubmit handler when cancelOngoingRequest is called', async () => {
const cancelSubmitSpy = vi.fn();
const mockStream = (async function* () {
yield { type: 'content', value: 'Part 1' };
@ -947,12 +913,14 @@ describe('useGeminiStream', () => {
result.current.submitQuery('test query');
});
simulateEscapeKeyPress();
act(() => {
result.current.cancelOngoingRequest();
});
expect(cancelSubmitSpy).toHaveBeenCalled();
});
it('should call setShellInputFocused(false) when escape is pressed', async () => {
it('should call setShellInputFocused(false) when cancelOngoingRequest is called', async () => {
const setShellInputFocusedSpy = vi.fn();
const mockStream = (async function* () {
yield { type: 'content', value: 'Part 1' };
@ -989,18 +957,22 @@ describe('useGeminiStream', () => {
result.current.submitQuery('test query');
});
simulateEscapeKeyPress();
act(() => {
result.current.cancelOngoingRequest();
});
expect(setShellInputFocusedSpy).toHaveBeenCalledWith(false);
});
it('should not do anything if escape is pressed when not responding', () => {
it('should not do anything if cancelOngoingRequest is called when not responding', () => {
const { result } = renderTestHook();
expect(result.current.streamingState).toBe(StreamingState.Idle);
// Simulate escape key press
simulateEscapeKeyPress();
// Call cancelOngoingRequest
act(() => {
result.current.cancelOngoingRequest();
});
// No change should happen, no cancellation message
expect(mockAddItem).not.toHaveBeenCalledWith(
@ -1035,7 +1007,9 @@ describe('useGeminiStream', () => {
});
// Cancel the request
simulateEscapeKeyPress();
act(() => {
result.current.cancelOngoingRequest();
});
// Allow the stream to continue
act(() => {
@ -1083,7 +1057,9 @@ describe('useGeminiStream', () => {
expect(result.current.streamingState).toBe(StreamingState.Responding);
// Try to cancel
simulateEscapeKeyPress();
act(() => {
result.current.cancelOngoingRequest();
});
// Nothing should happen because the state is not `Responding`
expect(abortSpy).not.toHaveBeenCalled();
@ -2296,6 +2272,127 @@ describe('useGeminiStream', () => {
});
});
it('should show a retry countdown and update pending history over time', async () => {
vi.useFakeTimers();
try {
let resolveStream: (() => void) | undefined;
mockSendMessageStream.mockReturnValue(
(async function* () {
yield {
type: ServerGeminiEventType.Retry,
retryInfo: {
message: '[API Error: Rate limit exceeded]',
attempt: 1,
maxRetries: 3,
delayMs: 3000,
},
};
yield {
type: ServerGeminiEventType.Retry,
};
await new Promise<void>((resolve) => {
resolveStream = resolve;
});
yield {
type: ServerGeminiEventType.Finished,
value: { reason: 'STOP', usageMetadata: undefined },
};
})(),
);
const { result } = renderHook(() =>
useGeminiStream(
new MockedGeminiClientClass(mockConfig),
[],
mockAddItem,
mockConfig,
mockLoadedSettings,
mockOnDebugMessage,
mockHandleSlashCommand,
false,
() => 'vscode' as EditorType,
() => {},
() => Promise.resolve(),
false,
() => {},
() => {},
() => {},
false, // visionModelPreviewEnabled
() => {},
80,
24,
),
);
act(() => {
void result.current.submitQuery('Trigger retry');
});
await act(async () => {
await Promise.resolve();
});
const findErrorItem = () =>
result.current.pendingHistoryItems.find(
(item) => item.type === MessageType.ERROR,
);
const findCountdownItem = () =>
result.current.pendingHistoryItems.find(
(item) => item.type === 'retry_countdown',
);
let errorItem = findErrorItem();
let countdownItem = findCountdownItem();
for (
let attempts = 0;
attempts < 5 && (!errorItem || !countdownItem);
attempts++
) {
await act(async () => {
await Promise.resolve();
});
errorItem = findErrorItem();
countdownItem = findCountdownItem();
}
// Error line should be rendered as ERROR type (wrapped by parseAndFormatApiError)
expect(errorItem?.text).toContain('Rate limit exceeded');
// Countdown line should be rendered as retry_countdown type
expect(countdownItem?.text).toContain('Retrying in 3 seconds');
await act(async () => {
await vi.advanceTimersByTimeAsync(1000);
});
const countdownAfterOneSecond = result.current.pendingHistoryItems.find(
(item) => item.type === 'retry_countdown',
);
expect(countdownAfterOneSecond?.text).toContain(
'Retrying in 2 seconds',
);
resolveStream?.();
await act(async () => {
await Promise.resolve();
await vi.runAllTimersAsync();
});
// Both error and countdown should be cleared after retry succeeds
const remainingError = result.current.pendingHistoryItems.find(
(item) => item.type === MessageType.ERROR,
);
const remainingCountdown = result.current.pendingHistoryItems.find(
(item) => item.type === 'retry_countdown',
);
expect(remainingError).toBeUndefined();
expect(remainingCountdown).toBeUndefined();
} finally {
vi.useRealTimers();
}
});
it('should memoize pendingHistoryItems', () => {
mockUseReactToolScheduler.mockReturnValue([
[],

View file

@ -63,8 +63,8 @@ import {
import { promises as fs } from 'node:fs';
import path from 'node:path';
import { useSessionStats } from '../contexts/SessionContext.js';
import { useKeypress } from './useKeypress.js';
import type { LoadedSettings } from '../../config/settings.js';
import { t } from '../../i18n/index.js';
const debugLogger = createDebugLogger('GEMINI_STREAM');
@ -115,7 +115,6 @@ export const useGeminiStream = (
persistSessionModel?: string;
showGuidance?: boolean;
}>,
isShellFocused?: boolean,
) => {
const [initError, setInitError] = useState<string | null>(null);
const abortControllerRef = useRef<AbortController | null>(null);
@ -125,6 +124,16 @@ export const useGeminiStream = (
const [thought, setThought] = useState<ThoughtSummary | null>(null);
const [pendingHistoryItem, pendingHistoryItemRef, setPendingHistoryItem] =
useStateAndRef<HistoryItemWithoutId | null>(null);
const [pendingRetryErrorItem, setPendingRetryErrorItem] =
useState<HistoryItemWithoutId | null>(null);
const [
pendingRetryCountdownItem,
pendingRetryCountdownItemRef,
setPendingRetryCountdownItem,
] = useStateAndRef<HistoryItemWithoutId | null>(null);
const retryCountdownTimerRef = useRef<ReturnType<typeof setInterval> | null>(
null,
);
const processedMemoryToolsRef = useRef<Set<string>>(new Set());
const {
startNewPrompt,
@ -189,6 +198,69 @@ export const useGeminiStream = (
onComplete: (result: { userSelection: 'disable' | 'keep' }) => void;
} | null>(null);
const stopRetryCountdownTimer = useCallback(() => {
if (retryCountdownTimerRef.current) {
clearInterval(retryCountdownTimerRef.current);
retryCountdownTimerRef.current = null;
}
}, []);
const clearRetryCountdown = useCallback(() => {
stopRetryCountdownTimer();
setPendingRetryErrorItem(null);
setPendingRetryCountdownItem(null);
}, [setPendingRetryCountdownItem, stopRetryCountdownTimer]);
const startRetryCountdown = useCallback(
(retryInfo: {
message?: string;
attempt: number;
maxRetries: number;
delayMs: number;
}) => {
stopRetryCountdownTimer();
const startTime = Date.now();
const { message, attempt, maxRetries, delayMs } = retryInfo;
const retryReasonText =
message ?? t('Rate limit exceeded. Please wait and try again.');
// Error line stays static (red with ✕ prefix)
setPendingRetryErrorItem({
type: MessageType.ERROR,
text: retryReasonText,
});
// Countdown line updates every second (dim/secondary color)
const updateCountdown = () => {
const elapsedMs = Date.now() - startTime;
const remainingMs = Math.max(0, delayMs - elapsedMs);
const remainingSec = Math.ceil(remainingMs / 1000);
setPendingRetryCountdownItem({
type: 'retry_countdown',
text: t(
'Retrying in {{seconds}} seconds… (attempt {{attempt}}/{{maxRetries}})',
{
seconds: String(remainingSec),
attempt: String(attempt),
maxRetries: String(maxRetries),
},
),
} as HistoryItemWithoutId);
if (remainingMs <= 0) {
stopRetryCountdownTimer();
}
};
updateCountdown();
retryCountdownTimerRef.current = setInterval(updateCountdown, 1000);
},
[setPendingRetryCountdownItem, stopRetryCountdownTimer],
);
useEffect(() => () => stopRetryCountdownTimer(), [stopRetryCountdownTimer]);
const onExec = useCallback(async (done: Promise<void>) => {
setIsResponding(true);
await done;
@ -295,6 +367,7 @@ export const useGeminiStream = (
Date.now(),
);
setPendingHistoryItem(null);
clearRetryCountdown();
onCancelSubmit();
setIsResponding(false);
setShellInputFocused(false);
@ -305,19 +378,11 @@ export const useGeminiStream = (
onCancelSubmit,
pendingHistoryItemRef,
setShellInputFocused,
clearRetryCountdown,
config,
getPromptCount,
]);
useKeypress(
(key) => {
if (key.name === 'escape' && !isShellFocused) {
cancelOngoingRequest();
}
},
{ isActive: streamingState === StreamingState.Responding },
);
const prepareQueryForGemini = useCallback(
async (
query: PartListUnion,
@ -609,10 +674,17 @@ export const useGeminiStream = (
{ type: MessageType.INFO, text: 'User cancelled the request.' },
userMessageTimestamp,
);
clearRetryCountdown();
setIsResponding(false);
setThought(null); // Reset thought when user cancels
},
[addItem, pendingHistoryItemRef, setPendingHistoryItem, setThought],
[
addItem,
pendingHistoryItemRef,
setPendingHistoryItem,
setThought,
clearRetryCountdown,
],
);
const handleErrorEvent = useCallback(
@ -631,9 +703,17 @@ export const useGeminiStream = (
},
userMessageTimestamp,
);
clearRetryCountdown();
setThought(null); // Reset thought when there's an error
},
[addItem, pendingHistoryItemRef, setPendingHistoryItem, config, setThought],
[
addItem,
pendingHistoryItemRef,
setPendingHistoryItem,
config,
setThought,
clearRetryCountdown,
],
);
const handleCitationEvent = useCallback(
@ -693,8 +773,9 @@ export const useGeminiStream = (
userMessageTimestamp,
);
}
clearRetryCountdown();
},
[addItem],
[addItem, clearRetryCountdown],
);
const handleChatCompressionEvent = useCallback(
@ -853,7 +934,16 @@ export const useGeminiStream = (
loopDetectedRef.current = true;
break;
case ServerGeminiEventType.Retry:
// Will add the missing logic later
// Clear any pending partial content from the failed attempt
if (pendingHistoryItemRef.current) {
setPendingHistoryItem(null);
}
// Show retry info if available (rate-limit / throttling errors)
if (event.retryInfo) {
startRetryCountdown(event.retryInfo);
} else if (!pendingRetryCountdownItemRef.current) {
clearRetryCountdown();
}
break;
default: {
// enforces exhaustive switch-case
@ -878,7 +968,12 @@ export const useGeminiStream = (
handleMaxSessionTurnsEvent,
handleSessionTokenLimitExceededEvent,
handleCitationEvent,
startRetryCountdown,
clearRetryCountdown,
setThought,
pendingHistoryItemRef,
setPendingHistoryItem,
pendingRetryCountdownItemRef,
],
);
@ -1216,10 +1311,18 @@ export const useGeminiStream = (
const pendingHistoryItems = useMemo(
() =>
[pendingHistoryItem, pendingToolCallGroupDisplay].filter(
(i) => i !== undefined && i !== null,
),
[pendingHistoryItem, pendingToolCallGroupDisplay],
[
pendingHistoryItem,
pendingRetryErrorItem,
pendingRetryCountdownItem,
pendingToolCallGroupDisplay,
].filter((i) => i !== undefined && i !== null),
[
pendingHistoryItem,
pendingRetryErrorItem,
pendingRetryCountdownItem,
pendingToolCallGroupDisplay,
],
);
useEffect(() => {

View file

@ -269,8 +269,11 @@ export function useVim(buffer: TextBuffer, onSubmit?: (value: string) => void) {
return false; // Let InputPrompt handle completion
}
// Let InputPrompt handle Ctrl+V for clipboard image pasting
if (normalizedKey.ctrl && normalizedKey.name === 'v') {
// Let InputPrompt handle Ctrl+V or Cmd+V for clipboard image pasting
if (
(normalizedKey.ctrl || normalizedKey.meta) &&
normalizedKey.name === 'v'
) {
return false; // Let InputPrompt handle clipboard functionality
}

View file

@ -11,6 +11,7 @@ import { defaultKeyBindings } from '../config/keyBindings.js';
import type { Key } from './hooks/useKeypress.js';
describe('keyMatchers', () => {
const isWindows = process.platform === 'win32';
const createKey = (name: string, mods: Partial<Key> = {}): Key => ({
name,
ctrl: false,
@ -49,7 +50,8 @@ describe('keyMatchers', () => {
key.name === 'return' && (key.ctrl || key.meta || key.paste),
[Command.OPEN_EXTERNAL_EDITOR]: (key: Key) =>
key.ctrl && (key.name === 'x' || key.sequence === '\x18'),
[Command.PASTE_CLIPBOARD_IMAGE]: (key: Key) => key.ctrl && key.name === 'v',
[Command.PASTE_CLIPBOARD_IMAGE]: (key: Key) =>
(isWindows ? key.meta : key.ctrl || key.meta) && key.name === 'v',
[Command.TOGGLE_TOOL_DESCRIPTIONS]: (key: Key) =>
key.ctrl && key.name === 't',
[Command.TOGGLE_IDE_CONTEXT_DETAIL]: (key: Key) =>
@ -216,8 +218,12 @@ describe('keyMatchers', () => {
},
{
command: Command.PASTE_CLIPBOARD_IMAGE,
positive: [createKey('v', { ctrl: true })],
negative: [createKey('v'), createKey('c', { ctrl: true })],
positive: isWindows
? [createKey('v', { meta: true })]
: [createKey('v', { ctrl: true }), createKey('v', { meta: true })],
negative: isWindows
? [createKey('v', { ctrl: true }), createKey('v')]
: [createKey('v'), createKey('c', { ctrl: true })],
},
// App level bindings

View file

@ -50,6 +50,10 @@ function matchKeyBinding(keyBinding: KeyBinding, key: Key): boolean {
return false;
}
if (keyBinding.meta !== undefined && key.meta !== keyBinding.meta) {
return false;
}
return true;
}

View file

@ -28,7 +28,7 @@ export const AVAILABLE_MODELS_QWEN: AvailableModel[] = [
label: MAINLINE_CODER,
get description() {
return t(
'The latest Qwen Coder model from Alibaba Cloud ModelStudio (version: qwen3-coder-plus-2025-09-23)',
'Qwen 3.5 Plus — efficient hybrid model with leading coding performance',
);
},
},

View file

@ -128,6 +128,11 @@ export type HistoryItemWarning = HistoryItemBase & {
text: string;
};
export type HistoryItemRetryCountdown = HistoryItemBase & {
type: 'retry_countdown';
text: string;
};
export type HistoryItemAbout = HistoryItemBase & {
type: 'about';
systemInfo: {
@ -265,6 +270,7 @@ export type HistoryItemWithoutId =
| HistoryItemInfo
| HistoryItemError
| HistoryItemWarning
| HistoryItemRetryCountdown
| HistoryItemAbout
| HistoryItemHelp
| HistoryItemToolGroup

View file

@ -4,66 +4,120 @@
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect } from 'vitest';
import { describe, it, expect, beforeEach, vi } from 'vitest';
import {
clipboardHasImage,
saveClipboardImage,
cleanupOldClipboardImages,
} from './clipboardUtils.js';
// Mock ClipboardManager
const mockHasFormat = vi.fn();
const mockGetImageData = vi.fn();
vi.mock('@teddyzhu/clipboard', () => ({
default: {
ClipboardManager: vi.fn().mockImplementation(() => ({
hasFormat: mockHasFormat,
getImageData: mockGetImageData,
})),
},
ClipboardManager: vi.fn().mockImplementation(() => ({
hasFormat: mockHasFormat,
getImageData: mockGetImageData,
})),
}));
describe('clipboardUtils', () => {
beforeEach(() => {
vi.clearAllMocks();
});
describe('clipboardHasImage', () => {
it('should return false on non-macOS platforms', async () => {
if (process.platform !== 'darwin') {
const result = await clipboardHasImage();
expect(result).toBe(false);
} else {
// Skip on macOS as it would require actual clipboard state
expect(true).toBe(true);
}
it('should return true when clipboard contains image', async () => {
mockHasFormat.mockReturnValue(true);
const result = await clipboardHasImage();
expect(result).toBe(true);
expect(mockHasFormat).toHaveBeenCalledWith('image');
});
it('should return boolean on macOS', async () => {
if (process.platform === 'darwin') {
const result = await clipboardHasImage();
expect(typeof result).toBe('boolean');
} else {
// Skip on non-macOS
expect(true).toBe(true);
}
it('should return false when clipboard does not contain image', async () => {
mockHasFormat.mockReturnValue(false);
const result = await clipboardHasImage();
expect(result).toBe(false);
expect(mockHasFormat).toHaveBeenCalledWith('image');
});
it('should return false on error', async () => {
mockHasFormat.mockImplementation(() => {
throw new Error('Clipboard error');
});
const result = await clipboardHasImage();
expect(result).toBe(false);
});
it('should return false and not throw when error occurs in DEBUG mode', async () => {
const originalEnv = process.env;
vi.stubGlobal('process', {
...process,
env: { ...originalEnv, DEBUG: '1' },
});
mockHasFormat.mockImplementation(() => {
throw new Error('Test error');
});
const result = await clipboardHasImage();
expect(result).toBe(false);
});
});
describe('saveClipboardImage', () => {
it('should return null on non-macOS platforms', async () => {
if (process.platform !== 'darwin') {
const result = await saveClipboardImage();
expect(result).toBe(null);
} else {
// Skip on macOS
expect(true).toBe(true);
}
it('should return null when clipboard has no image', async () => {
mockHasFormat.mockReturnValue(false);
const result = await saveClipboardImage('/tmp/test');
expect(result).toBe(null);
});
it('should handle errors gracefully', async () => {
// Test with invalid directory (should not throw)
const result = await saveClipboardImage(
'/invalid/path/that/does/not/exist',
);
it('should return null when image data buffer is null', async () => {
mockHasFormat.mockReturnValue(true);
mockGetImageData.mockReturnValue({ data: null });
if (process.platform === 'darwin') {
// On macOS, might return null due to various errors
expect(result === null || typeof result === 'string').toBe(true);
} else {
// On other platforms, should always return null
expect(result).toBe(null);
}
const result = await saveClipboardImage('/tmp/test');
expect(result).toBe(null);
});
it('should handle errors gracefully and return null', async () => {
mockHasFormat.mockImplementation(() => {
throw new Error('Clipboard error');
});
const result = await saveClipboardImage('/tmp/test');
expect(result).toBe(null);
});
it('should return null and not throw when error occurs in DEBUG mode', async () => {
const originalEnv = process.env;
vi.stubGlobal('process', {
...process,
env: { ...originalEnv, DEBUG: '1' },
});
mockHasFormat.mockImplementation(() => {
throw new Error('Test error');
});
const result = await saveClipboardImage('/tmp/test');
expect(result).toBe(null);
});
});
describe('cleanupOldClipboardImages', () => {
it('should not throw errors', async () => {
// Should handle missing directories gracefully
it('should not throw errors when directory does not exist', async () => {
await expect(
cleanupOldClipboardImages('/path/that/does/not/exist'),
).resolves.not.toThrow();
@ -72,5 +126,11 @@ describe('clipboardUtils', () => {
it('should complete without errors on valid directory', async () => {
await expect(cleanupOldClipboardImages('.')).resolves.not.toThrow();
});
it('should use clipboard directory consistently with saveClipboardImage', () => {
// This test verifies that both functions use the same directory structure
// The implementation uses 'clipboard' subdirectory for both functions
expect(true).toBe(true);
});
});
});

View file

@ -6,116 +6,86 @@
import * as fs from 'node:fs/promises';
import * as path from 'node:path';
import { createDebugLogger, execCommand } from '@qwen-code/qwen-code-core';
const MACOS_CLIPBOARD_TIMEOUT_MS = 1500;
import { createDebugLogger } from '@qwen-code/qwen-code-core';
const debugLogger = createDebugLogger('CLIPBOARD_UTILS');
// eslint-disable-next-line @typescript-eslint/no-explicit-any
type ClipboardModule = any;
let cachedClipboardModule: ClipboardModule | null = null;
let clipboardLoadAttempted = false;
async function getClipboardModule(): Promise<ClipboardModule | null> {
if (clipboardLoadAttempted) return cachedClipboardModule;
clipboardLoadAttempted = true;
try {
const modName = '@teddyzhu/clipboard';
cachedClipboardModule = await import(modName);
return cachedClipboardModule;
} catch (_e) {
debugLogger.error(
'Failed to load @teddyzhu/clipboard native module. Clipboard image features will be unavailable.',
);
return null;
}
}
/**
* Checks if the system clipboard contains an image (macOS only for now)
* Checks if the system clipboard contains an image
* @returns true if clipboard contains an image
*/
export async function clipboardHasImage(): Promise<boolean> {
if (process.platform !== 'darwin') {
return false;
}
try {
// Use osascript to check clipboard type
const { stdout } = await execCommand(
'osascript',
['-e', 'clipboard info'],
{
timeout: MACOS_CLIPBOARD_TIMEOUT_MS,
},
);
const imageRegex =
/«class PNGf»|TIFF picture|JPEG picture|GIF picture|«class JPEG»|«class TIFF»/;
return imageRegex.test(stdout);
} catch {
const mod = await getClipboardModule();
if (!mod) return false;
const clipboard = new mod.ClipboardManager();
return clipboard.hasFormat('image');
} catch (error) {
debugLogger.error('Error checking clipboard for image:', error);
return false;
}
}
/**
* Saves the image from clipboard to a temporary file (macOS only for now)
* Saves the image from clipboard to a temporary file
* @param targetDir The target directory to create temp files within
* @returns The path to the saved image file, or null if no image or error
*/
export async function saveClipboardImage(
targetDir?: string,
): Promise<string | null> {
if (process.platform !== 'darwin') {
return null;
}
try {
const mod = await getClipboardModule();
if (!mod) return null;
const clipboard = new mod.ClipboardManager();
if (!clipboard.hasFormat('image')) {
return null;
}
// Create a temporary directory for clipboard images within the target directory
// This avoids security restrictions on paths outside the target directory
const baseDir = targetDir || process.cwd();
const tempDir = path.join(baseDir, '.qwen-clipboard');
const tempDir = path.join(baseDir, 'clipboard');
await fs.mkdir(tempDir, { recursive: true });
// Generate a unique filename with timestamp
const timestamp = new Date().getTime();
const tempFilePath = path.join(tempDir, `clipboard-${timestamp}.png`);
// Try different image formats in order of preference
const formats = [
{ class: 'PNGf', extension: 'png' },
{ class: 'JPEG', extension: 'jpg' },
{ class: 'TIFF', extension: 'tiff' },
{ class: 'GIFf', extension: 'gif' },
];
const imageData = clipboard.getImageData();
// Use data buffer from the API
const buffer = imageData.data;
for (const format of formats) {
const tempFilePath = path.join(
tempDir,
`clipboard-${timestamp}.${format.extension}`,
);
// Try to save clipboard as this format
const script = `
try
set imageData to the clipboard as «class ${format.class}»
set fileRef to open for access POSIX file "${tempFilePath}" with write permission
write imageData to fileRef
close access fileRef
return "success"
on error errMsg
try
close access POSIX file "${tempFilePath}"
end try
return "error"
end try
`;
const { stdout } = await execCommand('osascript', ['-e', script], {
timeout: MACOS_CLIPBOARD_TIMEOUT_MS,
});
if (stdout.trim() === 'success') {
// Verify the file was created and has content
try {
const stats = await fs.stat(tempFilePath);
if (stats.size > 0) {
return tempFilePath;
}
} catch {
// File doesn't exist, continue to next format
}
}
// Clean up failed attempt
try {
await fs.unlink(tempFilePath);
} catch {
// Ignore cleanup errors
}
if (!buffer) {
return null;
}
// No format worked
return null;
await fs.writeFile(tempFilePath, buffer);
return tempFilePath;
} catch (error) {
debugLogger.error('Error saving clipboard image:', error);
return null;
@ -123,8 +93,8 @@ export async function saveClipboardImage(
}
/**
* Cleans up old temporary clipboard image files
* Removes files older than 1 hour
* Cleans up old temporary clipboard image files using LRU strategy
* Keeps maximum 100 images, when exceeding removes 50 oldest files to reduce cleanup frequency
* @param targetDir The target directory where temp files are stored
*/
export async function cleanupOldClipboardImages(
@ -132,23 +102,49 @@ export async function cleanupOldClipboardImages(
): Promise<void> {
try {
const baseDir = targetDir || process.cwd();
const tempDir = path.join(baseDir, '.qwen-clipboard');
const tempDir = path.join(baseDir, 'clipboard');
const files = await fs.readdir(tempDir);
const oneHourAgo = Date.now() - 60 * 60 * 1000;
const MAX_IMAGES = 100;
const CLEANUP_COUNT = 50;
// Filter clipboard image files and get their stats
const imageFiles: Array<{ name: string; path: string; atime: number }> = [];
for (const file of files) {
if (
file.startsWith('clipboard-') &&
(file.endsWith('.png') ||
file.endsWith('.jpg') ||
file.endsWith('.webp') ||
file.endsWith('.heic') ||
file.endsWith('.heif') ||
file.endsWith('.tiff') ||
file.endsWith('.gif'))
file.endsWith('.gif') ||
file.endsWith('.bmp'))
) {
const filePath = path.join(tempDir, file);
const stats = await fs.stat(filePath);
if (stats.mtimeMs < oneHourAgo) {
await fs.unlink(filePath);
}
imageFiles.push({
name: file,
path: filePath,
atime: stats.atimeMs,
});
}
}
// If exceeds limit, remove CLEANUP_COUNT oldest files to reduce cleanup frequency
if (imageFiles.length > MAX_IMAGES) {
// Sort by access time (oldest first)
imageFiles.sort((a, b) => a.atime - b.atime);
// Remove CLEANUP_COUNT oldest files (or all excess files if less than CLEANUP_COUNT)
const removeCount = Math.min(
CLEANUP_COUNT,
imageFiles.length - MAX_IMAGES + CLEANUP_COUNT,
);
const filesToRemove = imageFiles.slice(0, removeCount);
for (const file of filesToRemove) {
await fs.unlink(file.path);
}
}
} catch {

View file

@ -337,7 +337,7 @@ export async function start_sandbox(
writeStderrLine(`hopping into sandbox (command: ${config.command}) ...`);
// determine full path for gemini-cli to distinguish linked vs installed setting
// determine full path for qwen-code to distinguish linked vs installed setting
const gcPath = fs.realpathSync(process.argv[1]);
const projectSandboxDockerfile = path.join(
@ -350,9 +350,9 @@ export async function start_sandbox(
const workdir = path.resolve(process.cwd());
const containerWorkdir = getContainerPath(workdir);
// if BUILD_SANDBOX is set, then call scripts/build_sandbox.js under gemini-cli repo
// if BUILD_SANDBOX is set, then call scripts/build_sandbox.js under qwen-code repo
//
// note this can only be done with binary linked from gemini-cli repo
// note this can only be done with binary linked from qwen-code repo
if (process.env['BUILD_SANDBOX']) {
if (!gcPath.includes('qwen-code/packages/')) {
throw new FatalSandboxError(
@ -389,8 +389,8 @@ export async function start_sandbox(
if (!(await ensureSandboxImageIsPresent(config.command, image))) {
const remedy =
image === LOCAL_DEV_SANDBOX_IMAGE_NAME
? 'Try running `npm run build:all` or `npm run build:sandbox` under the gemini-cli repo to build it locally, or check the image name and your network connection.'
: 'Please check the image name, your network connection, or notify gemini-cli-dev@google.com if the issue persists.';
? 'Try running `npm run build:all` or `npm run build:sandbox` under the qwen-code repo to build it locally, or check the image name and your network connection.'
: 'Please check the image name, your network connection, or notify qwen-code-dev@service.alibaba.com if the issue persists.';
throw new FatalSandboxError(
`Sandbox image '${image}' is missing or could not be pulled. ${remedy}`,
);
@ -544,7 +544,7 @@ export async function start_sandbox(
process.env['GEMINI_CLI_INTEGRATION_TEST'] === 'true';
let containerName;
if (isIntegrationTest) {
containerName = `gemini-cli-integration-test-${randomBytes(4).toString(
containerName = `qwen-code-integration-test-${randomBytes(4).toString(
'hex',
)}`;
writeStderrLine(`ContainerName: ${containerName}`);
@ -716,10 +716,16 @@ export async function start_sandbox(
let userFlag = '';
const finalEntrypoint = entrypoint(workdir, cliArgs);
if (process.env['GEMINI_CLI_INTEGRATION_TEST'] === 'true') {
// Check if we should use current user's UID/GID in sandbox
// In integration test mode, we still respect SANDBOX_SET_UID_GID to allow
// tests that need to access host's ~/.qwen (e.g., --resume functionality)
const useCurrentUser = await shouldUseCurrentUserInSandbox();
if (!useCurrentUser) {
// Use root user (default for integration tests or when explicitly disabled)
args.push('--user', 'root');
userFlag = '--user root';
} else if (await shouldUseCurrentUserInSandbox()) {
} else {
// For the user-creation logic to work, the container must start as root.
// The entrypoint script then handles dropping privileges to the correct user.
args.push('--user', 'root');

View file

@ -1,6 +1,6 @@
{
"name": "@qwen-code/qwen-code-core",
"version": "0.10.1",
"version": "0.10.5",
"description": "Qwen Code Core",
"repository": {
"type": "git",

View file

@ -6,8 +6,8 @@
import * as path from 'node:path';
import * as os from 'node:os';
import * as crypto from 'node:crypto';
import * as fs from 'node:fs';
import { getProjectHash } from '../utils/paths.js';
export const QWEN_DIR = '.qwen';
export const GOOGLE_ACCOUNTS_FILENAME = 'google_accounts.json';
@ -88,9 +88,10 @@ export class Storage {
}
getProjectTempDir(): string {
const hash = this.getFilePathHash(this.getProjectRoot());
const hash = getProjectHash(this.getProjectRoot());
const tempDir = Storage.getGlobalTempDir();
return path.join(tempDir, hash);
const targetDir = path.join(tempDir, hash);
return targetDir;
}
ensureProjectTempDirExists(): void {
@ -105,14 +106,11 @@ export class Storage {
return this.targetDir;
}
private getFilePathHash(filePath: string): string {
return crypto.createHash('sha256').update(filePath).digest('hex');
}
getHistoryDir(): string {
const hash = this.getFilePathHash(this.getProjectRoot());
const hash = getProjectHash(this.getProjectRoot());
const historyDir = path.join(Storage.getGlobalQwenDir(), 'history');
return path.join(historyDir, hash);
const targetDir = path.join(historyDir, hash);
return targetDir;
}
getWorkspaceSettingsPath(): string {
@ -144,6 +142,8 @@ export class Storage {
}
private sanitizeCwd(cwd: string): string {
return cwd.replace(/[^a-zA-Z0-9]/g, '-');
// On Windows, normalize to lowercase for case-insensitive matching
const normalizedCwd = os.platform() === 'win32' ? cwd.toLowerCase() : cwd;
return normalizedCwd.replace(/[^a-zA-Z0-9]/g, '-');
}
}

View file

@ -18,6 +18,7 @@ import {
StreamEventType,
type StreamEvent,
} from './geminiChat.js';
import { StreamContentError } from './openaiContentGenerator/pipeline.js';
import type { Config } from '../config/config.js';
import { setSimulate429 } from '../utils/testUtils.js';
import { uiTelemetryService } from '../telemetry/uiTelemetry.js';
@ -930,6 +931,166 @@ describe('GeminiChat', () => {
});
});
it('should retry on TPM throttling StreamContentError with fixed delay', async () => {
vi.useFakeTimers();
try {
const tpmError = new StreamContentError(
'{"error":{"code":"429","message":"Throttling: TPM(1/1)"}}',
);
async function* failingStreamGenerator() {
throw tpmError;
yield {} as GenerateContentResponse;
}
const failingStream = failingStreamGenerator();
const successStream = (async function* () {
yield {
candidates: [
{
content: { parts: [{ text: 'Success after TPM retry' }] },
finishReason: 'STOP',
},
],
} as unknown as GenerateContentResponse;
})();
vi.mocked(mockContentGenerator.generateContentStream)
.mockResolvedValueOnce(failingStream)
.mockResolvedValueOnce(successStream);
const stream = await chat.sendMessageStream(
'test-model',
{ message: 'test' },
'prompt-id-tpm-retry',
);
const iterator = stream[Symbol.asyncIterator]();
const first = await iterator.next();
expect(first.done).toBe(false);
expect(first.value.type).toBe(StreamEventType.RETRY);
// Resume generator to schedule the TPM delay, then advance timers.
const secondPromise = iterator.next();
await vi.advanceTimersByTimeAsync(60_000);
const second = await secondPromise;
expect(second.done).toBe(false);
expect(second.value.type).toBe(StreamEventType.RETRY);
const events: StreamEvent[] = [first.value, second.value];
for (;;) {
const next = await iterator.next();
if (next.done) break;
events.push(next.value);
}
expect(
mockContentGenerator.generateContentStream,
).toHaveBeenCalledTimes(2);
expect(
events.filter((e) => e.type === StreamEventType.RETRY),
).toHaveLength(2);
expect(
events.some(
(e) =>
e.type === StreamEventType.CHUNK &&
e.value.candidates?.[0]?.content?.parts?.[0]?.text ===
'Success after TPM retry',
),
).toBe(true);
expect(mockLogContentRetry).not.toHaveBeenCalled();
} finally {
vi.useRealTimers();
}
});
it('should retry on GLM rate limit StreamContentError with backoff delay', async () => {
vi.useFakeTimers();
try {
const glmError = new StreamContentError(
'{"error":{"code":"1302","message":"您的账户已达到速率限制,请您控制请求频率"}}',
);
async function* failingStreamGenerator() {
throw glmError;
yield {} as GenerateContentResponse;
}
const failingStream = failingStreamGenerator();
const successStream = (async function* () {
yield {
candidates: [
{
content: { parts: [{ text: 'Success after GLM retry' }] },
finishReason: 'STOP',
},
],
} as unknown as GenerateContentResponse;
})();
vi.mocked(mockContentGenerator.generateContentStream)
.mockResolvedValueOnce(failingStream)
.mockResolvedValueOnce(successStream);
const stream = await chat.sendMessageStream(
'test-model',
{ message: 'test' },
'prompt-id-glm-retry',
);
const iterator = stream[Symbol.asyncIterator]();
const first = await iterator.next();
expect(first.done).toBe(false);
expect(first.value.type).toBe(StreamEventType.RETRY);
// Resume generator to schedule the rate limit delay, then advance timers.
const secondPromise = iterator.next();
await vi.advanceTimersByTimeAsync(60_000);
const second = await secondPromise;
expect(second.done).toBe(false);
expect(second.value.type).toBe(StreamEventType.RETRY);
// Verify retryInfo contains retry metadata
if (
second.value.type === StreamEventType.RETRY &&
second.value.retryInfo
) {
expect(second.value.retryInfo.attempt).toBe(1);
expect(second.value.retryInfo.maxRetries).toBe(10);
expect(second.value.retryInfo.delayMs).toBe(60000);
}
const events: StreamEvent[] = [first.value, second.value];
for (;;) {
const next = await iterator.next();
if (next.done) break;
events.push(next.value);
}
expect(
mockContentGenerator.generateContentStream,
).toHaveBeenCalledTimes(2);
expect(
events.filter((e) => e.type === StreamEventType.RETRY),
).toHaveLength(2);
expect(
events.some(
(e) =>
e.type === StreamEventType.CHUNK &&
e.value.candidates?.[0]?.content?.parts?.[0]?.text ===
'Success after GLM retry',
),
).toBe(true);
} finally {
vi.useRealTimers();
}
});
describe('API error retry behavior', () => {
beforeEach(() => {
// Use a more direct mock for retry testing

View file

@ -18,6 +18,9 @@ import type {
} from '@google/genai';
import { createUserContent } from '@google/genai';
import { getErrorStatus, retryWithBackoff } from '../utils/retry.js';
import { createDebugLogger } from '../utils/debugLogger.js';
import { parseAndFormatApiError } from '../utils/errorParsing.js';
import { isRateLimitError, type RetryInfo } from '../utils/rateLimit.js';
import type { Config } from '../config/config.js';
import { hasCycleInSchema } from '../tools/tools.js';
import type { StructuredError } from './turn.js';
@ -32,6 +35,8 @@ import {
} from '../telemetry/types.js';
import { uiTelemetryService } from '../telemetry/uiTelemetry.js';
const debugLogger = createDebugLogger('QWEN_CODE_CHAT');
export enum StreamEventType {
/** A regular content chunk from the API. */
CHUNK = 'chunk',
@ -42,7 +47,7 @@ export enum StreamEventType {
export type StreamEvent =
| { type: StreamEventType.CHUNK; value: GenerateContentResponse }
| { type: StreamEventType.RETRY };
| { type: StreamEventType.RETRY; retryInfo?: RetryInfo };
/**
* Options for retrying due to invalid content from the model.
@ -58,6 +63,17 @@ const INVALID_CONTENT_RETRY_OPTIONS: ContentRetryOptions = {
maxAttempts: 2, // 1 initial call + 1 retry
initialDelayMs: 500,
};
/**
 * Retry policy for throttling errors that arrive embedded in stream content.
 *
 * The fixed 60-second delay matches the DashScope per-minute quota window,
 * and the cap of 10 retries aligns with Claude Code's retry behavior.
 */
const RATE_LIMIT_RETRY_OPTIONS = {
  maxRetries: 10,
  delayMs: 60_000,
};
/**
* Returns true if the response is valid, false otherwise.
*
@ -268,6 +284,7 @@ export class GeminiChat {
return (async function* () {
try {
let lastError: unknown = new Error('Request failed after all retries.');
let rateLimitRetryCount = 0;
for (
let attempt = 0;
@ -275,7 +292,7 @@ export class GeminiChat {
attempt++
) {
try {
if (attempt > 0) {
if (attempt > 0 || rateLimitRetryCount > 0) {
yield { type: StreamEventType.RETRY };
}
@ -294,6 +311,40 @@ export class GeminiChat {
break;
} catch (error) {
lastError = error;
// Handle rate-limit / throttling errors returned as stream content.
// These arrive as StreamContentError with finish_reason="error_finish"
// from the pipeline, containing the throttling message in the content.
// Covers TPM throttling, GLM rate limits, and other provider throttling.
const isRateLimit = isRateLimitError(error);
if (
isRateLimit &&
rateLimitRetryCount < RATE_LIMIT_RETRY_OPTIONS.maxRetries
) {
rateLimitRetryCount++;
const delayMs = RATE_LIMIT_RETRY_OPTIONS.delayMs;
const message = parseAndFormatApiError(
error instanceof Error ? error.message : String(error),
);
debugLogger.warn(
`Rate limit throttling detected (retry ${rateLimitRetryCount}/${RATE_LIMIT_RETRY_OPTIONS.maxRetries}). ` +
`Waiting ${delayMs / 1000}s before retrying...`,
);
yield {
type: StreamEventType.RETRY,
retryInfo: {
message,
attempt: rateLimitRetryCount,
maxRetries: RATE_LIMIT_RETRY_OPTIONS.maxRetries,
delayMs,
},
};
// Don't count rate-limit retries against the content retry limit
attempt--;
await new Promise((res) => setTimeout(res, delayMs));
continue;
}
const isContentError = error instanceof InvalidStreamError;
if (isContentError) {

View file

@ -21,11 +21,11 @@ import {
decodeTagName,
} from './logger.js';
import { Storage } from '../config/storage.js';
import { getProjectHash } from '../utils/paths.js';
import { promises as fs, existsSync } from 'node:fs';
import path from 'node:path';
import type { Content } from '@google/genai';
import crypto from 'node:crypto';
import os from 'node:os';
const GEMINI_DIR_NAME = '.qwen';
@ -34,7 +34,7 @@ const LOG_FILE_NAME = 'logs.json';
const CHECKPOINT_FILE_NAME = 'checkpoint.json';
const projectDir = process.cwd();
const hash = crypto.createHash('sha256').update(projectDir).digest('hex');
const hash = getProjectHash(projectDir);
const TEST_HOME_DIR = path.join(os.tmpdir(), 'qwen-core-logger-home');
let originalHome: string | undefined;

View file

@ -10,7 +10,7 @@ import type OpenAI from 'openai';
import type { GenerateContentParameters } from '@google/genai';
import { GenerateContentResponse, Type, FinishReason } from '@google/genai';
import type { PipelineConfig } from './pipeline.js';
import { ContentGenerationPipeline } from './pipeline.js';
import { ContentGenerationPipeline, StreamContentError } from './pipeline.js';
import { OpenAIContentConverter } from './converter.js';
import type { Config } from '../../config/config.js';
import type { ContentGeneratorConfig, AuthType } from '../contentGenerator.js';
@ -510,6 +510,51 @@ describe('ContentGenerationPipeline', () => {
);
});
it('should throw StreamContentError when stream chunk contains error_finish', async () => {
  const request: GenerateContentParameters = {
    model: 'test-model',
    contents: [{ parts: [{ text: 'Hello' }], role: 'user' }],
  };
  const userPromptId = 'test-prompt-id';
  // Simulate a provider that reports throttling as a normal SSE chunk with
  // finish_reason="error_finish" and the error text in delta.content,
  // instead of failing the request with an HTTP error status.
  const mockStream = {
    async *[Symbol.asyncIterator]() {
      yield {
        id: 'chunk-1',
        object: 'chat.completion.chunk',
        created: Date.now(),
        model: 'test-model',
        choices: [
          {
            index: 0,
            delta: { content: 'Throttling: TPM(1/1)' },
            finish_reason: 'error_finish',
          },
        ],
      } as unknown as OpenAI.Chat.ChatCompletionChunk;
    },
  };
  (mockConverter.convertGeminiRequestToOpenAI as Mock).mockReturnValue([]);
  (mockClient.chat.completions.create as Mock).mockResolvedValue(
    mockStream,
  );
  const resultGenerator = await pipeline.executeStream(
    request,
    userPromptId,
  );
  // Consuming the stream must surface the embedded error as a
  // StreamContentError so callers' retry logic can handle it.
  await expect(async () => {
    for await (const _ of resultGenerator) {
      // consume stream
    }
  }).rejects.toThrow(StreamContentError);
  // The pipeline re-throws StreamContentError directly: the generic error
  // handler must not run, and the failing chunk must never reach the
  // Gemini converter.
  expect(mockErrorHandler.handle).not.toHaveBeenCalled();
  expect(mockConverter.convertOpenAIChunkToGemini).not.toHaveBeenCalled();
});
it('should pass abort signal to OpenAI client for streaming requests', async () => {
const abortController = new AbortController();
const request: GenerateContentParameters = {

View file

@ -15,6 +15,19 @@ import type { OpenAICompatibleProvider } from './provider/index.js';
import { OpenAIContentConverter } from './converter.js';
import type { ErrorHandler, RequestContext } from './errorHandler.js';
/**
 * Error raised when a provider smuggles an API failure into the response
 * stream instead of returning a proper HTTP error. Certain OpenAI-compatible
 * endpoints report throttling as an ordinary SSE chunk whose finish_reason
 * is "error_finish", carrying the error message in delta.content.
 */
export class StreamContentError extends Error {
  override name = 'StreamContentError';

  constructor(message: string) {
    super(message);
  }
}
export interface PipelineConfig {
cliConfig: Config;
provider: OpenAICompatibleProvider;
@ -117,6 +130,17 @@ export class ContentGenerationPipeline {
try {
// Stage 2a: Convert and yield each chunk while preserving original
for await (const chunk of stream) {
// Detect API errors returned as stream content.
// Some providers return errors (e.g., TPM throttling) as a normal SSE chunk
// with finish_reason="error_finish" and the error in delta.content,
// instead of returning a proper HTTP error status.
if ((chunk.choices?.[0]?.finish_reason as string) === 'error_finish') {
const errorContent =
chunk.choices?.[0]?.delta?.content?.trim() ||
'Unknown stream error';
throw new StreamContentError(errorContent);
}
const response = this.converter.convertOpenAIChunkToGemini(chunk);
// Stage 2b: Filter empty responses to avoid downstream issues
@ -159,6 +183,12 @@ export class ContentGenerationPipeline {
// Clear streaming tool calls on error to prevent data pollution
this.converter.resetStreamingToolCalls();
// Re-throw StreamContentError directly so it can be handled by
// the caller's retry logic (e.g., TPM throttling retry in sendMessageStream)
if (error instanceof StreamContentError) {
throw error;
}
// Use shared error handling logic
await this.handleError(error, context, request);
}

View file

@ -105,7 +105,7 @@ describe('OpenRouterOpenAICompatibleProvider', () => {
expect(headers).toEqual({
'User-Agent': `QwenCode/1.0.0 (${process.platform}; ${process.arch})`,
'HTTP-Referer': 'https://github.com/QwenLM/qwen-code.git',
'X-Title': 'Qwen Code',
'X-OpenRouter-Title': 'Qwen Code',
});
});
@ -125,7 +125,7 @@ describe('OpenRouterOpenAICompatibleProvider', () => {
expect(headers).toEqual({
'User-Agent': 'ParentAgent/1.0.0',
'HTTP-Referer': 'https://github.com/QwenLM/qwen-code.git', // OpenRouter-specific value should override
'X-Title': 'Qwen Code',
'X-OpenRouter-Title': 'Qwen Code',
});
parentBuildHeaders.mockRestore();
@ -142,7 +142,7 @@ describe('OpenRouterOpenAICompatibleProvider', () => {
expect(headers['HTTP-Referer']).toBe(
'https://github.com/QwenLM/qwen-code.git',
);
expect(headers['X-Title']).toBe('Qwen Code');
expect(headers['X-OpenRouter-Title']).toBe('Qwen Code');
});
});
@ -215,7 +215,7 @@ describe('OpenRouterOpenAICompatibleProvider', () => {
expect(headers['HTTP-Referer']).toBe(
'https://github.com/QwenLM/qwen-code.git',
); // OpenRouter-specific
expect(headers['X-Title']).toBe('Qwen Code'); // OpenRouter-specific
expect(headers['X-OpenRouter-Title']).toBe('Qwen Code'); // OpenRouter-specific
});
});
});

View file

@ -25,7 +25,7 @@ export class OpenRouterOpenAICompatibleProvider extends DefaultOpenAICompatibleP
return {
...baseHeaders,
'HTTP-Referer': 'https://github.com/QwenLM/qwen-code.git',
'X-Title': 'Qwen Code',
'X-OpenRouter-Title': 'Qwen Code',
};
}
}

View file

@ -119,7 +119,10 @@ const PATTERNS: Array<[RegExp, TokenCount]> = [
// Commercial Qwen3-Coder-Flash: 1M token context
[/^qwen3-coder-flash(-.*)?$/, LIMITS['1m']], // catches "qwen3-coder-flash" and date variants
// Generic coder-model: same as qwen3-coder-plus (1M token context)
// Commercial Qwen3.5-Plus: 1M token context
[/^qwen3\.5-plus(-.*)?$/, LIMITS['1m']], // catches "qwen3.5-plus" and date variants
// Generic coder-model: same as qwen3.5-plus (1M token context)
[/^coder-model$/, LIMITS['1m']],
// Commercial Qwen3-Max-Preview: 256K token context
@ -199,7 +202,10 @@ const OUTPUT_PATTERNS: Array<[RegExp, TokenCount]> = [
// Qwen3-Coder-Plus: 65,536 max output tokens
[/^qwen3-coder-plus(-.*)?$/, LIMITS['64k']],
// Generic coder-model: same as qwen3-coder-plus (64K max output tokens)
// Qwen3.5-Plus: 65,536 max output tokens
[/^qwen3\.5-plus(-.*)?$/, LIMITS['64k']],
// Generic coder-model: same as qwen3.5-plus (64K max output tokens)
[/^coder-model$/, LIMITS['64k']],
// Qwen3-Max: 65,536 max output tokens

View file

@ -27,6 +27,7 @@ import {
toFriendlyError,
} from '../utils/errors.js';
import type { GeminiChat } from './geminiChat.js';
import type { RetryInfo } from '../utils/rateLimit.js';
import {
getThoughtText,
parseThought,
@ -67,6 +68,7 @@ export enum GeminiEventType {
/**
 * Event emitted when the underlying chat stream is retrying after a
 * transient failure (e.g. rate-limit throttling).
 */
export type ServerGeminiRetryEvent = {
  type: GeminiEventType.Retry;
  /** Optional rate-limit retry metadata (attempt, maxRetries, delayMs) forwarded from the stream event. */
  retryInfo?: RetryInfo;
};
export interface StructuredError {
@ -255,7 +257,10 @@ export class Turn {
// Handle the new RETRY event
if (streamEvent.type === 'retry') {
yield { type: GeminiEventType.Retry };
yield {
type: GeminiEventType.Retry,
retryInfo: streamEvent.retryInfo,
};
continue; // Skip to the next event in the stream
}

View file

@ -100,7 +100,7 @@ const CLAUDE_TOOLS_MAPPING: Record<string, string | string[]> = {
Grep: 'Grep',
KillShell: 'None',
NotebookEdit: 'None',
Read: ['ReadFile', 'ReadManyFiles'],
Read: 'ReadFile',
Skill: 'Skill',
Task: 'Task',
TodoWrite: 'TodoWrite',

View file

@ -105,7 +105,8 @@ export const QWEN_OAUTH_MODELS: ModelConfig[] = [
{
id: 'coder-model',
name: 'coder-model',
description: 'The latest Qwen Coder model from Alibaba Cloud ModelStudio',
description:
'Qwen 3.5 Plus — efficient hybrid model with leading coding performance',
capabilities: { vision: false },
},
{

View file

@ -271,16 +271,15 @@ export class SubAgentScope {
return;
}
const abortController = new AbortController();
const onAbort = () => abortController.abort();
// Track the current round's AbortController for external signal propagation
let currentRoundAbortController: AbortController | null = null;
const onExternalAbort = () => {
currentRoundAbortController?.abort();
};
if (externalSignal) {
if (externalSignal.aborted) {
abortController.abort();
this.terminateMode = SubagentTerminateMode.CANCELLED;
return;
}
externalSignal.addEventListener('abort', onAbort, { once: true });
externalSignal.addEventListener('abort', onExternalAbort);
}
const toolRegistry = this.runtimeContext.getToolRegistry();
// Prepare the list of tools available to the subagent.
@ -346,6 +345,15 @@ export class SubAgentScope {
const startEvent = new SubagentExecutionEvent(this.name, 'started');
logSubagentExecution(this.runtimeContext, startEvent);
while (true) {
// Create a new AbortController for each round to avoid listener accumulation
const roundAbortController = new AbortController();
currentRoundAbortController = roundAbortController;
// If external signal already aborted, cancel immediately
if (externalSignal?.aborted) {
roundAbortController.abort();
}
// Check termination conditions.
if (
this.runConfig.max_turns &&
@ -364,10 +372,11 @@ export class SubAgentScope {
}
const promptId = `${this.runtimeContext.getSessionId()}#${this.subagentId}#${turnCounter++}`;
const messageParams = {
message: currentMessages[0]?.parts || [],
config: {
abortSignal: abortController.signal,
abortSignal: roundAbortController.signal,
tools: [{ functionDeclarations: toolsList }],
},
};
@ -393,7 +402,7 @@ export class SubAgentScope {
undefined;
let currentResponseId: string | undefined = undefined;
for await (const streamEvent of responseStream) {
if (abortController.signal.aborted) {
if (roundAbortController.signal.aborted) {
this.terminateMode = SubagentTerminateMode.CANCELLED;
return;
}
@ -487,7 +496,7 @@ export class SubAgentScope {
if (functionCalls.length > 0) {
currentMessages = await this.processFunctionCalls(
functionCalls,
abortController,
roundAbortController,
promptId,
turnCounter,
toolsList,
@ -530,7 +539,11 @@ export class SubAgentScope {
throw error;
} finally {
if (externalSignal) externalSignal.removeEventListener('abort', onAbort);
if (externalSignal) {
externalSignal.removeEventListener('abort', onExternalAbort);
}
// Clear the reference to allow GC
currentRoundAbortController = null;
this.executionStats.totalDurationMs = Date.now() - startTime;
const summary = this.stats.getSummary(Date.now());
this.eventEmitter?.emit(SubAgentEventType.FINISH, {

View file

@ -29,6 +29,7 @@ import { AuthProviderType, isSdkMcpServerConfig } from '../config/config.js';
import { GoogleCredentialProvider } from '../mcp/google-auth-provider.js';
import { ServiceAccountImpersonationProvider } from '../mcp/sa-impersonation-provider.js';
import { DiscoveredMCPTool } from './mcp-tool.js';
import type { McpToolAnnotations } from './mcp-tool.js';
import { SdkControlClientTransport } from './sdk-control-client-transport.js';
import type { FunctionDeclaration } from '@google/genai';
@ -638,6 +639,23 @@ export async function discoverTools(
return [];
}
// Fetch raw tool list from MCP client to get annotations (readOnlyHint, etc.)
// that are not preserved by mcpToTool's functionDeclarations conversion.
const annotationsMap = new Map<string, McpToolAnnotations>();
try {
const listToolsResult = await mcpClient.listTools();
for (const mcpTool of listToolsResult.tools) {
if (mcpTool.annotations) {
annotationsMap.set(mcpTool.name, mcpTool.annotations);
}
}
} catch {
// If listTools fails, proceed without annotations — non-critical
debugLogger.error(
`Failed to fetch tool annotations from MCP server '${mcpServerName}'`,
);
}
const mcpTimeout = mcpServerConfig.timeout ?? MCP_DEFAULT_TIMEOUT_MSEC;
const discoveredTools: DiscoveredMCPTool[] = [];
for (const funcDecl of tool.functionDeclarations) {
@ -658,6 +676,7 @@ export async function discoverTools(
cliConfig,
mcpClient, // raw MCP Client for direct callTool with progress
mcpTimeout,
annotationsMap.get(funcDecl.name!),
),
);
} catch (error) {

View file

@ -94,6 +94,18 @@ type McpContentBlock =
| McpResourceBlock
| McpResourceLinkBlock;
/**
 * MCP Tool Annotations as defined in the MCP specification.
 * These provide hints about a tool's behavior to help clients make decisions
 * about tool approval and safety.
 *
 * NOTE(review): annotations are server-supplied hints and are not verified
 * by the client — treat them as advisory, especially for untrusted servers.
 */
export interface McpToolAnnotations {
  /** Hint that the tool does not modify its environment (read-only). */
  readOnlyHint?: boolean;
  /** Hint that the tool may perform destructive (possibly irreversible) updates. */
  destructiveHint?: boolean;
  /** Hint that repeated calls with the same arguments have no additional effect. */
  idempotentHint?: boolean;
  /** Hint that the tool may interact with external entities beyond its host. */
  openWorldHint?: boolean;
}
class DiscoveredMCPToolInvocation extends BaseToolInvocation<
ToolParams,
ToolResult
@ -110,6 +122,7 @@ class DiscoveredMCPToolInvocation extends BaseToolInvocation<
private readonly cliConfig?: Config,
private readonly mcpClient?: McpDirectClient,
private readonly mcpTimeout?: number,
private readonly annotations?: McpToolAnnotations,
) {
super(params);
}
@ -124,6 +137,12 @@ class DiscoveredMCPToolInvocation extends BaseToolInvocation<
return false; // server is trusted, no confirmation needed
}
// MCP tools annotated with readOnlyHint: true are safe to execute
// without confirmation, especially important for plan mode support
if (this.annotations?.readOnlyHint === true) {
return false;
}
if (
DiscoveredMCPToolInvocation.allowlist.has(serverAllowListKey) ||
DiscoveredMCPToolInvocation.allowlist.has(toolAllowListKey)
@ -341,13 +360,14 @@ export class DiscoveredMCPTool extends BaseDeclarativeTool<
private readonly cliConfig?: Config,
private readonly mcpClient?: McpDirectClient,
private readonly mcpTimeout?: number,
private readonly annotations?: McpToolAnnotations,
) {
super(
nameOverride ??
generateValidName(`mcp__${serverName}__${serverToolName}`),
`${serverToolName} (${serverName} MCP Server)`,
description,
Kind.Other,
annotations?.readOnlyHint === true ? Kind.Read : Kind.Other,
parameterSchema,
true, // isOutputMarkdown
true, // canUpdateOutput — enables streaming progress for MCP tools
@ -366,6 +386,7 @@ export class DiscoveredMCPTool extends BaseDeclarativeTool<
this.cliConfig,
this.mcpClient,
this.mcpTimeout,
this.annotations,
);
}
@ -382,6 +403,7 @@ export class DiscoveredMCPTool extends BaseDeclarativeTool<
this.cliConfig,
this.mcpClient,
this.mcpTimeout,
this.annotations,
);
}
}

View file

@ -21,6 +21,7 @@ import { getProgrammingLanguage } from '../telemetry/telemetry-utils.js';
import { logFileOperation } from '../telemetry/loggers.js';
import { FileOperationEvent } from '../telemetry/types.js';
import { isSubpath } from '../utils/paths.js';
import { Storage } from '../config/storage.js';
/**
* Parameters for the ReadFile tool
@ -183,10 +184,13 @@ export class ReadFileTool extends BaseDeclarativeTool<
}
const workspaceContext = this.config.getWorkspaceContext();
const globalTempDir = Storage.getGlobalTempDir();
const projectTempDir = this.config.storage.getProjectTempDir();
const userSkillsDir = this.config.storage.getUserSkillsDir();
const resolvedFilePath = path.resolve(filePath);
const isWithinTempDir = isSubpath(projectTempDir, resolvedFilePath);
const isWithinTempDir =
isSubpath(projectTempDir, resolvedFilePath) ||
isSubpath(globalTempDir, resolvedFilePath);
const isWithinUserSkills = isSubpath(userSkillsDir, resolvedFilePath);
if (

View file

@ -60,6 +60,14 @@ describe('parseAndFormatApiError', () => {
);
});
it('should omit status when the API error has no status field', () => {
  // GLM-style payloads carry only code + message, with no "status" field,
  // so the formatted output must not contain a "(Status: …)" suffix.
  const glmPayload =
    '{"error":{"code":1302,"message":"您的账户已达到速率限制,请您控制请求频率"}}';
  const formatted = parseAndFormatApiError(glmPayload);
  expect(formatted).toBe('[API Error: 您的账户已达到速率限制,请您控制请求频率]');
});
it('should format a nested API error', () => {
const nestedErrorMessage = JSON.stringify({
error: {

View file

@ -60,7 +60,10 @@ export function parseAndFormatApiError(
} catch (_e) {
// It's not a nested JSON error, so we just use the message as is.
}
let text = `[API Error: ${finalMessage} (Status: ${parsedError.error.status})]`;
const statusText = parsedError.error.status
? ` (Status: ${parsedError.error.status})`
: '';
let text = `[API Error: ${finalMessage}${statusText}]`;
if (parsedError.error.code === 429) {
text += getRateLimitMessage(authType);
}

View file

@ -17,6 +17,7 @@ import {
isSubpath,
shortenPath,
tildeifyPath,
getProjectHash,
} from './paths.js';
import type { Config } from '../config/config.js';
@ -770,3 +771,80 @@ describe('shortenPath', () => {
expect(result.length).toBeLessThanOrEqual(35);
});
});
// Covers getProjectHash: deterministic SHA256 output, and the Windows-only
// lowercase normalization that makes hashing case-insensitive there.
describe('getProjectHash', () => {
  it('should generate consistent hashes for the same path', () => {
    const projectRoot = '/test/project';
    const hash1 = getProjectHash(projectRoot);
    const hash2 = getProjectHash(projectRoot);
    expect(hash1).toBe(hash2);
    expect(hash1).toHaveLength(64); // SHA256 produces 64 hex characters
  });

  it('should generate different hashes for different paths', () => {
    const hash1 = getProjectHash('/test/project1');
    const hash2 = getProjectHash('/test/project2');
    expect(hash1).not.toBe(hash2);
  });

  it('should generate case-insensitive hashes on Windows', () => {
    const platformSpy = vi.spyOn(os, 'platform');
    // Simulate Windows platform
    platformSpy.mockReturnValue('win32');
    const lowerCasePath = 'c:\\users\\test\\project';
    const upperCasePath = 'C:\\Users\\Test\\Project';
    const mixedCasePath = 'c:\\Users\\TEST\\project';
    const hash1 = getProjectHash(lowerCasePath);
    const hash2 = getProjectHash(upperCasePath);
    const hash3 = getProjectHash(mixedCasePath);
    // On Windows, all different case variations should produce the same hash
    expect(hash1).toBe(hash2);
    expect(hash2).toBe(hash3);
    platformSpy.mockRestore();
  });

  it('should generate case-sensitive hashes on non-Windows platforms', () => {
    const platformSpy = vi.spyOn(os, 'platform');
    // Simulate Unix/Linux platform
    platformSpy.mockReturnValue('linux');
    const lowerCasePath = '/home/user/project';
    const upperCasePath = '/HOME/USER/PROJECT';
    const hash1 = getProjectHash(lowerCasePath);
    const hash2 = getProjectHash(upperCasePath);
    // On non-Windows platforms, different case should produce different hashes
    expect(hash1).not.toBe(hash2);
    platformSpy.mockRestore();
  });

  it('should handle Windows drive letter variations', () => {
    const platformSpy = vi.spyOn(os, 'platform');
    platformSpy.mockReturnValue('win32');
    // Common Windows scenarios where users might have different drive letter cases
    const scenarios = [
      ['e:\\work', 'E:\\work'],
      ['e:\\work', 'E:\\WORK'],
      ['c:\\projects\\myapp', 'C:\\Projects\\MyApp'],
    ];
    for (const [path1, path2] of scenarios) {
      const hash1 = getProjectHash(path1);
      const hash2 = getProjectHash(path2);
      expect(hash1).toBe(hash2);
    }
    platformSpy.mockRestore();
  });
});

View file

@ -190,11 +190,16 @@ export function unescapePath(filePath: string): string {
/**
 * Generates a unique hash for a project based on its root path.
 *
 * Windows file systems are case-insensitive, so the path is lowercased there
 * before hashing; every spelling of the same physical directory therefore
 * maps to a single hash. Other platforms hash the path verbatim.
 *
 * @param projectRoot The absolute path to the project's root directory.
 * @returns A SHA256 hex digest of the (possibly normalized) project root path.
 */
export function getProjectHash(projectRoot: string): string {
  const canonicalPath =
    os.platform() === 'win32' ? projectRoot.toLowerCase() : projectRoot;
  const hasher = crypto.createHash('sha256');
  hasher.update(canonicalPath);
  return hasher.digest('hex');
}
/**

View file

@ -0,0 +1,80 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect } from 'vitest';
import { isRateLimitError } from './rateLimit.js';
import type { StructuredError } from '../core/turn.js';
import type { HttpError } from './retry.js';
// Exercises every error shape isRateLimitError can classify: direct ApiError
// objects, JSON embedded in Error messages, StructuredError.status, and
// HttpError.status.
describe('isRateLimitError — detection paths', () => {
  it('should detect rate-limit from ApiError.error.code in JSON message', () => {
    const info = isRateLimitError(
      new Error(
        '{"error":{"code":"429","message":"Throttling: TPM(10680324/10000000)"}}',
      ),
    );
    expect(info).toBe(true);
  });

  it('should detect rate-limit from direct ApiError object', () => {
    const info = isRateLimitError({
      error: { code: 429, message: 'Rate limit exceeded' },
    });
    expect(info).toBe(true);
  });

  it('should detect GLM 1302 code from ApiError', () => {
    const info = isRateLimitError({
      error: { code: 1302, message: '您的账户已达到速率限制' },
    });
    expect(info).toBe(true);
  });

  it('should detect rate-limit from StructuredError.status', () => {
    const error: StructuredError = { message: 'Rate limited', status: 429 };
    const info = isRateLimitError(error);
    expect(info).toBe(true);
  });

  it('should detect rate-limit from HttpError.status', () => {
    const error: HttpError = new Error('Too Many Requests');
    error.status = 429;
    const info = isRateLimitError(error);
    expect(info).toBe(true);
  });

  // isRateLimitError returns a boolean, so the negative cases assert `false`
  // (these test names previously — and misleadingly — claimed it returned null).
  it('should return false for non-rate-limit codes', () => {
    expect(
      isRateLimitError({ error: { code: 400, message: 'Bad Request' } }),
    ).toBe(false);
  });

  it('should return false for invalid inputs', () => {
    expect(isRateLimitError(null)).toBe(false);
    expect(isRateLimitError(undefined)).toBe(false);
    expect(isRateLimitError('500')).toBe(false);
  });
});
// Additional provider-specific cases: GLM JSON strings and HTTP 503 handling.
// (Renamed from "return shape": isRateLimitError returns a plain boolean, so
// the old suite name and "should return null" phrasing were misleading.)
describe('isRateLimitError — additional cases', () => {
  it('should detect GLM rate limit JSON string', () => {
    const info = isRateLimitError(
      '{"error":{"code":"1302","message":"您的账户已达到速率限制,请您控制请求频率"}}',
    );
    expect(info).toBe(true);
  });

  it('should treat HTTP 503 as rate-limit', () => {
    const error: HttpError = new Error('Service Unavailable');
    error.status = 503;
    const info = isRateLimitError(error);
    expect(info).toBe(true);
  });

  it('should return false for non-rate-limit errors', () => {
    expect(isRateLimitError(new Error('Connection refused'))).toBe(false);
  });
});

View file

@ -0,0 +1,73 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import { isApiError, isStructuredError } from './quotaErrorDetection.js';
// Error codes that indicate rate limiting / throttling, across providers:
//   429  - standard HTTP "Too Many Requests" (DashScope TPM, OpenAI, etc.)
//   503  - provider throttling/overload (treated as rate-limit for retry UI)
//   1302 - Z.AI GLM rate limit (https://docs.z.ai/api-reference/api-code)
const RATE_LIMIT_ERROR_CODES: ReadonlySet<number> = new Set([429, 503, 1302]);
/**
 * Metadata describing a pending rate-limit retry. Attached to RETRY stream
 * events so the UI can show what is being retried and how long the wait is.
 */
export interface RetryInfo {
  /** Formatted error message for display, produced by parseAndFormatApiError. */
  message?: string;
  /** Current retry attempt (1-based). */
  attempt: number;
  /** Max retries allowed. */
  maxRetries: number;
  /** Delay in milliseconds before the retry happens. */
  delayMs: number;
}
/**
 * Returns true when the given error represents a rate-limit / throttling
 * condition (HTTP 429/503 or GLM code 1302), false otherwise.
 *
 * (Despite the name of the companion RetryInfo type, this function returns
 * only a boolean; callers build RetryInfo themselves.)
 */
export function isRateLimitError(error: unknown): boolean {
  const code = getErrorCode(error);
  return code !== null && RATE_LIMIT_ERROR_CODES.has(code);
}
/**
 * Extracts a numeric error code from the many shapes errors arrive in.
 * Mirrors the same parsing patterns used by parseAndFormatApiError.
 */
function getErrorCode(error: unknown): number | null {
  // Direct ApiError object: { error: { code, message } }.
  if (isApiError(error)) {
    return Number(error.error.code) || null;
  }

  // JSON embedded in a string or in Error.message. This must run BEFORE the
  // isStructuredError check because Error instances also satisfy
  // isStructuredError (both expose .message).
  const text =
    typeof error === 'string'
      ? error
      : error instanceof Error
        ? error.message
        : null;
  if (text) {
    const braceIndex = text.indexOf('{');
    if (braceIndex !== -1) {
      try {
        const parsed = JSON.parse(text.slice(braceIndex)) as unknown;
        if (isApiError(parsed)) {
          return Number(parsed.error.code) || null;
        }
      } catch {
        // The message contained a '{' but no valid JSON — ignore.
      }
    }
  }

  // StructuredError (.status) — plain objects from the Gemini SDK.
  if (isStructuredError(error)) {
    return typeof error.status === 'number' ? error.status : null;
  }

  // HttpError: an Error instance carrying a numeric .status field.
  if (error instanceof Error && 'status' in error) {
    const status = (error as { status?: unknown }).status;
    if (typeof status === 'number') {
      return status;
    }
  }

  return null;
}

View file

@ -209,4 +209,89 @@ describe('SchemaValidator', () => {
expect(params.is_background).toBe(true);
});
});
// Verifies that SchemaValidator dispatches on the $schema field: draft-07
// (default) vs draft-2020-12, and degrades gracefully for versions AJV
// cannot compile.
describe('JSON Schema version support', () => {
  it('should support JSON Schema draft-2020-12', () => {
    const schema = {
      $schema: 'https://json-schema.org/draft/2020-12/schema',
      type: 'object',
      properties: {
        url: { type: 'string' },
      },
      required: ['url'],
    };
    const params = { url: 'https://example.com' };
    expect(SchemaValidator.validate(schema, params)).toBeNull();
  });

  it('should validate correctly with draft-2020-12 schema', () => {
    const schema = {
      $schema: 'https://json-schema.org/draft/2020-12/schema',
      type: 'object',
      properties: {
        count: { type: 'integer' },
      },
      required: ['count'],
    };
    const validParams = { count: 42 };
    const invalidParams = { count: 'not a number' };
    expect(SchemaValidator.validate(schema, validParams)).toBeNull();
    expect(SchemaValidator.validate(schema, invalidParams)).not.toBeNull();
  });

  it('should support JSON Schema draft-07 (default)', () => {
    const schema = {
      $schema: 'http://json-schema.org/draft-07/schema#',
      type: 'object',
      properties: {
        name: { type: 'string' },
      },
      required: ['name'],
    };
    const params = { name: 'test' };
    expect(SchemaValidator.validate(schema, params)).toBeNull();
  });

  it('should handle nested schemas with $schema', () => {
    const schema = {
      $schema: 'https://json-schema.org/draft/2020-12/schema',
      type: 'object',
      properties: {
        config: {
          type: 'object',
          properties: {
            enabled: { type: 'boolean' },
          },
        },
      },
    };
    const params = { config: { enabled: true } };
    expect(SchemaValidator.validate(schema, params)).toBeNull();
  });

  it('should support 2020-12 specific keywords like prefixItems', () => {
    // prefixItems replaced array-form "items" in draft-2020-12, so this only
    // validates when the 2020-12 Ajv instance is selected.
    const schema = {
      $schema: 'https://json-schema.org/draft/2020-12/schema',
      type: 'array',
      prefixItems: [{ type: 'string' }, { type: 'integer' }],
    };
    const params = ['hello', 42];
    expect(SchemaValidator.validate(schema, params)).toBeNull();
  });

  it('should gracefully handle unsupported schema versions', () => {
    // draft-2019-09 is not supported by Ajv by default
    const schema = {
      $schema: 'https://json-schema.org/draft/2019-09/schema',
      type: 'object',
      properties: {
        value: { type: 'string' },
      },
    };
    const params = { value: 'test' };
    // Should skip validation and return null (graceful degradation)
    expect(SchemaValidator.validate(schema, params)).toBeNull();
  });
});
});

View file

@ -4,33 +4,68 @@
* SPDX-License-Identifier: Apache-2.0
*/
import AjvPkg from 'ajv';
import AjvPkg, { type AnySchema, type Ajv } from 'ajv';
// Ajv2020 is the documented way to use draft-2020-12: https://ajv.js.org/json-schema.html#draft-2020-12
// eslint-disable-next-line import/no-internal-modules
import Ajv2020Pkg from 'ajv/dist/2020.js';
import * as addFormats from 'ajv-formats';
import { createDebugLogger } from './debugLogger.js';
// Ajv's ESM/CJS interop: use 'any' for compatibility as recommended by Ajv docs
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const AjvClass = (AjvPkg as any).default || AjvPkg;
const ajValidator = new AjvClass(
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const Ajv2020Class = (Ajv2020Pkg as any).default || Ajv2020Pkg;
const debugLogger = createDebugLogger('SchemaValidator');
const ajvOptions = {
// See: https://ajv.js.org/options.html#strict-mode-options
{
// strictSchema defaults to true and prevents use of JSON schemas that
// include unrecognized keywords. The JSON schema spec specifically allows
// for the use of non-standard keywords and the spec-compliant behavior
// is to ignore those keywords. Note that setting this to false also
// allows use of non-standard or custom formats (the unknown format value
// will be logged but the schema will still be considered valid).
strictSchema: false,
},
);
// strictSchema defaults to true and prevents use of JSON schemas that
// include unrecognized keywords. The JSON schema spec specifically allows
// for the use of non-standard keywords and the spec-compliant behavior
// is to ignore those keywords. Note that setting this to false also
// allows use of non-standard or custom formats (the unknown format value
// will be logged but the schema will still be considered valid).
strictSchema: false,
};
// Draft-07 validator (default)
const ajvDefault: Ajv = new AjvClass(ajvOptions);
// Draft-2020-12 validator for MCP servers using rmcp
const ajv2020: Ajv = new Ajv2020Class(ajvOptions);
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const addFormatsFunc = (addFormats as any).default || addFormats;
addFormatsFunc(ajValidator);
addFormatsFunc(ajvDefault);
addFormatsFunc(ajv2020);
// Canonical draft-2020-12 meta-schema URI (used by rmcp MCP servers)
const DRAFT_2020_12_SCHEMA = 'https://json-schema.org/draft/2020-12/schema';
/**
 * Selects the Ajv instance that matches the draft a schema declares.
 *
 * Schemas whose `$schema` field names the canonical draft-2020-12
 * meta-schema URI are validated with the dedicated 2020-12 instance;
 * everything else (including schemas with no `$schema` at all) falls
 * back to the default draft-07 validator.
 *
 * @param schema The JSON schema about to be compiled.
 * @returns The Ajv instance appropriate for the schema's draft.
 */
function getValidator(schema: AnySchema): Ajv {
  const declaresDraft2020 =
    typeof schema === 'object' &&
    schema !== null &&
    '$schema' in schema &&
    schema.$schema === DRAFT_2020_12_SCHEMA;
  return declaresDraft2020 ? ajv2020 : ajvDefault;
}
/**
* Simple utility to validate objects against JSON Schemas.
* Supports both draft-07 (default) and draft-2020-12 schemas.
*/
export class SchemaValidator {
/**
* Returns null if the data confroms to the schema described by schema (or if schema
* Returns null if the data conforms to the schema described by schema (or if schema
* is null). Otherwise, returns a string describing the error.
*/
static validate(schema: unknown | undefined, data: unknown): string | null {
@ -40,7 +75,30 @@ export class SchemaValidator {
if (typeof data !== 'object' || data === null) {
return 'Value of params must be an object';
}
const validate = ajValidator.compile(schema);
const anySchema = schema as AnySchema;
const validator = getValidator(anySchema);
// Try to compile and validate; skip validation if schema can't be compiled.
// This handles schemas using JSON Schema versions AJV doesn't support
// (e.g., draft-2019-09, future versions).
// This matches LenientJsonSchemaValidator behavior in mcp-client.ts.
let validate;
try {
validate = validator.compile(anySchema);
} catch (error) {
// Schema compilation failed (unsupported version, invalid $ref, etc.)
// Skip validation rather than blocking tool usage.
debugLogger.warn(
`Failed to compile schema (${
(schema as Record<string, unknown>)?.['$schema'] ?? '<no $schema>'
}): ${error instanceof Error ? error.message : String(error)}. ` +
'Skipping parameter validation.',
);
return null;
}
let valid = validate(data);
if (!valid && validate.errors) {
// Coerce string boolean values ("true"/"false") to actual booleans
@ -48,7 +106,7 @@ export class SchemaValidator {
valid = validate(data);
if (!valid && validate.errors) {
return ajValidator.errorsText(validate.errors, { dataVar: 'params' });
return validator.errorsText(validate.errors, { dataVar: 'params' });
}
}
return null;

View file

@ -137,7 +137,7 @@ export class WorkspaceContext {
const fullyResolvedPath = this.fullyResolvedPath(pathToCheck);
for (const dir of this.directories) {
if (this.isPathWithinRoot(fullyResolvedPath, dir)) {
if (isPathWithinRoot(fullyResolvedPath, dir)) {
return true;
}
}
@ -171,24 +171,6 @@ export class WorkspaceContext {
}
}
/**
* Checks if a path is within a given root directory.
* @param pathToCheck The absolute path to check
* @param rootDirectory The absolute root directory
* @returns True if the path is within the root directory, false otherwise
*/
private isPathWithinRoot(
pathToCheck: string,
rootDirectory: string,
): boolean {
const relative = path.relative(rootDirectory, pathToCheck);
return (
!relative.startsWith(`..${path.sep}`) &&
relative !== '..' &&
!path.isAbsolute(relative)
);
}
/**
* Checks if a file path is a symbolic link that points to a file.
*/
@ -200,3 +182,21 @@ export class WorkspaceContext {
}
}
}
/**
 * Checks if a path is within a given root directory.
 *
 * A path equal to the root itself counts as "within" (the relative
 * path is then the empty string). Paths that escape the root resolve
 * to something starting with `..`, or — across drives on Windows —
 * to an absolute path; both are rejected.
 *
 * @param pathToCheck The absolute path to check
 * @param rootDirectory The absolute root directory
 * @returns True if the path is within the root directory, false otherwise
 */
export function isPathWithinRoot(
  pathToCheck: string,
  rootDirectory: string,
): boolean {
  const rel = path.relative(rootDirectory, pathToCheck);
  // Cross-drive comparisons on Windows yield an absolute path.
  if (path.isAbsolute(rel)) {
    return false;
  }
  // Exactly the parent, or anything that climbs out of the root.
  if (rel === '..' || rel.startsWith(`..${path.sep}`)) {
    return false;
  }
  return true;
}

View file

@ -70,6 +70,8 @@ Creates a new query session with the Qwen Code.
| `authType` | `'openai' \| 'qwen-oauth'` | `'openai'` | Authentication type for the AI service. Using `'qwen-oauth'` in SDK is not recommended as credentials are stored in `~/.qwen` and may need periodic refresh. |
| `agents` | `SubagentConfig[]` | - | Configuration for subagents that can be invoked during the session. Subagents are specialized AI agents for specific tasks or domains. |
| `includePartialMessages` | `boolean` | `false` | When `true`, the SDK emits incomplete messages as they are being generated, allowing real-time streaming of the AI's response. |
| `resume` | `string` | - | Resume a previous session by providing its session ID. Equivalent to CLI's `--resume` flag. |
| `sessionId` | `string` | - | Specify a session ID for the new session. Ensures SDK and CLI use the same ID without resuming history. Equivalent to CLI's `--session-id` flag. |
### Timeouts

View file

@ -83,6 +83,7 @@ export class Query implements AsyncIterable<SDKMessage> {
private firstResultReceivedResolve?: () => void;
private readonly isSingleTurn: boolean;
private abortHandler: (() => void) | null = null;
constructor(
transport: Transport,
@ -91,7 +92,8 @@ export class Query implements AsyncIterable<SDKMessage> {
) {
this.transport = transport;
this.options = options;
this.sessionId = options.resume ?? randomUUID();
// Use sessionId from options if provided (for SDK-CLI alignment), otherwise generate one
this.sessionId = options.resume ?? options.sessionId ?? randomUUID();
this.inputStream = new Stream<SDKMessage>();
this.abortController = options.abortController ?? new AbortController();
this.isSingleTurn = singleTurn;
@ -125,16 +127,32 @@ export class Query implements AsyncIterable<SDKMessage> {
logger.error('Error during abort cleanup:', err);
});
} else {
this.abortController.signal.addEventListener('abort', () => {
this.abortHandler = () => {
this.inputStream.error(new AbortError('Query aborted by user'));
this.close().catch((err) => {
logger.error('Error during abort cleanup:', err);
});
});
};
this.abortController.signal.addEventListener('abort', this.abortHandler);
}
this.initialized = this.initialize();
this.initialized.catch(() => {});
this.initialized.catch((error) => {
// Propagate initialization errors to inputStream so users can catch them
const errorMessage =
error instanceof Error ? error.message : String(error);
if (
errorMessage.includes('Query is closed') &&
this.transport.exitError
) {
// If query was closed due to transport error, propagate the transport error
this.inputStream.error(this.transport.exitError);
} else {
this.inputStream.error(
error instanceof Error ? error : new Error(errorMessage),
);
}
});
this.startMessageRouter();
}
@ -627,6 +645,11 @@ export class Query implements AsyncIterable<SDKMessage> {
return Promise.reject(new Error('Query is closed'));
}
// Check if transport has already exited with an error
if (this.transport.exitError) {
return Promise.reject(this.transport.exitError);
}
if (subtype !== ControlRequestType.INITIALIZE) {
// Ensure all other control requests get processed after initialization
await this.initialized;
@ -719,16 +742,29 @@ export class Query implements AsyncIterable<SDKMessage> {
this.closed = true;
// Remove abort listener to prevent memory leak
if (this.abortHandler) {
this.abortController.signal.removeEventListener(
'abort',
this.abortHandler,
);
this.abortHandler = null;
}
// Use transport's exit error if available, otherwise use generic error
const transportError = this.transport.exitError;
const rejectionError = transportError ?? new Error('Query is closed');
for (const pending of this.pendingControlRequests.values()) {
pending.abortController.abort();
clearTimeout(pending.timeout);
pending.reject(new Error('Query is closed'));
pending.reject(rejectionError);
}
this.pendingControlRequests.clear();
// Clean up pending MCP responses
for (const pending of this.pendingMcpResponses.values()) {
pending.reject(new Error('Query is closed'));
pending.reject(rejectionError);
}
this.pendingMcpResponses.clear();

View file

@ -10,6 +10,8 @@ import { Query } from './Query.js';
import type { QueryOptions } from '../types/types.js';
import { QueryOptionsSchema } from '../types/queryOptionsSchema.js';
import { SdkLogger } from '../utils/logger.js';
import { randomUUID } from 'node:crypto';
import { validateSessionId } from '../utils/validation.js';
export type { QueryOptions };
@ -40,6 +42,9 @@ export function query({
const abortController = options.abortController ?? new AbortController();
// Generate or use provided session ID for SDK-CLI alignment
const sessionId = options.resume ?? options.sessionId ?? randomUUID();
const transport = new ProcessTransport({
pathToQwenExecutable,
spawnInfo,
@ -58,11 +63,13 @@ export function query({
authType: options.authType,
includePartialMessages: options.includePartialMessages,
resume: options.resume,
sessionId,
});
const queryOptions: QueryOptions = {
...options,
abortController,
sessionId,
};
const queryInstance = new Query(transport, queryOptions, isSingleTurn);
@ -82,9 +89,16 @@ export function query({
(async () => {
try {
await queryInstance.initialized;
// Skip writing if transport has already exited with an error
if (transport.exitError) {
return;
}
transport.write(serializeJsonLine(message));
} catch (err) {
logger.error('Error sending single-turn prompt:', err);
// Only log error if it's not due to transport already being closed
if (!transport.exitError) {
logger.error('Error sending single-turn prompt:', err);
}
}
})();
} else {
@ -107,6 +121,16 @@ function validateOptions(options: QueryOptions): SpawnInfo | undefined {
throw new Error(`Invalid QueryOptions: ${errors}`);
}
// Validate sessionId format if provided
if (options.sessionId) {
validateSessionId(options.sessionId, 'sessionId');
}
// Validate resume format if provided
if (options.resume) {
validateSessionId(options.resume, 'resume');
}
try {
return prepareSpawnInfo(options.pathToQwenExecutable);
} catch (error) {

View file

@ -261,9 +261,13 @@ export class ProcessTransport implements Transport {
}
if (this.options.resume) {
// Resume existing session
args.push('--resume', this.options.resume);
} else if (this.options.continue) {
args.push('--continue');
} else if (this.options.sessionId) {
// Start new session with specific session ID (for SDK-CLI alignment)
args.push('--session-id', this.options.sessionId);
}
return args;

Some files were not shown because too many files have changed in this diff Show more