diff --git a/.gcp/release-docker.yaml b/.gcp/release-docker.yaml index a3bd7e702..59220b8db 100644 --- a/.gcp/release-docker.yaml +++ b/.gcp/release-docker.yaml @@ -5,19 +5,19 @@ steps: entrypoint: 'npm' args: ['install'] - # Step 4: Authenticate for Docker (so we can push images to the artifact registry) + # Step 2: Authenticate for Docker (so we can push images to the artifact registry) - name: 'us-west1-docker.pkg.dev/gemini-code-dev/gemini-code-containers/gemini-code-builder' id: 'Authenticate docker' entrypoint: 'npm' args: ['run', 'auth'] - # Step 5: Build workspace packages + # Step 3: Build workspace packages - name: 'us-west1-docker.pkg.dev/gemini-code-dev/gemini-code-containers/gemini-code-builder' id: 'Build packages' entrypoint: 'npm' args: ['run', 'build:packages'] - # Step 6: Determine Docker Image Tag + # Step 4: Determine Docker Image Tag - name: 'us-west1-docker.pkg.dev/gemini-code-dev/gemini-code-containers/gemini-code-builder' id: 'Determine Docker Image Tag' entrypoint: 'bash' @@ -39,7 +39,7 @@ steps: echo "Determined image tag: $$FINAL_TAG" echo "$$FINAL_TAG" > /workspace/image_tag.txt - # Step 7: Build sandbox container image + # Step 5: Build sandbox container image - name: 'us-west1-docker.pkg.dev/gemini-code-dev/gemini-code-containers/gemini-code-builder' id: 'Build sandbox Docker image' entrypoint: 'bash' @@ -48,7 +48,7 @@ steps: - | export GEMINI_SANDBOX_IMAGE_TAG=$$(cat /workspace/image_tag.txt) echo "Using Docker image tag for build: $$GEMINI_SANDBOX_IMAGE_TAG" - npm run build:sandbox + npm run build:sandbox -- --output-file /workspace/final_image_uri.txt env: - 'GEMINI_SANDBOX=$_CONTAINER_TOOL' diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index e91e3553e..dbaad4453 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -52,4 +52,4 @@ body: id: additional-context attributes: label: Anything else we need to know? 
- description: Add any other context about the problem here. \ No newline at end of file + description: Add any other context about the problem here. diff --git a/.github/actions/post-coverage-comment/action.yml b/.github/actions/post-coverage-comment/action.yml index 20b670199..10a4afeb7 100644 --- a/.github/actions/post-coverage-comment/action.yml +++ b/.github/actions/post-coverage-comment/action.yml @@ -17,6 +17,9 @@ inputs: node_version: description: 'Node.js version for context in messages' required: true + os: + description: 'The os for context in messages' + required: true github_token: description: 'GitHub token for posting comments' required: true @@ -91,7 +94,7 @@ runs: echo "" >> "$comment_file" echo "" >> "$comment_file" - echo "_For detailed HTML reports, please see the 'coverage-reports-${{ inputs.node_version }}' artifact from the main CI run._" >> "$comment_file" + echo "_For detailed HTML reports, please see the 'coverage-reports-${{ inputs.node_version }}-${{ inputs.os }}' artifact from the main CI run._" >> "$comment_file" - name: Post Coverage Comment uses: thollander/actions-comment-pull-request@v3 diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 560eec250..773e4cc87 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -4,7 +4,7 @@ ## Dive Deeper - + ## Reviewer Test Plan diff --git a/.github/scripts/pr-triage.sh b/.github/scripts/pr-triage.sh index be86e393d..6b60432bc 100755 --- a/.github/scripts/pr-triage.sh +++ b/.github/scripts/pr-triage.sh @@ -24,7 +24,7 @@ process_pr() { ISSUE_NUMBER=$(echo "$PR_BODY" | grep -oE '#[0-9]+' | head -1 | sed 's/#//' 2>/dev/null || echo "") fi - # Pattern 2: Closes/Fixes/Resolves patterns (case insensitive) + # Pattern 2: Closes/Fixes/Resolves patterns (case-insensitive) if [ -z "$ISSUE_NUMBER" ]; then ISSUE_NUMBER=$(echo "$PR_BODY" | grep -iE '(closes?|fixes?|resolves?) 
#[0-9]+' | grep -oE '#[0-9]+' | head -1 | sed 's/#//' 2>/dev/null || echo "") fi diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d7c59a441..9ab2e9644 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,6 +1,6 @@ # .github/workflows/ci.yml -name: Gemini CLI CI +name: Qwen Code CI on: push: @@ -10,22 +10,19 @@ on: merge_group: jobs: - build: - name: Build and Lint + lint: + name: Lint runs-on: ubuntu-latest permissions: contents: read # For checkout - strategy: - matrix: - node-version: [20.x, 22.x, 24.x] steps: - name: Checkout repository uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - - name: Set up Node.js ${{ matrix.node-version }} + - name: Set up Node.js uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4 with: - node-version: ${{ matrix.node-version }} + node-version-file: '.nvmrc' cache: 'npm' - name: Install dependencies @@ -45,24 +42,18 @@ jobs: - name: Run type check run: npm run typecheck - - name: Upload build artifacts - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 - with: - name: build-artifacts-${{ matrix.node-version }} - path: | - packages/*/dist - package-lock.json # Only upload dist and lockfile test: name: Test - runs-on: ubuntu-latest - needs: build # This job depends on the 'build' job + runs-on: ${{ matrix.os }} + needs: lint permissions: contents: read checks: write pull-requests: write strategy: matrix: - node-version: [20.x, 22.x, 24.x] # Should match the build job's matrix + os: [ubuntu-latest, windows-latest, macos-latest] + node-version: [20.x, 22.x, 24.x] steps: - name: Checkout repository uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 @@ -73,26 +64,20 @@ jobs: node-version: ${{ matrix.node-version }} cache: 'npm' - - name: Download build artifacts - uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4 - with: - name: build-artifacts-${{ matrix.node-version }} - path: . 
# Download to the root, this will include package-lock.json and packages/*/dist - - # Restore/create package structure for dist folders if necessary. - # The download-artifact action with path: . should place them correctly if the - # upload paths were relative to the workspace root. - # Example: if uploaded `packages/cli/dist`, it will be at `./packages/cli/dist`. + - name: Build project + run: npm run build - name: Install dependencies for testing run: npm ci # Install fresh dependencies using the downloaded package-lock.json - name: Run tests and generate reports - run: NO_COLOR=true npm run test:ci + run: npm run test:ci + env: + NO_COLOR: true - name: Publish Test Report (for non-forks) if: always() && (github.event.pull_request.head.repo.full_name == github.repository) - uses: dorny/test-reporter@890a17cecf52a379fc869ab770a71657660be727 # v2 + uses: dorny/test-reporter@dc3a92680fcc15842eef52e8c4606ea7ce6bd3f3 # v2 with: name: Test Results (Node ${{ matrix.node-version }}) path: packages/*/junit.xml @@ -103,14 +88,14 @@ jobs: if: always() && (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != github.repository) uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 with: - name: test-results-fork-${{ matrix.node-version }} + name: test-results-fork-${{ matrix.node-version }}-${{ matrix.os }} path: packages/*/junit.xml - name: Upload coverage reports uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4 if: always() with: - name: coverage-reports-${{ matrix.node-version }} + name: coverage-reports-${{ matrix.node-version }}-${{ matrix.os }} path: packages/*/coverage post_coverage_comment: @@ -124,7 +109,9 @@ jobs: pull-requests: write # For commenting strategy: matrix: - node-version: [22.x] # Reduce noise by only posting the comment once + # Reduce noise by only posting the comment once + os: [ubuntu-latest] + node-version: [22.x] steps: - name: Checkout repository uses: 
actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 @@ -132,7 +119,7 @@ jobs: - name: Download coverage reports artifact uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4 with: - name: coverage-reports-${{ matrix.node-version }} + name: coverage-reports-${{ matrix.node-version }}-${{ matrix.os }} path: coverage_artifact # Download to a specific directory - name: Post Coverage Comment using Composite Action @@ -143,4 +130,24 @@ jobs: cli_full_text_summary_file: coverage_artifact/cli/coverage/full-text-summary.txt core_full_text_summary_file: coverage_artifact/core/coverage/full-text-summary.txt node_version: ${{ matrix.node-version }} + os: ${{ matrix.os }} github_token: ${{ secrets.GITHUB_TOKEN }} + + codeql: + name: CodeQL + runs-on: ubuntu-latest + permissions: + actions: read + contents: read + security-events: write + steps: + - name: Checkout + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + + - name: Initialize CodeQL + uses: github/codeql-action/init@181d5eefc20863364f96762470ba6f862bdef56b # v3 + with: + languages: javascript + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@181d5eefc20863364f96762470ba6f862bdef56b # v3 diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index 2997c26ff..a27c7a5c0 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -8,20 +8,21 @@ on: merge_group: jobs: - e2e-test: - name: E2E Test - ${{ matrix.sandbox }} + e2e-test-linux: + name: E2E Test (Linux) - ${{ matrix.sandbox }} runs-on: ubuntu-latest strategy: matrix: sandbox: [sandbox:none, sandbox:docker] + node-version: [20.x, 22.x, 24.x] steps: - name: Checkout repository uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 - - name: Set up Node.js + - name: Set up Node.js ${{ matrix.node-version }} uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4 with: - node-version: 20.x + node-version: ${{ matrix.node-version }} 
cache: 'npm' - name: Install dependencies @@ -48,3 +49,29 @@ jobs: OPENAI_BASE_URL: ${{ secrets.OPENAI_BASE_URL }} OPENAI_MODEL: ${{ secrets.OPENAI_MODEL }} run: npm run test:integration:${{ matrix.sandbox }} -- --verbose --keep-output + + e2e-test-macos: + name: E2E Test - macOS + runs-on: macos-latest + steps: + - name: Checkout repository + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4 + + - name: Set up Node.js + uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4 + with: + node-version: 20.x + cache: 'npm' + + - name: Install dependencies + run: npm ci + + - name: Build project + run: npm run build + + - name: Run E2E tests + env: + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + OPENAI_BASE_URL: ${{ secrets.OPENAI_BASE_URL }} + OPENAI_MODEL: ${{ secrets.OPENAI_MODEL }} + run: npm run test:e2e diff --git a/.gitignore b/.gitignore index 1d2bf2513..2a3d90762 100644 --- a/.gitignore +++ b/.gitignore @@ -38,11 +38,7 @@ packages/*/coverage/ # Generated files packages/cli/src/generated/ .integration-tests/ - - -# Logs -logs/ - +packages/vscode-ide-companion/*.vsix # Qwen Code Configs .qwen/ \ No newline at end of file diff --git a/.npmrc b/.npmrc index c48c9dc8c..4865e5388 100644 --- a/.npmrc +++ b/.npmrc @@ -1,2 +1 @@ -@google:registry=https://wombat-dressing-room.appspot.com -@ali:registry=https://registry.anpm.alibaba-inc.com \ No newline at end of file +@google:registry=https://wombat-dressing-room.appspot.com \ No newline at end of file diff --git a/.nvmrc b/.nvmrc new file mode 100644 index 000000000..209e3ef4b --- /dev/null +++ b/.nvmrc @@ -0,0 +1 @@ +20 diff --git a/.vscode/launch.json b/.vscode/launch.json index 605a464df..9b9d150d2 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -30,6 +30,18 @@ "GEMINI_SANDBOX": "false" } }, + { + "name": "Launch Companion VS Code Extension", + "type": "extensionHost", + "request": "launch", + "args": [ + 
"--extensionDevelopmentPath=${workspaceFolder}/packages/vscode-ide-companion" + ], + "outFiles": [ + "${workspaceFolder}/packages/vscode-ide-companion/dist/**/*.js" + ], + "preLaunchTask": "npm: build: vscode-ide-companion" + }, { "name": "Attach", "port": 9229, diff --git a/.vscode/tasks.json b/.vscode/tasks.json index 1ff9a62f4..58709bc92 100644 --- a/.vscode/tasks.json +++ b/.vscode/tasks.json @@ -11,6 +11,15 @@ "problemMatcher": [], "label": "npm: build", "detail": "scripts/build.sh" + }, + { + "type": "npm", + "script": "build", + "path": "packages/vscode-ide-companion", + "group": "build", + "problemMatcher": [], + "label": "npm: build: vscode-ide-companion", + "detail": "npm run build -w packages/vscode-ide-companion" } ] } diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index dd835a0a2..4bba5b5e1 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -210,7 +210,7 @@ npm run lint - Please adhere to the coding style, patterns, and conventions used throughout the existing codebase. - Consult [GEMINI.md](https://github.com/google-gemini/gemini-cli/blob/main/GEMINI.md) (typically found in the project root) for specific instructions related to AI-assisted development, including conventions for React, comments, and Git usage. -- **Imports:** Pay special attention to import paths. The project uses `eslint-rules/no-relative-cross-package-imports.js` to enforce restrictions on relative imports between packages. +- **Imports:** Pay special attention to import paths. The project uses ESLint to enforce restrictions on relative imports between packages. ### Project Structure @@ -272,19 +272,19 @@ To debug the CLI's React-based UI, you can use React DevTools. 
Ink, the library ## Sandboxing -### MacOS Seatbelt +### macOS Seatbelt -On MacOS, `gemini` uses Seatbelt (`sandbox-exec`) under a `permissive-open` profile (see `packages/cli/src/utils/sandbox-macos-permissive-open.sb`) that restricts writes to the project folder but otherwise allows all other operations and outbound network traffic ("open") by default. You can switch to a `restrictive-closed` profile (see `packages/cli/src/utils/sandbox-macos-restrictive-closed.sb`) that declines all operations and outbound network traffic ("closed") by default by setting `SEATBELT_PROFILE=restrictive-closed` in your environment or `.env` file. Available built-in profiles are `{permissive,restrictive}-{open,closed,proxied}` (see below for proxied networking). You can also switch to a custom profile `SEATBELT_PROFILE=` if you also create a file `.qwen/sandbox-macos-.sb` under your project settings directory `.gemini`. +On macOS, `qwen` uses Seatbelt (`sandbox-exec`) under a `permissive-open` profile (see `packages/cli/src/utils/sandbox-macos-permissive-open.sb`) that restricts writes to the project folder but otherwise allows all other operations and outbound network traffic ("open") by default. You can switch to a `restrictive-closed` profile (see `packages/cli/src/utils/sandbox-macos-restrictive-closed.sb`) that declines all operations and outbound network traffic ("closed") by default by setting `SEATBELT_PROFILE=restrictive-closed` in your environment or `.env` file. Available built-in profiles are `{permissive,restrictive}-{open,closed,proxied}` (see below for proxied networking). You can also switch to a custom profile `SEATBELT_PROFILE=` if you also create a file `.qwen/sandbox-macos-.sb` under your project settings directory `.qwen`. ### Container-based Sandboxing (All Platforms) -For stronger container-based sandboxing on MacOS or other platforms, you can set `GEMINI_SANDBOX=true|docker|podman|` in your environment or `.env` file. 
The specified command (or if `true` then either `docker` or `podman`) must be installed on the host machine. Once enabled, `npm run build:all` will build a minimal container ("sandbox") image and `npm start` will launch inside a fresh instance of that container. The first build can take 20-30s (mostly due to downloading of the base image) but after that both build and start overhead should be minimal. Default builds (`npm run build`) will not rebuild the sandbox. +For stronger container-based sandboxing on macOS or other platforms, you can set `GEMINI_SANDBOX=true|docker|podman|` in your environment or `.env` file. The specified command (or if `true` then either `docker` or `podman`) must be installed on the host machine. Once enabled, `npm run build:all` will build a minimal container ("sandbox") image and `npm start` will launch inside a fresh instance of that container. The first build can take 20-30s (mostly due to downloading of the base image) but after that both build and start overhead should be minimal. Default builds (`npm run build`) will not rebuild the sandbox. -Container-based sandboxing mounts the project directory (and system temp directory) with read-write access and is started/stopped/removed automatically as you start/stop Gemini CLI. Files created within the sandbox should be automatically mapped to your user/group on host machine. You can easily specify additional mounts, ports, or environment variables by setting `SANDBOX_{MOUNTS,PORTS,ENV}` as needed. You can also fully customize the sandbox for your projects by creating the files `.qwen/sandbox.Dockerfile` and/or `.qwen/sandbox.bashrc` under your project settings directory (`.gemini`) and running `gemini` with `BUILD_SANDBOX=1` to trigger building of your custom sandbox. +Container-based sandboxing mounts the project directory (and system temp directory) with read-write access and is started/stopped/removed automatically as you start/stop Gemini CLI. 
Files created within the sandbox should be automatically mapped to your user/group on host machine. You can easily specify additional mounts, ports, or environment variables by setting `SANDBOX_{MOUNTS,PORTS,ENV}` as needed. You can also fully customize the sandbox for your projects by creating the files `.qwen/sandbox.Dockerfile` and/or `.qwen/sandbox.bashrc` under your project settings directory (`.qwen`) and running `qwen` with `BUILD_SANDBOX=1` to trigger building of your custom sandbox. #### Proxied Networking -All sandboxing methods, including MacOS Seatbelt using `*-proxied` profiles, support restricting outbound network traffic through a custom proxy server that can be specified as `GEMINI_SANDBOX_PROXY_COMMAND=`, where `` must start a proxy server that listens on `:::8877` for relevant requests. See `docs/examples/proxy-script.md` for a minimal proxy that only allows `HTTPS` connections to `example.com:443` (e.g. `curl https://example.com`) and declines all other requests. The proxy is started and stopped automatically alongside the sandbox. +All sandboxing methods, including macOS Seatbelt using `*-proxied` profiles, support restricting outbound network traffic through a custom proxy server that can be specified as `GEMINI_SANDBOX_PROXY_COMMAND=`, where `` must start a proxy server that listens on `:::8877` for relevant requests. See `docs/examples/proxy-script.md` for a minimal proxy that only allows `HTTPS` connections to `example.com:443` (e.g. `curl https://example.com`) and declines all other requests. The proxy is started and stopped automatically alongside the sandbox. 
## Manual Publish diff --git a/QWEN.md b/QWEN.md index dff8c73ec..74185b4b3 100644 --- a/QWEN.md +++ b/QWEN.md @@ -71,7 +71,8 @@ JavaScript classes, by their nature, are designed to encapsulate internal state - Reduced Boilerplate and Increased Conciseness: Classes often promote the use of constructors, this binding, getters, setters, and other boilerplate that can unnecessarily bloat code. TypeScript interface and type declarations provide powerful static type checking without the runtime overhead or verbosity of class definitions. This allows for more succinct and readable code, aligning with JavaScript's strengths in functional programming. - Enhanced Readability and Predictability: Plain objects, especially when their structure is clearly defined by TypeScript interfaces, are often easier to read and understand. Their properties are directly accessible, and there's no hidden internal state or complex inheritance chains to navigate. This predictability leads to fewer bugs and a more maintainable codebase. - Simplified Immutability: While not strictly enforced, plain objects encourage an immutable approach to data. When you need to modify an object, you typically create a new one with the desired changes, rather than mutating the original. This pattern aligns perfectly with React's reconciliation process and helps prevent subtle bugs related to shared mutable state. + +- Simplified Immutability: While not strictly enforced, plain objects encourage an immutable approach to data. When you need to modify an object, you typically create a new one with the desired changes, rather than mutating the original. This pattern aligns perfectly with React's reconciliation process and helps prevent subtle bugs related to shared mutable state. - Better Serialization and Deserialization: Plain JavaScript objects are naturally easy to serialize to JSON and deserialize back, which is a common requirement in web development (e.g., for API communication or local storage). 
Classes, with their methods and prototypes, can complicate this process. diff --git a/docs/assets/theme-custom.png b/docs/assets/theme-custom.png new file mode 100644 index 000000000..0eb80f960 Binary files /dev/null and b/docs/assets/theme-custom.png differ diff --git a/docs/checkpointing.md b/docs/checkpointing.md index a678f7834..aade4233e 100644 --- a/docs/checkpointing.md +++ b/docs/checkpointing.md @@ -6,7 +6,7 @@ The Gemini CLI includes a Checkpointing feature that automatically saves a snaps When you approve a tool that modifies the file system (like `write_file` or `replace`), the CLI automatically creates a "checkpoint." This checkpoint includes: -1. **A Git Snapshot:** A commit is made in a special, shadow Git repository located in your home directory (`~/.qwen/history/`). This snapshot captures the complete state of your project files at that moment. It does **not** interfere with your own project's Git repository. +1. **A Git Snapshot:** A commit is made in a special, shadow Git repository located in your home directory (`~/.gemini/history/`). This snapshot captures the complete state of your project files at that moment. It does **not** interfere with your own project's Git repository. 2. **Conversation History:** The entire conversation you've had with the agent up to that point is saved. 3. **The Tool Call:** The specific tool call that was about to be executed is also stored. @@ -16,7 +16,7 @@ If you want to undo the change or simply go back, you can use the `/restore` com - Restore the conversation history in the CLI. - Re-propose the original tool call, allowing you to run it again, modify it, or simply ignore it. -All checkpoint data, including the Git snapshot and conversation history, is stored locally on your machine. The Git snapshot is stored in the shadow repository while the conversation history and tool calls are saved in a JSON file in your project's temporary directory, typically located at `~/.qwen/tmp//checkpoints`. 
+All checkpoint data, including the Git snapshot and conversation history, is stored locally on your machine. The Git snapshot is stored in the shadow repository while the conversation history and tool calls are saved in a JSON file in your project's temporary directory, typically located at `~/.gemini/tmp//checkpoints`. ## Enabling the Feature diff --git a/docs/cli/authentication.md b/docs/cli/authentication.md index 81d9a1750..e73dff530 100644 --- a/docs/cli/authentication.md +++ b/docs/cli/authentication.md @@ -1,6 +1,6 @@ # Authentication Setup -The Qwen Code CLI supports multiple authentication methods. On initial startup you'll need to configure **one** of the following authentication methods: +The Gemini CLI requires you to authenticate with Google's AI services. On initial startup you'll need to configure **one** of the following authentication methods: 1. **Login with Google (Gemini Code Assist):** - Use this option to log in with your google account. @@ -8,17 +8,18 @@ The Qwen Code CLI supports multiple authentication methods. On initial startup y - Note that the web login must be done in a browser that can communicate with the machine Gemini CLI is being run from. (Specifically, the browser will be redirected to a localhost url that Gemini CLI will be listening on). - Users may have to specify a GOOGLE_CLOUD_PROJECT if: 1. You have a Google Workspace account. Google Workspace is a paid service for businesses and organizations that provides a suite of productivity tools, including a custom email domain (e.g. your-name@your-company.com), enhanced security features, and administrative controls. These accounts are often managed by an employer or school. - 1. You have received a free Code Assist license through the [Google Developer Program](https://developers.google.com/program/plans-and-pricing) (including qualified Google Developer Experts) + 1. 
You have received a Gemini Code Assist license through the [Google Developer Program](https://developers.google.com/program/plans-and-pricing) (including qualified Google Developer Experts) 1. You have been assigned a license to a current Gemini Code Assist standard or enterprise subscription. 1. You are using the product outside the [supported regions](https://developers.google.com/gemini-code-assist/resources/available-locations) for free individual usage. 1. You are a Google account holder under the age of 18 - - If you fall into one of these categories, you must first configure a Google Cloud Project Id to use, [enable the Gemini for Cloud API](https://cloud.google.com/gemini/docs/discover/set-up-gemini#enable-api) and [configure access permissions](https://cloud.google.com/gemini/docs/discover/set-up-gemini#grant-iam). + - If you fall into one of these categories, you must first configure a Google Cloud Project ID to use, [enable the Gemini for Cloud API](https://cloud.google.com/gemini/docs/discover/set-up-gemini#enable-api) and [configure access permissions](https://cloud.google.com/gemini/docs/discover/set-up-gemini#grant-iam). You can temporarily set the environment variable in your current shell session using the following command: ```bash export GOOGLE_CLOUD_PROJECT="YOUR_PROJECT_ID" ``` + - For repeated use, you can add the environment variable to your [.env file](#persisting-environment-variables-with-env-files) or your shell's configuration file (like `~/.bashrc`, `~/.zshrc`, or `~/.profile`). For example, the following command adds the environment variable to a `~/.bashrc` file: ```bash @@ -33,12 +34,17 @@ The Qwen Code CLI supports multiple authentication methods. On initial startup y ```bash export GEMINI_API_KEY="YOUR_GEMINI_API_KEY" ``` - - For repeated use, you can add the environment variable to your [.env file](#persisting-environment-variables-with-env-files) or your shell's configuration file (like `~/.bashrc`, `~/.zshrc`, or `~/.profile`). 
For example, the following command adds the environment variable to a `~/.bashrc` file: + - For repeated use, you can add the environment variable to your [.env file](#persisting-environment-variables-with-env-files). + + - Alternatively you can export the API key from your shell's configuration file (like `~/.bashrc`, `~/.zshrc`, or `~/.profile`). For example, the following command adds the environment variable to a `~/.bashrc` file: + ```bash echo 'export GEMINI_API_KEY="YOUR_GEMINI_API_KEY"' >> ~/.bashrc source ~/.bashrc ``` + :warning: Be advised that when you export your API key inside your shell configuration file, any other process executed from the shell can read it. + 3. **Vertex AI:** - Obtain your Google Cloud API key: [Get an API Key](https://cloud.google.com/vertex-ai/generative-ai/docs/start/api-keys?usertype=newuser) - Set the `GOOGLE_API_KEY` environment variable. In the following methods, replace `YOUR_GOOGLE_API_KEY` with your Vertex AI API key: @@ -63,28 +69,36 @@ The Qwen Code CLI supports multiple authentication methods. On initial startup y export GOOGLE_CLOUD_PROJECT="YOUR_PROJECT_ID" export GOOGLE_CLOUD_LOCATION="YOUR_PROJECT_LOCATION" # e.g., us-central1 ``` - - For repeated use, you can add the environment variables to your [.env file](#persisting-environment-variables-with-env-files) or your shell's configuration file (like `~/.bashrc`, `~/.zshrc`, or `~/.profile`). For example, the following commands add the environment variables to a `~/.bashrc` file: + - For repeated use, you can add the environment variables to your [.env file](#persisting-environment-variables-with-env-files) + + - Alternatively you can export the environment variables from your shell's configuration file (like `~/.bashrc`, `~/.zshrc`, or `~/.profile`). 
For example, the following commands add the environment variables to a `~/.bashrc` file: + ```bash echo 'export GOOGLE_CLOUD_PROJECT="YOUR_PROJECT_ID"' >> ~/.bashrc echo 'export GOOGLE_CLOUD_LOCATION="YOUR_PROJECT_LOCATION"' >> ~/.bashrc source ~/.bashrc ``` + + :warning: Be advised that when you export your API key inside your shell configuration file, any other process executed from the shell can read it. + 4. **Cloud Shell:** - This option is only available when running in a Google Cloud Shell environment. - It automatically uses the credentials of the logged-in user in the Cloud Shell environment. - This is the default authentication method when running in Cloud Shell and no other method is configured. + :warning: Be advised that when you export your API key inside your shell configuration file, any other process executed from the shell can read it. + ### Persisting Environment Variables with `.env` Files -You can create a **`.qwen/.env`** file in your project directory or in your home directory. Creating a plain **`.env`** file also works, but `.qwen/.env` is recommended to keep Gemini variables isolated from other tools. +You can create a **`.gemini/.env`** file in your project directory or in your home directory. Creating a plain **`.env`** file also works, but `.gemini/.env` is recommended to keep Gemini variables isolated from other tools. Gemini CLI automatically loads environment variables from the **first** `.env` file it finds, using the following search order: 1. Starting in the **current directory** and moving upward toward `/`, for each directory it checks: - 1. `.qwen/.env` + 1. `.gemini/.env` 2. `.env` 2. If no file is found, it falls back to your **home directory**: - - `~/.qwen/.env` + - `~/.gemini/.env` - `~/.env` > **Important:** The search stops at the **first** file encountered—variables are **not merged** across multiple files. 
@@ -95,20 +109,36 @@ Gemini CLI automatically loads environment variables from the **first** `.env` f ```bash mkdir -p .gemini -echo 'GOOGLE_CLOUD_PROJECT="your-project-id"' >> .qwen/.env +echo 'GOOGLE_CLOUD_PROJECT="your-project-id"' >> .gemini/.env ``` **User-wide settings** (available in every directory): ```bash mkdir -p ~/.gemini -cat >> ~/.qwen/.env <<'EOF' +cat >> ~/.gemini/.env <<'EOF' GOOGLE_CLOUD_PROJECT="your-project-id" GEMINI_API_KEY="your-gemini-api-key" EOF ``` -5. **OpenAI Authentication:** - - Use OpenAI models instead of Google's Gemini models - - For detailed setup instructions, see [OpenAI Authentication](./openai-auth.md) - - Supports interactive setup, command line arguments, and environment variables +## Non-Interactive Mode / Headless Environments + +When running the Gemini CLI in a non-interactive environment, you cannot use the interactive login flow. +Instead, you must configure authentication using environment variables. + +The CLI will automatically detect if it is running in a non-interactive terminal and will use one of the +following authentication methods if available: + +1. **Gemini API Key:** + - Set the `GEMINI_API_KEY` environment variable. + - The CLI will use this key to authenticate with the Gemini API. + +2. **Vertex AI:** + - Set the `GOOGLE_GENAI_USE_VERTEXAI=true` environment variable. + - **Using an API Key:** Set the `GOOGLE_API_KEY` environment variable. + - **Using Application Default Credentials (ADC):** + - Run `gcloud auth application-default login` in your environment to configure ADC. + - Ensure the `GOOGLE_CLOUD_PROJECT` and `GOOGLE_CLOUD_LOCATION` environment variables are set. + +If none of these environment variables are set in a non-interactive session, the CLI will exit with an error. 
diff --git a/docs/cli/commands.md b/docs/cli/commands.md index 9e952a576..7bcb0acf3 100644 --- a/docs/cli/commands.md +++ b/docs/cli/commands.md @@ -6,6 +6,8 @@ Qwen Code supports several built-in commands to help you manage your session, cu Slash commands provide meta-level control over the CLI itself. +### Built-in Commands + - **`/bug`** - **Description:** File an issue about Qwen Code. By default, the issue is filed within the GitHub repository for Qwen Code. The string you enter after `/bug` will become the headline for the bug being filed. The default `/bug` behavior can be modified using the `bugCommand` setting in your `.qwen/settings.json` files. @@ -28,6 +30,9 @@ Slash commands provide meta-level control over the CLI itself. - **`/compress`** - **Description:** Replace the entire chat context with a summary. This saves on tokens used for future tasks while retaining a high level summary of what has happened. +- **`/copy`** + - **Description:** Copies the last output produced by Qwen Code to your clipboard, for easy sharing or reuse. + - **`/editor`** - **Description:** Open a dialog for selecting supported editors. @@ -90,6 +95,199 @@ Slash commands provide meta-level control over the CLI itself. - **`/quit`** (or **`/exit`**) - **Description:** Exit Qwen Code. +- **`/vim`** + - **Description:** Toggle vim mode on or off. When vim mode is enabled, the input area supports vim-style navigation and editing commands in both NORMAL and INSERT modes. 
+ - **Features:** + - **NORMAL mode:** Navigate with `h`, `j`, `k`, `l`; jump by words with `w`, `b`, `e`; go to line start/end with `0`, `$`, `^`; go to specific lines with `G` (or `gg` for first line) + - **INSERT mode:** Standard text input with escape to return to NORMAL mode + - **Editing commands:** Delete with `x`, change with `c`, insert with `i`, `a`, `o`, `O`; complex operations like `dd`, `cc`, `dw`, `cw` + - **Count support:** Prefix commands with numbers (e.g., `3h`, `5w`, `10G`) + - **Repeat last command:** Use `.` to repeat the last editing operation + - **Persistent setting:** Vim mode preference is saved to `~/.gemini/settings.json` and restored between sessions + - **Status indicator:** When enabled, shows `[NORMAL]` or `[INSERT]` in the footer + +### Custom Commands + +For a quick start, see the [example](#example-a-pure-function-refactoring-command) below. + +Custom commands allow you to save and reuse your favorite or most frequently used prompts as personal shortcuts within Gemini CLI. You can create commands that are specific to a single project or commands that are available globally across all your projects, streamlining your workflow and ensuring consistency. + +#### File Locations & Precedence + +Gemini CLI discovers commands from two locations, loaded in a specific order: + +1. **User Commands (Global):** Located in `~/.gemini/commands/`. These commands are available in any project you are working on. +2. **Project Commands (Local):** Located in `/.gemini/commands/`. These commands are specific to the current project and can be checked into version control to be shared with your team. + +If a command in the project directory has the same name as a command in the user directory, the **project command will always be used.** This allows projects to override global commands with project-specific versions. + +#### Naming and Namespacing + +The name of a command is determined by its file path relative to its `commands` directory. 
Subdirectories are used to create namespaced commands, with the path separator (`/` or `\`) being converted to a colon (`:`). + +- A file at `~/.gemini/commands/test.toml` becomes the command `/test`. +- A file at `<project>/.gemini/commands/git/commit.toml` becomes the namespaced command `/git:commit`. + +#### TOML File Format (v1) + +Your command definition files must be written in the TOML format and use the `.toml` file extension. + +##### Required Fields + +- `prompt` (String): The prompt that will be sent to the Gemini model when the command is executed. This can be a single-line or multi-line string. + +##### Optional Fields + +- `description` (String): A brief, one-line description of what the command does. This text will be displayed next to your command in the `/help` menu. **If you omit this field, a generic description will be generated from the filename.** + +#### Handling Arguments + +Custom commands support two powerful, low-friction methods for handling arguments. The CLI automatically chooses the correct method based on the content of your command's `prompt`. + +##### 1. Shorthand Injection with `{{args}}` + +If your `prompt` contains the special placeholder `{{args}}`, the CLI will replace that exact placeholder with all the text the user typed after the command name. This is perfect for simple, deterministic commands where you need to inject user input into a specific place in a larger prompt template. + +**Example (`git/fix.toml`):** + +```toml +# In: ~/.gemini/commands/git/fix.toml +# Invoked via: /git:fix "Button is misaligned on mobile" + +description = "Generates a fix for a given GitHub issue." +prompt = "Please analyze the staged git changes and provide a code fix for the issue described here: {{args}}." +``` + +The model will receive the final prompt: `Please analyze the staged git changes and provide a code fix for the issue described here: "Button is misaligned on mobile".` + +##### 2.
Default Argument Handling + +If your `prompt` does **not** contain the special placeholder `{{args}}`, the CLI uses a default behavior for handling arguments. + +If you provide arguments to the command (e.g., `/mycommand arg1`), the CLI will append the full command you typed to the end of the prompt, separated by two newlines. This allows the model to see both the original instructions and the specific arguments you just provided. + +If you do **not** provide any arguments (e.g., `/mycommand`), the prompt is sent to the model exactly as it is, with nothing appended. + +**Example (`changelog.toml`):** + +This example shows how to create a robust command by defining a role for the model, explaining where to find the user's input, and specifying the expected format and behavior. + +```toml +# In: <project>/.gemini/commands/changelog.toml +# Invoked via: /changelog 1.2.0 added "Support for default argument parsing." + +description = "Adds a new entry to the project's CHANGELOG.md file." +prompt = """ +# Task: Update Changelog + +You are an expert maintainer of this software project. A user has invoked a command to add a new entry to the changelog. + +**The user's raw command is appended below your instructions.** + +Your task is to parse the `<version>`, `<change_type>`, and `<message>` from their input and use the `write_file` tool to correctly update the `CHANGELOG.md` file. + +## Expected Format +The command follows this format: `/changelog <version> <type> <message>` +- `<type>` must be one of: "added", "changed", "fixed", "removed". + +## Behavior +1. Read the `CHANGELOG.md` file. +2. Find the section for the specified `<version>`. +3. Add the `<message>` under the correct `<type>` heading. +4. If the version or type section doesn't exist, create it. +5. Adhere strictly to the "Keep a Changelog" format. +""" +``` + +When you run `/changelog 1.2.0 added "New feature"`, the final text sent to the model will be the original prompt followed by two newlines and the command you typed. + +##### 3.
Executing Shell Commands with `!{...}` + +You can make your commands dynamic by executing shell commands directly within your `prompt` and injecting their output. This is ideal for gathering context from your local environment, like reading file content or checking the status of Git. + +When a custom command attempts to execute a shell command, Gemini CLI will now prompt you for confirmation before proceeding. This is a security measure to ensure that only intended commands can be run. + +**How It Works:** + +1. **Inject Commands:** Use the `!{...}` syntax in your `prompt` to specify where the command should be run and its output injected. +2. **Confirm Execution:** When you run the command, a dialog will appear listing the shell commands the prompt wants to execute. +3. **Grant Permission:** You can choose to: + - **Allow once:** The command(s) will run this one time. + - **Allow always for this session:** The command(s) will be added to a temporary allowlist for the current CLI session and will not require confirmation again. + - **No:** Cancel the execution of the shell command(s). + +The CLI still respects the global `excludeTools` and `coreTools` settings. A command will be blocked without a confirmation prompt if it is explicitly disallowed in your configuration. + +**Example (`git/commit.toml`):** + +This command gets the staged git diff and uses it to ask the model to write a commit message. + +````toml +# In: /.gemini/commands/git/commit.toml +# Invoked via: /git:commit + +description = "Generates a Git commit message based on staged changes." + +# The prompt uses !{...} to execute the command and inject its output. 
+prompt = """ +Please generate a Conventional Commit message based on the following git diff: + +```diff +!{git diff --staged} +```` + +""" + +```` + +When you run `/git:commit`, the CLI first executes `git diff --staged`, then replaces `!{git diff --staged}` with the output of that command before sending the final, complete prompt to the model. + +--- + +#### Example: A "Pure Function" Refactoring Command + +Let's create a global command that asks the model to refactor a piece of code. + +**1. Create the file and directories:** + +First, ensure the user commands directory exists, then create a `refactor` subdirectory for organization and the final TOML file. + +```bash +mkdir -p ~/.gemini/commands/refactor +touch ~/.gemini/commands/refactor/pure.toml +```` + +**2. Add the content to the file:** + +Open `~/.gemini/commands/refactor/pure.toml` in your editor and add the following content. We are including the optional `description` for best practice. + +```toml +# In: ~/.gemini/commands/refactor/pure.toml +# This command will be invoked via: /refactor:pure + +description = "Asks the model to refactor the current context into a pure function." + +prompt = """ +Please analyze the code I've provided in the current context. +Refactor it into a pure function. + +Your response should include: +1. The refactored, pure function code block. +2. A brief explanation of the key changes you made and why they contribute to purity. +""" +``` + +**3. Run the Command:** + +That's it! You can now run your command in the CLI. First, you might add a file to the context, and then invoke your command: + +``` +> @my-messy-function.js +> /refactor:pure +``` + +Gemini CLI will then execute the multi-line prompt defined in your TOML file. + ## At commands (`@`) At commands are used to include the content of files or directories as part of your prompt to Gemini. These commands include git-aware filtering. 
@@ -119,13 +317,13 @@ At commands are used to include the content of files or directories as part of y ## Shell mode & passthrough commands (`!`) -The `!` prefix lets you interact with your system's shell directly from within Qwen Code. +The `!` prefix lets you interact with your system's shell directly from within Gemini CLI. - **`!<shell_command>`** - - **Description:** Execute the given `<shell_command>` in your system's default shell. Any output or errors from the command are displayed in the terminal. + - **Description:** Execute the given `<shell_command>` using `bash` on Linux/macOS or `cmd.exe` on Windows. Any output or errors from the command are displayed in the terminal. - **Examples:** - - `!ls -la` (executes `ls -la` and returns to Qwen Code) - - `!git status` (executes `git status` and returns to Qwen Code) + - `!ls -la` (executes `ls -la` and returns to Gemini CLI) + - `!git status` (executes `git status` and returns to Gemini CLI) - **`!` (Toggle shell mode)** - **Description:** Typing `!` on its own toggles shell mode. @@ -133,6 +331,8 @@ The `!` prefix lets you interact with your system's shell directly from within Q - When active, shell mode uses a different coloring and a "Shell Mode Indicator". - While in shell mode, text you type is interpreted directly as a shell command. - **Exiting shell mode:** - - When exited, the UI reverts to its standard appearance and normal Qwen Code behavior resumes. + - When exited, the UI reverts to its standard appearance and normal Gemini CLI behavior resumes. - **Caution for all `!` usage:** Commands you execute in shell mode have the same permissions and impact as if you ran them directly in your terminal. + +- **Environment Variable:** When a command is executed via `!` or in shell mode, the `GEMINI_CLI=1` environment variable is set in the subprocess's environment. This allows scripts or tools to detect if they are being run from within the Gemini CLI.
diff --git a/docs/cli/configuration.md b/docs/cli/configuration.md index 6b2cde3ef..3977bfc78 100644 --- a/docs/cli/configuration.md +++ b/docs/cli/configuration.md @@ -24,7 +24,7 @@ Gemini CLI uses `settings.json` files for persistent configuration. There are th - **Location:** `.qwen/settings.json` within your project's root directory. - **Scope:** Applies only when running Gemini CLI from that specific project. Project settings override user settings. - **System settings file:** - - **Location:** `/etc/gemini-cli/settings.json` (Linux), `C:\ProgramData\gemini-cli\settings.json` (Windows) or `/Library/Application Support/GeminiCli/settings.json` (macOS). + - **Location:** `/etc/gemini-cli/settings.json` (Linux), `C:\ProgramData\gemini-cli\settings.json` (Windows) or `/Library/Application Support/GeminiCli/settings.json` (macOS). The path can be overridden using the `GEMINI_CLI_SYSTEM_SETTINGS_PATH` environment variable. - **Scope:** Applies to all Gemini CLI sessions on the system, for all users. System settings override user and project settings. May be useful for system administrators at enterprises to have controls over users' Gemini CLI setups. **Note on environment variables in settings:** String values within your `settings.json` files can reference environment variables using either `$VAR_NAME` or `${VAR_NAME}` syntax. These variables will be automatically resolved when the settings are loaded. For example, if you have an environment variable `MY_API_TOKEN`, you could use it in `settings.json` like this: `"apiKey": "$MY_API_TOKEN"`. @@ -81,6 +81,18 @@ In addition to a project settings file, a project's `.gemini` directory can cont `excludeTools` for `run_shell_command` are based on simple string matching and can be easily bypassed. This feature is **not a security mechanism** and should not be relied upon to safely execute untrusted code. It is recommended to use `coreTools` to explicitly select commands that can be executed. 
+- **`allowMCPServers`** (array of strings): + - **Description:** Allows you to specify a list of MCP server names that should be made available to the model. This can be used to restrict the set of MCP servers to connect to. Note that this will be ignored if `--allowed-mcp-server-names` is set. + - **Default:** All MCP servers are available for use by the Gemini model. + - **Example:** `"allowMCPServers": ["myPythonServer"]`. + - **Security Note:** This uses simple string matching on MCP server names, which can be modified. If you're a system administrator looking to prevent users from bypassing this, consider configuring the `mcpServers` at the system settings level such that the user will not be able to configure any MCP servers of their own. This should not be used as an airtight security mechanism. + +- **`excludeMCPServers`** (array of strings): + - **Description:** Allows you to specify a list of MCP server names that should be excluded from the model. A server listed in both `excludeMCPServers` and `allowMCPServers` is excluded. Note that this will be ignored if `--allowed-mcp-server-names` is set. + - **Default**: No MCP servers excluded. + - **Example:** `"excludeMCPServers": ["myNodeServer"]`. + - **Security Note:** This uses simple string matching on MCP server names, which can be modified. If you're a system administrator looking to prevent users from bypassing this, consider configuring the `mcpServers` at the system settings level such that the user will not be able to configure any MCP servers of their own. This should not be used as an airtight security mechanism. + - **`autoAccept`** (boolean): - **Description:** Controls whether the CLI automatically accepts and executes tool calls that are considered safe (e.g., read-only operations) without explicit user confirmation. If set to `true`, the CLI will bypass the confirmation prompt for tools deemed safe. 
- **Default:** `false` @@ -91,6 +103,11 @@ In addition to a project settings file, a project's `.gemini` directory can cont - **Default:** `"Default"` - **Example:** `"theme": "GitHub"` +- **`vimMode`** (boolean): + - **Description:** Enables or disables vim mode for input editing. When enabled, the input area supports vim-style navigation and editing commands with NORMAL and INSERT modes. The vim mode status is displayed in the footer and persists between sessions. + - **Default:** `false` + - **Example:** `"vimMode": true` + - **`sandbox`** (boolean or string): - **Description:** Controls whether and how to use sandboxing for tool execution. If set to `true`, Gemini CLI uses a pre-built `gemini-cli-sandbox` Docker image. For more information, see [Sandboxing](#sandboxing). - **Default:** `false` @@ -120,6 +137,8 @@ In addition to a project settings file, a project's `.gemini` directory can cont - `cwd` (string, optional): The working directory in which to start the server. - `timeout` (number, optional): Timeout in milliseconds for requests to this MCP server. - `trust` (boolean, optional): Trust this server and bypass all tool call confirmations. + - `includeTools` (array of strings, optional): List of tool names to include from this MCP server. When specified, only the tools listed here will be available from this server (whitelist behavior). If not specified, all tools from the server are enabled by default. + - `excludeTools` (array of strings, optional): List of tool names to exclude from this MCP server. Tools listed here will not be available to the model, even if they are exposed by the server. **Note:** `excludeTools` takes precedence over `includeTools` - if a tool is in both lists, it will be excluded. 
- **Example:** ```json "mcpServers": { @@ -127,12 +146,14 @@ In addition to a project settings file, a project's `.gemini` directory can cont "command": "python", "args": ["mcp_server.py", "--port", "8080"], "cwd": "./mcp_tools/python", - "timeout": 5000 + "timeout": 5000, + "includeTools": ["safe_tool", "file_reader"], }, "myNodeServer": { "command": "node", "args": ["mcp_server.js"], - "cwd": "./mcp_tools/node" + "cwd": "./mcp_tools/node", + "excludeTools": ["dangerous_tool", "file_deleter"] }, "myDockerServer": { "command": "docker", @@ -206,45 +227,17 @@ In addition to a project settings file, a project's `.gemini` directory can cont "maxSessionTurns": 10 ``` -- **`enableOpenAILogging`** (boolean): - - **Description:** Enables or disables logging of OpenAI API calls for debugging and analysis. When enabled, all requests and responses to the OpenAI API are logged to files in the `~/.qwen/logs/` directory. - - **Default:** `false` +- **`summarizeToolOutput`** (object): + - **Description:** Enables or disables the summarization of tool output. You can specify the token budget for the summarization using the `tokenBudget` setting. + - Note: Currently only the `run_shell_command` tool is supported. + - **Default:** `{}` (Disabled by default) - **Example:** - ```json - "enableOpenAILogging": true - ``` - -- **`systemPromptMappings`** (array): - - **Description:** Configures custom system prompt templates for specific model names and base URLs. This allows you to use different system prompts for different AI models or API endpoints. - - **Default:** `undefined` (uses default system prompt) - - **Properties:** - - **`baseUrls`** (array of strings, optional): Array of base URLs to exactly match against `OPENAI_BASE_URL` environment variable. If not specified, matches any base URL. - - **`modelNames`** (array of strings, optional): Array of model names to exactly match against `OPENAI_MODEL` environment variable. If not specified, matches any model. 
- - **`template`** (string): The system prompt template to use when both baseUrl and modelNames match. Supports placeholders: - - `{RUNTIME_VARS_IS_GIT_REPO}`: Replaced with `true` or `false` based on whether the current directory is a git repository - - `{RUNTIME_VARS_SANDBOX}`: Replaced with the sandbox type (e.g., `"sandbox-exec"`, `"docker"`, or empty string) - - **Example:** - - ```json - "systemPromptMappings": [ - { - "baseUrls": [ - "https://dashscope.aliyuncs.com/compatible-mode/v1", - "https://dashscope-intl.aliyuncs.com/compatible-mode/v1" - ], - "modelNames": ["qwen3-coder-plus"], - "template": "SYSTEM_TEMPLATE:{\"name\":\"qwen3_coder\",\"params\":{\"is_git_repository\":{RUNTIME_VARS_IS_GIT_REPO},\"sandbox\":\"{RUNTIME_VARS_SANDBOX}\"}}" - }, - { - "modelNames": ["gpt-4"], - "template": "You are a helpful AI assistant specialized in coding tasks. Current sandbox: {RUNTIME_VARS_SANDBOX}" - }, - { - "baseUrls": ["api.openai.com"], - "template": "You are an AI coding assistant. Working in git repository: {RUNTIME_VARS_IS_GIT_REPO}" + "summarizeToolOutput": { + "run_shell_command": { + "tokenBudget": 2000 } - ] + } ``` ### Example `settings.json`: @@ -274,22 +267,11 @@ In addition to a project settings file, a project's `.gemini` directory can cont "hideTips": false, "hideBanner": false, "maxSessionTurns": 10, - "enableOpenAILogging": true, - "systemPromptMappings": [ - { - "baseUrl": "dashscope", - "modelNames": ["qwen3"], - "template": "SYSTEM_TEMPLATE:{\"name\":\"qwen3_coder\",\"params\":{\"VARS_IS_GIT_REPO\":{VARS_IS_GIT_REPO},\"sandbox\":\"{sandbox}\"}}" - }, - { - "modelNames": ["gpt-4"], - "template": "You are a helpful AI assistant specialized in coding tasks. Current sandbox: {sandbox}" - }, - { - "baseUrl": "api.openai.com", - "template": "You are an AI coding assistant. 
Working in git repository: {VARS_IS_GIT_REPO}" + "summarizeToolOutput": { + "run_shell_command": { + "tokenBudget": 100 } - ] + } } ``` @@ -367,6 +349,11 @@ Arguments passed directly when running the CLI can override other configurations - Example: `npm start -- --model gemini-1.5-pro-latest` - **`--prompt `** (**`-p `**): - Used to pass a prompt directly to the command. This invokes Gemini CLI in a non-interactive mode. +- **`--prompt-interactive `** (**`-i `**): + - Starts an interactive session with the provided prompt as the initial input. + - The prompt is processed within the interactive session, not before it. + - Cannot be used when piping input from stdin. + - Example: `gemini -i "explain this code"` - **`--sandbox`** (**`-s`**): - Enables sandbox mode for this session. - **`--sandbox-image`**: @@ -390,13 +377,16 @@ Arguments passed directly when running the CLI can override other configurations - **`--telemetry-log-prompts`**: - Enables logging of prompts for telemetry. See [telemetry](../telemetry.md) for more information. - **`--checkpointing`**: - - Enables [checkpointing](./commands.md#checkpointing-commands). + - Enables [checkpointing](../checkpointing.md). - **`--extensions `** (**`-e `**): - Specifies a list of extensions to use for the session. If not provided, all available extensions are used. - Use the special term `gemini -e none` to disable all extensions. - Example: `gemini -e my-extension -e my-other-extension` - **`--list-extensions`** (**`-l`**): - Lists all available extensions and exits. +- **`--proxy`**: + - Sets the proxy for the CLI. + - Example: `--proxy http://localhost:7890`. - **`--version`**: - Displays the version of the CLI. - **`--openai-logging`**: @@ -445,13 +435,13 @@ This example demonstrates how you can provide general project context, specific - **Hierarchical Loading and Precedence:** The CLI implements a sophisticated hierarchical memory system by loading context files (e.g., `GEMINI.md`) from several locations. 
Content from files lower in this list (more specific) typically overrides or supplements content from files higher up (more general). The exact concatenation order and final context can be inspected using the `/memory show` command. The typical loading order is: 1. **Global Context File:** - - Location: `~/.qwen/` (e.g., `~/.qwen/GEMINI.md` in your user home directory). + - Location: `~/.gemini/` (e.g., `~/.gemini/GEMINI.md` in your user home directory). - Scope: Provides default instructions for all your projects. 2. **Project Root & Ancestors Context Files:** - Location: The CLI searches for the configured context file in the current working directory and then in each parent directory up to either the project root (identified by a `.git` folder) or your home directory. - Scope: Provides context relevant to the entire project or a significant portion of it. 3. **Sub-directory Context Files (Contextual/Local):** - - Location: The CLI also scans for the configured context file in subdirectories _below_ the current working directory (respecting common ignore patterns like `node_modules`, `.git`, etc.). + - Location: The CLI also scans for the configured context file in subdirectories _below_ the current working directory (respecting common ignore patterns like `node_modules`, `.git`, etc.). The breadth of this search is limited to 200 directories by default, but can be configured with a `memoryDiscoveryMaxDirs` field in your `settings.json` file. - Scope: Allows for highly specific instructions relevant to a particular component, module, or subsection of your project. - **Concatenation & UI Indication:** The contents of all found context files are concatenated (with separators indicating their origin and path) and provided as part of the system prompt to the Gemini model. The CLI footer displays the count of loaded context files, giving you a quick visual cue about the active instructional context. 
- **Commands for Memory Management:** @@ -473,7 +463,7 @@ Sandboxing is disabled by default, but you can enable it in a few ways: By default, it uses a pre-built `gemini-cli-sandbox` Docker image. -For project-specific sandboxing needs, you can create a custom Dockerfile at `.qwen/sandbox.Dockerfile` in your project's root directory. This Dockerfile can be based on the base sandbox image: +For project-specific sandboxing needs, you can create a custom Dockerfile at `.gemini/sandbox.Dockerfile` in your project's root directory. This Dockerfile can be based on the base sandbox image: ```dockerfile FROM gemini-cli-sandbox @@ -484,7 +474,7 @@ FROM gemini-cli-sandbox # COPY ./my-config /app/my-config ``` -When `.qwen/sandbox.Dockerfile` exists, you can use `BUILD_SANDBOX` environment variable when running Gemini CLI to automatically build the custom sandbox image: +When `.gemini/sandbox.Dockerfile` exists, you can use `BUILD_SANDBOX` environment variable when running Gemini CLI to automatically build the custom sandbox image: ```bash BUILD_SANDBOX=1 gemini -s diff --git a/docs/cli/themes.md b/docs/cli/themes.md index 226e387ed..df8918680 100644 --- a/docs/cli/themes.md +++ b/docs/cli/themes.md @@ -32,6 +32,84 @@ Gemini CLI comes with a selection of pre-defined themes, which you can list usin Selected themes are saved in Gemini CLI's [configuration](./configuration.md) so your preference is remembered across sessions. +--- + +## Custom Color Themes + +Gemini CLI allows you to create your own custom color themes by specifying them in your `settings.json` file. This gives you full control over the color palette used in the CLI. + +### How to Define a Custom Theme + +Add a `customThemes` block to your user, project, or system `settings.json` file. Each custom theme is defined as an object with a unique name and a set of color keys. 
For example: + +```json +{ + "customThemes": { + "MyCustomTheme": { + "name": "MyCustomTheme", + "type": "custom", + "Background": "#181818", + "Foreground": "#F8F8F2", + "LightBlue": "#82AAFF", + "AccentBlue": "#61AFEF", + "AccentPurple": "#C678DD", + "AccentCyan": "#56B6C2", + "AccentGreen": "#98C379", + "AccentYellow": "#E5C07B", + "AccentRed": "#E06C75", + "Comment": "#5C6370", + "Gray": "#ABB2BF" + } + } +} +``` + +**Color keys:** + +- `Background` +- `Foreground` +- `LightBlue` +- `AccentBlue` +- `AccentPurple` +- `AccentCyan` +- `AccentGreen` +- `AccentYellow` +- `AccentRed` +- `Comment` +- `Gray` + +**Required Properties:** + +- `name` (must match the key in the `customThemes` object and be a string) +- `type` (must be the string `"custom"`) +- `Background` +- `Foreground` +- `LightBlue` +- `AccentBlue` +- `AccentPurple` +- `AccentCyan` +- `AccentGreen` +- `AccentYellow` +- `AccentRed` +- `Comment` +- `Gray` + +You can use either hex codes (e.g., `#FF0000`) **or** standard CSS color names (e.g., `coral`, `teal`, `blue`) for any color value. See [CSS color names](https://developer.mozilla.org/en-US/docs/Web/CSS/color_value#color_keywords) for a full list of supported names. + +You can define multiple custom themes by adding more entries to the `customThemes` object. + +### Example Custom Theme + +Custom theme example + +### Using Your Custom Theme + +- Select your custom theme using the `/theme` command in Gemini CLI. Your custom theme will appear in the theme selection dialog. +- Or, set it as the default by adding `"theme": "MyCustomTheme"` to your `settings.json`. +- Custom themes can be set at the user, project, or system level, and follow the same [configuration precedence](./configuration.md) as other settings. 
+ +--- + ## Dark Themes ### ANSI diff --git a/docs/core/tools-api.md b/docs/core/tools-api.md index 9a9021290..e10333d24 100644 --- a/docs/core/tools-api.md +++ b/docs/core/tools-api.md @@ -8,7 +8,7 @@ The Gemini CLI core (`packages/core`) features a robust system for defining, reg - `name`: A unique internal name (used in API calls to Gemini). - `displayName`: A user-friendly name. - `description`: A clear explanation of what the tool does, which is provided to the Gemini model. - - `parameterSchema`: A JSON schema defining the parameters the tool accepts. This is crucial for the Gemini model to understand how to call the tool correctly. + - `parameterSchema`: A JSON schema defining the parameters that the tool accepts. This is crucial for the Gemini model to understand how to call the tool correctly. - `validateToolParams()`: A method to validate incoming parameters. - `getDescription()`: A method to provide a human-readable description of what the tool will do with specific parameters before execution. - `shouldConfirmExecute()`: A method to determine if user confirmation is required before execution (e.g., for potentially destructive operations). diff --git a/docs/extension.md b/docs/extension.md index 26df80832..0bdede0b9 100644 --- a/docs/extension.md +++ b/docs/extension.md @@ -6,14 +6,14 @@ Gemini CLI supports extensions that can be used to configure and extend its func On startup, Gemini CLI looks for extensions in two locations: -1. `/.qwen/extensions` -2. `/.qwen/extensions` +1. `/.gemini/extensions` +2. `/.gemini/extensions` Gemini CLI loads all extensions from both locations. If an extension with the same name exists in both locations, the extension in the workspace directory takes precedence. Within each location, individual extensions exist as a directory that contains a `gemini-extension.json` file. 
For example: -`/.qwen/extensions/my-extension/gemini-extension.json` +`/.gemini/extensions/my-extension/gemini-extension.json` ### `gemini-extension.json` diff --git a/docs/integration-tests.md b/docs/integration-tests.md index 60dfa30b3..53ddd1550 100644 --- a/docs/integration-tests.md +++ b/docs/integration-tests.md @@ -132,7 +132,7 @@ This structure makes it easy to locate the artifacts for a specific test run, fi ## Continuous integration -To ensure the integration tests are always run, a GitHub Actions workflow is defined in `.github/workflows/e2e.yml`. This workflow automatically runs the integration tests on every pull request and push to the `main` branch. +To ensure the integration tests are always run, a GitHub Actions workflow is defined in `.github/workflows/e2e.yml`. This workflow automatically runs the integrations tests for pull requests against the `main` branch, or when a pull request is added to a merge queue. The workflow runs the tests in different sandboxing environments to ensure Gemini CLI is tested across each: diff --git a/docs/telemetry.md b/docs/telemetry.md index f4105f4b5..2209ee0bb 100644 --- a/docs/telemetry.md +++ b/docs/telemetry.md @@ -8,7 +8,7 @@ Gemini CLI's telemetry system is built on the **[OpenTelemetry] (OTEL)** standar ## Enabling telemetry -You can enable telemetry in multiple ways. Configuration is primarily managed via the [`.qwen/settings.json` file](./cli/configuration.md) and environment variables, but CLI flags can override these settings for a specific session. +You can enable telemetry in multiple ways. Configuration is primarily managed via the [`.gemini/settings.json` file](./cli/configuration.md) and environment variables, but CLI flags can override these settings for a specific session. ### Order of precedence @@ -19,13 +19,14 @@ The following lists the precedence for applying telemetry settings, with items l - `--telemetry-target `: Overrides `telemetry.target`. 
- `--telemetry-otlp-endpoint `: Overrides `telemetry.otlpEndpoint`. - `--telemetry-log-prompts` / `--no-telemetry-log-prompts`: Overrides `telemetry.logPrompts`. + - `--telemetry-outfile `: Redirects telemetry output to a file. See [Exporting to a file](#exporting-to-a-file). 1. **Environment variables:** - `OTEL_EXPORTER_OTLP_ENDPOINT`: Overrides `telemetry.otlpEndpoint`. -1. **Workspace settings file (`.qwen/settings.json`):** Values from the `telemetry` object in this project-specific file. +1. **Workspace settings file (`.gemini/settings.json`):** Values from the `telemetry` object in this project-specific file. -1. **User settings file (`~/.qwen/settings.json`):** Values from the `telemetry` object in this global user file. +1. **User settings file (`~/.gemini/settings.json`):** Values from the `telemetry` object in this global user file. 1. **Defaults:** applied if not set by any of the above. - `telemetry.enabled`: `false` @@ -38,7 +39,7 @@ The `--target` argument to this script _only_ overrides the `telemetry.target` f ### Example settings -The following code can be added to your workspace (`.qwen/settings.json`) or user (`~/.qwen/settings.json`) settings to enable telemetry and send the output to Google Cloud: +The following code can be added to your workspace (`.gemini/settings.json`) or user (`~/.gemini/settings.json`) settings to enable telemetry and send the output to Google Cloud: ```json { @@ -50,6 +51,16 @@ The following code can be added to your workspace (`.qwen/settings.json`) or use } ``` +### Exporting to a file + +You can export all telemetry data to a file for local inspection. + +To enable file export, use the `--telemetry-outfile` flag with a path to your desired output file. This must be run using `--telemetry-target=local`. 
+ +```bash +gemini --telemetry --telemetry-target=local --telemetry-outfile=/path/to/telemetry.log "your prompt" +``` + ## Running an OTEL Collector An OTEL Collector is a service that receives, processes, and exports telemetry data. @@ -61,7 +72,7 @@ Learn more about OTEL exporter standard configuration in [documentation][otel-co ### Local -Use the `npm run telemetry -- --target=local` command to automate the process of setting up a local telemetry pipeline, including configuring the necessary settings in your `.qwen/settings.json` file. The underlying script installs `otelcol-contrib` (the OpenTelemetry Collector) and `jaeger` (The Jaeger UI for viewing traces). To use it: +Use the `npm run telemetry -- --target=local` command to automate the process of setting up a local telemetry pipeline, including configuring the necessary settings in your `.gemini/settings.json` file. The underlying script installs `otelcol-contrib` (the OpenTelemetry Collector) and `jaeger` (The Jaeger UI for viewing traces). To use it: 1. **Run the command**: Execute the command from the root of the repository: @@ -81,14 +92,14 @@ Use the `npm run telemetry -- --target=local` command to automate the process of Open your web browser and navigate to **http://localhost:16686** to access the Jaeger UI. Here you can inspect detailed traces of Gemini CLI operations. 1. **Inspect logs and metrics**: - The script redirects the OTEL collector output (which includes logs and metrics) to `~/.qwen/tmp//otel/collector.log`. The script will provide links to view and a command to tail your telemetry data (traces, metrics, logs) locally. + The script redirects the OTEL collector output (which includes logs and metrics) to `~/.gemini/tmp//otel/collector.log`. The script will provide links to view and a command to tail your telemetry data (traces, metrics, logs) locally. 1. **Stop the services**: Press `Ctrl+C` in the terminal where the script is running to stop the OTEL Collector and Jaeger services. 
### Google Cloud -Use the `npm run telemetry -- --target=gcp` command to automate setting up a local OpenTelemetry collector that forwards data to your Google Cloud project, including configuring the necessary settings in your `.qwen/settings.json` file. The underlying script installs `otelcol-contrib`. To use it: +Use the `npm run telemetry -- --target=gcp` command to automate setting up a local OpenTelemetry collector that forwards data to your Google Cloud project, including configuring the necessary settings in your `.gemini/settings.json` file. The underlying script installs `otelcol-contrib`. To use it: 1. **Prerequisites**: - Have a Google Cloud project ID. @@ -109,7 +120,7 @@ Use the `npm run telemetry -- --target=gcp` command to automate setting up a loc The script will: - Download the `otelcol-contrib` binary if needed. - Start an OTEL collector configured to receive data from Gemini CLI and export it to your specified Google Cloud project. - - Automatically enable telemetry and disable sandbox mode in your workspace settings (`.qwen/settings.json`). + - Automatically enable telemetry and disable sandbox mode in your workspace settings (`.gemini/settings.json`). - Provide direct links to view traces, metrics, and logs in your Google Cloud Console. - On exit (Ctrl+C), it will attempt to restore your original telemetry and sandbox settings. @@ -120,7 +131,7 @@ Use the `npm run telemetry -- --target=gcp` command to automate setting up a loc Use the links provided by the script to navigate to the Google Cloud Console and view your traces, metrics, and logs. 1. **Inspect local collector logs**: - The script redirects the local OTEL collector output to `~/.qwen/tmp//otel/collector-gcp.log`. The script provides links to view and command to tail your collector logs locally. + The script redirects the local OTEL collector output to `~/.gemini/tmp//otel/collector-gcp.log`. The script provides links to view and command to tail your collector logs locally. 1. 
**Stop the service**: Press `Ctrl+C` in the terminal where the script is running to stop the OTEL Collector. diff --git a/docs/tools/file-system.md b/docs/tools/file-system.md index 05a8f512b..ec741096a 100644 --- a/docs/tools/file-system.md +++ b/docs/tools/file-system.md @@ -90,7 +90,7 @@ The Gemini CLI provides a comprehensive suite of tools for interacting with the - `path` (string, optional): The absolute path to the directory to search within. Defaults to the current working directory. - `include` (string, optional): A glob pattern to filter which files are searched (e.g., `"*.js"`, `"src/**/*.{ts,tsx}"`). If omitted, searches most files (respecting common ignores). - **Behavior:** - - Uses `git grep` if available in a Git repository for speed, otherwise falls back to system `grep` or a JavaScript-based search. + - Uses `git grep` if available in a Git repository for speed; otherwise, falls back to system `grep` or a JavaScript-based search. - Returns a list of matching lines, each prefixed with its file path (relative to the search directory) and line number. - **Output (`llmContent`):** A formatted string of matches, e.g.: ``` diff --git a/docs/tools/mcp-server.md b/docs/tools/mcp-server.md index a481e591f..cd70da040 100644 --- a/docs/tools/mcp-server.md +++ b/docs/tools/mcp-server.md @@ -51,7 +51,7 @@ The Gemini CLI uses the `mcpServers` configuration in your `settings.json` file ### Configure the MCP server in settings.json -You can configure MCP servers at the global level in the `~/.qwen/settings.json` file or in your project's root directory, create or open the `.qwen/settings.json` file. Within the file, add the `mcpServers` configuration block. +You can configure MCP servers at the global level in the `~/.gemini/settings.json` file or in your project's root directory, create or open the `.gemini/settings.json` file. Within the file, add the `mcpServers` configuration block. 
### Configuration Structure @@ -92,6 +92,114 @@ Each server configuration supports the following properties: - **`cwd`** (string): Working directory for Stdio transport - **`timeout`** (number): Request timeout in milliseconds (default: 600,000ms = 10 minutes) - **`trust`** (boolean): When `true`, bypasses all tool call confirmations for this server (default: `false`) +- **`includeTools`** (string[]): List of tool names to include from this MCP server. When specified, only the tools listed here will be available from this server (whitelist behavior). If not specified, all tools from the server are enabled by default. +- **`excludeTools`** (string[]): List of tool names to exclude from this MCP server. Tools listed here will not be available to the model, even if they are exposed by the server. **Note:** `excludeTools` takes precedence over `includeTools` - if a tool is in both lists, it will be excluded. + +### OAuth Support for Remote MCP Servers + +The Gemini CLI supports OAuth 2.0 authentication for remote MCP servers using SSE or HTTP transports. This enables secure access to MCP servers that require authentication. + +#### Automatic OAuth Discovery + +For servers that support OAuth discovery, you can omit the OAuth configuration and let the CLI discover it automatically: + +```json +{ + "mcpServers": { + "discoveredServer": { + "url": "https://api.example.com/sse" + } + } +} +``` + +The CLI will automatically: + +- Detect when a server requires OAuth authentication (401 responses) +- Discover OAuth endpoints from server metadata +- Perform dynamic client registration if supported +- Handle the OAuth flow and token management + +#### Authentication Flow + +When connecting to an OAuth-enabled server: + +1. **Initial connection attempt** fails with 401 Unauthorized +2. **OAuth discovery** finds authorization and token endpoints +3. **Browser opens** for user authentication (requires local browser access) +4. 
**Authorization code** is exchanged for access tokens +5. **Tokens are stored** securely for future use +6. **Connection retry** succeeds with valid tokens + +#### Browser Redirect Requirements + +**Important:** OAuth authentication requires that your local machine can: + +- Open a web browser for authentication +- Receive redirects on `http://localhost:7777/oauth/callback` + +This feature will not work in: + +- Headless environments without browser access +- Remote SSH sessions without X11 forwarding +- Containerized environments without browser support + +#### Managing OAuth Authentication + +Use the `/mcp auth` command to manage OAuth authentication: + +```bash +# List servers requiring authentication +/mcp auth + +# Authenticate with a specific server +/mcp auth serverName + +# Re-authenticate if tokens expire +/mcp auth serverName +``` + +#### OAuth Configuration Properties + +- **`enabled`** (boolean): Enable OAuth for this server +- **`clientId`** (string): OAuth client identifier (optional with dynamic registration) +- **`clientSecret`** (string): OAuth client secret (optional for public clients) +- **`authorizationUrl`** (string): OAuth authorization endpoint (auto-discovered if omitted) +- **`tokenUrl`** (string): OAuth token endpoint (auto-discovered if omitted) +- **`scopes`** (string[]): Required OAuth scopes +- **`redirectUri`** (string): Custom redirect URI (defaults to `http://localhost:7777/oauth/callback`) +- **`tokenParamName`** (string): Query parameter name for tokens in SSE URLs + +#### Token Management + +OAuth tokens are automatically: + +- **Stored securely** in `~/.gemini/mcp-oauth-tokens.json` +- **Refreshed** when expired (if refresh tokens are available) +- **Validated** before each connection attempt +- **Cleaned up** when invalid or expired + +#### Authentication Provider Type + +You can specify the authentication provider type using the `authProviderType` property: + +- **`authProviderType`** (string): Specifies the authentication 
provider. Can be one of the following: + - **`dynamic_discovery`** (default): The CLI will automatically discover the OAuth configuration from the server. + - **`google_credentials`**: The CLI will use the Google Application Default Credentials (ADC) to authenticate with the server. When using this provider, you must specify the required scopes. + +```json +{ + "mcpServers": { + "googleCloudServer": { + "httpUrl": "https://my-gcp-service.run.app/mcp", + "authProviderType": "google_credentials", + "oauth": { + "scopes": ["https://www.googleapis.com/auth/userinfo.email"] + } + } + } +} +``` ### Example Configurations @@ -185,6 +293,22 @@ Each server configuration supports the following properties: } ``` +#### MCP Server with Tool Filtering + +```json +{ + "mcpServers": { + "filteredServer": { + "command": "python", + "args": ["-m", "my_mcp_server"], + "includeTools": ["safe_tool", "file_reader", "data_processor"], + // "excludeTools": ["dangerous_tool", "file_deleter"], + "timeout": 30000 + } + } +} +``` + ## Discovery Process Deep Dive When the Gemini CLI starts, it performs MCP server discovery through the following detailed process: @@ -207,7 +331,8 @@ Upon successful connection: 1. **Tool listing:** The client calls the MCP server's tool listing endpoint 2. **Schema validation:** Each tool's function declaration is validated -3. **Name sanitization:** Tool names are cleaned to meet Gemini API requirements: +3. **Tool filtering:** Tools are filtered based on `includeTools` and `excludeTools` configuration +4. 
**Name sanitization:** Tool names are cleaned to meet Gemini API requirements: - Invalid characters (non-alphanumeric, underscore, dot, hyphen) are replaced with underscores - Names longer than 63 characters are truncated with middle replacement (`___`) diff --git a/docs/tools/memory.md b/docs/tools/memory.md index facd4b1c0..fa2dac577 100644 --- a/docs/tools/memory.md +++ b/docs/tools/memory.md @@ -14,7 +14,7 @@ Use `save_memory` to save and recall information across your Gemini CLI sessions ## How to use `save_memory` with the Gemini CLI -The tool appends the provided `fact` to a special `GEMINI.md` file located in the user's home directory (`~/.qwen/GEMINI.md`). This file can be configured to have a different name. +The tool appends the provided `fact` to a special `GEMINI.md` file located in the user's home directory (`~/.gemini/GEMINI.md`). This file can be configured to have a different name. Once added, the facts are stored under a `## Gemini Added Memories` section. This file is loaded as context in subsequent sessions, allowing the CLI to recall the saved information. diff --git a/docs/tools/shell.md b/docs/tools/shell.md index 021cede1a..3e2a00e49 100644 --- a/docs/tools/shell.md +++ b/docs/tools/shell.md @@ -60,6 +60,10 @@ run_shell_command(command="npm run dev &", description="Start development server - **Error handling:** Check the `Stderr`, `Error`, and `Exit Code` fields to determine if a command executed successfully. - **Background processes:** When a command is run in the background with `&`, the tool will return immediately and the process will continue to run in the background. The `Background PIDs` field will contain the process ID of the background process. +## Environment Variables + +When `run_shell_command` executes a command, it sets the `GEMINI_CLI=1` environment variable in the subprocess's environment. This allows scripts or tools to detect if they are being run from within the Gemini CLI. 
+ ## Command Restrictions You can restrict the commands that can be executed by the `run_shell_command` tool by using the `coreTools` and `excludeTools` settings in your configuration file. diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index aeafca86e..fa88e26e9 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -19,7 +19,7 @@ This guide provides solutions to common issues and debugging tips. - A: If installed globally via npm, update Gemini CLI using the command `npm install -g @google/gemini-cli@latest`. If run from source, pull the latest changes from the repository and rebuild using `npm run build`. - **Q: Where are Gemini CLI configuration files stored?** - - A: The CLI configuration is stored within two `settings.json` files: one in your home directory and one in your project's root directory. In both locations, `settings.json` is found in the `.qwen/` folder. Refer to [CLI Configuration](./cli/configuration.md) for more details. + - A: The CLI configuration is stored within two `settings.json` files: one in your home directory and one in your project's root directory. In both locations, `settings.json` is found in the `.gemini/` folder. Refer to [CLI Configuration](./cli/configuration.md) for more details. - **Q: Why don't I see cached token counts in my stats output?** - A: Cached token information is only displayed when cached tokens are being used. This feature is available for API key users (Gemini API key or Vertex AI) but not for OAuth users (Google Personal/Enterprise accounts) at this time, as the Code Assist API does not support cached content creation. You can still view your total token usage with the `/stats` command. @@ -27,7 +27,7 @@ This guide provides solutions to common issues and debugging tips. ## Common error messages and solutions - **Error: `EADDRINUSE` (Address already in use) when starting an MCP server.** - - **Cause:** Another process is already using the port the MCP server is trying to bind to. 
+ - **Cause:** Another process is already using the port that the MCP server is trying to bind to. - **Solution:** Either stop the other process that is using the port or configure the MCP server to use a different port. diff --git a/esbuild.config.js b/esbuild.config.js index abf6a998b..0cb8e0fa8 100644 --- a/esbuild.config.js +++ b/esbuild.config.js @@ -21,11 +21,18 @@ esbuild outfile: 'bundle/gemini.js', platform: 'node', format: 'esm', + external: [], + alias: { + 'is-in-ci': path.resolve( + __dirname, + 'packages/cli/src/patches/is-in-ci.ts', + ), + }, define: { 'process.env.CLI_VERSION': JSON.stringify(pkg.version), }, banner: { - js: `import { createRequire as _gcliCreateRequire } from 'module'; const require = _gcliCreateRequire(import.meta.url); globalThis.__filename = require('url').fileURLToPath(import.meta.url); globalThis.__dirname = require('path').dirname(globalThis.__filename);`, + js: `import { createRequire } from 'module'; const require = createRequire(import.meta.url); globalThis.__filename = require('url').fileURLToPath(import.meta.url); globalThis.__dirname = require('path').dirname(globalThis.__filename);`, }, }) .catch(() => process.exit(1)); diff --git a/eslint.config.js b/eslint.config.js index 29aa23dc9..169bbd17b 100644 --- a/eslint.config.js +++ b/eslint.config.js @@ -12,7 +12,6 @@ import prettierConfig from 'eslint-config-prettier'; import importPlugin from 'eslint-plugin-import'; import globals from 'globals'; import licenseHeader from 'eslint-plugin-license-header'; -import noRelativeCrossPackageImports from './eslint-rules/no-relative-cross-package-imports.js'; import path from 'node:path'; // Use node: prefix for built-ins import url from 'node:url'; @@ -34,7 +33,6 @@ export default tseslint.config( 'packages/core/dist/**', 'packages/server/dist/**', 'packages/vscode-ide-companion/dist/**', - 'eslint-rules/*', 'bundle/**', ], }, @@ -72,6 +70,14 @@ export default tseslint.config( { // General overrides and rules for the project 
(TS/TSX files) files: ['packages/*/src/**/*.{ts,tsx}'], // Target only TS/TSX in the cli package + plugins: { + import: importPlugin, + }, + settings: { + 'import/resolver': { + node: true, + }, + }, languageOptions: { globals: { ...globals.node, @@ -106,6 +112,13 @@ export default tseslint.config( caughtErrorsIgnorePattern: '^_', }, ], + 'import/no-internal-modules': [ + 'error', + { + allow: ['react-dom/test-utils', 'memfs/lib/volume.js', 'yargs/**'], + }, + ], + 'import/no-relative-packages': 'error', 'no-cond-assign': 'error', 'no-debugger': 'error', 'no-duplicate-case': 'error', @@ -213,24 +226,4 @@ export default tseslint.config( ], }, }, - // Custom eslint rules for this repo - { - files: ['packages/**/*.{js,jsx,ts,tsx}'], - plugins: { - custom: { - rules: { - 'no-relative-cross-package-imports': noRelativeCrossPackageImports, - }, - }, - }, - rules: { - // Enable and configure your custom rule - 'custom/no-relative-cross-package-imports': [ - 'error', - { - root: path.join(projectRoot, 'packages'), - }, - ], - }, - }, ); diff --git a/integration-tests/run-tests.js b/integration-tests/run-tests.js index 5923dfcf4..4b4a9a94f 100644 --- a/integration-tests/run-tests.js +++ b/integration-tests/run-tests.js @@ -61,6 +61,7 @@ async function main() { console.log(`\tFound test file: ${testFileName}`); } + const MAX_RETRIES = 3; let allTestsPassed = true; for (const testFile of testFiles) { @@ -72,63 +73,97 @@ async function main() { `------------- Running test file: ${testFileName} ------------------------------`, ); - const nodeArgs = ['--test']; - if (verbose) { - nodeArgs.push('--test-reporter=spec'); - } - nodeArgs.push(testFile); + let attempt = 0; + let testFilePassed = false; + let lastStdout = []; + let lastStderr = []; - const child = spawn('node', nodeArgs, { - stdio: 'pipe', - env: { - ...process.env, - GEMINI_CLI_INTEGRATION_TEST: 'true', - INTEGRATION_TEST_FILE_DIR: testFileDir, - KEEP_OUTPUT: keepOutput.toString(), - VERBOSE: verbose.toString(), - 
TEST_FILE_NAME: testFileName, - }, - }); + while (attempt < MAX_RETRIES && !testFilePassed) { + attempt++; + if (attempt > 1) { + console.log( + `--- Retrying ${testFileName} (attempt ${attempt} of ${MAX_RETRIES}) ---`, + ); + } - let outputStream; - if (keepOutput) { - const outputFile = join(testFileDir, 'output.log'); - outputStream = createWriteStream(outputFile); - console.log(`Output for ${testFileName} written to: ${outputFile}`); - } - - child.stdout.on('data', (data) => { + const nodeArgs = ['--test']; if (verbose) { - process.stdout.write(data); + nodeArgs.push('--test-reporter=spec'); } - if (outputStream) { - outputStream.write(data); - } - }); + nodeArgs.push(testFile); - child.stderr.on('data', (data) => { - if (verbose) { - process.stderr.write(data); - } - if (outputStream) { - outputStream.write(data); - } - }); + const child = spawn('node', nodeArgs, { + stdio: 'pipe', + env: { + ...process.env, + GEMINI_CLI_INTEGRATION_TEST: 'true', + INTEGRATION_TEST_FILE_DIR: testFileDir, + KEEP_OUTPUT: keepOutput.toString(), + VERBOSE: verbose.toString(), + TEST_FILE_NAME: testFileName, + }, + }); - const exitCode = await new Promise((resolve) => { - child.on('close', (code) => { - if (outputStream) { - outputStream.end(() => { - resolve(code); - }); + let outputStream; + if (keepOutput) { + const outputFile = join(testFileDir, `output-attempt-${attempt}.log`); + outputStream = createWriteStream(outputFile); + console.log(`Output for ${testFileName} written to: ${outputFile}`); + } + + const stdout = []; + const stderr = []; + + child.stdout.on('data', (data) => { + if (verbose) { + process.stdout.write(data); } else { - resolve(code); + stdout.push(data); + } + if (outputStream) { + outputStream.write(data); } }); - }); - if (exitCode !== 0) { - console.error(`Test file failed: ${testFileName}`); + child.stderr.on('data', (data) => { + if (verbose) { + process.stderr.write(data); + } else { + stderr.push(data); + } + if (outputStream) { + 
outputStream.write(data); + } + }); + + const exitCode = await new Promise((resolve) => { + child.on('close', (code) => { + if (outputStream) { + outputStream.end(() => { + resolve(code); + }); + } else { + resolve(code); + } + }); + }); + + if (exitCode === 0) { + testFilePassed = true; + } else { + lastStdout = stdout; + lastStderr = stderr; + } + } + + if (!testFilePassed) { + console.error( + `Test file failed after ${MAX_RETRIES} attempts: ${testFileName}`, + ); + if (!verbose) { + process.stdout.write(Buffer.concat(lastStdout).toString('utf8')); + process.stderr.write(Buffer.concat(lastStderr).toString('utf8')); + } allTestsPassed = false; } } diff --git a/integration-tests/simple-mcp-server.test.js b/integration-tests/simple-mcp-server.test.js index d585609ed..fc88522d8 100644 --- a/integration-tests/simple-mcp-server.test.js +++ b/integration-tests/simple-mcp-server.test.js @@ -54,7 +54,7 @@ describe('simple-mcp-server', () => { console.error(`stderr: ${data}`); }); // Wait for the server to be ready - return new Promise((resolve) => setTimeout(resolve, 500)); + return new Promise((resolve) => setTimeout(resolve, 2000)); }); after(() => { diff --git a/package-lock.json b/package-lock.json index 3ff68a484..5444aea7b 100644 --- a/package-lock.json +++ b/package-lock.json @@ -10,6 +10,9 @@ "workspaces": [ "packages/*" ], + "dependencies": { + "tiktoken": "^1.0.21" + }, "bin": { "qwen": "bundle/gemini.js" }, @@ -17,8 +20,9 @@ "@types/micromatch": "^4.0.9", "@types/mime-types": "^3.0.1", "@types/minimatch": "^5.1.2", - "@types/semver": "^7.7.0", + "@types/mock-fs": "^4.13.4", "@types/shell-quote": "^1.7.5", + "@types/uuid": "^10.0.0", "@vitest/coverage-v8": "^3.1.1", "concurrently": "^9.2.0", "cross-env": "^7.0.3", @@ -34,14 +38,15 @@ "json": "^11.0.0", "lodash": "^4.17.21", "memfs": "^4.17.2", + "mock-fs": "^5.5.0", "prettier": "^3.5.3", "react-devtools-core": "^4.28.5", "typescript-eslint": "^8.30.1", "vitest": "^3.2.4", - "yargs": "^18.0.0" + "yargs": 
"^17.7.2" }, "engines": { - "node": ">=20" + "node": ">=20.0.0" } }, "node_modules/@alcalzone/ansi-tokenize": { @@ -57,30 +62,6 @@ "node": ">=14.13.1" } }, - "node_modules/@alcalzone/ansi-tokenize/node_modules/ansi-styles": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", - "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/@alcalzone/ansi-tokenize/node_modules/is-fullwidth-code-point": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-4.0.0.tgz", - "integrity": "sha512-O4L094N2/dZ7xqVdrXhh9r1KODPJpFms8B5sGdJLPy664AgvXsreZUyCQQNItZRDlYug4xStLjNp/sz3HvBowQ==", - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/@ampproject/remapping": { "version": "2.3.0", "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.3.0.tgz", @@ -123,12 +104,6 @@ "node": ">=6.9.0" } }, - "node_modules/@babel/code-frame/node_modules/js-tokens": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", - "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", - "license": "MIT" - }, "node_modules/@babel/helper-string-parser": { "version": "7.27.1", "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", @@ -149,13 +124,13 @@ } }, "node_modules/@babel/parser": { - "version": "7.27.5", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.27.5.tgz", - "integrity": "sha512-OsQd175SxWkGlzbny8J3K8TnnDD0N3lrIUtB92xwyRpzaenGZhxDvxN/JgU00U3CDZNj9tPuDJ5H0WS4Nt3vKg==", + "version": "7.28.0", + 
"resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.28.0.tgz", + "integrity": "sha512-jVZGvOxOuNSsuQuLRTh13nU0AogFlw32w/MT+LV6D3sP5WdbW61E77RnkbaO2dUvmPAYrBDJXGn5gGS6tH4j8g==", "dev": true, "license": "MIT", "dependencies": { - "@babel/types": "^7.27.3" + "@babel/types": "^7.28.0" }, "bin": { "parser": "bin/babel-parser.js" @@ -165,9 +140,9 @@ } }, "node_modules/@babel/runtime": { - "version": "7.27.6", - "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.27.6.tgz", - "integrity": "sha512-vbavdySgbTTrmFE+EsiqUTzlOr5bzlnJtUv9PynGCAKvfQqjIXbvFdumPM/GxMDfyuGMJaJAU6TO4zc1Jf1i8Q==", + "version": "7.28.2", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.28.2.tgz", + "integrity": "sha512-KHp2IflsnGywDjBWDkR9iEqiWSpc8GIi0lgTT3mOElT0PP1tG26P4tmFI2YvAdzgq9RGyoHZQEIEdZy6Ec5xCA==", "dev": true, "license": "MIT", "engines": { @@ -175,9 +150,9 @@ } }, "node_modules/@babel/types": { - "version": "7.27.6", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.27.6.tgz", - "integrity": "sha512-ETyHEk2VHHvl9b9jZP5IHPavHYk57EhanlRRuae9XCpb/j5bDCbPPMOBfCWhnl/7EDJz0jEMCi/RhccCE8r1+Q==", + "version": "7.28.2", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.28.2.tgz", + "integrity": "sha512-ruv7Ae4J5dUYULmeXw1gmb7rYRz57OWCPM57pHojnLq/3Z1CK2lNSLTCVjxVk1F/TZHwOZZrOWi0ur95BbLxNQ==", "dev": true, "license": "MIT", "dependencies": { @@ -314,9 +289,9 @@ } }, "node_modules/@esbuild/aix-ppc64": { - "version": "0.25.6", - "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.25.6.tgz", - "integrity": "sha512-ShbM/3XxwuxjFiuVBHA+d3j5dyac0aEVVq1oluIDf71hUw0aRF59dV/efUsIwFnR6m8JNM2FjZOzmaZ8yG61kw==", + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.25.8.tgz", + "integrity": "sha512-urAvrUedIqEiFR3FYSLTWQgLu5tb+m0qZw0NBEasUeo6wuqatkMDaRT+1uABiGXEu5vqgPd7FGE1BhsAIy9QVA==", "cpu": [ "ppc64" ], @@ -331,9 +306,9 @@ } }, "node_modules/@esbuild/android-arm": { - 
"version": "0.25.6", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.25.6.tgz", - "integrity": "sha512-S8ToEOVfg++AU/bHwdksHNnyLyVM+eMVAOf6yRKFitnwnbwwPNqKr3srzFRe7nzV69RQKb5DgchIX5pt3L53xg==", + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.25.8.tgz", + "integrity": "sha512-RONsAvGCz5oWyePVnLdZY/HHwA++nxYWIX1atInlaW6SEkwq6XkP3+cb825EUcRs5Vss/lGh/2YxAb5xqc07Uw==", "cpu": [ "arm" ], @@ -348,9 +323,9 @@ } }, "node_modules/@esbuild/android-arm64": { - "version": "0.25.6", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.25.6.tgz", - "integrity": "sha512-hd5zdUarsK6strW+3Wxi5qWws+rJhCCbMiC9QZyzoxfk5uHRIE8T287giQxzVpEvCwuJ9Qjg6bEjcRJcgfLqoA==", + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.25.8.tgz", + "integrity": "sha512-OD3p7LYzWpLhZEyATcTSJ67qB5D+20vbtr6vHlHWSQYhKtzUYrETuWThmzFpZtFsBIxRvhO07+UgVA9m0i/O1w==", "cpu": [ "arm64" ], @@ -365,9 +340,9 @@ } }, "node_modules/@esbuild/android-x64": { - "version": "0.25.6", - "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.25.6.tgz", - "integrity": "sha512-0Z7KpHSr3VBIO9A/1wcT3NTy7EB4oNC4upJ5ye3R7taCc2GUdeynSLArnon5G8scPwaU866d3H4BCrE5xLW25A==", + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.25.8.tgz", + "integrity": "sha512-yJAVPklM5+4+9dTeKwHOaA+LQkmrKFX96BM0A/2zQrbS6ENCmxc4OVoBs5dPkCCak2roAD+jKCdnmOqKszPkjA==", "cpu": [ "x64" ], @@ -382,9 +357,9 @@ } }, "node_modules/@esbuild/darwin-arm64": { - "version": "0.25.6", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.25.6.tgz", - "integrity": "sha512-FFCssz3XBavjxcFxKsGy2DYK5VSvJqa6y5HXljKzhRZ87LvEi13brPrf/wdyl/BbpbMKJNOr1Sd0jtW4Ge1pAA==", + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.25.8.tgz", + "integrity": 
"sha512-Jw0mxgIaYX6R8ODrdkLLPwBqHTtYHJSmzzd+QeytSugzQ0Vg4c5rDky5VgkoowbZQahCbsv1rT1KW72MPIkevw==", "cpu": [ "arm64" ], @@ -399,9 +374,9 @@ } }, "node_modules/@esbuild/darwin-x64": { - "version": "0.25.6", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.25.6.tgz", - "integrity": "sha512-GfXs5kry/TkGM2vKqK2oyiLFygJRqKVhawu3+DOCk7OxLy/6jYkWXhlHwOoTb0WqGnWGAS7sooxbZowy+pK9Yg==", + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.25.8.tgz", + "integrity": "sha512-Vh2gLxxHnuoQ+GjPNvDSDRpoBCUzY4Pu0kBqMBDlK4fuWbKgGtmDIeEC081xi26PPjn+1tct+Bh8FjyLlw1Zlg==", "cpu": [ "x64" ], @@ -416,9 +391,9 @@ } }, "node_modules/@esbuild/freebsd-arm64": { - "version": "0.25.6", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.25.6.tgz", - "integrity": "sha512-aoLF2c3OvDn2XDTRvn8hN6DRzVVpDlj2B/F66clWd/FHLiHaG3aVZjxQX2DYphA5y/evbdGvC6Us13tvyt4pWg==", + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.25.8.tgz", + "integrity": "sha512-YPJ7hDQ9DnNe5vxOm6jaie9QsTwcKedPvizTVlqWG9GBSq+BuyWEDazlGaDTC5NGU4QJd666V0yqCBL2oWKPfA==", "cpu": [ "arm64" ], @@ -433,9 +408,9 @@ } }, "node_modules/@esbuild/freebsd-x64": { - "version": "0.25.6", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.25.6.tgz", - "integrity": "sha512-2SkqTjTSo2dYi/jzFbU9Plt1vk0+nNg8YC8rOXXea+iA3hfNJWebKYPs3xnOUf9+ZWhKAaxnQNUf2X9LOpeiMQ==", + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.25.8.tgz", + "integrity": "sha512-MmaEXxQRdXNFsRN/KcIimLnSJrk2r5H8v+WVafRWz5xdSVmWLoITZQXcgehI2ZE6gioE6HirAEToM/RvFBeuhw==", "cpu": [ "x64" ], @@ -450,9 +425,9 @@ } }, "node_modules/@esbuild/linux-arm": { - "version": "0.25.6", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.25.6.tgz", - "integrity": 
"sha512-SZHQlzvqv4Du5PrKE2faN0qlbsaW/3QQfUUc6yO2EjFcA83xnwm91UbEEVx4ApZ9Z5oG8Bxz4qPE+HFwtVcfyw==", + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.25.8.tgz", + "integrity": "sha512-FuzEP9BixzZohl1kLf76KEVOsxtIBFwCaLupVuk4eFVnOZfU+Wsn+x5Ryam7nILV2pkq2TqQM9EZPsOBuMC+kg==", "cpu": [ "arm" ], @@ -467,9 +442,9 @@ } }, "node_modules/@esbuild/linux-arm64": { - "version": "0.25.6", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.25.6.tgz", - "integrity": "sha512-b967hU0gqKd9Drsh/UuAm21Khpoh6mPBSgz8mKRq4P5mVK8bpA+hQzmm/ZwGVULSNBzKdZPQBRT3+WuVavcWsQ==", + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.25.8.tgz", + "integrity": "sha512-WIgg00ARWv/uYLU7lsuDK00d/hHSfES5BzdWAdAig1ioV5kaFNrtK8EqGcUBJhYqotlUByUKz5Qo6u8tt7iD/w==", "cpu": [ "arm64" ], @@ -484,9 +459,9 @@ } }, "node_modules/@esbuild/linux-ia32": { - "version": "0.25.6", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.25.6.tgz", - "integrity": "sha512-aHWdQ2AAltRkLPOsKdi3xv0mZ8fUGPdlKEjIEhxCPm5yKEThcUjHpWB1idN74lfXGnZ5SULQSgtr5Qos5B0bPw==", + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.25.8.tgz", + "integrity": "sha512-A1D9YzRX1i+1AJZuFFUMP1E9fMaYY+GnSQil9Tlw05utlE86EKTUA7RjwHDkEitmLYiFsRd9HwKBPEftNdBfjg==", "cpu": [ "ia32" ], @@ -501,9 +476,9 @@ } }, "node_modules/@esbuild/linux-loong64": { - "version": "0.25.6", - "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.25.6.tgz", - "integrity": "sha512-VgKCsHdXRSQ7E1+QXGdRPlQ/e08bN6WMQb27/TMfV+vPjjTImuT9PmLXupRlC90S1JeNNW5lzkAEO/McKeJ2yg==", + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.25.8.tgz", + "integrity": "sha512-O7k1J/dwHkY1RMVvglFHl1HzutGEFFZ3kNiDMSOyUrB7WcoHGf96Sh+64nTRT26l3GMbCW01Ekh/ThKM5iI7hQ==", "cpu": [ "loong64" ], @@ -518,9 +493,9 @@ } }, 
"node_modules/@esbuild/linux-mips64el": { - "version": "0.25.6", - "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.25.6.tgz", - "integrity": "sha512-WViNlpivRKT9/py3kCmkHnn44GkGXVdXfdc4drNmRl15zVQ2+D2uFwdlGh6IuK5AAnGTo2qPB1Djppj+t78rzw==", + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.25.8.tgz", + "integrity": "sha512-uv+dqfRazte3BzfMp8PAQXmdGHQt2oC/y2ovwpTteqrMx2lwaksiFZ/bdkXJC19ttTvNXBuWH53zy/aTj1FgGw==", "cpu": [ "mips64el" ], @@ -535,9 +510,9 @@ } }, "node_modules/@esbuild/linux-ppc64": { - "version": "0.25.6", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.25.6.tgz", - "integrity": "sha512-wyYKZ9NTdmAMb5730I38lBqVu6cKl4ZfYXIs31Baf8aoOtB4xSGi3THmDYt4BTFHk7/EcVixkOV2uZfwU3Q2Jw==", + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.25.8.tgz", + "integrity": "sha512-GyG0KcMi1GBavP5JgAkkstMGyMholMDybAf8wF5A70CALlDM2p/f7YFE7H92eDeH/VBtFJA5MT4nRPDGg4JuzQ==", "cpu": [ "ppc64" ], @@ -552,9 +527,9 @@ } }, "node_modules/@esbuild/linux-riscv64": { - "version": "0.25.6", - "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.25.6.tgz", - "integrity": "sha512-KZh7bAGGcrinEj4qzilJ4hqTY3Dg2U82c8bv+e1xqNqZCrCyc+TL9AUEn5WGKDzm3CfC5RODE/qc96OcbIe33w==", + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.25.8.tgz", + "integrity": "sha512-rAqDYFv3yzMrq7GIcen3XP7TUEG/4LK86LUPMIz6RT8A6pRIDn0sDcvjudVZBiiTcZCY9y2SgYX2lgK3AF+1eg==", "cpu": [ "riscv64" ], @@ -569,9 +544,9 @@ } }, "node_modules/@esbuild/linux-s390x": { - "version": "0.25.6", - "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.25.6.tgz", - "integrity": "sha512-9N1LsTwAuE9oj6lHMyyAM+ucxGiVnEqUdp4v7IaMmrwb06ZTEVCIs3oPPplVsnjPfyjmxwHxHMF8b6vzUVAUGw==", + "version": "0.25.8", + "resolved": 
"https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.25.8.tgz", + "integrity": "sha512-Xutvh6VjlbcHpsIIbwY8GVRbwoviWT19tFhgdA7DlenLGC/mbc3lBoVb7jxj9Z+eyGqvcnSyIltYUrkKzWqSvg==", "cpu": [ "s390x" ], @@ -586,9 +561,9 @@ } }, "node_modules/@esbuild/linux-x64": { - "version": "0.25.6", - "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.25.6.tgz", - "integrity": "sha512-A6bJB41b4lKFWRKNrWoP2LHsjVzNiaurf7wyj/XtFNTsnPuxwEBWHLty+ZE0dWBKuSK1fvKgrKaNjBS7qbFKig==", + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.25.8.tgz", + "integrity": "sha512-ASFQhgY4ElXh3nDcOMTkQero4b1lgubskNlhIfJrsH5OKZXDpUAKBlNS0Kx81jwOBp+HCeZqmoJuihTv57/jvQ==", "cpu": [ "x64" ], @@ -603,9 +578,9 @@ } }, "node_modules/@esbuild/netbsd-arm64": { - "version": "0.25.6", - "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.25.6.tgz", - "integrity": "sha512-IjA+DcwoVpjEvyxZddDqBY+uJ2Snc6duLpjmkXm/v4xuS3H+3FkLZlDm9ZsAbF9rsfP3zeA0/ArNDORZgrxR/Q==", + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.25.8.tgz", + "integrity": "sha512-d1KfruIeohqAi6SA+gENMuObDbEjn22olAR7egqnkCD9DGBG0wsEARotkLgXDu6c4ncgWTZJtN5vcgxzWRMzcw==", "cpu": [ "arm64" ], @@ -620,9 +595,9 @@ } }, "node_modules/@esbuild/netbsd-x64": { - "version": "0.25.6", - "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.25.6.tgz", - "integrity": "sha512-dUXuZr5WenIDlMHdMkvDc1FAu4xdWixTCRgP7RQLBOkkGgwuuzaGSYcOpW4jFxzpzL1ejb8yF620UxAqnBrR9g==", + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.25.8.tgz", + "integrity": "sha512-nVDCkrvx2ua+XQNyfrujIG38+YGyuy2Ru9kKVNyh5jAys6n+l44tTtToqHjino2My8VAY6Lw9H7RI73XFi66Cg==", "cpu": [ "x64" ], @@ -637,9 +612,9 @@ } }, "node_modules/@esbuild/openbsd-arm64": { - "version": "0.25.6", - "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.25.6.tgz", - 
"integrity": "sha512-l8ZCvXP0tbTJ3iaqdNf3pjaOSd5ex/e6/omLIQCVBLmHTlfXW3zAxQ4fnDmPLOB1x9xrcSi/xtCWFwCZRIaEwg==", + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.25.8.tgz", + "integrity": "sha512-j8HgrDuSJFAujkivSMSfPQSAa5Fxbvk4rgNAS5i3K+r8s1X0p1uOO2Hl2xNsGFppOeHOLAVgYwDVlmxhq5h+SQ==", "cpu": [ "arm64" ], @@ -654,9 +629,9 @@ } }, "node_modules/@esbuild/openbsd-x64": { - "version": "0.25.6", - "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.25.6.tgz", - "integrity": "sha512-hKrmDa0aOFOr71KQ/19JC7az1P0GWtCN1t2ahYAf4O007DHZt/dW8ym5+CUdJhQ/qkZmI1HAF8KkJbEFtCL7gw==", + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.25.8.tgz", + "integrity": "sha512-1h8MUAwa0VhNCDp6Af0HToI2TJFAn1uqT9Al6DJVzdIBAd21m/G0Yfc77KDM3uF3T/YaOgQq3qTJHPbTOInaIQ==", "cpu": [ "x64" ], @@ -671,9 +646,9 @@ } }, "node_modules/@esbuild/openharmony-arm64": { - "version": "0.25.6", - "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.25.6.tgz", - "integrity": "sha512-+SqBcAWoB1fYKmpWoQP4pGtx+pUUC//RNYhFdbcSA16617cchuryuhOCRpPsjCblKukAckWsV+aQ3UKT/RMPcA==", + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.25.8.tgz", + "integrity": "sha512-r2nVa5SIK9tSWd0kJd9HCffnDHKchTGikb//9c7HX+r+wHYCpQrSgxhlY6KWV1nFo1l4KFbsMlHk+L6fekLsUg==", "cpu": [ "arm64" ], @@ -688,9 +663,9 @@ } }, "node_modules/@esbuild/sunos-x64": { - "version": "0.25.6", - "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.25.6.tgz", - "integrity": "sha512-dyCGxv1/Br7MiSC42qinGL8KkG4kX0pEsdb0+TKhmJZgCUDBGmyo1/ArCjNGiOLiIAgdbWgmWgib4HoCi5t7kA==", + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.25.8.tgz", + "integrity": "sha512-zUlaP2S12YhQ2UzUfcCuMDHQFJyKABkAjvO5YSndMiIkMimPmxA+BYSBikWgsRpvyxuRnow4nS5NPnf9fpv41w==", "cpu": [ "x64" ], @@ 
-705,9 +680,9 @@ } }, "node_modules/@esbuild/win32-arm64": { - "version": "0.25.6", - "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.25.6.tgz", - "integrity": "sha512-42QOgcZeZOvXfsCBJF5Afw73t4veOId//XD3i+/9gSkhSV6Gk3VPlWncctI+JcOyERv85FUo7RxuxGy+z8A43Q==", + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.25.8.tgz", + "integrity": "sha512-YEGFFWESlPva8hGL+zvj2z/SaK+pH0SwOM0Nc/d+rVnW7GSTFlLBGzZkuSU9kFIGIo8q9X3ucpZhu8PDN5A2sQ==", "cpu": [ "arm64" ], @@ -722,9 +697,9 @@ } }, "node_modules/@esbuild/win32-ia32": { - "version": "0.25.6", - "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.25.6.tgz", - "integrity": "sha512-4AWhgXmDuYN7rJI6ORB+uU9DHLq/erBbuMoAuB4VWJTu5KtCgcKYPynF0YI1VkBNuEfjNlLrFr9KZPJzrtLkrQ==", + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.25.8.tgz", + "integrity": "sha512-hiGgGC6KZ5LZz58OL/+qVVoZiuZlUYlYHNAmczOm7bs2oE1XriPFi5ZHHrS8ACpV5EjySrnoCKmcbQMN+ojnHg==", "cpu": [ "ia32" ], @@ -739,9 +714,9 @@ } }, "node_modules/@esbuild/win32-x64": { - "version": "0.25.6", - "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.25.6.tgz", - "integrity": "sha512-NgJPHHbEpLQgDH2MjQu90pzW/5vvXIZ7KOnPyNBm92A6WgZ/7b6fJyUBjoumLqeOQQGqY2QjQxRo97ah4Sj0cA==", + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.25.8.tgz", + "integrity": "sha512-cn3Yr7+OaaZq1c+2pe+8yxC8E144SReCQjN6/2ynubzYjvyqZjTXfQJpAcQpsdJq3My7XADANiYGHoFC69pLQw==", "cpu": [ "x64" ], @@ -774,19 +749,6 @@ "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" } }, - "node_modules/@eslint-community/eslint-utils/node_modules/eslint-visitor-keys": { - "version": "3.4.3", - "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", - "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", - "dev": true, - 
"license": "Apache-2.0", - "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "funding": { - "url": "https://opencollective.com/eslint" - } - }, "node_modules/@eslint-community/regexpp": { "version": "4.12.1", "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.1.tgz", @@ -798,9 +760,9 @@ } }, "node_modules/@eslint/config-array": { - "version": "0.20.1", - "resolved": "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.20.1.tgz", - "integrity": "sha512-OL0RJzC/CBzli0DrrR31qzj6d6i6Mm3HByuhflhl4LOBiWxN+3i6/t/ZQQNii4tjksXi8r2CRW1wMpWA2ULUEw==", + "version": "0.21.0", + "resolved": "https://registry.npmjs.org/@eslint/config-array/-/config-array-0.21.0.tgz", + "integrity": "sha512-ENIdc4iLu0d93HeYirvKmrzshzofPw6VkZRKQGe9Nv46ZnWUzcF1xV01dcvEg/1wXUR61OmmlSfyeyO7EvjLxQ==", "dev": true, "license": "Apache-2.0", "dependencies": { @@ -812,10 +774,34 @@ "node": "^18.18.0 || ^20.9.0 || >=21.1.0" } }, + "node_modules/@eslint/config-array/node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/@eslint/config-array/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, "node_modules/@eslint/config-helpers": { - "version": "0.2.3", - "resolved": "https://registry.npmjs.org/@eslint/config-helpers/-/config-helpers-0.2.3.tgz", - "integrity": 
"sha512-u180qk2Um1le4yf0ruXH3PYFeEZeYC3p/4wCTKrr2U1CmGdzGi3KtY0nuPDH48UJxlKCC5RDzbcbh4X0XlqgHg==", + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/@eslint/config-helpers/-/config-helpers-0.3.0.tgz", + "integrity": "sha512-ViuymvFmcJi04qdZeDc2whTHryouGcDlaxPqarTD0ZE10ISpxGUVZGZDx4w01upyIynL3iu6IXH2bS1NhclQMw==", "dev": true, "license": "Apache-2.0", "engines": { @@ -823,9 +809,9 @@ } }, "node_modules/@eslint/core": { - "version": "0.14.0", - "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.14.0.tgz", - "integrity": "sha512-qIbV0/JZr7iSDjqAc60IqbLdsj9GDt16xQtWD+B78d/HAlvysGdZZ6rpJHGAc2T0FQx1X6thsSPdnoiGKdNtdg==", + "version": "0.15.1", + "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.15.1.tgz", + "integrity": "sha512-bkOp+iumZCCbt1K1CmWf0R9pM5yKpDv+ZXtvSyQpudrI9kuFLp+bM2WOPXImuD/ceQuaa8f5pj93Y7zyECIGNA==", "dev": true, "license": "Apache-2.0", "dependencies": { @@ -859,6 +845,17 @@ "url": "https://opencollective.com/eslint" } }, + "node_modules/@eslint/eslintrc/node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, "node_modules/@eslint/eslintrc/node_modules/globals": { "version": "14.0.0", "resolved": "https://registry.npmjs.org/globals/-/globals-14.0.0.tgz", @@ -872,10 +869,33 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/@eslint/eslintrc/node_modules/ignore": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", + "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + 
"node_modules/@eslint/eslintrc/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, "node_modules/@eslint/js": { - "version": "9.29.0", - "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.29.0.tgz", - "integrity": "sha512-3PIF4cBw/y+1u2EazflInpV+lYsSG0aByVIQzAgb1m1MhHFSbqTyNqtBKHgWf/9Ykud+DhILS9EGkmekVhbKoQ==", + "version": "9.32.0", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-9.32.0.tgz", + "integrity": "sha512-BBpRFZK3eX6uMLKz8WxFOBIFFcGFJ/g8XuwjTHCqHROSIsopI+ddn/d5Cfh36+7+e5edVS8dbSHnBNhrLEX0zg==", "dev": true, "license": "MIT", "engines": { @@ -896,9 +916,9 @@ } }, "node_modules/@eslint/plugin-kit": { - "version": "0.3.3", - "resolved": "https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.3.3.tgz", - "integrity": "sha512-1+WqvgNMhmlAambTvT3KPtCl/Ibr68VldY2XY40SL1CE0ZXiakFR/cbTspaF5HsnpDMvcYYoJHfl4980NBjGag==", + "version": "0.3.4", + "resolved": "https://registry.npmjs.org/@eslint/plugin-kit/-/plugin-kit-0.3.4.tgz", + "integrity": "sha512-Ul5l+lHEcw3L5+k8POx6r74mxEYKG5kOb6Xpy2gCRW6zweT6TEhAf8vhxGgjhqrd/VO/Dirhsb+1hNpD1ue9hw==", "dev": true, "license": "Apache-2.0", "dependencies": { @@ -909,29 +929,14 @@ "node": "^18.18.0 || ^20.9.0 || >=21.1.0" } }, - "node_modules/@eslint/plugin-kit/node_modules/@eslint/core": { - "version": "0.15.1", - "resolved": "https://registry.npmjs.org/@eslint/core/-/core-0.15.1.tgz", - "integrity": "sha512-bkOp+iumZCCbt1K1CmWf0R9pM5yKpDv+ZXtvSyQpudrI9kuFLp+bM2WOPXImuD/ceQuaa8f5pj93Y7zyECIGNA==", - "dev": true, - "license": "Apache-2.0", - "dependencies": { - "@types/json-schema": "^7.0.15" - }, - "engines": { - "node": "^18.18.0 || ^20.9.0 || >=21.1.0" - } - }, "node_modules/@google/genai": { - 
"version": "1.8.0", - "resolved": "https://registry.npmjs.org/@google/genai/-/genai-1.8.0.tgz", - "integrity": "sha512-n3KiMFesQCy2R9iSdBIuJ0JWYQ1HZBJJkmt4PPZMGZKvlgHhBAGw1kUMyX+vsAIzprN3lK45DI755lm70wPOOg==", + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/@google/genai/-/genai-1.9.0.tgz", + "integrity": "sha512-w9P93OXKPMs9H1mfAx9+p3zJqQGrWBGdvK/SVc7cLZEXNHr/3+vW2eif7ZShA6wU24rNLn9z9MK2vQFUvNRI2Q==", "license": "Apache-2.0", "dependencies": { "google-auth-library": "^9.14.2", - "ws": "^8.18.0", - "zod": "^3.22.4", - "zod-to-json-schema": "^3.22.4" + "ws": "^8.18.0" }, "engines": { "node": ">=20.0.0" @@ -976,105 +981,6 @@ "node": ">=6" } }, - "node_modules/@grpc/proto-loader/node_modules/ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/@grpc/proto-loader/node_modules/cliui": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", - "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", - "license": "ISC", - "dependencies": { - "string-width": "^4.2.0", - "strip-ansi": "^6.0.1", - "wrap-ansi": "^7.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/@grpc/proto-loader/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "license": "MIT" - }, - "node_modules/@grpc/proto-loader/node_modules/string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": 
"sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "license": "MIT", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/@grpc/proto-loader/node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/@grpc/proto-loader/node_modules/wrap-ansi": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", - "license": "MIT", - "dependencies": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" - } - }, - "node_modules/@grpc/proto-loader/node_modules/yargs": { - "version": "17.7.2", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", - "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", - "license": "MIT", - "dependencies": { - "cliui": "^8.0.1", - "escalade": "^3.1.1", - "get-caller-file": "^2.0.5", - "require-directory": "^2.1.1", - "string-width": "^4.2.3", - "y18n": "^5.0.5", - "yargs-parser": "^21.1.1" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/@grpc/proto-loader/node_modules/yargs-parser": { - "version": "21.1.1", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", - "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", - 
"license": "ISC", - "engines": { - "node": ">=12" - } - }, "node_modules/@humanfs/core": { "version": "0.19.1", "resolved": "https://registry.npmjs.org/@humanfs/core/-/core-0.19.1.tgz", @@ -1141,6 +1047,12 @@ "url": "https://github.com/sponsors/nzakas" } }, + "node_modules/@iarna/toml": { + "version": "2.2.5", + "resolved": "https://registry.npmjs.org/@iarna/toml/-/toml-2.2.5.tgz", + "integrity": "sha512-trnsAYxU3xnS1gPHPyU961coFyLkh4gAD/0zQ5mymY4yOZ+CYvsPqUbOFSw0aDM4y0tV7tiFxL/1XfXPNC6IPg==", + "license": "ISC" + }, "node_modules/@isaacs/cliui": { "version": "8.0.2", "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz", @@ -1158,6 +1070,29 @@ "node": ">=12" } }, + "node_modules/@isaacs/cliui/node_modules/emoji-regex": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", + "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", + "license": "MIT" + }, + "node_modules/@isaacs/cliui/node_modules/string-width": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", + "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", + "license": "MIT", + "dependencies": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/@istanbuljs/schema": { "version": "0.1.3", "resolved": "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.3.tgz", @@ -1169,9 +1104,9 @@ } }, "node_modules/@jest/schemas": { - "version": "30.0.1", - "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-30.0.1.tgz", - "integrity": "sha512-+g/1TKjFuGrf1Hh0QPCv0gISwBxJ+MQSNXmG9zjHy7BmFhtoJ9fdNhWJp3qUKRi93AOZHXtdxZgJ1vAtz6z65w==", + "version": "30.0.5", + "resolved": 
"https://registry.npmjs.org/@jest/schemas/-/schemas-30.0.5.tgz", + "integrity": "sha512-DmdYgtezMkh3cpU8/1uyXakv3tJRcmcXxBOcO0tbaozPwpmh4YMsnWrQm9ZmZMfa5ocbxzbFk6O4bDPEc/iAnA==", "dev": true, "license": "MIT", "dependencies": { @@ -1182,18 +1117,14 @@ } }, "node_modules/@jridgewell/gen-mapping": { - "version": "0.3.8", - "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.8.tgz", - "integrity": "sha512-imAbBGkb+ebQyxKgzv5Hu2nmROxoDOXHh80evxdoXNOrvAnVx7zimzc1Oo5h9RlfV4vPXaE2iM5pOFbvOCClWA==", + "version": "0.3.12", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.12.tgz", + "integrity": "sha512-OuLGC46TjB5BbN1dH8JULVVZY4WTdkF7tV9Ys6wLL1rubZnCMstOhNHueU5bLCrnRuDhKPDM4g6sw4Bel5Gzqg==", "dev": true, "license": "MIT", "dependencies": { - "@jridgewell/set-array": "^1.2.1", - "@jridgewell/sourcemap-codec": "^1.4.10", + "@jridgewell/sourcemap-codec": "^1.5.0", "@jridgewell/trace-mapping": "^0.3.24" - }, - "engines": { - "node": ">=6.0.0" } }, "node_modules/@jridgewell/resolve-uri": { @@ -1206,27 +1137,17 @@ "node": ">=6.0.0" } }, - "node_modules/@jridgewell/set-array": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.2.1.tgz", - "integrity": "sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=6.0.0" - } - }, "node_modules/@jridgewell/sourcemap-codec": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz", - "integrity": "sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==", + "version": "1.5.4", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.4.tgz", + "integrity": "sha512-VT2+G1VQs/9oz078bLrYbecdZKs912zQlkelYpuf+SXF+QvZDYJlbx/LSx+meSAwdDFnF8FVXW92AVjjkVmgFw==", "dev": true, "license": "MIT" }, 
"node_modules/@jridgewell/trace-mapping": { - "version": "0.3.25", - "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz", - "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==", + "version": "0.3.29", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.29.tgz", + "integrity": "sha512-uw6guiW/gcAGPDhLmd77/6lW8QLeiV5RUTsAX46Db6oLhGaVj4lhnPwb184s1bkc8kdVg/+h988dro8GRDpmYQ==", "dev": true, "license": "MIT", "dependencies": { @@ -1262,9 +1183,9 @@ } }, "node_modules/@jsonjoy.com/json-pack": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/@jsonjoy.com/json-pack/-/json-pack-1.2.0.tgz", - "integrity": "sha512-io1zEbbYcElht3tdlqEOFxZ0dMTYrHz9iMf0gqn1pPjZFTCgM5R4R5IMA20Chb2UPYYsxjzs8CgZ7Nb5n2K2rA==", + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/@jsonjoy.com/json-pack/-/json-pack-1.4.0.tgz", + "integrity": "sha512-Akn8XZqN3xO9YGcgvIiTauBBXTP92QSvw6EcGha+p5nm7brhbwvev5gw4fi+ouLGrBpfPpb72+S5pxl4mkMIGQ==", "dev": true, "license": "Apache-2.0", "dependencies": { @@ -1285,9 +1206,9 @@ } }, "node_modules/@jsonjoy.com/util": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/@jsonjoy.com/util/-/util-1.6.0.tgz", - "integrity": "sha512-sw/RMbehRhN68WRtcKCpQOPfnH6lLP4GJfqzi3iYej8tnzpZUDr6UkZYJjcjjC0FWEJOJbyM3PTIwxucUmDG2A==", + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/@jsonjoy.com/util/-/util-1.8.0.tgz", + "integrity": "sha512-HeR0JQNEdBozt+FrfyM5T0X3R+fIN0D+BRDkxPP5o41fTWzHfeZEqtK16aTW8haU+h+SG7XYq9PP5kobvOmkSA==", "dev": true, "license": "Apache-2.0", "engines": { @@ -1317,9 +1238,9 @@ "license": "MIT" }, "node_modules/@modelcontextprotocol/sdk": { - "version": "1.15.1", - "resolved": "https://registry.npmjs.org/@modelcontextprotocol/sdk/-/sdk-1.15.1.tgz", - "integrity": "sha512-W/XlN9c528yYn+9MQkVjxiTPgPxoxt+oczfjHBDsJx0+59+O7B75Zhsp0B16Xbwbz8ANISDajh6+V7nIcPMc5w==", + 
"version": "1.17.1", + "resolved": "https://registry.npmjs.org/@modelcontextprotocol/sdk/-/sdk-1.17.1.tgz", + "integrity": "sha512-CPle1OQehbWqd25La9Ack5B07StKIxh4+Bf19qnpZKJC1oI22Y0czZHbifjw1UoczIfKBwBDAp/dFxvHG13B5A==", "license": "MIT", "dependencies": { "ajv": "^6.12.6", @@ -1599,30 +1520,6 @@ "@opentelemetry/api": "^1.3.0" } }, - "node_modules/@opentelemetry/instrumentation-http/node_modules/semver": { - "version": "7.7.2", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.2.tgz", - "integrity": "sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==", - "license": "ISC", - "bin": { - "semver": "bin/semver.js" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/@opentelemetry/instrumentation/node_modules/semver": { - "version": "7.7.2", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.2.tgz", - "integrity": "sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==", - "license": "ISC", - "bin": { - "semver": "bin/semver.js" - }, - "engines": { - "node": ">=10" - } - }, "node_modules/@opentelemetry/otlp-exporter-base": { "version": "0.52.1", "resolved": "https://registry.npmjs.org/@opentelemetry/otlp-exporter-base/-/otlp-exporter-base-0.52.1.tgz", @@ -1822,18 +1719,6 @@ "@opentelemetry/api": ">=1.0.0 <1.10.0" } }, - "node_modules/@opentelemetry/sdk-trace-node/node_modules/semver": { - "version": "7.7.2", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.2.tgz", - "integrity": "sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==", - "license": "ISC", - "bin": { - "semver": "bin/semver.js" - }, - "engines": { - "node": ">=10" - } - }, "node_modules/@opentelemetry/semantic-conventions": { "version": "1.25.1", "resolved": "https://registry.npmjs.org/@opentelemetry/semantic-conventions/-/semantic-conventions-1.25.1.tgz", @@ -1966,14 +1851,10 @@ "resolved": "packages/core", "link": true }, - 
"node_modules/@qwen-code/qwen-code-vscode-ide-companion": { - "resolved": "packages/vscode-ide-companion", - "link": true - }, "node_modules/@rollup/rollup-android-arm-eabi": { - "version": "4.44.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.44.0.tgz", - "integrity": "sha512-xEiEE5oDW6tK4jXCAyliuntGR+amEMO7HLtdSshVuhFnKTYoeYMyXQK7pLouAJJj5KHdwdn87bfHAR2nSdNAUA==", + "version": "4.46.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.46.2.tgz", + "integrity": "sha512-Zj3Hl6sN34xJtMv7Anwb5Gu01yujyE/cLBDB2gnHTAHaWS1Z38L7kuSG+oAh0giZMqG060f/YBStXtMH6FvPMA==", "cpu": [ "arm" ], @@ -1985,9 +1866,9 @@ ] }, "node_modules/@rollup/rollup-android-arm64": { - "version": "4.44.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.44.0.tgz", - "integrity": "sha512-uNSk/TgvMbskcHxXYHzqwiyBlJ/lGcv8DaUfcnNwict8ba9GTTNxfn3/FAoFZYgkaXXAdrAA+SLyKplyi349Jw==", + "version": "4.46.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.46.2.tgz", + "integrity": "sha512-nTeCWY83kN64oQ5MGz3CgtPx8NSOhC5lWtsjTs+8JAJNLcP3QbLCtDDgUKQc/Ro/frpMq4SHUaHN6AMltcEoLQ==", "cpu": [ "arm64" ], @@ -1999,9 +1880,9 @@ ] }, "node_modules/@rollup/rollup-darwin-arm64": { - "version": "4.44.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.44.0.tgz", - "integrity": "sha512-VGF3wy0Eq1gcEIkSCr8Ke03CWT+Pm2yveKLaDvq51pPpZza3JX/ClxXOCmTYYq3us5MvEuNRTaeyFThCKRQhOA==", + "version": "4.46.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.46.2.tgz", + "integrity": "sha512-HV7bW2Fb/F5KPdM/9bApunQh68YVDU8sO8BvcW9OngQVN3HHHkw99wFupuUJfGR9pYLLAjcAOA6iO+evsbBaPQ==", "cpu": [ "arm64" ], @@ -2013,9 +1894,9 @@ ] }, "node_modules/@rollup/rollup-darwin-x64": { - "version": "4.44.0", - "resolved": 
"https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.44.0.tgz", - "integrity": "sha512-fBkyrDhwquRvrTxSGH/qqt3/T0w5Rg0L7ZIDypvBPc1/gzjJle6acCpZ36blwuwcKD/u6oCE/sRWlUAcxLWQbQ==", + "version": "4.46.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.46.2.tgz", + "integrity": "sha512-SSj8TlYV5nJixSsm/y3QXfhspSiLYP11zpfwp6G/YDXctf3Xkdnk4woJIF5VQe0of2OjzTt8EsxnJDCdHd2xMA==", "cpu": [ "x64" ], @@ -2027,9 +1908,9 @@ ] }, "node_modules/@rollup/rollup-freebsd-arm64": { - "version": "4.44.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.44.0.tgz", - "integrity": "sha512-u5AZzdQJYJXByB8giQ+r4VyfZP+walV+xHWdaFx/1VxsOn6eWJhK2Vl2eElvDJFKQBo/hcYIBg/jaKS8ZmKeNQ==", + "version": "4.46.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.46.2.tgz", + "integrity": "sha512-ZyrsG4TIT9xnOlLsSSi9w/X29tCbK1yegE49RYm3tu3wF1L/B6LVMqnEWyDB26d9Ecx9zrmXCiPmIabVuLmNSg==", "cpu": [ "arm64" ], @@ -2041,9 +1922,9 @@ ] }, "node_modules/@rollup/rollup-freebsd-x64": { - "version": "4.44.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.44.0.tgz", - "integrity": "sha512-qC0kS48c/s3EtdArkimctY7h3nHicQeEUdjJzYVJYR3ct3kWSafmn6jkNCA8InbUdge6PVx6keqjk5lVGJf99g==", + "version": "4.46.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.46.2.tgz", + "integrity": "sha512-pCgHFoOECwVCJ5GFq8+gR8SBKnMO+xe5UEqbemxBpCKYQddRQMgomv1104RnLSg7nNvgKy05sLsY51+OVRyiVw==", "cpu": [ "x64" ], @@ -2055,9 +1936,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm-gnueabihf": { - "version": "4.44.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.44.0.tgz", - "integrity": "sha512-x+e/Z9H0RAWckn4V2OZZl6EmV0L2diuX3QB0uM1r6BvhUIv6xBPL5mrAX2E3e8N8rEHVPwFfz/ETUbV4oW9+lQ==", + "version": "4.46.2", + "resolved": 
"https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.46.2.tgz", + "integrity": "sha512-EtP8aquZ0xQg0ETFcxUbU71MZlHaw9MChwrQzatiE8U/bvi5uv/oChExXC4mWhjiqK7azGJBqU0tt5H123SzVA==", "cpu": [ "arm" ], @@ -2069,9 +1950,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm-musleabihf": { - "version": "4.44.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.44.0.tgz", - "integrity": "sha512-1exwiBFf4PU/8HvI8s80icyCcnAIB86MCBdst51fwFmH5dyeoWVPVgmQPcKrMtBQ0W5pAs7jBCWuRXgEpRzSCg==", + "version": "4.46.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.46.2.tgz", + "integrity": "sha512-qO7F7U3u1nfxYRPM8HqFtLd+raev2K137dsV08q/LRKRLEc7RsiDWihUnrINdsWQxPR9jqZ8DIIZ1zJJAm5PjQ==", "cpu": [ "arm" ], @@ -2083,9 +1964,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm64-gnu": { - "version": "4.44.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.44.0.tgz", - "integrity": "sha512-ZTR2mxBHb4tK4wGf9b8SYg0Y6KQPjGpR4UWwTFdnmjB4qRtoATZ5dWn3KsDwGa5Z2ZBOE7K52L36J9LueKBdOQ==", + "version": "4.46.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.46.2.tgz", + "integrity": "sha512-3dRaqLfcOXYsfvw5xMrxAk9Lb1f395gkoBYzSFcc/scgRFptRXL9DOaDpMiehf9CO8ZDRJW2z45b6fpU5nwjng==", "cpu": [ "arm64" ], @@ -2097,9 +1978,9 @@ ] }, "node_modules/@rollup/rollup-linux-arm64-musl": { - "version": "4.44.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.44.0.tgz", - "integrity": "sha512-GFWfAhVhWGd4r6UxmnKRTBwP1qmModHtd5gkraeW2G490BpFOZkFtem8yuX2NyafIP/mGpRJgTJ2PwohQkUY/Q==", + "version": "4.46.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.46.2.tgz", + "integrity": 
"sha512-fhHFTutA7SM+IrR6lIfiHskxmpmPTJUXpWIsBXpeEwNgZzZZSg/q4i6FU4J8qOGyJ0TR+wXBwx/L7Ho9z0+uDg==", "cpu": [ "arm64" ], @@ -2111,9 +1992,9 @@ ] }, "node_modules/@rollup/rollup-linux-loongarch64-gnu": { - "version": "4.44.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loongarch64-gnu/-/rollup-linux-loongarch64-gnu-4.44.0.tgz", - "integrity": "sha512-xw+FTGcov/ejdusVOqKgMGW3c4+AgqrfvzWEVXcNP6zq2ue+lsYUgJ+5Rtn/OTJf7e2CbgTFvzLW2j0YAtj0Gg==", + "version": "4.46.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loongarch64-gnu/-/rollup-linux-loongarch64-gnu-4.46.2.tgz", + "integrity": "sha512-i7wfGFXu8x4+FRqPymzjD+Hyav8l95UIZ773j7J7zRYc3Xsxy2wIn4x+llpunexXe6laaO72iEjeeGyUFmjKeA==", "cpu": [ "loong64" ], @@ -2124,10 +2005,10 @@ "linux" ] }, - "node_modules/@rollup/rollup-linux-powerpc64le-gnu": { - "version": "4.44.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-powerpc64le-gnu/-/rollup-linux-powerpc64le-gnu-4.44.0.tgz", - "integrity": "sha512-bKGibTr9IdF0zr21kMvkZT4K6NV+jjRnBoVMt2uNMG0BYWm3qOVmYnXKzx7UhwrviKnmK46IKMByMgvpdQlyJQ==", + "node_modules/@rollup/rollup-linux-ppc64-gnu": { + "version": "4.46.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.46.2.tgz", + "integrity": "sha512-B/l0dFcHVUnqcGZWKcWBSV2PF01YUt0Rvlurci5P+neqY/yMKchGU8ullZvIv5e8Y1C6wOn+U03mrDylP5q9Yw==", "cpu": [ "ppc64" ], @@ -2139,9 +2020,9 @@ ] }, "node_modules/@rollup/rollup-linux-riscv64-gnu": { - "version": "4.44.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.44.0.tgz", - "integrity": "sha512-vV3cL48U5kDaKZtXrti12YRa7TyxgKAIDoYdqSIOMOFBXqFj2XbChHAtXquEn2+n78ciFgr4KIqEbydEGPxXgA==", + "version": "4.46.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.46.2.tgz", + "integrity": "sha512-32k4ENb5ygtkMwPMucAb8MtV8olkPT03oiTxJbgkJa7lJ7dZMr0GCFJlyvy+K8iq7F/iuOr41ZdUHaOiqyR3iQ==", 
"cpu": [ "riscv64" ], @@ -2153,9 +2034,9 @@ ] }, "node_modules/@rollup/rollup-linux-riscv64-musl": { - "version": "4.44.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.44.0.tgz", - "integrity": "sha512-TDKO8KlHJuvTEdfw5YYFBjhFts2TR0VpZsnLLSYmB7AaohJhM8ctDSdDnUGq77hUh4m/djRafw+9zQpkOanE2Q==", + "version": "4.46.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.46.2.tgz", + "integrity": "sha512-t5B2loThlFEauloaQkZg9gxV05BYeITLvLkWOkRXogP4qHXLkWSbSHKM9S6H1schf/0YGP/qNKtiISlxvfmmZw==", "cpu": [ "riscv64" ], @@ -2167,9 +2048,9 @@ ] }, "node_modules/@rollup/rollup-linux-s390x-gnu": { - "version": "4.44.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.44.0.tgz", - "integrity": "sha512-8541GEyktXaw4lvnGp9m84KENcxInhAt6vPWJ9RodsB/iGjHoMB2Pp5MVBCiKIRxrxzJhGCxmNzdu+oDQ7kwRA==", + "version": "4.46.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.46.2.tgz", + "integrity": "sha512-YKjekwTEKgbB7n17gmODSmJVUIvj8CX7q5442/CK80L8nqOUbMtf8b01QkG3jOqyr1rotrAnW6B/qiHwfcuWQA==", "cpu": [ "s390x" ], @@ -2181,9 +2062,9 @@ ] }, "node_modules/@rollup/rollup-linux-x64-gnu": { - "version": "4.44.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.44.0.tgz", - "integrity": "sha512-iUVJc3c0o8l9Sa/qlDL2Z9UP92UZZW1+EmQ4xfjTc1akr0iUFZNfxrXJ/R1T90h/ILm9iXEY6+iPrmYB3pXKjw==", + "version": "4.46.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.46.2.tgz", + "integrity": "sha512-Jj5a9RUoe5ra+MEyERkDKLwTXVu6s3aACP51nkfnK9wJTraCC8IMe3snOfALkrjTYd2G1ViE1hICj0fZ7ALBPA==", "cpu": [ "x64" ], @@ -2195,9 +2076,9 @@ ] }, "node_modules/@rollup/rollup-linux-x64-musl": { - "version": "4.44.0", - "resolved": 
"https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.44.0.tgz", - "integrity": "sha512-PQUobbhLTQT5yz/SPg116VJBgz+XOtXt8D1ck+sfJJhuEsMj2jSej5yTdp8CvWBSceu+WW+ibVL6dm0ptG5fcA==", + "version": "4.46.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.46.2.tgz", + "integrity": "sha512-7kX69DIrBeD7yNp4A5b81izs8BqoZkCIaxQaOpumcJ1S/kmqNFjPhDu1LHeVXv0SexfHQv5cqHsxLOjETuqDuA==", "cpu": [ "x64" ], @@ -2209,9 +2090,9 @@ ] }, "node_modules/@rollup/rollup-win32-arm64-msvc": { - "version": "4.44.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.44.0.tgz", - "integrity": "sha512-M0CpcHf8TWn+4oTxJfh7LQuTuaYeXGbk0eageVjQCKzYLsajWS/lFC94qlRqOlyC2KvRT90ZrfXULYmukeIy7w==", + "version": "4.46.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.46.2.tgz", + "integrity": "sha512-wiJWMIpeaak/jsbaq2HMh/rzZxHVW1rU6coyeNNpMwk5isiPjSTx0a4YLSlYDwBH/WBvLz+EtsNqQScZTLJy3g==", "cpu": [ "arm64" ], @@ -2223,9 +2104,9 @@ ] }, "node_modules/@rollup/rollup-win32-ia32-msvc": { - "version": "4.44.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.44.0.tgz", - "integrity": "sha512-3XJ0NQtMAXTWFW8FqZKcw3gOQwBtVWP/u8TpHP3CRPXD7Pd6s8lLdH3sHWh8vqKCyyiI8xW5ltJScQmBU9j7WA==", + "version": "4.46.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.46.2.tgz", + "integrity": "sha512-gBgaUDESVzMgWZhcyjfs9QFK16D8K6QZpwAaVNJxYDLHWayOta4ZMjGm/vsAEy3hvlS2GosVFlBlP9/Wb85DqQ==", "cpu": [ "ia32" ], @@ -2237,9 +2118,9 @@ ] }, "node_modules/@rollup/rollup-win32-x64-msvc": { - "version": "4.44.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.44.0.tgz", - "integrity": "sha512-Q2Mgwt+D8hd5FIPUuPDsvPR7Bguza6yTkJxspDGkZj7tBRn2y4KSWYuIXpftFSjBra76TbKerCV7rgFPQrn+wQ==", + "version": 
"4.46.2", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.46.2.tgz", + "integrity": "sha512-CvUo2ixeIQGtF6WvuB87XWqPQkoFAFqW+HUo/WzHwuHDvIwZCtjdWXoYCcr06iKGydiqTclC4jU/TNObC/xKZg==", "cpu": [ "x64" ], @@ -2271,12 +2152,110 @@ } }, "node_modules/@sinclair/typebox": { - "version": "0.34.37", - "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.34.37.tgz", - "integrity": "sha512-2TRuQVgQYfy+EzHRTIvkhv2ADEouJ2xNS/Vq+W5EuuewBdOrvATvljZTxHWZSTYr2sTjTHpGvucaGAt67S2akw==", + "version": "0.34.38", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.34.38.tgz", + "integrity": "sha512-HpkxMmc2XmZKhvaKIZZThlHmx1L0I/V1hWK1NubtlFnr6ZqdiOpV72TKudZUNQjZNsyDBay72qFEhEvb+bcwcA==", "dev": true, "license": "MIT" }, + "node_modules/@testing-library/dom": { + "version": "10.4.1", + "resolved": "https://registry.npmjs.org/@testing-library/dom/-/dom-10.4.1.tgz", + "integrity": "sha512-o4PXJQidqJl82ckFaXUeoAW+XysPLauYI43Abki5hABd853iMhitooc6znOnczgbTYmEP6U6/y1ZyKAIsvMKGg==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@babel/code-frame": "^7.10.4", + "@babel/runtime": "^7.12.5", + "@types/aria-query": "^5.0.1", + "aria-query": "5.3.0", + "dom-accessibility-api": "^0.5.9", + "lz-string": "^1.5.0", + "picocolors": "1.1.1", + "pretty-format": "^27.0.2" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@testing-library/dom/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/@testing-library/dom/node_modules/ansi-styles": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": 
"sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/@testing-library/dom/node_modules/pretty-format": { + "version": "27.5.1", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-27.5.1.tgz", + "integrity": "sha512-Qb1gy5OrP5+zDf2Bvnzdl3jsTf1qXVMazbvCoKhtKqVs4/YK4ozX4gKQJJVyNe+cajNPn0KoC0MC3FUmaHWEmQ==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "ansi-regex": "^5.0.1", + "ansi-styles": "^5.0.0", + "react-is": "^17.0.1" + }, + "engines": { + "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" + } + }, + "node_modules/@testing-library/dom/node_modules/react-is": { + "version": "17.0.2", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-17.0.2.tgz", + "integrity": "sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w==", + "dev": true, + "license": "MIT", + "peer": true + }, + "node_modules/@testing-library/react": { + "version": "16.3.0", + "resolved": "https://registry.npmjs.org/@testing-library/react/-/react-16.3.0.tgz", + "integrity": "sha512-kFSyxiEDwv1WLl2fgsq6pPBbw5aWKrsY2/noi1Id0TK0UParSF62oFQFGHXIyaG4pp2tEub/Zlel+fjjZILDsw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.12.5" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@testing-library/dom": "^10.0.0", + "@types/react": "^18.0.0 || ^19.0.0", + "@types/react-dom": "^18.0.0 || ^19.0.0", + "react": "^18.0.0 || ^19.0.0", + "react-dom": "^18.0.0 || ^19.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, "node_modules/@types/aria-query": { "version": "5.0.4", "resolved": 
"https://registry.npmjs.org/@types/aria-query/-/aria-query-5.0.4.tgz", @@ -2402,16 +2381,6 @@ "@types/send": "*" } }, - "node_modules/@types/glob": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/@types/glob/-/glob-8.1.0.tgz", - "integrity": "sha512-IO+MJPVhoqz+28h1qLAcBEH2+xHMK6MTyHJc7MTnnYb6wsoLR29POVGJ7LycmVXIqyy/4/2ShP5sUwTXuOwb/w==", - "license": "MIT", - "dependencies": { - "@types/minimatch": "^5.1.2", - "@types/node": "*" - } - }, "node_modules/@types/gradient-string": { "version": "1.1.6", "resolved": "https://registry.npmjs.org/@types/gradient-string/-/gradient-string-1.1.6.tgz", @@ -2485,15 +2454,26 @@ "version": "5.1.2", "resolved": "https://registry.npmjs.org/@types/minimatch/-/minimatch-5.1.2.tgz", "integrity": "sha512-K0VQKziLUWkVKiRVrx4a40iPaxTUefQmjtkQofBkYRcoaaL/8rhwDWww9qWbrgicNOgnpIsMxyNIUM4+n6dUIA==", + "dev": true, "license": "MIT" }, - "node_modules/@types/node": { - "version": "20.19.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-20.19.1.tgz", - "integrity": "sha512-jJD50LtlD2dodAEO653i3YF04NWak6jN3ky+Ri3Em3mGR39/glWiboM/IePaRbgwSfqM1TpGXfAg8ohn/4dTgA==", + "node_modules/@types/mock-fs": { + "version": "4.13.4", + "resolved": "https://registry.npmjs.org/@types/mock-fs/-/mock-fs-4.13.4.tgz", + "integrity": "sha512-mXmM0o6lULPI8z3XNnQCpL0BGxPwx1Ul1wXYEPBGl4efShyxW2Rln0JOPEWGyZaYZMM6OVXM/15zUuFMY52ljg==", + "dev": true, "license": "MIT", "dependencies": { - "undici-types": "~6.21.0" + "@types/node": "*" + } + }, + "node_modules/@types/node": { + "version": "24.1.0", + "resolved": "https://registry.npmjs.org/@types/node/-/node-24.1.0.tgz", + "integrity": "sha512-ut5FthK5moxFKH2T1CUOC6ctR67rQRvvHdFLCD2Ql6KXmMuCrjsSsRI9UsLCm9M18BMwClv4pn327UvB7eeO1w==", + "license": "MIT", + "dependencies": { + "undici-types": "~7.8.0" } }, "node_modules/@types/normalize-package-data": { @@ -2517,9 +2497,9 @@ "license": "MIT" }, "node_modules/@types/react": { - "version": "19.1.8", - "resolved": 
"https://registry.npmjs.org/@types/react/-/react-19.1.8.tgz", - "integrity": "sha512-AwAfQ2Wa5bCx9WP8nZL2uMZWod7J7/JSplxbTmBQ5ms6QpqNYm672H0Vu9ZVKVngQ+ii4R/byguVEUZQyeg44g==", + "version": "19.1.9", + "resolved": "https://registry.npmjs.org/@types/react/-/react-19.1.9.tgz", + "integrity": "sha512-WmdoynAX8Stew/36uTSVMcLJJ1KRh6L3IZRx1PZ7qJtBqT3dYTgyDTx8H1qoRghErydW7xw9mSJ3wS//tCRpFA==", "devOptional": true, "license": "MIT", "dependencies": { @@ -2527,9 +2507,9 @@ } }, "node_modules/@types/react-dom": { - "version": "19.1.6", - "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-19.1.6.tgz", - "integrity": "sha512-4hOiT/dwO8Ko0gV1m/TJZYk3y0KBnY9vzDh7W+DH17b2HFSOGgdj33dhihPeuy3l0q23+4e+hoXHV6hCC4dCXw==", + "version": "19.1.7", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-19.1.7.tgz", + "integrity": "sha512-i5ZzwYpqjmrKenzkoLM2Ibzt6mAsM7pxB6BCIouEVVmgiqaMj1TjaK7hnA36hbW5aZv20kx7Lw6hWzPWg0Rurw==", "dev": true, "license": "MIT", "peerDependencies": { @@ -2538,7 +2518,7 @@ }, "node_modules/@types/semver": { "version": "7.7.0", - "resolved": "https://registry.npmmirror.com/@types/semver/-/semver-7.7.0.tgz", + "resolved": "https://registry.npmjs.org/@types/semver/-/semver-7.7.0.tgz", "integrity": "sha512-k107IF4+Xr7UHjwDc7Cfd6PRQfbdkiRabXGRjo07b4WyPahFBZCZ1sE+BNxYIJPPg73UkfOsVOLwqVc/6ETrIA==", "dev": true, "license": "MIT" @@ -2601,6 +2581,13 @@ "boxen": "^7.1.1" } }, + "node_modules/@types/uuid": { + "version": "10.0.0", + "resolved": "https://registry.npmjs.org/@types/uuid/-/uuid-10.0.0.tgz", + "integrity": "sha512-7gqG38EyHgyP1S+7+xomFtL+ZNHcKv6DwNaCZmJmo1vgMugyF3TCnXVg4t1uk89mLNwnLtnY3TpOpCOyp1/xHQ==", + "dev": true, + "license": "MIT" + }, "node_modules/@types/vscode": { "version": "1.102.0", "resolved": "https://registry.npmjs.org/@types/vscode/-/vscode-1.102.0.tgz", @@ -2636,17 +2623,17 @@ "license": "MIT" }, "node_modules/@typescript-eslint/eslint-plugin": { - "version": "8.35.0", - "resolved": 
"https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.35.0.tgz", - "integrity": "sha512-ijItUYaiWuce0N1SoSMrEd0b6b6lYkYt99pqCPfybd+HKVXtEvYhICfLdwp42MhiI5mp0oq7PKEL+g1cNiz/Eg==", + "version": "8.38.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.38.0.tgz", + "integrity": "sha512-CPoznzpuAnIOl4nhj4tRr4gIPj5AfKgkiJmGQDaq+fQnRJTYlcBjbX3wbciGmpoPf8DREufuPRe1tNMZnGdanA==", "dev": true, "license": "MIT", "dependencies": { "@eslint-community/regexpp": "^4.10.0", - "@typescript-eslint/scope-manager": "8.35.0", - "@typescript-eslint/type-utils": "8.35.0", - "@typescript-eslint/utils": "8.35.0", - "@typescript-eslint/visitor-keys": "8.35.0", + "@typescript-eslint/scope-manager": "8.38.0", + "@typescript-eslint/type-utils": "8.38.0", + "@typescript-eslint/utils": "8.38.0", + "@typescript-eslint/visitor-keys": "8.38.0", "graphemer": "^1.4.0", "ignore": "^7.0.0", "natural-compare": "^1.4.0", @@ -2660,32 +2647,22 @@ "url": "https://opencollective.com/typescript-eslint" }, "peerDependencies": { - "@typescript-eslint/parser": "^8.35.0", + "@typescript-eslint/parser": "^8.38.0", "eslint": "^8.57.0 || ^9.0.0", "typescript": ">=4.8.4 <5.9.0" } }, - "node_modules/@typescript-eslint/eslint-plugin/node_modules/ignore": { - "version": "7.0.5", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-7.0.5.tgz", - "integrity": "sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 4" - } - }, "node_modules/@typescript-eslint/parser": { - "version": "8.35.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.35.0.tgz", - "integrity": "sha512-6sMvZePQrnZH2/cJkwRpkT7DxoAWh+g6+GFRK6bV3YQo7ogi3SX5rgF6099r5Q53Ma5qeT7LGmOmuIutF4t3lA==", + "version": "8.38.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.38.0.tgz", + "integrity": 
"sha512-Zhy8HCvBUEfBECzIl1PKqF4p11+d0aUJS1GeUiuqK9WmOug8YCmC4h4bjyBvMyAMI9sbRczmrYL5lKg/YMbrcQ==", "dev": true, "license": "MIT", "dependencies": { - "@typescript-eslint/scope-manager": "8.35.0", - "@typescript-eslint/types": "8.35.0", - "@typescript-eslint/typescript-estree": "8.35.0", - "@typescript-eslint/visitor-keys": "8.35.0", + "@typescript-eslint/scope-manager": "8.38.0", + "@typescript-eslint/types": "8.38.0", + "@typescript-eslint/typescript-estree": "8.38.0", + "@typescript-eslint/visitor-keys": "8.38.0", "debug": "^4.3.4" }, "engines": { @@ -2701,14 +2678,14 @@ } }, "node_modules/@typescript-eslint/project-service": { - "version": "8.35.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/project-service/-/project-service-8.35.0.tgz", - "integrity": "sha512-41xatqRwWZuhUMF/aZm2fcUsOFKNcG28xqRSS6ZVr9BVJtGExosLAm5A1OxTjRMagx8nJqva+P5zNIGt8RIgbQ==", + "version": "8.38.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/project-service/-/project-service-8.38.0.tgz", + "integrity": "sha512-dbK7Jvqcb8c9QfH01YB6pORpqX1mn5gDZc9n63Ak/+jD67oWXn3Gs0M6vddAN+eDXBCS5EmNWzbSxsn9SzFWWg==", "dev": true, "license": "MIT", "dependencies": { - "@typescript-eslint/tsconfig-utils": "^8.35.0", - "@typescript-eslint/types": "^8.35.0", + "@typescript-eslint/tsconfig-utils": "^8.38.0", + "@typescript-eslint/types": "^8.38.0", "debug": "^4.3.4" }, "engines": { @@ -2723,14 +2700,14 @@ } }, "node_modules/@typescript-eslint/scope-manager": { - "version": "8.35.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.35.0.tgz", - "integrity": "sha512-+AgL5+mcoLxl1vGjwNfiWq5fLDZM1TmTPYs2UkyHfFhgERxBbqHlNjRzhThJqz+ktBqTChRYY6zwbMwy0591AA==", + "version": "8.38.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.38.0.tgz", + "integrity": "sha512-WJw3AVlFFcdT9Ri1xs/lg8LwDqgekWXWhH3iAF+1ZM+QPd7oxQ6jvtW/JPwzAScxitILUIFs0/AnQ/UWHzbATQ==", "dev": true, "license": "MIT", 
"dependencies": { - "@typescript-eslint/types": "8.35.0", - "@typescript-eslint/visitor-keys": "8.35.0" + "@typescript-eslint/types": "8.38.0", + "@typescript-eslint/visitor-keys": "8.38.0" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -2741,9 +2718,9 @@ } }, "node_modules/@typescript-eslint/tsconfig-utils": { - "version": "8.35.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/tsconfig-utils/-/tsconfig-utils-8.35.0.tgz", - "integrity": "sha512-04k/7247kZzFraweuEirmvUj+W3bJLI9fX6fbo1Qm2YykuBvEhRTPl8tcxlYO8kZZW+HIXfkZNoasVb8EV4jpA==", + "version": "8.38.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/tsconfig-utils/-/tsconfig-utils-8.38.0.tgz", + "integrity": "sha512-Lum9RtSE3EroKk/bYns+sPOodqb2Fv50XOl/gMviMKNvanETUuUcC9ObRbzrJ4VSd2JalPqgSAavwrPiPvnAiQ==", "dev": true, "license": "MIT", "engines": { @@ -2758,14 +2735,15 @@ } }, "node_modules/@typescript-eslint/type-utils": { - "version": "8.35.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.35.0.tgz", - "integrity": "sha512-ceNNttjfmSEoM9PW87bWLDEIaLAyR+E6BoYJQ5PfaDau37UGca9Nyq3lBk8Bw2ad0AKvYabz6wxc7DMTO2jnNA==", + "version": "8.38.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.38.0.tgz", + "integrity": "sha512-c7jAvGEZVf0ao2z+nnz8BUaHZD09Agbh+DY7qvBQqLiz8uJzRgVPj5YvOh8I8uEiH8oIUGIfHzMwUcGVco/SJg==", "dev": true, "license": "MIT", "dependencies": { - "@typescript-eslint/typescript-estree": "8.35.0", - "@typescript-eslint/utils": "8.35.0", + "@typescript-eslint/types": "8.38.0", + "@typescript-eslint/typescript-estree": "8.38.0", + "@typescript-eslint/utils": "8.38.0", "debug": "^4.3.4", "ts-api-utils": "^2.1.0" }, @@ -2782,9 +2760,9 @@ } }, "node_modules/@typescript-eslint/types": { - "version": "8.35.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.35.0.tgz", - "integrity": 
"sha512-0mYH3emanku0vHw2aRLNGqe7EXh9WHEhi7kZzscrMDf6IIRUQ5Jk4wp1QrledE/36KtdZrVfKnE32eZCf/vaVQ==", + "version": "8.38.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.38.0.tgz", + "integrity": "sha512-wzkUfX3plUqij4YwWaJyqhiPE5UCRVlFpKn1oCRn2O1bJ592XxWJj8ROQ3JD5MYXLORW84063z3tZTb/cs4Tyw==", "dev": true, "license": "MIT", "engines": { @@ -2796,16 +2774,16 @@ } }, "node_modules/@typescript-eslint/typescript-estree": { - "version": "8.35.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.35.0.tgz", - "integrity": "sha512-F+BhnaBemgu1Qf8oHrxyw14wq6vbL8xwWKKMwTMwYIRmFFY/1n/9T/jpbobZL8vp7QyEUcC6xGrnAO4ua8Kp7w==", + "version": "8.38.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.38.0.tgz", + "integrity": "sha512-fooELKcAKzxux6fA6pxOflpNS0jc+nOQEEOipXFNjSlBS6fqrJOVY/whSn70SScHrcJ2LDsxWrneFoWYSVfqhQ==", "dev": true, "license": "MIT", "dependencies": { - "@typescript-eslint/project-service": "8.35.0", - "@typescript-eslint/tsconfig-utils": "8.35.0", - "@typescript-eslint/types": "8.35.0", - "@typescript-eslint/visitor-keys": "8.35.0", + "@typescript-eslint/project-service": "8.38.0", + "@typescript-eslint/tsconfig-utils": "8.38.0", + "@typescript-eslint/types": "8.38.0", + "@typescript-eslint/visitor-keys": "8.38.0", "debug": "^4.3.4", "fast-glob": "^3.3.2", "is-glob": "^4.0.3", @@ -2824,56 +2802,17 @@ "typescript": ">=4.8.4 <5.9.0" } }, - "node_modules/@typescript-eslint/typescript-estree/node_modules/brace-expansion": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", - "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "balanced-match": "^1.0.0" - } - }, - "node_modules/@typescript-eslint/typescript-estree/node_modules/minimatch": { - "version": "9.0.5", 
- "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", - "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", - "dev": true, - "license": "ISC", - "dependencies": { - "brace-expansion": "^2.0.1" - }, - "engines": { - "node": ">=16 || 14 >=14.17" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/@typescript-eslint/typescript-estree/node_modules/semver": { - "version": "7.7.2", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.2.tgz", - "integrity": "sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==", - "dev": true, - "license": "ISC", - "bin": { - "semver": "bin/semver.js" - }, - "engines": { - "node": ">=10" - } - }, "node_modules/@typescript-eslint/utils": { - "version": "8.35.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.35.0.tgz", - "integrity": "sha512-nqoMu7WWM7ki5tPgLVsmPM8CkqtoPUG6xXGeefM5t4x3XumOEKMoUZPdi+7F+/EotukN4R9OWdmDxN80fqoZeg==", + "version": "8.38.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.38.0.tgz", + "integrity": "sha512-hHcMA86Hgt+ijJlrD8fX0j1j8w4C92zue/8LOPAFioIno+W0+L7KqE8QZKCcPGc/92Vs9x36w/4MPTJhqXdyvg==", "dev": true, "license": "MIT", "dependencies": { "@eslint-community/eslint-utils": "^4.7.0", - "@typescript-eslint/scope-manager": "8.35.0", - "@typescript-eslint/types": "8.35.0", - "@typescript-eslint/typescript-estree": "8.35.0" + "@typescript-eslint/scope-manager": "8.38.0", + "@typescript-eslint/types": "8.38.0", + "@typescript-eslint/typescript-estree": "8.38.0" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -2888,13 +2827,13 @@ } }, "node_modules/@typescript-eslint/visitor-keys": { - "version": "8.35.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.35.0.tgz", - "integrity": 
"sha512-zTh2+1Y8ZpmeQaQVIc/ZZxsx8UzgKJyNg1PTvjzC7WMhPSVS8bfDX34k1SrwOf016qd5RU3az2UxUNue3IfQ5g==", + "version": "8.38.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.38.0.tgz", + "integrity": "sha512-pWrTcoFNWuwHlA9CvlfSsGWs14JxfN1TH25zM5L7o0pRLhsoZkDnTsXfQRJBEWJoV5DL0jf+Z+sxiud+K0mq1g==", "dev": true, "license": "MIT", "dependencies": { - "@typescript-eslint/types": "8.35.0", + "@typescript-eslint/types": "8.38.0", "eslint-visitor-keys": "^4.2.1" }, "engines": { @@ -2905,6 +2844,19 @@ "url": "https://opencollective.com/typescript-eslint" } }, + "node_modules/@typescript-eslint/visitor-keys/node_modules/eslint-visitor-keys": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.1.tgz", + "integrity": "sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, "node_modules/@vitest/coverage-v8": { "version": "3.2.4", "resolved": "https://registry.npmjs.org/@vitest/coverage-v8/-/coverage-v8-3.2.4.tgz", @@ -3099,9 +3051,9 @@ } }, "node_modules/agent-base": { - "version": "7.1.3", - "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.3.tgz", - "integrity": "sha512-jRR5wdylq8CkOe6hei19GGZnxM6rBGwFl3Bg0YItGDimvjGtAvdZk4Pu6Cl4u4Igsws4a1fd1Vq3ezrhn4KmFw==", + "version": "7.1.4", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.4.tgz", + "integrity": "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==", "license": "MIT", "engines": { "node": ">= 14" @@ -3147,6 +3099,15 @@ "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", "license": "MIT" }, + "node_modules/ansi-align/node_modules/is-fullwidth-code-point": { + "version": 
"3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, "node_modules/ansi-align/node_modules/string-width": { "version": "4.2.3", "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", @@ -3201,15 +3162,12 @@ } }, "node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", + "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", "license": "MIT", - "dependencies": { - "color-convert": "^2.0.1" - }, "engines": { - "node": ">=8" + "node": ">=12" }, "funding": { "url": "https://github.com/chalk/ansi-styles?sponsor=1" @@ -3222,6 +3180,17 @@ "dev": true, "license": "Python-2.0" }, + "node_modules/aria-query": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/aria-query/-/aria-query-5.3.0.tgz", + "integrity": "sha512-b0P0sZPKtyu8HkeRAfCq0IfURZK+SuwMjY1UXGBU27wpAiTwQAIlq56IbIO+ytk/JjS1fMR14ee5WBBfKi5J6A==", + "dev": true, + "license": "Apache-2.0", + "peer": true, + "dependencies": { + "dequal": "^2.0.3" + } + }, "node_modules/array-buffer-byte-length": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/array-buffer-byte-length/-/array-buffer-byte-length-1.0.2.tgz", @@ -3404,6 +3373,13 @@ "js-tokens": "^9.0.1" } }, + "node_modules/ast-v8-to-istanbul/node_modules/js-tokens": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-9.0.1.tgz", + "integrity": 
"sha512-mxa9E9ITFOt0ban3j6L5MpjwegGz6lBQmM1IJkWeBZGcMxto50+eWdjC/52xDbS2vy0k7vIMK0Fe2wfL9OQSpQ==", + "dev": true, + "license": "MIT" + }, "node_modules/async-function": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/async-function/-/async-function-1.0.0.tgz", @@ -3478,9 +3454,9 @@ "license": "MIT" }, "node_modules/bignumber.js": { - "version": "9.3.0", - "resolved": "https://registry.npmjs.org/bignumber.js/-/bignumber.js-9.3.0.tgz", - "integrity": "sha512-EM7aMFTXbptt/wZdMlBv2t8IViwQL+h6SLHosp8Yf0dqJMTnY6iL32opnAB6kAdL0SZPuvcAzFr31o0c/R3/RA==", + "version": "9.3.1", + "resolved": "https://registry.npmjs.org/bignumber.js/-/bignumber.js-9.3.1.tgz", + "integrity": "sha512-Ko0uX15oIUS7wJ3Rb30Fs6SkVbLmPBAKdlm7q9+ak9bbIeFf0MwuBsQV6z7+X768/cHsfg+WlysDWJcmthjsjQ==", "license": "MIT", "engines": { "node": "*" @@ -3528,27 +3504,36 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/boxen/node_modules/chalk": { - "version": "5.4.1", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-5.4.1.tgz", - "integrity": "sha512-zgVZuo2WcZgfUEmsn6eO3kINexW8RAE4maiQ8QNs8CtpPCSyMiYsULR3HQYkm3w8FIA3SberyMJMSldGsW+U3w==", + "node_modules/boxen/node_modules/emoji-regex": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", + "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", + "license": "MIT" + }, + "node_modules/boxen/node_modules/string-width": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", + "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", "license": "MIT", + "dependencies": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + }, "engines": { - "node": "^12.17.0 || ^14.13 || >=16.0.0" + "node": ">=12" }, "funding": { - "url": "https://github.com/chalk/chalk?sponsor=1" + "url": 
"https://github.com/sponsors/sindresorhus" } }, "node_modules/brace-expansion": { - "version": "1.1.12", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", - "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", - "dev": true, + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", + "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", "license": "MIT", "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" + "balanced-match": "^1.0.0" } }, "node_modules/braces": { @@ -3690,9 +3675,9 @@ } }, "node_modules/chai": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/chai/-/chai-5.2.0.tgz", - "integrity": "sha512-mCuXncKXk5iCLhfhwTc0izo0gtEmpz5CtG2y8GiOINBlMVS6v8TMRc5TaLWKS6692m9+dVVfzgeVxR5UxWHTYw==", + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/chai/-/chai-5.2.1.tgz", + "integrity": "sha512-5nFxhUrX0PqtyogoYOA8IPswy5sZFTOsBFl/9bNsmDLgsxYTzSZQJDPppDnZPTQbzSEm0hqGjWPzRemQCYbD6A==", "dev": true, "license": "MIT", "dependencies": { @@ -3703,36 +3688,26 @@ "pathval": "^2.0.0" }, "engines": { - "node": ">=12" + "node": ">=18" } }, "node_modules/chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "version": "5.4.1", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-5.4.1.tgz", + "integrity": "sha512-zgVZuo2WcZgfUEmsn6eO3kINexW8RAE4maiQ8QNs8CtpPCSyMiYsULR3HQYkm3w8FIA3SberyMJMSldGsW+U3w==", "license": "MIT", - "dependencies": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - }, "engines": { - "node": ">=10" + "node": "^12.17.0 || ^14.13 || >=16.0.0" }, "funding": { "url": "https://github.com/chalk/chalk?sponsor=1" } }, - 
"node_modules/chalk/node_modules/supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "license": "MIT", - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=8" - } + "node_modules/chardet": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/chardet/-/chardet-2.1.0.tgz", + "integrity": "sha512-bNFETTG/pM5ryzQ9Ad0lJOTa6HWD/YsScAR3EnCPZRPlQh77JocYktSHOUHelyhm8IARL+o4c4F1bP5KVOjiRA==", + "license": "MIT" }, "node_modules/check-error": { "version": "2.1.1", @@ -3805,36 +3780,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/cli-truncate/node_modules/ansi-styles": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", - "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/cli-truncate/node_modules/emoji-regex": { - "version": "10.4.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-10.4.0.tgz", - "integrity": "sha512-EC+0oUMY1Rqm4O6LLrgjtYDvcVYTy7chDnM4Q7030tP4Kwj3u/pR6gP9ygnp2CJMK5Gq+9Q2oqmrFJAz01DXjw==", - "license": "MIT" - }, - "node_modules/cli-truncate/node_modules/is-fullwidth-code-point": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-4.0.0.tgz", - "integrity": "sha512-O4L094N2/dZ7xqVdrXhh9r1KODPJpFms8B5sGdJLPy664AgvXsreZUyCQQNItZRDlYug4xStLjNp/sz3HvBowQ==", - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/cli-truncate/node_modules/slice-ansi": { "version": "5.0.0", 
"resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-5.0.0.tgz", @@ -3851,84 +3796,97 @@ "url": "https://github.com/chalk/slice-ansi?sponsor=1" } }, - "node_modules/cli-truncate/node_modules/string-width": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-7.2.0.tgz", - "integrity": "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==", - "license": "MIT", - "dependencies": { - "emoji-regex": "^10.3.0", - "get-east-asian-width": "^1.0.0", - "strip-ansi": "^7.1.0" - }, - "engines": { - "node": ">=18" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/cliui": { - "version": "9.0.1", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-9.0.1.tgz", - "integrity": "sha512-k7ndgKhwoQveBL+/1tqGJYNz097I7WOvwbmmU2AR5+magtbjPWQTS1C5vzGkBC8Ym8UWRzfKUzUUqFLypY4Q+w==", + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", + "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", "license": "ISC", "dependencies": { - "string-width": "^7.2.0", - "strip-ansi": "^7.1.0", - "wrap-ansi": "^9.0.0" + "string-width": "^4.2.0", + "strip-ansi": "^6.0.1", + "wrap-ansi": "^7.0.0" }, "engines": { - "node": ">=20" + "node": ">=12" + } + }, + "node_modules/cliui/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "license": "MIT", + "engines": { + "node": ">=8" } }, "node_modules/cliui/node_modules/ansi-styles": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", - "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", + "version": "4.3.0", + "resolved": 
"https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, "engines": { - "node": ">=12" + "node": ">=8" }, "funding": { "url": "https://github.com/chalk/ansi-styles?sponsor=1" } }, "node_modules/cliui/node_modules/emoji-regex": { - "version": "10.4.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-10.4.0.tgz", - "integrity": "sha512-EC+0oUMY1Rqm4O6LLrgjtYDvcVYTy7chDnM4Q7030tP4Kwj3u/pR6gP9ygnp2CJMK5Gq+9Q2oqmrFJAz01DXjw==", + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", "license": "MIT" }, + "node_modules/cliui/node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, "node_modules/cliui/node_modules/string-width": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-7.2.0.tgz", - "integrity": "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==", + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", "license": "MIT", "dependencies": { - "emoji-regex": "^10.3.0", - "get-east-asian-width": "^1.0.0", - "strip-ansi": "^7.1.0" + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" }, "engines": { - "node": ">=18" + "node": ">=8" + } + }, 
+ "node_modules/cliui/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "engines": { + "node": ">=8" } }, "node_modules/cliui/node_modules/wrap-ansi": { - "version": "9.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-9.0.0.tgz", - "integrity": "sha512-G8ura3S+3Z2G+mkgNRq8dqaFZAuxfsxpBB8OCTGRTCtp+l/v9nbFNmCUP1BZMts3G1142MsZfn6eeUKrr4PD1Q==", + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", "license": "MIT", "dependencies": { - "ansi-styles": "^6.2.1", - "string-width": "^7.0.0", - "strip-ansi": "^7.1.0" + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" }, "engines": { - "node": ">=18" + "node": ">=10" }, "funding": { "url": "https://github.com/chalk/wrap-ansi?sponsor=1" @@ -4003,111 +3961,50 @@ "url": "https://github.com/open-cli-tools/concurrently?sponsor=1" } }, - "node_modules/concurrently/node_modules/ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/concurrently/node_modules/cliui": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", - "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", - "dev": true, - "license": "ISC", - "dependencies": { - "string-width": 
"^4.2.0", - "strip-ansi": "^6.0.1", - "wrap-ansi": "^7.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/concurrently/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "dev": true, - "license": "MIT" - }, - "node_modules/concurrently/node_modules/string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "node_modules/concurrently/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", "dev": true, "license": "MIT", "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" + "color-convert": "^2.0.1" }, "engines": { "node": ">=8" - } - }, - "node_modules/concurrently/node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" }, - "engines": { - "node": ">=8" + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" } }, - "node_modules/concurrently/node_modules/wrap-ansi": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "node_modules/concurrently/node_modules/chalk": { + "version": "4.1.2", + "resolved": 
"https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", "dev": true, "license": "MIT", "dependencies": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" }, "engines": { "node": ">=10" }, "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + "url": "https://github.com/chalk/chalk?sponsor=1" } }, - "node_modules/concurrently/node_modules/yargs": { - "version": "17.7.2", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", - "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", + "node_modules/concurrently/node_modules/chalk/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", "dev": true, "license": "MIT", "dependencies": { - "cliui": "^8.0.1", - "escalade": "^3.1.1", - "get-caller-file": "^2.0.5", - "require-directory": "^2.1.1", - "string-width": "^4.2.3", - "y18n": "^5.0.5", - "yargs-parser": "^21.1.1" + "has-flag": "^4.0.0" }, "engines": { - "node": ">=12" - } - }, - "node_modules/concurrently/node_modules/yargs-parser": { - "version": "21.1.1", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", - "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", - "dev": true, - "license": "ISC", - "engines": { - "node": ">=12" + "node": ">=8" } }, "node_modules/config-chain": { @@ -4259,15 +4156,6 @@ "devOptional": true, "license": "MIT" }, - "node_modules/data-uri-to-buffer": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/data-uri-to-buffer/-/data-uri-to-buffer-4.0.1.tgz", - 
"integrity": "sha512-0R9ikRb668HB7QDxT1vkpuUBtqc53YyAwMwGeUFKRojY/NWKvdZ+9UYtRfGmhqNbRkTSVpMbmyhXipFFv2cb/A==", - "license": "MIT", - "engines": { - "node": ">= 12" - } - }, "node_modules/data-urls": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/data-urls/-/data-urls-5.0.0.tgz", @@ -4354,9 +4242,9 @@ } }, "node_modules/decimal.js": { - "version": "10.5.0", - "resolved": "https://registry.npmjs.org/decimal.js/-/decimal.js-10.5.0.tgz", - "integrity": "sha512-8vDa8Qxvr/+d94hSh5P3IJwI5t8/c0KsMp+g8bNw9cY2icONa5aPfvKeieW1WlG0WQYwwhJ7mjui2xtiePQSXw==", + "version": "10.6.0", + "resolved": "https://registry.npmjs.org/decimal.js/-/decimal.js-10.6.0.tgz", + "integrity": "sha512-YpgQiITW3JXGntzdUmyUR1V812Hn8T1YVXhCu+wO3OpS4eU9l4YdD3qjyiKdV6mvV29zapkMeD390UVEf2lkUg==", "dev": true, "license": "MIT" }, @@ -4627,9 +4515,9 @@ } }, "node_modules/dotenv": { - "version": "17.1.0", - "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-17.1.0.tgz", - "integrity": "sha512-tG9VUTJTuju6GcXgbdsOuRhupE8cb4mRgY5JLRCh4MtGoVo3/gfGUtOMwmProM6d0ba2mCFvv+WrpYJV6qgJXQ==", + "version": "17.2.1", + "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-17.2.1.tgz", + "integrity": "sha512-kQhDYKZecqnM0fCnzI5eIv5L4cAe/iRI+HqMbO/hbRdTAeXDG+M9FjipUxNfbARuEg4iHIbhnhs78BCHNbSxEQ==", "license": "BSD-2-Clause", "engines": { "node": ">=12" @@ -4674,9 +4562,9 @@ "license": "MIT" }, "node_modules/emoji-regex": { - "version": "9.2.2", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", - "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", + "version": "10.4.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-10.4.0.tgz", + "integrity": "sha512-EC+0oUMY1Rqm4O6LLrgjtYDvcVYTy7chDnM4Q7030tP4Kwj3u/pR6gP9ygnp2CJMK5Gq+9Q2oqmrFJAz01DXjw==", "license": "MIT" }, "node_modules/encodeurl": { @@ -4904,9 +4792,9 @@ } }, "node_modules/es-toolkit": { - "version": "1.39.5", - "resolved": 
"https://registry.npmjs.org/es-toolkit/-/es-toolkit-1.39.5.tgz", - "integrity": "sha512-z9V0qU4lx1TBXDNFWfAASWk6RNU6c6+TJBKE+FLIg8u0XJ6Yw58Hi0yX8ftEouj6p1QARRlXLFfHbIli93BdQQ==", + "version": "1.39.8", + "resolved": "https://registry.npmjs.org/es-toolkit/-/es-toolkit-1.39.8.tgz", + "integrity": "sha512-A8QO9TfF+rltS8BXpdu8OS+rpGgEdnRhqIVxO/ZmNvnXBYgOdSsxukT55ELyP94gZIntWJ+Li9QRrT2u1Kitpg==", "license": "MIT", "workspaces": [ "docs", @@ -4914,9 +4802,9 @@ ] }, "node_modules/esbuild": { - "version": "0.25.6", - "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.25.6.tgz", - "integrity": "sha512-GVuzuUwtdsghE3ocJ9Bs8PNoF13HNQ5TXbEi2AhvVb8xU1Iwt9Fos9FEamfoee+u/TOsn7GUWc04lz46n2bbTg==", + "version": "0.25.8", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.25.8.tgz", + "integrity": "sha512-vVC0USHGtMi8+R4Kz8rt6JhEWLxsv9Rnu/lGYbPR8u47B+DCBksq9JarW0zOO7bs37hyOK1l2/oqtbciutL5+Q==", "dev": true, "hasInstallScript": true, "license": "MIT", @@ -4927,32 +4815,32 @@ "node": ">=18" }, "optionalDependencies": { - "@esbuild/aix-ppc64": "0.25.6", - "@esbuild/android-arm": "0.25.6", - "@esbuild/android-arm64": "0.25.6", - "@esbuild/android-x64": "0.25.6", - "@esbuild/darwin-arm64": "0.25.6", - "@esbuild/darwin-x64": "0.25.6", - "@esbuild/freebsd-arm64": "0.25.6", - "@esbuild/freebsd-x64": "0.25.6", - "@esbuild/linux-arm": "0.25.6", - "@esbuild/linux-arm64": "0.25.6", - "@esbuild/linux-ia32": "0.25.6", - "@esbuild/linux-loong64": "0.25.6", - "@esbuild/linux-mips64el": "0.25.6", - "@esbuild/linux-ppc64": "0.25.6", - "@esbuild/linux-riscv64": "0.25.6", - "@esbuild/linux-s390x": "0.25.6", - "@esbuild/linux-x64": "0.25.6", - "@esbuild/netbsd-arm64": "0.25.6", - "@esbuild/netbsd-x64": "0.25.6", - "@esbuild/openbsd-arm64": "0.25.6", - "@esbuild/openbsd-x64": "0.25.6", - "@esbuild/openharmony-arm64": "0.25.6", - "@esbuild/sunos-x64": "0.25.6", - "@esbuild/win32-arm64": "0.25.6", - "@esbuild/win32-ia32": "0.25.6", - "@esbuild/win32-x64": "0.25.6" + 
"@esbuild/aix-ppc64": "0.25.8", + "@esbuild/android-arm": "0.25.8", + "@esbuild/android-arm64": "0.25.8", + "@esbuild/android-x64": "0.25.8", + "@esbuild/darwin-arm64": "0.25.8", + "@esbuild/darwin-x64": "0.25.8", + "@esbuild/freebsd-arm64": "0.25.8", + "@esbuild/freebsd-x64": "0.25.8", + "@esbuild/linux-arm": "0.25.8", + "@esbuild/linux-arm64": "0.25.8", + "@esbuild/linux-ia32": "0.25.8", + "@esbuild/linux-loong64": "0.25.8", + "@esbuild/linux-mips64el": "0.25.8", + "@esbuild/linux-ppc64": "0.25.8", + "@esbuild/linux-riscv64": "0.25.8", + "@esbuild/linux-s390x": "0.25.8", + "@esbuild/linux-x64": "0.25.8", + "@esbuild/netbsd-arm64": "0.25.8", + "@esbuild/netbsd-x64": "0.25.8", + "@esbuild/openbsd-arm64": "0.25.8", + "@esbuild/openbsd-x64": "0.25.8", + "@esbuild/openharmony-arm64": "0.25.8", + "@esbuild/sunos-x64": "0.25.8", + "@esbuild/win32-arm64": "0.25.8", + "@esbuild/win32-ia32": "0.25.8", + "@esbuild/win32-x64": "0.25.8" } }, "node_modules/escalade": { @@ -4996,20 +4884,20 @@ } }, "node_modules/eslint": { - "version": "9.29.0", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.29.0.tgz", - "integrity": "sha512-GsGizj2Y1rCWDu6XoEekL3RLilp0voSePurjZIkxL3wlm5o5EC9VpgaP7lrCvjnkuLvzFBQWB3vWB3K5KQTveQ==", + "version": "9.32.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-9.32.0.tgz", + "integrity": "sha512-LSehfdpgMeWcTZkWZVIJl+tkZ2nuSkyyB9C27MZqFWXuph7DvaowgcTvKqxvpLW1JZIk8PN7hFY3Rj9LQ7m7lg==", "dev": true, "license": "MIT", "dependencies": { "@eslint-community/eslint-utils": "^4.2.0", "@eslint-community/regexpp": "^4.12.1", - "@eslint/config-array": "^0.20.1", - "@eslint/config-helpers": "^0.2.1", - "@eslint/core": "^0.14.0", + "@eslint/config-array": "^0.21.0", + "@eslint/config-helpers": "^0.3.0", + "@eslint/core": "^0.15.0", "@eslint/eslintrc": "^3.3.1", - "@eslint/js": "9.29.0", - "@eslint/plugin-kit": "^0.3.1", + "@eslint/js": "9.32.0", + "@eslint/plugin-kit": "^0.3.4", "@humanfs/node": "^0.16.6", "@humanwhocodes/module-importer": 
"^1.0.1", "@humanwhocodes/retry": "^0.4.2", @@ -5057,9 +4945,9 @@ } }, "node_modules/eslint-config-prettier": { - "version": "10.1.5", - "resolved": "https://registry.npmjs.org/eslint-config-prettier/-/eslint-config-prettier-10.1.5.tgz", - "integrity": "sha512-zc1UmCpNltmVY34vuLRV61r1K27sWuX39E+uyUnY8xS2Bex88VV9cugG+UZbRSRGtGyFboj+D8JODyme1plMpw==", + "version": "10.1.8", + "resolved": "https://registry.npmjs.org/eslint-config-prettier/-/eslint-config-prettier-10.1.8.tgz", + "integrity": "sha512-82GZUjRS0p/jganf6q1rEO25VSoHH0hKPCTrgillPjdI/3bgBhAE1QzHrHTizjpRvy6pGAvKjDJtk2pF9NDq8w==", "dev": true, "license": "MIT", "bin": { @@ -5156,6 +5044,17 @@ "eslint": "^2 || ^3 || ^4 || ^5 || ^6 || ^7.2.0 || ^8 || ^9" } }, + "node_modules/eslint-plugin-import/node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, "node_modules/eslint-plugin-import/node_modules/debug": { "version": "3.2.7", "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", @@ -5166,6 +5065,29 @@ "ms": "^2.1.1" } }, + "node_modules/eslint-plugin-import/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/eslint-plugin-import/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + 
"dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, "node_modules/eslint-plugin-license-header": { "version": "0.8.0", "resolved": "https://registry.npmjs.org/eslint-plugin-license-header/-/eslint-plugin-license-header-0.8.0.tgz", @@ -5222,6 +5144,30 @@ "eslint": "^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0 || ^9.0.0" } }, + "node_modules/eslint-plugin-react/node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/eslint-plugin-react/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, "node_modules/eslint-plugin-react/node_modules/resolve": { "version": "2.0.0-next.5", "resolved": "https://registry.npmjs.org/resolve/-/resolve-2.0.0-next.5.tgz", @@ -5240,6 +5186,16 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/eslint-plugin-react/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, "node_modules/eslint-scope": { "version": "8.4.0", "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-8.4.0.tgz", @@ -5258,6 +5214,63 @@ } }, "node_modules/eslint-visitor-keys": { + "version": "3.4.3", + 
"resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", + "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/eslint/node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/eslint/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/eslint/node_modules/eslint-visitor-keys": { "version": "4.2.1", "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.1.tgz", "integrity": 
"sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ==", @@ -5270,6 +5283,42 @@ "url": "https://opencollective.com/eslint" } }, + "node_modules/eslint/node_modules/ignore": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", + "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/eslint/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/eslint/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/espree": { "version": "10.4.0", "resolved": "https://registry.npmjs.org/espree/-/espree-10.4.0.tgz", @@ -5288,6 +5337,19 @@ "url": "https://opencollective.com/eslint" } }, + "node_modules/espree/node_modules/eslint-visitor-keys": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-4.2.1.tgz", + "integrity": "sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^18.18.0 || ^20.9.0 || >=21.1.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, "node_modules/esquery": { "version": "1.6.0", "resolved": 
"https://registry.npmjs.org/esquery/-/esquery-1.6.0.tgz", @@ -5375,9 +5437,9 @@ } }, "node_modules/expect-type": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/expect-type/-/expect-type-1.2.1.tgz", - "integrity": "sha512-/kP8CAwxzLVEeFrMm4kMmy4CCDlpipyA7MYLVrdJIkV0fYF0UaigQHRsxHiuY/GEea+bh4KSv3TIlgr+2UL6bw==", + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/expect-type/-/expect-type-1.2.2.tgz", + "integrity": "sha512-JhFGDVJ7tmDJItKhYgJCGLOWjuK9vPxiXoUFLwLDc99NlmklilbiQJwoctZtt13+xMw91MCk/REan6MWHqDjyA==", "dev": true, "license": "Apache-2.0", "engines": { @@ -5522,29 +5584,6 @@ "reusify": "^1.0.4" } }, - "node_modules/fetch-blob": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/fetch-blob/-/fetch-blob-3.2.0.tgz", - "integrity": "sha512-7yAQpD2UMJzLi1Dqv7qFYnPbaPx7ZfFK6PiIxQ4PfkGPyNyl2Ugx+a/umUonmKqjhM4DnfbMvdX6otXq83soQQ==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/jimmywarting" - }, - { - "type": "paypal", - "url": "https://paypal.me/jimmywarting" - } - ], - "license": "MIT", - "dependencies": { - "node-domexception": "^1.0.0", - "web-streams-polyfill": "^3.0.3" - }, - "engines": { - "node": "^12.20 || >= 14.13" - } - }, "node_modules/figures": { "version": "6.1.0", "resolved": "https://registry.npmjs.org/figures/-/figures-6.1.0.tgz", @@ -5684,18 +5723,6 @@ "url": "https://github.com/sponsors/isaacs" } }, - "node_modules/formdata-polyfill": { - "version": "4.0.10", - "resolved": "https://registry.npmjs.org/formdata-polyfill/-/formdata-polyfill-4.0.10.tgz", - "integrity": "sha512-buewHzMvYL29jdeQTVILecSaZKnt/RJWjoZCF5OW60Z67/GmSLBkOFM7qh1PI3zFNtJbaZL5eQu1vLfazOwj4g==", - "license": "MIT", - "dependencies": { - "fetch-blob": "^3.1.2" - }, - "engines": { - "node": ">=12.20.0" - } - }, "node_modules/forwarded": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", @@ -5770,34 +5797,6 @@ } }, "node_modules/gaxios": { - "version": "7.1.1", 
- "resolved": "https://registry.npmjs.org/gaxios/-/gaxios-7.1.1.tgz", - "integrity": "sha512-Odju3uBUJyVCkW64nLD4wKLhbh93bh6vIg/ZIXkWiLPBrdgtc65+tls/qml+un3pr6JqYVFDZbbmLDQT68rTOQ==", - "license": "Apache-2.0", - "dependencies": { - "extend": "^3.0.2", - "https-proxy-agent": "^7.0.1", - "node-fetch": "^3.3.2" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/gcp-metadata": { - "version": "6.1.1", - "resolved": "https://registry.npmjs.org/gcp-metadata/-/gcp-metadata-6.1.1.tgz", - "integrity": "sha512-a4tiq7E0/5fTjxPAaH4jpjkSv/uCaU2p5KC6HVGrvl0cDjA8iBZv4vv1gyzlmK0ZUKqwpOyQMKzZQe3lTit77A==", - "license": "Apache-2.0", - "dependencies": { - "gaxios": "^6.1.1", - "google-logging-utils": "^0.0.2", - "json-bigint": "^1.0.0" - }, - "engines": { - "node": ">=14" - } - }, - "node_modules/gcp-metadata/node_modules/gaxios": { "version": "6.7.1", "resolved": "https://registry.npmjs.org/gaxios/-/gaxios-6.7.1.tgz", "integrity": "sha512-LDODD4TMYx7XXdpwxAVRAIAuB0bzv0s+ywFonY46k126qzQHT9ygyoa9tncmOiQmmDrik65UYsEkv3lbfqQ3yQ==", @@ -5813,46 +5812,18 @@ "node": ">=14" } }, - "node_modules/gcp-metadata/node_modules/node-fetch": { - "version": "2.7.0", - "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", - "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", - "license": "MIT", + "node_modules/gcp-metadata": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/gcp-metadata/-/gcp-metadata-6.1.1.tgz", + "integrity": "sha512-a4tiq7E0/5fTjxPAaH4jpjkSv/uCaU2p5KC6HVGrvl0cDjA8iBZv4vv1gyzlmK0ZUKqwpOyQMKzZQe3lTit77A==", + "license": "Apache-2.0", "dependencies": { - "whatwg-url": "^5.0.0" + "gaxios": "^6.1.1", + "google-logging-utils": "^0.0.2", + "json-bigint": "^1.0.0" }, "engines": { - "node": "4.x || >=6.0.0" - }, - "peerDependencies": { - "encoding": "^0.1.0" - }, - "peerDependenciesMeta": { - "encoding": { - "optional": true - } - } - }, - 
"node_modules/gcp-metadata/node_modules/tr46": { - "version": "0.0.3", - "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", - "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==", - "license": "MIT" - }, - "node_modules/gcp-metadata/node_modules/webidl-conversions": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", - "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==", - "license": "BSD-2-Clause" - }, - "node_modules/gcp-metadata/node_modules/whatwg-url": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", - "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", - "license": "MIT", - "dependencies": { - "tr46": "~0.0.3", - "webidl-conversions": "^3.0.0" + "node": ">=14" } }, "node_modules/get-caller-file": { @@ -5964,30 +5935,6 @@ "node": ">=10.13.0" } }, - "node_modules/glob/node_modules/brace-expansion": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", - "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", - "license": "MIT", - "dependencies": { - "balanced-match": "^1.0.0" - } - }, - "node_modules/glob/node_modules/minimatch": { - "version": "9.0.5", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", - "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", - "license": "ISC", - "dependencies": { - "brace-expansion": "^2.0.1" - }, - "engines": { - "node": ">=16 || 14 >=14.17" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, "node_modules/global-directory": { "version": "4.0.1", "resolved": 
"https://registry.npmjs.org/global-directory/-/global-directory-4.0.1.tgz", @@ -6050,64 +5997,6 @@ "node": ">=14" } }, - "node_modules/google-auth-library/node_modules/gaxios": { - "version": "6.7.1", - "resolved": "https://registry.npmjs.org/gaxios/-/gaxios-6.7.1.tgz", - "integrity": "sha512-LDODD4TMYx7XXdpwxAVRAIAuB0bzv0s+ywFonY46k126qzQHT9ygyoa9tncmOiQmmDrik65UYsEkv3lbfqQ3yQ==", - "license": "Apache-2.0", - "dependencies": { - "extend": "^3.0.2", - "https-proxy-agent": "^7.0.1", - "is-stream": "^2.0.0", - "node-fetch": "^2.6.9", - "uuid": "^9.0.1" - }, - "engines": { - "node": ">=14" - } - }, - "node_modules/google-auth-library/node_modules/node-fetch": { - "version": "2.7.0", - "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", - "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", - "license": "MIT", - "dependencies": { - "whatwg-url": "^5.0.0" - }, - "engines": { - "node": "4.x || >=6.0.0" - }, - "peerDependencies": { - "encoding": "^0.1.0" - }, - "peerDependenciesMeta": { - "encoding": { - "optional": true - } - } - }, - "node_modules/google-auth-library/node_modules/tr46": { - "version": "0.0.3", - "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", - "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==", - "license": "MIT" - }, - "node_modules/google-auth-library/node_modules/webidl-conversions": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", - "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==", - "license": "BSD-2-Clause" - }, - "node_modules/google-auth-library/node_modules/whatwg-url": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", - "integrity": 
"sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", - "license": "MIT", - "dependencies": { - "tr46": "~0.0.3", - "webidl-conversions": "^3.0.0" - } - }, "node_modules/google-logging-utils": { "version": "0.0.2", "resolved": "https://registry.npmjs.org/google-logging-utils/-/google-logging-utils-0.0.2.tgz", @@ -6148,6 +6037,49 @@ "node": ">=10" } }, + "node_modules/gradient-string/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/gradient-string/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/gradient-string/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/graphemer": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz", @@ -6168,64 +6100,6 @@ "node": ">=14.0.0" } }, - "node_modules/gtoken/node_modules/gaxios": { - "version": "6.7.1", - "resolved": 
"https://registry.npmjs.org/gaxios/-/gaxios-6.7.1.tgz", - "integrity": "sha512-LDODD4TMYx7XXdpwxAVRAIAuB0bzv0s+ywFonY46k126qzQHT9ygyoa9tncmOiQmmDrik65UYsEkv3lbfqQ3yQ==", - "license": "Apache-2.0", - "dependencies": { - "extend": "^3.0.2", - "https-proxy-agent": "^7.0.1", - "is-stream": "^2.0.0", - "node-fetch": "^2.6.9", - "uuid": "^9.0.1" - }, - "engines": { - "node": ">=14" - } - }, - "node_modules/gtoken/node_modules/node-fetch": { - "version": "2.7.0", - "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", - "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", - "license": "MIT", - "dependencies": { - "whatwg-url": "^5.0.0" - }, - "engines": { - "node": "4.x || >=6.0.0" - }, - "peerDependencies": { - "encoding": "^0.1.0" - }, - "peerDependenciesMeta": { - "encoding": { - "optional": true - } - } - }, - "node_modules/gtoken/node_modules/tr46": { - "version": "0.0.3", - "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", - "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==", - "license": "MIT" - }, - "node_modules/gtoken/node_modules/webidl-conversions": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", - "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==", - "license": "BSD-2-Clause" - }, - "node_modules/gtoken/node_modules/whatwg-url": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", - "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", - "license": "MIT", - "dependencies": { - "tr46": "~0.0.3", - "webidl-conversions": "^3.0.0" - } - }, "node_modules/has-bigints": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/has-bigints/-/has-bigints-1.1.0.tgz", @@ -6327,16 
+6201,11 @@ } }, "node_modules/hosted-git-info": { - "version": "7.0.2", - "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-7.0.2.tgz", - "integrity": "sha512-puUZAUKT5m8Zzvs72XWy3HtvVbTWljRE66cP60bxJzAqf2DgICo7lYTY2IHUmLnNpjYvw5bvmoHvPc0QO2a62w==", - "license": "ISC", - "dependencies": { - "lru-cache": "^10.0.1" - }, - "engines": { - "node": "^16.14.0 || >=18.0.0" - } + "version": "2.8.9", + "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.8.9.tgz", + "integrity": "sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw==", + "dev": true, + "license": "ISC" }, "node_modules/html-encoding-sniffer": { "version": "4.0.0", @@ -6468,10 +6337,9 @@ } }, "node_modules/ignore": { - "version": "5.3.2", - "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", - "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", - "dev": true, + "version": "7.0.5", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-7.0.5.tgz", + "integrity": "sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg==", "license": "MIT", "engines": { "node": ">= 4" @@ -6556,9 +6424,9 @@ } }, "node_modules/ink": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/ink/-/ink-6.0.1.tgz", - "integrity": "sha512-vhhFrCodTHZAPPSdMYzLEbeI0Ug37R9j6yA0kLKok9kSK53lQtj/RJhEQJUjq6OwT4N33nxqSRd/7yXhEhVPIw==", + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/ink/-/ink-6.1.0.tgz", + "integrity": "sha512-YQ+lbMD79y3FBAJXXZnuRajLEgaMFp102361eY5NrBIEVCi9oFo7gNZU4z2LBWlcjZFiTt7jetlkIbKCCH4KJA==", "license": "MIT", "dependencies": { "@alcalzone/ansi-tokenize": "^0.1.3", @@ -6714,59 +6582,12 @@ } } }, - "node_modules/ink/node_modules/ansi-styles": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", - "integrity": 
"sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/ink/node_modules/chalk": { - "version": "5.4.1", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-5.4.1.tgz", - "integrity": "sha512-zgVZuo2WcZgfUEmsn6eO3kINexW8RAE4maiQ8QNs8CtpPCSyMiYsULR3HQYkm3w8FIA3SberyMJMSldGsW+U3w==", - "license": "MIT", - "engines": { - "node": "^12.17.0 || ^14.13 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/chalk/chalk?sponsor=1" - } - }, - "node_modules/ink/node_modules/emoji-regex": { - "version": "10.4.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-10.4.0.tgz", - "integrity": "sha512-EC+0oUMY1Rqm4O6LLrgjtYDvcVYTy7chDnM4Q7030tP4Kwj3u/pR6gP9ygnp2CJMK5Gq+9Q2oqmrFJAz01DXjw==", - "license": "MIT" - }, "node_modules/ink/node_modules/signal-exit": { "version": "3.0.7", "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", "license": "ISC" }, - "node_modules/ink/node_modules/string-width": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-7.2.0.tgz", - "integrity": "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==", - "license": "MIT", - "dependencies": { - "emoji-regex": "^10.3.0", - "get-east-asian-width": "^1.0.0", - "strip-ansi": "^7.1.0" - }, - "engines": { - "node": ">=18" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/ink/node_modules/type-fest": { "version": "4.41.0", "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.41.0.tgz", @@ -7061,12 +6882,15 @@ } }, "node_modules/is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": 
"https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-4.0.0.tgz", + "integrity": "sha512-O4L094N2/dZ7xqVdrXhh9r1KODPJpFms8B5sGdJLPy664AgvXsreZUyCQQNItZRDlYug4xStLjNp/sz3HvBowQ==", "license": "MIT", "engines": { - "node": ">=8" + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/is-generator-function": { @@ -7537,10 +7361,9 @@ } }, "node_modules/js-tokens": { - "version": "9.0.1", - "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-9.0.1.tgz", - "integrity": "sha512-mxa9E9ITFOt0ban3j6L5MpjwegGz6lBQmM1IJkWeBZGcMxto50+eWdjC/52xDbS2vy0k7vIMK0Fe2wfL9OQSpQ==", - "dev": true, + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", "license": "MIT" }, "node_modules/js-yaml": { @@ -7717,9 +7540,9 @@ } }, "node_modules/ky": { - "version": "1.8.1", - "resolved": "https://registry.npmjs.org/ky/-/ky-1.8.1.tgz", - "integrity": "sha512-7Bp3TpsE+L+TARSnnDpk3xg8Idi8RwSLdj6CMbNWoOARIrGrbuLGusV0dYwbZOm4bB3jHNxSw8Wk/ByDqJEnDw==", + "version": "1.8.2", + "resolved": "https://registry.npmjs.org/ky/-/ky-1.8.2.tgz", + "integrity": "sha512-XybQJ3d4Ea1kI27DoelE5ZCT3bSJlibYTtQuMsyzKox3TMyayw1asgQdl54WroAm+fIA3ZCr8zXW2RpR7qWVpA==", "license": "MIT", "engines": { "node": ">=18" @@ -7782,20 +7605,6 @@ "node": ">=4" } }, - "node_modules/load-json-file/node_modules/parse-json": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-4.0.0.tgz", - "integrity": "sha512-aOIos8bujGN93/8Ox/jPLh7RwVnPEysynVFE+fQZyg6jKELEHwzgKdLRFHUgXJL6kylijVSBC4BvN9OmsB48Rw==", - "dev": true, - "license": 
"MIT", - "dependencies": { - "error-ex": "^1.3.1", - "json-parse-better-errors": "^1.0.1" - }, - "engines": { - "node": ">=4" - } - }, "node_modules/locate-path": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", @@ -7849,16 +7658,10 @@ "loose-envify": "cli.js" } }, - "node_modules/loose-envify/node_modules/js-tokens": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", - "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", - "license": "MIT" - }, "node_modules/loupe": { - "version": "3.1.4", - "resolved": "https://registry.npmjs.org/loupe/-/loupe-3.1.4.tgz", - "integrity": "sha512-wJzkKwJrheKtknCOKNEtDK4iqg/MxmZheEMtSTYvnzRdEYaZzmgH976nenp8WdJRdx5Vc1X/9MO0Oszl6ezeXg==", + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/loupe/-/loupe-3.2.0.tgz", + "integrity": "sha512-2NCfZcT5VGVNX9mSZIxLRkEAegDGBpuQZBy13desuHeVORmBDyAET4TkJr4SjqQy3A8JDofMN6LpkK8Xcm/dlw==", "dev": true, "license": "MIT" }, @@ -7932,19 +7735,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/make-dir/node_modules/semver": { - "version": "7.7.2", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.2.tgz", - "integrity": "sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==", - "dev": true, - "license": "ISC", - "bin": { - "semver": "bin/semver.js" - }, - "engines": { - "node": ">=10" - } - }, "node_modules/math-intrinsics": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", @@ -7964,9 +7754,9 @@ } }, "node_modules/memfs": { - "version": "4.17.2", - "resolved": "https://registry.npmjs.org/memfs/-/memfs-4.17.2.tgz", - "integrity": "sha512-NgYhCOWgovOXSzvYgUW0LQ7Qy72rWQMGGFJDoWg4G30RHd3z77VbYdtJ4fembJXBy8pMIUA31XNAupobOQlwdg==", + "version": "4.28.1", + "resolved": 
"https://registry.npmjs.org/memfs/-/memfs-4.28.1.tgz", + "integrity": "sha512-moZpQdp7bzWXO6H08h4vpKZ4Cymd2G6AuND7UG7ErBxr5pDntycGpECJB7N8ZIF8PA8HrKno8k1Rr0VOfRbMcA==", "dev": true, "license": "Apache-2.0", "dependencies": { @@ -8058,16 +7848,18 @@ } }, "node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "dev": true, + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", "license": "ISC", "dependencies": { - "brace-expansion": "^1.1.7" + "brace-expansion": "^2.0.1" }, "engines": { - "node": "*" + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" } }, "node_modules/minimist": { @@ -8088,6 +7880,16 @@ "node": ">=16 || 14 >=14.17" } }, + "node_modules/mock-fs": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/mock-fs/-/mock-fs-5.5.0.tgz", + "integrity": "sha512-d/P1M/RacgM3dB0sJ8rjeRNXxtapkPCUnMGmIN0ixJ16F/E4GUZCvWcSGfWGz8eaXYvn1s9baUwNjI4LOPEjiA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12.0.0" + } + }, "node_modules/module-details-from-path": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/module-details-from-path/-/module-details-from-path-1.0.4.tgz", @@ -8142,68 +7944,69 @@ "dev": true, "license": "MIT" }, - "node_modules/node-domexception": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/node-domexception/-/node-domexception-1.0.0.tgz", - "integrity": "sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==", - "deprecated": "Use your platform's native DOMException instead", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/jimmywarting" - 
}, - { - "type": "github", - "url": "https://paypal.me/jimmywarting" - } - ], - "license": "MIT", - "engines": { - "node": ">=10.5.0" - } - }, "node_modules/node-fetch": { - "version": "3.3.2", - "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-3.3.2.tgz", - "integrity": "sha512-dRB78srN/l6gqWulah9SrxeYnxeddIG30+GOqK/9OlLVyLg3HPnr6SqOWTWOXKRwC2eGYCkZ59NNuSgvSrpgOA==", + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", + "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", "license": "MIT", "dependencies": { - "data-uri-to-buffer": "^4.0.0", - "fetch-blob": "^3.1.4", - "formdata-polyfill": "^4.0.10" + "whatwg-url": "^5.0.0" }, "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + "node": "4.x || >=6.0.0" }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/node-fetch" + "peerDependencies": { + "encoding": "^0.1.0" + }, + "peerDependenciesMeta": { + "encoding": { + "optional": true + } + } + }, + "node_modules/node-fetch/node_modules/tr46": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", + "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==", + "license": "MIT" + }, + "node_modules/node-fetch/node_modules/webidl-conversions": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", + "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==", + "license": "BSD-2-Clause" + }, + "node_modules/node-fetch/node_modules/whatwg-url": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", + "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", + "license": "MIT", + "dependencies": { + "tr46": "~0.0.3", + 
"webidl-conversions": "^3.0.0" } }, "node_modules/normalize-package-data": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-6.0.2.tgz", - "integrity": "sha512-V6gygoYb/5EmNI+MEGrWkC+e6+Rr7mTmfHrxDbLzxQogBkgzo76rkok0Am6thgSF7Mv2nLOajAJj5vDJZEFn7g==", + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.5.0.tgz", + "integrity": "sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==", + "dev": true, "license": "BSD-2-Clause", "dependencies": { - "hosted-git-info": "^7.0.0", - "semver": "^7.3.5", - "validate-npm-package-license": "^3.0.4" - }, - "engines": { - "node": "^16.14.0 || >=18.0.0" + "hosted-git-info": "^2.1.4", + "resolve": "^1.10.0", + "semver": "2 || 3 || 4 || 5", + "validate-npm-package-license": "^3.0.1" } }, "node_modules/normalize-package-data/node_modules/semver": { - "version": "7.7.2", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.2.tgz", - "integrity": "sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==", + "version": "5.7.2", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz", + "integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==", + "dev": true, "license": "ISC", "bin": { - "semver": "bin/semver.js" - }, - "engines": { - "node": ">=10" + "semver": "bin/semver" } }, "node_modules/npm-run-all": { @@ -8245,6 +8048,17 @@ "node": ">=4" } }, + "node_modules/npm-run-all/node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, 
"node_modules/npm-run-all/node_modules/chalk": { "version": "2.4.2", "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", @@ -8314,24 +8128,17 @@ "node": ">=4" } }, - "node_modules/npm-run-all/node_modules/hosted-git-info": { - "version": "2.8.9", - "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.8.9.tgz", - "integrity": "sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw==", + "node_modules/npm-run-all/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", "dev": true, - "license": "ISC" - }, - "node_modules/npm-run-all/node_modules/normalize-package-data": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.5.0.tgz", - "integrity": "sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==", - "dev": true, - "license": "BSD-2-Clause", + "license": "ISC", "dependencies": { - "hosted-git-info": "^2.1.4", - "resolve": "^1.10.0", - "semver": "2 || 3 || 4 || 5", - "validate-npm-package-license": "^3.0.1" + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" } }, "node_modules/npm-run-all/node_modules/path-key": { @@ -8344,21 +8151,6 @@ "node": ">=4" } }, - "node_modules/npm-run-all/node_modules/read-pkg": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-3.0.0.tgz", - "integrity": "sha512-BLq/cCO9two+lBgiTYNqD6GdtK8s4NpaWrl6/rCO9w0TUS8oJl7cmToOZfRYllKTISY6nt1U7jQ53brmKqY6BA==", - "dev": true, - "license": "MIT", - "dependencies": { - "load-json-file": "^4.0.0", - "normalize-package-data": "^2.3.2", - "path-type": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, "node_modules/npm-run-all/node_modules/semver": { "version": "5.7.2", "resolved": 
"https://registry.npmjs.org/semver/-/semver-5.7.2.tgz", @@ -8419,9 +8211,9 @@ } }, "node_modules/nwsapi": { - "version": "2.2.20", - "resolved": "https://registry.npmjs.org/nwsapi/-/nwsapi-2.2.20.tgz", - "integrity": "sha512-/ieB+mDe4MrrKMT8z+mQL8klXydZWGR5Dowt4RAGKbJ3kIGEx3X4ljUo+6V73IXtUPWgfOlU5B9MlGxFO5T+cA==", + "version": "2.2.21", + "resolved": "https://registry.npmjs.org/nwsapi/-/nwsapi-2.2.21.tgz", + "integrity": "sha512-o6nIY3qwiSXl7/LuOU0Dmuctd34Yay0yeuZRLFmDPrrdHpXKFndPj3hM+YEPVHYC5fx2otBx4Ilc/gyYSAUaIA==", "dev": true, "license": "MIT" }, @@ -8583,15 +8375,15 @@ } }, "node_modules/open": { - "version": "10.1.2", - "resolved": "https://registry.npmjs.org/open/-/open-10.1.2.tgz", - "integrity": "sha512-cxN6aIDPz6rm8hbebcP7vrQNhvRcveZoJU72Y7vskh4oIm+BZwBECnx5nTmrlres1Qapvx27Qo1Auukpf8PKXw==", + "version": "10.2.0", + "resolved": "https://registry.npmjs.org/open/-/open-10.2.0.tgz", + "integrity": "sha512-YgBpdJHPyQ2UE5x+hlSXcnejzAvD0b22U2OuAP+8OnlJT+PjWPxtgmGqKKc+RgTM63U9gN0YzrYc71R2WT/hTA==", "license": "MIT", "dependencies": { "default-browser": "^5.2.1", "define-lazy-prop": "^3.0.0", "is-inside-container": "^1.0.0", - "is-wsl": "^3.1.0" + "wsl-utils": "^0.1.0" }, "engines": { "node": ">=18" @@ -8601,9 +8393,9 @@ } }, "node_modules/openai": { - "version": "5.8.1", - "resolved": "https://registry.npmmirror.com/openai/-/openai-5.8.1.tgz", - "integrity": "sha512-+qp4vQjJs43pzMSb6quTYslOhVE0c0c7j4YMoEks83BnusG23UrsWn3Hey6/8mwYadY05KipLvbp+PTO4jxO9w==", + "version": "5.11.0", + "resolved": "https://registry.npmjs.org/openai/-/openai-5.11.0.tgz", + "integrity": "sha512-+AuTc5pVjlnTuA9zvn8rA/k+1RluPIx9AD4eDcnutv6JNwHHZxIhkFy+tmMKCvmMFDQzfA/r1ujvPWB19DQkYg==", "license": "Apache-2.0", "bin": { "openai": "bin/cli" @@ -8713,18 +8505,6 @@ "integrity": "sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==", "license": "BlueOak-1.0.0" }, - "node_modules/package-json/node_modules/semver": { - "version": "7.7.2", - "resolved": 
"https://registry.npmjs.org/semver/-/semver-7.7.2.tgz", - "integrity": "sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==", - "license": "ISC", - "bin": { - "semver": "bin/semver.js" - }, - "engines": { - "node": ">=10" - } - }, "node_modules/parent-module": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", @@ -8739,32 +8519,17 @@ } }, "node_modules/parse-json": { - "version": "8.3.0", - "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-8.3.0.tgz", - "integrity": "sha512-ybiGyvspI+fAoRQbIPRddCcSTV9/LsJbf0e/S85VLowVGzRmokfneg2kwVW/KU5rOXrPSbF1qAKPMgNTqqROQQ==", + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-4.0.0.tgz", + "integrity": "sha512-aOIos8bujGN93/8Ox/jPLh7RwVnPEysynVFE+fQZyg6jKELEHwzgKdLRFHUgXJL6kylijVSBC4BvN9OmsB48Rw==", + "dev": true, "license": "MIT", "dependencies": { - "@babel/code-frame": "^7.26.2", - "index-to-position": "^1.1.0", - "type-fest": "^4.39.1" + "error-ex": "^1.3.1", + "json-parse-better-errors": "^1.0.1" }, "engines": { - "node": ">=18" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/parse-json/node_modules/type-fest": { - "version": "4.41.0", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.41.0.tgz", - "integrity": "sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA==", - "license": "(MIT OR CC0-1.0)", - "engines": { - "node": ">=16" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "node": ">=4" } }, "node_modules/parse5": { @@ -9013,9 +8778,9 @@ } }, "node_modules/prettier": { - "version": "3.6.1", - "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.6.1.tgz", - "integrity": "sha512-5xGWRa90Sp2+x1dQtNpIpeOQpTDBs9cZDmA/qs2vDNN2i18PdapqY7CmBeyLlMuGqXJRIOPaCaVZTLNQRWUH/A==", + "version": "3.6.2", + "resolved": 
"https://registry.npmjs.org/prettier/-/prettier-3.6.2.tgz", + "integrity": "sha512-I7AIg5boAr5R0FFtJ6rCfD+LFsWHp81dolrFD8S79U9tb8Az2nGrJncnMSnys+bpQJfRUzqs9hnA81OAA3hCuQ==", "dev": true, "license": "MIT", "bin": { @@ -9029,13 +8794,13 @@ } }, "node_modules/pretty-format": { - "version": "30.0.2", - "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-30.0.2.tgz", - "integrity": "sha512-yC5/EBSOrTtqhCKfLHqoUIAXVRZnukHPwWBJWR7h84Q3Be1DRQZLncwcfLoPA5RPQ65qfiCMqgYwdUuQ//eVpg==", + "version": "30.0.5", + "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-30.0.5.tgz", + "integrity": "sha512-D1tKtYvByrBkFLe2wHJl2bwMJIiT8rW+XA+TiataH79/FszLQMrpGEvzUVkzPau7OCO0Qnrhpe87PqtOAIB8Yw==", "dev": true, "license": "MIT", "dependencies": { - "@jest/schemas": "30.0.1", + "@jest/schemas": "30.0.5", "ansi-styles": "^5.2.0", "react-is": "^18.3.1" }, @@ -9056,13 +8821,6 @@ "url": "https://github.com/chalk/ansi-styles?sponsor=1" } }, - "node_modules/pretty-format/node_modules/react-is": { - "version": "18.3.1", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", - "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", - "dev": true, - "license": "MIT" - }, "node_modules/prop-types": { "version": "15.8.1", "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz", @@ -9074,6 +8832,12 @@ "react-is": "^16.13.1" } }, + "node_modules/prop-types/node_modules/react-is": { + "version": "16.13.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", + "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==", + "license": "MIT" + }, "node_modules/proto-list": { "version": "1.2.4", "resolved": "https://registry.npmjs.org/proto-list/-/proto-list-1.2.4.tgz", @@ -9177,6 +8941,10 @@ ], "license": "MIT" }, + "node_modules/qwen-code-vscode-ide-companion": { + "resolved": 
"packages/vscode-ide-companion", + "link": true + }, "node_modules/range-parser": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", @@ -9232,9 +9000,9 @@ } }, "node_modules/react": { - "version": "19.1.0", - "resolved": "https://registry.npmjs.org/react/-/react-19.1.0.tgz", - "integrity": "sha512-FS+XFBNvn3GTAWq26joslQgWNoFu08F4kl0J4CgdNKADkdSGXQyTCnKteIAJy96Br6YbpEU1LSzV5dYtjMkMDg==", + "version": "19.1.1", + "resolved": "https://registry.npmjs.org/react/-/react-19.1.1.tgz", + "integrity": "sha512-w8nqGImo45dmMIfljjMwOGtbmC/mk4CMYhWIicdSflH91J9TyCyczcPFXJzrZ/ZXcgGRFeP6BU0BEJTw6tZdfQ==", "license": "MIT", "engines": { "node": ">=0.10.0" @@ -9274,16 +9042,16 @@ } }, "node_modules/react-dom": { - "version": "19.1.0", - "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.1.0.tgz", - "integrity": "sha512-Xs1hdnE+DyKgeHJeJznQmYMIBG3TKIHJJT95Q58nHLSrElKlGQqDTR2HQ9fx5CN/Gk6Vh/kupBTDLU11/nDk/g==", + "version": "19.1.1", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.1.1.tgz", + "integrity": "sha512-Dlq/5LAZgF0Gaz6yiqZCf6VCcZs1ghAJyrsu84Q/GT0gV+mCxbfmKNoGRKBYMJ8IEdGPqu49YWXD02GCknEDkw==", "dev": true, "license": "MIT", "dependencies": { "scheduler": "^0.26.0" }, "peerDependencies": { - "react": "^19.1.0" + "react": "^19.1.1" } }, "node_modules/react-dom/node_modules/scheduler": { @@ -9294,9 +9062,10 @@ "license": "MIT" }, "node_modules/react-is": { - "version": "16.13.1", - "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", - "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==", + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", + "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", + "dev": true, "license": "MIT" }, "node_modules/react-reconciler": { @@ -9337,19 +9106,50 @@ "url": 
"https://github.com/sponsors/sindresorhus" } }, - "node_modules/read-package-up/node_modules/type-fest": { - "version": "4.41.0", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.41.0.tgz", - "integrity": "sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA==", - "license": "(MIT OR CC0-1.0)", + "node_modules/read-package-up/node_modules/hosted-git-info": { + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-7.0.2.tgz", + "integrity": "sha512-puUZAUKT5m8Zzvs72XWy3HtvVbTWljRE66cP60bxJzAqf2DgICo7lYTY2IHUmLnNpjYvw5bvmoHvPc0QO2a62w==", + "license": "ISC", + "dependencies": { + "lru-cache": "^10.0.1" + }, "engines": { - "node": ">=16" + "node": "^16.14.0 || >=18.0.0" + } + }, + "node_modules/read-package-up/node_modules/normalize-package-data": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-6.0.2.tgz", + "integrity": "sha512-V6gygoYb/5EmNI+MEGrWkC+e6+Rr7mTmfHrxDbLzxQogBkgzo76rkok0Am6thgSF7Mv2nLOajAJj5vDJZEFn7g==", + "license": "BSD-2-Clause", + "dependencies": { + "hosted-git-info": "^7.0.0", + "semver": "^7.3.5", + "validate-npm-package-license": "^3.0.4" + }, + "engines": { + "node": "^16.14.0 || >=18.0.0" + } + }, + "node_modules/read-package-up/node_modules/parse-json": { + "version": "8.3.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-8.3.0.tgz", + "integrity": "sha512-ybiGyvspI+fAoRQbIPRddCcSTV9/LsJbf0e/S85VLowVGzRmokfneg2kwVW/KU5rOXrPSbF1qAKPMgNTqqROQQ==", + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.26.2", + "index-to-position": "^1.1.0", + "type-fest": "^4.39.1" + }, + "engines": { + "node": ">=18" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/read-pkg": { + "node_modules/read-package-up/node_modules/read-pkg": { "version": "9.0.1", "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-9.0.1.tgz", 
"integrity": "sha512-9viLL4/n1BJUCT1NXVTdS1jtm80yDEgR5T4yCelII49Mbj0v1rZdKqj7zCiYdbB0CuCgdrvHcNogAKTFPBocFA==", @@ -9368,7 +9168,7 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/read-pkg/node_modules/type-fest": { + "node_modules/read-package-up/node_modules/type-fest": { "version": "4.41.0", "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.41.0.tgz", "integrity": "sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA==", @@ -9380,6 +9180,21 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/read-pkg": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-3.0.0.tgz", + "integrity": "sha512-BLq/cCO9two+lBgiTYNqD6GdtK8s4NpaWrl6/rCO9w0TUS8oJl7cmToOZfRYllKTISY6nt1U7jQ53brmKqY6BA==", + "dev": true, + "license": "MIT", + "dependencies": { + "load-json-file": "^4.0.0", + "normalize-package-data": "^2.3.2", + "path-type": "^3.0.0" + }, + "engines": { + "node": ">=4" + } + }, "node_modules/reflect.getprototypeof": { "version": "1.0.10", "resolved": "https://registry.npmjs.org/reflect.getprototypeof/-/reflect.getprototypeof-1.0.10.tgz", @@ -9557,9 +9372,9 @@ } }, "node_modules/rollup": { - "version": "4.44.0", - "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.44.0.tgz", - "integrity": "sha512-qHcdEzLCiktQIfwBq420pn2dP+30uzqYxv9ETm91wdt2R9AFcWfjNAmje4NWlnCIQ5RMTzVf0ZyisOKqHR6RwA==", + "version": "4.46.2", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.46.2.tgz", + "integrity": "sha512-WMmLFI+Boh6xbop+OAGo9cQ3OgX9MIg7xOQjn+pTCwOkk+FNDAeAemXkJ3HzDJrVXleLOFVa1ipuc1AmEx1Dwg==", "dev": true, "license": "MIT", "dependencies": { @@ -9573,26 +9388,26 @@ "npm": ">=8.0.0" }, "optionalDependencies": { - "@rollup/rollup-android-arm-eabi": "4.44.0", - "@rollup/rollup-android-arm64": "4.44.0", - "@rollup/rollup-darwin-arm64": "4.44.0", - "@rollup/rollup-darwin-x64": "4.44.0", - "@rollup/rollup-freebsd-arm64": "4.44.0", - 
"@rollup/rollup-freebsd-x64": "4.44.0", - "@rollup/rollup-linux-arm-gnueabihf": "4.44.0", - "@rollup/rollup-linux-arm-musleabihf": "4.44.0", - "@rollup/rollup-linux-arm64-gnu": "4.44.0", - "@rollup/rollup-linux-arm64-musl": "4.44.0", - "@rollup/rollup-linux-loongarch64-gnu": "4.44.0", - "@rollup/rollup-linux-powerpc64le-gnu": "4.44.0", - "@rollup/rollup-linux-riscv64-gnu": "4.44.0", - "@rollup/rollup-linux-riscv64-musl": "4.44.0", - "@rollup/rollup-linux-s390x-gnu": "4.44.0", - "@rollup/rollup-linux-x64-gnu": "4.44.0", - "@rollup/rollup-linux-x64-musl": "4.44.0", - "@rollup/rollup-win32-arm64-msvc": "4.44.0", - "@rollup/rollup-win32-ia32-msvc": "4.44.0", - "@rollup/rollup-win32-x64-msvc": "4.44.0", + "@rollup/rollup-android-arm-eabi": "4.46.2", + "@rollup/rollup-android-arm64": "4.46.2", + "@rollup/rollup-darwin-arm64": "4.46.2", + "@rollup/rollup-darwin-x64": "4.46.2", + "@rollup/rollup-freebsd-arm64": "4.46.2", + "@rollup/rollup-freebsd-x64": "4.46.2", + "@rollup/rollup-linux-arm-gnueabihf": "4.46.2", + "@rollup/rollup-linux-arm-musleabihf": "4.46.2", + "@rollup/rollup-linux-arm64-gnu": "4.46.2", + "@rollup/rollup-linux-arm64-musl": "4.46.2", + "@rollup/rollup-linux-loongarch64-gnu": "4.46.2", + "@rollup/rollup-linux-ppc64-gnu": "4.46.2", + "@rollup/rollup-linux-riscv64-gnu": "4.46.2", + "@rollup/rollup-linux-riscv64-musl": "4.46.2", + "@rollup/rollup-linux-s390x-gnu": "4.46.2", + "@rollup/rollup-linux-x64-gnu": "4.46.2", + "@rollup/rollup-linux-x64-musl": "4.46.2", + "@rollup/rollup-win32-arm64-msvc": "4.46.2", + "@rollup/rollup-win32-ia32-msvc": "4.46.2", + "@rollup/rollup-win32-x64-msvc": "4.46.2", "fsevents": "~2.3.2" } }, @@ -9781,13 +9596,15 @@ } }, "node_modules/semver": { - "version": "6.3.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", - "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", - "dev": true, + "version": "7.7.2", + "resolved": 
"https://registry.npmjs.org/semver/-/semver-7.7.2.tgz", + "integrity": "sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==", "license": "ISC", "bin": { "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" } }, "node_modules/send": { @@ -10043,18 +9860,6 @@ "url": "https://github.com/chalk/slice-ansi?sponsor=1" } }, - "node_modules/slice-ansi/node_modules/ansi-styles": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", - "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, "node_modules/slice-ansi/node_modules/is-fullwidth-code-point": { "version": "5.0.0", "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-5.0.0.tgz", @@ -10171,17 +9976,17 @@ } }, "node_modules/string-width": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", - "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-7.2.0.tgz", + "integrity": "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==", "license": "MIT", "dependencies": { - "eastasianwidth": "^0.2.0", - "emoji-regex": "^9.2.2", - "strip-ansi": "^7.0.1" + "emoji-regex": "^10.3.0", + "get-east-asian-width": "^1.0.0", + "strip-ansi": "^7.1.0" }, "engines": { - "node": ">=12" + "node": ">=18" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" @@ -10217,6 +10022,15 @@ "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", "license": "MIT" }, + 
"node_modules/string-width-cjs/node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, "node_modules/string-width-cjs/node_modules/strip-ansi": { "version": "6.0.1", "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", @@ -10418,6 +10232,13 @@ "url": "https://github.com/sponsors/antfu" } }, + "node_modules/strip-literal/node_modules/js-tokens": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-9.0.1.tgz", + "integrity": "sha512-mxa9E9ITFOt0ban3j6L5MpjwegGz6lBQmM1IJkWeBZGcMxto50+eWdjC/52xDbS2vy0k7vIMK0Fe2wfL9OQSpQ==", + "dev": true, + "license": "MIT" + }, "node_modules/stubborn-fs": { "version": "1.2.5", "resolved": "https://registry.npmjs.org/stubborn-fs/-/stubborn-fs-1.2.5.tgz", @@ -10540,32 +10361,6 @@ "node": ">=18" } }, - "node_modules/test-exclude/node_modules/brace-expansion": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", - "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "balanced-match": "^1.0.0" - } - }, - "node_modules/test-exclude/node_modules/minimatch": { - "version": "9.0.5", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", - "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", - "dev": true, - "license": "ISC", - "dependencies": { - "brace-expansion": "^2.0.1" - }, - "engines": { - "node": ">=16 || 14 >=14.17" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, "node_modules/thingies": { "version": "1.21.0", "resolved": 
"https://registry.npmjs.org/thingies/-/thingies-1.21.0.tgz", @@ -10638,9 +10433,9 @@ } }, "node_modules/tinyglobby/node_modules/picomatch": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.2.tgz", - "integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==", + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", "engines": { @@ -10961,15 +10756,16 @@ } }, "node_modules/typescript-eslint": { - "version": "8.35.0", - "resolved": "https://registry.npmjs.org/typescript-eslint/-/typescript-eslint-8.35.0.tgz", - "integrity": "sha512-uEnz70b7kBz6eg/j0Czy6K5NivaYopgxRjsnAJ2Fx5oTLo3wefTHIbL7AkQr1+7tJCRVpTs/wiM8JR/11Loq9A==", + "version": "8.38.0", + "resolved": "https://registry.npmjs.org/typescript-eslint/-/typescript-eslint-8.38.0.tgz", + "integrity": "sha512-FsZlrYK6bPDGoLeZRuvx2v6qrM03I0U0SnfCLPs/XCCPCFD80xU9Pg09H/K+XFa68uJuZo7l/Xhs+eDRg2l3hg==", "dev": true, "license": "MIT", "dependencies": { - "@typescript-eslint/eslint-plugin": "8.35.0", - "@typescript-eslint/parser": "8.35.0", - "@typescript-eslint/utils": "8.35.0" + "@typescript-eslint/eslint-plugin": "8.38.0", + "@typescript-eslint/parser": "8.38.0", + "@typescript-eslint/typescript-estree": "8.38.0", + "@typescript-eslint/utils": "8.38.0" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -11003,18 +10799,18 @@ } }, "node_modules/undici": { - "version": "7.10.0", - "resolved": "https://registry.npmjs.org/undici/-/undici-7.10.0.tgz", - "integrity": "sha512-u5otvFBOBZvmdjWLVW+5DAc9Nkq8f24g0O9oY7qw2JVIF1VocIFoyz9JFkuVOS2j41AufeO0xnlweJ2RLT8nGw==", + "version": "7.13.0", + "resolved": "https://registry.npmjs.org/undici/-/undici-7.13.0.tgz", + "integrity": 
"sha512-l+zSMssRqrzDcb3fjMkjjLGmuiiK2pMIcV++mJaAc9vhjSGpvM7h43QgP+OAMb1GImHmbPyG2tBXeuyG5iY4gA==", "license": "MIT", "engines": { "node": ">=20.18.1" } }, "node_modules/undici-types": { - "version": "6.21.0", - "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", - "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==", + "version": "7.8.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-7.8.0.tgz", + "integrity": "sha512-9UJ2xGDvQ43tYyVMpuHlsgApydB8ZKfVYTsLDhXkFL/6gfkp+U8xTGdh8pMJv1SpZna0zxG1DwsKZsreLbXBxw==", "license": "MIT" }, "node_modules/unicorn-magic": { @@ -11062,18 +10858,6 @@ "url": "https://github.com/yeoman/update-notifier?sponsor=1" } }, - "node_modules/update-notifier/node_modules/ansi-styles": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", - "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, "node_modules/update-notifier/node_modules/boxen": { "version": "8.0.1", "resolved": "https://registry.npmjs.org/boxen/-/boxen-8.0.1.tgz", @@ -11108,53 +10892,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/update-notifier/node_modules/chalk": { - "version": "5.4.1", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-5.4.1.tgz", - "integrity": "sha512-zgVZuo2WcZgfUEmsn6eO3kINexW8RAE4maiQ8QNs8CtpPCSyMiYsULR3HQYkm3w8FIA3SberyMJMSldGsW+U3w==", - "license": "MIT", - "engines": { - "node": "^12.17.0 || ^14.13 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/chalk/chalk?sponsor=1" - } - }, - "node_modules/update-notifier/node_modules/emoji-regex": { - "version": "10.4.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-10.4.0.tgz", - "integrity": 
"sha512-EC+0oUMY1Rqm4O6LLrgjtYDvcVYTy7chDnM4Q7030tP4Kwj3u/pR6gP9ygnp2CJMK5Gq+9Q2oqmrFJAz01DXjw==", - "license": "MIT" - }, - "node_modules/update-notifier/node_modules/semver": { - "version": "7.7.2", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.2.tgz", - "integrity": "sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==", - "license": "ISC", - "bin": { - "semver": "bin/semver.js" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/update-notifier/node_modules/string-width": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-7.2.0.tgz", - "integrity": "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==", - "license": "MIT", - "dependencies": { - "emoji-regex": "^10.3.0", - "get-east-asian-width": "^1.0.0", - "strip-ansi": "^7.1.0" - }, - "engines": { - "node": ">=18" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/update-notifier/node_modules/type-fest": { "version": "4.41.0", "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.41.0.tgz", @@ -11209,16 +10946,16 @@ } }, "node_modules/uuid": { - "version": "11.1.0", - "resolved": "https://registry.npmjs.org/uuid/-/uuid-11.1.0.tgz", - "integrity": "sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A==", + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-9.0.1.tgz", + "integrity": "sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA==", "funding": [ "https://github.com/sponsors/broofa", "https://github.com/sponsors/ctavan" ], "license": "MIT", "bin": { - "uuid": "dist/esm/bin/uuid" + "uuid": "dist/bin/uuid" } }, "node_modules/validate-npm-package-license": { @@ -11241,15 +10978,15 @@ } }, "node_modules/vite": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/vite/-/vite-7.0.0.tgz", - 
"integrity": "sha512-ixXJB1YRgDIw2OszKQS9WxGHKwLdCsbQNkpJN171udl6szi/rIySHL6/Os3s2+oE4P/FLD4dxg4mD7Wust+u5g==", + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/vite/-/vite-7.0.6.tgz", + "integrity": "sha512-MHFiOENNBd+Bd9uvc8GEsIzdkn1JxMmEeYX35tI3fv0sJBUTfW5tQsoaOwuY4KhBI09A3dUJ/DXf2yxPVPUceg==", "dev": true, "license": "MIT", "dependencies": { "esbuild": "^0.25.0", "fdir": "^6.4.6", - "picomatch": "^4.0.2", + "picomatch": "^4.0.3", "postcss": "^8.5.6", "rollup": "^4.40.0", "tinyglobby": "^0.2.14" @@ -11354,9 +11091,9 @@ } }, "node_modules/vite/node_modules/picomatch": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.2.tgz", - "integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==", + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", "engines": { @@ -11440,9 +11177,9 @@ } }, "node_modules/vitest/node_modules/picomatch": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.2.tgz", - "integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==", + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", "dev": true, "license": "MIT", "engines": { @@ -11465,15 +11202,6 @@ "node": ">=18" } }, - "node_modules/web-streams-polyfill": { - "version": "3.3.3", - "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-3.3.3.tgz", - "integrity": "sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw==", - "license": "MIT", - "engines": { - "node": ">= 8" - } - }, 
"node_modules/webidl-conversions": { "version": "7.0.0", "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-7.0.0.tgz", @@ -11663,6 +11391,29 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/widest-line/node_modules/emoji-regex": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", + "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", + "license": "MIT" + }, + "node_modules/widest-line/node_modules/string-width": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", + "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", + "license": "MIT", + "dependencies": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/window-size": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/window-size/-/window-size-1.1.1.tgz", @@ -11745,12 +11496,36 @@ "node": ">=8" } }, + "node_modules/wrap-ansi-cjs/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, "node_modules/wrap-ansi-cjs/node_modules/emoji-regex": { "version": "8.0.0", "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", "license": "MIT" }, + 
"node_modules/wrap-ansi-cjs/node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, "node_modules/wrap-ansi-cjs/node_modules/string-width": { "version": "4.2.3", "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", @@ -11777,16 +11552,27 @@ "node": ">=8" } }, - "node_modules/wrap-ansi/node_modules/ansi-styles": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", - "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", + "node_modules/wrap-ansi/node_modules/emoji-regex": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", + "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", + "license": "MIT" + }, + "node_modules/wrap-ansi/node_modules/string-width": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", + "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", "license": "MIT", + "dependencies": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + }, "engines": { "node": ">=12" }, "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" + "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/wrappy": { @@ -11816,6 +11602,21 @@ } } }, + "node_modules/wsl-utils": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/wsl-utils/-/wsl-utils-0.1.0.tgz", + "integrity": "sha512-h3Fbisa2nKGPxCpm89Hk33lBLsnaGBvctQopaBSOW/uIs6FTe1ATyAnKFJrzVs9vpGdsTe73WF3V4lIsk4Gacw==", + 
"license": "MIT", + "dependencies": { + "is-wsl": "^3.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/xdg-basedir": { "version": "5.1.0", "resolved": "https://registry.npmjs.org/xdg-basedir/-/xdg-basedir-5.1.0.tgz", @@ -11855,52 +11656,80 @@ } }, "node_modules/yargs": { - "version": "18.0.0", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-18.0.0.tgz", - "integrity": "sha512-4UEqdc2RYGHZc7Doyqkrqiln3p9X2DZVxaGbwhn2pi7MrRagKaOcIKe8L3OxYcbhXLgLFUS3zAYuQjKBQgmuNg==", + "version": "17.7.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", + "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", "license": "MIT", "dependencies": { - "cliui": "^9.0.1", + "cliui": "^8.0.1", "escalade": "^3.1.1", "get-caller-file": "^2.0.5", - "string-width": "^7.2.0", + "require-directory": "^2.1.1", + "string-width": "^4.2.3", "y18n": "^5.0.5", - "yargs-parser": "^22.0.0" + "yargs-parser": "^21.1.1" }, "engines": { - "node": "^20.19.0 || ^22.12.0 || >=23" + "node": ">=12" } }, "node_modules/yargs-parser": { - "version": "22.0.0", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-22.0.0.tgz", - "integrity": "sha512-rwu/ClNdSMpkSrUb+d6BRsSkLUq1fmfsY6TOpYzTwvwkg1/NRG85KBy3kq++A8LKQwX6lsu+aWad+2khvuXrqw==", + "version": "21.1.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", + "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", "license": "ISC", "engines": { - "node": "^20.19.0 || ^22.12.0 || >=23" + "node": ">=12" + } + }, + "node_modules/yargs/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "license": "MIT", + 
"engines": { + "node": ">=8" } }, "node_modules/yargs/node_modules/emoji-regex": { - "version": "10.4.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-10.4.0.tgz", - "integrity": "sha512-EC+0oUMY1Rqm4O6LLrgjtYDvcVYTy7chDnM4Q7030tP4Kwj3u/pR6gP9ygnp2CJMK5Gq+9Q2oqmrFJAz01DXjw==", + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", "license": "MIT" }, + "node_modules/yargs/node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, "node_modules/yargs/node_modules/string-width": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-7.2.0.tgz", - "integrity": "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==", + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", "license": "MIT", "dependencies": { - "emoji-regex": "^10.3.0", - "get-east-asian-width": "^1.0.0", - "strip-ansi": "^7.1.0" + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" }, "engines": { - "node": ">=18" + "node": ">=8" + } + }, + "node_modules/yargs/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" }, - 
"funding": { - "url": "https://github.com/sponsors/sindresorhus" + "engines": { + "node": ">=8" } }, "node_modules/yocto-queue": { @@ -11944,12 +11773,13 @@ "name": "@qwen-code/qwen-code", "version": "0.0.3", "dependencies": { + "@google/genai": "1.9.0", + "@iarna/toml": "^2.2.5", "@qwen-code/qwen-code-core": "file:../core", "@types/update-notifier": "^6.0.8", "command-exists": "^1.2.9", "diff": "^7.0.0", "dotenv": "^17.1.0", - "gaxios": "^7.1.1", "glob": "^10.4.1", "highlight.js": "^11.11.1", "ink": "^6.0.1", @@ -11968,7 +11798,8 @@ "strip-ansi": "^7.1.0", "strip-json-comments": "^3.1.1", "update-notifier": "^7.3.1", - "yargs": "^18.0.0" + "yargs": "^17.7.2", + "zod": "^3.23.8" }, "bin": { "qwen": "dist/index.js" @@ -11996,133 +11827,28 @@ "node": ">=20" } }, - "packages/cli/node_modules/@testing-library/dom": { - "version": "10.4.0", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "@babel/code-frame": "^7.10.4", - "@babel/runtime": "^7.12.5", - "@types/aria-query": "^5.0.1", - "aria-query": "5.3.0", - "chalk": "^4.1.0", - "dom-accessibility-api": "^0.5.9", - "lz-string": "^1.5.0", - "pretty-format": "^27.0.2" - }, - "engines": { - "node": ">=18" - } - }, - "packages/cli/node_modules/@testing-library/dom/node_modules/pretty-format": { - "version": "27.5.1", - "resolved": "https://registry.npmjs.org/pretty-format/-/pretty-format-27.5.1.tgz", - "integrity": "sha512-Qb1gy5OrP5+zDf2Bvnzdl3jsTf1qXVMazbvCoKhtKqVs4/YK4ozX4gKQJJVyNe+cajNPn0KoC0MC3FUmaHWEmQ==", - "dev": true, - "license": "MIT", - "peer": true, - "dependencies": { - "ansi-regex": "^5.0.1", - "ansi-styles": "^5.0.0", - "react-is": "^17.0.1" - }, - "engines": { - "node": "^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0" - } - }, - "packages/cli/node_modules/@testing-library/react": { - "version": "16.3.0", + "packages/cli/node_modules/@types/node": { + "version": "20.19.9", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.19.9.tgz", + "integrity": 
"sha512-cuVNgarYWZqxRJDQHEB58GEONhOK79QVR/qYx4S7kcUObQvUwvFnYxJuuHUKm2aieN9X3yZB4LZsuYNU1Qphsw==", "dev": true, "license": "MIT", "dependencies": { - "@babel/runtime": "^7.12.5" - }, - "engines": { - "node": ">=18" - }, - "peerDependencies": { - "@testing-library/dom": "^10.0.0", - "@types/react": "^18.0.0 || ^19.0.0", - "@types/react-dom": "^18.0.0 || ^19.0.0", - "react": "^18.0.0 || ^19.0.0", - "react-dom": "^18.0.0 || ^19.0.0" - }, - "peerDependenciesMeta": { - "@types/react": { - "optional": true - }, - "@types/react-dom": { - "optional": true - } + "undici-types": "~6.21.0" } }, - "packages/cli/node_modules/ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "packages/cli/node_modules/undici-types": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", + "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==", "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": ">=8" - } - }, - "packages/cli/node_modules/ansi-styles": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", - "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", - "dev": true, - "license": "MIT", - "peer": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "packages/cli/node_modules/aria-query": { - "version": "5.3.0", - "dev": true, - "license": "Apache-2.0", - "peer": true, - "dependencies": { - "dequal": "^2.0.3" - } - }, - "packages/cli/node_modules/emoji-regex": { - "version": "10.4.0", "license": "MIT" }, - "packages/cli/node_modules/react-is": { - "version": "17.0.2", - "resolved": 
"https://registry.npmjs.org/react-is/-/react-is-17.0.2.tgz", - "integrity": "sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w==", - "dev": true, - "license": "MIT", - "peer": true - }, - "packages/cli/node_modules/string-width": { - "version": "7.2.0", - "license": "MIT", - "dependencies": { - "emoji-regex": "^10.3.0", - "get-east-asian-width": "^1.0.0", - "strip-ansi": "^7.1.0" - }, - "engines": { - "node": ">=18" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "packages/core": { "name": "@qwen-code/qwen-code-core", "version": "0.0.3", "dependencies": { - "@google/genai": "1.8.0", + "@google/genai": "1.9.0", "@modelcontextprotocol/sdk": "^1.11.0", "@opentelemetry/api": "^1.9.0", "@opentelemetry/exporter-logs-otlp-grpc": "^0.52.0", @@ -12130,15 +11856,15 @@ "@opentelemetry/exporter-trace-otlp-grpc": "^0.52.0", "@opentelemetry/instrumentation-http": "^0.52.0", "@opentelemetry/sdk-node": "^0.52.0", - "@types/glob": "^8.1.0", "@types/html-to-text": "^9.0.4", "ajv": "^8.17.1", + "chardet": "^2.1.0", "diff": "^7.0.0", "dotenv": "^17.1.0", - "gaxios": "^7.1.1", "glob": "^10.4.5", "google-auth-library": "^9.11.0", "html-to-text": "^9.0.5", + "https-proxy-agent": "^7.0.6", "ignore": "^7.0.0", "micromatch": "^4.0.8", "open": "^10.1.2", @@ -12146,7 +11872,6 @@ "shell-quote": "^1.8.3", "simple-git": "^3.28.0", "strip-ansi": "^7.1.0", - "tiktoken": "^1.0.21", "undici": "^7.10.0", "ws": "^8.18.0" }, @@ -12179,13 +11904,6 @@ "url": "https://github.com/sponsors/epoberezkin" } }, - "packages/core/node_modules/ignore": { - "version": "7.0.5", - "license": "MIT", - "engines": { - "node": ">= 4" - } - }, "packages/core/node_modules/json-schema-traverse": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", @@ -12193,8 +11911,9 @@ "license": "MIT" }, "packages/vscode-ide-companion": { - "name": "@qwen-code/qwen-code-vscode-ide-companion", + 
"name": "qwen-code-vscode-ide-companion", "version": "0.0.3", + "license": "LICENSE", "dependencies": { "@modelcontextprotocol/sdk": "^1.15.1", "cors": "^2.8.5", @@ -12211,11 +11930,29 @@ "esbuild": "^0.25.3", "eslint": "^9.25.1", "npm-run-all": "^4.1.5", - "typescript": "^5.8.3" + "typescript": "^5.8.3", + "vitest": "^3.2.4" }, "engines": { "vscode": "^1.101.0" } + }, + "packages/vscode-ide-companion/node_modules/@types/node": { + "version": "20.19.9", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.19.9.tgz", + "integrity": "sha512-cuVNgarYWZqxRJDQHEB58GEONhOK79QVR/qYx4S7kcUObQvUwvFnYxJuuHUKm2aieN9X3yZB4LZsuYNU1Qphsw==", + "dev": true, + "license": "MIT", + "dependencies": { + "undici-types": "~6.21.0" + } + }, + "packages/vscode-ide-companion/node_modules/undici-types": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", + "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==", + "dev": true, + "license": "MIT" } } } diff --git a/package.json b/package.json index 7d94862b2..ee4290717 100644 --- a/package.json +++ b/package.json @@ -2,7 +2,7 @@ "name": "@qwen-code/qwen-code", "version": "0.0.3", "engines": { - "node": ">=20" + "node": ">=20.0.0" }, "type": "module", "workspaces": [ @@ -23,7 +23,8 @@ "auth": "npm run auth:npm && npm run auth:docker", "generate": "node scripts/generate-git-commit-info.js", "build": "node scripts/build.js", - "build:all": "npm run build && npm run build:sandbox", + "build:vscode": "node scripts/build_vscode_companion.js", + "build:all": "npm run build && npm run build:sandbox && npm run build:vscode", "build:packages": "npm run build --workspaces", "build:sandbox": "node scripts/build_sandbox.js --skip-npm-install-build", "bundle": "npm run generate && node esbuild.config.js && node scripts/copy_bundle_assets.js", @@ -59,8 +60,9 @@ "@types/micromatch": "^4.0.9", "@types/mime-types": "^3.0.1", 
"@types/minimatch": "^5.1.2", - "@types/semver": "^7.7.0", + "@types/mock-fs": "^4.13.4", "@types/shell-quote": "^1.7.5", + "@types/uuid": "^10.0.0", "@vitest/coverage-v8": "^3.1.1", "concurrently": "^9.2.0", "cross-env": "^7.0.3", @@ -76,10 +78,14 @@ "json": "^11.0.0", "lodash": "^4.17.21", "memfs": "^4.17.2", + "mock-fs": "^5.5.0", "prettier": "^3.5.3", "react-devtools-core": "^4.28.5", "typescript-eslint": "^8.30.1", "vitest": "^3.2.4", - "yargs": "^18.0.0" + "yargs": "^17.7.2" + }, + "dependencies": { + "tiktoken": "^1.0.21" } } diff --git a/packages/cli/package.json b/packages/cli/package.json index 00ad413e9..4ef4835f6 100644 --- a/packages/cli/package.json +++ b/packages/cli/package.json @@ -29,11 +29,12 @@ }, "dependencies": { "@qwen-code/qwen-code-core": "file:../core", + "@google/genai": "1.9.0", + "@iarna/toml": "^2.2.5", "@types/update-notifier": "^6.0.8", "command-exists": "^1.2.9", "diff": "^7.0.0", "dotenv": "^17.1.0", - "gaxios": "^7.1.1", "glob": "^10.4.1", "highlight.js": "^11.11.1", "ink": "^6.0.1", @@ -52,7 +53,8 @@ "strip-ansi": "^7.1.0", "strip-json-comments": "^3.1.1", "update-notifier": "^7.3.1", - "yargs": "^18.0.0" + "yargs": "^17.7.2", + "zod": "^3.23.8" }, "devDependencies": { "@babel/runtime": "^7.27.6", diff --git a/packages/cli/src/acp/acp.ts b/packages/cli/src/acp/acp.ts new file mode 100644 index 000000000..0a42fdcbd --- /dev/null +++ b/packages/cli/src/acp/acp.ts @@ -0,0 +1,464 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +/* ACP defines a schema for a simple (experimental) JSON-RPC protocol that allows GUI applications to interact with agents. 
*/ + +import { Icon } from '@qwen-code/qwen-code-core'; +import { WritableStream, ReadableStream } from 'node:stream/web'; + +export class ClientConnection implements Client { + #connection: Connection; + + constructor( + agent: (client: Client) => Agent, + input: WritableStream, + output: ReadableStream, + ) { + this.#connection = new Connection(agent(this), input, output); + } + + /** + * Streams part of an assistant response to the client + */ + async streamAssistantMessageChunk( + params: StreamAssistantMessageChunkParams, + ): Promise { + await this.#connection.sendRequest('streamAssistantMessageChunk', params); + } + + /** + * Request confirmation before running a tool + * + * When allowed, the client returns a [`ToolCallId`] which can be used + * to update the tool call's `status` and `content` as it runs. + */ + requestToolCallConfirmation( + params: RequestToolCallConfirmationParams, + ): Promise { + return this.#connection.sendRequest('requestToolCallConfirmation', params); + } + + /** + * pushToolCall allows the agent to start a tool call + * when it does not need to request permission to do so. + * + * The returned id can be used to update the UI for the tool + * call as needed. + */ + pushToolCall(params: PushToolCallParams): Promise { + return this.#connection.sendRequest('pushToolCall', params); + } + + /** + * updateToolCall allows the agent to update the content and status of the tool call. + * + * The new content replaces what is currently displayed in the UI. + * + * The [`ToolCallId`] is included in the response of + * `pushToolCall` or `requestToolCallConfirmation` respectively. 
+ */ + async updateToolCall(params: UpdateToolCallParams): Promise { + await this.#connection.sendRequest('updateToolCall', params); + } +} + +type AnyMessage = AnyRequest | AnyResponse; + +type AnyRequest = { + id: number; + method: string; + params?: unknown; +}; + +type AnyResponse = { jsonrpc: '2.0'; id: number } & Result; + +type Result = + | { + result: T; + } + | { + error: ErrorResponse; + }; + +type ErrorResponse = { + code: number; + message: string; + data?: { details?: string }; +}; + +type PendingResponse = { + resolve: (response: unknown) => void; + reject: (error: ErrorResponse) => void; +}; + +class Connection { + #pendingResponses: Map = new Map(); + #nextRequestId: number = 0; + #delegate: D; + #peerInput: WritableStream; + #writeQueue: Promise = Promise.resolve(); + #textEncoder: TextEncoder; + + constructor( + delegate: D, + peerInput: WritableStream, + peerOutput: ReadableStream, + ) { + this.#peerInput = peerInput; + this.#textEncoder = new TextEncoder(); + + this.#delegate = delegate; + this.#receive(peerOutput); + } + + async #receive(output: ReadableStream) { + let content = ''; + const decoder = new TextDecoder(); + for await (const chunk of output) { + content += decoder.decode(chunk, { stream: true }); + const lines = content.split('\n'); + content = lines.pop() || ''; + + for (const line of lines) { + const trimmedLine = line.trim(); + + if (trimmedLine) { + const message = JSON.parse(trimmedLine); + this.#processMessage(message); + } + } + } + } + + async #processMessage(message: AnyMessage) { + if ('method' in message) { + const response = await this.#tryCallDelegateMethod( + message.method, + message.params, + ); + + await this.#sendMessage({ + jsonrpc: '2.0', + id: message.id, + ...response, + }); + } else { + this.#handleResponse(message); + } + } + + async #tryCallDelegateMethod( + method: string, + params?: unknown, + ): Promise> { + const methodName = method as keyof D; + if (typeof this.#delegate[methodName] !== 'function') { + 
return RequestError.methodNotFound(method).toResult(); + } + + try { + const result = await this.#delegate[methodName](params); + return { result: result ?? null }; + } catch (error: unknown) { + if (error instanceof RequestError) { + return error.toResult(); + } + + let details; + + if (error instanceof Error) { + details = error.message; + } else if ( + typeof error === 'object' && + error != null && + 'message' in error && + typeof error.message === 'string' + ) { + details = error.message; + } + + return RequestError.internalError(details).toResult(); + } + } + + #handleResponse(response: AnyResponse) { + const pendingResponse = this.#pendingResponses.get(response.id); + if (pendingResponse) { + if ('result' in response) { + pendingResponse.resolve(response.result); + } else if ('error' in response) { + pendingResponse.reject(response.error); + } + this.#pendingResponses.delete(response.id); + } + } + + async sendRequest(method: string, params?: Req): Promise { + const id = this.#nextRequestId++; + const responsePromise = new Promise((resolve, reject) => { + this.#pendingResponses.set(id, { resolve, reject }); + }); + await this.#sendMessage({ jsonrpc: '2.0', id, method, params }); + return responsePromise as Promise; + } + + async #sendMessage(json: AnyMessage) { + const content = JSON.stringify(json) + '\n'; + this.#writeQueue = this.#writeQueue + .then(async () => { + const writer = this.#peerInput.getWriter(); + try { + await writer.write(this.#textEncoder.encode(content)); + } finally { + writer.releaseLock(); + } + }) + .catch((error) => { + // Continue processing writes on error + console.error('ACP write error:', error); + }); + return this.#writeQueue; + } +} + +export class RequestError extends Error { + data?: { details?: string }; + + constructor( + public code: number, + message: string, + details?: string, + ) { + super(message); + this.name = 'RequestError'; + if (details) { + this.data = { details }; + } + } + + static parseError(details?: 
string): RequestError { + return new RequestError(-32700, 'Parse error', details); + } + + static invalidRequest(details?: string): RequestError { + return new RequestError(-32600, 'Invalid request', details); + } + + static methodNotFound(details?: string): RequestError { + return new RequestError(-32601, 'Method not found', details); + } + + static invalidParams(details?: string): RequestError { + return new RequestError(-32602, 'Invalid params', details); + } + + static internalError(details?: string): RequestError { + return new RequestError(-32603, 'Internal error', details); + } + + toResult(): Result { + return { + error: { + code: this.code, + message: this.message, + data: this.data, + }, + }; + } +} + +// Protocol types + +export const LATEST_PROTOCOL_VERSION = '0.0.9'; + +export type AssistantMessageChunk = + | { + text: string; + } + | { + thought: string; + }; + +export type ToolCallConfirmation = + | { + description?: string | null; + type: 'edit'; + } + | { + description?: string | null; + type: 'execute'; + command: string; + rootCommand: string; + } + | { + description?: string | null; + type: 'mcp'; + serverName: string; + toolDisplayName: string; + toolName: string; + } + | { + description?: string | null; + type: 'fetch'; + urls: string[]; + } + | { + description: string; + type: 'other'; + }; + +export type ToolCallContent = + | { + type: 'markdown'; + markdown: string; + } + | { + type: 'diff'; + newText: string; + oldText: string | null; + path: string; + }; + +export type ToolCallStatus = 'running' | 'finished' | 'error'; + +export type ToolCallId = number; + +export type ToolCallConfirmationOutcome = + | 'allow' + | 'alwaysAllow' + | 'alwaysAllowMcpServer' + | 'alwaysAllowTool' + | 'reject' + | 'cancel'; + +/** + * A part in a user message + */ +export type UserMessageChunk = + | { + text: string; + } + | { + path: string; + }; + +export interface StreamAssistantMessageChunkParams { + chunk: AssistantMessageChunk; +} + +export interface 
RequestToolCallConfirmationParams { + confirmation: ToolCallConfirmation; + content?: ToolCallContent | null; + icon: Icon; + label: string; + locations?: ToolCallLocation[]; +} + +export interface ToolCallLocation { + line?: number | null; + path: string; +} + +export interface PushToolCallParams { + content?: ToolCallContent | null; + icon: Icon; + label: string; + locations?: ToolCallLocation[]; +} + +export interface UpdateToolCallParams { + content: ToolCallContent | null; + status: ToolCallStatus; + toolCallId: ToolCallId; +} + +export interface RequestToolCallConfirmationResponse { + id: ToolCallId; + outcome: ToolCallConfirmationOutcome; +} + +export interface PushToolCallResponse { + id: ToolCallId; +} + +export interface InitializeParams { + /** + * The version of the protocol that the client supports. + * This should be the latest version supported by the client. + */ + protocolVersion: string; +} + +export interface SendUserMessageParams { + chunks: UserMessageChunk[]; +} + +export interface InitializeResponse { + /** + * Indicates whether the agent is authenticated and + * ready to handle requests. + */ + isAuthenticated: boolean; + /** + * The version of the protocol that the agent supports. + * If the agent supports the requested version, it should respond with the same version. + * Otherwise, the agent should respond with the latest version it supports. + */ + protocolVersion: string; +} + +export interface Error { + code: number; + data?: unknown; + message: string; +} + +export interface Client { + streamAssistantMessageChunk( + params: StreamAssistantMessageChunkParams, + ): Promise; + + requestToolCallConfirmation( + params: RequestToolCallConfirmationParams, + ): Promise; + + pushToolCall(params: PushToolCallParams): Promise; + + updateToolCall(params: UpdateToolCallParams): Promise; +} + +export interface Agent { + /** + * Initializes the agent's state. 
It should be called before any other method, + * and no other methods should be called until it has completed. + * + * If the agent is not authenticated, then the client should prompt the user to authenticate, + * and then call the `authenticate` method. + * Otherwise the client can send other messages to the agent. + */ + initialize(params: InitializeParams): Promise; + + /** + * Begins the authentication process. + * + * This method should only be called if `initialize` indicates the user isn't already authenticated. + * The Promise MUST not resolve until authentication is complete. + */ + authenticate(): Promise; + + /** + * Allows the user to send a message to the agent. + * This method should complete after the agent is finished, during + * which time the agent may update the client by calling + * streamAssistantMessageChunk and other methods. + */ + sendUserMessage(params: SendUserMessageParams): Promise; + + /** + * Cancels the current generation. + */ + cancelSendMessage(): Promise; +} diff --git a/packages/cli/src/acp/acpPeer.ts b/packages/cli/src/acp/acpPeer.ts new file mode 100644 index 000000000..bebd95926 --- /dev/null +++ b/packages/cli/src/acp/acpPeer.ts @@ -0,0 +1,674 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { WritableStream, ReadableStream } from 'node:stream/web'; + +import { + AuthType, + Config, + GeminiChat, + ToolRegistry, + logToolCall, + ToolResult, + convertToFunctionResponse, + ToolCallConfirmationDetails, + ToolConfirmationOutcome, + clearCachedCredentialFile, + isNodeError, + getErrorMessage, + isWithinRoot, + getErrorStatus, +} from '@qwen-code/qwen-code-core'; +import * as acp from './acp.js'; +import { Agent } from './acp.js'; +import { Readable, Writable } from 'node:stream'; +import { Content, Part, FunctionCall, PartListUnion } from '@google/genai'; +import { LoadedSettings, SettingScope } from '../config/settings.js'; +import * as fs from 'fs/promises'; +import * as 
path from 'path'; + +export async function runAcpPeer(config: Config, settings: LoadedSettings) { + const stdout = Writable.toWeb(process.stdout) as WritableStream; + const stdin = Readable.toWeb(process.stdin) as ReadableStream; + + // Stdout is used to send messages to the client, so console.log/console.info + // messages to stderr so that they don't interfere with ACP. + console.log = console.error; + console.info = console.error; + console.debug = console.error; + + new acp.ClientConnection( + (client: acp.Client) => new GeminiAgent(config, settings, client), + stdout, + stdin, + ); +} + +class GeminiAgent implements Agent { + chat?: GeminiChat; + pendingSend?: AbortController; + + constructor( + private config: Config, + private settings: LoadedSettings, + private client: acp.Client, + ) {} + + async initialize(_: acp.InitializeParams): Promise { + let isAuthenticated = false; + if (this.settings.merged.selectedAuthType) { + try { + await this.config.refreshAuth(this.settings.merged.selectedAuthType); + isAuthenticated = true; + } catch (error) { + console.error('Failed to refresh auth:', error); + } + } + return { protocolVersion: acp.LATEST_PROTOCOL_VERSION, isAuthenticated }; + } + + async authenticate(): Promise { + await clearCachedCredentialFile(); + await this.config.refreshAuth(AuthType.LOGIN_WITH_GOOGLE); + this.settings.setValue( + SettingScope.User, + 'selectedAuthType', + AuthType.LOGIN_WITH_GOOGLE, + ); + } + + async cancelSendMessage(): Promise { + if (!this.pendingSend) { + throw new Error('Not currently generating'); + } + + this.pendingSend.abort(); + delete this.pendingSend; + } + + async sendUserMessage(params: acp.SendUserMessageParams): Promise { + this.pendingSend?.abort(); + const pendingSend = new AbortController(); + this.pendingSend = pendingSend; + + if (!this.chat) { + const geminiClient = this.config.getGeminiClient(); + this.chat = await geminiClient.startChat(); + } + + const promptId = Math.random().toString(16).slice(2); + 
const chat = this.chat!; + const toolRegistry: ToolRegistry = await this.config.getToolRegistry(); + const parts = await this.#resolveUserMessage(params, pendingSend.signal); + + let nextMessage: Content | null = { role: 'user', parts }; + + while (nextMessage !== null) { + if (pendingSend.signal.aborted) { + chat.addHistory(nextMessage); + return; + } + + const functionCalls: FunctionCall[] = []; + + try { + const responseStream = await chat.sendMessageStream( + { + message: nextMessage?.parts ?? [], + config: { + abortSignal: pendingSend.signal, + tools: [ + { + functionDeclarations: toolRegistry.getFunctionDeclarations(), + }, + ], + }, + }, + promptId, + ); + nextMessage = null; + + for await (const resp of responseStream) { + if (pendingSend.signal.aborted) { + return; + } + + if (resp.candidates && resp.candidates.length > 0) { + const candidate = resp.candidates[0]; + for (const part of candidate.content?.parts ?? []) { + if (!part.text) { + continue; + } + + this.client.streamAssistantMessageChunk({ + chunk: part.thought + ? { thought: part.text } + : { text: part.text }, + }); + } + } + + if (resp.functionCalls) { + functionCalls.push(...resp.functionCalls); + } + } + } catch (error) { + if (getErrorStatus(error) === 429) { + throw new acp.RequestError( + 429, + 'Rate limit exceeded. Try again later.', + ); + } + + throw error; + } + + if (functionCalls.length > 0) { + const toolResponseParts: Part[] = []; + + for (const fc of functionCalls) { + const response = await this.#runTool( + pendingSend.signal, + promptId, + fc, + ); + + const parts = Array.isArray(response) ? 
response : [response]; + + for (const part of parts) { + if (typeof part === 'string') { + toolResponseParts.push({ text: part }); + } else if (part) { + toolResponseParts.push(part); + } + } + } + + nextMessage = { role: 'user', parts: toolResponseParts }; + } + } + } + + async #runTool( + abortSignal: AbortSignal, + promptId: string, + fc: FunctionCall, + ): Promise { + const callId = fc.id ?? `${fc.name}-${Date.now()}`; + const args = (fc.args ?? {}) as Record; + + const startTime = Date.now(); + + const errorResponse = (error: Error) => { + const durationMs = Date.now() - startTime; + logToolCall(this.config, { + 'event.name': 'tool_call', + 'event.timestamp': new Date().toISOString(), + prompt_id: promptId, + function_name: fc.name ?? '', + function_args: args, + duration_ms: durationMs, + success: false, + error: error.message, + }); + + return [ + { + functionResponse: { + id: callId, + name: fc.name ?? '', + response: { error: error.message }, + }, + }, + ]; + }; + + if (!fc.name) { + return errorResponse(new Error('Missing function name')); + } + + const toolRegistry: ToolRegistry = await this.config.getToolRegistry(); + const tool = toolRegistry.getTool(fc.name as string); + + if (!tool) { + return errorResponse( + new Error(`Tool "${fc.name}" not found in registry.`), + ); + } + + let toolCallId; + const confirmationDetails = await tool.shouldConfirmExecute( + args, + abortSignal, + ); + if (confirmationDetails) { + let content: acp.ToolCallContent | null = null; + if (confirmationDetails.type === 'edit') { + content = { + type: 'diff', + path: confirmationDetails.fileName, + oldText: confirmationDetails.originalContent, + newText: confirmationDetails.newContent, + }; + } + + const result = await this.client.requestToolCallConfirmation({ + label: tool.getDescription(args), + icon: tool.icon, + content, + confirmation: toAcpToolCallConfirmation(confirmationDetails), + locations: tool.toolLocations(args), + }); + + await 
confirmationDetails.onConfirm(toToolCallOutcome(result.outcome)); + switch (result.outcome) { + case 'reject': + return errorResponse( + new Error(`Tool "${fc.name}" not allowed to run by the user.`), + ); + + case 'cancel': + return errorResponse( + new Error(`Tool "${fc.name}" was canceled by the user.`), + ); + case 'allow': + case 'alwaysAllow': + case 'alwaysAllowMcpServer': + case 'alwaysAllowTool': + break; + default: { + const resultOutcome: never = result.outcome; + throw new Error(`Unexpected: ${resultOutcome}`); + } + } + + toolCallId = result.id; + } else { + const result = await this.client.pushToolCall({ + icon: tool.icon, + label: tool.getDescription(args), + locations: tool.toolLocations(args), + }); + + toolCallId = result.id; + } + + try { + const toolResult: ToolResult = await tool.execute(args, abortSignal); + const toolCallContent = toToolCallContent(toolResult); + + await this.client.updateToolCall({ + toolCallId, + status: 'finished', + content: toolCallContent, + }); + + const durationMs = Date.now() - startTime; + logToolCall(this.config, { + 'event.name': 'tool_call', + 'event.timestamp': new Date().toISOString(), + function_name: fc.name, + function_args: args, + duration_ms: durationMs, + success: true, + prompt_id: promptId, + }); + + return convertToFunctionResponse(fc.name, callId, toolResult.llmContent); + } catch (e) { + const error = e instanceof Error ? 
e : new Error(String(e)); + await this.client.updateToolCall({ + toolCallId, + status: 'error', + content: { type: 'markdown', markdown: error.message }, + }); + + return errorResponse(error); + } + } + + async #resolveUserMessage( + message: acp.SendUserMessageParams, + abortSignal: AbortSignal, + ): Promise { + const atPathCommandParts = message.chunks.filter((part) => 'path' in part); + + if (atPathCommandParts.length === 0) { + return message.chunks.map((chunk) => { + if ('text' in chunk) { + return { text: chunk.text }; + } else { + throw new Error('Unexpected chunk type'); + } + }); + } + + // Get centralized file discovery service + const fileDiscovery = this.config.getFileService(); + const respectGitIgnore = this.config.getFileFilteringRespectGitIgnore(); + + const pathSpecsToRead: string[] = []; + const atPathToResolvedSpecMap = new Map(); + const contentLabelsForDisplay: string[] = []; + const ignoredPaths: string[] = []; + + const toolRegistry = await this.config.getToolRegistry(); + const readManyFilesTool = toolRegistry.getTool('read_many_files'); + const globTool = toolRegistry.getTool('glob'); + + if (!readManyFilesTool) { + throw new Error('Error: read_many_files tool not found.'); + } + + for (const atPathPart of atPathCommandParts) { + const pathName = atPathPart.path; + + // Check if path should be ignored by git + if (fileDiscovery.shouldGitIgnoreFile(pathName)) { + ignoredPaths.push(pathName); + const reason = respectGitIgnore + ? 'git-ignored and will be skipped' + : 'ignored by custom patterns'; + console.warn(`Path ${pathName} is ${reason}.`); + continue; + } + + let currentPathSpec = pathName; + let resolvedSuccessfully = false; + + try { + const absolutePath = path.resolve(this.config.getTargetDir(), pathName); + if (isWithinRoot(absolutePath, this.config.getTargetDir())) { + const stats = await fs.stat(absolutePath); + if (stats.isDirectory()) { + currentPathSpec = pathName.endsWith('/') + ? 
`${pathName}**` + : `${pathName}/**`; + this.#debug( + `Path ${pathName} resolved to directory, using glob: ${currentPathSpec}`, + ); + } else { + this.#debug( + `Path ${pathName} resolved to file: ${currentPathSpec}`, + ); + } + resolvedSuccessfully = true; + } else { + this.#debug( + `Path ${pathName} is outside the project directory. Skipping.`, + ); + } + } catch (error) { + if (isNodeError(error) && error.code === 'ENOENT') { + if (this.config.getEnableRecursiveFileSearch() && globTool) { + this.#debug( + `Path ${pathName} not found directly, attempting glob search.`, + ); + try { + const globResult = await globTool.execute( + { + pattern: `**/*${pathName}*`, + path: this.config.getTargetDir(), + }, + abortSignal, + ); + if ( + globResult.llmContent && + typeof globResult.llmContent === 'string' && + !globResult.llmContent.startsWith('No files found') && + !globResult.llmContent.startsWith('Error:') + ) { + const lines = globResult.llmContent.split('\n'); + if (lines.length > 1 && lines[1]) { + const firstMatchAbsolute = lines[1].trim(); + currentPathSpec = path.relative( + this.config.getTargetDir(), + firstMatchAbsolute, + ); + this.#debug( + `Glob search for ${pathName} found ${firstMatchAbsolute}, using relative path: ${currentPathSpec}`, + ); + resolvedSuccessfully = true; + } else { + this.#debug( + `Glob search for '**/*${pathName}*' did not return a usable path. Path ${pathName} will be skipped.`, + ); + } + } else { + this.#debug( + `Glob search for '**/*${pathName}*' found no files or an error. Path ${pathName} will be skipped.`, + ); + } + } catch (globError) { + console.error( + `Error during glob search for ${pathName}: ${getErrorMessage(globError)}`, + ); + } + } else { + this.#debug( + `Glob tool not found. Path ${pathName} will be skipped.`, + ); + } + } else { + console.error( + `Error stating path ${pathName}. 
Path ${pathName} will be skipped.`, + ); + } + } + + if (resolvedSuccessfully) { + pathSpecsToRead.push(currentPathSpec); + atPathToResolvedSpecMap.set(pathName, currentPathSpec); + contentLabelsForDisplay.push(pathName); + } + } + + // Construct the initial part of the query for the LLM + let initialQueryText = ''; + for (let i = 0; i < message.chunks.length; i++) { + const chunk = message.chunks[i]; + if ('text' in chunk) { + initialQueryText += chunk.text; + } else { + // type === 'atPath' + const resolvedSpec = atPathToResolvedSpecMap.get(chunk.path); + if ( + i > 0 && + initialQueryText.length > 0 && + !initialQueryText.endsWith(' ') && + resolvedSpec + ) { + // Add space if previous part was text and didn't end with space, or if previous was @path + const prevPart = message.chunks[i - 1]; + if ( + 'text' in prevPart || + ('path' in prevPart && atPathToResolvedSpecMap.has(prevPart.path)) + ) { + initialQueryText += ' '; + } + } + if (resolvedSpec) { + initialQueryText += `@${resolvedSpec}`; + } else { + // If not resolved for reading (e.g. lone @ or invalid path that was skipped), + // add the original @-string back, ensuring spacing if it's not the first element. + if ( + i > 0 && + initialQueryText.length > 0 && + !initialQueryText.endsWith(' ') && + !chunk.path.startsWith(' ') + ) { + initialQueryText += ' '; + } + initialQueryText += `@${chunk.path}`; + } + } + } + initialQueryText = initialQueryText.trim(); + + // Inform user about ignored paths + if (ignoredPaths.length > 0) { + const ignoreType = respectGitIgnore ? 
'git-ignored' : 'custom-ignored'; + this.#debug( + `Ignored ${ignoredPaths.length} ${ignoreType} files: ${ignoredPaths.join(', ')}`, + ); + } + + // Fallback for lone "@" or completely invalid @-commands resulting in empty initialQueryText + if (pathSpecsToRead.length === 0) { + console.warn('No valid file paths found in @ commands to read.'); + return [{ text: initialQueryText }]; + } + + const processedQueryParts: Part[] = [{ text: initialQueryText }]; + + const toolArgs = { + paths: pathSpecsToRead, + respectGitIgnore, // Use configuration setting + }; + + const toolCall = await this.client.pushToolCall({ + icon: readManyFilesTool.icon, + label: readManyFilesTool.getDescription(toolArgs), + }); + try { + const result = await readManyFilesTool.execute(toolArgs, abortSignal); + const content = toToolCallContent(result) || { + type: 'markdown', + markdown: `Successfully read: ${contentLabelsForDisplay.join(', ')}`, + }; + await this.client.updateToolCall({ + toolCallId: toolCall.id, + status: 'finished', + content, + }); + + if (Array.isArray(result.llmContent)) { + const fileContentRegex = /^--- (.*?) ---\n\n([\s\S]*?)\n\n$/; + processedQueryParts.push({ + text: '\n--- Content from referenced files ---', + }); + for (const part of result.llmContent) { + if (typeof part === 'string') { + const match = fileContentRegex.exec(part); + if (match) { + const filePathSpecInContent = match[1]; // This is a resolved pathSpec + const fileActualContent = match[2].trim(); + processedQueryParts.push({ + text: `\nContent from @${filePathSpecInContent}:\n`, + }); + processedQueryParts.push({ text: fileActualContent }); + } else { + processedQueryParts.push({ text: part }); + } + } else { + // part is a Part object. 
+ processedQueryParts.push(part); + } + } + processedQueryParts.push({ text: '\n--- End of content ---' }); + } else { + console.warn( + 'read_many_files tool returned no content or empty content.', + ); + } + + return processedQueryParts; + } catch (error: unknown) { + await this.client.updateToolCall({ + toolCallId: toolCall.id, + status: 'error', + content: { + type: 'markdown', + markdown: `Error reading files (${contentLabelsForDisplay.join(', ')}): ${getErrorMessage(error)}`, + }, + }); + throw error; + } + } + + #debug(msg: string) { + if (this.config.getDebugMode()) { + console.warn(msg); + } + } +} + +function toToolCallContent(toolResult: ToolResult): acp.ToolCallContent | null { + if (toolResult.returnDisplay) { + if (typeof toolResult.returnDisplay === 'string') { + return { + type: 'markdown', + markdown: toolResult.returnDisplay, + }; + } else { + return { + type: 'diff', + path: toolResult.returnDisplay.fileName, + oldText: toolResult.returnDisplay.originalContent, + newText: toolResult.returnDisplay.newContent, + }; + } + } else { + return null; + } +} + +function toAcpToolCallConfirmation( + confirmationDetails: ToolCallConfirmationDetails, +): acp.ToolCallConfirmation { + switch (confirmationDetails.type) { + case 'edit': + return { type: 'edit' }; + case 'exec': + return { + type: 'execute', + rootCommand: confirmationDetails.rootCommand, + command: confirmationDetails.command, + }; + case 'mcp': + return { + type: 'mcp', + serverName: confirmationDetails.serverName, + toolName: confirmationDetails.toolName, + toolDisplayName: confirmationDetails.toolDisplayName, + }; + case 'info': + return { + type: 'fetch', + urls: confirmationDetails.urls || [], + description: confirmationDetails.urls?.length + ? 
null + : confirmationDetails.prompt, + }; + default: { + const unreachable: never = confirmationDetails; + throw new Error(`Unexpected: ${unreachable}`); + } + } +} + +function toToolCallOutcome( + outcome: acp.ToolCallConfirmationOutcome, +): ToolConfirmationOutcome { + switch (outcome) { + case 'allow': + return ToolConfirmationOutcome.ProceedOnce; + case 'alwaysAllow': + return ToolConfirmationOutcome.ProceedAlways; + case 'alwaysAllowMcpServer': + return ToolConfirmationOutcome.ProceedAlwaysServer; + case 'alwaysAllowTool': + return ToolConfirmationOutcome.ProceedAlwaysTool; + case 'reject': + case 'cancel': + return ToolConfirmationOutcome.Cancel; + default: { + const unreachable: never = outcome; + throw new Error(`Unexpected: ${unreachable}`); + } + } +} diff --git a/packages/cli/src/config/config.test.ts b/packages/cli/src/config/config.test.ts index 9a39e81cd..b1f13fd06 100644 --- a/packages/cli/src/config/config.test.ts +++ b/packages/cli/src/config/config.test.ts @@ -6,15 +6,10 @@ import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; import * as os from 'os'; -import { loadCliConfig, parseArguments, CliArgs } from './config.js'; +import { loadCliConfig, parseArguments } from './config.js'; import { Settings } from './settings.js'; import { Extension } from './extension.js'; import * as ServerConfig from '@qwen-code/qwen-code-core'; -import { - TelemetryTarget, - ConfigParameters, - DEFAULT_TELEMETRY_TARGET, -} from '@qwen-code/qwen-code-core'; vi.mock('os', async (importOriginal) => { const actualOs = await importOriginal(); @@ -42,63 +37,19 @@ vi.mock('@qwen-code/qwen-code-core', async () => { ...actualServer, loadEnvironment: vi.fn(), loadServerHierarchicalMemory: vi.fn( - (cwd, debug, fileService, extensionPaths) => + (cwd, debug, fileService, extensionPaths, _maxDirs) => Promise.resolve({ memoryContent: extensionPaths?.join(',') || '', fileCount: extensionPaths?.length || 0, }), ), - Config: class MockConfig extends 
actualServer.Config { - private enableOpenAILogging: boolean; - - constructor(params: ConfigParameters) { - super(params); - this.enableOpenAILogging = params.enableOpenAILogging ?? false; - } - - getEnableOpenAILogging(): boolean { - return this.enableOpenAILogging; - } - - // Override other methods to ensure they work correctly - getShowMemoryUsage(): boolean { - return ( - (this as unknown as { showMemoryUsage?: boolean }).showMemoryUsage ?? - false - ); - } - - getTelemetryEnabled(): boolean { - return ( - (this as unknown as { telemetrySettings?: { enabled?: boolean } }) - .telemetrySettings?.enabled ?? false - ); - } - - getTelemetryLogPromptsEnabled(): boolean { - return ( - (this as unknown as { telemetrySettings?: { logPrompts?: boolean } }) - .telemetrySettings?.logPrompts ?? false - ); - } - - getTelemetryOtlpEndpoint(): string { - return ( - (this as unknown as { telemetrySettings?: { otlpEndpoint?: string } }) - .telemetrySettings?.otlpEndpoint ?? - 'http://tracing-analysis-dc-hz.aliyuncs.com:8090' - ); - } - - getTelemetryTarget(): TelemetryTarget { - return ( - ( - this as unknown as { - telemetrySettings?: { target?: TelemetryTarget }; - } - ).telemetrySettings?.target ?? 
DEFAULT_TELEMETRY_TARGET - ); - } + DEFAULT_MEMORY_FILE_FILTERING_OPTIONS: { + respectGitIgnore: false, + respectGeminiIgnore: true, + }, + DEFAULT_FILE_FILTERING_OPTIONS: { + respectGitIgnore: true, + respectGeminiIgnore: true, }, }; }); @@ -244,6 +195,85 @@ describe('loadCliConfig', () => { const config = await loadCliConfig(settings, [], 'test-session', argv); expect(config.getShowMemoryUsage()).toBe(true); }); + + it(`should leave proxy to empty by default`, async () => { + // Clear all proxy environment variables to ensure clean test + delete process.env.https_proxy; + delete process.env.http_proxy; + delete process.env.HTTPS_PROXY; + delete process.env.HTTP_PROXY; + + process.argv = ['node', 'script.js']; + const argv = await parseArguments(); + const settings: Settings = {}; + const config = await loadCliConfig(settings, [], 'test-session', argv); + expect(config.getProxy()).toBeFalsy(); + }); + + const proxy_url = 'http://localhost:7890'; + const testCases = [ + { + input: { + env_name: 'https_proxy', + proxy_url, + }, + expected: proxy_url, + }, + { + input: { + env_name: 'http_proxy', + proxy_url, + }, + expected: proxy_url, + }, + { + input: { + env_name: 'HTTPS_PROXY', + proxy_url, + }, + expected: proxy_url, + }, + { + input: { + env_name: 'HTTP_PROXY', + proxy_url, + }, + expected: proxy_url, + }, + ]; + testCases.forEach(({ input, expected }) => { + it(`should set proxy to ${expected} according to environment variable [${input.env_name}]`, async () => { + // Clear all proxy environment variables first + delete process.env.https_proxy; + delete process.env.http_proxy; + delete process.env.HTTPS_PROXY; + delete process.env.HTTP_PROXY; + + process.env[input.env_name] = input.proxy_url; + process.argv = ['node', 'script.js']; + const argv = await parseArguments(); + const settings: Settings = {}; + const config = await loadCliConfig(settings, [], 'test-session', argv); + expect(config.getProxy()).toBe(expected); + }); + }); + + it('should set proxy when 
--proxy flag is present', async () => { + process.argv = ['node', 'script.js', '--proxy', 'http://localhost:7890']; + const argv = await parseArguments(); + const settings: Settings = {}; + const config = await loadCliConfig(settings, [], 'test-session', argv); + expect(config.getProxy()).toBe('http://localhost:7890'); + }); + + it('should prioritize CLI flag over environment variable for proxy (CLI http://localhost:7890, environment variable http://localhost:7891)', async () => { + process.env['http_proxy'] = 'http://localhost:7891'; + process.argv = ['node', 'script.js', '--proxy', 'http://localhost:7890']; + const argv = await parseArguments(); + const settings: Settings = {}; + const config = await loadCliConfig(settings, [], 'test-session', argv); + expect(config.getProxy()).toBe('http://localhost:7890'); + }); }); describe('loadCliConfig telemetry', () => { @@ -350,9 +380,7 @@ describe('loadCliConfig telemetry', () => { const argv = await parseArguments(); const settings: Settings = { telemetry: { enabled: true } }; const config = await loadCliConfig(settings, [], 'test-session', argv); - expect(config.getTelemetryOtlpEndpoint()).toBe( - 'http://tracing-analysis-dc-hz.aliyuncs.com:8090', - ); + expect(config.getTelemetryOtlpEndpoint()).toBe('http://localhost:4317'); }); it('should use telemetry target from settings if CLI flag is not present', async () => { @@ -411,81 +439,12 @@ describe('loadCliConfig telemetry', () => { expect(config.getTelemetryLogPromptsEnabled()).toBe(false); }); - it('should use default log prompts (false) if no value is provided via CLI or settings', async () => { + it('should use default log prompts (true) if no value is provided via CLI or settings', async () => { process.argv = ['node', 'script.js']; const argv = await parseArguments(); const settings: Settings = { telemetry: { enabled: true } }; const config = await loadCliConfig(settings, [], 'test-session', argv); - expect(config.getTelemetryLogPromptsEnabled()).toBe(false); - 
}); - - it('should set enableOpenAILogging to true when --openai-logging flag is present', async () => { - const settings: Settings = {}; - const argv = await parseArguments(); - argv.openaiLogging = true; - const config = await loadCliConfig(settings, [], 'test-session', argv); - expect( - ( - config as unknown as { getEnableOpenAILogging(): boolean } - ).getEnableOpenAILogging(), - ).toBe(true); - }); - - it('should set enableOpenAILogging to false when --openai-logging flag is not present', async () => { - const settings: Settings = {}; - const argv = await parseArguments(); - const config = await loadCliConfig(settings, [], 'test-session', argv); - expect( - ( - config as unknown as { getEnableOpenAILogging(): boolean } - ).getEnableOpenAILogging(), - ).toBe(false); - }); - - it('should use enableOpenAILogging value from settings if CLI flag is not present (settings true)', async () => { - const settings: Settings = { enableOpenAILogging: true }; - const argv = await parseArguments(); - const config = await loadCliConfig(settings, [], 'test-session', argv); - expect( - ( - config as unknown as { getEnableOpenAILogging(): boolean } - ).getEnableOpenAILogging(), - ).toBe(true); - }); - - it('should use enableOpenAILogging value from settings if CLI flag is not present (settings false)', async () => { - const settings: Settings = { enableOpenAILogging: false }; - const argv = await parseArguments(); - const config = await loadCliConfig(settings, [], 'test-session', argv); - expect( - ( - config as unknown as { getEnableOpenAILogging(): boolean } - ).getEnableOpenAILogging(), - ).toBe(false); - }); - - it('should prioritize --openai-logging CLI flag (true) over settings (false)', async () => { - const settings: Settings = { enableOpenAILogging: false }; - const argv = await parseArguments(); - argv.openaiLogging = true; - const config = await loadCliConfig(settings, [], 'test-session', argv); - expect( - ( - config as unknown as { getEnableOpenAILogging(): boolean 
} - ).getEnableOpenAILogging(), - ).toBe(true); - }); - - it('should prioritize --openai-logging CLI flag (false) over settings (true)', async () => { - const settings: Settings = { enableOpenAILogging: true }; - const argv = await parseArguments(); - argv.openaiLogging = false; - const config = await loadCliConfig(settings, [], 'test-session', argv); - expect( - ( - config as unknown as { getEnableOpenAILogging(): boolean } - ).getEnableOpenAILogging(), - ).toBe(false); + expect(config.getTelemetryLogPromptsEnabled()).toBe(true); }); }); @@ -540,6 +499,11 @@ describe('Hierarchical Memory Loading (config.ts) - Placeholder Suite', () => { '/path/to/ext3/context1.md', '/path/to/ext3/context2.md', ], + { + respectGitIgnore: false, + respectGeminiIgnore: true, + }, + undefined, // maxDirs ); }); @@ -853,6 +817,66 @@ describe('loadCliConfig with allowed-mcp-server-names', () => { const config = await loadCliConfig(baseSettings, [], 'test-session', argv); expect(config.getMcpServers()).toEqual({}); }); + + it('should read allowMCPServers from settings', async () => { + process.argv = ['node', 'script.js']; + const argv = await parseArguments(); + const settings: Settings = { + ...baseSettings, + allowMCPServers: ['server1', 'server2'], + }; + const config = await loadCliConfig(settings, [], 'test-session', argv); + expect(config.getMcpServers()).toEqual({ + server1: { url: 'http://localhost:8080' }, + server2: { url: 'http://localhost:8081' }, + }); + }); + + it('should read excludeMCPServers from settings', async () => { + process.argv = ['node', 'script.js']; + const argv = await parseArguments(); + const settings: Settings = { + ...baseSettings, + excludeMCPServers: ['server1', 'server2'], + }; + const config = await loadCliConfig(settings, [], 'test-session', argv); + expect(config.getMcpServers()).toEqual({ + server3: { url: 'http://localhost:8082' }, + }); + }); + + it('should override allowMCPServers with excludeMCPServers if overlapping ', async () => { + 
process.argv = ['node', 'script.js']; + const argv = await parseArguments(); + const settings: Settings = { + ...baseSettings, + excludeMCPServers: ['server1'], + allowMCPServers: ['server1', 'server2'], + }; + const config = await loadCliConfig(settings, [], 'test-session', argv); + expect(config.getMcpServers()).toEqual({ + server2: { url: 'http://localhost:8081' }, + }); + }); + + it('should prioritize mcp server flag if set ', async () => { + process.argv = [ + 'node', + 'script.js', + '--allowed-mcp-server-names', + 'server1', + ]; + const argv = await parseArguments(); + const settings: Settings = { + ...baseSettings, + excludeMCPServers: ['server1'], + allowMCPServers: ['server2'], + }; + const config = await loadCliConfig(settings, [], 'test-session', argv); + expect(config.getMcpServers()).toEqual({ + server1: { url: 'http://localhost:8080' }, + }); + }); }); describe('loadCliConfig extensions', () => { @@ -908,6 +932,7 @@ describe('loadCliConfig ideMode', () => { // Explicitly delete TERM_PROGRAM and SANDBOX before each test delete process.env.TERM_PROGRAM; delete process.env.SANDBOX; + delete process.env.GEMINI_CLI_IDE_SERVER_PORT; }); afterEach(() => { @@ -944,6 +969,7 @@ describe('loadCliConfig ideMode', () => { process.argv = ['node', 'script.js', '--ide-mode']; const argv = await parseArguments(); process.env.TERM_PROGRAM = 'vscode'; + process.env.GEMINI_CLI_IDE_SERVER_PORT = '3000'; const settings: Settings = {}; const config = await loadCliConfig(settings, [], 'test-session', argv); expect(config.getIdeMode()).toBe(true); @@ -953,6 +979,7 @@ describe('loadCliConfig ideMode', () => { process.argv = ['node', 'script.js']; const argv = await parseArguments(); process.env.TERM_PROGRAM = 'vscode'; + process.env.GEMINI_CLI_IDE_SERVER_PORT = '3000'; const settings: Settings = { ideMode: true }; const config = await loadCliConfig(settings, [], 'test-session', argv); expect(config.getIdeMode()).toBe(true); @@ -962,6 +989,7 @@ describe('loadCliConfig 
ideMode', () => { process.argv = ['node', 'script.js', '--ide-mode']; const argv = await parseArguments(); process.env.TERM_PROGRAM = 'vscode'; + process.env.GEMINI_CLI_IDE_SERVER_PORT = '3000'; const settings: Settings = { ideMode: false }; const config = await loadCliConfig(settings, [], 'test-session', argv); expect(config.getIdeMode()).toBe(true); @@ -995,82 +1023,4 @@ describe('loadCliConfig ideMode', () => { const config = await loadCliConfig(settings, [], 'test-session', argv); expect(config.getIdeMode()).toBe(false); }); - - it('should add __ide_server when ideMode is true', async () => { - process.argv = ['node', 'script.js', '--ide-mode']; - const argv = await parseArguments(); - process.env.TERM_PROGRAM = 'vscode'; - const settings: Settings = {}; - const config = await loadCliConfig(settings, [], 'test-session', argv); - expect(config.getIdeMode()).toBe(true); - const mcpServers = config.getMcpServers(); - expect(mcpServers?.['_ide_server']).toBeDefined(); - expect(mcpServers?.['_ide_server']?.httpUrl).toBe( - 'http://localhost:3000/mcp', - ); - expect(mcpServers?.['_ide_server']?.description).toBe('IDE connection'); - expect(mcpServers?.['_ide_server']?.trust).toBe(false); - }); -}); - -describe('loadCliConfig systemPromptMappings', () => { - it('should use default systemPromptMappings when not provided in settings', async () => { - const mockSettings: Settings = { - theme: 'dark', - }; - const mockExtensions: Extension[] = []; - const mockSessionId = 'test-session'; - const mockArgv: CliArgs = { - model: 'test-model', - } as CliArgs; - - const config = await loadCliConfig( - mockSettings, - mockExtensions, - mockSessionId, - mockArgv, - ); - - expect(config.getSystemPromptMappings()).toEqual([ - { - baseUrls: [ - 'https://dashscope.aliyuncs.com/compatible-mode/v1/', - 'https://dashscope-intl.aliyuncs.com/compatible-mode/v1/', - ], - modelNames: ['qwen3-coder-plus'], - template: - 
'SYSTEM_TEMPLATE:{"name":"qwen3_coder","params":{"is_git_repository":{RUNTIME_VARS_IS_GIT_REPO},"sandbox":"{RUNTIME_VARS_SANDBOX}"}}', - }, - ]); - }); - - it('should use custom systemPromptMappings when provided in settings', async () => { - const customSystemPromptMappings = [ - { - baseUrls: ['https://custom-api.com'], - modelNames: ['custom-model'], - template: 'Custom template', - }, - ]; - const mockSettings: Settings = { - theme: 'dark', - systemPromptMappings: customSystemPromptMappings, - }; - const mockExtensions: Extension[] = []; - const mockSessionId = 'test-session'; - const mockArgv: CliArgs = { - model: 'test-model', - } as CliArgs; - - const config = await loadCliConfig( - mockSettings, - mockExtensions, - mockSessionId, - mockArgv, - ); - - expect(config.getSystemPromptMappings()).toEqual( - customSystemPromptMappings, - ); - }); }); diff --git a/packages/cli/src/config/config.ts b/packages/cli/src/config/config.ts index f3f9900a7..a8271e26a 100644 --- a/packages/cli/src/config/config.ts +++ b/packages/cli/src/config/config.ts @@ -15,13 +15,15 @@ import { ApprovalMode, DEFAULT_GEMINI_MODEL, DEFAULT_GEMINI_EMBEDDING_MODEL, + DEFAULT_MEMORY_FILE_FILTERING_OPTIONS, FileDiscoveryService, TelemetryTarget, - MCPServerConfig, + FileFilteringOptions, + IdeClient, } from '@qwen-code/qwen-code-core'; import { Settings } from './settings.js'; -import { Extension, filterActiveExtensions } from './extension.js'; +import { Extension, annotateActiveExtensions } from './extension.js'; import { getCliVersion } from '../utils/version.js'; import { loadSandboxConfig } from './sandboxConfig.js'; @@ -52,13 +54,16 @@ export interface CliArgs { telemetryTarget: string | undefined; telemetryOtlpEndpoint: string | undefined; telemetryLogPrompts: boolean | undefined; + telemetryOutfile: string | undefined; allowedMcpServerNames: string[] | undefined; + experimentalAcp: boolean | undefined; extensions: string[] | undefined; listExtensions: boolean | undefined; ideMode: 
boolean | undefined; openaiLogging: boolean | undefined; openaiApiKey: string | undefined; openaiBaseUrl: string | undefined; + proxy: string | undefined; } export async function parseArguments(): Promise { @@ -157,12 +162,20 @@ export async function parseArguments(): Promise { description: 'Enable or disable logging of user prompts for telemetry. Overrides settings files.', }) + .option('telemetry-outfile', { + type: 'string', + description: 'Redirect all telemetry output to the specified file.', + }) .option('checkpointing', { alias: 'c', type: 'boolean', description: 'Enables checkpointing of file edits', default: false, }) + .option('experimental-acp', { + type: 'boolean', + description: 'Starts the agent in ACP mode', + }) .option('allowed-mcp-server-names', { type: 'array', string: true, @@ -197,7 +210,11 @@ export async function parseArguments(): Promise { type: 'string', description: 'OpenAI base URL (for custom endpoints)', }) - + .option('proxy', { + type: 'string', + description: + 'Proxy for gemini client, like schema://user:password@host:port', + }) .version(await getCliVersion()) // This will enable the --version flag based on package.json .alias('v', 'version') .help() @@ -223,13 +240,16 @@ export async function loadHierarchicalGeminiMemory( currentWorkingDirectory: string, debugMode: boolean, fileService: FileDiscoveryService, + settings: Settings, extensionContextFilePaths: string[] = [], + fileFilteringOptions?: FileFilteringOptions, ): Promise<{ memoryContent: string; fileCount: number }> { if (debugMode) { logger.debug( `CLI: Delegating hierarchical memory load to server for CWD: ${currentWorkingDirectory}`, ); } + // Directly call the server function. // The server function will use its own homedir() for the global path. 
return loadServerHierarchicalMemory( @@ -237,6 +257,8 @@ export async function loadHierarchicalGeminiMemory( debugMode, fileService, extensionContextFilePaths, + fileFilteringOptions, + settings.memoryDiscoveryMaxDirs, ); } @@ -257,11 +279,19 @@ export async function loadCliConfig( process.env.TERM_PROGRAM === 'vscode' && !process.env.SANDBOX; - const activeExtensions = filterActiveExtensions( + let ideClient: IdeClient | undefined; + if (ideMode) { + ideClient = new IdeClient(); + } + + const allExtensions = annotateActiveExtensions( extensions, argv.extensions || [], ); + const activeExtensions = extensions.filter( + (_, i) => allExtensions[i].isActive, + ); // Handle OpenAI API key from command line if (argv.openaiApiKey) { process.env.OPENAI_API_KEY = argv.openaiApiKey; @@ -288,46 +318,72 @@ export async function loadCliConfig( ); const fileService = new FileDiscoveryService(process.cwd()); + + const fileFiltering = { + ...DEFAULT_MEMORY_FILE_FILTERING_OPTIONS, + ...settings.fileFiltering, + }; + // Call the (now wrapper) loadHierarchicalGeminiMemory which calls the server's version const { memoryContent, fileCount } = await loadHierarchicalGeminiMemory( process.cwd(), debugMode, fileService, + settings, extensionContextFilePaths, + fileFiltering, ); let mcpServers = mergeMcpServers(settings, activeExtensions); const excludeTools = mergeExcludeTools(settings, activeExtensions); + const blockedMcpServers: Array<{ name: string; extensionName: string }> = []; + + if (!argv.allowedMcpServerNames) { + if (settings.allowMCPServers) { + const allowedNames = new Set(settings.allowMCPServers.filter(Boolean)); + if (allowedNames.size > 0) { + mcpServers = Object.fromEntries( + Object.entries(mcpServers).filter(([key]) => allowedNames.has(key)), + ); + } + } + + if (settings.excludeMCPServers) { + const excludedNames = new Set(settings.excludeMCPServers.filter(Boolean)); + if (excludedNames.size > 0) { + mcpServers = Object.fromEntries( + 
Object.entries(mcpServers).filter(([key]) => !excludedNames.has(key)), + ); + } + } + } if (argv.allowedMcpServerNames) { const allowedNames = new Set(argv.allowedMcpServerNames.filter(Boolean)); if (allowedNames.size > 0) { mcpServers = Object.fromEntries( - Object.entries(mcpServers).filter(([key]) => allowedNames.has(key)), + Object.entries(mcpServers).filter(([key, server]) => { + const isAllowed = allowedNames.has(key); + if (!isAllowed) { + blockedMcpServers.push({ + name: key, + extensionName: server.extensionName || '', + }); + } + return isAllowed; + }), ); } else { + blockedMcpServers.push( + ...Object.entries(mcpServers).map(([key, server]) => ({ + name: key, + extensionName: server.extensionName || '', + })), + ); mcpServers = {}; } } - if (ideMode) { - mcpServers['_ide_server'] = new MCPServerConfig( - undefined, // command - undefined, // args - undefined, // env - undefined, // cwd - undefined, // url - 'http://localhost:3000/mcp', // httpUrl - undefined, // headers - undefined, // tcp - undefined, // timeout - false, // trust - 'IDE connection', // description - undefined, // includeTools - undefined, // excludeTools - ); - } - const sandboxConfig = await loadSandboxConfig(settings, argv); return new Config({ @@ -362,16 +418,19 @@ export async function loadCliConfig( process.env.OTEL_EXPORTER_OTLP_ENDPOINT ?? settings.telemetry?.otlpEndpoint, logPrompts: argv.telemetryLogPrompts ?? settings.telemetry?.logPrompts, + outfile: argv.telemetryOutfile ?? settings.telemetry?.outfile, }, usageStatisticsEnabled: settings.usageStatisticsEnabled ?? 
true, // Git-aware file filtering settings fileFiltering: { respectGitIgnore: settings.fileFiltering?.respectGitIgnore, + respectGeminiIgnore: settings.fileFiltering?.respectGeminiIgnore, enableRecursiveFileSearch: settings.fileFiltering?.enableRecursiveFileSearch, }, checkpointing: argv.checkpointing || settings.checkpointing?.enabled, proxy: + argv.proxy || process.env.HTTPS_PROXY || process.env.https_proxy || process.env.HTTP_PROXY || @@ -382,15 +441,14 @@ export async function loadCliConfig( model: argv.model!, extensionContextFilePaths, maxSessionTurns: settings.maxSessionTurns ?? -1, - sessionTokenLimit: settings.sessionTokenLimit ?? 32000, - maxFolderItems: settings.maxFolderItems ?? 20, + experimentalAcp: argv.experimentalAcp || false, listExtensions: argv.listExtensions || false, - activeExtensions: activeExtensions.map((e) => ({ - name: e.config.name, - version: e.config.version, - })), + extensions: allExtensions, + blockedMcpServers, noBrowser: !!process.env.NO_BROWSER, + summarizeToolOutput: settings.summarizeToolOutput, ideMode, + ideClient, enableOpenAILogging: (typeof argv.openaiLogging === 'undefined' ? 
settings.enableOpenAILogging @@ -421,7 +479,10 @@ function mergeMcpServers(settings: Settings, extensions: Extension[]) { ); return; } - mcpServers[key] = server; + mcpServers[key] = { + ...server, + extensionName: extension.config.name, + }; }, ); } diff --git a/packages/cli/src/config/extension.test.ts b/packages/cli/src/config/extension.test.ts index 690dd3122..1ee46d4cd 100644 --- a/packages/cli/src/config/extension.test.ts +++ b/packages/cli/src/config/extension.test.ts @@ -11,7 +11,7 @@ import * as path from 'path'; import { EXTENSIONS_CONFIG_FILENAME, EXTENSIONS_DIRECTORY_NAME, - filterActiveExtensions, + annotateActiveExtensions, loadExtensions, } from './extension.js'; @@ -42,7 +42,7 @@ describe('loadExtensions', () => { fs.rmSync(tempHomeDir, { recursive: true, force: true }); }); - it('should load context file path when GEMINI.md is present', () => { + it('should load context file path when QWEN.md is present', () => { const workspaceExtensionsDir = path.join( tempWorkspaceDir, EXTENSIONS_DIRECTORY_NAME, @@ -86,42 +86,52 @@ describe('loadExtensions', () => { }); }); -describe('filterActiveExtensions', () => { +describe('annotateActiveExtensions', () => { const extensions = [ { config: { name: 'ext1', version: '1.0.0' }, contextFiles: [] }, { config: { name: 'ext2', version: '1.0.0' }, contextFiles: [] }, { config: { name: 'ext3', version: '1.0.0' }, contextFiles: [] }, ]; - it('should return all extensions if no enabled extensions are provided', () => { - const activeExtensions = filterActiveExtensions(extensions, []); + it('should mark all extensions as active if no enabled extensions are provided', () => { + const activeExtensions = annotateActiveExtensions(extensions, []); expect(activeExtensions).toHaveLength(3); + expect(activeExtensions.every((e) => e.isActive)).toBe(true); }); - it('should return only the enabled extensions', () => { - const activeExtensions = filterActiveExtensions(extensions, [ + it('should mark only the enabled extensions as 
active', () => { + const activeExtensions = annotateActiveExtensions(extensions, [ 'ext1', 'ext3', ]); - expect(activeExtensions).toHaveLength(2); - expect(activeExtensions.some((e) => e.config.name === 'ext1')).toBe(true); - expect(activeExtensions.some((e) => e.config.name === 'ext3')).toBe(true); + expect(activeExtensions).toHaveLength(3); + expect(activeExtensions.find((e) => e.name === 'ext1')?.isActive).toBe( + true, + ); + expect(activeExtensions.find((e) => e.name === 'ext2')?.isActive).toBe( + false, + ); + expect(activeExtensions.find((e) => e.name === 'ext3')?.isActive).toBe( + true, + ); }); - it('should return no extensions when "none" is provided', () => { - const activeExtensions = filterActiveExtensions(extensions, ['none']); - expect(activeExtensions).toHaveLength(0); + it('should mark all extensions as inactive when "none" is provided', () => { + const activeExtensions = annotateActiveExtensions(extensions, ['none']); + expect(activeExtensions).toHaveLength(3); + expect(activeExtensions.every((e) => !e.isActive)).toBe(true); }); it('should handle case-insensitivity', () => { - const activeExtensions = filterActiveExtensions(extensions, ['EXT1']); - expect(activeExtensions).toHaveLength(1); - expect(activeExtensions[0].config.name).toBe('ext1'); + const activeExtensions = annotateActiveExtensions(extensions, ['EXT1']); + expect(activeExtensions.find((e) => e.name === 'ext1')?.isActive).toBe( + true, + ); }); it('should log an error for unknown extensions', () => { - const consoleSpy = vi.spyOn(console, 'log').mockImplementation(() => {}); - filterActiveExtensions(extensions, ['ext4']); + const consoleSpy = vi.spyOn(console, 'error').mockImplementation(() => {}); + annotateActiveExtensions(extensions, ['ext4']); expect(consoleSpy).toHaveBeenCalledWith('Extension not found: ext4'); consoleSpy.mockRestore(); }); diff --git a/packages/cli/src/config/extension.ts b/packages/cli/src/config/extension.ts index 75fc4ed10..1c77ff04e 100644 --- 
a/packages/cli/src/config/extension.ts +++ b/packages/cli/src/config/extension.ts @@ -4,7 +4,7 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { MCPServerConfig } from '@qwen-code/qwen-code-core'; +import { MCPServerConfig, GeminiCLIExtension } from '@qwen-code/qwen-code-core'; import * as fs from 'fs'; import * as path from 'path'; import * as os from 'os'; @@ -34,9 +34,6 @@ export function loadExtensions(workspaceDir: string): Extension[] { const uniqueExtensions = new Map(); for (const extension of allExtensions) { if (!uniqueExtensions.has(extension.config.name)) { - console.log( - `Loading extension: ${extension.config.name} (version: ${extension.config.version})`, - ); uniqueExtensions.set(extension.config.name, extension); } } @@ -113,12 +110,18 @@ function getContextFileNames(config: ExtensionConfig): string[] { return config.contextFileName; } -export function filterActiveExtensions( +export function annotateActiveExtensions( extensions: Extension[], enabledExtensionNames: string[], -): Extension[] { +): GeminiCLIExtension[] { + const annotatedExtensions: GeminiCLIExtension[] = []; + if (enabledExtensionNames.length === 0) { - return extensions; + return extensions.map((extension) => ({ + name: extension.config.name, + version: extension.config.version, + isActive: true, + })); } const lowerCaseEnabledExtensions = new Set( @@ -129,31 +132,33 @@ export function filterActiveExtensions( lowerCaseEnabledExtensions.size === 1 && lowerCaseEnabledExtensions.has('none') ) { - if (extensions.length > 0) { - console.log('All extensions are disabled.'); - } - return []; + return extensions.map((extension) => ({ + name: extension.config.name, + version: extension.config.version, + isActive: false, + })); } - const activeExtensions: Extension[] = []; const notFoundNames = new Set(lowerCaseEnabledExtensions); for (const extension of extensions) { const lowerCaseName = extension.config.name.toLowerCase(); - if (lowerCaseEnabledExtensions.has(lowerCaseName)) { - 
console.log( - `Activated extension: ${extension.config.name} (version: ${extension.config.version})`, - ); - activeExtensions.push(extension); + const isActive = lowerCaseEnabledExtensions.has(lowerCaseName); + + if (isActive) { notFoundNames.delete(lowerCaseName); - } else { - console.log(`Disabled extension: ${extension.config.name}`); } + + annotatedExtensions.push({ + name: extension.config.name, + version: extension.config.version, + isActive, + }); } for (const requestedName of notFoundNames) { - console.log(`Extension not found: ${requestedName}`); + console.error(`Extension not found: ${requestedName}`); } - return activeExtensions; + return annotatedExtensions; } diff --git a/packages/cli/src/config/settings.test.ts b/packages/cli/src/config/settings.test.ts index 44de24fe1..ae655fe10 100644 --- a/packages/cli/src/config/settings.test.ts +++ b/packages/cli/src/config/settings.test.ts @@ -46,7 +46,7 @@ import stripJsonComments from 'strip-json-comments'; // Will be mocked separatel import { loadSettings, USER_SETTINGS_PATH, // This IS the mocked path. - SYSTEM_SETTINGS_PATH, + getSystemSettingsPath, SETTINGS_DIRECTORY_NAME, // This is from the original module, but used by the mock. 
SettingScope, } from './settings.js'; @@ -95,13 +95,16 @@ describe('Settings Loading and Merging', () => { expect(settings.system.settings).toEqual({}); expect(settings.user.settings).toEqual({}); expect(settings.workspace.settings).toEqual({}); - expect(settings.merged).toEqual({}); + expect(settings.merged).toEqual({ + customThemes: {}, + mcpServers: {}, + }); expect(settings.errors.length).toBe(0); }); it('should load system settings if only system file exists', () => { (mockFsExistsSync as Mock).mockImplementation( - (p: fs.PathLike) => p === SYSTEM_SETTINGS_PATH, + (p: fs.PathLike) => p === getSystemSettingsPath(), ); const systemSettingsContent = { theme: 'system-default', @@ -109,7 +112,7 @@ describe('Settings Loading and Merging', () => { }; (fs.readFileSync as Mock).mockImplementation( (p: fs.PathOrFileDescriptor) => { - if (p === SYSTEM_SETTINGS_PATH) + if (p === getSystemSettingsPath()) return JSON.stringify(systemSettingsContent); return '{}'; }, @@ -118,13 +121,17 @@ describe('Settings Loading and Merging', () => { const settings = loadSettings(MOCK_WORKSPACE_DIR); expect(fs.readFileSync).toHaveBeenCalledWith( - SYSTEM_SETTINGS_PATH, + getSystemSettingsPath(), 'utf-8', ); expect(settings.system.settings).toEqual(systemSettingsContent); expect(settings.user.settings).toEqual({}); expect(settings.workspace.settings).toEqual({}); - expect(settings.merged).toEqual(systemSettingsContent); + expect(settings.merged).toEqual({ + ...systemSettingsContent, + customThemes: {}, + mcpServers: {}, + }); }); it('should load user settings if only user file exists', () => { @@ -153,7 +160,11 @@ describe('Settings Loading and Merging', () => { ); expect(settings.user.settings).toEqual(userSettingsContent); expect(settings.workspace.settings).toEqual({}); - expect(settings.merged).toEqual(userSettingsContent); + expect(settings.merged).toEqual({ + ...userSettingsContent, + customThemes: {}, + mcpServers: {}, + }); }); it('should load workspace settings if only workspace 
file exists', () => { @@ -180,7 +191,11 @@ describe('Settings Loading and Merging', () => { ); expect(settings.user.settings).toEqual({}); expect(settings.workspace.settings).toEqual(workspaceSettingsContent); - expect(settings.merged).toEqual(workspaceSettingsContent); + expect(settings.merged).toEqual({ + ...workspaceSettingsContent, + customThemes: {}, + mcpServers: {}, + }); }); it('should merge user and workspace settings, with workspace taking precedence', () => { @@ -215,6 +230,8 @@ describe('Settings Loading and Merging', () => { sandbox: true, coreTools: ['tool1'], contextFileName: 'WORKSPACE_CONTEXT.md', + customThemes: {}, + mcpServers: {}, }); }); @@ -223,6 +240,7 @@ describe('Settings Loading and Merging', () => { const systemSettingsContent = { theme: 'system-theme', sandbox: false, + allowMCPServers: ['server1', 'server2'], telemetry: { enabled: false }, }; const userSettingsContent = { @@ -234,11 +252,12 @@ describe('Settings Loading and Merging', () => { sandbox: false, coreTools: ['tool1'], contextFileName: 'WORKSPACE_CONTEXT.md', + allowMCPServers: ['server1', 'server2', 'server3'], }; (fs.readFileSync as Mock).mockImplementation( (p: fs.PathOrFileDescriptor) => { - if (p === SYSTEM_SETTINGS_PATH) + if (p === getSystemSettingsPath()) return JSON.stringify(systemSettingsContent); if (p === USER_SETTINGS_PATH) return JSON.stringify(userSettingsContent); @@ -259,6 +278,9 @@ describe('Settings Loading and Merging', () => { telemetry: { enabled: false }, coreTools: ['tool1'], contextFileName: 'WORKSPACE_CONTEXT.md', + allowMCPServers: ['server1', 'server2'], + customThemes: {}, + mcpServers: {}, }); }); @@ -370,6 +392,134 @@ describe('Settings Loading and Merging', () => { (fs.readFileSync as Mock).mockReturnValue('{}'); const settings = loadSettings(MOCK_WORKSPACE_DIR); expect(settings.merged.telemetry).toBeUndefined(); + expect(settings.merged.customThemes).toEqual({}); + expect(settings.merged.mcpServers).toEqual({}); + }); + + it('should merge MCP 
servers correctly, with workspace taking precedence', () => { + (mockFsExistsSync as Mock).mockReturnValue(true); + const userSettingsContent = { + mcpServers: { + 'user-server': { + command: 'user-command', + args: ['--user-arg'], + description: 'User MCP server', + }, + 'shared-server': { + command: 'user-shared-command', + description: 'User shared server config', + }, + }, + }; + const workspaceSettingsContent = { + mcpServers: { + 'workspace-server': { + command: 'workspace-command', + args: ['--workspace-arg'], + description: 'Workspace MCP server', + }, + 'shared-server': { + command: 'workspace-shared-command', + description: 'Workspace shared server config', + }, + }, + }; + + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === USER_SETTINGS_PATH) + return JSON.stringify(userSettingsContent); + if (p === MOCK_WORKSPACE_SETTINGS_PATH) + return JSON.stringify(workspaceSettingsContent); + return ''; + }, + ); + + const settings = loadSettings(MOCK_WORKSPACE_DIR); + + expect(settings.user.settings).toEqual(userSettingsContent); + expect(settings.workspace.settings).toEqual(workspaceSettingsContent); + expect(settings.merged.mcpServers).toEqual({ + 'user-server': { + command: 'user-command', + args: ['--user-arg'], + description: 'User MCP server', + }, + 'workspace-server': { + command: 'workspace-command', + args: ['--workspace-arg'], + description: 'Workspace MCP server', + }, + 'shared-server': { + command: 'workspace-shared-command', + description: 'Workspace shared server config', + }, + }); + }); + + it('should handle MCP servers when only in user settings', () => { + (mockFsExistsSync as Mock).mockImplementation( + (p: fs.PathLike) => p === USER_SETTINGS_PATH, + ); + const userSettingsContent = { + mcpServers: { + 'user-only-server': { + command: 'user-only-command', + description: 'User only server', + }, + }, + }; + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === 
USER_SETTINGS_PATH) + return JSON.stringify(userSettingsContent); + return ''; + }, + ); + + const settings = loadSettings(MOCK_WORKSPACE_DIR); + expect(settings.merged.mcpServers).toEqual({ + 'user-only-server': { + command: 'user-only-command', + description: 'User only server', + }, + }); + }); + + it('should handle MCP servers when only in workspace settings', () => { + (mockFsExistsSync as Mock).mockImplementation( + (p: fs.PathLike) => p === MOCK_WORKSPACE_SETTINGS_PATH, + ); + const workspaceSettingsContent = { + mcpServers: { + 'workspace-only-server': { + command: 'workspace-only-command', + description: 'Workspace only server', + }, + }, + }; + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === MOCK_WORKSPACE_SETTINGS_PATH) + return JSON.stringify(workspaceSettingsContent); + return ''; + }, + ); + + const settings = loadSettings(MOCK_WORKSPACE_DIR); + expect(settings.merged.mcpServers).toEqual({ + 'workspace-only-server': { + command: 'workspace-only-command', + description: 'Workspace only server', + }, + }); + }); + + it('should have mcpServers as empty object if not in any settings file', () => { + (mockFsExistsSync as Mock).mockReturnValue(false); // No settings files exist + (fs.readFileSync as Mock).mockReturnValue('{}'); + const settings = loadSettings(MOCK_WORKSPACE_DIR); + expect(settings.merged.mcpServers).toEqual({}); }); it('should handle JSON parsing errors gracefully', () => { @@ -407,7 +557,10 @@ describe('Settings Loading and Merging', () => { // Check that settings are empty due to parsing errors expect(settings.user.settings).toEqual({}); expect(settings.workspace.settings).toEqual({}); - expect(settings.merged).toEqual({}); + expect(settings.merged).toEqual({ + customThemes: {}, + mcpServers: {}, + }); // Check that error objects are populated in settings.errors expect(settings.errors).toBeDefined(); @@ -448,10 +601,13 @@ describe('Settings Loading and Merging', () => { ); const settings = 
loadSettings(MOCK_WORKSPACE_DIR); + // @ts-expect-error: dynamic property for test expect(settings.user.settings.apiKey).toBe('user_api_key_from_env'); + // @ts-expect-error: dynamic property for test expect(settings.user.settings.someUrl).toBe( 'https://test.com/user_api_key_from_env', ); + // @ts-expect-error: dynamic property for test expect(settings.merged.apiKey).toBe('user_api_key_from_env'); delete process.env.TEST_API_KEY; }); @@ -480,6 +636,7 @@ describe('Settings Loading and Merging', () => { expect(settings.workspace.settings.nested.value).toBe( 'workspace_endpoint_from_env', ); + // @ts-expect-error: dynamic property for test expect(settings.merged.endpoint).toBe('workspace_endpoint_from_env/api'); delete process.env.WORKSPACE_ENDPOINT; }); @@ -509,13 +666,16 @@ describe('Settings Loading and Merging', () => { const settings = loadSettings(MOCK_WORKSPACE_DIR); + // @ts-expect-error: dynamic property for test expect(settings.user.settings.configValue).toBe( 'user_value_for_user_read', ); + // @ts-expect-error: dynamic property for test expect(settings.workspace.settings.configValue).toBe( 'workspace_value_for_workspace_read', ); // Merged should take workspace's resolved value + // @ts-expect-error: dynamic property for test expect(settings.merged.configValue).toBe( 'workspace_value_for_workspace_read', ); @@ -583,7 +743,7 @@ describe('Settings Loading and Merging', () => { (fs.readFileSync as Mock).mockImplementation( (p: fs.PathOrFileDescriptor) => { - if (p === SYSTEM_SETTINGS_PATH) { + if (p === getSystemSettingsPath()) { process.env.SHARED_VAR = 'system_value_for_system_read'; // Set for system settings read return JSON.stringify(systemSettingsContent); } @@ -597,13 +757,16 @@ describe('Settings Loading and Merging', () => { const settings = loadSettings(MOCK_WORKSPACE_DIR); + // @ts-expect-error: dynamic property for test expect(settings.system.settings.configValue).toBe( 'system_value_for_system_read', ); + // @ts-expect-error: dynamic property 
for test expect(settings.workspace.settings.configValue).toBe( 'workspace_value_for_workspace_read', ); - // Merged should take workspace's resolved value + // Merged should take system's resolved value + // @ts-expect-error: dynamic property for test expect(settings.merged.configValue).toBe('system_value_for_system_read'); // Restore original environment variable state @@ -750,6 +913,50 @@ describe('Settings Loading and Merging', () => { delete process.env.TEST_HOST; delete process.env.TEST_PORT; }); + + describe('when GEMINI_CLI_SYSTEM_SETTINGS_PATH is set', () => { + const MOCK_ENV_SYSTEM_SETTINGS_PATH = '/mock/env/system/settings.json'; + + beforeEach(() => { + process.env.GEMINI_CLI_SYSTEM_SETTINGS_PATH = + MOCK_ENV_SYSTEM_SETTINGS_PATH; + }); + + afterEach(() => { + delete process.env.GEMINI_CLI_SYSTEM_SETTINGS_PATH; + }); + + it('should load system settings from the path specified in the environment variable', () => { + (mockFsExistsSync as Mock).mockImplementation( + (p: fs.PathLike) => p === MOCK_ENV_SYSTEM_SETTINGS_PATH, + ); + const systemSettingsContent = { + theme: 'env-var-theme', + sandbox: true, + }; + (fs.readFileSync as Mock).mockImplementation( + (p: fs.PathOrFileDescriptor) => { + if (p === MOCK_ENV_SYSTEM_SETTINGS_PATH) + return JSON.stringify(systemSettingsContent); + return '{}'; + }, + ); + + const settings = loadSettings(MOCK_WORKSPACE_DIR); + + expect(fs.readFileSync).toHaveBeenCalledWith( + MOCK_ENV_SYSTEM_SETTINGS_PATH, + 'utf-8', + ); + expect(settings.system.path).toBe(MOCK_ENV_SYSTEM_SETTINGS_PATH); + expect(settings.system.settings).toEqual(systemSettingsContent); + expect(settings.merged).toEqual({ + ...systemSettingsContent, + customThemes: {}, + mcpServers: {}, + }); + }); + }); }); describe('LoadedSettings class', () => { diff --git a/packages/cli/src/config/settings.ts b/packages/cli/src/config/settings.ts index bd9b4178f..13528df46 100644 --- a/packages/cli/src/config/settings.ts +++ b/packages/cli/src/config/settings.ts @@ 
-19,12 +19,16 @@ import { import stripJsonComments from 'strip-json-comments'; import { DefaultLight } from '../ui/themes/default-light.js'; import { DefaultDark } from '../ui/themes/default.js'; +import { CustomTheme } from '../ui/themes/theme.js'; export const SETTINGS_DIRECTORY_NAME = '.qwen'; export const USER_SETTINGS_DIR = path.join(homedir(), SETTINGS_DIRECTORY_NAME); export const USER_SETTINGS_PATH = path.join(USER_SETTINGS_DIR, 'settings.json'); -function getSystemSettingsPath(): string { +export function getSystemSettingsPath(): string { + if (process.env.GEMINI_CLI_SYSTEM_SETTINGS_PATH) { + return process.env.GEMINI_CLI_SYSTEM_SETTINGS_PATH; + } if (platform() === 'darwin') { return '/Library/Application Support/QwenCode/settings.json'; } else if (platform() === 'win32') { @@ -34,8 +38,6 @@ function getSystemSettingsPath(): string { } } -export const SYSTEM_SETTINGS_PATH = getSystemSettingsPath(); - export enum SettingScope { User = 'User', Workspace = 'Workspace', @@ -46,12 +48,17 @@ export interface CheckpointingSettings { enabled?: boolean; } +export interface SummarizeToolOutputSettings { + tokenBudget?: number; +} + export interface AccessibilitySettings { disableLoadingPhrases?: boolean; } export interface Settings { theme?: string; + customThemes?: Record; selectedAuthType?: AuthType; sandbox?: boolean | string; coreTools?: string[]; @@ -60,6 +67,8 @@ export interface Settings { toolCallCommand?: string; mcpServerCommand?: string; mcpServers?: Record; + allowMCPServers?: string[]; + excludeMCPServers?: string[]; showMemoryUsage?: boolean; contextFileName?: string | string[]; accessibility?: AccessibilitySettings; @@ -74,43 +83,32 @@ export interface Settings { // Git-aware file filtering settings fileFiltering?: { respectGitIgnore?: boolean; + respectGeminiIgnore?: boolean; enableRecursiveFileSearch?: boolean; }; - // UI setting. Does not display the ANSI-controlled terminal title. 
hideWindowTitle?: boolean; + hideTips?: boolean; hideBanner?: boolean; // Setting for setting maximum number of user/model/tool turns in a session. maxSessionTurns?: number; - // Setting for maximum token limit for conversation history before blocking requests - sessionTokenLimit?: number; + // A map of tool names to their summarization settings. + summarizeToolOutput?: Record; - // Setting for maximum number of files and folders to show in folder structure - maxFolderItems?: number; - - // Sampling parameters for content generation - sampling_params?: { - top_p?: number; - top_k?: number; - repetition_penalty?: number; - presence_penalty?: number; - frequency_penalty?: number; - temperature?: number; - max_tokens?: number; - }; - - // System prompt mappings for different base URLs and model names - systemPromptMappings?: Array<{ - baseUrls?: string[]; - modelNames?: string[]; - template?: string; - }>; + vimMode?: boolean; // Add other settings here. ideMode?: boolean; + memoryDiscoveryMaxDirs?: number; + sampling_params?: Record; + systemPromptMappings?: Array<{ + baseUrls: string[]; + modelNames: string[]; + template: string; + }>; } export interface SettingsError { @@ -148,10 +146,24 @@ export class LoadedSettings { } private computeMergedSettings(): Settings { + const system = this.system.settings; + const user = this.user.settings; + const workspace = this.workspace.settings; + return { - ...this.user.settings, - ...this.workspace.settings, - ...this.system.settings, + ...user, + ...workspace, + ...system, + customThemes: { + ...(user.customThemes || {}), + ...(workspace.customThemes || {}), + ...(system.customThemes || {}), + }, + mcpServers: { + ...(user.mcpServers || {}), + ...(workspace.mcpServers || {}), + ...(system.mcpServers || {}), + }, }; } @@ -168,13 +180,12 @@ export class LoadedSettings { } } - setValue( + setValue( scope: SettingScope, - key: keyof Settings, - value: string | Record | undefined, + key: K, + value: Settings[K], ): void { const 
settingsFile = this.forScope(scope); - // @ts-expect-error - value can be string | Record settingsFile.settings[key] = value; this._merged = this.computeMergedSettings(); saveSettings(settingsFile); @@ -296,11 +307,11 @@ export function loadSettings(workspaceDir: string): LoadedSettings { let userSettings: Settings = {}; let workspaceSettings: Settings = {}; const settingsErrors: SettingsError[] = []; - + const systemSettingsPath = getSystemSettingsPath(); // Load system settings try { - if (fs.existsSync(SYSTEM_SETTINGS_PATH)) { - const systemContent = fs.readFileSync(SYSTEM_SETTINGS_PATH, 'utf-8'); + if (fs.existsSync(systemSettingsPath)) { + const systemContent = fs.readFileSync(systemSettingsPath, 'utf-8'); const parsedSystemSettings = JSON.parse( stripJsonComments(systemContent), ) as Settings; @@ -309,7 +320,7 @@ export function loadSettings(workspaceDir: string): LoadedSettings { } catch (error: unknown) { settingsErrors.push({ message: getErrorMessage(error), - path: SYSTEM_SETTINGS_PATH, + path: systemSettingsPath, }); } @@ -367,7 +378,7 @@ export function loadSettings(workspaceDir: string): LoadedSettings { return new LoadedSettings( { - path: SYSTEM_SETTINGS_PATH, + path: systemSettingsPath, settings: systemSettings, }, { diff --git a/packages/cli/src/gemini.test.tsx b/packages/cli/src/gemini.test.tsx index ac803f52b..505841c7d 100644 --- a/packages/cli/src/gemini.test.tsx +++ b/packages/cli/src/gemini.test.tsx @@ -6,12 +6,13 @@ import stripAnsi from 'strip-ansi'; import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; -import { main } from './gemini.js'; +import { main, setupUnhandledRejectionHandler } from './gemini.js'; import { LoadedSettings, SettingsFile, loadSettings, } from './config/settings.js'; +import { appEvents, AppEvent } from './utils/events.js'; // Custom error to identify mock process.exit calls class MockProcessExitError extends Error { @@ -55,6 +56,16 @@ vi.mock('update-notifier', () => ({ })), })); 
+vi.mock('./utils/events.js', async (importOriginal) => { + const actual = await importOriginal(); + return { + ...actual, + appEvents: { + emit: vi.fn(), + }, + }; +}); + vi.mock('./utils/sandbox.js', () => ({ sandbox_command: vi.fn(() => ''), // Default to no sandbox command start_sandbox: vi.fn(() => Promise.resolve()), // Mock as an async function that resolves @@ -65,6 +76,8 @@ describe('gemini.tsx main function', () => { let loadSettingsMock: ReturnType>; let originalEnvGeminiSandbox: string | undefined; let originalEnvSandbox: string | undefined; + let initialUnhandledRejectionListeners: NodeJS.UnhandledRejectionListener[] = + []; const processExitSpy = vi .spyOn(process, 'exit') @@ -82,6 +95,8 @@ describe('gemini.tsx main function', () => { delete process.env.SANDBOX; consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {}); + initialUnhandledRejectionListeners = + process.listeners('unhandledRejection'); }); afterEach(() => { @@ -96,6 +111,15 @@ describe('gemini.tsx main function', () => { } else { delete process.env.SANDBOX; } + + const currentListeners = process.listeners('unhandledRejection'); + const addedListener = currentListeners.find( + (listener) => !initialUnhandledRejectionListeners.includes(listener), + ); + + if (addedListener) { + process.removeListener('unhandledRejection', addedListener); + } vi.restoreAllMocks(); }); @@ -109,7 +133,7 @@ describe('gemini.tsx main function', () => { settings: {}, }; const workspaceSettingsFile: SettingsFile = { - path: '/workspace/.qwen/settings.json', + path: '/workspace/.gemini/settings.json', settings: {}, }; const systemSettingsFile: SettingsFile = { @@ -145,7 +169,45 @@ describe('gemini.tsx main function', () => { 'Please fix /test/settings.json and try again.', ); - // Verify process.exit was called (indirectly, via the thrown error) + // Verify process.exit was called. 
expect(processExitSpy).toHaveBeenCalledWith(1); }); + + it('should log unhandled promise rejections and open debug console on first error', async () => { + const appEventsMock = vi.mocked(appEvents); + const rejectionError = new Error('Test unhandled rejection'); + + setupUnhandledRejectionHandler(); + // Simulate an unhandled rejection. + // We are not using Promise.reject here as vitest will catch it. + // Instead we will dispatch the event manually. + process.emit('unhandledRejection', rejectionError, Promise.resolve()); + + // We need to wait for the rejection handler to be called. + await new Promise(process.nextTick); + + expect(appEventsMock.emit).toHaveBeenCalledWith(AppEvent.OpenDebugConsole); + expect(appEventsMock.emit).toHaveBeenCalledWith( + AppEvent.LogError, + expect.stringContaining('Unhandled Promise Rejection'), + ); + expect(appEventsMock.emit).toHaveBeenCalledWith( + AppEvent.LogError, + expect.stringContaining('Please file a bug report using the /bug tool.'), + ); + + // Simulate a second rejection + const secondRejectionError = new Error('Second test unhandled rejection'); + process.emit('unhandledRejection', secondRejectionError, Promise.resolve()); + await new Promise(process.nextTick); + + // Ensure emit was only called once for OpenDebugConsole + const openDebugConsoleCalls = appEventsMock.emit.mock.calls.filter( + (call) => call[0] === AppEvent.OpenDebugConsole, + ); + expect(openDebugConsoleCalls.length).toBe(1); + + // Avoid the process.exit error from being thrown. 
+ processExitSpy.mockRestore(); + }); }); diff --git a/packages/cli/src/gemini.tsx b/packages/cli/src/gemini.tsx index 630720cc3..1b28ec123 100644 --- a/packages/cli/src/gemini.tsx +++ b/packages/cli/src/gemini.tsx @@ -17,7 +17,6 @@ import { start_sandbox } from './utils/sandbox.js'; import { LoadedSettings, loadSettings, - USER_SETTINGS_PATH, SettingScope, } from './config/settings.js'; import { themeManager } from './ui/themes/theme-manager.js'; @@ -40,6 +39,8 @@ import { } from '@qwen-code/qwen-code-core'; import { validateAuthMethod } from './config/auth.js'; import { setMaxSizedBoxDebugging } from './ui/components/shared/MaxSizedBox.js'; +import { validateNonInteractiveAuth } from './validateNonInterActiveAuth.js'; +import { appEvents, AppEvent } from './utils/events.js'; function getNodeMemoryArgs(config: Config): string[] { const totalMemoryMB = os.totalmem() / (1024 * 1024); @@ -84,8 +85,32 @@ async function relaunchWithAdditionalArgs(additionalArgs: string[]) { await new Promise((resolve) => child.on('close', resolve)); process.exit(0); } +import { runAcpPeer } from './acp/acpPeer.js'; + +export function setupUnhandledRejectionHandler() { + let unhandledRejectionOccurred = false; + process.on('unhandledRejection', (reason, _promise) => { + const errorMessage = `========================================= +This is an unexpected error. Please file a bug report using the /bug tool. +CRITICAL: Unhandled Promise Rejection! +========================================= +Reason: ${reason}${ + reason instanceof Error && reason.stack + ? 
` +Stack trace: +${reason.stack}` + : '' + }`; + appEvents.emit(AppEvent.LogError, errorMessage); + if (!unhandledRejectionOccurred) { + unhandledRejectionOccurred = true; + appEvents.emit(AppEvent.OpenDebugConsole); + } + }); +} export async function main() { + setupUnhandledRejectionHandler(); const workspaceRoot = process.cwd(); const settings = loadSettings(workspaceRoot); @@ -141,6 +166,9 @@ export async function main() { await config.initialize(); + // Load custom themes from settings + themeManager.loadCustomThemes(settings.merged.customThemes); + if (settings.merged.theme) { if (!themeManager.setActiveTheme(settings.merged.theme)) { // If the theme is not found during initial load, log a warning and continue. @@ -183,12 +211,16 @@ export async function main() { if ( settings.merged.selectedAuthType === AuthType.LOGIN_WITH_GOOGLE && - config.getNoBrowser() + config.isBrowserLaunchSuppressed() ) { // Do oauth before app renders to make copying the link possible. await getOauthClient(settings.merged.selectedAuthType, config); } + if (config.getExperimentalAcp()) { + return runAcpPeer(config, settings); + } + let input = config.getQuestion(); const startupWarnings = [ ...(await getStartupWarnings()), @@ -264,21 +296,6 @@ function setWindowTitle(title: string, settings: LoadedSettings) { } } -// --- Global Unhandled Rejection Handler --- -process.on('unhandledRejection', (reason, _promise) => { - // Log other unexpected unhandled rejections as critical errors - console.error('========================================='); - console.error('CRITICAL: Unhandled Promise Rejection!'); - console.error('========================================='); - console.error('Reason:', reason); - console.error('Stack trace may follow:'); - if (!(reason instanceof Error)) { - console.error(reason); - } - // Exit for genuinely unhandled errors - process.exit(1); -}); - async function loadNonInteractiveConfig( config: Config, extensions: Extension[], @@ -312,51 +329,8 @@ async function 
loadNonInteractiveConfig( await finalConfig.initialize(); } - return await validateNonInterActiveAuth( + return await validateNonInteractiveAuth( settings.merged.selectedAuthType, finalConfig, ); } - -async function validateNonInterActiveAuth( - selectedAuthType: AuthType | undefined, - nonInteractiveConfig: Config, -) { - // making a special case for the cli. many headless environments might not have a settings.json set - // so if GEMINI_API_KEY or OPENAI_API_KEY is set, we'll use that. However since the oauth things are interactive anyway, we'll - // still expect that exists - if ( - !selectedAuthType && - !process.env.GEMINI_API_KEY && - !process.env.OPENAI_API_KEY - ) { - console.error( - `Please set an Auth method in your ${USER_SETTINGS_PATH} OR specify GEMINI_API_KEY or OPENAI_API_KEY env variable before running`, - ); - process.exit(1); - } - - // Determine auth type based on available environment variables - if (!selectedAuthType) { - if (process.env.OPENAI_API_KEY) { - selectedAuthType = AuthType.USE_OPENAI; - } else if (process.env.GEMINI_API_KEY) { - selectedAuthType = AuthType.USE_GEMINI; - } - } - - // This should never happen due to the check above, but TypeScript needs assurance - if (!selectedAuthType) { - console.error('No valid authentication method found'); - process.exit(1); - } - - const err = validateAuthMethod(selectedAuthType); - if (err != null) { - console.error(err); - process.exit(1); - } - - await nonInteractiveConfig.refreshAuth(selectedAuthType); - return nonInteractiveConfig; -} diff --git a/packages/cli/src/nonInteractiveCli.test.ts b/packages/cli/src/nonInteractiveCli.test.ts index 9a0d4eba4..6c37efb81 100644 --- a/packages/cli/src/nonInteractiveCli.test.ts +++ b/packages/cli/src/nonInteractiveCli.test.ts @@ -229,14 +229,14 @@ describe('runNonInteractive', () => { it('should not exit if a tool is not found, and should send error back to model', async () => { const functionCall: FunctionCall = { id: 'fcNotFound', - name: 
'nonExistentTool', + name: 'nonexistentTool', args: {}, }; const errorResponsePart: Part = { functionResponse: { - name: 'nonExistentTool', + name: 'nonexistentTool', id: 'fcNotFound', - response: { error: 'Tool "nonExistentTool" not found in registry.' }, + response: { error: 'Tool "nonexistentTool" not found in registry.' }, }, }; @@ -246,8 +246,8 @@ describe('runNonInteractive', () => { vi.mocked(mockCoreExecuteToolCall).mockResolvedValue({ callId: 'fcNotFound', responseParts: [errorResponsePart], - resultDisplay: 'Tool "nonExistentTool" not found in registry.', - error: new Error('Tool "nonExistentTool" not found in registry.'), + resultDisplay: 'Tool "nonexistentTool" not found in registry.', + error: new Error('Tool "nonexistentTool" not found in registry.'), }); const stream1 = (async function* () { @@ -278,7 +278,7 @@ describe('runNonInteractive', () => { ); expect(consoleErrorSpy).toHaveBeenCalledWith( - 'Error executing tool nonExistentTool: Tool "nonExistentTool" not found in registry.', + 'Error executing tool nonexistentTool: Tool "nonexistentTool" not found in registry.', ); expect(mockProcessExit).not.toHaveBeenCalled(); diff --git a/packages/cli/src/nonInteractiveCli.ts b/packages/cli/src/nonInteractiveCli.ts index f2421baa7..d3d646e9f 100644 --- a/packages/cli/src/nonInteractiveCli.ts +++ b/packages/cli/src/nonInteractiveCli.ts @@ -11,7 +11,6 @@ import { ToolRegistry, shutdownTelemetry, isTelemetrySdkInitialized, - ToolResultDisplay, } from '@qwen-code/qwen-code-core'; import { Content, @@ -44,83 +43,6 @@ function getResponseText(response: GenerateContentResponse): string | null { return null; } -// Helper function to format tool call arguments for display -function formatToolArgs(args: Record): string { - if (!args || Object.keys(args).length === 0) { - return '(no arguments)'; - } - - const formattedArgs = Object.entries(args) - .map(([key, value]) => { - if (typeof value === 'string') { - return `${key}: "${value}"`; - } else if (typeof value 
=== 'object' && value !== null) { - return `${key}: ${JSON.stringify(value)}`; - } else { - return `${key}: ${value}`; - } - }) - .join(', '); - - return `(${formattedArgs})`; -} -// Helper function to display tool call information -function displayToolCallInfo( - toolName: string, - args: Record, - status: 'start' | 'success' | 'error', - resultDisplay?: ToolResultDisplay, - errorMessage?: string, -): void { - const timestamp = new Date().toLocaleTimeString(); - const argsStr = formatToolArgs(args); - - switch (status) { - case 'start': - process.stdout.write( - `\n[${timestamp}] 🔧 Executing tool: ${toolName} ${argsStr}\n`, - ); - break; - case 'success': - if (resultDisplay) { - if (typeof resultDisplay === 'string' && resultDisplay.trim()) { - process.stdout.write( - `[${timestamp}] ✅ Tool ${toolName} completed successfully\n`, - ); - process.stdout.write(`📋 Result:\n${resultDisplay}\n`); - } else if ( - typeof resultDisplay === 'object' && - 'fileDiff' in resultDisplay - ) { - process.stdout.write( - `[${timestamp}] ✅ Tool ${toolName} completed successfully\n`, - ); - process.stdout.write(`📋 File: ${resultDisplay.fileName}\n`); - process.stdout.write(`📋 Diff:\n${resultDisplay.fileDiff}\n`); - } else { - process.stdout.write( - `[${timestamp}] ✅ Tool ${toolName} completed successfully (no output)\n`, - ); - } - } else { - process.stdout.write( - `[${timestamp}] ✅ Tool ${toolName} completed successfully (no output)\n`, - ); - } - break; - case 'error': - process.stdout.write( - `[${timestamp}] ❌ Tool ${toolName} failed: ${errorMessage}\n`, - ); - break; - default: - process.stdout.write( - `[${timestamp}] ⚠️ Tool ${toolName} reported unknown status: ${status}\n`, - ); - break; - } -} - export async function runNonInteractive( config: Config, input: string, @@ -196,9 +118,6 @@ export async function runNonInteractive( prompt_id, }; - //Display tool call start information - displayToolCallInfo(fc.name as string, fc.args ?? 
{}, 'start'); - const toolResponse = await executeToolCall( config, requestInfo, @@ -207,20 +126,6 @@ export async function runNonInteractive( ); if (toolResponse.error) { - // Display tool call error information - const errorMessage = - typeof toolResponse.resultDisplay === 'string' - ? toolResponse.resultDisplay - : toolResponse.error?.message; - - displayToolCallInfo( - fc.name as string, - fc.args ?? {}, - 'error', - undefined, - errorMessage, - ); - const isToolNotFound = toolResponse.error.message.includes( 'not found in registry', ); @@ -230,14 +135,6 @@ export async function runNonInteractive( if (!isToolNotFound) { process.exit(1); } - } else { - // Display tool call success information - displayToolCallInfo( - fc.name as string, - fc.args ?? {}, - 'success', - toolResponse.resultDisplay, - ); } if (toolResponse.responseParts) { diff --git a/packages/cli/src/patches/is-in-ci.ts b/packages/cli/src/patches/is-in-ci.ts new file mode 100644 index 000000000..a37c8678a --- /dev/null +++ b/packages/cli/src/patches/is-in-ci.ts @@ -0,0 +1,17 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +// This is a replacement for the `is-in-ci` package that always returns false. +// We are doing this to avoid the issue where `ink` does not render the UI +// when it detects that it is running in a CI environment. +// This is safe because `ink` (and thus `is-in-ci`) is only used in the +// interactive code path of the CLI. +// See issue #1563 for more details. 
+ +const isInCi = false; + +// eslint-disable-next-line import/no-default-export +export default isInCi; diff --git a/packages/cli/src/services/BuiltinCommandLoader.test.ts b/packages/cli/src/services/BuiltinCommandLoader.test.ts new file mode 100644 index 000000000..bb4a62177 --- /dev/null +++ b/packages/cli/src/services/BuiltinCommandLoader.test.ts @@ -0,0 +1,127 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +vi.mock('../ui/commands/aboutCommand.js', async () => { + const { CommandKind } = await import('../ui/commands/types.js'); + return { + aboutCommand: { + name: 'about', + description: 'About the CLI', + kind: CommandKind.BUILT_IN, + }, + }; +}); + +vi.mock('../ui/commands/ideCommand.js', () => ({ ideCommand: vi.fn() })); +vi.mock('../ui/commands/restoreCommand.js', () => ({ + restoreCommand: vi.fn(), +})); + +import { describe, it, expect, vi, beforeEach, type Mock } from 'vitest'; +import { BuiltinCommandLoader } from './BuiltinCommandLoader.js'; +import { Config } from '@qwen-code/qwen-code-core'; +import { CommandKind } from '../ui/commands/types.js'; + +import { ideCommand } from '../ui/commands/ideCommand.js'; +import { restoreCommand } from '../ui/commands/restoreCommand.js'; + +vi.mock('../ui/commands/authCommand.js', () => ({ authCommand: {} })); +vi.mock('../ui/commands/bugCommand.js', () => ({ bugCommand: {} })); +vi.mock('../ui/commands/chatCommand.js', () => ({ chatCommand: {} })); +vi.mock('../ui/commands/clearCommand.js', () => ({ clearCommand: {} })); +vi.mock('../ui/commands/compressCommand.js', () => ({ compressCommand: {} })); +vi.mock('../ui/commands/corgiCommand.js', () => ({ corgiCommand: {} })); +vi.mock('../ui/commands/docsCommand.js', () => ({ docsCommand: {} })); +vi.mock('../ui/commands/editorCommand.js', () => ({ editorCommand: {} })); +vi.mock('../ui/commands/extensionsCommand.js', () => ({ + extensionsCommand: {}, +})); +vi.mock('../ui/commands/helpCommand.js', () => ({ 
helpCommand: {} })); +vi.mock('../ui/commands/memoryCommand.js', () => ({ memoryCommand: {} })); +vi.mock('../ui/commands/privacyCommand.js', () => ({ privacyCommand: {} })); +vi.mock('../ui/commands/quitCommand.js', () => ({ quitCommand: {} })); +vi.mock('../ui/commands/statsCommand.js', () => ({ statsCommand: {} })); +vi.mock('../ui/commands/themeCommand.js', () => ({ themeCommand: {} })); +vi.mock('../ui/commands/toolsCommand.js', () => ({ toolsCommand: {} })); +vi.mock('../ui/commands/mcpCommand.js', () => ({ + mcpCommand: { + name: 'mcp', + description: 'MCP command', + kind: 'BUILT_IN', + }, +})); + +describe('BuiltinCommandLoader', () => { + let mockConfig: Config; + + const ideCommandMock = ideCommand as Mock; + const restoreCommandMock = restoreCommand as Mock; + + beforeEach(() => { + vi.clearAllMocks(); + mockConfig = { some: 'config' } as unknown as Config; + + ideCommandMock.mockReturnValue({ + name: 'ide', + description: 'IDE command', + kind: CommandKind.BUILT_IN, + }); + restoreCommandMock.mockReturnValue({ + name: 'restore', + description: 'Restore command', + kind: CommandKind.BUILT_IN, + }); + }); + + it('should correctly pass the config object to command factory functions', async () => { + const loader = new BuiltinCommandLoader(mockConfig); + await loader.loadCommands(new AbortController().signal); + + expect(ideCommandMock).toHaveBeenCalledTimes(1); + expect(ideCommandMock).toHaveBeenCalledWith(mockConfig); + expect(restoreCommandMock).toHaveBeenCalledTimes(1); + expect(restoreCommandMock).toHaveBeenCalledWith(mockConfig); + }); + + it('should filter out null command definitions returned by factories', async () => { + // Override the mock's behavior for this specific test. + ideCommandMock.mockReturnValue(null); + const loader = new BuiltinCommandLoader(mockConfig); + const commands = await loader.loadCommands(new AbortController().signal); + + // The 'ide' command should be filtered out. 
+ const ideCmd = commands.find((c) => c.name === 'ide'); + expect(ideCmd).toBeUndefined(); + + // Other commands should still be present. + const aboutCmd = commands.find((c) => c.name === 'about'); + expect(aboutCmd).toBeDefined(); + }); + + it('should handle a null config gracefully when calling factories', async () => { + const loader = new BuiltinCommandLoader(null); + await loader.loadCommands(new AbortController().signal); + expect(ideCommandMock).toHaveBeenCalledTimes(1); + expect(ideCommandMock).toHaveBeenCalledWith(null); + expect(restoreCommandMock).toHaveBeenCalledTimes(1); + expect(restoreCommandMock).toHaveBeenCalledWith(null); + }); + + it('should return a list of all loaded commands', async () => { + const loader = new BuiltinCommandLoader(mockConfig); + const commands = await loader.loadCommands(new AbortController().signal); + + const aboutCmd = commands.find((c) => c.name === 'about'); + expect(aboutCmd).toBeDefined(); + expect(aboutCmd?.kind).toBe(CommandKind.BUILT_IN); + + const ideCmd = commands.find((c) => c.name === 'ide'); + expect(ideCmd).toBeDefined(); + + const mcpCmd = commands.find((c) => c.name === 'mcp'); + expect(mcpCmd).toBeDefined(); + }); +}); diff --git a/packages/cli/src/services/BuiltinCommandLoader.ts b/packages/cli/src/services/BuiltinCommandLoader.ts new file mode 100644 index 000000000..ebceba538 --- /dev/null +++ b/packages/cli/src/services/BuiltinCommandLoader.ts @@ -0,0 +1,75 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { ICommandLoader } from './types.js'; +import { SlashCommand } from '../ui/commands/types.js'; +import { Config } from '@qwen-code/qwen-code-core'; +import { aboutCommand } from '../ui/commands/aboutCommand.js'; +import { authCommand } from '../ui/commands/authCommand.js'; +import { bugCommand } from '../ui/commands/bugCommand.js'; +import { chatCommand } from '../ui/commands/chatCommand.js'; +import { clearCommand } from 
'../ui/commands/clearCommand.js'; +import { compressCommand } from '../ui/commands/compressCommand.js'; +import { copyCommand } from '../ui/commands/copyCommand.js'; +import { corgiCommand } from '../ui/commands/corgiCommand.js'; +import { docsCommand } from '../ui/commands/docsCommand.js'; +import { editorCommand } from '../ui/commands/editorCommand.js'; +import { extensionsCommand } from '../ui/commands/extensionsCommand.js'; +import { helpCommand } from '../ui/commands/helpCommand.js'; +import { ideCommand } from '../ui/commands/ideCommand.js'; +import { mcpCommand } from '../ui/commands/mcpCommand.js'; +import { memoryCommand } from '../ui/commands/memoryCommand.js'; +import { privacyCommand } from '../ui/commands/privacyCommand.js'; +import { quitCommand } from '../ui/commands/quitCommand.js'; +import { restoreCommand } from '../ui/commands/restoreCommand.js'; +import { statsCommand } from '../ui/commands/statsCommand.js'; +import { themeCommand } from '../ui/commands/themeCommand.js'; +import { toolsCommand } from '../ui/commands/toolsCommand.js'; +import { vimCommand } from '../ui/commands/vimCommand.js'; + +/** + * Loads the core, hard-coded slash commands that are an integral part + * of the Gemini CLI application. + */ +export class BuiltinCommandLoader implements ICommandLoader { + constructor(private config: Config | null) {} + + /** + * Gathers all raw built-in command definitions, injects dependencies where + * needed (e.g., config) and filters out any that are not available. + * + * @param _signal An AbortSignal (unused for this synchronous loader). + * @returns A promise that resolves to an array of `SlashCommand` objects. 
+ */ + async loadCommands(_signal: AbortSignal): Promise { + const allDefinitions: Array = [ + aboutCommand, + authCommand, + bugCommand, + chatCommand, + clearCommand, + compressCommand, + copyCommand, + corgiCommand, + docsCommand, + editorCommand, + extensionsCommand, + helpCommand, + ideCommand(this.config), + memoryCommand, + privacyCommand, + mcpCommand, + quitCommand, + restoreCommand(this.config), + statsCommand, + themeCommand, + toolsCommand, + vimCommand, + ]; + + return allDefinitions.filter((cmd): cmd is SlashCommand => cmd !== null); + } +} diff --git a/packages/cli/src/services/CommandService.test.ts b/packages/cli/src/services/CommandService.test.ts index e780ec5fc..28731f81d 100644 --- a/packages/cli/src/services/CommandService.test.ts +++ b/packages/cli/src/services/CommandService.test.ts @@ -4,135 +4,177 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { vi, describe, it, expect, beforeEach } from 'vitest'; +import { vi, describe, it, expect, beforeEach, afterEach } from 'vitest'; import { CommandService } from './CommandService.js'; -import { type SlashCommand } from '../ui/commands/types.js'; -import { memoryCommand } from '../ui/commands/memoryCommand.js'; -import { helpCommand } from '../ui/commands/helpCommand.js'; -import { clearCommand } from '../ui/commands/clearCommand.js'; -import { authCommand } from '../ui/commands/authCommand.js'; -import { themeCommand } from '../ui/commands/themeCommand.js'; -import { privacyCommand } from '../ui/commands/privacyCommand.js'; -import { aboutCommand } from '../ui/commands/aboutCommand.js'; +import { type ICommandLoader } from './types.js'; +import { CommandKind, type SlashCommand } from '../ui/commands/types.js'; -// Mock the command modules to isolate the service from the command implementations. 
-vi.mock('../ui/commands/memoryCommand.js', () => ({ - memoryCommand: { name: 'memory', description: 'Mock Memory' }, -})); -vi.mock('../ui/commands/helpCommand.js', () => ({ - helpCommand: { name: 'help', description: 'Mock Help' }, -})); -vi.mock('../ui/commands/clearCommand.js', () => ({ - clearCommand: { name: 'clear', description: 'Mock Clear' }, -})); -vi.mock('../ui/commands/authCommand.js', () => ({ - authCommand: { name: 'auth', description: 'Mock Auth' }, -})); -vi.mock('../ui/commands/themeCommand.js', () => ({ - themeCommand: { name: 'theme', description: 'Mock Theme' }, -})); -vi.mock('../ui/commands/privacyCommand.js', () => ({ - privacyCommand: { name: 'privacy', description: 'Mock Privacy' }, -})); -vi.mock('../ui/commands/aboutCommand.js', () => ({ - aboutCommand: { name: 'about', description: 'Mock About' }, -})); +const createMockCommand = (name: string, kind: CommandKind): SlashCommand => ({ + name, + description: `Description for ${name}`, + kind, + action: vi.fn(), +}); + +const mockCommandA = createMockCommand('command-a', CommandKind.BUILT_IN); +const mockCommandB = createMockCommand('command-b', CommandKind.BUILT_IN); +const mockCommandC = createMockCommand('command-c', CommandKind.FILE); +const mockCommandB_Override = createMockCommand('command-b', CommandKind.FILE); + +class MockCommandLoader implements ICommandLoader { + private commandsToLoad: SlashCommand[]; + + constructor(commandsToLoad: SlashCommand[]) { + this.commandsToLoad = commandsToLoad; + } + + loadCommands = vi.fn( + async (): Promise => Promise.resolve(this.commandsToLoad), + ); +} describe('CommandService', () => { - describe('when using default production loader', () => { - let commandService: CommandService; - - beforeEach(() => { - commandService = new CommandService(); - }); - - it('should initialize with an empty command tree', () => { - const tree = commandService.getCommands(); - expect(tree).toBeInstanceOf(Array); - expect(tree.length).toBe(0); - }); - - 
describe('loadCommands', () => { - it('should load the built-in commands into the command tree', async () => { - // Pre-condition check - expect(commandService.getCommands().length).toBe(0); - - // Action - await commandService.loadCommands(); - const tree = commandService.getCommands(); - - // Post-condition assertions - expect(tree.length).toBe(7); - - const commandNames = tree.map((cmd) => cmd.name); - expect(commandNames).toContain('auth'); - expect(commandNames).toContain('memory'); - expect(commandNames).toContain('help'); - expect(commandNames).toContain('clear'); - expect(commandNames).toContain('theme'); - expect(commandNames).toContain('privacy'); - expect(commandNames).toContain('about'); - }); - - it('should overwrite any existing commands when called again', async () => { - // Load once - await commandService.loadCommands(); - expect(commandService.getCommands().length).toBe(7); - - // Load again - await commandService.loadCommands(); - const tree = commandService.getCommands(); - - // Should not append, but overwrite - expect(tree.length).toBe(7); - }); - }); - - describe('getCommandTree', () => { - it('should return the current command tree', async () => { - const initialTree = commandService.getCommands(); - expect(initialTree).toEqual([]); - - await commandService.loadCommands(); - - const loadedTree = commandService.getCommands(); - expect(loadedTree.length).toBe(7); - expect(loadedTree).toEqual([ - aboutCommand, - authCommand, - clearCommand, - helpCommand, - memoryCommand, - privacyCommand, - themeCommand, - ]); - }); - }); + beforeEach(() => { + vi.spyOn(console, 'debug').mockImplementation(() => {}); }); - describe('when initialized with an injected loader function', () => { - it('should use the provided loader instead of the built-in one', async () => { - // Arrange: Create a set of mock commands. 
- const mockCommands: SlashCommand[] = [ - { name: 'injected-test-1', description: 'injected 1' }, - { name: 'injected-test-2', description: 'injected 2' }, - ]; + afterEach(() => { + vi.restoreAllMocks(); + }); - // Arrange: Create a mock loader FUNCTION that resolves with our mock commands. - const mockLoader = vi.fn().mockResolvedValue(mockCommands); + it('should load commands from a single loader', async () => { + const mockLoader = new MockCommandLoader([mockCommandA, mockCommandB]); + const service = await CommandService.create( + [mockLoader], + new AbortController().signal, + ); - // Act: Instantiate the service WITH the injected loader function. - const commandService = new CommandService(mockLoader); - await commandService.loadCommands(); - const tree = commandService.getCommands(); + const commands = service.getCommands(); - // Assert: The tree should contain ONLY our injected commands. - expect(mockLoader).toHaveBeenCalled(); // Verify our mock loader was actually called. - expect(tree.length).toBe(2); - expect(tree).toEqual(mockCommands); + expect(mockLoader.loadCommands).toHaveBeenCalledTimes(1); + expect(commands).toHaveLength(2); + expect(commands).toEqual( + expect.arrayContaining([mockCommandA, mockCommandB]), + ); + }); - const commandNames = tree.map((cmd) => cmd.name); - expect(commandNames).not.toContain('memory'); // Verify it didn't load production commands. 
- }); + it('should aggregate commands from multiple loaders', async () => { + const loader1 = new MockCommandLoader([mockCommandA]); + const loader2 = new MockCommandLoader([mockCommandC]); + const service = await CommandService.create( + [loader1, loader2], + new AbortController().signal, + ); + + const commands = service.getCommands(); + + expect(loader1.loadCommands).toHaveBeenCalledTimes(1); + expect(loader2.loadCommands).toHaveBeenCalledTimes(1); + expect(commands).toHaveLength(2); + expect(commands).toEqual( + expect.arrayContaining([mockCommandA, mockCommandC]), + ); + }); + + it('should override commands from earlier loaders with those from later loaders', async () => { + const loader1 = new MockCommandLoader([mockCommandA, mockCommandB]); + const loader2 = new MockCommandLoader([ + mockCommandB_Override, + mockCommandC, + ]); + const service = await CommandService.create( + [loader1, loader2], + new AbortController().signal, + ); + + const commands = service.getCommands(); + + expect(commands).toHaveLength(3); // Should be A, C, and the overridden B. + + // The final list should contain the override from the *last* loader. + const commandB = commands.find((cmd) => cmd.name === 'command-b'); + expect(commandB).toBeDefined(); + expect(commandB?.kind).toBe(CommandKind.FILE); // Verify it's the overridden version. + expect(commandB).toEqual(mockCommandB_Override); + + // Ensure the other commands are still present. 
+ expect(commands).toEqual( + expect.arrayContaining([ + mockCommandA, + mockCommandC, + mockCommandB_Override, + ]), + ); + }); + + it('should handle loaders that return an empty array of commands gracefully', async () => { + const loader1 = new MockCommandLoader([mockCommandA]); + const emptyLoader = new MockCommandLoader([]); + const loader3 = new MockCommandLoader([mockCommandB]); + const service = await CommandService.create( + [loader1, emptyLoader, loader3], + new AbortController().signal, + ); + + const commands = service.getCommands(); + + expect(emptyLoader.loadCommands).toHaveBeenCalledTimes(1); + expect(commands).toHaveLength(2); + expect(commands).toEqual( + expect.arrayContaining([mockCommandA, mockCommandB]), + ); + }); + + it('should load commands from successful loaders even if one fails', async () => { + const successfulLoader = new MockCommandLoader([mockCommandA]); + const failingLoader = new MockCommandLoader([]); + const error = new Error('Loader failed'); + vi.spyOn(failingLoader, 'loadCommands').mockRejectedValue(error); + + const service = await CommandService.create( + [successfulLoader, failingLoader], + new AbortController().signal, + ); + + const commands = service.getCommands(); + expect(commands).toHaveLength(1); + expect(commands).toEqual([mockCommandA]); + expect(console.debug).toHaveBeenCalledWith( + 'A command loader failed:', + error, + ); + }); + + it('getCommands should return a readonly array that cannot be mutated', async () => { + const service = await CommandService.create( + [new MockCommandLoader([mockCommandA])], + new AbortController().signal, + ); + + const commands = service.getCommands(); + + // Expect it to throw a TypeError at runtime because the array is frozen. + expect(() => { + // @ts-expect-error - Testing immutability is intentional here. + commands.push(mockCommandB); + }).toThrow(); + + // Verify the original array was not mutated. 
+ expect(service.getCommands()).toHaveLength(1); + }); + + it('should pass the abort signal to all loaders', async () => { + const controller = new AbortController(); + const signal = controller.signal; + + const loader1 = new MockCommandLoader([mockCommandA]); + const loader2 = new MockCommandLoader([mockCommandB]); + + await CommandService.create([loader1, loader2], signal); + + expect(loader1.loadCommands).toHaveBeenCalledTimes(1); + expect(loader1.loadCommands).toHaveBeenCalledWith(signal); + expect(loader2.loadCommands).toHaveBeenCalledTimes(1); + expect(loader2.loadCommands).toHaveBeenCalledWith(signal); }); }); diff --git a/packages/cli/src/services/CommandService.ts b/packages/cli/src/services/CommandService.ts index ef31952df..ef4f4d14a 100644 --- a/packages/cli/src/services/CommandService.ts +++ b/packages/cli/src/services/CommandService.ts @@ -5,40 +5,79 @@ */ import { SlashCommand } from '../ui/commands/types.js'; -import { memoryCommand } from '../ui/commands/memoryCommand.js'; -import { helpCommand } from '../ui/commands/helpCommand.js'; -import { clearCommand } from '../ui/commands/clearCommand.js'; -import { authCommand } from '../ui/commands/authCommand.js'; -import { themeCommand } from '../ui/commands/themeCommand.js'; -import { privacyCommand } from '../ui/commands/privacyCommand.js'; -import { aboutCommand } from '../ui/commands/aboutCommand.js'; - -const loadBuiltInCommands = async (): Promise => [ - aboutCommand, - authCommand, - clearCommand, - helpCommand, - memoryCommand, - privacyCommand, - themeCommand, -]; +import { ICommandLoader } from './types.js'; +/** + * Orchestrates the discovery and loading of all slash commands for the CLI. + * + * This service operates on a provider-based loader pattern. It is initialized + * with an array of `ICommandLoader` instances, each responsible for fetching + * commands from a specific source (e.g., built-in code, local files). 
+ * + * The CommandService is responsible for invoking these loaders, aggregating their + * results, and resolving any name conflicts. This architecture allows the command + * system to be extended with new sources without modifying the service itself. + */ export class CommandService { - private commands: SlashCommand[] = []; + /** + * Private constructor to enforce the use of the async factory. + * @param commands A readonly array of the fully loaded and de-duplicated commands. + */ + private constructor(private readonly commands: readonly SlashCommand[]) {} - constructor( - private commandLoader: () => Promise = loadBuiltInCommands, - ) { - // The constructor can be used for dependency injection in the future. + /** + * Asynchronously creates and initializes a new CommandService instance. + * + * This factory method orchestrates the entire command loading process. It + * runs all provided loaders in parallel, aggregates their results, handles + * name conflicts by letting the last-loaded command win, and then returns a + * fully constructed `CommandService` instance. + * + * @param loaders An array of objects that conform to the `ICommandLoader` + * interface. The order of loaders is significant: if multiple loaders + * provide a command with the same name, the command from the loader that + * appears later in the array will take precedence. + * @param signal An AbortSignal to cancel the loading process. + * @returns A promise that resolves to a new, fully initialized `CommandService` instance. + */ + static async create( + loaders: ICommandLoader[], + signal: AbortSignal, + ): Promise { + const results = await Promise.allSettled( + loaders.map((loader) => loader.loadCommands(signal)), + ); + + const allCommands: SlashCommand[] = []; + for (const result of results) { + if (result.status === 'fulfilled') { + allCommands.push(...result.value); + } else { + console.debug('A command loader failed:', result.reason); + } + } + + // De-duplicate commands using a Map. 
The last one found with a given name wins. + // This creates a natural override system based on the order of the loaders + // passed to the constructor. + const commandMap = new Map(); + for (const cmd of allCommands) { + commandMap.set(cmd.name, cmd); + } + + const finalCommands = Object.freeze(Array.from(commandMap.values())); + return new CommandService(finalCommands); } - async loadCommands(): Promise { - // For now, we only load the built-in commands. - // File-based and remote commands will be added later. - this.commands = await this.commandLoader(); - } - - getCommands(): SlashCommand[] { + /** + * Retrieves the currently loaded and de-duplicated list of slash commands. + * + * This method is a safe accessor for the service's state. It returns a + * readonly array, preventing consumers from modifying the service's internal state. + * + * @returns A readonly, unified array of available `SlashCommand` objects. + */ + getCommands(): readonly SlashCommand[] { return this.commands; } } diff --git a/packages/cli/src/services/FileCommandLoader.test.ts b/packages/cli/src/services/FileCommandLoader.test.ts new file mode 100644 index 000000000..fb565f413 --- /dev/null +++ b/packages/cli/src/services/FileCommandLoader.test.ts @@ -0,0 +1,606 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { FileCommandLoader } from './FileCommandLoader.js'; +import { + Config, + getProjectCommandsDir, + getUserCommandsDir, +} from '@qwen-code/qwen-code-core'; +import mock from 'mock-fs'; +import { assert, vi } from 'vitest'; +import { createMockCommandContext } from '../test-utils/mockCommandContext.js'; +import { + SHELL_INJECTION_TRIGGER, + SHORTHAND_ARGS_PLACEHOLDER, +} from './prompt-processors/types.js'; +import { + ConfirmationRequiredError, + ShellProcessor, +} from './prompt-processors/shellProcessor.js'; +import { ShorthandArgumentProcessor } from './prompt-processors/argumentProcessor.js'; + +const mockShellProcess = 
vi.hoisted(() => vi.fn()); +vi.mock('./prompt-processors/shellProcessor.js', () => ({ + ShellProcessor: vi.fn().mockImplementation(() => ({ + process: mockShellProcess, + })), + ConfirmationRequiredError: class extends Error { + constructor( + message: string, + public commandsToConfirm: string[], + ) { + super(message); + this.name = 'ConfirmationRequiredError'; + } + }, +})); + +vi.mock('./prompt-processors/argumentProcessor.js', async (importOriginal) => { + const original = + await importOriginal< + typeof import('./prompt-processors/argumentProcessor.js') + >(); + return { + ShorthandArgumentProcessor: vi + .fn() + .mockImplementation(() => new original.ShorthandArgumentProcessor()), + DefaultArgumentProcessor: vi + .fn() + .mockImplementation(() => new original.DefaultArgumentProcessor()), + }; +}); +vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => { + const original = + await importOriginal(); + return { + ...original, + isCommandAllowed: vi.fn(), + ShellExecutionService: { + execute: vi.fn(), + }, + }; +}); + +describe('FileCommandLoader', () => { + const signal: AbortSignal = new AbortController().signal; + + beforeEach(() => { + vi.clearAllMocks(); + mockShellProcess.mockImplementation((prompt) => Promise.resolve(prompt)); + }); + + afterEach(() => { + mock.restore(); + }); + + it('loads a single command from a file', async () => { + const userCommandsDir = getUserCommandsDir(); + mock({ + [userCommandsDir]: { + 'test.toml': 'prompt = "This is a test prompt"', + }, + }); + + const loader = new FileCommandLoader(null as unknown as Config); + const commands = await loader.loadCommands(signal); + + expect(commands).toHaveLength(1); + const command = commands[0]; + expect(command).toBeDefined(); + expect(command.name).toBe('test'); + + const result = await command.action?.( + createMockCommandContext({ + invocation: { + raw: '/test', + name: 'test', + args: '', + }, + }), + '', + ); + if (result?.type === 'submit_prompt') { + 
expect(result.content).toBe('This is a test prompt'); + } else { + assert.fail('Incorrect action type'); + } + }); + + // Symlink creation on Windows requires special permissions that are not + // available in the standard CI environment. Therefore, we skip these tests + // on Windows to prevent CI failures. The core functionality is still + // validated on Linux and macOS. + const itif = (condition: boolean) => (condition ? it : it.skip); + + itif(process.platform !== 'win32')( + 'loads commands from a symlinked directory', + async () => { + const userCommandsDir = getUserCommandsDir(); + const realCommandsDir = '/real/commands'; + mock({ + [realCommandsDir]: { + 'test.toml': 'prompt = "This is a test prompt"', + }, + // Symlink the user commands directory to the real one + [userCommandsDir]: mock.symlink({ + path: realCommandsDir, + }), + }); + + const loader = new FileCommandLoader(null as unknown as Config); + const commands = await loader.loadCommands(signal); + + expect(commands).toHaveLength(1); + const command = commands[0]; + expect(command).toBeDefined(); + expect(command.name).toBe('test'); + }, + ); + + itif(process.platform !== 'win32')( + 'loads commands from a symlinked subdirectory', + async () => { + const userCommandsDir = getUserCommandsDir(); + const realNamespacedDir = '/real/namespaced-commands'; + mock({ + [userCommandsDir]: { + namespaced: mock.symlink({ + path: realNamespacedDir, + }), + }, + [realNamespacedDir]: { + 'my-test.toml': 'prompt = "This is a test prompt"', + }, + }); + + const loader = new FileCommandLoader(null as unknown as Config); + const commands = await loader.loadCommands(signal); + + expect(commands).toHaveLength(1); + const command = commands[0]; + expect(command).toBeDefined(); + expect(command.name).toBe('namespaced:my-test'); + }, + ); + + it('loads multiple commands', async () => { + const userCommandsDir = getUserCommandsDir(); + mock({ + [userCommandsDir]: { + 'test1.toml': 'prompt = "Prompt 1"', + 'test2.toml': 
'prompt = "Prompt 2"', + }, + }); + + const loader = new FileCommandLoader(null as unknown as Config); + const commands = await loader.loadCommands(signal); + + expect(commands).toHaveLength(2); + }); + + it('creates deeply nested namespaces correctly', async () => { + const userCommandsDir = getUserCommandsDir(); + + mock({ + [userCommandsDir]: { + gcp: { + pipelines: { + 'run.toml': 'prompt = "run pipeline"', + }, + }, + }, + }); + const loader = new FileCommandLoader({ + getProjectRoot: () => '/path/to/project', + } as Config); + const commands = await loader.loadCommands(signal); + expect(commands).toHaveLength(1); + expect(commands[0]!.name).toBe('gcp:pipelines:run'); + }); + + it('creates namespaces from nested directories', async () => { + const userCommandsDir = getUserCommandsDir(); + mock({ + [userCommandsDir]: { + git: { + 'commit.toml': 'prompt = "git commit prompt"', + }, + }, + }); + + const loader = new FileCommandLoader(null as unknown as Config); + const commands = await loader.loadCommands(signal); + + expect(commands).toHaveLength(1); + const command = commands[0]; + expect(command).toBeDefined(); + expect(command.name).toBe('git:commit'); + }); + + it('overrides user commands with project commands', async () => { + const userCommandsDir = getUserCommandsDir(); + const projectCommandsDir = getProjectCommandsDir(process.cwd()); + mock({ + [userCommandsDir]: { + 'test.toml': 'prompt = "User prompt"', + }, + [projectCommandsDir]: { + 'test.toml': 'prompt = "Project prompt"', + }, + }); + + const loader = new FileCommandLoader({ + getProjectRoot: () => process.cwd(), + } as Config); + const commands = await loader.loadCommands(signal); + + expect(commands).toHaveLength(1); + const command = commands[0]; + expect(command).toBeDefined(); + + const result = await command.action?.( + createMockCommandContext({ + invocation: { + raw: '/test', + name: 'test', + args: '', + }, + }), + '', + ); + if (result?.type === 'submit_prompt') { + 
expect(result.content).toBe('Project prompt'); + } else { + assert.fail('Incorrect action type'); + } + }); + + it('ignores files with TOML syntax errors', async () => { + const userCommandsDir = getUserCommandsDir(); + mock({ + [userCommandsDir]: { + 'invalid.toml': 'this is not valid toml', + 'good.toml': 'prompt = "This one is fine"', + }, + }); + + const loader = new FileCommandLoader(null as unknown as Config); + const commands = await loader.loadCommands(signal); + + expect(commands).toHaveLength(1); + expect(commands[0].name).toBe('good'); + }); + + it('ignores files that are semantically invalid (missing prompt)', async () => { + const userCommandsDir = getUserCommandsDir(); + mock({ + [userCommandsDir]: { + 'no_prompt.toml': 'description = "This file is missing a prompt"', + 'good.toml': 'prompt = "This one is fine"', + }, + }); + + const loader = new FileCommandLoader(null as unknown as Config); + const commands = await loader.loadCommands(signal); + + expect(commands).toHaveLength(1); + expect(commands[0].name).toBe('good'); + }); + + it('handles filename edge cases correctly', async () => { + const userCommandsDir = getUserCommandsDir(); + mock({ + [userCommandsDir]: { + 'test.v1.toml': 'prompt = "Test prompt"', + }, + }); + + const loader = new FileCommandLoader(null as unknown as Config); + const commands = await loader.loadCommands(signal); + const command = commands[0]; + expect(command).toBeDefined(); + expect(command.name).toBe('test.v1'); + }); + + it('handles file system errors gracefully', async () => { + mock({}); // Mock an empty file system + const loader = new FileCommandLoader(null as unknown as Config); + const commands = await loader.loadCommands(signal); + expect(commands).toHaveLength(0); + }); + + it('uses a default description if not provided', async () => { + const userCommandsDir = getUserCommandsDir(); + mock({ + [userCommandsDir]: { + 'test.toml': 'prompt = "Test prompt"', + }, + }); + + const loader = new FileCommandLoader(null 
as unknown as Config); + const commands = await loader.loadCommands(signal); + const command = commands[0]; + expect(command).toBeDefined(); + expect(command.description).toBe('Custom command from test.toml'); + }); + + it('uses the provided description', async () => { + const userCommandsDir = getUserCommandsDir(); + mock({ + [userCommandsDir]: { + 'test.toml': 'prompt = "Test prompt"\ndescription = "My test command"', + }, + }); + + const loader = new FileCommandLoader(null as unknown as Config); + const commands = await loader.loadCommands(signal); + const command = commands[0]; + expect(command).toBeDefined(); + expect(command.description).toBe('My test command'); + }); + + it('should sanitize colons in filenames to prevent namespace conflicts', async () => { + const userCommandsDir = getUserCommandsDir(); + mock({ + [userCommandsDir]: { + 'legacy:command.toml': 'prompt = "This is a legacy command"', + }, + }); + + const loader = new FileCommandLoader(null as unknown as Config); + const commands = await loader.loadCommands(signal); + + expect(commands).toHaveLength(1); + const command = commands[0]; + expect(command).toBeDefined(); + + // Verify that the ':' in the filename was replaced with an '_' + expect(command.name).toBe('legacy_command'); + }); + + describe('Shorthand Argument Processor Integration', () => { + it('correctly processes a command with {{args}}', async () => { + const userCommandsDir = getUserCommandsDir(); + mock({ + [userCommandsDir]: { + 'shorthand.toml': + 'prompt = "The user wants to: {{args}}"\ndescription = "Shorthand test"', + }, + }); + + const loader = new FileCommandLoader(null as unknown as Config); + const commands = await loader.loadCommands(signal); + const command = commands.find((c) => c.name === 'shorthand'); + expect(command).toBeDefined(); + + const result = await command!.action?.( + createMockCommandContext({ + invocation: { + raw: '/shorthand do something cool', + name: 'shorthand', + args: 'do something cool', + }, + 
}), + 'do something cool', + ); + expect(result?.type).toBe('submit_prompt'); + if (result?.type === 'submit_prompt') { + expect(result.content).toBe('The user wants to: do something cool'); + } + }); + }); + + describe('Default Argument Processor Integration', () => { + it('correctly processes a command without {{args}}', async () => { + const userCommandsDir = getUserCommandsDir(); + mock({ + [userCommandsDir]: { + 'model_led.toml': + 'prompt = "This is the instruction."\ndescription = "Default processor test"', + }, + }); + + const loader = new FileCommandLoader(null as unknown as Config); + const commands = await loader.loadCommands(signal); + const command = commands.find((c) => c.name === 'model_led'); + expect(command).toBeDefined(); + + const result = await command!.action?.( + createMockCommandContext({ + invocation: { + raw: '/model_led 1.2.0 added "a feature"', + name: 'model_led', + args: '1.2.0 added "a feature"', + }, + }), + '1.2.0 added "a feature"', + ); + expect(result?.type).toBe('submit_prompt'); + if (result?.type === 'submit_prompt') { + const expectedContent = + 'This is the instruction.\n\n/model_led 1.2.0 added "a feature"'; + expect(result.content).toBe(expectedContent); + } + }); + }); + + describe('Shell Processor Integration', () => { + it('instantiates ShellProcessor if the trigger is present', async () => { + const userCommandsDir = getUserCommandsDir(); + mock({ + [userCommandsDir]: { + 'shell.toml': `prompt = "Run this: ${SHELL_INJECTION_TRIGGER}echo hello}"`, + }, + }); + + const loader = new FileCommandLoader(null as unknown as Config); + await loader.loadCommands(signal); + + expect(ShellProcessor).toHaveBeenCalledWith('shell'); + }); + + it('does not instantiate ShellProcessor if trigger is missing', async () => { + const userCommandsDir = getUserCommandsDir(); + mock({ + [userCommandsDir]: { + 'regular.toml': `prompt = "Just a regular prompt"`, + }, + }); + + const loader = new FileCommandLoader(null as unknown as Config); + 
await loader.loadCommands(signal); + + expect(ShellProcessor).not.toHaveBeenCalled(); + }); + + it('returns a "submit_prompt" action if shell processing succeeds', async () => { + const userCommandsDir = getUserCommandsDir(); + mock({ + [userCommandsDir]: { + 'shell.toml': `prompt = "Run !{echo 'hello'}"`, + }, + }); + mockShellProcess.mockResolvedValue('Run hello'); + + const loader = new FileCommandLoader(null as unknown as Config); + const commands = await loader.loadCommands(signal); + const command = commands.find((c) => c.name === 'shell'); + expect(command).toBeDefined(); + + const result = await command!.action!( + createMockCommandContext({ + invocation: { raw: '/shell', name: 'shell', args: '' }, + }), + '', + ); + + expect(result?.type).toBe('submit_prompt'); + if (result?.type === 'submit_prompt') { + expect(result.content).toBe('Run hello'); + } + }); + + it('returns a "confirm_shell_commands" action if shell processing requires it', async () => { + const userCommandsDir = getUserCommandsDir(); + const rawInvocation = '/shell rm -rf /'; + mock({ + [userCommandsDir]: { + 'shell.toml': `prompt = "Run !{rm -rf /}"`, + }, + }); + + // Mock the processor to throw the specific error + const error = new ConfirmationRequiredError('Confirmation needed', [ + 'rm -rf /', + ]); + mockShellProcess.mockRejectedValue(error); + + const loader = new FileCommandLoader(null as unknown as Config); + const commands = await loader.loadCommands(signal); + const command = commands.find((c) => c.name === 'shell'); + expect(command).toBeDefined(); + + const result = await command!.action!( + createMockCommandContext({ + invocation: { raw: rawInvocation, name: 'shell', args: 'rm -rf /' }, + }), + 'rm -rf /', + ); + + expect(result?.type).toBe('confirm_shell_commands'); + if (result?.type === 'confirm_shell_commands') { + expect(result.commandsToConfirm).toEqual(['rm -rf /']); + expect(result.originalInvocation.raw).toBe(rawInvocation); + } + }); + + it('re-throws other errors 
from the processor', async () => { + const userCommandsDir = getUserCommandsDir(); + mock({ + [userCommandsDir]: { + 'shell.toml': `prompt = "Run !{something}"`, + }, + }); + + const genericError = new Error('Something else went wrong'); + mockShellProcess.mockRejectedValue(genericError); + + const loader = new FileCommandLoader(null as unknown as Config); + const commands = await loader.loadCommands(signal); + const command = commands.find((c) => c.name === 'shell'); + expect(command).toBeDefined(); + + await expect( + command!.action!( + createMockCommandContext({ + invocation: { raw: '/shell', name: 'shell', args: '' }, + }), + '', + ), + ).rejects.toThrow('Something else went wrong'); + }); + + it('assembles the processor pipeline in the correct order (Shell -> Argument)', async () => { + const userCommandsDir = getUserCommandsDir(); + mock({ + [userCommandsDir]: { + 'pipeline.toml': ` + prompt = "Shell says: ${SHELL_INJECTION_TRIGGER}echo foo} and user says: ${SHORTHAND_ARGS_PLACEHOLDER}" + `, + }, + }); + + // Mock the process methods to track call order + const argProcessMock = vi + .fn() + .mockImplementation((p) => `${p}-arg-processed`); + + // Redefine the mock for this specific test + mockShellProcess.mockImplementation((p) => + Promise.resolve(`${p}-shell-processed`), + ); + + vi.mocked(ShorthandArgumentProcessor).mockImplementation( + () => + ({ + process: argProcessMock, + }) as unknown as ShorthandArgumentProcessor, + ); + + const loader = new FileCommandLoader(null as unknown as Config); + const commands = await loader.loadCommands(signal); + const command = commands.find((c) => c.name === 'pipeline'); + expect(command).toBeDefined(); + + await command!.action!( + createMockCommandContext({ + invocation: { + raw: '/pipeline bar', + name: 'pipeline', + args: 'bar', + }, + }), + 'bar', + ); + + // Verify that the shell processor was called before the argument processor + expect(mockShellProcess.mock.invocationCallOrder[0]).toBeLessThan( + 
argProcessMock.mock.invocationCallOrder[0], + ); + + // Also verify the flow of the prompt through the processors + expect(mockShellProcess).toHaveBeenCalledWith( + expect.any(String), + expect.any(Object), + ); + expect(argProcessMock).toHaveBeenCalledWith( + expect.stringContaining('-shell-processed'), // It receives the output of the shell processor + expect.any(Object), + ); + }); + }); +}); diff --git a/packages/cli/src/services/FileCommandLoader.ts b/packages/cli/src/services/FileCommandLoader.ts new file mode 100644 index 000000000..5494ca552 --- /dev/null +++ b/packages/cli/src/services/FileCommandLoader.ts @@ -0,0 +1,240 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { promises as fs } from 'fs'; +import path from 'path'; +import toml from '@iarna/toml'; +import { glob } from 'glob'; +import { z } from 'zod'; +import { + Config, + getProjectCommandsDir, + getUserCommandsDir, +} from '@qwen-code/qwen-code-core'; +import { ICommandLoader } from './types.js'; +import { + CommandContext, + CommandKind, + SlashCommand, + SlashCommandActionReturn, +} from '../ui/commands/types.js'; +import { + DefaultArgumentProcessor, + ShorthandArgumentProcessor, +} from './prompt-processors/argumentProcessor.js'; +import { + IPromptProcessor, + SHORTHAND_ARGS_PLACEHOLDER, + SHELL_INJECTION_TRIGGER, +} from './prompt-processors/types.js'; +import { + ConfirmationRequiredError, + ShellProcessor, +} from './prompt-processors/shellProcessor.js'; + +/** + * Defines the Zod schema for a command definition file. This serves as the + * single source of truth for both validation and type inference. 
+ */ +const TomlCommandDefSchema = z.object({ + prompt: z.string({ + required_error: "The 'prompt' field is required.", + invalid_type_error: "The 'prompt' field must be a string.", + }), + description: z.string().optional(), +}); + +/** + * Discovers and loads custom slash commands from .toml files in both the + * user's global config directory and the current project's directory. + * + * This loader is responsible for: + * - Recursively scanning command directories. + * - Parsing and validating TOML files. + * - Adapting valid definitions into executable SlashCommand objects. + * - Handling file system errors and malformed files gracefully. + */ +export class FileCommandLoader implements ICommandLoader { + private readonly projectRoot: string; + + constructor(private readonly config: Config | null) { + this.projectRoot = config?.getProjectRoot() || process.cwd(); + } + + /** + * Loads all commands, applying the precedence rule where project-level + * commands override user-level commands with the same name. + * @param signal An AbortSignal to cancel the loading process. + * @returns A promise that resolves to an array of loaded SlashCommands. 
+ */ + async loadCommands(signal: AbortSignal): Promise { + const commandMap = new Map(); + const globOptions = { + nodir: true, + dot: true, + signal, + follow: true, + }; + + try { + // User Commands + const userDir = getUserCommandsDir(); + const userFiles = await glob('**/*.toml', { + ...globOptions, + cwd: userDir, + }); + const userCommandPromises = userFiles.map((file) => + this.parseAndAdaptFile(path.join(userDir, file), userDir), + ); + const userCommands = (await Promise.all(userCommandPromises)).filter( + (cmd): cmd is SlashCommand => cmd !== null, + ); + for (const cmd of userCommands) { + commandMap.set(cmd.name, cmd); + } + + // Project Commands (these intentionally override user commands) + const projectDir = getProjectCommandsDir(this.projectRoot); + const projectFiles = await glob('**/*.toml', { + ...globOptions, + cwd: projectDir, + }); + const projectCommandPromises = projectFiles.map((file) => + this.parseAndAdaptFile(path.join(projectDir, file), projectDir), + ); + const projectCommands = ( + await Promise.all(projectCommandPromises) + ).filter((cmd): cmd is SlashCommand => cmd !== null); + for (const cmd of projectCommands) { + commandMap.set(cmd.name, cmd); + } + } catch (error) { + console.error(`[FileCommandLoader] Error during file search:`, error); + } + + return Array.from(commandMap.values()); + } + + /** + * Parses a single .toml file and transforms it into a SlashCommand object. + * @param filePath The absolute path to the .toml file. + * @param baseDir The root command directory for name calculation. + * @returns A promise resolving to a SlashCommand, or null if the file is invalid. + */ + private async parseAndAdaptFile( + filePath: string, + baseDir: string, + ): Promise { + let fileContent: string; + try { + fileContent = await fs.readFile(filePath, 'utf-8'); + } catch (error: unknown) { + console.error( + `[FileCommandLoader] Failed to read file ${filePath}:`, + error instanceof Error ? 
error.message : String(error), + ); + return null; + } + + let parsed: unknown; + try { + parsed = toml.parse(fileContent); + } catch (error: unknown) { + console.error( + `[FileCommandLoader] Failed to parse TOML file ${filePath}:`, + error instanceof Error ? error.message : String(error), + ); + return null; + } + + const validationResult = TomlCommandDefSchema.safeParse(parsed); + + if (!validationResult.success) { + console.error( + `[FileCommandLoader] Skipping invalid command file: ${filePath}. Validation errors:`, + validationResult.error.flatten(), + ); + return null; + } + + const validDef = validationResult.data; + + const relativePathWithExt = path.relative(baseDir, filePath); + const relativePath = relativePathWithExt.substring( + 0, + relativePathWithExt.length - 5, // length of '.toml' + ); + const commandName = relativePath + .split(path.sep) + // Sanitize each path segment to prevent ambiguity. Since ':' is our + // namespace separator, we replace any literal colons in filenames + // with underscores to avoid naming conflicts. + .map((segment) => segment.replaceAll(':', '_')) + .join(':'); + + const processors: IPromptProcessor[] = []; + + // Add the Shell Processor if needed. + if (validDef.prompt.includes(SHELL_INJECTION_TRIGGER)) { + processors.push(new ShellProcessor(commandName)); + } + + // The presence of '{{args}}' is the switch that determines the behavior. 
+ if (validDef.prompt.includes(SHORTHAND_ARGS_PLACEHOLDER)) { + processors.push(new ShorthandArgumentProcessor()); + } else { + processors.push(new DefaultArgumentProcessor()); + } + + return { + name: commandName, + description: + validDef.description || + `Custom command from ${path.basename(filePath)}`, + kind: CommandKind.FILE, + action: async ( + context: CommandContext, + _args: string, + ): Promise => { + if (!context.invocation) { + console.error( + `[FileCommandLoader] Critical error: Command '${commandName}' was executed without invocation context.`, + ); + return { + type: 'submit_prompt', + content: validDef.prompt, // Fallback to unprocessed prompt + }; + } + + try { + let processedPrompt = validDef.prompt; + for (const processor of processors) { + processedPrompt = await processor.process(processedPrompt, context); + } + + return { + type: 'submit_prompt', + content: processedPrompt, + }; + } catch (e) { + // Check if it's our specific error type + if (e instanceof ConfirmationRequiredError) { + // Halt and request confirmation from the UI layer. + return { + type: 'confirm_shell_commands', + commandsToConfirm: e.commandsToConfirm, + originalInvocation: { + raw: context.invocation.raw, + }, + }; + } + // Re-throw other errors to be handled by the global error handler. 
+ throw e; + } + }, + }; + } +} diff --git a/packages/cli/src/services/McpPromptLoader.ts b/packages/cli/src/services/McpPromptLoader.ts new file mode 100644 index 000000000..fc92fef90 --- /dev/null +++ b/packages/cli/src/services/McpPromptLoader.ts @@ -0,0 +1,231 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { + Config, + getErrorMessage, + getMCPServerPrompts, +} from '@qwen-code/qwen-code-core'; +import { + CommandContext, + CommandKind, + SlashCommand, + SlashCommandActionReturn, +} from '../ui/commands/types.js'; +import { ICommandLoader } from './types.js'; +import { PromptArgument } from '@modelcontextprotocol/sdk/types.js'; + +/** + * Discovers and loads executable slash commands from prompts exposed by + * Model-Context-Protocol (MCP) servers. + */ +export class McpPromptLoader implements ICommandLoader { + constructor(private readonly config: Config | null) {} + + /** + * Loads all available prompts from all configured MCP servers and adapts + * them into executable SlashCommand objects. + * + * @param _signal An AbortSignal (unused for this synchronous loader). + * @returns A promise that resolves to an array of loaded SlashCommands. 
+ */ + loadCommands(_signal: AbortSignal): Promise { + const promptCommands: SlashCommand[] = []; + if (!this.config) { + return Promise.resolve([]); + } + const mcpServers = this.config.getMcpServers() || {}; + for (const serverName in mcpServers) { + const prompts = getMCPServerPrompts(this.config, serverName) || []; + for (const prompt of prompts) { + const commandName = `${prompt.name}`; + const newPromptCommand: SlashCommand = { + name: commandName, + description: prompt.description || `Invoke prompt ${prompt.name}`, + kind: CommandKind.MCP_PROMPT, + subCommands: [ + { + name: 'help', + description: 'Show help for this prompt', + kind: CommandKind.MCP_PROMPT, + action: async (): Promise => { + if (!prompt.arguments || prompt.arguments.length === 0) { + return { + type: 'message', + messageType: 'info', + content: `Prompt "${prompt.name}" has no arguments.`, + }; + } + + let helpMessage = `Arguments for "${prompt.name}":\n\n`; + if (prompt.arguments && prompt.arguments.length > 0) { + helpMessage += `You can provide arguments by name (e.g., --argName="value") or by position.\n\n`; + helpMessage += `e.g., ${prompt.name} ${prompt.arguments?.map((_) => `"foo"`)} is equivalent to ${prompt.name} ${prompt.arguments?.map((arg) => `--${arg.name}="foo"`)}\n\n`; + } + for (const arg of prompt.arguments) { + helpMessage += ` --${arg.name}\n`; + if (arg.description) { + helpMessage += ` ${arg.description}\n`; + } + helpMessage += ` (required: ${ + arg.required ? 
'yes' : 'no' + })\n\n`; + } + return { + type: 'message', + messageType: 'info', + content: helpMessage, + }; + }, + }, + ], + action: async ( + context: CommandContext, + args: string, + ): Promise => { + if (!this.config) { + return { + type: 'message', + messageType: 'error', + content: 'Config not loaded.', + }; + } + + const promptInputs = this.parseArgs(args, prompt.arguments); + if (promptInputs instanceof Error) { + return { + type: 'message', + messageType: 'error', + content: promptInputs.message, + }; + } + + try { + const mcpServers = this.config.getMcpServers() || {}; + const mcpServerConfig = mcpServers[serverName]; + if (!mcpServerConfig) { + return { + type: 'message', + messageType: 'error', + content: `MCP server config not found for '${serverName}'.`, + }; + } + const result = await prompt.invoke(promptInputs); + + if (result.error) { + return { + type: 'message', + messageType: 'error', + content: `Error invoking prompt: ${result.error}`, + }; + } + + if (!result.messages?.[0]?.content?.text) { + return { + type: 'message', + messageType: 'error', + content: + 'Received an empty or invalid prompt response from the server.', + }; + } + + return { + type: 'submit_prompt', + content: JSON.stringify(result.messages[0].content.text), + }; + } catch (error) { + return { + type: 'message', + messageType: 'error', + content: `Error: ${getErrorMessage(error)}`, + }; + } + }, + completion: async (_: CommandContext, partialArg: string) => { + if (!prompt || !prompt.arguments) { + return []; + } + + const suggestions: string[] = []; + const usedArgNames = new Set( + (partialArg.match(/--([^=]+)/g) || []).map((s) => s.substring(2)), + ); + + for (const arg of prompt.arguments) { + if (!usedArgNames.has(arg.name)) { + suggestions.push(`--${arg.name}=""`); + } + } + + return suggestions; + }, + }; + promptCommands.push(newPromptCommand); + } + } + return Promise.resolve(promptCommands); + } + + private parseArgs( + userArgs: string, + promptArgs: 
PromptArgument[] | undefined, + ): Record | Error { + const argValues: { [key: string]: string } = {}; + const promptInputs: Record = {}; + + // arg parsing: --key="value" or --key=value + const namedArgRegex = /--([^=]+)=(?:"((?:\\.|[^"\\])*)"|([^ ]*))/g; + let match; + const remainingArgs: string[] = []; + let lastIndex = 0; + + while ((match = namedArgRegex.exec(userArgs)) !== null) { + const key = match[1]; + const value = match[2] ?? match[3]; // Quoted or unquoted value + argValues[key] = value; + // Capture text between matches as potential positional args + if (match.index > lastIndex) { + remainingArgs.push(userArgs.substring(lastIndex, match.index).trim()); + } + lastIndex = namedArgRegex.lastIndex; + } + + // Capture any remaining text after the last named arg + if (lastIndex < userArgs.length) { + remainingArgs.push(userArgs.substring(lastIndex).trim()); + } + + const positionalArgs = remainingArgs.join(' ').split(/ +/); + + if (!promptArgs) { + return promptInputs; + } + for (const arg of promptArgs) { + if (argValues[arg.name]) { + promptInputs[arg.name] = argValues[arg.name]; + } + } + + const unfilledArgs = promptArgs.filter( + (arg) => arg.required && !promptInputs[arg.name], + ); + + const missingArgs: string[] = []; + for (let i = 0; i < unfilledArgs.length; i++) { + if (positionalArgs.length > i && positionalArgs[i]) { + promptInputs[unfilledArgs[i].name] = positionalArgs[i]; + } else { + missingArgs.push(unfilledArgs[i].name); + } + } + + if (missingArgs.length > 0) { + const missingArgNames = missingArgs.map((name) => `--${name}`).join(', '); + return new Error(`Missing required argument(s): ${missingArgNames}`); + } + return promptInputs; + } +} diff --git a/packages/cli/src/services/prompt-processors/argumentProcessor.test.ts b/packages/cli/src/services/prompt-processors/argumentProcessor.test.ts new file mode 100644 index 000000000..6af578a91 --- /dev/null +++ b/packages/cli/src/services/prompt-processors/argumentProcessor.test.ts @@ -0,0 
+1,99 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { + DefaultArgumentProcessor, + ShorthandArgumentProcessor, +} from './argumentProcessor.js'; +import { createMockCommandContext } from '../../test-utils/mockCommandContext.js'; + +describe('Argument Processors', () => { + describe('ShorthandArgumentProcessor', () => { + const processor = new ShorthandArgumentProcessor(); + + it('should replace a single {{args}} instance', async () => { + const prompt = 'Refactor the following code: {{args}}'; + const context = createMockCommandContext({ + invocation: { + raw: '/refactor make it faster', + name: 'refactor', + args: 'make it faster', + }, + }); + const result = await processor.process(prompt, context); + expect(result).toBe('Refactor the following code: make it faster'); + }); + + it('should replace multiple {{args}} instances', async () => { + const prompt = 'User said: {{args}}. I repeat: {{args}}!'; + const context = createMockCommandContext({ + invocation: { + raw: '/repeat hello world', + name: 'repeat', + args: 'hello world', + }, + }); + const result = await processor.process(prompt, context); + expect(result).toBe('User said: hello world. 
I repeat: hello world!'); + }); + + it('should handle an empty args string', async () => { + const prompt = 'The user provided no input: {{args}}.'; + const context = createMockCommandContext({ + invocation: { + raw: '/input', + name: 'input', + args: '', + }, + }); + const result = await processor.process(prompt, context); + expect(result).toBe('The user provided no input: .'); + }); + + it('should not change the prompt if {{args}} is not present', async () => { + const prompt = 'This is a static prompt.'; + const context = createMockCommandContext({ + invocation: { + raw: '/static some arguments', + name: 'static', + args: 'some arguments', + }, + }); + const result = await processor.process(prompt, context); + expect(result).toBe('This is a static prompt.'); + }); + }); + + describe('DefaultArgumentProcessor', () => { + const processor = new DefaultArgumentProcessor(); + + it('should append the full command if args are provided', async () => { + const prompt = 'Parse the command.'; + const context = createMockCommandContext({ + invocation: { + raw: '/mycommand arg1 "arg two"', + name: 'mycommand', + args: 'arg1 "arg two"', + }, + }); + const result = await processor.process(prompt, context); + expect(result).toBe('Parse the command.\n\n/mycommand arg1 "arg two"'); + }); + + it('should NOT append the full command if no args are provided', async () => { + const prompt = 'Parse the command.'; + const context = createMockCommandContext({ + invocation: { + raw: '/mycommand', + name: 'mycommand', + args: '', + }, + }); + const result = await processor.process(prompt, context); + expect(result).toBe('Parse the command.'); + }); + }); +}); diff --git a/packages/cli/src/services/prompt-processors/argumentProcessor.ts b/packages/cli/src/services/prompt-processors/argumentProcessor.ts new file mode 100644 index 000000000..a7efeea9d --- /dev/null +++ b/packages/cli/src/services/prompt-processors/argumentProcessor.ts @@ -0,0 +1,34 @@ +/** + * @license + * Copyright 2025 
Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { IPromptProcessor, SHORTHAND_ARGS_PLACEHOLDER } from './types.js'; +import { CommandContext } from '../../ui/commands/types.js'; + +/** + * Replaces all instances of `{{args}}` in a prompt with the user-provided + * argument string. + */ +export class ShorthandArgumentProcessor implements IPromptProcessor { + async process(prompt: string, context: CommandContext): Promise { + return prompt.replaceAll( + SHORTHAND_ARGS_PLACEHOLDER, + context.invocation!.args, + ); + } +} + +/** + * Appends the user's full command invocation to the prompt if arguments are + * provided, allowing the model to perform its own argument parsing. + */ +export class DefaultArgumentProcessor implements IPromptProcessor { + async process(prompt: string, context: CommandContext): Promise { + if (context.invocation!.args) { + return `${prompt}\n\n${context.invocation!.raw}`; + } + return prompt; + } +} diff --git a/packages/cli/src/services/prompt-processors/shellProcessor.test.ts b/packages/cli/src/services/prompt-processors/shellProcessor.test.ts new file mode 100644 index 000000000..6e93705d7 --- /dev/null +++ b/packages/cli/src/services/prompt-processors/shellProcessor.test.ts @@ -0,0 +1,300 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { vi, describe, it, expect, beforeEach } from 'vitest'; +import { ConfirmationRequiredError, ShellProcessor } from './shellProcessor.js'; +import { createMockCommandContext } from '../../test-utils/mockCommandContext.js'; +import { CommandContext } from '../../ui/commands/types.js'; +import { Config } from '@qwen-code/qwen-code-core'; + +const mockCheckCommandPermissions = vi.hoisted(() => vi.fn()); +const mockShellExecute = vi.hoisted(() => vi.fn()); + +vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => { + const original = await importOriginal(); + return { + ...original, + checkCommandPermissions: 
mockCheckCommandPermissions, + ShellExecutionService: { + execute: mockShellExecute, + }, + }; +}); + +describe('ShellProcessor', () => { + let context: CommandContext; + let mockConfig: Partial; + + beforeEach(() => { + vi.clearAllMocks(); + + mockConfig = { + getTargetDir: vi.fn().mockReturnValue('/test/dir'), + }; + + context = createMockCommandContext({ + services: { + config: mockConfig as Config, + }, + session: { + sessionShellAllowlist: new Set(), + }, + }); + + mockShellExecute.mockReturnValue({ + result: Promise.resolve({ + output: 'default shell output', + }), + }); + mockCheckCommandPermissions.mockReturnValue({ + allAllowed: true, + disallowedCommands: [], + }); + }); + + it('should not change the prompt if no shell injections are present', async () => { + const processor = new ShellProcessor('test-command'); + const prompt = 'This is a simple prompt with no injections.'; + const result = await processor.process(prompt, context); + expect(result).toBe(prompt); + expect(mockShellExecute).not.toHaveBeenCalled(); + }); + + it('should process a single valid shell injection if allowed', async () => { + const processor = new ShellProcessor('test-command'); + const prompt = 'The current status is: !{git status}'; + mockCheckCommandPermissions.mockReturnValue({ + allAllowed: true, + disallowedCommands: [], + }); + mockShellExecute.mockReturnValue({ + result: Promise.resolve({ output: 'On branch main' }), + }); + + const result = await processor.process(prompt, context); + + expect(mockCheckCommandPermissions).toHaveBeenCalledWith( + 'git status', + expect.any(Object), + context.session.sessionShellAllowlist, + ); + expect(mockShellExecute).toHaveBeenCalledWith( + 'git status', + expect.any(String), + expect.any(Function), + expect.any(Object), + ); + expect(result).toBe('The current status is: On branch main'); + }); + + it('should process multiple valid shell injections if all are allowed', async () => { + const processor = new ShellProcessor('test-command'); 
+ const prompt = '!{git status} in !{pwd}'; + mockCheckCommandPermissions.mockReturnValue({ + allAllowed: true, + disallowedCommands: [], + }); + + mockShellExecute + .mockReturnValueOnce({ + result: Promise.resolve({ output: 'On branch main' }), + }) + .mockReturnValueOnce({ + result: Promise.resolve({ output: '/usr/home' }), + }); + + const result = await processor.process(prompt, context); + + expect(mockCheckCommandPermissions).toHaveBeenCalledTimes(2); + expect(mockShellExecute).toHaveBeenCalledTimes(2); + expect(result).toBe('On branch main in /usr/home'); + }); + + it('should throw ConfirmationRequiredError if a command is not allowed', async () => { + const processor = new ShellProcessor('test-command'); + const prompt = 'Do something dangerous: !{rm -rf /}'; + mockCheckCommandPermissions.mockReturnValue({ + allAllowed: false, + disallowedCommands: ['rm -rf /'], + }); + + await expect(processor.process(prompt, context)).rejects.toThrow( + ConfirmationRequiredError, + ); + }); + + it('should throw ConfirmationRequiredError with the correct command', async () => { + const processor = new ShellProcessor('test-command'); + const prompt = 'Do something dangerous: !{rm -rf /}'; + mockCheckCommandPermissions.mockReturnValue({ + allAllowed: false, + disallowedCommands: ['rm -rf /'], + }); + + try { + await processor.process(prompt, context); + // Fail if it doesn't throw + expect(true).toBe(false); + } catch (e) { + expect(e).toBeInstanceOf(ConfirmationRequiredError); + if (e instanceof ConfirmationRequiredError) { + expect(e.commandsToConfirm).toEqual(['rm -rf /']); + } + } + + expect(mockShellExecute).not.toHaveBeenCalled(); + }); + + it('should throw ConfirmationRequiredError with multiple commands if multiple are disallowed', async () => { + const processor = new ShellProcessor('test-command'); + const prompt = '!{cmd1} and !{cmd2}'; + mockCheckCommandPermissions.mockImplementation((cmd) => { + if (cmd === 'cmd1') { + return { allAllowed: false, 
disallowedCommands: ['cmd1'] }; + } + if (cmd === 'cmd2') { + return { allAllowed: false, disallowedCommands: ['cmd2'] }; + } + return { allAllowed: true, disallowedCommands: [] }; + }); + + try { + await processor.process(prompt, context); + // Fail if it doesn't throw + expect(true).toBe(false); + } catch (e) { + expect(e).toBeInstanceOf(ConfirmationRequiredError); + if (e instanceof ConfirmationRequiredError) { + expect(e.commandsToConfirm).toEqual(['cmd1', 'cmd2']); + } + } + }); + + it('should not execute any commands if at least one requires confirmation', async () => { + const processor = new ShellProcessor('test-command'); + const prompt = 'First: !{echo "hello"}, Second: !{rm -rf /}'; + + mockCheckCommandPermissions.mockImplementation((cmd) => { + if (cmd.includes('rm')) { + return { allAllowed: false, disallowedCommands: [cmd] }; + } + return { allAllowed: true, disallowedCommands: [] }; + }); + + await expect(processor.process(prompt, context)).rejects.toThrow( + ConfirmationRequiredError, + ); + + // Ensure no commands were executed because the pipeline was halted. + expect(mockShellExecute).not.toHaveBeenCalled(); + }); + + it('should only request confirmation for disallowed commands in a mixed prompt', async () => { + const processor = new ShellProcessor('test-command'); + const prompt = 'Allowed: !{ls -l}, Disallowed: !{rm -rf /}'; + + mockCheckCommandPermissions.mockImplementation((cmd) => ({ + allAllowed: !cmd.includes('rm'), + disallowedCommands: cmd.includes('rm') ? 
[cmd] : [], + })); + + try { + await processor.process(prompt, context); + expect.fail('Should have thrown ConfirmationRequiredError'); + } catch (e) { + expect(e).toBeInstanceOf(ConfirmationRequiredError); + if (e instanceof ConfirmationRequiredError) { + expect(e.commandsToConfirm).toEqual(['rm -rf /']); + } + } + }); + + it('should execute all commands if they are on the session allowlist', async () => { + const processor = new ShellProcessor('test-command'); + const prompt = 'Run !{cmd1} and !{cmd2}'; + + // Add commands to the session allowlist + context.session.sessionShellAllowlist = new Set(['cmd1', 'cmd2']); + + // checkCommandPermissions should now pass for these + mockCheckCommandPermissions.mockReturnValue({ + allAllowed: true, + disallowedCommands: [], + }); + + mockShellExecute + .mockReturnValueOnce({ result: Promise.resolve({ output: 'output1' }) }) + .mockReturnValueOnce({ result: Promise.resolve({ output: 'output2' }) }); + + const result = await processor.process(prompt, context); + + expect(mockCheckCommandPermissions).toHaveBeenCalledWith( + 'cmd1', + expect.any(Object), + context.session.sessionShellAllowlist, + ); + expect(mockCheckCommandPermissions).toHaveBeenCalledWith( + 'cmd2', + expect.any(Object), + context.session.sessionShellAllowlist, + ); + expect(mockShellExecute).toHaveBeenCalledTimes(2); + expect(result).toBe('Run output1 and output2'); + }); + + it('should trim whitespace from the command inside the injection', async () => { + const processor = new ShellProcessor('test-command'); + const prompt = 'Files: !{ ls -l }'; + mockCheckCommandPermissions.mockReturnValue({ + allAllowed: true, + disallowedCommands: [], + }); + mockShellExecute.mockReturnValue({ + result: Promise.resolve({ output: 'total 0' }), + }); + + await processor.process(prompt, context); + + expect(mockCheckCommandPermissions).toHaveBeenCalledWith( + 'ls -l', // Verifies that the command was trimmed + expect.any(Object), + context.session.sessionShellAllowlist, + 
); + expect(mockShellExecute).toHaveBeenCalledWith( + 'ls -l', + expect.any(String), + expect.any(Function), + expect.any(Object), + ); + }); + + it('should handle an empty command inside the injection gracefully', async () => { + const processor = new ShellProcessor('test-command'); + const prompt = 'This is weird: !{}'; + mockCheckCommandPermissions.mockReturnValue({ + allAllowed: true, + disallowedCommands: [], + }); + mockShellExecute.mockReturnValue({ + result: Promise.resolve({ output: 'empty output' }), + }); + + const result = await processor.process(prompt, context); + + expect(mockCheckCommandPermissions).toHaveBeenCalledWith( + '', + expect.any(Object), + context.session.sessionShellAllowlist, + ); + expect(mockShellExecute).toHaveBeenCalledWith( + '', + expect.any(String), + expect.any(Function), + expect.any(Object), + ); + expect(result).toBe('This is weird: empty output'); + }); +}); diff --git a/packages/cli/src/services/prompt-processors/shellProcessor.ts b/packages/cli/src/services/prompt-processors/shellProcessor.ts new file mode 100644 index 000000000..0171903c4 --- /dev/null +++ b/packages/cli/src/services/prompt-processors/shellProcessor.ts @@ -0,0 +1,106 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { + checkCommandPermissions, + ShellExecutionService, +} from '@qwen-code/qwen-code-core'; + +import { CommandContext } from '../../ui/commands/types.js'; +import { IPromptProcessor } from './types.js'; + +export class ConfirmationRequiredError extends Error { + constructor( + message: string, + public commandsToConfirm: string[], + ) { + super(message); + this.name = 'ConfirmationRequiredError'; + } +} + +/** + * Finds all instances of shell command injections (`!{...}`) in a prompt, + * executes them, and replaces the injection site with the command's output. + * + * This processor ensures that only allowlisted commands are executed. 
If a + * disallowed command is found, it halts execution and reports an error. + */ +export class ShellProcessor implements IPromptProcessor { + /** + * A regular expression to find all instances of `!{...}`. The inner + * capture group extracts the command itself. + */ + private static readonly SHELL_INJECTION_REGEX = /!\{([^}]*)\}/g; + + /** + * @param commandName The name of the custom command being executed, used + * for logging and error messages. + */ + constructor(private readonly commandName: string) {} + + async process(prompt: string, context: CommandContext): Promise { + const { config, sessionShellAllowlist } = { + ...context.services, + ...context.session, + }; + const commandsToExecute: Array<{ fullMatch: string; command: string }> = []; + const commandsToConfirm = new Set(); + + const matches = [...prompt.matchAll(ShellProcessor.SHELL_INJECTION_REGEX)]; + if (matches.length === 0) { + return prompt; // No shell commands, nothing to do. + } + + // Discover all commands and check permissions. + for (const match of matches) { + const command = match[1].trim(); + const { allAllowed, disallowedCommands, blockReason, isHardDenial } = + checkCommandPermissions(command, config!, sessionShellAllowlist); + + if (!allAllowed) { + // If it's a hard denial, this is a non-recoverable security error. + if (isHardDenial) { + throw new Error( + `${this.commandName} cannot be run. ${blockReason || 'A shell command in this custom command is explicitly blocked in your config settings.'}`, + ); + } + + // Add each soft denial disallowed command to the set for confirmation. + disallowedCommands.forEach((uc) => commandsToConfirm.add(uc)); + } + commandsToExecute.push({ fullMatch: match[0], command }); + } + + // If any commands require confirmation, throw a special error to halt the + // pipeline and trigger the UI flow. 
+ if (commandsToConfirm.size > 0) { + throw new ConfirmationRequiredError( + 'Shell command confirmation required', + Array.from(commandsToConfirm), + ); + } + + // Execute all commands (only runs if no confirmation was needed). + let processedPrompt = prompt; + for (const { fullMatch, command } of commandsToExecute) { + const { result } = ShellExecutionService.execute( + command, + config!.getTargetDir(), + () => {}, // No streaming needed. + new AbortController().signal, // For now, we don't support cancellation from here. + ); + + const executionResult = await result; + processedPrompt = processedPrompt.replace( + fullMatch, + executionResult.output, + ); + } + + return processedPrompt; + } +} diff --git a/packages/cli/src/services/prompt-processors/types.ts b/packages/cli/src/services/prompt-processors/types.ts new file mode 100644 index 000000000..2653d2b7e --- /dev/null +++ b/packages/cli/src/services/prompt-processors/types.ts @@ -0,0 +1,42 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { CommandContext } from '../../ui/commands/types.js'; + +/** + * Defines the interface for a prompt processor, a module that can transform + * a prompt string before it is sent to the model. Processors are chained + * together to create a processing pipeline. + */ +export interface IPromptProcessor { + /** + * Processes a prompt string, applying a specific transformation as part of a pipeline. + * + * Each processor in a command's pipeline receives the output of the previous + * processor. This method provides the full command context, allowing for + * complex transformations that may require access to invocation details, + * application services, or UI state. + * + * @param prompt The current state of the prompt string. This may have been + * modified by previous processors in the pipeline. 
+ * @param context The full command context, providing access to invocation + * details (like `context.invocation.raw` and `context.invocation.args`), + * application services, and UI handlers. + * @returns A promise that resolves to the transformed prompt string, which + * will be passed to the next processor or, if it's the last one, sent to the model. + */ + process(prompt: string, context: CommandContext): Promise; +} + +/** + * The placeholder string for shorthand argument injection in custom commands. + */ +export const SHORTHAND_ARGS_PLACEHOLDER = '{{args}}'; + +/** + * The trigger string for shell command injection in custom commands. + */ +export const SHELL_INJECTION_TRIGGER = '!{'; diff --git a/packages/cli/src/services/types.ts b/packages/cli/src/services/types.ts new file mode 100644 index 000000000..9d30e791c --- /dev/null +++ b/packages/cli/src/services/types.ts @@ -0,0 +1,24 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { SlashCommand } from '../ui/commands/types.js'; + +/** + * Defines the contract for any class that can load and provide slash commands. + * This allows the CommandService to be extended with new command sources + * (e.g., file-based, remote APIs) without modification. + * + * Loaders should receive any necessary dependencies (like Config) via their + * constructor. + */ +export interface ICommandLoader { + /** + * Discovers and returns a list of slash commands from the loader's source. + * @param signal An AbortSignal to allow cancellation. + * @returns A promise that resolves to an array of SlashCommand objects. 
+ */ + loadCommands(signal: AbortSignal): Promise; +} diff --git a/packages/cli/src/test-utils/mockCommandContext.ts b/packages/cli/src/test-utils/mockCommandContext.ts index 5891494f1..7d38b2136 100644 --- a/packages/cli/src/test-utils/mockCommandContext.ts +++ b/packages/cli/src/test-utils/mockCommandContext.ts @@ -7,7 +7,7 @@ import { vi } from 'vitest'; import { CommandContext } from '../ui/commands/types.js'; import { LoadedSettings } from '../config/settings.js'; -import { GitService } from '@google/gemini-cli-core'; +import { GitService } from '@qwen-code/qwen-code-core'; import { SessionStatsState } from '../ui/contexts/SessionContext.js'; // A utility type to make all properties of an object, and its nested objects, partial. @@ -28,6 +28,11 @@ export const createMockCommandContext = ( overrides: DeepPartial = {}, ): CommandContext => { const defaultMocks: CommandContext = { + invocation: { + raw: '', + name: '', + args: '', + }, services: { config: null, settings: { merged: {} } as LoadedSettings, @@ -44,6 +49,10 @@ export const createMockCommandContext = ( addItem: vi.fn(), clear: vi.fn(), setDebugMessage: vi.fn(), + pendingItem: null, + setPendingItem: vi.fn(), + loadHistory: vi.fn(), + toggleCorgiMode: vi.fn(), }, session: { stats: { @@ -60,9 +69,7 @@ export const createMockCommandContext = ( byName: {}, }, }, - promptCount: 0, } as SessionStatsState, - resetSession: vi.fn(), }, }; @@ -76,15 +83,13 @@ export const createMockCommandContext = ( const targetValue = output[key]; if ( - sourceValue && - typeof sourceValue === 'object' && - !Array.isArray(sourceValue) && - targetValue && - typeof targetValue === 'object' && - !Array.isArray(targetValue) + // We only want to recursivlty merge plain objects + Object.prototype.toString.call(sourceValue) === '[object Object]' && + Object.prototype.toString.call(targetValue) === '[object Object]' ) { output[key] = merge(targetValue, sourceValue); } else { + // If not, we do a direct assignment. 
This preserves Date objects and others. output[key] = sourceValue; } } diff --git a/packages/cli/src/ui/App.test.tsx b/packages/cli/src/ui/App.test.tsx index 00a932f2e..5c629fedc 100644 --- a/packages/cli/src/ui/App.test.tsx +++ b/packages/cli/src/ui/App.test.tsx @@ -15,11 +15,13 @@ import { AccessibilitySettings, SandboxConfig, GeminiClient, + ideContext, } from '@qwen-code/qwen-code-core'; import { LoadedSettings, SettingsFile, Settings } from '../config/settings.js'; import process from 'node:process'; import { useGeminiStream } from './hooks/useGeminiStream.js'; -import { StreamingState } from './types.js'; +import { useConsoleMessages } from './hooks/useConsoleMessages.js'; +import { StreamingState, ConsoleMessageItem } from './types.js'; import { Tips } from './components/Tips.js'; // Define a more complete mock server config based on actual Config @@ -58,6 +60,12 @@ interface MockServerConfig { getToolCallCommand: Mock<() => string | undefined>; getMcpServerCommand: Mock<() => string | undefined>; getMcpServers: Mock<() => Record | undefined>; + getExtensions: Mock< + () => Array<{ name: string; version: string; isActive: boolean }> + >; + getBlockedMcpServers: Mock< + () => Array<{ name: string; extensionName: string }> + >; getUserAgent: Mock<() => string>; getUserMemory: Mock<() => string>; setUserMemory: Mock<(newUserMemory: string) => void>; @@ -118,6 +126,9 @@ vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => { getToolCallCommand: vi.fn(() => opts.toolCallCommand), getMcpServerCommand: vi.fn(() => opts.mcpServerCommand), getMcpServers: vi.fn(() => opts.mcpServers), + getPromptRegistry: vi.fn(), + getExtensions: vi.fn(() => []), + getBlockedMcpServers: vi.fn(() => []), getUserAgent: vi.fn(() => opts.userAgent || 'test-agent'), getUserMemory: vi.fn(() => opts.userMemory || ''), setUserMemory: vi.fn(), @@ -129,19 +140,29 @@ vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => { getShowMemoryUsage: vi.fn(() => opts.showMemoryUsage ?? 
false), getAccessibility: vi.fn(() => opts.accessibility ?? {}), getProjectRoot: vi.fn(() => opts.targetDir), - getGeminiClient: vi.fn(() => ({})), + getGeminiClient: vi.fn(() => ({ + getUserTier: vi.fn(), + })), getCheckpointingEnabled: vi.fn(() => opts.checkpointing ?? true), getAllGeminiMdFilenames: vi.fn(() => ['GEMINI.md']), setFlashFallbackHandler: vi.fn(), getSessionId: vi.fn(() => 'test-session-id'), getUserTier: vi.fn().mockResolvedValue(undefined), + getIdeMode: vi.fn(() => false), }; }); + + const ideContextMock = { + getOpenFilesContext: vi.fn(), + subscribeToOpenFiles: vi.fn(() => vi.fn()), // subscribe returns an unsubscribe function + }; + return { ...actualCore, Config: ConfigClassMock, MCPServerConfig: actualCore.MCPServerConfig, getAllGeminiMdFilenames: vi.fn(() => ['GEMINI.md']), + ideContext: ideContextMock, }; }); @@ -172,6 +193,14 @@ vi.mock('./hooks/useLogger', () => ({ })), })); +vi.mock('./hooks/useConsoleMessages.js', () => ({ + useConsoleMessages: vi.fn(() => ({ + consoleMessages: [], + handleNewMessage: vi.fn(), + clearConsoleMessages: vi.fn(), + })), +})); + vi.mock('../config/config.js', async (importOriginal) => { const actual = await importOriginal(); return { @@ -213,7 +242,7 @@ describe('App UI', () => { settings: settings.user || {}, }; const workspaceSettingsFile: SettingsFile = { - path: '/workspace/.qwen/settings.json', + path: '/workspace/.gemini/settings.json', settings: settings.workspace || {}, }; return new LoadedSettings( @@ -248,6 +277,7 @@ describe('App UI', () => { // Ensure a theme is set so the theme dialog does not appear. 
mockSettings = createMockSettings({ workspace: { theme: 'Default' } }); + vi.mocked(ideContext.getOpenFilesContext).mockReturnValue(undefined); }); afterEach(() => { @@ -258,8 +288,68 @@ describe('App UI', () => { vi.clearAllMocks(); // Clear mocks after each test }); + it('should display active file when available', async () => { + vi.mocked(ideContext.getOpenFilesContext).mockReturnValue({ + activeFile: '/path/to/my-file.ts', + recentOpenFiles: [{ filePath: '/path/to/my-file.ts', content: 'hello' }], + selectedText: 'hello', + }); + + const { lastFrame, unmount } = render( + , + ); + currentUnmount = unmount; + await Promise.resolve(); + expect(lastFrame()).toContain('1 recent file (ctrl+e to view)'); + }); + + it('should not display active file when not available', async () => { + vi.mocked(ideContext.getOpenFilesContext).mockReturnValue({ + activeFile: '', + }); + + const { lastFrame, unmount } = render( + , + ); + currentUnmount = unmount; + await Promise.resolve(); + expect(lastFrame()).not.toContain('Open File'); + }); + + it('should display active file and other context', async () => { + vi.mocked(ideContext.getOpenFilesContext).mockReturnValue({ + activeFile: '/path/to/my-file.ts', + recentOpenFiles: [{ filePath: '/path/to/my-file.ts', content: 'hello' }], + selectedText: 'hello', + }); + mockConfig.getGeminiMdFileCount.mockReturnValue(1); + mockConfig.getAllGeminiMdFilenames.mockReturnValue(['GEMINI.md']); + + const { lastFrame, unmount } = render( + , + ); + currentUnmount = unmount; + await Promise.resolve(); + expect(lastFrame()).toContain( + 'Using: 1 recent file (ctrl+e to view) | 1 GEMINI.md file', + ); + }); + it('should display default "GEMINI.md" in footer when contextFileName is not set and count is 1', async () => { mockConfig.getGeminiMdFileCount.mockReturnValue(1); + mockConfig.getAllGeminiMdFilenames.mockReturnValue(['GEMINI.md']); // For this test, ensure showMemoryUsage is false or debugMode is false if it relies on that 
mockConfig.getDebugMode.mockReturnValue(false); mockConfig.getShowMemoryUsage.mockReturnValue(false); @@ -273,11 +363,15 @@ describe('App UI', () => { ); currentUnmount = unmount; await Promise.resolve(); // Wait for any async updates - expect(lastFrame()).toContain('Using 1 GEMINI.md file'); + expect(lastFrame()).toContain('Using: 1 GEMINI.md file'); }); it('should display default "GEMINI.md" with plural when contextFileName is not set and count is > 1', async () => { mockConfig.getGeminiMdFileCount.mockReturnValue(2); + mockConfig.getAllGeminiMdFilenames.mockReturnValue([ + 'GEMINI.md', + 'GEMINI.md', + ]); mockConfig.getDebugMode.mockReturnValue(false); mockConfig.getShowMemoryUsage.mockReturnValue(false); @@ -290,7 +384,7 @@ describe('App UI', () => { ); currentUnmount = unmount; await Promise.resolve(); - expect(lastFrame()).toContain('Using 2 GEMINI.md files'); + expect(lastFrame()).toContain('Using: 2 GEMINI.md files'); }); it('should display custom contextFileName in footer when set and count is 1', async () => { @@ -298,6 +392,7 @@ describe('App UI', () => { workspace: { contextFileName: 'AGENTS.md', theme: 'Default' }, }); mockConfig.getGeminiMdFileCount.mockReturnValue(1); + mockConfig.getAllGeminiMdFilenames.mockReturnValue(['AGENTS.md']); mockConfig.getDebugMode.mockReturnValue(false); mockConfig.getShowMemoryUsage.mockReturnValue(false); @@ -310,7 +405,7 @@ describe('App UI', () => { ); currentUnmount = unmount; await Promise.resolve(); - expect(lastFrame()).toContain('Using 1 AGENTS.md file'); + expect(lastFrame()).toContain('Using: 1 AGENTS.md file'); }); it('should display a generic message when multiple context files with different names are provided', async () => { @@ -321,6 +416,10 @@ describe('App UI', () => { }, }); mockConfig.getGeminiMdFileCount.mockReturnValue(2); + mockConfig.getAllGeminiMdFilenames.mockReturnValue([ + 'AGENTS.md', + 'CONTEXT.md', + ]); mockConfig.getDebugMode.mockReturnValue(false); 
mockConfig.getShowMemoryUsage.mockReturnValue(false); @@ -333,7 +432,7 @@ describe('App UI', () => { ); currentUnmount = unmount; await Promise.resolve(); - expect(lastFrame()).toContain('Using 2 context files'); + expect(lastFrame()).toContain('Using: 2 context files'); }); it('should display custom contextFileName with plural when set and count is > 1', async () => { @@ -341,6 +440,11 @@ describe('App UI', () => { workspace: { contextFileName: 'MY_NOTES.TXT', theme: 'Default' }, }); mockConfig.getGeminiMdFileCount.mockReturnValue(3); + mockConfig.getAllGeminiMdFilenames.mockReturnValue([ + 'MY_NOTES.TXT', + 'MY_NOTES.TXT', + 'MY_NOTES.TXT', + ]); mockConfig.getDebugMode.mockReturnValue(false); mockConfig.getShowMemoryUsage.mockReturnValue(false); @@ -353,7 +457,7 @@ describe('App UI', () => { ); currentUnmount = unmount; await Promise.resolve(); - expect(lastFrame()).toContain('Using 3 MY_NOTES.TXT files'); + expect(lastFrame()).toContain('Using: 3 MY_NOTES.TXT files'); }); it('should not display context file message if count is 0, even if contextFileName is set', async () => { @@ -361,6 +465,7 @@ describe('App UI', () => { workspace: { contextFileName: 'ANY_FILE.MD', theme: 'Default' }, }); mockConfig.getGeminiMdFileCount.mockReturnValue(0); + mockConfig.getAllGeminiMdFilenames.mockReturnValue([]); mockConfig.getDebugMode.mockReturnValue(false); mockConfig.getShowMemoryUsage.mockReturnValue(false); @@ -378,6 +483,10 @@ describe('App UI', () => { it('should display GEMINI.md and MCP server count when both are present', async () => { mockConfig.getGeminiMdFileCount.mockReturnValue(2); + mockConfig.getAllGeminiMdFilenames.mockReturnValue([ + 'GEMINI.md', + 'GEMINI.md', + ]); mockConfig.getMcpServers.mockReturnValue({ server1: {} as MCPServerConfig, }); @@ -393,11 +502,12 @@ describe('App UI', () => { ); currentUnmount = unmount; await Promise.resolve(); - expect(lastFrame()).toContain('server'); + expect(lastFrame()).toContain('1 MCP server'); }); it('should 
display only MCP server count when GEMINI.md count is 0', async () => { mockConfig.getGeminiMdFileCount.mockReturnValue(0); + mockConfig.getAllGeminiMdFilenames.mockReturnValue([]); mockConfig.getMcpServers.mockReturnValue({ server1: {} as MCPServerConfig, server2: {} as MCPServerConfig, @@ -414,7 +524,7 @@ describe('App UI', () => { ); currentUnmount = unmount; await Promise.resolve(); - expect(lastFrame()).toContain('Using 2 MCP servers'); + expect(lastFrame()).toContain('Using: 2 MCP servers (ctrl+t to view)'); }); it('should display Tips component by default', async () => { @@ -527,7 +637,7 @@ describe('App UI', () => { ); currentUnmount = unmount; - expect(lastFrame()).toContain('Select Theme'); + expect(lastFrame()).toContain("I'm Feeling Lucky (esc to cancel"); }); it('should display a message if NO_COLOR is set', async () => { @@ -542,13 +652,43 @@ describe('App UI', () => { ); currentUnmount = unmount; - expect(lastFrame()).toContain( - 'Theme configuration unavailable due to NO_COLOR env variable.', - ); + expect(lastFrame()).toContain("I'm Feeling Lucky (esc to cancel"); expect(lastFrame()).not.toContain('Select Theme'); }); }); + it('should render the initial UI correctly', () => { + const { lastFrame, unmount } = render( + , + ); + currentUnmount = unmount; + expect(lastFrame()).toMatchSnapshot(); + }); + + it('should render correctly with the prompt input box', () => { + vi.mocked(useGeminiStream).mockReturnValue({ + streamingState: StreamingState.Idle, + submitQuery: vi.fn(), + initError: null, + pendingHistoryItems: [], + thought: null, + }); + + const { lastFrame, unmount } = render( + , + ); + currentUnmount = unmount; + expect(lastFrame()).toMatchSnapshot(); + }); + describe('with initial prompt from --prompt-interactive', () => { it('should submit the initial prompt automatically', async () => { const mockSubmitQuery = vi.fn(); @@ -565,6 +705,7 @@ describe('App UI', () => { mockConfig.getGeminiClient.mockReturnValue({ isInitialized: vi.fn(() => 
true), + getUserTier: vi.fn(), } as unknown as GeminiClient); const { unmount, rerender } = render( @@ -592,4 +733,35 @@ describe('App UI', () => { ); }); }); + + describe('errorCount', () => { + it('should correctly sum the counts of error messages', async () => { + const mockConsoleMessages: ConsoleMessageItem[] = [ + { type: 'error', content: 'First error', count: 1 }, + { type: 'log', content: 'some log', count: 1 }, + { type: 'error', content: 'Second error', count: 3 }, + { type: 'warn', content: 'a warning', count: 1 }, + { type: 'error', content: 'Third error', count: 1 }, + ]; + + vi.mocked(useConsoleMessages).mockReturnValue({ + consoleMessages: mockConsoleMessages, + handleNewMessage: vi.fn(), + clearConsoleMessages: vi.fn(), + }); + + const { lastFrame, unmount } = render( + , + ); + currentUnmount = unmount; + await Promise.resolve(); + + // Total error count should be 1 + 3 + 1 = 5 + expect(lastFrame()).toContain('5 errors'); + }); + }); }); diff --git a/packages/cli/src/ui/App.tsx b/packages/cli/src/ui/App.tsx index 090e1c133..d7745b144 100644 --- a/packages/cli/src/ui/App.tsx +++ b/packages/cli/src/ui/App.tsx @@ -36,6 +36,7 @@ import { ThemeDialog } from './components/ThemeDialog.js'; import { AuthDialog } from './components/AuthDialog.js'; import { AuthInProgress } from './components/AuthInProgress.js'; import { EditorSettingsDialog } from './components/EditorSettingsDialog.js'; +import { ShellConfirmationDialog } from './components/ShellConfirmationDialog.js'; import { Colors } from './colors.js'; import { Help } from './components/Help.js'; import { loadHierarchicalGeminiMemory } from '../config/config.js'; @@ -46,6 +47,7 @@ import { registerCleanup } from '../utils/cleanup.js'; import { DetailedMessagesDisplay } from './components/DetailedMessagesDisplay.js'; import { HistoryItemDisplay } from './components/HistoryItemDisplay.js'; import { ContextSummaryDisplay } from './components/ContextSummaryDisplay.js'; +import { IDEContextDetailDisplay } 
from './components/IDEContextDetailDisplay.js'; import { useHistory } from './hooks/useHistoryManager.js'; import process from 'node:process'; import { @@ -57,6 +59,9 @@ import { EditorType, FlashFallbackEvent, logFlashFallback, + AuthType, + type OpenFiles, + ideContext, } from '@qwen-code/qwen-code-core'; import { validateAuthMethod } from '../config/auth.js'; import { useLogger } from './hooks/useLogger.js'; @@ -66,8 +71,11 @@ import { useSessionStats, } from './contexts/SessionContext.js'; import { useGitBranchName } from './hooks/useGitBranchName.js'; +import { useFocus } from './hooks/useFocus.js'; import { useBracketedPaste } from './hooks/useBracketedPaste.js'; import { useTextBuffer } from './components/shared/text-buffer.js'; +import { useVimMode, VimModeProvider } from './contexts/VimModeContext.js'; +import { useVim } from './hooks/vim.js'; import * as fs from 'fs'; import { UpdateNotification } from './components/UpdateNotification.js'; import { @@ -80,6 +88,7 @@ import ansiEscapes from 'ansi-escapes'; import { OverflowProvider } from './contexts/OverflowContext.js'; import { ShowMoreLines } from './components/ShowMoreLines.js'; import { PrivacyNotice } from './privacy/PrivacyNotice.js'; +import { appEvents, AppEvent } from '../utils/events.js'; const CTRL_EXIT_PROMPT_DURATION_MS = 1000; @@ -92,11 +101,14 @@ interface AppProps { export const AppWrapper = (props: AppProps) => ( - + + + ); const App = ({ config, settings, startupWarnings = [], version }: AppProps) => { + const isFocused = useFocus(); useBracketedPaste(); const [updateMessage, setUpdateMessage] = useState(null); const { stdout } = useStdout(); @@ -143,6 +155,8 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => { const [showErrorDetails, setShowErrorDetails] = useState(false); const [showToolDescriptions, setShowToolDescriptions] = useState(false); + const [showIDEContextDetail, setShowIDEContextDetail] = + useState(false); const [ctrlCPressedOnce, 
setCtrlCPressedOnce] = useState(false); const [quittingMessages, setQuittingMessages] = useState< HistoryItem[] | null @@ -155,6 +169,37 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => { const [modelSwitchedFromQuotaError, setModelSwitchedFromQuotaError] = useState(false); const [userTier, setUserTier] = useState(undefined); + const [openFiles, setOpenFiles] = useState(); + const [isProcessing, setIsProcessing] = useState(false); + + useEffect(() => { + const unsubscribe = ideContext.subscribeToOpenFiles(setOpenFiles); + // Set the initial value + setOpenFiles(ideContext.getOpenFilesContext()); + return unsubscribe; + }, []); + + useEffect(() => { + const openDebugConsole = () => { + setShowErrorDetails(true); + setConstrainHeight(false); // Make sure the user sees the full message. + }; + appEvents.on(AppEvent.OpenDebugConsole, openDebugConsole); + + const logErrorHandler = (errorMessage: unknown) => { + handleNewMessage({ + type: 'error', + content: String(errorMessage), + count: 1, + }); + }; + appEvents.on(AppEvent.LogError, logErrorHandler); + + return () => { + appEvents.off(AppEvent.OpenDebugConsole, openDebugConsole); + appEvents.off(AppEvent.LogError, logErrorHandler); + }; + }, [handleNewMessage]); const openPrivacyNotice = useCallback(() => { setShowPrivacyNotice(true); @@ -162,7 +207,10 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => { const initialPromptSubmitted = useRef(false); const errorCount = useMemo( - () => consoleMessages.filter((msg) => msg.type === 'error').length, + () => + consoleMessages + .filter((msg) => msg.type === 'error') + .reduce((total, msg) => total + msg.count, 0), [consoleMessages], ); @@ -193,26 +241,11 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => { // Sync user tier from config when authentication changes useEffect(() => { - const syncUserTier = async () => { - try { - const configUserTier = await config.getUserTier(); - 
if (configUserTier !== userTier) { - setUserTier(configUserTier); - } - } catch (error) { - // Silently fail - this is not critical functionality - // Only log in debug mode to avoid cluttering the console - if (config.getDebugMode()) { - console.debug('Failed to sync user tier:', error); - } - } - }; - // Only sync when not currently authenticating if (!isAuthenticating) { - syncUserTier(); + setUserTier(config.getGeminiClient()?.getUserTier()); } - }, [config, userTier, isAuthenticating]); + }, [config, isAuthenticating]); const { isEditorDialogOpen, @@ -238,8 +271,11 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => { process.cwd(), config.getDebugMode(), config.getFileService(), + settings.merged, config.getExtensionContextFilePaths(), + config.getFileFilteringOptions(), ); + config.setUserMemory(memoryContent); config.setGeminiMdFileCount(fileCount); setGeminiMdFileCount(fileCount); @@ -267,7 +303,7 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => { ); console.error('Error refreshing memory:', error); } - }, [config, addItem]); + }, [config, addItem, settings.merged]); // Watch for model changes (e.g., from Flash fallback) useEffect(() => { @@ -294,64 +330,70 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => { ): Promise => { let message: string; - // Use actual user tier if available, otherwise default to FREE tier behavior (safe default) - const isPaidTier = - userTier === UserTierId.LEGACY || userTier === UserTierId.STANDARD; + if ( + config.getContentGeneratorConfig().authType === + AuthType.LOGIN_WITH_GOOGLE + ) { + // Use actual user tier if available; otherwise, default to FREE tier behavior (safe default) + const isPaidTier = + userTier === UserTierId.LEGACY || userTier === UserTierId.STANDARD; - // Check if this is a Pro quota exceeded error - if (error && isProQuotaExceededError(error)) { - if (isPaidTier) { - message = `⚡ You have reached your daily 
${currentModel} quota limit. + // Check if this is a Pro quota exceeded error + if (error && isProQuotaExceededError(error)) { + if (isPaidTier) { + message = `⚡ You have reached your daily ${currentModel} quota limit. ⚡ Automatically switching from ${currentModel} to ${fallbackModel} for the remainder of this session. ⚡ To continue accessing the ${currentModel} model today, consider using /auth to switch to using a paid API key from AI Studio at https://aistudio.google.com/apikey`; - } else { - message = `⚡ You have reached your daily ${currentModel} quota limit. + } else { + message = `⚡ You have reached your daily ${currentModel} quota limit. ⚡ Automatically switching from ${currentModel} to ${fallbackModel} for the remainder of this session. ⚡ To increase your limits, upgrade to a Gemini Code Assist Standard or Enterprise plan with higher limits at https://goo.gle/set-up-gemini-code-assist ⚡ Or you can utilize a Gemini API Key. See: https://goo.gle/gemini-cli-docs-auth#gemini-api-key ⚡ You can switch authentication methods by typing /auth`; - } - } else if (error && isGenericQuotaExceededError(error)) { - if (isPaidTier) { - message = `⚡ You have reached your daily quota limit. + } + } else if (error && isGenericQuotaExceededError(error)) { + if (isPaidTier) { + message = `⚡ You have reached your daily quota limit. ⚡ Automatically switching from ${currentModel} to ${fallbackModel} for the remainder of this session. ⚡ To continue accessing the ${currentModel} model today, consider using /auth to switch to using a paid API key from AI Studio at https://aistudio.google.com/apikey`; - } else { - message = `⚡ You have reached your daily quota limit. + } else { + message = `⚡ You have reached your daily quota limit. ⚡ Automatically switching from ${currentModel} to ${fallbackModel} for the remainder of this session. 
⚡ To increase your limits, upgrade to a Gemini Code Assist Standard or Enterprise plan with higher limits at https://goo.gle/set-up-gemini-code-assist ⚡ Or you can utilize a Gemini API Key. See: https://goo.gle/gemini-cli-docs-auth#gemini-api-key ⚡ You can switch authentication methods by typing /auth`; - } - } else { - if (isPaidTier) { - // Default fallback message for other cases (like consecutive 429s) - message = `⚡ Automatically switching from ${currentModel} to ${fallbackModel} for faster responses for the remainder of this session. + } + } else { + if (isPaidTier) { + // Default fallback message for other cases (like consecutive 429s) + message = `⚡ Automatically switching from ${currentModel} to ${fallbackModel} for faster responses for the remainder of this session. ⚡ Possible reasons for this are that you have received multiple consecutive capacity errors or you have reached your daily ${currentModel} quota limit ⚡ To continue accessing the ${currentModel} model today, consider using /auth to switch to using a paid API key from AI Studio at https://aistudio.google.com/apikey`; - } else { - // Default fallback message for other cases (like consecutive 429s) - message = `⚡ Automatically switching from ${currentModel} to ${fallbackModel} for faster responses for the remainder of this session. + } else { + // Default fallback message for other cases (like consecutive 429s) + message = `⚡ Automatically switching from ${currentModel} to ${fallbackModel} for faster responses for the remainder of this session. ⚡ Possible reasons for this are that you have received multiple consecutive capacity errors or you have reached your daily ${currentModel} quota limit ⚡ To increase your limits, upgrade to a Gemini Code Assist Standard or Enterprise plan with higher limits at https://goo.gle/set-up-gemini-code-assist ⚡ Or you can utilize a Gemini API Key. 
See: https://goo.gle/gemini-cli-docs-auth#gemini-api-key ⚡ You can switch authentication methods by typing /auth`; + } } + + // Add message to UI history + addItem( + { + type: MessageType.INFO, + text: message, + }, + Date.now(), + ); + + // Set the flag to prevent tool continuation + setModelSwitchedFromQuotaError(true); + // Set global quota error flag to prevent Flash model calls + config.setQuotaErrorOccurred(true); } - // Add message to UI history - addItem( - { - type: MessageType.INFO, - text: message, - }, - Date.now(), - ); - - // Set the flag to prevent tool continuation - setModelSwitchedFromQuotaError(true); - // Set global quota error flag to prevent Flash model calls - config.setQuotaErrorOccurred(true); // Switch model for future use but return false to stop current retry config.setModel(fallbackModel); logFlashFallback( @@ -364,15 +406,58 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => { config.setFlashFallbackHandler(flashFallbackHandler); }, [config, addItem, userTier]); + // Terminal and UI setup + const { rows: terminalHeight, columns: terminalWidth } = useTerminalSize(); + const { stdin, setRawMode } = useStdin(); + const isInitialMount = useRef(true); + + const widthFraction = 0.9; + const inputWidth = Math.max( + 20, + Math.floor(terminalWidth * widthFraction) - 3, + ); + const suggestionsWidth = Math.max(60, Math.floor(terminalWidth * 0.8)); + + // Utility callbacks + const isValidPath = useCallback((filePath: string): boolean => { + try { + return fs.existsSync(filePath) && fs.statSync(filePath).isFile(); + } catch (_e) { + return false; + } + }, []); + + const getPreferredEditor = useCallback(() => { + const editorType = settings.merged.preferredEditor; + const isValidEditor = isEditorAvailable(editorType); + if (!isValidEditor) { + openEditorDialog(); + return; + } + return editorType as EditorType; + }, [settings, openEditorDialog]); + + const onAuthError = useCallback(() => { + setAuthError('reauth 
required'); + openAuthDialog(); + }, [openAuthDialog, setAuthError]); + + // Core hooks and processors + const { + vimEnabled: vimModeEnabled, + vimMode, + toggleVimEnabled, + } = useVimMode(); + const { handleSlashCommand, slashCommands, pendingHistoryItems: pendingSlashCommandHistoryItems, commandContext, + shellConfirmationRequest, } = useSlashCommandProcessor( config, settings, - history, addItem, clearItems, loadHistory, @@ -383,29 +468,44 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => { openAuthDialog, openEditorDialog, toggleCorgiMode, - showToolDescriptions, setQuittingMessages, openPrivacyNotice, + toggleVimEnabled, + setIsProcessing, ); - const pendingHistoryItems = [...pendingSlashCommandHistoryItems]; - const { rows: terminalHeight, columns: terminalWidth } = useTerminalSize(); - const isInitialMount = useRef(true); - const { stdin, setRawMode } = useStdin(); - const isValidPath = useCallback((filePath: string): boolean => { - try { - return fs.existsSync(filePath) && fs.statSync(filePath).isFile(); - } catch (_e) { - return false; - } - }, []); - - const widthFraction = 0.9; - const inputWidth = Math.max( - 20, - Math.floor(terminalWidth * widthFraction) - 3, + const { + streamingState, + submitQuery, + initError, + pendingHistoryItems: pendingGeminiHistoryItems, + thought, + } = useGeminiStream( + config.getGeminiClient(), + history, + addItem, + setShowHelp, + config, + setDebugMessage, + handleSlashCommand, + shellModeActive, + getPreferredEditor, + onAuthError, + performMemoryRefresh, + modelSwitchedFromQuotaError, + setModelSwitchedFromQuotaError, + ); + + // Input handling + const handleFinalSubmit = useCallback( + (submittedValue: string) => { + const trimmedValue = submittedValue.trim(); + if (trimmedValue.length > 0) { + submitQuery(trimmedValue); + } + }, + [submitQuery], ); - const suggestionsWidth = Math.max(60, Math.floor(terminalWidth * 0.8)); const buffer = useTextBuffer({ initialText: '', @@ -416,6 
+516,14 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => { shellModeActive, }); + const { handleInput: vimHandleInput } = useVim(buffer, handleFinalSubmit); + const pendingHistoryItems = [...pendingSlashCommandHistoryItems]; + pendingHistoryItems.push(...pendingGeminiHistoryItems); + + const { elapsedTime, currentLoadingPhrase } = + useLoadingIndicator(streamingState); + const showAutoAcceptIndicator = useAutoAcceptIndicator({ config }); + const handleExit = useCallback( ( pressedOnce: boolean, @@ -426,15 +534,8 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => { if (timerRef.current) { clearTimeout(timerRef.current); } - const quitCommand = slashCommands.find( - (cmd) => cmd.name === 'quit' || cmd.altName === 'exit', - ); - if (quitCommand && quitCommand.action) { - quitCommand.action(commandContext, ''); - } else { - // This is unlikely to be needed but added for an additional fallback. - process.exit(0); - } + // Directly invoke the central command handler. + handleSlashCommand('/quit'); } else { setPressedOnce(true); timerRef.current = setTimeout(() => { @@ -443,8 +544,7 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => { }, CTRL_EXIT_PROMPT_DURATION_MS); } }, - // Add commandContext to the dependency array here! - [slashCommands, commandContext], + [handleSlashCommand], ); useInput((input: string, key: InkKeyType) => { @@ -468,6 +568,8 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => { if (Object.keys(mcpServers || {}).length > 0) { handleSlashCommand(newValue ? 
'/mcp desc' : '/mcp nodesc'); } + } else if (key.ctrl && input === 'e' && ideContext) { + setShowIDEContextDetail((prev) => !prev); } else if (key.ctrl && (input === 'c' || input === 'C')) { handleExit(ctrlCPressedOnce, setCtrlCPressedOnce, ctrlCTimerRef); } else if (key.ctrl && (input === 'd' || input === 'D')) { @@ -487,57 +589,6 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => { } }, [config]); - const getPreferredEditor = useCallback(() => { - const editorType = settings.merged.preferredEditor; - const isValidEditor = isEditorAvailable(editorType); - if (!isValidEditor) { - openEditorDialog(); - return; - } - return editorType as EditorType; - }, [settings, openEditorDialog]); - - const onAuthError = useCallback(() => { - setAuthError('reauth required'); - openAuthDialog(); - }, [openAuthDialog, setAuthError]); - - const { - streamingState, - submitQuery, - initError, - pendingHistoryItems: pendingGeminiHistoryItems, - thought, - } = useGeminiStream( - config.getGeminiClient(), - history, - addItem, - setShowHelp, - config, - setDebugMessage, - handleSlashCommand, - shellModeActive, - getPreferredEditor, - onAuthError, - performMemoryRefresh, - modelSwitchedFromQuotaError, - setModelSwitchedFromQuotaError, - ); - pendingHistoryItems.push(...pendingGeminiHistoryItems); - const { elapsedTime, currentLoadingPhrase } = - useLoadingIndicator(streamingState); - const showAutoAcceptIndicator = useAutoAcceptIndicator({ config }); - - const handleFinalSubmit = useCallback( - (submittedValue: string) => { - const trimmedValue = submittedValue.trim(); - if (trimmedValue.length > 0) { - submitQuery(trimmedValue); - } - }, - [submitQuery], - ); - const logger = useLogger(); const [userMessages, setUserMessages] = useState([]); @@ -577,7 +628,8 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => { fetchUserMessages(); }, [history, logger]); - const isInputActive = streamingState === StreamingState.Idle && 
!initError; + const isInputActive = + streamingState === StreamingState.Idle && !initError && !isProcessing; const handleClearScreen = useCallback(() => { clearItems(); @@ -695,9 +747,13 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => { // Arbitrary threshold to ensure that items in the static area are large // enough but not too large to make the terminal hard to use. const staticAreaMaxItemHeight = Math.max(terminalHeight * 4, 100); + const placeholder = vimModeEnabled + ? " Press 'i' for INSERT mode and 'Esc' for NORMAL mode." + : ' Type your message or @path/to/file'; + return ( - + {/* Move UpdateNotification outside Static so it can re-render when updateMessage changes */} {updateMessage && } @@ -779,7 +835,9 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => { )} - {isThemeDialogOpen ? ( + {shellConfirmationRequest ? ( + + ) : isThemeDialogOpen ? ( {themeError && ( @@ -864,6 +922,7 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => { } elapsedTime={elapsedTime} /> + { ) : ( )} @@ -901,7 +962,9 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => { {shellModeActive && } - + {showIDEContextDetail && ( + + )} {showErrorDetails && ( @@ -930,6 +993,9 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => { commandContext={commandContext} shellModeActive={shellModeActive} setShellModeActive={setShellModeActive} + focus={isFocused} + vimHandleInput={vimHandleInput} + placeholder={placeholder} /> )} @@ -981,6 +1047,7 @@ const App = ({ config, settings, startupWarnings = [], version }: AppProps) => { } promptTokenCount={sessionStats.lastPromptTokenCount} nightly={nightly} + vimMode={vimModeEnabled ? 
vimMode : undefined} /> diff --git a/packages/cli/src/ui/__snapshots__/App.test.tsx.snap b/packages/cli/src/ui/__snapshots__/App.test.tsx.snap new file mode 100644 index 000000000..891a16af1 --- /dev/null +++ b/packages/cli/src/ui/__snapshots__/App.test.tsx.snap @@ -0,0 +1,18 @@ +// Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html + +exports[`App UI > should render correctly with the prompt input box 1`] = ` +" + +╭────────────────────────────────────────────────────────────────────────────────────────╮ +│ > Type your message or @path/to/file │ +╰────────────────────────────────────────────────────────────────────────────────────────╯ +/test/dir no sandbox (see /docs) model (100% context left)" +`; + +exports[`App UI > should render the initial UI correctly 1`] = ` +" + I'm Feeling Lucky (esc to cancel, 0s) + + +/test/dir no sandbox (see /docs) model (100% context left)" +`; diff --git a/packages/cli/src/ui/colors.ts b/packages/cli/src/ui/colors.ts index bb8451cc5..f87055e43 100644 --- a/packages/cli/src/ui/colors.ts +++ b/packages/cli/src/ui/colors.ts @@ -38,6 +38,12 @@ export const Colors: ColorsTheme = { get AccentRed() { return themeManager.getActiveTheme().colors.AccentRed; }, + get DiffAdded() { + return themeManager.getActiveTheme().colors.DiffAdded; + }, + get DiffRemoved() { + return themeManager.getActiveTheme().colors.DiffRemoved; + }, get Comment() { return themeManager.getActiveTheme().colors.Comment; }, diff --git a/packages/cli/src/ui/commands/aboutCommand.ts b/packages/cli/src/ui/commands/aboutCommand.ts index 3cb8c2f6a..18a82682d 100644 --- a/packages/cli/src/ui/commands/aboutCommand.ts +++ b/packages/cli/src/ui/commands/aboutCommand.ts @@ -5,13 +5,14 @@ */ import { getCliVersion } from '../../utils/version.js'; -import { SlashCommand } from './types.js'; +import { CommandKind, SlashCommand } from './types.js'; import process from 'node:process'; import { MessageType, type HistoryItemAbout } from '../types.js'; export const aboutCommand: 
SlashCommand = { name: 'about', description: 'show version info', + kind: CommandKind.BUILT_IN, action: async (context) => { const osVersion = process.platform; let sandboxEnv = 'no sandbox'; diff --git a/packages/cli/src/ui/commands/authCommand.ts b/packages/cli/src/ui/commands/authCommand.ts index 29bd2c9d6..8e78cf862 100644 --- a/packages/cli/src/ui/commands/authCommand.ts +++ b/packages/cli/src/ui/commands/authCommand.ts @@ -4,11 +4,12 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { OpenDialogActionReturn, SlashCommand } from './types.js'; +import { CommandKind, OpenDialogActionReturn, SlashCommand } from './types.js'; export const authCommand: SlashCommand = { name: 'auth', description: 'change the auth method', + kind: CommandKind.BUILT_IN, action: (_context, _args): OpenDialogActionReturn => ({ type: 'dialog', dialog: 'auth', diff --git a/packages/cli/src/ui/commands/bugCommand.test.ts b/packages/cli/src/ui/commands/bugCommand.test.ts new file mode 100644 index 000000000..4cdc446f5 --- /dev/null +++ b/packages/cli/src/ui/commands/bugCommand.test.ts @@ -0,0 +1,98 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; +import open from 'open'; +import { bugCommand } from './bugCommand.js'; +import { createMockCommandContext } from '../../test-utils/mockCommandContext.js'; +import { getCliVersion } from '../../utils/version.js'; +import { GIT_COMMIT_INFO } from '../../generated/git-commit.js'; +import { formatMemoryUsage } from '../utils/formatters.js'; + +// Mock dependencies +vi.mock('open'); +vi.mock('../../utils/version.js'); +vi.mock('../utils/formatters.js'); +vi.mock('node:process', () => ({ + default: { + platform: 'test-platform', + version: 'v20.0.0', + // Keep other necessary process properties if needed by other parts of the code + env: process.env, + memoryUsage: () => ({ rss: 0 }), + }, +})); + +describe('bugCommand', () => { 
+ beforeEach(() => { + vi.mocked(getCliVersion).mockResolvedValue('0.1.0'); + vi.mocked(formatMemoryUsage).mockReturnValue('100 MB'); + vi.stubEnv('SANDBOX', 'qwen-test'); + }); + + afterEach(() => { + vi.unstubAllEnvs(); + vi.clearAllMocks(); + }); + + it('should generate the default GitHub issue URL', async () => { + const mockContext = createMockCommandContext({ + services: { + config: { + getModel: () => 'qwen3-coder-plus', + getBugCommand: () => undefined, + }, + }, + }); + + if (!bugCommand.action) throw new Error('Action is not defined'); + await bugCommand.action(mockContext, 'A test bug'); + + const expectedInfo = ` +* **CLI Version:** 0.1.0 +* **Git Commit:** ${GIT_COMMIT_INFO} +* **Operating System:** test-platform v20.0.0 +* **Sandbox Environment:** test +* **Model Version:** qwen3-coder-plus +* **Memory Usage:** 100 MB +`; + const expectedUrl = + 'https://github.com/QwenLM/qwen-code/issues/new?template=bug_report.yml&title=A%20test%20bug&info=' + + encodeURIComponent(expectedInfo); + + expect(open).toHaveBeenCalledWith(expectedUrl); + }); + + it('should use a custom URL template from config if provided', async () => { + const customTemplate = + 'https://internal.bug-tracker.com/new?desc={title}&details={info}'; + const mockContext = createMockCommandContext({ + services: { + config: { + getModel: () => 'qwen3-coder-plus', + getBugCommand: () => ({ urlTemplate: customTemplate }), + }, + }, + }); + + if (!bugCommand.action) throw new Error('Action is not defined'); + await bugCommand.action(mockContext, 'A custom bug'); + + const expectedInfo = ` +* **CLI Version:** 0.1.0 +* **Git Commit:** ${GIT_COMMIT_INFO} +* **Operating System:** test-platform v20.0.0 +* **Sandbox Environment:** test +* **Model Version:** qwen3-coder-plus +* **Memory Usage:** 100 MB +`; + const expectedUrl = customTemplate + .replace('{title}', encodeURIComponent('A custom bug')) + .replace('{info}', encodeURIComponent(expectedInfo)); + + 
expect(open).toHaveBeenCalledWith(expectedUrl); + }); +}); diff --git a/packages/cli/src/ui/commands/bugCommand.ts b/packages/cli/src/ui/commands/bugCommand.ts new file mode 100644 index 000000000..b6724fc28 --- /dev/null +++ b/packages/cli/src/ui/commands/bugCommand.ts @@ -0,0 +1,83 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import open from 'open'; +import process from 'node:process'; +import { + type CommandContext, + type SlashCommand, + CommandKind, +} from './types.js'; +import { MessageType } from '../types.js'; +import { GIT_COMMIT_INFO } from '../../generated/git-commit.js'; +import { formatMemoryUsage } from '../utils/formatters.js'; +import { getCliVersion } from '../../utils/version.js'; + +export const bugCommand: SlashCommand = { + name: 'bug', + description: 'submit a bug report', + kind: CommandKind.BUILT_IN, + action: async (context: CommandContext, args?: string): Promise => { + const bugDescription = (args || '').trim(); + const { config } = context.services; + + const osVersion = `${process.platform} ${process.version}`; + let sandboxEnv = 'no sandbox'; + if (process.env.SANDBOX && process.env.SANDBOX !== 'sandbox-exec') { + sandboxEnv = process.env.SANDBOX.replace(/^qwen-(?:code-)?/, ''); + } else if (process.env.SANDBOX === 'sandbox-exec') { + sandboxEnv = `sandbox-exec (${ + process.env.SEATBELT_PROFILE || 'unknown' + })`; + } + const modelVersion = config?.getModel() || 'Unknown'; + const cliVersion = await getCliVersion(); + const memoryUsage = formatMemoryUsage(process.memoryUsage().rss); + + const info = ` +* **CLI Version:** ${cliVersion} +* **Git Commit:** ${GIT_COMMIT_INFO} +* **Operating System:** ${osVersion} +* **Sandbox Environment:** ${sandboxEnv} +* **Model Version:** ${modelVersion} +* **Memory Usage:** ${memoryUsage} +`; + + let bugReportUrl = + 'https://github.com/QwenLM/qwen-code/issues/new?template=bug_report.yml&title={title}&info={info}'; + + const bugCommandSettings 
= config?.getBugCommand(); + if (bugCommandSettings?.urlTemplate) { + bugReportUrl = bugCommandSettings.urlTemplate; + } + + bugReportUrl = bugReportUrl + .replace('{title}', encodeURIComponent(bugDescription)) + .replace('{info}', encodeURIComponent(info)); + + context.ui.addItem( + { + type: MessageType.INFO, + text: `To submit your bug report, please open the following URL in your browser:\n${bugReportUrl}`, + }, + Date.now(), + ); + + try { + await open(bugReportUrl); + } catch (error) { + const errorMessage = + error instanceof Error ? error.message : String(error); + context.ui.addItem( + { + type: MessageType.ERROR, + text: `Could not open URL in browser: ${errorMessage}`, + }, + Date.now(), + ); + } + }, +}; diff --git a/packages/cli/src/ui/commands/chatCommand.test.ts b/packages/cli/src/ui/commands/chatCommand.test.ts new file mode 100644 index 000000000..7b331d1d5 --- /dev/null +++ b/packages/cli/src/ui/commands/chatCommand.test.ts @@ -0,0 +1,300 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { + vi, + describe, + it, + expect, + beforeEach, + afterEach, + Mocked, +} from 'vitest'; + +import { + type CommandContext, + MessageActionReturn, + SlashCommand, +} from './types.js'; +import { createMockCommandContext } from '../../test-utils/mockCommandContext.js'; +import { Content } from '@google/genai'; +import { GeminiClient } from '@qwen-code/qwen-code-core'; + +import * as fsPromises from 'fs/promises'; +import { chatCommand } from './chatCommand.js'; +import { Stats } from 'fs'; +import { HistoryItemWithoutId } from '../types.js'; + +vi.mock('fs/promises', () => ({ + stat: vi.fn(), + readdir: vi.fn().mockResolvedValue(['file1.txt', 'file2.txt'] as string[]), +})); + +describe('chatCommand', () => { + const mockFs = fsPromises as Mocked; + + let mockContext: CommandContext; + let mockGetChat: ReturnType; + let mockSaveCheckpoint: ReturnType; + let mockLoadCheckpoint: ReturnType; + let 
mockGetHistory: ReturnType; + + const getSubCommand = (name: 'list' | 'save' | 'resume'): SlashCommand => { + const subCommand = chatCommand.subCommands?.find( + (cmd) => cmd.name === name, + ); + if (!subCommand) { + throw new Error(`/memory ${name} command not found.`); + } + return subCommand; + }; + + beforeEach(() => { + mockGetHistory = vi.fn().mockReturnValue([]); + mockGetChat = vi.fn().mockResolvedValue({ + getHistory: mockGetHistory, + }); + mockSaveCheckpoint = vi.fn().mockResolvedValue(undefined); + mockLoadCheckpoint = vi.fn().mockResolvedValue([]); + + mockContext = createMockCommandContext({ + services: { + config: { + getProjectTempDir: () => '/tmp/gemini', + getGeminiClient: () => + ({ + getChat: mockGetChat, + }) as unknown as GeminiClient, + }, + logger: { + saveCheckpoint: mockSaveCheckpoint, + loadCheckpoint: mockLoadCheckpoint, + initialize: vi.fn().mockResolvedValue(undefined), + }, + }, + }); + }); + + afterEach(() => { + vi.restoreAllMocks(); + }); + + it('should have the correct main command definition', () => { + expect(chatCommand.name).toBe('chat'); + expect(chatCommand.description).toBe('Manage conversation history.'); + expect(chatCommand.subCommands).toHaveLength(3); + }); + + describe('list subcommand', () => { + let listCommand: SlashCommand; + + beforeEach(() => { + listCommand = getSubCommand('list'); + }); + + it('should inform when no checkpoints are found', async () => { + mockFs.readdir.mockImplementation( + (async (_: string): Promise => + [] as string[]) as unknown as typeof fsPromises.readdir, + ); + const result = await listCommand?.action?.(mockContext, ''); + expect(result).toEqual({ + type: 'message', + messageType: 'info', + content: 'No saved conversation checkpoints found.', + }); + }); + + it('should list found checkpoints', async () => { + const fakeFiles = ['checkpoint-test1.json', 'checkpoint-test2.json']; + const date = new Date(); + + mockFs.readdir.mockImplementation( + (async (_: string): Promise => + 
fakeFiles as string[]) as unknown as typeof fsPromises.readdir, + ); + mockFs.stat.mockImplementation((async (path: string): Promise => { + if (path.endsWith('test1.json')) { + return { mtime: date } as Stats; + } + return { mtime: new Date(date.getTime() + 1000) } as Stats; + }) as unknown as typeof fsPromises.stat); + + const result = (await listCommand?.action?.( + mockContext, + '', + )) as MessageActionReturn; + + const content = result?.content ?? ''; + expect(result?.type).toBe('message'); + expect(content).toContain('List of saved conversations:'); + const isoDate = date + .toISOString() + .match(/(\d{4}-\d{2}-\d{2})T(\d{2}:\d{2}:\d{2})/); + const formattedDate = isoDate ? `${isoDate[1]} ${isoDate[2]}` : ''; + expect(content).toContain(formattedDate); + const index1 = content.indexOf('- \u001b[36mtest1\u001b[0m'); + const index2 = content.indexOf('- \u001b[36mtest2\u001b[0m'); + expect(index1).toBeGreaterThanOrEqual(0); + expect(index2).toBeGreaterThan(index1); + }); + + it('should handle invalid date formats gracefully', async () => { + const fakeFiles = ['checkpoint-baddate.json']; + const badDate = { + toISOString: () => 'an-invalid-date-string', + } as Date; + + mockFs.readdir.mockResolvedValue(fakeFiles); + mockFs.stat.mockResolvedValue({ mtime: badDate } as Stats); + + const result = (await listCommand?.action?.( + mockContext, + '', + )) as MessageActionReturn; + + const content = result?.content ?? ''; + expect(content).toContain('(saved on Invalid Date)'); + }); + }); + describe('save subcommand', () => { + let saveCommand: SlashCommand; + const tag = 'my-tag'; + beforeEach(() => { + saveCommand = getSubCommand('save'); + }); + + it('should return an error if tag is missing', async () => { + const result = await saveCommand?.action?.(mockContext, ' '); + expect(result).toEqual({ + type: 'message', + messageType: 'error', + content: 'Missing tag. 
Usage: /chat save ', + }); + }); + + it('should inform if conversation history is empty', async () => { + mockGetHistory.mockReturnValue([]); + const result = await saveCommand?.action?.(mockContext, tag); + expect(result).toEqual({ + type: 'message', + messageType: 'info', + content: 'No conversation found to save.', + }); + }); + + it('should save the conversation', async () => { + const history: HistoryItemWithoutId[] = [ + { + type: 'user', + text: 'hello', + }, + ]; + mockGetHistory.mockReturnValue(history); + const result = await saveCommand?.action?.(mockContext, tag); + + expect(mockSaveCheckpoint).toHaveBeenCalledWith(history, tag); + expect(result).toEqual({ + type: 'message', + messageType: 'info', + content: `Conversation checkpoint saved with tag: ${tag}.`, + }); + }); + }); + + describe('resume subcommand', () => { + const goodTag = 'good-tag'; + const badTag = 'bad-tag'; + + let resumeCommand: SlashCommand; + beforeEach(() => { + resumeCommand = getSubCommand('resume'); + }); + + it('should return an error if tag is missing', async () => { + const result = await resumeCommand?.action?.(mockContext, ''); + + expect(result).toEqual({ + type: 'message', + messageType: 'error', + content: 'Missing tag. 
Usage: /chat resume ', + }); + }); + + it('should inform if checkpoint is not found', async () => { + mockLoadCheckpoint.mockResolvedValue([]); + + const result = await resumeCommand?.action?.(mockContext, badTag); + + expect(result).toEqual({ + type: 'message', + messageType: 'info', + content: `No saved checkpoint found with tag: ${badTag}.`, + }); + }); + + it('should resume a conversation', async () => { + const conversation: Content[] = [ + { role: 'user', parts: [{ text: 'hello gemini' }] }, + { role: 'model', parts: [{ text: 'hello world' }] }, + ]; + mockLoadCheckpoint.mockResolvedValue(conversation); + + const result = await resumeCommand?.action?.(mockContext, goodTag); + + expect(result).toEqual({ + type: 'load_history', + history: [ + { type: 'user', text: 'hello gemini' }, + { type: 'gemini', text: 'hello world' }, + ] as HistoryItemWithoutId[], + clientHistory: conversation, + }); + }); + + describe('completion', () => { + it('should provide completion suggestions', async () => { + const fakeFiles = ['checkpoint-alpha.json', 'checkpoint-beta.json']; + mockFs.readdir.mockImplementation( + (async (_: string): Promise => + fakeFiles as string[]) as unknown as typeof fsPromises.readdir, + ); + + mockFs.stat.mockImplementation( + (async (_: string): Promise => + ({ + mtime: new Date(), + }) as Stats) as unknown as typeof fsPromises.stat, + ); + + const result = await resumeCommand?.completion?.(mockContext, 'a'); + + expect(result).toEqual(['alpha']); + }); + + it('should suggest filenames sorted by modified time (newest first)', async () => { + const fakeFiles = ['checkpoint-test1.json', 'checkpoint-test2.json']; + const date = new Date(); + mockFs.readdir.mockImplementation( + (async (_: string): Promise => + fakeFiles as string[]) as unknown as typeof fsPromises.readdir, + ); + mockFs.stat.mockImplementation((async ( + path: string, + ): Promise => { + if (path.endsWith('test1.json')) { + return { mtime: date } as Stats; + } + return { mtime: new 
Date(date.getTime() + 1000) } as Stats; + }) as unknown as typeof fsPromises.stat); + + const result = await resumeCommand?.completion?.(mockContext, ''); + // Sort items by last modified time (newest first) + expect(result).toEqual(['test2', 'test1']); + }); + }); + }); +}); diff --git a/packages/cli/src/ui/commands/chatCommand.ts b/packages/cli/src/ui/commands/chatCommand.ts new file mode 100644 index 000000000..739097e3e --- /dev/null +++ b/packages/cli/src/ui/commands/chatCommand.ts @@ -0,0 +1,214 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import * as fsPromises from 'fs/promises'; +import { + CommandContext, + SlashCommand, + MessageActionReturn, + CommandKind, +} from './types.js'; +import path from 'path'; +import { HistoryItemWithoutId, MessageType } from '../types.js'; + +interface ChatDetail { + name: string; + mtime: Date; +} + +const getSavedChatTags = async ( + context: CommandContext, + mtSortDesc: boolean, +): Promise => { + const geminiDir = context.services.config?.getProjectTempDir(); + if (!geminiDir) { + return []; + } + try { + const file_head = 'checkpoint-'; + const file_tail = '.json'; + const files = await fsPromises.readdir(geminiDir); + const chatDetails: Array<{ name: string; mtime: Date }> = []; + + for (const file of files) { + if (file.startsWith(file_head) && file.endsWith(file_tail)) { + const filePath = path.join(geminiDir, file); + const stats = await fsPromises.stat(filePath); + chatDetails.push({ + name: file.slice(file_head.length, -file_tail.length), + mtime: stats.mtime, + }); + } + } + + chatDetails.sort((a, b) => + mtSortDesc + ? 
b.mtime.getTime() - a.mtime.getTime() + : a.mtime.getTime() - b.mtime.getTime(), + ); + + return chatDetails; + } catch (_err) { + return []; + } +}; + +const listCommand: SlashCommand = { + name: 'list', + description: 'List saved conversation checkpoints', + kind: CommandKind.BUILT_IN, + action: async (context): Promise => { + const chatDetails = await getSavedChatTags(context, false); + if (chatDetails.length === 0) { + return { + type: 'message', + messageType: 'info', + content: 'No saved conversation checkpoints found.', + }; + } + + const maxNameLength = Math.max( + ...chatDetails.map((chat) => chat.name.length), + ); + + let message = 'List of saved conversations:\n\n'; + for (const chat of chatDetails) { + const paddedName = chat.name.padEnd(maxNameLength, ' '); + const isoString = chat.mtime.toISOString(); + const match = isoString.match(/(\d{4}-\d{2}-\d{2})T(\d{2}:\d{2}:\d{2})/); + const formattedDate = match ? `${match[1]} ${match[2]}` : 'Invalid Date'; + message += ` - \u001b[36m${paddedName}\u001b[0m \u001b[90m(saved on ${formattedDate})\u001b[0m\n`; + } + message += `\n\u001b[90mNote: Newest last, oldest first\u001b[0m`; + return { + type: 'message', + messageType: 'info', + content: message, + }; + }, +}; + +const saveCommand: SlashCommand = { + name: 'save', + description: + 'Save the current conversation as a checkpoint. Usage: /chat save ', + kind: CommandKind.BUILT_IN, + action: async (context, args): Promise => { + const tag = args.trim(); + if (!tag) { + return { + type: 'message', + messageType: 'error', + content: 'Missing tag. 
Usage: /chat save ', + }; + } + + const { logger, config } = context.services; + await logger.initialize(); + const chat = await config?.getGeminiClient()?.getChat(); + if (!chat) { + return { + type: 'message', + messageType: 'error', + content: 'No chat client available to save conversation.', + }; + } + + const history = chat.getHistory(); + if (history.length > 0) { + await logger.saveCheckpoint(history, tag); + return { + type: 'message', + messageType: 'info', + content: `Conversation checkpoint saved with tag: ${tag}.`, + }; + } else { + return { + type: 'message', + messageType: 'info', + content: 'No conversation found to save.', + }; + } + }, +}; + +const resumeCommand: SlashCommand = { + name: 'resume', + altNames: ['load'], + description: + 'Resume a conversation from a checkpoint. Usage: /chat resume ', + kind: CommandKind.BUILT_IN, + action: async (context, args) => { + const tag = args.trim(); + if (!tag) { + return { + type: 'message', + messageType: 'error', + content: 'Missing tag. 
Usage: /chat resume ', + }; + } + + const { logger } = context.services; + await logger.initialize(); + const conversation = await logger.loadCheckpoint(tag); + + if (conversation.length === 0) { + return { + type: 'message', + messageType: 'info', + content: `No saved checkpoint found with tag: ${tag}.`, + }; + } + + const rolemap: { [key: string]: MessageType } = { + user: MessageType.USER, + model: MessageType.GEMINI, + }; + + const uiHistory: HistoryItemWithoutId[] = []; + let hasSystemPrompt = false; + let i = 0; + + for (const item of conversation) { + i += 1; + const text = + item.parts + ?.filter((m) => !!m.text) + .map((m) => m.text) + .join('') || ''; + if (!text) { + continue; + } + if (i === 1 && text.match(/context for our chat/)) { + hasSystemPrompt = true; + } + if (i > 2 || !hasSystemPrompt) { + uiHistory.push({ + type: (item.role && rolemap[item.role]) || MessageType.GEMINI, + text, + } as HistoryItemWithoutId); + } + } + return { + type: 'load_history', + history: uiHistory, + clientHistory: conversation, + }; + }, + completion: async (context, partialArg) => { + const chatDetails = await getSavedChatTags(context, true); + return chatDetails + .map((chat) => chat.name) + .filter((name) => name.startsWith(partialArg)); + }, +}; + +export const chatCommand: SlashCommand = { + name: 'chat', + description: 'Manage conversation history.', + kind: CommandKind.BUILT_IN, + subCommands: [listCommand, saveCommand, resumeCommand], +}; diff --git a/packages/cli/src/ui/commands/clearCommand.test.ts b/packages/cli/src/ui/commands/clearCommand.test.ts index f9e382ee0..d34b3d595 100644 --- a/packages/cli/src/ui/commands/clearCommand.test.ts +++ b/packages/cli/src/ui/commands/clearCommand.test.ts @@ -8,7 +8,19 @@ import { vi, describe, it, expect, beforeEach, Mock } from 'vitest'; import { clearCommand } from './clearCommand.js'; import { type CommandContext } from './types.js'; import { createMockCommandContext } from '../../test-utils/mockCommandContext.js'; 
-import { GeminiClient } from '@google/gemini-cli-core'; + +// Mock the telemetry service +vi.mock('@qwen-code/qwen-code-core', async () => { + const actual = await vi.importActual('@qwen-code/qwen-code-core'); + return { + ...actual, + uiTelemetryService: { + resetLastPromptTokenCount: vi.fn(), + }, + }; +}); + +import { GeminiClient, uiTelemetryService } from '@qwen-code/qwen-code-core'; describe('clearCommand', () => { let mockContext: CommandContext; @@ -16,6 +28,7 @@ describe('clearCommand', () => { beforeEach(() => { mockResetChat = vi.fn().mockResolvedValue(undefined); + vi.clearAllMocks(); mockContext = createMockCommandContext({ services: { @@ -29,7 +42,7 @@ describe('clearCommand', () => { }); }); - it('should set debug message, reset chat, and clear UI when config is available', async () => { + it('should set debug message, reset chat, reset telemetry, and clear UI when config is available', async () => { if (!clearCommand.action) { throw new Error('clearCommand must have an action.'); } @@ -42,23 +55,24 @@ describe('clearCommand', () => { expect(mockContext.ui.setDebugMessage).toHaveBeenCalledTimes(1); expect(mockResetChat).toHaveBeenCalledTimes(1); - - expect(mockContext.session.resetSession).toHaveBeenCalledTimes(1); - + expect(uiTelemetryService.resetLastPromptTokenCount).toHaveBeenCalledTimes( + 1, + ); expect(mockContext.ui.clear).toHaveBeenCalledTimes(1); // Check the order of operations. 
const setDebugMessageOrder = (mockContext.ui.setDebugMessage as Mock).mock .invocationCallOrder[0]; const resetChatOrder = mockResetChat.mock.invocationCallOrder[0]; - const resetSessionOrder = (mockContext.session.resetSession as Mock).mock - .invocationCallOrder[0]; + const resetTelemetryOrder = ( + uiTelemetryService.resetLastPromptTokenCount as Mock + ).mock.invocationCallOrder[0]; const clearOrder = (mockContext.ui.clear as Mock).mock .invocationCallOrder[0]; expect(setDebugMessageOrder).toBeLessThan(resetChatOrder); - expect(resetChatOrder).toBeLessThan(resetSessionOrder); - expect(resetSessionOrder).toBeLessThan(clearOrder); + expect(resetChatOrder).toBeLessThan(resetTelemetryOrder); + expect(resetTelemetryOrder).toBeLessThan(clearOrder); }); it('should not attempt to reset chat if config service is not available', async () => { @@ -75,10 +89,12 @@ describe('clearCommand', () => { await clearCommand.action(nullConfigContext, ''); expect(nullConfigContext.ui.setDebugMessage).toHaveBeenCalledWith( - 'Clearing terminal and resetting chat.', + 'Clearing terminal.', ); expect(mockResetChat).not.toHaveBeenCalled(); - expect(nullConfigContext.session.resetSession).toHaveBeenCalledTimes(1); + expect(uiTelemetryService.resetLastPromptTokenCount).toHaveBeenCalledTimes( + 1, + ); expect(nullConfigContext.ui.clear).toHaveBeenCalledTimes(1); }); }); diff --git a/packages/cli/src/ui/commands/clearCommand.ts b/packages/cli/src/ui/commands/clearCommand.ts index 726f5be33..0bf46af1b 100644 --- a/packages/cli/src/ui/commands/clearCommand.ts +++ b/packages/cli/src/ui/commands/clearCommand.ts @@ -4,15 +4,26 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { SlashCommand } from './types.js'; +import { uiTelemetryService } from '@qwen-code/qwen-code-core'; +import { CommandKind, SlashCommand } from './types.js'; export const clearCommand: SlashCommand = { name: 'clear', description: 'clear the screen and conversation history', + kind: CommandKind.BUILT_IN, action: async 
(context, _args) => { - context.ui.setDebugMessage('Clearing terminal and resetting chat.'); - await context.services.config?.getGeminiClient()?.resetChat(); - context.session.resetSession(); + const geminiClient = context.services.config?.getGeminiClient(); + + if (geminiClient) { + context.ui.setDebugMessage('Clearing terminal and resetting chat.'); + // If resetChat fails, the exception will propagate and halt the command, + // which is the correct behavior to signal a failure to the user. + await geminiClient.resetChat(); + } else { + context.ui.setDebugMessage('Clearing terminal.'); + } + + uiTelemetryService.resetLastPromptTokenCount(); context.ui.clear(); }, }; diff --git a/packages/cli/src/ui/commands/compressCommand.test.ts b/packages/cli/src/ui/commands/compressCommand.test.ts new file mode 100644 index 000000000..a6d3ab1b9 --- /dev/null +++ b/packages/cli/src/ui/commands/compressCommand.test.ts @@ -0,0 +1,129 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { GeminiClient } from '@qwen-code/qwen-code-core'; +import { vi, describe, it, expect, beforeEach } from 'vitest'; +import { compressCommand } from './compressCommand.js'; +import { createMockCommandContext } from '../../test-utils/mockCommandContext.js'; +import { MessageType } from '../types.js'; + +describe('compressCommand', () => { + let context: ReturnType; + let mockTryCompressChat: ReturnType; + + beforeEach(() => { + mockTryCompressChat = vi.fn(); + context = createMockCommandContext({ + services: { + config: { + getGeminiClient: () => + ({ + tryCompressChat: mockTryCompressChat, + }) as unknown as GeminiClient, + }, + }, + }); + }); + + it('should do nothing if a compression is already pending', async () => { + context.ui.pendingItem = { + type: MessageType.COMPRESSION, + compression: { + isPending: true, + originalTokenCount: null, + newTokenCount: null, + }, + }; + await compressCommand.action!(context, ''); + 
expect(context.ui.addItem).toHaveBeenCalledWith( + expect.objectContaining({ + type: MessageType.ERROR, + text: 'Already compressing, wait for previous request to complete', + }), + expect.any(Number), + ); + expect(context.ui.setPendingItem).not.toHaveBeenCalled(); + expect(mockTryCompressChat).not.toHaveBeenCalled(); + }); + + it('should set pending item, call tryCompressChat, and add result on success', async () => { + const compressedResult = { + originalTokenCount: 200, + newTokenCount: 100, + }; + mockTryCompressChat.mockResolvedValue(compressedResult); + + await compressCommand.action!(context, ''); + + expect(context.ui.setPendingItem).toHaveBeenNthCalledWith( + 1, + expect.objectContaining({ + type: MessageType.COMPRESSION, + compression: { + isPending: true, + originalTokenCount: null, + newTokenCount: null, + }, + }), + ); + + expect(mockTryCompressChat).toHaveBeenCalledWith( + expect.stringMatching(/^compress-\d+$/), + true, + ); + + expect(context.ui.addItem).toHaveBeenCalledWith( + expect.objectContaining({ + type: MessageType.COMPRESSION, + compression: { + isPending: false, + originalTokenCount: 200, + newTokenCount: 100, + }, + }), + expect.any(Number), + ); + + expect(context.ui.setPendingItem).toHaveBeenNthCalledWith(2, null); + }); + + it('should add an error message if tryCompressChat returns falsy', async () => { + mockTryCompressChat.mockResolvedValue(null); + + await compressCommand.action!(context, ''); + + expect(context.ui.addItem).toHaveBeenCalledWith( + expect.objectContaining({ + type: MessageType.ERROR, + text: 'Failed to compress chat history.', + }), + expect.any(Number), + ); + expect(context.ui.setPendingItem).toHaveBeenCalledWith(null); + }); + + it('should add an error message if tryCompressChat throws', async () => { + const error = new Error('Compression failed'); + mockTryCompressChat.mockRejectedValue(error); + + await compressCommand.action!(context, ''); + + expect(context.ui.addItem).toHaveBeenCalledWith( + 
expect.objectContaining({ + type: MessageType.ERROR, + text: `Failed to compress chat history: ${error.message}`, + }), + expect.any(Number), + ); + expect(context.ui.setPendingItem).toHaveBeenCalledWith(null); + }); + + it('should clear the pending item in a finally block', async () => { + mockTryCompressChat.mockRejectedValue(new Error('some error')); + await compressCommand.action!(context, ''); + expect(context.ui.setPendingItem).toHaveBeenCalledWith(null); + }); +}); diff --git a/packages/cli/src/ui/commands/compressCommand.ts b/packages/cli/src/ui/commands/compressCommand.ts new file mode 100644 index 000000000..792e8b5b0 --- /dev/null +++ b/packages/cli/src/ui/commands/compressCommand.ts @@ -0,0 +1,78 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { HistoryItemCompression, MessageType } from '../types.js'; +import { CommandKind, SlashCommand } from './types.js'; + +export const compressCommand: SlashCommand = { + name: 'compress', + altNames: ['summarize'], + description: 'Compresses the context by replacing it with a summary.', + kind: CommandKind.BUILT_IN, + action: async (context) => { + const { ui } = context; + if (ui.pendingItem) { + ui.addItem( + { + type: MessageType.ERROR, + text: 'Already compressing, wait for previous request to complete', + }, + Date.now(), + ); + return; + } + + const pendingMessage: HistoryItemCompression = { + type: MessageType.COMPRESSION, + compression: { + isPending: true, + originalTokenCount: null, + newTokenCount: null, + }, + }; + + try { + ui.setPendingItem(pendingMessage); + const promptId = `compress-${Date.now()}`; + const compressed = await context.services.config + ?.getGeminiClient() + ?.tryCompressChat(promptId, true); + if (compressed) { + ui.addItem( + { + type: MessageType.COMPRESSION, + compression: { + isPending: false, + originalTokenCount: compressed.originalTokenCount, + newTokenCount: compressed.newTokenCount, + }, + } as HistoryItemCompression, 
+ Date.now(), + ); + } else { + ui.addItem( + { + type: MessageType.ERROR, + text: 'Failed to compress chat history.', + }, + Date.now(), + ); + } + } catch (e) { + ui.addItem( + { + type: MessageType.ERROR, + text: `Failed to compress chat history: ${ + e instanceof Error ? e.message : String(e) + }`, + }, + Date.now(), + ); + } finally { + ui.setPendingItem(null); + } + }, +}; diff --git a/packages/cli/src/ui/commands/copyCommand.test.ts b/packages/cli/src/ui/commands/copyCommand.test.ts new file mode 100644 index 000000000..b163b43f4 --- /dev/null +++ b/packages/cli/src/ui/commands/copyCommand.test.ts @@ -0,0 +1,296 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { vi, describe, it, expect, beforeEach, Mock } from 'vitest'; +import { copyCommand } from './copyCommand.js'; +import { type CommandContext } from './types.js'; +import { createMockCommandContext } from '../../test-utils/mockCommandContext.js'; +import { copyToClipboard } from '../utils/commandUtils.js'; + +vi.mock('../utils/commandUtils.js', () => ({ + copyToClipboard: vi.fn(), +})); + +describe('copyCommand', () => { + let mockContext: CommandContext; + let mockCopyToClipboard: Mock; + let mockGetChat: Mock; + let mockGetHistory: Mock; + + beforeEach(() => { + vi.clearAllMocks(); + + mockCopyToClipboard = vi.mocked(copyToClipboard); + mockGetChat = vi.fn(); + mockGetHistory = vi.fn(); + + mockContext = createMockCommandContext({ + services: { + config: { + getGeminiClient: () => ({ + getChat: mockGetChat, + }), + }, + }, + }); + + mockGetChat.mockReturnValue({ + getHistory: mockGetHistory, + }); + }); + + it('should return info message when no history is available', async () => { + if (!copyCommand.action) throw new Error('Command has no action'); + + mockGetChat.mockReturnValue(undefined); + + const result = await copyCommand.action(mockContext, ''); + + expect(result).toEqual({ + type: 'message', + messageType: 'info', + content: 'No output 
in history', + }); + + expect(mockCopyToClipboard).not.toHaveBeenCalled(); + }); + + it('should return info message when history is empty', async () => { + if (!copyCommand.action) throw new Error('Command has no action'); + + mockGetHistory.mockReturnValue([]); + + const result = await copyCommand.action(mockContext, ''); + + expect(result).toEqual({ + type: 'message', + messageType: 'info', + content: 'No output in history', + }); + + expect(mockCopyToClipboard).not.toHaveBeenCalled(); + }); + + it('should return info message when no AI messages are found in history', async () => { + if (!copyCommand.action) throw new Error('Command has no action'); + + const historyWithUserOnly = [ + { + role: 'user', + parts: [{ text: 'Hello' }], + }, + ]; + + mockGetHistory.mockReturnValue(historyWithUserOnly); + + const result = await copyCommand.action(mockContext, ''); + + expect(result).toEqual({ + type: 'message', + messageType: 'info', + content: 'No output in history', + }); + + expect(mockCopyToClipboard).not.toHaveBeenCalled(); + }); + + it('should copy last AI message to clipboard successfully', async () => { + if (!copyCommand.action) throw new Error('Command has no action'); + + const historyWithAiMessage = [ + { + role: 'user', + parts: [{ text: 'Hello' }], + }, + { + role: 'model', + parts: [{ text: 'Hi there! How can I help you?' }], + }, + ]; + + mockGetHistory.mockReturnValue(historyWithAiMessage); + mockCopyToClipboard.mockResolvedValue(undefined); + + const result = await copyCommand.action(mockContext, ''); + + expect(result).toEqual({ + type: 'message', + messageType: 'info', + content: 'Last output copied to the clipboard', + }); + + expect(mockCopyToClipboard).toHaveBeenCalledWith( + 'Hi there! 
How can I help you?', + ); + }); + + it('should handle multiple text parts in AI message', async () => { + if (!copyCommand.action) throw new Error('Command has no action'); + + const historyWithMultipleParts = [ + { + role: 'model', + parts: [{ text: 'Part 1: ' }, { text: 'Part 2: ' }, { text: 'Part 3' }], + }, + ]; + + mockGetHistory.mockReturnValue(historyWithMultipleParts); + mockCopyToClipboard.mockResolvedValue(undefined); + + const result = await copyCommand.action(mockContext, ''); + + expect(mockCopyToClipboard).toHaveBeenCalledWith('Part 1: Part 2: Part 3'); + expect(result).toEqual({ + type: 'message', + messageType: 'info', + content: 'Last output copied to the clipboard', + }); + }); + + it('should filter out non-text parts', async () => { + if (!copyCommand.action) throw new Error('Command has no action'); + + const historyWithMixedParts = [ + { + role: 'model', + parts: [ + { text: 'Text part' }, + { image: 'base64data' }, // Non-text part + { text: ' more text' }, + ], + }, + ]; + + mockGetHistory.mockReturnValue(historyWithMixedParts); + mockCopyToClipboard.mockResolvedValue(undefined); + + const result = await copyCommand.action(mockContext, ''); + + expect(mockCopyToClipboard).toHaveBeenCalledWith('Text part more text'); + expect(result).toEqual({ + type: 'message', + messageType: 'info', + content: 'Last output copied to the clipboard', + }); + }); + + it('should get the last AI message when multiple AI messages exist', async () => { + if (!copyCommand.action) throw new Error('Command has no action'); + + const historyWithMultipleAiMessages = [ + { + role: 'model', + parts: [{ text: 'First AI response' }], + }, + { + role: 'user', + parts: [{ text: 'User message' }], + }, + { + role: 'model', + parts: [{ text: 'Second AI response' }], + }, + ]; + + mockGetHistory.mockReturnValue(historyWithMultipleAiMessages); + mockCopyToClipboard.mockResolvedValue(undefined); + + const result = await copyCommand.action(mockContext, ''); + + 
expect(mockCopyToClipboard).toHaveBeenCalledWith('Second AI response'); + expect(result).toEqual({ + type: 'message', + messageType: 'info', + content: 'Last output copied to the clipboard', + }); + }); + + it('should handle clipboard copy error', async () => { + if (!copyCommand.action) throw new Error('Command has no action'); + + const historyWithAiMessage = [ + { + role: 'model', + parts: [{ text: 'AI response' }], + }, + ]; + + mockGetHistory.mockReturnValue(historyWithAiMessage); + const clipboardError = new Error('Clipboard access denied'); + mockCopyToClipboard.mockRejectedValue(clipboardError); + + const result = await copyCommand.action(mockContext, ''); + + expect(result).toEqual({ + type: 'message', + messageType: 'error', + content: 'Failed to copy to the clipboard.', + }); + }); + + it('should handle non-Error clipboard errors', async () => { + if (!copyCommand.action) throw new Error('Command has no action'); + + const historyWithAiMessage = [ + { + role: 'model', + parts: [{ text: 'AI response' }], + }, + ]; + + mockGetHistory.mockReturnValue(historyWithAiMessage); + mockCopyToClipboard.mockRejectedValue('String error'); + + const result = await copyCommand.action(mockContext, ''); + + expect(result).toEqual({ + type: 'message', + messageType: 'error', + content: 'Failed to copy to the clipboard.', + }); + }); + + it('should return info message when no text parts found in AI message', async () => { + if (!copyCommand.action) throw new Error('Command has no action'); + + const historyWithEmptyParts = [ + { + role: 'model', + parts: [{ image: 'base64data' }], // No text parts + }, + ]; + + mockGetHistory.mockReturnValue(historyWithEmptyParts); + + const result = await copyCommand.action(mockContext, ''); + + expect(result).toEqual({ + type: 'message', + messageType: 'info', + content: 'Last AI output contains no text to copy.', + }); + + expect(mockCopyToClipboard).not.toHaveBeenCalled(); + }); + + it('should handle unavailable config service', async 
() => { + if (!copyCommand.action) throw new Error('Command has no action'); + + const nullConfigContext = createMockCommandContext({ + services: { config: null }, + }); + + const result = await copyCommand.action(nullConfigContext, ''); + + expect(result).toEqual({ + type: 'message', + messageType: 'info', + content: 'No output in history', + }); + + expect(mockCopyToClipboard).not.toHaveBeenCalled(); + }); +}); diff --git a/packages/cli/src/ui/commands/copyCommand.ts b/packages/cli/src/ui/commands/copyCommand.ts new file mode 100644 index 000000000..bd330faae --- /dev/null +++ b/packages/cli/src/ui/commands/copyCommand.ts @@ -0,0 +1,67 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { copyToClipboard } from '../utils/commandUtils.js'; +import { + CommandKind, + SlashCommand, + SlashCommandActionReturn, +} from './types.js'; + +export const copyCommand: SlashCommand = { + name: 'copy', + description: 'Copy the last result or code snippet to clipboard', + kind: CommandKind.BUILT_IN, + action: async (context, _args): Promise => { + const chat = await context.services.config?.getGeminiClient()?.getChat(); + const history = chat?.getHistory(); + + // Get the last message from the AI (model role) + const lastAiMessage = history + ? history.filter((item) => item.role === 'model').pop() + : undefined; + + if (!lastAiMessage) { + return { + type: 'message', + messageType: 'info', + content: 'No output in history', + }; + } + // Extract text from the parts + const lastAiOutput = lastAiMessage.parts + ?.filter((part) => part.text) + .map((part) => part.text) + .join(''); + + if (lastAiOutput) { + try { + await copyToClipboard(lastAiOutput); + + return { + type: 'message', + messageType: 'info', + content: 'Last output copied to the clipboard', + }; + } catch (error) { + const message = error instanceof Error ? 
error.message : String(error); + console.debug(message); + + return { + type: 'message', + messageType: 'error', + content: 'Failed to copy to the clipboard.', + }; + } + } else { + return { + type: 'message', + messageType: 'info', + content: 'Last AI output contains no text to copy.', + }; + } + }, +}; diff --git a/packages/cli/src/ui/commands/corgiCommand.test.ts b/packages/cli/src/ui/commands/corgiCommand.test.ts new file mode 100644 index 000000000..3c25e8cd0 --- /dev/null +++ b/packages/cli/src/ui/commands/corgiCommand.test.ts @@ -0,0 +1,34 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect, beforeEach, vi } from 'vitest'; +import { corgiCommand } from './corgiCommand.js'; +import { type CommandContext } from './types.js'; +import { createMockCommandContext } from '../../test-utils/mockCommandContext.js'; + +describe('corgiCommand', () => { + let mockContext: CommandContext; + + beforeEach(() => { + mockContext = createMockCommandContext(); + vi.spyOn(mockContext.ui, 'toggleCorgiMode'); + }); + + it('should call the toggleCorgiMode function on the UI context', async () => { + if (!corgiCommand.action) { + throw new Error('The corgi command must have an action.'); + } + + await corgiCommand.action(mockContext, ''); + + expect(mockContext.ui.toggleCorgiMode).toHaveBeenCalledTimes(1); + }); + + it('should have the correct name and description', () => { + expect(corgiCommand.name).toBe('corgi'); + expect(corgiCommand.description).toBe('Toggles corgi mode.'); + }); +}); diff --git a/packages/cli/src/ui/commands/corgiCommand.ts b/packages/cli/src/ui/commands/corgiCommand.ts new file mode 100644 index 000000000..cb3ecd1c8 --- /dev/null +++ b/packages/cli/src/ui/commands/corgiCommand.ts @@ -0,0 +1,16 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { CommandKind, type SlashCommand } from './types.js'; + +export const corgiCommand: 
SlashCommand = { + name: 'corgi', + description: 'Toggles corgi mode.', + kind: CommandKind.BUILT_IN, + action: (context, _args) => { + context.ui.toggleCorgiMode(); + }, +}; diff --git a/packages/cli/src/ui/commands/docsCommand.test.ts b/packages/cli/src/ui/commands/docsCommand.test.ts new file mode 100644 index 000000000..73b7396ae --- /dev/null +++ b/packages/cli/src/ui/commands/docsCommand.test.ts @@ -0,0 +1,99 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { vi, describe, it, expect, beforeEach, afterEach } from 'vitest'; +import open from 'open'; +import { docsCommand } from './docsCommand.js'; +import { type CommandContext } from './types.js'; +import { createMockCommandContext } from '../../test-utils/mockCommandContext.js'; +import { MessageType } from '../types.js'; + +// Mock the 'open' library +vi.mock('open', () => ({ + default: vi.fn(), +})); + +describe('docsCommand', () => { + let mockContext: CommandContext; + beforeEach(() => { + // Create a fresh mock context before each test + mockContext = createMockCommandContext(); + // Reset the `open` mock + vi.mocked(open).mockClear(); + }); + + afterEach(() => { + // Restore any stubbed environment variables + vi.unstubAllEnvs(); + }); + + it("should add an info message and call 'open' in a non-sandbox environment", async () => { + if (!docsCommand.action) { + throw new Error('docsCommand must have an action.'); + } + + const docsUrl = 'https://goo.gle/gemini-cli-docs'; + + await docsCommand.action(mockContext, ''); + + expect(mockContext.ui.addItem).toHaveBeenCalledWith( + { + type: MessageType.INFO, + text: `Opening documentation in your browser: ${docsUrl}`, + }, + expect.any(Number), + ); + + expect(open).toHaveBeenCalledWith(docsUrl); + }); + + it('should only add an info message in a sandbox environment', async () => { + if (!docsCommand.action) { + throw new Error('docsCommand must have an action.'); + } + + // Simulate a sandbox 
environment + process.env.SANDBOX = 'gemini-sandbox'; + const docsUrl = 'https://goo.gle/gemini-cli-docs'; + + await docsCommand.action(mockContext, ''); + + expect(mockContext.ui.addItem).toHaveBeenCalledWith( + { + type: MessageType.INFO, + text: `Please open the following URL in your browser to view the documentation:\n${docsUrl}`, + }, + expect.any(Number), + ); + + // Ensure 'open' was not called in the sandbox + expect(open).not.toHaveBeenCalled(); + }); + + it("should not open browser for 'sandbox-exec'", async () => { + if (!docsCommand.action) { + throw new Error('docsCommand must have an action.'); + } + + // Simulate the specific 'sandbox-exec' environment + process.env.SANDBOX = 'sandbox-exec'; + const docsUrl = 'https://goo.gle/gemini-cli-docs'; + + await docsCommand.action(mockContext, ''); + + // The logic should fall through to the 'else' block + expect(mockContext.ui.addItem).toHaveBeenCalledWith( + { + type: MessageType.INFO, + text: `Opening documentation in your browser: ${docsUrl}`, + }, + expect.any(Number), + ); + + // 'open' should be called in this specific sandbox case + expect(open).toHaveBeenCalledWith(docsUrl); + }); +}); diff --git a/packages/cli/src/ui/commands/docsCommand.ts b/packages/cli/src/ui/commands/docsCommand.ts new file mode 100644 index 000000000..922b236a2 --- /dev/null +++ b/packages/cli/src/ui/commands/docsCommand.ts @@ -0,0 +1,42 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import open from 'open'; +import process from 'node:process'; +import { + type CommandContext, + type SlashCommand, + CommandKind, +} from './types.js'; +import { MessageType } from '../types.js'; + +export const docsCommand: SlashCommand = { + name: 'docs', + description: 'open full Gemini CLI documentation in your browser', + kind: CommandKind.BUILT_IN, + action: async (context: CommandContext): Promise => { + const docsUrl = 'https://goo.gle/gemini-cli-docs'; + + if (process.env.SANDBOX && 
process.env.SANDBOX !== 'sandbox-exec') { + context.ui.addItem( + { + type: MessageType.INFO, + text: `Please open the following URL in your browser to view the documentation:\n${docsUrl}`, + }, + Date.now(), + ); + } else { + context.ui.addItem( + { + type: MessageType.INFO, + text: `Opening documentation in your browser: ${docsUrl}`, + }, + Date.now(), + ); + await open(docsUrl); + } + }, +}; diff --git a/packages/cli/src/ui/commands/editorCommand.test.ts b/packages/cli/src/ui/commands/editorCommand.test.ts new file mode 100644 index 000000000..9b5e84d3d --- /dev/null +++ b/packages/cli/src/ui/commands/editorCommand.test.ts @@ -0,0 +1,30 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect } from 'vitest'; +import { editorCommand } from './editorCommand.js'; +// 1. Import the mock context utility +import { createMockCommandContext } from '../../test-utils/mockCommandContext.js'; + +describe('editorCommand', () => { + it('should return a dialog action to open the editor dialog', () => { + if (!editorCommand.action) { + throw new Error('The editor command must have an action.'); + } + const mockContext = createMockCommandContext(); + const result = editorCommand.action(mockContext, ''); + + expect(result).toEqual({ + type: 'dialog', + dialog: 'editor', + }); + }); + + it('should have the correct name and description', () => { + expect(editorCommand.name).toBe('editor'); + expect(editorCommand.description).toBe('set external editor preference'); + }); +}); diff --git a/packages/cli/src/ui/commands/editorCommand.ts b/packages/cli/src/ui/commands/editorCommand.ts new file mode 100644 index 000000000..5b5c4c5dc --- /dev/null +++ b/packages/cli/src/ui/commands/editorCommand.ts @@ -0,0 +1,21 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { + CommandKind, + type OpenDialogActionReturn, + type SlashCommand, +} from './types.js'; + +export 
const editorCommand: SlashCommand = { + name: 'editor', + description: 'set external editor preference', + kind: CommandKind.BUILT_IN, + action: (): OpenDialogActionReturn => ({ + type: 'dialog', + dialog: 'editor', + }), +}; diff --git a/packages/cli/src/ui/commands/extensionsCommand.test.ts b/packages/cli/src/ui/commands/extensionsCommand.test.ts new file mode 100644 index 000000000..0a69e01c6 --- /dev/null +++ b/packages/cli/src/ui/commands/extensionsCommand.test.ts @@ -0,0 +1,67 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect } from 'vitest'; +import { extensionsCommand } from './extensionsCommand.js'; +import { type CommandContext } from './types.js'; +import { createMockCommandContext } from '../../test-utils/mockCommandContext.js'; +import { MessageType } from '../types.js'; + +describe('extensionsCommand', () => { + let mockContext: CommandContext; + + it('should display "No active extensions." when none are found', async () => { + mockContext = createMockCommandContext({ + services: { + config: { + getExtensions: () => [], + }, + }, + }); + + if (!extensionsCommand.action) throw new Error('Action not defined'); + await extensionsCommand.action(mockContext, ''); + + expect(mockContext.ui.addItem).toHaveBeenCalledWith( + { + type: MessageType.INFO, + text: 'No active extensions.', + }, + expect.any(Number), + ); + }); + + it('should list active extensions when they are found', async () => { + const mockExtensions = [ + { name: 'ext-one', version: '1.0.0', isActive: true }, + { name: 'ext-two', version: '2.1.0', isActive: true }, + { name: 'ext-three', version: '3.0.0', isActive: false }, + ]; + mockContext = createMockCommandContext({ + services: { + config: { + getExtensions: () => mockExtensions, + }, + }, + }); + + if (!extensionsCommand.action) throw new Error('Action not defined'); + await extensionsCommand.action(mockContext, ''); + + const expectedMessage = + 'Active 
extensions:\n\n' + + ` - \u001b[36mext-one (v1.0.0)\u001b[0m\n` + + ` - \u001b[36mext-two (v2.1.0)\u001b[0m\n`; + + expect(mockContext.ui.addItem).toHaveBeenCalledWith( + { + type: MessageType.INFO, + text: expectedMessage, + }, + expect.any(Number), + ); + }); +}); diff --git a/packages/cli/src/ui/commands/extensionsCommand.ts b/packages/cli/src/ui/commands/extensionsCommand.ts new file mode 100644 index 000000000..ea9f9a4f4 --- /dev/null +++ b/packages/cli/src/ui/commands/extensionsCommand.ts @@ -0,0 +1,46 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { + type CommandContext, + type SlashCommand, + CommandKind, +} from './types.js'; +import { MessageType } from '../types.js'; + +export const extensionsCommand: SlashCommand = { + name: 'extensions', + description: 'list active extensions', + kind: CommandKind.BUILT_IN, + action: async (context: CommandContext): Promise => { + const activeExtensions = context.services.config + ?.getExtensions() + .filter((ext) => ext.isActive); + if (!activeExtensions || activeExtensions.length === 0) { + context.ui.addItem( + { + type: MessageType.INFO, + text: 'No active extensions.', + }, + Date.now(), + ); + return; + } + + const extensionLines = activeExtensions.map( + (ext) => ` - \u001b[36m${ext.name} (v${ext.version})\u001b[0m`, + ); + const message = `Active extensions:\n\n${extensionLines.join('\n')}\n`; + + context.ui.addItem( + { + type: MessageType.INFO, + text: message, + }, + Date.now(), + ); + }, +}; diff --git a/packages/cli/src/ui/commands/helpCommand.test.ts b/packages/cli/src/ui/commands/helpCommand.test.ts index a6b19c05b..b04411069 100644 --- a/packages/cli/src/ui/commands/helpCommand.test.ts +++ b/packages/cli/src/ui/commands/helpCommand.test.ts @@ -32,9 +32,9 @@ describe('helpCommand', () => { }); it("should also be triggered by its alternative name '?'", () => { - // This test is more conceptual. 
The routing of altName to the command + // This test is more conceptual. The routing of altNames to the command // is handled by the slash command processor, but we can assert the - // altName is correctly defined on the command object itself. - expect(helpCommand.altName).toBe('?'); + // altNames is correctly defined on the command object itself. + expect(helpCommand.altNames).toContain('?'); }); }); diff --git a/packages/cli/src/ui/commands/helpCommand.ts b/packages/cli/src/ui/commands/helpCommand.ts index 6612626fe..a01693090 100644 --- a/packages/cli/src/ui/commands/helpCommand.ts +++ b/packages/cli/src/ui/commands/helpCommand.ts @@ -4,12 +4,13 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { OpenDialogActionReturn, SlashCommand } from './types.js'; +import { CommandKind, OpenDialogActionReturn, SlashCommand } from './types.js'; export const helpCommand: SlashCommand = { name: 'help', - altName: '?', - description: 'for help on qwen code', + altNames: ['?'], + description: 'for help on Qwen Code', + kind: CommandKind.BUILT_IN, action: (_context, _args): OpenDialogActionReturn => { console.debug('Opening help UI ...'); return { diff --git a/packages/cli/src/ui/commands/ideCommand.test.ts b/packages/cli/src/ui/commands/ideCommand.test.ts new file mode 100644 index 000000000..224647813 --- /dev/null +++ b/packages/cli/src/ui/commands/ideCommand.test.ts @@ -0,0 +1,270 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { + MockInstance, + vi, + describe, + it, + expect, + beforeEach, + afterEach, +} from 'vitest'; +import { ideCommand } from './ideCommand.js'; +import { type CommandContext } from './types.js'; +import { type Config } from '@qwen-code/qwen-code-core'; +import * as child_process from 'child_process'; +import { glob } from 'glob'; + +import { IDEConnectionStatus } from '@qwen-code/qwen-code-core/index.js'; + +vi.mock('child_process'); +vi.mock('glob'); + +function regexEscape(value: string) { 
+ return value.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); +} + +describe('ideCommand', () => { + let mockContext: CommandContext; + let mockConfig: Config; + let execSyncSpy: MockInstance; + let globSyncSpy: MockInstance; + let platformSpy: MockInstance; + + beforeEach(() => { + mockContext = { + ui: { + addItem: vi.fn(), + }, + } as unknown as CommandContext; + + mockConfig = { + getIdeMode: vi.fn(), + getIdeClient: vi.fn(), + } as unknown as Config; + + execSyncSpy = vi.spyOn(child_process, 'execSync'); + globSyncSpy = vi.spyOn(glob, 'sync'); + platformSpy = vi.spyOn(process, 'platform', 'get'); + }); + + afterEach(() => { + vi.restoreAllMocks(); + }); + + it('should return null if ideMode is not enabled', () => { + vi.mocked(mockConfig.getIdeMode).mockReturnValue(false); + const command = ideCommand(mockConfig); + expect(command).toBeNull(); + }); + + it('should return the ide command if ideMode is enabled', () => { + vi.mocked(mockConfig.getIdeMode).mockReturnValue(true); + const command = ideCommand(mockConfig); + expect(command).not.toBeNull(); + expect(command?.name).toBe('ide'); + expect(command?.subCommands).toHaveLength(2); + expect(command?.subCommands?.[0].name).toBe('status'); + expect(command?.subCommands?.[1].name).toBe('install'); + }); + + describe('status subcommand', () => { + const mockGetConnectionStatus = vi.fn(); + beforeEach(() => { + vi.mocked(mockConfig.getIdeMode).mockReturnValue(true); + vi.mocked(mockConfig.getIdeClient).mockReturnValue({ + getConnectionStatus: mockGetConnectionStatus, + } as ReturnType); + }); + + it('should show connected status', () => { + mockGetConnectionStatus.mockReturnValue({ + status: IDEConnectionStatus.Connected, + }); + const command = ideCommand(mockConfig); + const result = command!.subCommands![0].action!(mockContext, ''); + expect(mockGetConnectionStatus).toHaveBeenCalled(); + expect(result).toEqual({ + type: 'message', + messageType: 'info', + content: '🟢 Connected', + }); + }); + + it('should show 
connecting status', () => { + mockGetConnectionStatus.mockReturnValue({ + status: IDEConnectionStatus.Connecting, + }); + const command = ideCommand(mockConfig); + const result = command!.subCommands![0].action!(mockContext, ''); + expect(mockGetConnectionStatus).toHaveBeenCalled(); + expect(result).toEqual({ + type: 'message', + messageType: 'info', + content: `🟡 Connecting...`, + }); + }); + it('should show disconnected status', () => { + mockGetConnectionStatus.mockReturnValue({ + status: IDEConnectionStatus.Disconnected, + }); + const command = ideCommand(mockConfig); + const result = command!.subCommands![0].action!(mockContext, ''); + expect(mockGetConnectionStatus).toHaveBeenCalled(); + expect(result).toEqual({ + type: 'message', + messageType: 'error', + content: `🔴 Disconnected`, + }); + }); + + it('should show disconnected status with details', () => { + const details = 'Something went wrong'; + mockGetConnectionStatus.mockReturnValue({ + status: IDEConnectionStatus.Disconnected, + details, + }); + const command = ideCommand(mockConfig); + const result = command!.subCommands![0].action!(mockContext, ''); + expect(mockGetConnectionStatus).toHaveBeenCalled(); + expect(result).toEqual({ + type: 'message', + messageType: 'error', + content: `🔴 Disconnected: ${details}`, + }); + }); + }); + + describe('install subcommand', () => { + beforeEach(() => { + vi.mocked(mockConfig.getIdeMode).mockReturnValue(true); + platformSpy.mockReturnValue('linux'); + }); + + it('should show an error if VSCode is not installed', async () => { + execSyncSpy.mockImplementation(() => { + throw new Error('Command not found'); + }); + + const command = ideCommand(mockConfig); + + await command!.subCommands![1].action!(mockContext, ''); + expect(mockContext.ui.addItem).toHaveBeenCalledWith( + expect.objectContaining({ + type: 'error', + text: expect.stringMatching(/VS Code command-line tool .* not found/), + }), + expect.any(Number), + ); + }); + + it('should show an error if the VSIX 
file is not found', async () => { + execSyncSpy.mockReturnValue(''); // VSCode is installed + globSyncSpy.mockReturnValue([]); // No .vsix file found + + const command = ideCommand(mockConfig); + await command!.subCommands![1].action!(mockContext, ''); + + expect(mockContext.ui.addItem).toHaveBeenCalledWith( + expect.objectContaining({ + type: 'error', + text: 'Could not find the required VS Code companion extension. Please file a bug via /bug.', + }), + expect.any(Number), + ); + }); + + it('should install the extension if found in the bundle directory', async () => { + const vsixPath = '/path/to/bundle/gemini.vsix'; + execSyncSpy.mockReturnValue(''); // VSCode is installed + globSyncSpy.mockReturnValue([vsixPath]); // Found .vsix file + + const command = ideCommand(mockConfig); + await command!.subCommands![1].action!(mockContext, ''); + + expect(globSyncSpy).toHaveBeenCalledWith( + expect.stringContaining('.vsix'), + ); + expect(execSyncSpy).toHaveBeenCalledWith( + expect.stringMatching( + new RegExp( + `code(.cmd)? --install-extension ${regexEscape(vsixPath)} --force`, + ), + ), + { stdio: 'pipe' }, + ); + expect(mockContext.ui.addItem).toHaveBeenCalledWith( + expect.objectContaining({ + type: 'info', + text: `Installing VS Code companion extension...`, + }), + expect.any(Number), + ); + expect(mockContext.ui.addItem).toHaveBeenCalledWith( + expect.objectContaining({ + type: 'info', + text: 'VS Code companion extension installed successfully. Restart gemini-cli in a fresh terminal window.', + }), + expect.any(Number), + ); + }); + + it('should install the extension if found in the dev directory', async () => { + const vsixPath = '/path/to/dev/gemini.vsix'; + execSyncSpy.mockReturnValue(''); // VSCode is installed + // First glob call for bundle returns nothing, second for dev returns path. 
+ globSyncSpy.mockReturnValueOnce([]).mockReturnValueOnce([vsixPath]); + + const command = ideCommand(mockConfig); + await command!.subCommands![1].action!(mockContext, ''); + + expect(globSyncSpy).toHaveBeenCalledTimes(2); + expect(execSyncSpy).toHaveBeenCalledWith( + expect.stringMatching( + new RegExp( + `code(.cmd)? --install-extension ${regexEscape(vsixPath)} --force`, + ), + ), + { stdio: 'pipe' }, + ); + expect(mockContext.ui.addItem).toHaveBeenCalledWith( + expect.objectContaining({ + type: 'info', + text: 'VS Code companion extension installed successfully. Restart gemini-cli in a fresh terminal window.', + }), + expect.any(Number), + ); + }); + + it('should show an error if installation fails', async () => { + const vsixPath = '/path/to/bundle/gemini.vsix'; + const errorMessage = 'Installation failed'; + execSyncSpy + .mockReturnValueOnce('') // VSCode is installed check + .mockImplementation(() => { + // Installation command + const error: Error & { stderr?: Buffer } = new Error( + 'Command failed', + ); + error.stderr = Buffer.from(errorMessage); + throw error; + }); + globSyncSpy.mockReturnValue([vsixPath]); + + const command = ideCommand(mockConfig); + await command!.subCommands![1].action!(mockContext, ''); + + expect(mockContext.ui.addItem).toHaveBeenCalledWith( + expect.objectContaining({ + type: 'error', + text: `Failed to install VS Code companion extension.`, + }), + expect.any(Number), + ); + }); + }); +}); diff --git a/packages/cli/src/ui/commands/ideCommand.ts b/packages/cli/src/ui/commands/ideCommand.ts new file mode 100644 index 000000000..5631e7c26 --- /dev/null +++ b/packages/cli/src/ui/commands/ideCommand.ts @@ -0,0 +1,157 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { fileURLToPath } from 'url'; +import { Config, IDEConnectionStatus } from '@qwen-code/qwen-code-core'; +import { + CommandContext, + SlashCommand, + SlashCommandActionReturn, + CommandKind, +} from './types.js'; 
+import * as child_process from 'child_process'; +import * as process from 'process'; +import { glob } from 'glob'; +import * as path from 'path'; + +const VSCODE_COMMAND = process.platform === 'win32' ? 'code.cmd' : 'code'; +const VSCODE_COMPANION_EXTENSION_FOLDER = 'vscode-ide-companion'; + +function isVSCodeInstalled(): boolean { + try { + child_process.execSync( + process.platform === 'win32' + ? `where.exe ${VSCODE_COMMAND}` + : `command -v ${VSCODE_COMMAND}`, + { stdio: 'ignore' }, + ); + return true; + } catch { + return false; + } +} + +export const ideCommand = (config: Config | null): SlashCommand | null => { + if (!config?.getIdeMode()) { + return null; + } + + return { + name: 'ide', + description: 'manage IDE integration', + kind: CommandKind.BUILT_IN, + subCommands: [ + { + name: 'status', + description: 'check status of IDE integration', + kind: CommandKind.BUILT_IN, + action: (_context: CommandContext): SlashCommandActionReturn => { + const connection = config.getIdeClient()?.getConnectionStatus(); + switch (connection?.status) { + case IDEConnectionStatus.Connected: + return { + type: 'message', + messageType: 'info', + content: `🟢 Connected`, + } as const; + case IDEConnectionStatus.Connecting: + return { + type: 'message', + messageType: 'info', + content: `🟡 Connecting...`, + } as const; + default: { + let content = `🔴 Disconnected`; + if (connection?.details) { + content += `: ${connection.details}`; + } + return { + type: 'message', + messageType: 'error', + content, + } as const; + } + } + }, + }, + { + name: 'install', + description: 'install required VS Code companion extension', + kind: CommandKind.BUILT_IN, + action: async (context) => { + if (!isVSCodeInstalled()) { + context.ui.addItem( + { + type: 'error', + text: `VS Code command-line tool "${VSCODE_COMMAND}" not found in your PATH.`, + }, + Date.now(), + ); + return; + } + + const bundleDir = path.dirname(fileURLToPath(import.meta.url)); + // The VSIX file is copied to the bundle 
directory as part of the build. + let vsixFiles = glob.sync(path.join(bundleDir, '*.vsix')); + if (vsixFiles.length === 0) { + // If the VSIX file is not in the bundle, it might be a dev + // environment running with `npm start`. Look for it in the original + // package location, relative to the bundle dir. + const devPath = path.join( + bundleDir, + '..', + '..', + '..', + '..', + '..', + VSCODE_COMPANION_EXTENSION_FOLDER, + '*.vsix', + ); + vsixFiles = glob.sync(devPath); + } + if (vsixFiles.length === 0) { + context.ui.addItem( + { + type: 'error', + text: 'Could not find the required VS Code companion extension. Please file a bug via /bug.', + }, + Date.now(), + ); + return; + } + + const vsixPath = vsixFiles[0]; + const command = `${VSCODE_COMMAND} --install-extension ${vsixPath} --force`; + context.ui.addItem( + { + type: 'info', + text: `Installing VS Code companion extension...`, + }, + Date.now(), + ); + try { + child_process.execSync(command, { stdio: 'pipe' }); + context.ui.addItem( + { + type: 'info', + text: 'VS Code companion extension installed successfully. 
Restart gemini-cli in a fresh terminal window.', + }, + Date.now(), + ); + } catch (_error) { + context.ui.addItem( + { + type: 'error', + text: `Failed to install VS Code companion extension.`, + }, + Date.now(), + ); + } + }, + }, + ], + }; +}; diff --git a/packages/cli/src/ui/commands/mcpCommand.test.ts b/packages/cli/src/ui/commands/mcpCommand.test.ts new file mode 100644 index 000000000..53a23d842 --- /dev/null +++ b/packages/cli/src/ui/commands/mcpCommand.test.ts @@ -0,0 +1,1067 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { vi, describe, it, expect, beforeEach } from 'vitest'; +import { mcpCommand } from './mcpCommand.js'; +import { createMockCommandContext } from '../../test-utils/mockCommandContext.js'; +import { + MCPServerStatus, + MCPDiscoveryState, + getMCPServerStatus, + getMCPDiscoveryState, + DiscoveredMCPTool, +} from '@qwen-code/qwen-code-core'; +import open from 'open'; +import { MessageActionReturn } from './types.js'; +import { Type, CallableTool } from '@google/genai'; + +// Mock external dependencies +vi.mock('open', () => ({ + default: vi.fn(), +})); + +vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => { + const actual = + await importOriginal(); + return { + ...actual, + getMCPServerStatus: vi.fn(), + getMCPDiscoveryState: vi.fn(), + MCPOAuthProvider: { + authenticate: vi.fn(), + }, + MCPOAuthTokenStorage: { + getToken: vi.fn(), + isTokenExpired: vi.fn(), + }, + }; +}); + +// Helper function to check if result is a message action +const isMessageAction = (result: unknown): result is MessageActionReturn => + result !== null && + typeof result === 'object' && + 'type' in result && + result.type === 'message'; + +// Helper function to create a mock DiscoveredMCPTool +const createMockMCPTool = ( + name: string, + serverName: string, + description?: string, +) => + new DiscoveredMCPTool( + { + callTool: vi.fn(), + tool: vi.fn(), + } as unknown as CallableTool, + 
serverName, + name, + description || `Description for ${name}`, + { type: Type.OBJECT, properties: {} }, + name, // serverToolName same as name for simplicity + ); + +describe('mcpCommand', () => { + let mockContext: ReturnType; + let mockConfig: { + getToolRegistry: ReturnType; + getMcpServers: ReturnType; + getBlockedMcpServers: ReturnType; + getPromptRegistry: ReturnType; + }; + + beforeEach(() => { + vi.clearAllMocks(); + + // Set up default mock environment + delete process.env.SANDBOX; + + // Default mock implementations + vi.mocked(getMCPServerStatus).mockReturnValue(MCPServerStatus.CONNECTED); + vi.mocked(getMCPDiscoveryState).mockReturnValue( + MCPDiscoveryState.COMPLETED, + ); + + // Create mock config with all necessary methods + mockConfig = { + getToolRegistry: vi.fn().mockResolvedValue({ + getAllTools: vi.fn().mockReturnValue([]), + }), + getMcpServers: vi.fn().mockReturnValue({}), + getBlockedMcpServers: vi.fn().mockReturnValue([]), + getPromptRegistry: vi.fn().mockResolvedValue({ + getAllPrompts: vi.fn().mockReturnValue([]), + getPromptsByServer: vi.fn().mockReturnValue([]), + }), + }; + + mockContext = createMockCommandContext({ + services: { + config: mockConfig, + }, + }); + }); + + describe('basic functionality', () => { + it('should show an error if config is not available', async () => { + const contextWithoutConfig = createMockCommandContext({ + services: { + config: null, + }, + }); + + const result = await mcpCommand.action!(contextWithoutConfig, ''); + + expect(result).toEqual({ + type: 'message', + messageType: 'error', + content: 'Config not loaded.', + }); + }); + + it('should show an error if tool registry is not available', async () => { + mockConfig.getToolRegistry = vi.fn().mockResolvedValue(undefined); + + const result = await mcpCommand.action!(mockContext, ''); + + expect(result).toEqual({ + type: 'message', + messageType: 'error', + content: 'Could not retrieve tool registry.', + }); + }); + }); + + describe('no MCP servers 
configured', () => { + beforeEach(() => { + mockConfig.getToolRegistry = vi.fn().mockResolvedValue({ + getAllTools: vi.fn().mockReturnValue([]), + }); + mockConfig.getMcpServers = vi.fn().mockReturnValue({}); + }); + + it('should display a message with a URL when no MCP servers are configured in a sandbox', async () => { + process.env.SANDBOX = 'sandbox'; + + const result = await mcpCommand.action!(mockContext, ''); + + expect(result).toEqual({ + type: 'message', + messageType: 'info', + content: + 'No MCP servers configured. Please open the following URL in your browser to view documentation:\nhttps://goo.gle/gemini-cli-docs-mcp', + }); + expect(open).not.toHaveBeenCalled(); + }); + + it('should display a message and open a URL when no MCP servers are configured outside a sandbox', async () => { + const result = await mcpCommand.action!(mockContext, ''); + + expect(result).toEqual({ + type: 'message', + messageType: 'info', + content: + 'No MCP servers configured. Opening documentation in your browser: https://goo.gle/gemini-cli-docs-mcp', + }); + expect(open).toHaveBeenCalledWith('https://goo.gle/gemini-cli-docs-mcp'); + }); + }); + + describe('with configured MCP servers', () => { + beforeEach(() => { + const mockMcpServers = { + server1: { command: 'cmd1' }, + server2: { command: 'cmd2' }, + server3: { command: 'cmd3' }, + }; + + mockConfig.getMcpServers = vi.fn().mockReturnValue(mockMcpServers); + }); + + it('should display configured MCP servers with status indicators and their tools', async () => { + // Setup getMCPServerStatus mock implementation + vi.mocked(getMCPServerStatus).mockImplementation((serverName) => { + if (serverName === 'server1') return MCPServerStatus.CONNECTED; + if (serverName === 'server2') return MCPServerStatus.CONNECTED; + return MCPServerStatus.DISCONNECTED; // server3 + }); + + // Mock tools from each server using actual DiscoveredMCPTool instances + const mockServer1Tools = [ + createMockMCPTool('server1_tool1', 'server1'), + 
createMockMCPTool('server1_tool2', 'server1'), + ]; + const mockServer2Tools = [createMockMCPTool('server2_tool1', 'server2')]; + const mockServer3Tools = [createMockMCPTool('server3_tool1', 'server3')]; + + const allTools = [ + ...mockServer1Tools, + ...mockServer2Tools, + ...mockServer3Tools, + ]; + + mockConfig.getToolRegistry = vi.fn().mockResolvedValue({ + getAllTools: vi.fn().mockReturnValue(allTools), + }); + + const result = await mcpCommand.action!(mockContext, ''); + + expect(result).toEqual({ + type: 'message', + messageType: 'info', + content: expect.stringContaining('Configured MCP servers:'), + }); + + expect(isMessageAction(result)).toBe(true); + if (isMessageAction(result)) { + const message = result.content; + // Server 1 - Connected + expect(message).toContain( + '🟢 \u001b[1mserver1\u001b[0m - Ready (2 tools)', + ); + expect(message).toContain('server1_tool1'); + expect(message).toContain('server1_tool2'); + + // Server 2 - Connected + expect(message).toContain( + '🟢 \u001b[1mserver2\u001b[0m - Ready (1 tool)', + ); + expect(message).toContain('server2_tool1'); + + // Server 3 - Disconnected + expect(message).toContain( + '🔴 \u001b[1mserver3\u001b[0m - Disconnected (1 tools cached)', + ); + expect(message).toContain('server3_tool1'); + + // Check that helpful tips are displayed when no arguments are provided + expect(message).toContain('💡 Tips:'); + expect(message).toContain('/mcp desc'); + expect(message).toContain('/mcp schema'); + expect(message).toContain('/mcp nodesc'); + expect(message).toContain('Ctrl+T'); + } + }); + + it('should display tool descriptions when desc argument is used', async () => { + const mockMcpServers = { + server1: { + command: 'cmd1', + description: 'This is a server description', + }, + }; + + mockConfig.getMcpServers = vi.fn().mockReturnValue(mockMcpServers); + + // Mock tools with descriptions using actual DiscoveredMCPTool instances + const mockServerTools = [ + createMockMCPTool('tool1', 'server1', 'This is tool 1 
description'), + createMockMCPTool('tool2', 'server1', 'This is tool 2 description'), + ]; + + mockConfig.getToolRegistry = vi.fn().mockResolvedValue({ + getAllTools: vi.fn().mockReturnValue(mockServerTools), + }); + + const result = await mcpCommand.action!(mockContext, 'desc'); + + expect(result).toEqual({ + type: 'message', + messageType: 'info', + content: expect.stringContaining('Configured MCP servers:'), + }); + + expect(isMessageAction(result)).toBe(true); + if (isMessageAction(result)) { + const message = result.content; + + // Check that server description is included + expect(message).toContain( + '\u001b[1mserver1\u001b[0m - Ready (2 tools)', + ); + expect(message).toContain( + '\u001b[32mThis is a server description\u001b[0m', + ); + + // Check that tool descriptions are included + expect(message).toContain('\u001b[36mtool1\u001b[0m'); + expect(message).toContain( + '\u001b[32mThis is tool 1 description\u001b[0m', + ); + expect(message).toContain('\u001b[36mtool2\u001b[0m'); + expect(message).toContain( + '\u001b[32mThis is tool 2 description\u001b[0m', + ); + + // Check that tips are NOT displayed when arguments are provided + expect(message).not.toContain('💡 Tips:'); + } + }); + + it('should not display descriptions when nodesc argument is used', async () => { + const mockMcpServers = { + server1: { + command: 'cmd1', + description: 'This is a server description', + }, + }; + + mockConfig.getMcpServers = vi.fn().mockReturnValue(mockMcpServers); + + const mockServerTools = [ + createMockMCPTool('tool1', 'server1', 'This is tool 1 description'), + ]; + + mockConfig.getToolRegistry = vi.fn().mockResolvedValue({ + getAllTools: vi.fn().mockReturnValue(mockServerTools), + }); + + const result = await mcpCommand.action!(mockContext, 'nodesc'); + + expect(result).toEqual({ + type: 'message', + messageType: 'info', + content: expect.stringContaining('Configured MCP servers:'), + }); + + expect(isMessageAction(result)).toBe(true); + if 
(isMessageAction(result)) { + const message = result.content; + + // Check that descriptions are not included + expect(message).not.toContain('This is a server description'); + expect(message).not.toContain('This is tool 1 description'); + expect(message).toContain('\u001b[36mtool1\u001b[0m'); + + // Check that tips are NOT displayed when arguments are provided + expect(message).not.toContain('💡 Tips:'); + } + }); + + it('should indicate when a server has no tools', async () => { + const mockMcpServers = { + server1: { command: 'cmd1' }, + server2: { command: 'cmd2' }, + }; + + mockConfig.getMcpServers = vi.fn().mockReturnValue(mockMcpServers); + + // Setup server statuses + vi.mocked(getMCPServerStatus).mockImplementation((serverName) => { + if (serverName === 'server1') return MCPServerStatus.CONNECTED; + if (serverName === 'server2') return MCPServerStatus.DISCONNECTED; + return MCPServerStatus.DISCONNECTED; + }); + + // Mock tools - only server1 has tools + const mockServerTools = [createMockMCPTool('server1_tool1', 'server1')]; + + mockConfig.getToolRegistry = vi.fn().mockResolvedValue({ + getAllTools: vi.fn().mockReturnValue(mockServerTools), + }); + + const result = await mcpCommand.action!(mockContext, ''); + + expect(isMessageAction(result)).toBe(true); + if (isMessageAction(result)) { + const message = result.content; + expect(message).toContain( + '🟢 \u001b[1mserver1\u001b[0m - Ready (1 tool)', + ); + expect(message).toContain('\u001b[36mserver1_tool1\u001b[0m'); + expect(message).toContain( + '🔴 \u001b[1mserver2\u001b[0m - Disconnected (0 tools cached)', + ); + expect(message).toContain('No tools or prompts available'); + } + }); + + it('should show startup indicator when servers are connecting', async () => { + const mockMcpServers = { + server1: { command: 'cmd1' }, + server2: { command: 'cmd2' }, + }; + + mockConfig.getMcpServers = vi.fn().mockReturnValue(mockMcpServers); + + // Setup server statuses with one connecting + 
vi.mocked(getMCPServerStatus).mockImplementation((serverName) => { + if (serverName === 'server1') return MCPServerStatus.CONNECTED; + if (serverName === 'server2') return MCPServerStatus.CONNECTING; + return MCPServerStatus.DISCONNECTED; + }); + + // Setup discovery state as in progress + vi.mocked(getMCPDiscoveryState).mockReturnValue( + MCPDiscoveryState.IN_PROGRESS, + ); + + // Mock tools + const mockServerTools = [ + createMockMCPTool('server1_tool1', 'server1'), + createMockMCPTool('server2_tool1', 'server2'), + ]; + + mockConfig.getToolRegistry = vi.fn().mockResolvedValue({ + getAllTools: vi.fn().mockReturnValue(mockServerTools), + }); + + const result = await mcpCommand.action!(mockContext, ''); + + expect(isMessageAction(result)).toBe(true); + if (isMessageAction(result)) { + const message = result.content; + + // Check that startup indicator is shown + expect(message).toContain( + '⏳ MCP servers are starting up (1 initializing)...', + ); + expect(message).toContain( + 'Note: First startup may take longer. Tool availability will update automatically.', + ); + + // Check server statuses + expect(message).toContain( + '🟢 \u001b[1mserver1\u001b[0m - Ready (1 tool)', + ); + expect(message).toContain( + '🔄 \u001b[1mserver2\u001b[0m - Starting... 
(first startup may take longer) (tools and prompts will appear when ready)', + ); + } + }); + + it('should display the extension name for servers from extensions', async () => { + const mockMcpServers = { + server1: { command: 'cmd1', extensionName: 'my-extension' }, + }; + mockConfig.getMcpServers = vi.fn().mockReturnValue(mockMcpServers); + + const result = await mcpCommand.action!(mockContext, ''); + + expect(isMessageAction(result)).toBe(true); + if (isMessageAction(result)) { + const message = result.content; + expect(message).toContain('server1 (from my-extension)'); + } + }); + + it('should display blocked MCP servers', async () => { + mockConfig.getMcpServers = vi.fn().mockReturnValue({}); + const blockedServers = [ + { name: 'blocked-server', extensionName: 'my-extension' }, + ]; + mockConfig.getBlockedMcpServers = vi.fn().mockReturnValue(blockedServers); + + const result = await mcpCommand.action!(mockContext, ''); + + expect(isMessageAction(result)).toBe(true); + if (isMessageAction(result)) { + const message = result.content; + expect(message).toContain( + '🔴 \u001b[1mblocked-server (from my-extension)\u001b[0m - Blocked', + ); + } + }); + + it('should display both active and blocked servers correctly', async () => { + const mockMcpServers = { + server1: { command: 'cmd1', extensionName: 'my-extension' }, + }; + mockConfig.getMcpServers = vi.fn().mockReturnValue(mockMcpServers); + const blockedServers = [ + { name: 'blocked-server', extensionName: 'another-extension' }, + ]; + mockConfig.getBlockedMcpServers = vi.fn().mockReturnValue(blockedServers); + + const result = await mcpCommand.action!(mockContext, ''); + + expect(isMessageAction(result)).toBe(true); + if (isMessageAction(result)) { + const message = result.content; + expect(message).toContain('server1 (from my-extension)'); + expect(message).toContain( + '🔴 \u001b[1mblocked-server (from another-extension)\u001b[0m - Blocked', + ); + } + }); + }); + + describe('schema functionality', () => { + 
it('should display tool schemas when schema argument is used', async () => { + const mockMcpServers = { + server1: { + command: 'cmd1', + description: 'This is a server description', + }, + }; + + mockConfig.getMcpServers = vi.fn().mockReturnValue(mockMcpServers); + + // Create tools with parameter schemas + const mockCallableTool1: CallableTool = { + callTool: vi.fn(), + tool: vi.fn(), + } as unknown as CallableTool; + const mockCallableTool2: CallableTool = { + callTool: vi.fn(), + tool: vi.fn(), + } as unknown as CallableTool; + + const tool1 = new DiscoveredMCPTool( + mockCallableTool1, + 'server1', + 'tool1', + 'This is tool 1 description', + { + type: Type.OBJECT, + properties: { + param1: { type: Type.STRING, description: 'First parameter' }, + }, + required: ['param1'], + }, + 'tool1', + ); + + const tool2 = new DiscoveredMCPTool( + mockCallableTool2, + 'server1', + 'tool2', + 'This is tool 2 description', + { + type: Type.OBJECT, + properties: { + param2: { type: Type.NUMBER, description: 'Second parameter' }, + }, + required: ['param2'], + }, + 'tool2', + ); + + const mockServerTools = [tool1, tool2]; + + mockConfig.getToolRegistry = vi.fn().mockResolvedValue({ + getAllTools: vi.fn().mockReturnValue(mockServerTools), + }); + + const result = await mcpCommand.action!(mockContext, 'schema'); + + expect(result).toEqual({ + type: 'message', + messageType: 'info', + content: expect.stringContaining('Configured MCP servers:'), + }); + + expect(isMessageAction(result)).toBe(true); + if (isMessageAction(result)) { + const message = result.content; + + // Check that server description is included + expect(message).toContain('Ready (2 tools)'); + expect(message).toContain('This is a server description'); + + // Check that tool descriptions and schemas are included + expect(message).toContain('This is tool 1 description'); + expect(message).toContain('Parameters:'); + expect(message).toContain('param1'); + expect(message).toContain('STRING'); + 
expect(message).toContain('This is tool 2 description'); + expect(message).toContain('param2'); + expect(message).toContain('NUMBER'); + } + }); + + it('should handle tools without parameter schemas gracefully', async () => { + const mockMcpServers = { + server1: { command: 'cmd1' }, + }; + + mockConfig.getMcpServers = vi.fn().mockReturnValue(mockMcpServers); + + // Mock tools without parameter schemas + const mockServerTools = [ + createMockMCPTool('tool1', 'server1', 'Tool without schema'), + ]; + + mockConfig.getToolRegistry = vi.fn().mockResolvedValue({ + getAllTools: vi.fn().mockReturnValue(mockServerTools), + }); + + const result = await mcpCommand.action!(mockContext, 'schema'); + + expect(result).toEqual({ + type: 'message', + messageType: 'info', + content: expect.stringContaining('Configured MCP servers:'), + }); + + expect(isMessageAction(result)).toBe(true); + if (isMessageAction(result)) { + const message = result.content; + expect(message).toContain('tool1'); + expect(message).toContain('Tool without schema'); + // Should not crash when parameterSchema is undefined + } + }); + }); + + describe('argument parsing', () => { + beforeEach(() => { + const mockMcpServers = { + server1: { + command: 'cmd1', + description: 'Server description', + }, + }; + + mockConfig.getMcpServers = vi.fn().mockReturnValue(mockMcpServers); + + const mockServerTools = [ + createMockMCPTool('tool1', 'server1', 'Test tool'), + ]; + + mockConfig.getToolRegistry = vi.fn().mockResolvedValue({ + getAllTools: vi.fn().mockReturnValue(mockServerTools), + }); + }); + + it('should handle "descriptions" as alias for "desc"', async () => { + const result = await mcpCommand.action!(mockContext, 'descriptions'); + + expect(isMessageAction(result)).toBe(true); + if (isMessageAction(result)) { + const message = result.content; + expect(message).toContain('Test tool'); + expect(message).toContain('Server description'); + } + }); + + it('should handle "nodescriptions" as alias for "nodesc"', 
async () => { + const result = await mcpCommand.action!(mockContext, 'nodescriptions'); + + expect(isMessageAction(result)).toBe(true); + if (isMessageAction(result)) { + const message = result.content; + expect(message).not.toContain('Test tool'); + expect(message).not.toContain('Server description'); + expect(message).toContain('\u001b[36mtool1\u001b[0m'); + } + }); + + it('should handle mixed case arguments', async () => { + const result = await mcpCommand.action!(mockContext, 'DESC'); + + expect(isMessageAction(result)).toBe(true); + if (isMessageAction(result)) { + const message = result.content; + expect(message).toContain('Test tool'); + expect(message).toContain('Server description'); + } + }); + + it('should handle multiple arguments - "schema desc"', async () => { + const result = await mcpCommand.action!(mockContext, 'schema desc'); + + expect(isMessageAction(result)).toBe(true); + if (isMessageAction(result)) { + const message = result.content; + expect(message).toContain('Test tool'); + expect(message).toContain('Server description'); + expect(message).toContain('Parameters:'); + } + }); + + it('should handle multiple arguments - "desc schema"', async () => { + const result = await mcpCommand.action!(mockContext, 'desc schema'); + + expect(isMessageAction(result)).toBe(true); + if (isMessageAction(result)) { + const message = result.content; + expect(message).toContain('Test tool'); + expect(message).toContain('Server description'); + expect(message).toContain('Parameters:'); + } + }); + + it('should handle "schema" alone showing descriptions', async () => { + const result = await mcpCommand.action!(mockContext, 'schema'); + + expect(isMessageAction(result)).toBe(true); + if (isMessageAction(result)) { + const message = result.content; + expect(message).toContain('Test tool'); + expect(message).toContain('Server description'); + expect(message).toContain('Parameters:'); + } + }); + + it('should handle "nodesc" overriding "schema" - "schema nodesc"', 
async () => { + const result = await mcpCommand.action!(mockContext, 'schema nodesc'); + + expect(isMessageAction(result)).toBe(true); + if (isMessageAction(result)) { + const message = result.content; + expect(message).not.toContain('Test tool'); + expect(message).not.toContain('Server description'); + expect(message).toContain('Parameters:'); // Schema should still show + expect(message).toContain('\u001b[36mtool1\u001b[0m'); + } + }); + + it('should handle "nodesc" overriding "desc" - "desc nodesc"', async () => { + const result = await mcpCommand.action!(mockContext, 'desc nodesc'); + + expect(isMessageAction(result)).toBe(true); + if (isMessageAction(result)) { + const message = result.content; + expect(message).not.toContain('Test tool'); + expect(message).not.toContain('Server description'); + expect(message).not.toContain('Parameters:'); + expect(message).toContain('\u001b[36mtool1\u001b[0m'); + } + }); + + it('should handle "nodesc" overriding both "desc" and "schema" - "desc schema nodesc"', async () => { + const result = await mcpCommand.action!( + mockContext, + 'desc schema nodesc', + ); + + expect(isMessageAction(result)).toBe(true); + if (isMessageAction(result)) { + const message = result.content; + expect(message).not.toContain('Test tool'); + expect(message).not.toContain('Server description'); + expect(message).toContain('Parameters:'); // Schema should still show + expect(message).toContain('\u001b[36mtool1\u001b[0m'); + } + }); + + it('should handle extra whitespace in arguments', async () => { + const result = await mcpCommand.action!(mockContext, ' desc schema '); + + expect(isMessageAction(result)).toBe(true); + if (isMessageAction(result)) { + const message = result.content; + expect(message).toContain('Test tool'); + expect(message).toContain('Server description'); + expect(message).toContain('Parameters:'); + } + }); + + it('should handle empty arguments gracefully', async () => { + const result = await mcpCommand.action!(mockContext, 
''); + + expect(isMessageAction(result)).toBe(true); + if (isMessageAction(result)) { + const message = result.content; + expect(message).not.toContain('Test tool'); + expect(message).not.toContain('Server description'); + expect(message).not.toContain('Parameters:'); + expect(message).toContain('\u001b[36mtool1\u001b[0m'); + } + }); + + it('should handle unknown arguments gracefully', async () => { + const result = await mcpCommand.action!(mockContext, 'unknown arg'); + + expect(isMessageAction(result)).toBe(true); + if (isMessageAction(result)) { + const message = result.content; + expect(message).not.toContain('Test tool'); + expect(message).not.toContain('Server description'); + expect(message).not.toContain('Parameters:'); + expect(message).toContain('\u001b[36mtool1\u001b[0m'); + } + }); + }); + + describe('edge cases', () => { + it('should handle empty server names gracefully', async () => { + const mockMcpServers = { + '': { command: 'cmd1' }, // Empty server name + }; + + mockConfig.getMcpServers = vi.fn().mockReturnValue(mockMcpServers); + mockConfig.getToolRegistry = vi.fn().mockResolvedValue({ + getAllTools: vi.fn().mockReturnValue([]), + }); + + const result = await mcpCommand.action!(mockContext, ''); + + expect(result).toEqual({ + type: 'message', + messageType: 'info', + content: expect.stringContaining('Configured MCP servers:'), + }); + }); + + it('should handle servers with special characters in names', async () => { + const mockMcpServers = { + 'server-with-dashes': { command: 'cmd1' }, + server_with_underscores: { command: 'cmd2' }, + 'server.with.dots': { command: 'cmd3' }, + }; + + mockConfig.getMcpServers = vi.fn().mockReturnValue(mockMcpServers); + mockConfig.getToolRegistry = vi.fn().mockResolvedValue({ + getAllTools: vi.fn().mockReturnValue([]), + }); + + const result = await mcpCommand.action!(mockContext, ''); + + expect(isMessageAction(result)).toBe(true); + if (isMessageAction(result)) { + const message = result.content; + 
expect(message).toContain('server-with-dashes'); + expect(message).toContain('server_with_underscores'); + expect(message).toContain('server.with.dots'); + } + }); + }); + + describe('auth subcommand', () => { + beforeEach(() => { + vi.clearAllMocks(); + }); + + it('should list OAuth-enabled servers when no server name is provided', async () => { + const context = createMockCommandContext({ + services: { + config: { + getMcpServers: vi.fn().mockReturnValue({ + 'oauth-server': { oauth: { enabled: true } }, + 'regular-server': {}, + 'another-oauth': { oauth: { enabled: true } }, + }), + }, + }, + }); + + const authCommand = mcpCommand.subCommands?.find( + (cmd) => cmd.name === 'auth', + ); + expect(authCommand).toBeDefined(); + + const result = await authCommand!.action!(context, ''); + expect(isMessageAction(result)).toBe(true); + if (isMessageAction(result)) { + expect(result.messageType).toBe('info'); + expect(result.content).toContain('oauth-server'); + expect(result.content).toContain('another-oauth'); + expect(result.content).not.toContain('regular-server'); + expect(result.content).toContain('/mcp auth '); + } + }); + + it('should show message when no OAuth servers are configured', async () => { + const context = createMockCommandContext({ + services: { + config: { + getMcpServers: vi.fn().mockReturnValue({ + 'regular-server': {}, + }), + }, + }, + }); + + const authCommand = mcpCommand.subCommands?.find( + (cmd) => cmd.name === 'auth', + ); + const result = await authCommand!.action!(context, ''); + + expect(isMessageAction(result)).toBe(true); + if (isMessageAction(result)) { + expect(result.messageType).toBe('info'); + expect(result.content).toBe( + 'No MCP servers configured with OAuth authentication.', + ); + } + }); + + it('should authenticate with a specific server', async () => { + const mockToolRegistry = { + discoverToolsForServer: vi.fn(), + }; + const mockGeminiClient = { + setTools: vi.fn(), + }; + + const context = createMockCommandContext({ + 
services: { + config: { + getMcpServers: vi.fn().mockReturnValue({ + 'test-server': { + url: 'http://localhost:3000', + oauth: { enabled: true }, + }, + }), + getToolRegistry: vi.fn().mockResolvedValue(mockToolRegistry), + getGeminiClient: vi.fn().mockReturnValue(mockGeminiClient), + }, + }, + }); + + const { MCPOAuthProvider } = await import('@qwen-code/qwen-code-core'); + + const authCommand = mcpCommand.subCommands?.find( + (cmd) => cmd.name === 'auth', + ); + const result = await authCommand!.action!(context, 'test-server'); + + expect(MCPOAuthProvider.authenticate).toHaveBeenCalledWith( + 'test-server', + { enabled: true }, + 'http://localhost:3000', + ); + expect(mockToolRegistry.discoverToolsForServer).toHaveBeenCalledWith( + 'test-server', + ); + expect(mockGeminiClient.setTools).toHaveBeenCalled(); + + expect(isMessageAction(result)).toBe(true); + if (isMessageAction(result)) { + expect(result.messageType).toBe('info'); + expect(result.content).toContain('Successfully authenticated'); + } + }); + + it('should handle authentication errors', async () => { + const context = createMockCommandContext({ + services: { + config: { + getMcpServers: vi.fn().mockReturnValue({ + 'test-server': { oauth: { enabled: true } }, + }), + }, + }, + }); + + const { MCPOAuthProvider } = await import('@qwen-code/qwen-code-core'); + ( + MCPOAuthProvider.authenticate as ReturnType + ).mockRejectedValue(new Error('Auth failed')); + + const authCommand = mcpCommand.subCommands?.find( + (cmd) => cmd.name === 'auth', + ); + const result = await authCommand!.action!(context, 'test-server'); + + expect(isMessageAction(result)).toBe(true); + if (isMessageAction(result)) { + expect(result.messageType).toBe('error'); + expect(result.content).toContain('Failed to authenticate'); + expect(result.content).toContain('Auth failed'); + } + }); + + it('should handle non-existent server', async () => { + const context = createMockCommandContext({ + services: { + config: { + getMcpServers: 
vi.fn().mockReturnValue({ + 'existing-server': {}, + }), + }, + }, + }); + + const authCommand = mcpCommand.subCommands?.find( + (cmd) => cmd.name === 'auth', + ); + const result = await authCommand!.action!(context, 'non-existent'); + + expect(isMessageAction(result)).toBe(true); + if (isMessageAction(result)) { + expect(result.messageType).toBe('error'); + expect(result.content).toContain("MCP server 'non-existent' not found"); + } + }); + }); + + describe('refresh subcommand', () => { + it('should refresh the list of tools and display the status', async () => { + const mockToolRegistry = { + discoverMcpTools: vi.fn(), + getAllTools: vi.fn().mockReturnValue([]), + }; + const mockGeminiClient = { + setTools: vi.fn(), + }; + + const context = createMockCommandContext({ + services: { + config: { + getMcpServers: vi.fn().mockReturnValue({ server1: {} }), + getBlockedMcpServers: vi.fn().mockReturnValue([]), + getToolRegistry: vi.fn().mockResolvedValue(mockToolRegistry), + getGeminiClient: vi.fn().mockReturnValue(mockGeminiClient), + getPromptRegistry: vi.fn().mockResolvedValue({ + getPromptsByServer: vi.fn().mockReturnValue([]), + }), + }, + }, + }); + + const refreshCommand = mcpCommand.subCommands?.find( + (cmd) => cmd.name === 'refresh', + ); + expect(refreshCommand).toBeDefined(); + + const result = await refreshCommand!.action!(context, ''); + + expect(context.ui.addItem).toHaveBeenCalledWith( + { + type: 'info', + text: 'Refreshing MCP servers and tools...', + }, + expect.any(Number), + ); + expect(mockToolRegistry.discoverMcpTools).toHaveBeenCalled(); + expect(mockGeminiClient.setTools).toHaveBeenCalled(); + + expect(isMessageAction(result)).toBe(true); + if (isMessageAction(result)) { + expect(result.messageType).toBe('info'); + expect(result.content).toContain('Configured MCP servers:'); + } + }); + + it('should show an error if config is not available', async () => { + const contextWithoutConfig = createMockCommandContext({ + services: { + config: null, + }, 
+ }); + + const refreshCommand = mcpCommand.subCommands?.find( + (cmd) => cmd.name === 'refresh', + ); + const result = await refreshCommand!.action!(contextWithoutConfig, ''); + + expect(result).toEqual({ + type: 'message', + messageType: 'error', + content: 'Config not loaded.', + }); + }); + + it('should show an error if tool registry is not available', async () => { + mockConfig.getToolRegistry = vi.fn().mockResolvedValue(undefined); + + const refreshCommand = mcpCommand.subCommands?.find( + (cmd) => cmd.name === 'refresh', + ); + const result = await refreshCommand!.action!(mockContext, ''); + + expect(result).toEqual({ + type: 'message', + messageType: 'error', + content: 'Could not retrieve tool registry.', + }); + }); + }); +}); diff --git a/packages/cli/src/ui/commands/mcpCommand.ts b/packages/cli/src/ui/commands/mcpCommand.ts new file mode 100644 index 000000000..2a3ba7187 --- /dev/null +++ b/packages/cli/src/ui/commands/mcpCommand.ts @@ -0,0 +1,524 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { + SlashCommand, + SlashCommandActionReturn, + CommandContext, + CommandKind, + MessageActionReturn, +} from './types.js'; +import { + DiscoveredMCPPrompt, + DiscoveredMCPTool, + getMCPDiscoveryState, + getMCPServerStatus, + MCPDiscoveryState, + MCPServerStatus, + mcpServerRequiresOAuth, + getErrorMessage, +} from '@qwen-code/qwen-code-core'; +import open from 'open'; + +const COLOR_GREEN = '\u001b[32m'; +const COLOR_YELLOW = '\u001b[33m'; +const COLOR_RED = '\u001b[31m'; +const COLOR_CYAN = '\u001b[36m'; +const COLOR_GREY = '\u001b[90m'; +const RESET_COLOR = '\u001b[0m'; + +const getMcpStatus = async ( + context: CommandContext, + showDescriptions: boolean, + showSchema: boolean, + showTips: boolean = false, +): Promise => { + const { config } = context.services; + if (!config) { + return { + type: 'message', + messageType: 'error', + content: 'Config not loaded.', + }; + } + + const toolRegistry = await 
config.getToolRegistry(); + if (!toolRegistry) { + return { + type: 'message', + messageType: 'error', + content: 'Could not retrieve tool registry.', + }; + } + + const mcpServers = config.getMcpServers() || {}; + const serverNames = Object.keys(mcpServers); + const blockedMcpServers = config.getBlockedMcpServers() || []; + + if (serverNames.length === 0 && blockedMcpServers.length === 0) { + const docsUrl = 'https://goo.gle/gemini-cli-docs-mcp'; + if (process.env.SANDBOX && process.env.SANDBOX !== 'sandbox-exec') { + return { + type: 'message', + messageType: 'info', + content: `No MCP servers configured. Please open the following URL in your browser to view documentation:\n${docsUrl}`, + }; + } else { + // Open the URL in the browser + await open(docsUrl); + return { + type: 'message', + messageType: 'info', + content: `No MCP servers configured. Opening documentation in your browser: ${docsUrl}`, + }; + } + } + + // Check if any servers are still connecting + const connectingServers = serverNames.filter( + (name) => getMCPServerStatus(name) === MCPServerStatus.CONNECTING, + ); + const discoveryState = getMCPDiscoveryState(); + + let message = ''; + + // Add overall discovery status message if needed + if ( + discoveryState === MCPDiscoveryState.IN_PROGRESS || + connectingServers.length > 0 + ) { + message += `${COLOR_YELLOW}⏳ MCP servers are starting up (${connectingServers.length} initializing)...${RESET_COLOR}\n`; + message += `${COLOR_CYAN}Note: First startup may take longer. 
Tool availability will update automatically.${RESET_COLOR}\n\n`; + } + + message += 'Configured MCP servers:\n\n'; + + const allTools = toolRegistry.getAllTools(); + for (const serverName of serverNames) { + const serverTools = allTools.filter( + (tool) => + tool instanceof DiscoveredMCPTool && tool.serverName === serverName, + ) as DiscoveredMCPTool[]; + const promptRegistry = await config.getPromptRegistry(); + const serverPrompts = promptRegistry.getPromptsByServer(serverName) || []; + + const status = getMCPServerStatus(serverName); + + // Add status indicator with descriptive text + let statusIndicator = ''; + let statusText = ''; + switch (status) { + case MCPServerStatus.CONNECTED: + statusIndicator = '🟢'; + statusText = 'Ready'; + break; + case MCPServerStatus.CONNECTING: + statusIndicator = '🔄'; + statusText = 'Starting... (first startup may take longer)'; + break; + case MCPServerStatus.DISCONNECTED: + default: + statusIndicator = '🔴'; + statusText = 'Disconnected'; + break; + } + + // Get server description if available + const server = mcpServers[serverName]; + let serverDisplayName = serverName; + if (server.extensionName) { + serverDisplayName += ` (from ${server.extensionName})`; + } + + // Format server header with bold formatting and status + message += `${statusIndicator} \u001b[1m${serverDisplayName}\u001b[0m - ${statusText}`; + + let needsAuthHint = mcpServerRequiresOAuth.get(serverName) || false; + // Add OAuth status if applicable + if (server?.oauth?.enabled) { + needsAuthHint = true; + try { + const { MCPOAuthTokenStorage } = await import( + '@qwen-code/qwen-code-core' + ); + const hasToken = await MCPOAuthTokenStorage.getToken(serverName); + if (hasToken) { + const isExpired = MCPOAuthTokenStorage.isTokenExpired(hasToken.token); + if (isExpired) { + message += ` ${COLOR_YELLOW}(OAuth token expired)${RESET_COLOR}`; + } else { + message += ` ${COLOR_GREEN}(OAuth authenticated)${RESET_COLOR}`; + needsAuthHint = false; + } + } else { + message 
+= ` ${COLOR_RED}(OAuth not authenticated)${RESET_COLOR}`; + } + } catch (_err) { + // If we can't check OAuth status, just continue + } + } + + // Add tool count with conditional messaging + if (status === MCPServerStatus.CONNECTED) { + const parts = []; + if (serverTools.length > 0) { + parts.push( + `${serverTools.length} ${serverTools.length === 1 ? 'tool' : 'tools'}`, + ); + } + if (serverPrompts.length > 0) { + parts.push( + `${serverPrompts.length} ${ + serverPrompts.length === 1 ? 'prompt' : 'prompts' + }`, + ); + } + if (parts.length > 0) { + message += ` (${parts.join(', ')})`; + } else { + message += ` (0 tools)`; + } + } else if (status === MCPServerStatus.CONNECTING) { + message += ` (tools and prompts will appear when ready)`; + } else { + message += ` (${serverTools.length} tools cached)`; + } + + // Add server description with proper handling of multi-line descriptions + if (showDescriptions && server?.description) { + const descLines = server.description.trim().split('\n'); + if (descLines) { + message += ':\n'; + for (const descLine of descLines) { + message += ` ${COLOR_GREEN}${descLine}${RESET_COLOR}\n`; + } + } else { + message += '\n'; + } + } else { + message += '\n'; + } + + // Reset formatting after server entry + message += RESET_COLOR; + + if (serverTools.length > 0) { + message += ` ${COLOR_CYAN}Tools:${RESET_COLOR}\n`; + serverTools.forEach((tool) => { + if (showDescriptions && tool.description) { + // Format tool name in cyan using simple ANSI cyan color + message += ` - ${COLOR_CYAN}${tool.name}${RESET_COLOR}`; + + // Handle multi-line descriptions by properly indenting and preserving formatting + const descLines = tool.description.trim().split('\n'); + if (descLines) { + message += ':\n'; + for (const descLine of descLines) { + message += ` ${COLOR_GREEN}${descLine}${RESET_COLOR}\n`; + } + } else { + message += '\n'; + } + // Reset is handled inline with each line now + } else { + // Use cyan color for the tool name even when not 
showing descriptions + message += ` - ${COLOR_CYAN}${tool.name}${RESET_COLOR}\n`; + } + const parameters = + tool.schema.parametersJsonSchema ?? tool.schema.parameters; + if (showSchema && parameters) { + // Prefix the parameters in cyan + message += ` ${COLOR_CYAN}Parameters:${RESET_COLOR}\n`; + + const paramsLines = JSON.stringify(parameters, null, 2) + .trim() + .split('\n'); + if (paramsLines) { + for (const paramsLine of paramsLines) { + message += ` ${COLOR_GREEN}${paramsLine}${RESET_COLOR}\n`; + } + } + } + }); + } + if (serverPrompts.length > 0) { + if (serverTools.length > 0) { + message += '\n'; + } + message += ` ${COLOR_CYAN}Prompts:${RESET_COLOR}\n`; + serverPrompts.forEach((prompt: DiscoveredMCPPrompt) => { + if (showDescriptions && prompt.description) { + message += ` - ${COLOR_CYAN}${prompt.name}${RESET_COLOR}`; + const descLines = prompt.description.trim().split('\n'); + if (descLines) { + message += ':\n'; + for (const descLine of descLines) { + message += ` ${COLOR_GREEN}${descLine}${RESET_COLOR}\n`; + } + } else { + message += '\n'; + } + } else { + message += ` - ${COLOR_CYAN}${prompt.name}${RESET_COLOR}\n`; + } + }); + } + + if (serverTools.length === 0 && serverPrompts.length === 0) { + message += ' No tools or prompts available\n'; + } else if (serverTools.length === 0) { + message += ' No tools available'; + if (status === MCPServerStatus.DISCONNECTED && needsAuthHint) { + message += ` ${COLOR_GREY}(type: "/mcp auth ${serverName}" to authenticate this server)${RESET_COLOR}`; + } + message += '\n'; + } else if (status === MCPServerStatus.DISCONNECTED && needsAuthHint) { + // This case is for when serverTools.length > 0 + message += ` ${COLOR_GREY}(type: "/mcp auth ${serverName}" to authenticate this server)${RESET_COLOR}\n`; + } + message += '\n'; + } + + for (const server of blockedMcpServers) { + let serverDisplayName = server.name; + if (server.extensionName) { + serverDisplayName += ` (from ${server.extensionName})`; + } + message += `🔴 
\u001b[1m${serverDisplayName}\u001b[0m - Blocked\n\n`; + } + + // Add helpful tips when no arguments are provided + if (showTips) { + message += '\n'; + message += `${COLOR_CYAN}💡 Tips:${RESET_COLOR}\n`; + message += ` • Use ${COLOR_CYAN}/mcp desc${RESET_COLOR} to show server and tool descriptions\n`; + message += ` • Use ${COLOR_CYAN}/mcp schema${RESET_COLOR} to show tool parameter schemas\n`; + message += ` • Use ${COLOR_CYAN}/mcp nodesc${RESET_COLOR} to hide descriptions\n`; + message += ` • Use ${COLOR_CYAN}/mcp auth ${RESET_COLOR} to authenticate with OAuth-enabled servers\n`; + message += ` • Press ${COLOR_CYAN}Ctrl+T${RESET_COLOR} to toggle tool descriptions on/off\n`; + message += '\n'; + } + + // Make sure to reset any ANSI formatting at the end to prevent it from affecting the terminal + message += RESET_COLOR; + + return { + type: 'message', + messageType: 'info', + content: message, + }; +}; + +const authCommand: SlashCommand = { + name: 'auth', + description: 'Authenticate with an OAuth-enabled MCP server', + kind: CommandKind.BUILT_IN, + action: async ( + context: CommandContext, + args: string, + ): Promise => { + const serverName = args.trim(); + const { config } = context.services; + + if (!config) { + return { + type: 'message', + messageType: 'error', + content: 'Config not loaded.', + }; + } + + const mcpServers = config.getMcpServers() || {}; + + if (!serverName) { + // List servers that support OAuth + const oauthServers = Object.entries(mcpServers) + .filter(([_, server]) => server.oauth?.enabled) + .map(([name, _]) => name); + + if (oauthServers.length === 0) { + return { + type: 'message', + messageType: 'info', + content: 'No MCP servers configured with OAuth authentication.', + }; + } + + return { + type: 'message', + messageType: 'info', + content: `MCP servers with OAuth authentication:\n${oauthServers.map((s) => ` - ${s}`).join('\n')}\n\nUse /mcp auth to authenticate.`, + }; + } + + const server = mcpServers[serverName]; + if (!server) 
{ + return { + type: 'message', + messageType: 'error', + content: `MCP server '${serverName}' not found.`, + }; + } + + // Always attempt OAuth authentication, even if not explicitly configured + // The authentication process will discover OAuth requirements automatically + + try { + context.ui.addItem( + { + type: 'info', + text: `Starting OAuth authentication for MCP server '${serverName}'...`, + }, + Date.now(), + ); + + // Import dynamically to avoid circular dependencies + const { MCPOAuthProvider } = await import('@qwen-code/qwen-code-core'); + + let oauthConfig = server.oauth; + if (!oauthConfig) { + oauthConfig = { enabled: false }; + } + + // Pass the MCP server URL for OAuth discovery + const mcpServerUrl = server.httpUrl || server.url; + await MCPOAuthProvider.authenticate( + serverName, + oauthConfig, + mcpServerUrl, + ); + + context.ui.addItem( + { + type: 'info', + text: `✅ Successfully authenticated with MCP server '${serverName}'!`, + }, + Date.now(), + ); + + // Trigger tool re-discovery to pick up authenticated server + const toolRegistry = await config.getToolRegistry(); + if (toolRegistry) { + context.ui.addItem( + { + type: 'info', + text: `Re-discovering tools from '${serverName}'...`, + }, + Date.now(), + ); + await toolRegistry.discoverToolsForServer(serverName); + } + // Update the client with the new tools + const geminiClient = config.getGeminiClient(); + if (geminiClient) { + await geminiClient.setTools(); + } + + return { + type: 'message', + messageType: 'info', + content: `Successfully authenticated and refreshed tools for '${serverName}'.`, + }; + } catch (error) { + return { + type: 'message', + messageType: 'error', + content: `Failed to authenticate with MCP server '${serverName}': ${getErrorMessage(error)}`, + }; + } + }, + completion: async (context: CommandContext, partialArg: string) => { + const { config } = context.services; + if (!config) return []; + + const mcpServers = config.getMcpServers() || {}; + return 
Object.keys(mcpServers).filter((name) => + name.startsWith(partialArg), + ); + }, +}; + +const listCommand: SlashCommand = { + name: 'list', + description: 'List configured MCP servers and tools', + kind: CommandKind.BUILT_IN, + action: async (context: CommandContext, args: string) => { + const lowerCaseArgs = args.toLowerCase().split(/\s+/).filter(Boolean); + + const hasDesc = + lowerCaseArgs.includes('desc') || lowerCaseArgs.includes('descriptions'); + const hasNodesc = + lowerCaseArgs.includes('nodesc') || + lowerCaseArgs.includes('nodescriptions'); + const showSchema = lowerCaseArgs.includes('schema'); + + // Show descriptions if `desc` or `schema` is present, + // but `nodesc` takes precedence and disables them. + const showDescriptions = !hasNodesc && (hasDesc || showSchema); + + // Show tips only when no arguments are provided + const showTips = lowerCaseArgs.length === 0; + + return getMcpStatus(context, showDescriptions, showSchema, showTips); + }, +}; + +const refreshCommand: SlashCommand = { + name: 'refresh', + description: 'Refresh the list of MCP servers and tools', + kind: CommandKind.BUILT_IN, + action: async ( + context: CommandContext, + ): Promise => { + const { config } = context.services; + if (!config) { + return { + type: 'message', + messageType: 'error', + content: 'Config not loaded.', + }; + } + + const toolRegistry = await config.getToolRegistry(); + if (!toolRegistry) { + return { + type: 'message', + messageType: 'error', + content: 'Could not retrieve tool registry.', + }; + } + + context.ui.addItem( + { + type: 'info', + text: 'Refreshing MCP servers and tools...', + }, + Date.now(), + ); + + await toolRegistry.discoverMcpTools(); + + // Update the client with the new tools + const geminiClient = config.getGeminiClient(); + if (geminiClient) { + await geminiClient.setTools(); + } + + return getMcpStatus(context, false, false, false); + }, +}; + +export const mcpCommand: SlashCommand = { + name: 'mcp', + description: + 'list 
configured MCP servers and tools, or authenticate with OAuth-enabled servers', + kind: CommandKind.BUILT_IN, + subCommands: [listCommand, authCommand, refreshCommand], + // Default action when no subcommand is provided + action: async (context: CommandContext, args: string) => + // If no subcommand, run the list command + listCommand.action!(context, args), +}; diff --git a/packages/cli/src/ui/commands/memoryCommand.test.ts b/packages/cli/src/ui/commands/memoryCommand.test.ts index 9aa2e05ae..c673b735e 100644 --- a/packages/cli/src/ui/commands/memoryCommand.test.ts +++ b/packages/cli/src/ui/commands/memoryCommand.test.ts @@ -9,7 +9,12 @@ import { memoryCommand } from './memoryCommand.js'; import { type CommandContext, SlashCommand } from './types.js'; import { createMockCommandContext } from '../../test-utils/mockCommandContext.js'; import { MessageType } from '../types.js'; -import { getErrorMessage } from '@qwen-code/qwen-code-core'; +import { LoadedSettings } from '../../config/settings.js'; +import { + getErrorMessage, + loadServerHierarchicalMemory, + type FileDiscoveryService, +} from '@qwen-code/qwen-code-core'; vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => { const original = @@ -20,9 +25,12 @@ vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => { if (error instanceof Error) return error.message; return String(error); }), + loadServerHierarchicalMemory: vi.fn(), }; }); +const mockLoadServerHierarchicalMemory = loadServerHierarchicalMemory as Mock; + describe('memoryCommand', () => { let mockContext: CommandContext; @@ -139,19 +147,37 @@ describe('memoryCommand', () => { describe('/memory refresh', () => { let refreshCommand: SlashCommand; - let mockRefreshMemory: Mock; + let mockSetUserMemory: Mock; + let mockSetGeminiMdFileCount: Mock; beforeEach(() => { refreshCommand = getSubCommand('refresh'); - mockRefreshMemory = vi.fn(); + mockSetUserMemory = vi.fn(); + mockSetGeminiMdFileCount = vi.fn(); + const mockConfig = { + 
setUserMemory: mockSetUserMemory, + setGeminiMdFileCount: mockSetGeminiMdFileCount, + getWorkingDir: () => '/test/dir', + getDebugMode: () => false, + getFileService: () => ({}) as FileDiscoveryService, + getExtensionContextFilePaths: () => [], + getFileFilteringOptions: () => ({ + ignore: [], + include: [], + }), + }; + mockContext = createMockCommandContext({ services: { - config: { - refreshMemory: mockRefreshMemory, - // eslint-disable-next-line @typescript-eslint/no-explicit-any - } as any, + config: Promise.resolve(mockConfig), + settings: { + merged: { + memoryDiscoveryMaxDirs: 1000, + }, + } as LoadedSettings, }, }); + mockLoadServerHierarchicalMemory.mockClear(); }); it('should display success message when memory is refreshed with content', async () => { @@ -161,7 +187,7 @@ describe('memoryCommand', () => { memoryContent: 'new memory content', fileCount: 2, }; - mockRefreshMemory.mockResolvedValue(refreshResult); + mockLoadServerHierarchicalMemory.mockResolvedValue(refreshResult); await refreshCommand.action(mockContext, ''); @@ -173,7 +199,13 @@ describe('memoryCommand', () => { expect.any(Number), ); - expect(mockRefreshMemory).toHaveBeenCalledOnce(); + expect(loadServerHierarchicalMemory).toHaveBeenCalledOnce(); + expect(mockSetUserMemory).toHaveBeenCalledWith( + refreshResult.memoryContent, + ); + expect(mockSetGeminiMdFileCount).toHaveBeenCalledWith( + refreshResult.fileCount, + ); expect(mockContext.ui.addItem).toHaveBeenCalledWith( { @@ -188,11 +220,13 @@ describe('memoryCommand', () => { if (!refreshCommand.action) throw new Error('Command has no action'); const refreshResult = { memoryContent: '', fileCount: 0 }; - mockRefreshMemory.mockResolvedValue(refreshResult); + mockLoadServerHierarchicalMemory.mockResolvedValue(refreshResult); await refreshCommand.action(mockContext, ''); - expect(mockRefreshMemory).toHaveBeenCalledOnce(); + expect(loadServerHierarchicalMemory).toHaveBeenCalledOnce(); + expect(mockSetUserMemory).toHaveBeenCalledWith(''); + 
expect(mockSetGeminiMdFileCount).toHaveBeenCalledWith(0); expect(mockContext.ui.addItem).toHaveBeenCalledWith( { @@ -207,11 +241,13 @@ describe('memoryCommand', () => { if (!refreshCommand.action) throw new Error('Command has no action'); const error = new Error('Failed to read memory files.'); - mockRefreshMemory.mockRejectedValue(error); + mockLoadServerHierarchicalMemory.mockRejectedValue(error); await refreshCommand.action(mockContext, ''); - expect(mockRefreshMemory).toHaveBeenCalledOnce(); + expect(loadServerHierarchicalMemory).toHaveBeenCalledOnce(); + expect(mockSetUserMemory).not.toHaveBeenCalled(); + expect(mockSetGeminiMdFileCount).not.toHaveBeenCalled(); expect(mockContext.ui.addItem).toHaveBeenCalledWith( { @@ -243,7 +279,7 @@ describe('memoryCommand', () => { expect.any(Number), ); - expect(mockRefreshMemory).not.toHaveBeenCalled(); + expect(loadServerHierarchicalMemory).not.toHaveBeenCalled(); }); }); }); diff --git a/packages/cli/src/ui/commands/memoryCommand.ts b/packages/cli/src/ui/commands/memoryCommand.ts index 3abd9571b..e8f1224af 100644 --- a/packages/cli/src/ui/commands/memoryCommand.ts +++ b/packages/cli/src/ui/commands/memoryCommand.ts @@ -4,17 +4,26 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { getErrorMessage } from '@qwen-code/qwen-code-core'; +import { + getErrorMessage, + loadServerHierarchicalMemory, +} from '@qwen-code/qwen-code-core'; import { MessageType } from '../types.js'; -import { SlashCommand, SlashCommandActionReturn } from './types.js'; +import { + CommandKind, + SlashCommand, + SlashCommandActionReturn, +} from './types.js'; export const memoryCommand: SlashCommand = { name: 'memory', description: 'Commands for interacting with memory.', + kind: CommandKind.BUILT_IN, subCommands: [ { name: 'show', description: 'Show the current memory contents.', + kind: CommandKind.BUILT_IN, action: async (context) => { const memoryContent = context.services.config?.getUserMemory() || ''; const fileCount = 
context.services.config?.getGeminiMdFileCount() || 0; @@ -36,6 +45,7 @@ export const memoryCommand: SlashCommand = { { name: 'add', description: 'Add content to the memory.', + kind: CommandKind.BUILT_IN, action: (context, args): SlashCommandActionReturn | void => { if (!args || args.trim() === '') { return { @@ -63,6 +73,7 @@ export const memoryCommand: SlashCommand = { { name: 'refresh', description: 'Refresh the memory from the source.', + kind: CommandKind.BUILT_IN, action: async (context) => { context.ui.addItem( { @@ -73,10 +84,20 @@ export const memoryCommand: SlashCommand = { ); try { - const result = await context.services.config?.refreshMemory(); + const config = await context.services.config; + if (config) { + const { memoryContent, fileCount } = + await loadServerHierarchicalMemory( + config.getWorkingDir(), + config.getDebugMode(), + config.getFileService(), + config.getExtensionContextFilePaths(), + config.getFileFilteringOptions(), + context.services.settings.merged.memoryDiscoveryMaxDirs, + ); + config.setUserMemory(memoryContent); + config.setGeminiMdFileCount(fileCount); - if (result) { - const { memoryContent, fileCount } = result; const successMessage = memoryContent.length > 0 ? `Memory refreshed successfully. 
Loaded ${memoryContent.length} characters from ${fileCount} file(s).` diff --git a/packages/cli/src/ui/commands/privacyCommand.ts b/packages/cli/src/ui/commands/privacyCommand.ts index f239158cc..ef9d08a04 100644 --- a/packages/cli/src/ui/commands/privacyCommand.ts +++ b/packages/cli/src/ui/commands/privacyCommand.ts @@ -4,11 +4,12 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { OpenDialogActionReturn, SlashCommand } from './types.js'; +import { CommandKind, OpenDialogActionReturn, SlashCommand } from './types.js'; export const privacyCommand: SlashCommand = { name: 'privacy', description: 'display the privacy notice', + kind: CommandKind.BUILT_IN, action: (): OpenDialogActionReturn => ({ type: 'dialog', dialog: 'privacy', diff --git a/packages/cli/src/ui/commands/quitCommand.test.ts b/packages/cli/src/ui/commands/quitCommand.test.ts new file mode 100644 index 000000000..e67723fdf --- /dev/null +++ b/packages/cli/src/ui/commands/quitCommand.test.ts @@ -0,0 +1,55 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { vi, describe, it, expect, beforeEach, afterEach } from 'vitest'; +import { quitCommand } from './quitCommand.js'; +import { createMockCommandContext } from '../../test-utils/mockCommandContext.js'; +import { formatDuration } from '../utils/formatters.js'; + +vi.mock('../utils/formatters.js'); + +describe('quitCommand', () => { + beforeEach(() => { + vi.useFakeTimers(); + vi.setSystemTime(new Date('2025-01-01T01:00:00Z')); + vi.mocked(formatDuration).mockReturnValue('1h 0m 0s'); + }); + + afterEach(() => { + vi.useRealTimers(); + vi.clearAllMocks(); + }); + + it('returns a QuitActionReturn object with the correct messages', () => { + const mockContext = createMockCommandContext({ + session: { + stats: { + sessionStartTime: new Date('2025-01-01T00:00:00Z'), + }, + }, + }); + + if (!quitCommand.action) throw new Error('Action is not defined'); + const result = quitCommand.action(mockContext, 
'quit'); + + expect(formatDuration).toHaveBeenCalledWith(3600000); // 1 hour in ms + expect(result).toEqual({ + type: 'quit', + messages: [ + { + type: 'user', + text: '/quit', + id: expect.any(Number), + }, + { + type: 'quit', + duration: '1h 0m 0s', + id: expect.any(Number), + }, + ], + }); + }); +}); diff --git a/packages/cli/src/ui/commands/quitCommand.ts b/packages/cli/src/ui/commands/quitCommand.ts new file mode 100644 index 000000000..36f15c71c --- /dev/null +++ b/packages/cli/src/ui/commands/quitCommand.ts @@ -0,0 +1,36 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { formatDuration } from '../utils/formatters.js'; +import { CommandKind, type SlashCommand } from './types.js'; + +export const quitCommand: SlashCommand = { + name: 'quit', + altNames: ['exit'], + description: 'exit the cli', + kind: CommandKind.BUILT_IN, + action: (context) => { + const now = Date.now(); + const { sessionStartTime } = context.session.stats; + const wallDuration = now - sessionStartTime.getTime(); + + return { + type: 'quit', + messages: [ + { + type: 'user', + text: `/quit`, // Keep it consistent, even if /exit was used + id: now - 1, + }, + { + type: 'quit', + duration: formatDuration(wallDuration), + id: now, + }, + ], + }; + }, +}; diff --git a/packages/cli/src/ui/commands/restoreCommand.test.ts b/packages/cli/src/ui/commands/restoreCommand.test.ts new file mode 100644 index 000000000..b4bcef7f9 --- /dev/null +++ b/packages/cli/src/ui/commands/restoreCommand.test.ts @@ -0,0 +1,250 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { vi, describe, it, expect, beforeEach, afterEach } from 'vitest'; +import * as fs from 'fs/promises'; +import * as os from 'os'; +import * as path from 'path'; +import { restoreCommand } from './restoreCommand.js'; +import { type CommandContext } from './types.js'; +import { createMockCommandContext } from 
'../../test-utils/mockCommandContext.js'; +import { Config, GitService } from '@qwen-code/qwen-code-core'; + +describe('restoreCommand', () => { + let mockContext: CommandContext; + let mockConfig: Config; + let mockGitService: GitService; + let mockSetHistory: ReturnType; + let testRootDir: string; + let geminiTempDir: string; + let checkpointsDir: string; + + beforeEach(async () => { + testRootDir = await fs.mkdtemp( + path.join(os.tmpdir(), 'restore-command-test-'), + ); + geminiTempDir = path.join(testRootDir, '.gemini'); + checkpointsDir = path.join(geminiTempDir, 'checkpoints'); + // The command itself creates this, but for tests it's easier to have it ready. + // Some tests might remove it to test error paths. + await fs.mkdir(checkpointsDir, { recursive: true }); + + mockSetHistory = vi.fn().mockResolvedValue(undefined); + mockGitService = { + restoreProjectFromSnapshot: vi.fn().mockResolvedValue(undefined), + } as unknown as GitService; + + mockConfig = { + getCheckpointingEnabled: vi.fn().mockReturnValue(true), + getProjectTempDir: vi.fn().mockReturnValue(geminiTempDir), + getGeminiClient: vi.fn().mockReturnValue({ + setHistory: mockSetHistory, + }), + } as unknown as Config; + + mockContext = createMockCommandContext({ + services: { + config: mockConfig, + git: mockGitService, + }, + }); + }); + + afterEach(async () => { + vi.restoreAllMocks(); + await fs.rm(testRootDir, { recursive: true, force: true }); + }); + + it('should return null if checkpointing is not enabled', () => { + vi.mocked(mockConfig.getCheckpointingEnabled).mockReturnValue(false); + + expect(restoreCommand(mockConfig)).toBeNull(); + }); + + it('should return the command if checkpointing is enabled', () => { + expect(restoreCommand(mockConfig)).toEqual( + expect.objectContaining({ + name: 'restore', + description: expect.any(String), + action: expect.any(Function), + completion: expect.any(Function), + }), + ); + }); + + describe('action', () => { + it('should return an error if temp 
dir is not found', async () => { + vi.mocked(mockConfig.getProjectTempDir).mockReturnValue(''); + + expect( + await restoreCommand(mockConfig)?.action?.(mockContext, ''), + ).toEqual({ + type: 'message', + messageType: 'error', + content: 'Could not determine the .gemini directory path.', + }); + }); + + it('should inform when no checkpoints are found if no args are passed', async () => { + // Remove the directory to ensure the command creates it. + await fs.rm(checkpointsDir, { recursive: true, force: true }); + const command = restoreCommand(mockConfig); + + expect(await command?.action?.(mockContext, '')).toEqual({ + type: 'message', + messageType: 'info', + content: 'No restorable tool calls found.', + }); + // Verify the directory was created by the command. + await expect(fs.stat(checkpointsDir)).resolves.toBeDefined(); + }); + + it('should list available checkpoints if no args are passed', async () => { + await fs.writeFile(path.join(checkpointsDir, 'test1.json'), '{}'); + await fs.writeFile(path.join(checkpointsDir, 'test2.json'), '{}'); + const command = restoreCommand(mockConfig); + + expect(await command?.action?.(mockContext, '')).toEqual({ + type: 'message', + messageType: 'info', + content: 'Available tool calls to restore:\n\ntest1\ntest2', + }); + }); + + it('should return an error if the specified file is not found', async () => { + await fs.writeFile(path.join(checkpointsDir, 'test1.json'), '{}'); + const command = restoreCommand(mockConfig); + + expect(await command?.action?.(mockContext, 'test2')).toEqual({ + type: 'message', + messageType: 'error', + content: 'File not found: test2.json', + }); + }); + + it('should handle file read errors gracefully', async () => { + const checkpointName = 'test1'; + const checkpointPath = path.join( + checkpointsDir, + `${checkpointName}.json`, + ); + // Create a directory instead of a file to cause a read error. 
+ await fs.mkdir(checkpointPath); + const command = restoreCommand(mockConfig); + + expect(await command?.action?.(mockContext, checkpointName)).toEqual({ + type: 'message', + messageType: 'error', + content: expect.stringContaining( + 'Could not read restorable tool calls.', + ), + }); + }); + + it('should restore a tool call and project state', async () => { + const toolCallData = { + history: [{ type: 'user', text: 'do a thing' }], + clientHistory: [{ role: 'user', parts: [{ text: 'do a thing' }] }], + commitHash: 'abcdef123', + toolCall: { name: 'run_shell_command', args: 'ls' }, + }; + await fs.writeFile( + path.join(checkpointsDir, 'my-checkpoint.json'), + JSON.stringify(toolCallData), + ); + const command = restoreCommand(mockConfig); + + expect(await command?.action?.(mockContext, 'my-checkpoint')).toEqual({ + type: 'tool', + toolName: 'run_shell_command', + toolArgs: 'ls', + }); + expect(mockContext.ui.loadHistory).toHaveBeenCalledWith( + toolCallData.history, + ); + expect(mockSetHistory).toHaveBeenCalledWith(toolCallData.clientHistory); + expect(mockGitService.restoreProjectFromSnapshot).toHaveBeenCalledWith( + toolCallData.commitHash, + ); + expect(mockContext.ui.addItem).toHaveBeenCalledWith( + { + type: 'info', + text: 'Restored project to the state before the tool call.', + }, + expect.any(Number), + ); + }); + + it('should restore even if only toolCall is present', async () => { + const toolCallData = { + toolCall: { name: 'run_shell_command', args: 'ls' }, + }; + await fs.writeFile( + path.join(checkpointsDir, 'my-checkpoint.json'), + JSON.stringify(toolCallData), + ); + + const command = restoreCommand(mockConfig); + + expect(await command?.action?.(mockContext, 'my-checkpoint')).toEqual({ + type: 'tool', + toolName: 'run_shell_command', + toolArgs: 'ls', + }); + + expect(mockContext.ui.loadHistory).not.toHaveBeenCalled(); + expect(mockSetHistory).not.toHaveBeenCalled(); + expect(mockGitService.restoreProjectFromSnapshot).not.toHaveBeenCalled(); + 
}); + }); + + it('should return an error for a checkpoint file missing the toolCall property', async () => { + const checkpointName = 'missing-toolcall'; + await fs.writeFile( + path.join(checkpointsDir, `${checkpointName}.json`), + JSON.stringify({ history: [] }), // An object that is valid JSON but missing the 'toolCall' property + ); + const command = restoreCommand(mockConfig); + + expect(await command?.action?.(mockContext, checkpointName)).toEqual({ + type: 'message', + messageType: 'error', + // A more specific error message would be ideal, but for now, we can assert the current behavior. + content: expect.stringContaining('Could not read restorable tool calls.'), + }); + }); + + describe('completion', () => { + it('should return an empty array if temp dir is not found', async () => { + vi.mocked(mockConfig.getProjectTempDir).mockReturnValue(''); + const command = restoreCommand(mockConfig); + + expect(await command?.completion?.(mockContext, '')).toEqual([]); + }); + + it('should return an empty array on readdir error', async () => { + await fs.rm(checkpointsDir, { recursive: true, force: true }); + const command = restoreCommand(mockConfig); + + expect(await command?.completion?.(mockContext, '')).toEqual([]); + }); + + it('should return a list of checkpoint names', async () => { + await fs.writeFile(path.join(checkpointsDir, 'test1.json'), '{}'); + await fs.writeFile(path.join(checkpointsDir, 'test2.json'), '{}'); + await fs.writeFile( + path.join(checkpointsDir, 'not-a-checkpoint.txt'), + '{}', + ); + const command = restoreCommand(mockConfig); + + expect(await command?.completion?.(mockContext, '')).toEqual([ + 'test1', + 'test2', + ]); + }); + }); +}); diff --git a/packages/cli/src/ui/commands/restoreCommand.ts b/packages/cli/src/ui/commands/restoreCommand.ts new file mode 100644 index 000000000..14f4bff30 --- /dev/null +++ b/packages/cli/src/ui/commands/restoreCommand.ts @@ -0,0 +1,157 @@ +/** + * @license + * Copyright 2025 Google LLC + * 
SPDX-License-Identifier: Apache-2.0 + */ + +import * as fs from 'fs/promises'; +import path from 'path'; +import { + type CommandContext, + type SlashCommand, + type SlashCommandActionReturn, + CommandKind, +} from './types.js'; +import { Config } from '@qwen-code/qwen-code-core'; + +async function restoreAction( + context: CommandContext, + args: string, +): Promise { + const { services, ui } = context; + const { config, git: gitService } = services; + const { addItem, loadHistory } = ui; + + const checkpointDir = config?.getProjectTempDir() + ? path.join(config.getProjectTempDir(), 'checkpoints') + : undefined; + + if (!checkpointDir) { + return { + type: 'message', + messageType: 'error', + content: 'Could not determine the .gemini directory path.', + }; + } + + try { + // Ensure the directory exists before trying to read it. + await fs.mkdir(checkpointDir, { recursive: true }); + const files = await fs.readdir(checkpointDir); + const jsonFiles = files.filter((file) => file.endsWith('.json')); + + if (!args) { + if (jsonFiles.length === 0) { + return { + type: 'message', + messageType: 'info', + content: 'No restorable tool calls found.', + }; + } + const truncatedFiles = jsonFiles.map((file) => { + const components = file.split('.'); + if (components.length <= 1) { + return file; + } + components.pop(); + return components.join('.'); + }); + const fileList = truncatedFiles.join('\n'); + return { + type: 'message', + messageType: 'info', + content: `Available tool calls to restore:\n\n${fileList}`, + }; + } + + const selectedFile = args.endsWith('.json') ? 
args : `${args}.json`; + + if (!jsonFiles.includes(selectedFile)) { + return { + type: 'message', + messageType: 'error', + content: `File not found: ${selectedFile}`, + }; + } + + const filePath = path.join(checkpointDir, selectedFile); + const data = await fs.readFile(filePath, 'utf-8'); + const toolCallData = JSON.parse(data); + + if (toolCallData.history) { + if (!loadHistory) { + // This should not happen + return { + type: 'message', + messageType: 'error', + content: 'loadHistory function is not available.', + }; + } + loadHistory(toolCallData.history); + } + + if (toolCallData.clientHistory) { + await config?.getGeminiClient()?.setHistory(toolCallData.clientHistory); + } + + if (toolCallData.commitHash) { + await gitService?.restoreProjectFromSnapshot(toolCallData.commitHash); + addItem( + { + type: 'info', + text: 'Restored project to the state before the tool call.', + }, + Date.now(), + ); + } + + return { + type: 'tool', + toolName: toolCallData.toolCall.name, + toolArgs: toolCallData.toolCall.args, + }; + } catch (error) { + return { + type: 'message', + messageType: 'error', + content: `Could not read restorable tool calls. This is the error: ${error}`, + }; + } +} + +async function completion( + context: CommandContext, + _partialArg: string, +): Promise { + const { services } = context; + const { config } = services; + const checkpointDir = config?.getProjectTempDir() + ? path.join(config.getProjectTempDir(), 'checkpoints') + : undefined; + if (!checkpointDir) { + return []; + } + try { + const files = await fs.readdir(checkpointDir); + return files + .filter((file) => file.endsWith('.json')) + .map((file) => file.replace('.json', '')); + } catch (_err) { + return []; + } +} + +export const restoreCommand = (config: Config | null): SlashCommand | null => { + if (!config?.getCheckpointingEnabled()) { + return null; + } + + return { + name: 'restore', + description: + 'Restore a tool call. 
This will reset the conversation and file history to the state it was in when the tool call was suggested', + kind: CommandKind.BUILT_IN, + action: restoreAction, + completion, + }; +}; diff --git a/packages/cli/src/ui/commands/statsCommand.test.ts b/packages/cli/src/ui/commands/statsCommand.test.ts new file mode 100644 index 000000000..485fcf693 --- /dev/null +++ b/packages/cli/src/ui/commands/statsCommand.test.ts @@ -0,0 +1,78 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { vi, describe, it, expect, beforeEach } from 'vitest'; +import { statsCommand } from './statsCommand.js'; +import { type CommandContext } from './types.js'; +import { createMockCommandContext } from '../../test-utils/mockCommandContext.js'; +import { MessageType } from '../types.js'; +import { formatDuration } from '../utils/formatters.js'; + +describe('statsCommand', () => { + let mockContext: CommandContext; + const startTime = new Date('2025-07-14T10:00:00.000Z'); + const endTime = new Date('2025-07-14T10:00:30.000Z'); + + beforeEach(() => { + vi.useFakeTimers(); + vi.setSystemTime(endTime); + + // 1. Create the mock context with all default values + mockContext = createMockCommandContext(); + + // 2. 
Directly set the property on the created mock context + mockContext.session.stats.sessionStartTime = startTime; + }); + + it('should display general session stats when run with no subcommand', () => { + if (!statsCommand.action) throw new Error('Command has no action'); + + statsCommand.action(mockContext, ''); + + const expectedDuration = formatDuration( + endTime.getTime() - startTime.getTime(), + ); + expect(mockContext.ui.addItem).toHaveBeenCalledWith( + { + type: MessageType.STATS, + duration: expectedDuration, + }, + expect.any(Number), + ); + }); + + it('should display model stats when using the "model" subcommand', () => { + const modelSubCommand = statsCommand.subCommands?.find( + (sc) => sc.name === 'model', + ); + if (!modelSubCommand?.action) throw new Error('Subcommand has no action'); + + modelSubCommand.action(mockContext, ''); + + expect(mockContext.ui.addItem).toHaveBeenCalledWith( + { + type: MessageType.MODEL_STATS, + }, + expect.any(Number), + ); + }); + + it('should display tool stats when using the "tools" subcommand', () => { + const toolsSubCommand = statsCommand.subCommands?.find( + (sc) => sc.name === 'tools', + ); + if (!toolsSubCommand?.action) throw new Error('Subcommand has no action'); + + toolsSubCommand.action(mockContext, ''); + + expect(mockContext.ui.addItem).toHaveBeenCalledWith( + { + type: MessageType.TOOL_STATS, + }, + expect.any(Number), + ); + }); +}); diff --git a/packages/cli/src/ui/commands/statsCommand.ts b/packages/cli/src/ui/commands/statsCommand.ts new file mode 100644 index 000000000..e9e697564 --- /dev/null +++ b/packages/cli/src/ui/commands/statsCommand.ts @@ -0,0 +1,70 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { MessageType, HistoryItemStats } from '../types.js'; +import { formatDuration } from '../utils/formatters.js'; +import { + type CommandContext, + type SlashCommand, + CommandKind, +} from './types.js'; + +export const statsCommand: 
SlashCommand = { + name: 'stats', + altNames: ['usage'], + description: 'check session stats. Usage: /stats [model|tools]', + kind: CommandKind.BUILT_IN, + action: (context: CommandContext) => { + const now = new Date(); + const { sessionStartTime } = context.session.stats; + if (!sessionStartTime) { + context.ui.addItem( + { + type: MessageType.ERROR, + text: 'Session start time is unavailable, cannot calculate stats.', + }, + Date.now(), + ); + return; + } + const wallDuration = now.getTime() - sessionStartTime.getTime(); + + const statsItem: HistoryItemStats = { + type: MessageType.STATS, + duration: formatDuration(wallDuration), + }; + + context.ui.addItem(statsItem, Date.now()); + }, + subCommands: [ + { + name: 'model', + description: 'Show model-specific usage statistics.', + kind: CommandKind.BUILT_IN, + action: (context: CommandContext) => { + context.ui.addItem( + { + type: MessageType.MODEL_STATS, + }, + Date.now(), + ); + }, + }, + { + name: 'tools', + description: 'Show tool-specific usage statistics.', + kind: CommandKind.BUILT_IN, + action: (context: CommandContext) => { + context.ui.addItem( + { + type: MessageType.TOOL_STATS, + }, + Date.now(), + ); + }, + }, + ], +}; diff --git a/packages/cli/src/ui/commands/themeCommand.ts b/packages/cli/src/ui/commands/themeCommand.ts index 29e9a491d..755d59d97 100644 --- a/packages/cli/src/ui/commands/themeCommand.ts +++ b/packages/cli/src/ui/commands/themeCommand.ts @@ -4,11 +4,12 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { OpenDialogActionReturn, SlashCommand } from './types.js'; +import { CommandKind, OpenDialogActionReturn, SlashCommand } from './types.js'; export const themeCommand: SlashCommand = { name: 'theme', description: 'change the theme', + kind: CommandKind.BUILT_IN, action: (_context, _args): OpenDialogActionReturn => ({ type: 'dialog', dialog: 'theme', diff --git a/packages/cli/src/ui/commands/toolsCommand.test.ts b/packages/cli/src/ui/commands/toolsCommand.test.ts new file mode 100644 
index 000000000..ed8953144 --- /dev/null +++ b/packages/cli/src/ui/commands/toolsCommand.test.ts @@ -0,0 +1,108 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect, vi } from 'vitest'; +import { toolsCommand } from './toolsCommand.js'; +import { createMockCommandContext } from '../../test-utils/mockCommandContext.js'; +import { MessageType } from '../types.js'; +import { Tool } from '@qwen-code/qwen-code-core'; + +// Mock tools for testing +const mockTools = [ + { + name: 'file-reader', + displayName: 'File Reader', + description: 'Reads files from the local system.', + schema: {}, + }, + { + name: 'code-editor', + displayName: 'Code Editor', + description: 'Edits code files.', + schema: {}, + }, +] as Tool[]; + +describe('toolsCommand', () => { + it('should display an error if the tool registry is unavailable', async () => { + const mockContext = createMockCommandContext({ + services: { + config: { + getToolRegistry: () => Promise.resolve(undefined), + }, + }, + }); + + if (!toolsCommand.action) throw new Error('Action not defined'); + await toolsCommand.action(mockContext, ''); + + expect(mockContext.ui.addItem).toHaveBeenCalledWith( + { + type: MessageType.ERROR, + text: 'Could not retrieve tool registry.', + }, + expect.any(Number), + ); + }); + + it('should display "No tools available" when none are found', async () => { + const mockContext = createMockCommandContext({ + services: { + config: { + getToolRegistry: () => + Promise.resolve({ getAllTools: () => [] as Tool[] }), + }, + }, + }); + + if (!toolsCommand.action) throw new Error('Action not defined'); + await toolsCommand.action(mockContext, ''); + + expect(mockContext.ui.addItem).toHaveBeenCalledWith( + expect.objectContaining({ + text: expect.stringContaining('No tools available'), + }), + expect.any(Number), + ); + }); + + it('should list tools without descriptions by default', async () => { + const mockContext = 
createMockCommandContext({ + services: { + config: { + getToolRegistry: () => + Promise.resolve({ getAllTools: () => mockTools }), + }, + }, + }); + + if (!toolsCommand.action) throw new Error('Action not defined'); + await toolsCommand.action(mockContext, ''); + + const message = (mockContext.ui.addItem as vi.Mock).mock.calls[0][0].text; + expect(message).not.toContain('Reads files from the local system.'); + expect(message).toContain('File Reader'); + expect(message).toContain('Code Editor'); + }); + + it('should list tools with descriptions when "desc" arg is passed', async () => { + const mockContext = createMockCommandContext({ + services: { + config: { + getToolRegistry: () => + Promise.resolve({ getAllTools: () => mockTools }), + }, + }, + }); + + if (!toolsCommand.action) throw new Error('Action not defined'); + await toolsCommand.action(mockContext, 'desc'); + + const message = (mockContext.ui.addItem as vi.Mock).mock.calls[0][0].text; + expect(message).toContain('Reads files from the local system.'); + expect(message).toContain('Edits code files.'); + }); +}); diff --git a/packages/cli/src/ui/commands/toolsCommand.ts b/packages/cli/src/ui/commands/toolsCommand.ts new file mode 100644 index 000000000..e993bab3f --- /dev/null +++ b/packages/cli/src/ui/commands/toolsCommand.ts @@ -0,0 +1,71 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { + type CommandContext, + type SlashCommand, + CommandKind, +} from './types.js'; +import { MessageType } from '../types.js'; + +export const toolsCommand: SlashCommand = { + name: 'tools', + description: 'list available Gemini CLI tools', + kind: CommandKind.BUILT_IN, + action: async (context: CommandContext, args?: string): Promise => { + const subCommand = args?.trim(); + + // Default to NOT showing descriptions. The user must opt in with an argument. 
+ let useShowDescriptions = false; + if (subCommand === 'desc' || subCommand === 'descriptions') { + useShowDescriptions = true; + } + + const toolRegistry = await context.services.config?.getToolRegistry(); + if (!toolRegistry) { + context.ui.addItem( + { + type: MessageType.ERROR, + text: 'Could not retrieve tool registry.', + }, + Date.now(), + ); + return; + } + + const tools = toolRegistry.getAllTools(); + // Filter out MCP tools by checking for the absence of a serverName property + const geminiTools = tools.filter((tool) => !('serverName' in tool)); + + let message = 'Available Gemini CLI tools:\n\n'; + + if (geminiTools.length > 0) { + geminiTools.forEach((tool) => { + if (useShowDescriptions && tool.description) { + message += ` - \u001b[36m${tool.displayName} (${tool.name})\u001b[0m:\n`; + + const greenColor = '\u001b[32m'; + const resetColor = '\u001b[0m'; + + // Handle multi-line descriptions + const descLines = tool.description.trim().split('\n'); + for (const descLine of descLines) { + message += ` ${greenColor}${descLine}${resetColor}\n`; + } + } else { + message += ` - \u001b[36m${tool.displayName}\u001b[0m\n`; + } + }); + } else { + message += ' No tools available\n'; + } + message += '\n'; + + message += '\u001b[0m'; + + context.ui.addItem({ type: MessageType.INFO, text: message }, Date.now()); + }, +}; diff --git a/packages/cli/src/ui/commands/types.ts b/packages/cli/src/ui/commands/types.ts index 4d341da07..f36aff3a4 100644 --- a/packages/cli/src/ui/commands/types.ts +++ b/packages/cli/src/ui/commands/types.ts @@ -4,13 +4,25 @@ * SPDX-License-Identifier: Apache-2.0 */ +import { Content } from '@google/genai'; +import { HistoryItemWithoutId } from '../types.js'; import { Config, GitService, Logger } from '@qwen-code/qwen-code-core'; import { LoadedSettings } from '../../config/settings.js'; import { UseHistoryManagerReturn } from '../hooks/useHistoryManager.js'; +import type { HistoryItem } from '../types.js'; import { SessionStatsState } from 
'../contexts/SessionContext.js'; // Grouped dependencies for clarity and easier mocking export interface CommandContext { + // Invocation properties for when commands are called. + invocation?: { + /** The raw, untrimmed input string from the user. */ + raw: string; + /** The primary name of the command that was matched. */ + name: string; + /** The arguments string that follows the command name. */ + args: string; + }; // Core services and configuration services: { // TODO(abhipatel12): Ensure that config is never null. @@ -21,11 +33,6 @@ export interface CommandContext { }; // UI state and history management ui: { - // TODO - As more commands are add some additions may be needed or reworked using this new context. - // Ex. - // history: HistoryItem[]; - // pendingHistoryItems: HistoryItemWithoutId[]; - /** Adds a new item to the history display. */ addItem: UseHistoryManagerReturn['addItem']; /** Clears all history items and the console screen. */ @@ -34,11 +41,30 @@ export interface CommandContext { * Sets the transient debug message displayed in the application footer in debug mode. */ setDebugMessage: (message: string) => void; + /** The currently pending history item, if any. */ + pendingItem: HistoryItemWithoutId | null; + /** + * Sets a pending item in the history, which is useful for indicating + * that a long-running operation is in progress. + * + * @param item The history item to display as pending, or `null` to clear. + */ + setPendingItem: (item: HistoryItemWithoutId | null) => void; + /** + * Loads a new set of history items, replacing the current history. + * + * @param history The array of history items to load. + */ + loadHistory: UseHistoryManagerReturn['loadHistory']; + /** Toggles a special display mode. */ + toggleCorgiMode: () => void; + toggleVimEnabled: () => Promise; }; // Session-specific data session: { stats: SessionStatsState; - resetSession: () => void; + /** A transient list of shell commands the user has approved for this session. 
*/ + sessionShellAllowlist: Set; }; } @@ -51,6 +77,12 @@ export interface ToolActionReturn { toolArgs: Record; } +/** The return type for a command action that results in the app quitting. */ +export interface QuitActionReturn { + type: 'quit'; + messages: HistoryItem[]; +} + /** * The return type for a command action that results in a simple message * being displayed to the user. @@ -66,24 +98,69 @@ export interface MessageActionReturn { */ export interface OpenDialogActionReturn { type: 'dialog'; - // TODO: Add 'theme' | 'auth' | 'editor' | 'privacy' as migration happens. - dialog: 'help' | 'auth' | 'theme' | 'privacy'; + dialog: 'help' | 'auth' | 'theme' | 'editor' | 'privacy'; +} + +/** + * The return type for a command action that results in replacing + * the entire conversation history. + */ +export interface LoadHistoryActionReturn { + type: 'load_history'; + history: HistoryItemWithoutId[]; + clientHistory: Content[]; // The history for the generative client +} + +/** + * The return type for a command action that should immediately submit + * content as a prompt to the Gemini model. + */ +export interface SubmitPromptActionReturn { + type: 'submit_prompt'; + content: string; +} + +/** + * The return type for a command action that needs to pause and request + * confirmation for a set of shell commands before proceeding. + */ +export interface ConfirmShellCommandsActionReturn { + type: 'confirm_shell_commands'; + /** The list of shell commands that require user confirmation. */ + commandsToConfirm: string[]; + /** The original invocation context to be re-run after confirmation. 
*/ + originalInvocation: { + raw: string; + }; } export type SlashCommandActionReturn = | ToolActionReturn | MessageActionReturn - | OpenDialogActionReturn; + | QuitActionReturn + | OpenDialogActionReturn + | LoadHistoryActionReturn + | SubmitPromptActionReturn + | ConfirmShellCommandsActionReturn; + +export enum CommandKind { + BUILT_IN = 'built-in', + FILE = 'file', + MCP_PROMPT = 'mcp-prompt', +} + // The standardized contract for any command in the system. export interface SlashCommand { name: string; - altName?: string; - description?: string; + altNames?: string[]; + description: string; + + kind: CommandKind; // The action to run. Optional for parent commands that only group sub-commands. action?: ( context: CommandContext, - args: string, + args: string, // TODO: Remove args. CommandContext now contains the complete invocation. ) => | void | SlashCommandActionReturn diff --git a/packages/cli/src/ui/commands/vimCommand.ts b/packages/cli/src/ui/commands/vimCommand.ts new file mode 100644 index 000000000..40e658dfb --- /dev/null +++ b/packages/cli/src/ui/commands/vimCommand.ts @@ -0,0 +1,25 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { CommandKind, SlashCommand } from './types.js'; + +export const vimCommand: SlashCommand = { + name: 'vim', + description: 'toggle vim mode on/off', + kind: CommandKind.BUILT_IN, + action: async (context, _args) => { + const newVimState = await context.ui.toggleVimEnabled(); + + const message = newVimState + ? 'Entered Vim mode. Run /vim again to exit.' 
+ : 'Exited Vim mode.'; + return { + type: 'message', + messageType: 'info', + content: message, + }; + }, +}; diff --git a/packages/cli/src/ui/components/AuthDialog.test.tsx b/packages/cli/src/ui/components/AuthDialog.test.tsx index 171912eef..5269b9a69 100644 --- a/packages/cli/src/ui/components/AuthDialog.test.tsx +++ b/packages/cli/src/ui/components/AuthDialog.test.tsx @@ -31,7 +31,7 @@ describe('AuthDialog', () => { const settings: LoadedSettings = new LoadedSettings( { - settings: {}, + settings: { customThemes: {}, mcpServers: {} }, path: '', }, { @@ -41,7 +41,7 @@ describe('AuthDialog', () => { path: '', }, { - settings: {}, + settings: { customThemes: {}, mcpServers: {} }, path: '', }, [], @@ -68,11 +68,17 @@ describe('AuthDialog', () => { { settings: { selectedAuthType: undefined, + customThemes: {}, + mcpServers: {}, }, path: '', }, { - settings: {}, + settings: { customThemes: {}, mcpServers: {} }, + path: '', + }, + { + settings: { customThemes: {}, mcpServers: {} }, path: '', }, [], @@ -95,11 +101,17 @@ describe('AuthDialog', () => { { settings: { selectedAuthType: undefined, + customThemes: {}, + mcpServers: {}, }, path: '', }, { - settings: {}, + settings: { customThemes: {}, mcpServers: {} }, + path: '', + }, + { + settings: { customThemes: {}, mcpServers: {} }, path: '', }, [], @@ -122,11 +134,17 @@ describe('AuthDialog', () => { { settings: { selectedAuthType: undefined, + customThemes: {}, + mcpServers: {}, }, path: '', }, { - settings: {}, + settings: { customThemes: {}, mcpServers: {} }, + path: '', + }, + { + settings: { customThemes: {}, mcpServers: {} }, path: '', }, [], @@ -144,17 +162,23 @@ describe('AuthDialog', () => { describe('GEMINI_DEFAULT_AUTH_TYPE environment variable', () => { it('should select the auth type specified by GEMINI_DEFAULT_AUTH_TYPE', () => { - process.env.GEMINI_DEFAULT_AUTH_TYPE = AuthType.LOGIN_WITH_GOOGLE; + process.env.GEMINI_DEFAULT_AUTH_TYPE = AuthType.USE_OPENAI; const settings: LoadedSettings = new 
LoadedSettings( { settings: { selectedAuthType: undefined, + customThemes: {}, + mcpServers: {}, }, path: '', }, { - settings: {}, + settings: { customThemes: {}, mcpServers: {} }, + path: '', + }, + { + settings: { customThemes: {}, mcpServers: {} }, path: '', }, [], @@ -164,8 +188,8 @@ describe('AuthDialog', () => { {}} settings={settings} />, ); - // Since only OpenAI is available, it should be selected by default - expect(lastFrame()).toContain('● OpenAI'); + // This is a bit brittle, but it's the best way to check which item is selected. + expect(lastFrame()).toContain('● 1. OpenAI'); }); it('should fall back to default if GEMINI_DEFAULT_AUTH_TYPE is not set', () => { @@ -173,11 +197,17 @@ describe('AuthDialog', () => { { settings: { selectedAuthType: undefined, + customThemes: {}, + mcpServers: {}, }, path: '', }, { - settings: {}, + settings: { customThemes: {}, mcpServers: {} }, + path: '', + }, + { + settings: { customThemes: {}, mcpServers: {} }, path: '', }, [], @@ -187,8 +217,8 @@ describe('AuthDialog', () => { {}} settings={settings} />, ); - // Default is OpenAI (the only option) - expect(lastFrame()).toContain('● OpenAI'); + // Default is OpenAI (only option available) + expect(lastFrame()).toContain('● 1. OpenAI'); }); it('should show an error and fall back to default if GEMINI_DEFAULT_AUTH_TYPE is invalid', () => { @@ -198,11 +228,17 @@ describe('AuthDialog', () => { { settings: { selectedAuthType: undefined, + customThemes: {}, + mcpServers: {}, }, path: '', }, { - settings: {}, + settings: { customThemes: {}, mcpServers: {} }, + path: '', + }, + { + settings: { customThemes: {}, mcpServers: {} }, path: '', }, [], @@ -214,7 +250,7 @@ describe('AuthDialog', () => { // Since the auth dialog doesn't show GEMINI_DEFAULT_AUTH_TYPE errors anymore, // it will just show the default OpenAI option - expect(lastFrame()).toContain('● OpenAI'); + expect(lastFrame()).toContain('● 1. 
OpenAI'); }); }); @@ -259,11 +295,19 @@ describe('AuthDialog', () => { const onSelect = vi.fn(); const settings: LoadedSettings = new LoadedSettings( { - settings: {}, + settings: { customThemes: {}, mcpServers: {} }, path: '', }, { - settings: {}, + settings: { + selectedAuthType: undefined, + customThemes: {}, + mcpServers: {}, + }, + path: '', + }, + { + settings: { customThemes: {}, mcpServers: {} }, path: '', }, [], @@ -293,17 +337,19 @@ describe('AuthDialog', () => { const onSelect = vi.fn(); const settings: LoadedSettings = new LoadedSettings( { - settings: {}, + settings: { customThemes: {}, mcpServers: {} }, path: '', }, { settings: { selectedAuthType: AuthType.USE_GEMINI, + customThemes: {}, + mcpServers: {}, }, path: '', }, { - settings: {}, + settings: { customThemes: {}, mcpServers: {} }, path: '', }, [], diff --git a/packages/cli/src/ui/components/AuthDialog.tsx b/packages/cli/src/ui/components/AuthDialog.tsx index 5888fb7ef..ab5ddf81e 100644 --- a/packages/cli/src/ui/components/AuthDialog.tsx +++ b/packages/cli/src/ui/components/AuthDialog.tsx @@ -102,7 +102,6 @@ export function AuthDialog({ }; useInput((_input, key) => { - // 当显示 OpenAIKeyPrompt 时,不处理输入事件 if (showOpenAIKeyPrompt) { return; } diff --git a/packages/cli/src/ui/components/ContextSummaryDisplay.tsx b/packages/cli/src/ui/components/ContextSummaryDisplay.tsx index a31d39e89..ef281f5ff 100644 --- a/packages/cli/src/ui/components/ContextSummaryDisplay.tsx +++ b/packages/cli/src/ui/components/ContextSummaryDisplay.tsx @@ -7,27 +7,48 @@ import React from 'react'; import { Text } from 'ink'; import { Colors } from '../colors.js'; -import { type MCPServerConfig } from '@qwen-code/qwen-code-core'; +import { + type OpenFiles, + type MCPServerConfig, +} from '@qwen-code/qwen-code-core'; interface ContextSummaryDisplayProps { geminiMdFileCount: number; contextFileNames: string[]; mcpServers?: Record; + blockedMcpServers?: Array<{ name: string; extensionName: string }>; showToolDescriptions?: 
boolean; + openFiles?: OpenFiles; } export const ContextSummaryDisplay: React.FC = ({ geminiMdFileCount, contextFileNames, mcpServers, + blockedMcpServers, showToolDescriptions, + openFiles, }) => { const mcpServerCount = Object.keys(mcpServers || {}).length; + const blockedMcpServerCount = blockedMcpServers?.length || 0; - if (geminiMdFileCount === 0 && mcpServerCount === 0) { + if ( + geminiMdFileCount === 0 && + mcpServerCount === 0 && + blockedMcpServerCount === 0 && + (openFiles?.recentOpenFiles?.length ?? 0) === 0 + ) { return ; // Render an empty space to reserve height } + const recentFilesText = (() => { + const count = openFiles?.recentOpenFiles?.length ?? 0; + if (count === 0) { + return ''; + } + return `${count} recent file${count > 1 ? 's' : ''} (ctrl+e to view)`; + })(); + const geminiMdText = (() => { if (geminiMdFileCount === 0) { return ''; @@ -39,27 +60,47 @@ export const ContextSummaryDisplay: React.FC = ({ }`; })(); - const mcpText = - mcpServerCount > 0 - ? `${mcpServerCount} MCP server${mcpServerCount > 1 ? 's' : ''}` - : ''; + const mcpText = (() => { + if (mcpServerCount === 0 && blockedMcpServerCount === 0) { + return ''; + } - let summaryText = 'Using '; - if (geminiMdText) { - summaryText += geminiMdText; + const parts = []; + if (mcpServerCount > 0) { + parts.push( + `${mcpServerCount} MCP server${mcpServerCount > 1 ? 's' : ''}`, + ); + } + + if (blockedMcpServerCount > 0) { + let blockedText = `${blockedMcpServerCount} Blocked`; + if (mcpServerCount === 0) { + blockedText += ` MCP server${blockedMcpServerCount > 1 ? 
's' : ''}`; + } + parts.push(blockedText); + } + return parts.join(', '); + })(); + + let summaryText = 'Using: '; + const summaryParts = []; + if (recentFilesText) { + summaryParts.push(recentFilesText); } - if (geminiMdText && mcpText) { - summaryText += ' and '; + if (geminiMdText) { + summaryParts.push(geminiMdText); } if (mcpText) { - summaryText += mcpText; - // Add ctrl+t hint when MCP servers are available - if (mcpServers && Object.keys(mcpServers).length > 0) { - if (showToolDescriptions) { - summaryText += ' (ctrl+t to toggle)'; - } else { - summaryText += ' (ctrl+t to view)'; - } + summaryParts.push(mcpText); + } + summaryText += summaryParts.join(' | '); + + // Add ctrl+t hint when MCP servers are available + if (mcpServers && Object.keys(mcpServers).length > 0) { + if (showToolDescriptions) { + summaryText += ' (ctrl+t to toggle)'; + } else { + summaryText += ' (ctrl+t to view)'; } } diff --git a/packages/cli/src/ui/components/Footer.tsx b/packages/cli/src/ui/components/Footer.tsx index cdcc68793..af2fb9b3d 100644 --- a/packages/cli/src/ui/components/Footer.tsx +++ b/packages/cli/src/ui/components/Footer.tsx @@ -29,6 +29,7 @@ interface FooterProps { showMemoryUsage?: boolean; promptTokenCount: number; nightly: boolean; + vimMode?: string; } export const Footer: React.FC = ({ @@ -43,13 +44,15 @@ export const Footer: React.FC = ({ showMemoryUsage, promptTokenCount, nightly, + vimMode, }) => { const limit = tokenLimit(model); const percentage = promptTokenCount / limit; return ( - + + {vimMode && [{vimMode}] } {nightly ? ( @@ -83,7 +86,7 @@ export const Footer: React.FC = ({ ) : process.env.SANDBOX === 'sandbox-exec' ? 
( - MacOS Seatbelt{' '} + macOS Seatbelt{' '} ({process.env.SEATBELT_PROFILE}) ) : ( diff --git a/packages/cli/src/ui/components/Header.tsx b/packages/cli/src/ui/components/Header.tsx index b99382e03..4038e4154 100644 --- a/packages/cli/src/ui/components/Header.tsx +++ b/packages/cli/src/ui/components/Header.tsx @@ -38,7 +38,6 @@ export const Header: React.FC = ({ return ( = ({ commands }) => ( diff --git a/packages/cli/src/ui/components/IDEContextDetailDisplay.tsx b/packages/cli/src/ui/components/IDEContextDetailDisplay.tsx new file mode 100644 index 000000000..0568ac917 --- /dev/null +++ b/packages/cli/src/ui/components/IDEContextDetailDisplay.tsx @@ -0,0 +1,52 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { Box, Text } from 'ink'; +import { type OpenFiles } from '@qwen-code/qwen-code-core'; +import { Colors } from '../colors.js'; +import path from 'node:path'; + +interface IDEContextDetailDisplayProps { + openFiles: OpenFiles | undefined; +} + +export function IDEContextDetailDisplay({ + openFiles, +}: IDEContextDetailDisplayProps) { + if ( + !openFiles || + !openFiles.recentOpenFiles || + openFiles.recentOpenFiles.length === 0 + ) { + return null; + } + const recentFiles = openFiles.recentOpenFiles || []; + + return ( + + + IDE Context (ctrl+e to toggle) + + {recentFiles.length > 0 && ( + + Recent files: + {recentFiles.map((file) => ( + + - {path.basename(file.filePath)} + {file.filePath === openFiles.activeFile ? 
' (active)' : ''} + + ))} + + )} + + ); +} diff --git a/packages/cli/src/ui/components/InputPrompt.test.tsx b/packages/cli/src/ui/components/InputPrompt.test.tsx index 3bd0ef654..b5246dc41 100644 --- a/packages/cli/src/ui/components/InputPrompt.test.tsx +++ b/packages/cli/src/ui/components/InputPrompt.test.tsx @@ -8,11 +8,22 @@ import { render } from 'ink-testing-library'; import { InputPrompt, InputPromptProps } from './InputPrompt.js'; import type { TextBuffer } from './shared/text-buffer.js'; import { Config } from '@qwen-code/qwen-code-core'; -import { CommandContext, SlashCommand } from '../commands/types.js'; -import { vi } from 'vitest'; -import { useShellHistory } from '../hooks/useShellHistory.js'; -import { useCompletion } from '../hooks/useCompletion.js'; -import { useInputHistory } from '../hooks/useInputHistory.js'; +import * as path from 'path'; +import { + CommandContext, + SlashCommand, + CommandKind, +} from '../commands/types.js'; +import { describe, it, expect, beforeEach, vi } from 'vitest'; +import { + useShellHistory, + UseShellHistoryReturn, +} from '../hooks/useShellHistory.js'; +import { useCompletion, UseCompletionReturn } from '../hooks/useCompletion.js'; +import { + useInputHistory, + UseInputHistoryReturn, +} from '../hooks/useInputHistory.js'; import * as clipboardUtils from '../utils/clipboardUtils.js'; import { createMockCommandContext } from '../../test-utils/mockCommandContext.js'; @@ -21,28 +32,47 @@ vi.mock('../hooks/useCompletion.js'); vi.mock('../hooks/useInputHistory.js'); vi.mock('../utils/clipboardUtils.js'); -type MockedUseShellHistory = ReturnType; -type MockedUseCompletion = ReturnType; -type MockedUseInputHistory = ReturnType; - const mockSlashCommands: SlashCommand[] = [ - { name: 'clear', description: 'Clear screen', action: vi.fn() }, + { + name: 'clear', + kind: CommandKind.BUILT_IN, + description: 'Clear screen', + action: vi.fn(), + }, { name: 'memory', + kind: CommandKind.BUILT_IN, description: 'Manage memory', 
subCommands: [ - { name: 'show', description: 'Show memory', action: vi.fn() }, - { name: 'add', description: 'Add to memory', action: vi.fn() }, - { name: 'refresh', description: 'Refresh memory', action: vi.fn() }, + { + name: 'show', + kind: CommandKind.BUILT_IN, + description: 'Show memory', + action: vi.fn(), + }, + { + name: 'add', + kind: CommandKind.BUILT_IN, + description: 'Add to memory', + action: vi.fn(), + }, + { + name: 'refresh', + kind: CommandKind.BUILT_IN, + description: 'Refresh memory', + action: vi.fn(), + }, ], }, { name: 'chat', description: 'Manage chats', + kind: CommandKind.BUILT_IN, subCommands: [ { name: 'resume', description: 'Resume a chat', + kind: CommandKind.BUILT_IN, action: vi.fn(), completion: async () => ['fix-foo', 'fix-bar'], }, @@ -52,9 +82,9 @@ const mockSlashCommands: SlashCommand[] = [ describe('InputPrompt', () => { let props: InputPromptProps; - let mockShellHistory: MockedUseShellHistory; - let mockCompletion: MockedUseCompletion; - let mockInputHistory: MockedUseInputHistory; + let mockShellHistory: UseShellHistoryReturn; + let mockCompletion: UseCompletionReturn; + let mockInputHistory: UseInputHistoryReturn; let mockBuffer: TextBuffer; let mockCommandContext: CommandContext; @@ -91,6 +121,15 @@ describe('InputPrompt', () => { openInExternalEditor: vi.fn(), newline: vi.fn(), backspace: vi.fn(), + preferredCol: null, + selectionAnchor: null, + insert: vi.fn(), + del: vi.fn(), + undo: vi.fn(), + redo: vi.fn(), + replaceRange: vi.fn(), + deleteWordLeft: vi.fn(), + deleteWordRight: vi.fn(), } as unknown as TextBuffer; mockShellHistory = { @@ -107,11 +146,13 @@ describe('InputPrompt', () => { isLoadingSuggestions: false, showSuggestions: false, visibleStartIndex: 0, + isPerfectMatch: false, navigateUp: vi.fn(), navigateDown: vi.fn(), resetCompletionState: vi.fn(), setActiveSuggestionIndex: vi.fn(), setShowSuggestions: vi.fn(), + handleAutocomplete: vi.fn(), }; mockedUseCompletion.mockReturnValue(mockCompletion); @@ -128,10 
+169,11 @@ describe('InputPrompt', () => { userMessages: [], onClearScreen: vi.fn(), config: { - getProjectRoot: () => '/test/project', - getTargetDir: () => '/test/project/src', + getProjectRoot: () => path.join('test', 'project'), + getTargetDir: () => path.join('test', 'project', 'src'), + getVimMode: () => false, } as unknown as Config, - slashCommands: [], + slashCommands: mockSlashCommands, commandContext: mockCommandContext, shellModeActive: false, setShellModeActive: vi.fn(), @@ -139,8 +181,6 @@ describe('InputPrompt', () => { suggestionsWidth: 80, focus: true, }; - - props.slashCommands = mockSlashCommands; }); const wait = (ms = 50) => new Promise((resolve) => setTimeout(resolve, ms)); @@ -148,10 +188,10 @@ describe('InputPrompt', () => { it('should call shellHistory.getPreviousCommand on up arrow in shell mode', async () => { props.shellModeActive = true; const { stdin, unmount } = render(); - await wait(100); // Increased wait time for CI environment + await wait(); stdin.write('\u001B[A'); - await wait(100); // Increased wait time to ensure input is processed + await wait(); expect(mockShellHistory.getPreviousCommand).toHaveBeenCalled(); unmount(); @@ -160,10 +200,10 @@ describe('InputPrompt', () => { it('should call shellHistory.getNextCommand on down arrow in shell mode', async () => { props.shellModeActive = true; const { stdin, unmount } = render(); - await wait(100); // Increased wait time for CI environment + await wait(); stdin.write('\u001B[B'); - await wait(100); // Increased wait time to ensure input is processed + await wait(); expect(mockShellHistory.getNextCommand).toHaveBeenCalled(); unmount(); @@ -175,10 +215,10 @@ describe('InputPrompt', () => { 'previous command', ); const { stdin, unmount } = render(); - await wait(100); // Increased wait time for CI environment + await wait(); stdin.write('\u001B[A'); - await wait(100); // Increased wait time to ensure input is processed + await wait(); 
expect(mockShellHistory.getPreviousCommand).toHaveBeenCalled(); expect(props.buffer.setText).toHaveBeenCalledWith('previous command'); @@ -221,6 +261,83 @@ describe('InputPrompt', () => { unmount(); }); + it('should call completion.navigateUp for both up arrow and Ctrl+P when suggestions are showing', async () => { + mockedUseCompletion.mockReturnValue({ + ...mockCompletion, + showSuggestions: true, + suggestions: [ + { label: 'memory', value: 'memory' }, + { label: 'memcache', value: 'memcache' }, + ], + }); + + props.buffer.setText('/mem'); + + const { stdin, unmount } = render(); + await wait(); + + // Test up arrow + stdin.write('\u001B[A'); // Up arrow + await wait(); + + stdin.write('\u0010'); // Ctrl+P + await wait(); + expect(mockCompletion.navigateUp).toHaveBeenCalledTimes(2); + expect(mockCompletion.navigateDown).not.toHaveBeenCalled(); + + unmount(); + }); + + it('should call completion.navigateDown for both down arrow and Ctrl+N when suggestions are showing', async () => { + mockedUseCompletion.mockReturnValue({ + ...mockCompletion, + showSuggestions: true, + suggestions: [ + { label: 'memory', value: 'memory' }, + { label: 'memcache', value: 'memcache' }, + ], + }); + props.buffer.setText('/mem'); + + const { stdin, unmount } = render(); + await wait(); + + // Test down arrow + stdin.write('\u001B[B'); // Down arrow + await wait(); + + stdin.write('\u000E'); // Ctrl+N + await wait(); + expect(mockCompletion.navigateDown).toHaveBeenCalledTimes(2); + expect(mockCompletion.navigateUp).not.toHaveBeenCalled(); + + unmount(); + }); + + it('should NOT call completion navigation when suggestions are not showing', async () => { + mockedUseCompletion.mockReturnValue({ + ...mockCompletion, + showSuggestions: false, + }); + props.buffer.setText('some text'); + + const { stdin, unmount } = render(); + await wait(); + + stdin.write('\u001B[A'); // Up arrow + await wait(); + stdin.write('\u001B[B'); // Down arrow + await wait(); + stdin.write('\u0010'); // Ctrl+P + 
await wait(); + stdin.write('\u000E'); // Ctrl+N + await wait(); + + expect(mockCompletion.navigateUp).not.toHaveBeenCalled(); + expect(mockCompletion.navigateDown).not.toHaveBeenCalled(); + unmount(); + }); + describe('clipboard image paste', () => { beforeEach(() => { vi.mocked(clipboardUtils.clipboardHasImage).mockResolvedValue(false); @@ -285,10 +402,13 @@ describe('InputPrompt', () => { }); it('should insert image path at cursor position with proper spacing', async () => { - vi.mocked(clipboardUtils.clipboardHasImage).mockResolvedValue(true); - vi.mocked(clipboardUtils.saveClipboardImage).mockResolvedValue( - '/test/.gemini-clipboard/clipboard-456.png', + const imagePath = path.join( + 'test', + '.gemini-clipboard', + 'clipboard-456.png', ); + vi.mocked(clipboardUtils.clipboardHasImage).mockResolvedValue(true); + vi.mocked(clipboardUtils.saveClipboardImage).mockResolvedValue(imagePath); // Set initial text and cursor position mockBuffer.text = 'Hello world'; @@ -310,9 +430,9 @@ describe('InputPrompt', () => { .calls[0]; expect(actualCall[0]).toBe(5); // start offset expect(actualCall[1]).toBe(5); // end offset - expect(actualCall[2]).toMatch( - /@.*\.gemini-clipboard\/clipboard-456\.png/, - ); // flexible path match + expect(actualCall[2]).toBe( + ' @' + path.relative(path.join('test', 'project', 'src'), imagePath), + ); unmount(); }); @@ -341,7 +461,7 @@ describe('InputPrompt', () => { }); }); - it('should complete a partial parent command and add a space', async () => { + it('should complete a partial parent command', async () => { // SCENARIO: /mem -> Tab mockedUseCompletion.mockReturnValue({ ...mockCompletion, @@ -357,12 +477,12 @@ describe('InputPrompt', () => { stdin.write('\t'); // Press Tab await wait(); - expect(props.buffer.setText).toHaveBeenCalledWith('/memory '); + expect(mockCompletion.handleAutocomplete).toHaveBeenCalledWith(0); unmount(); }); - it('should append a sub-command when the parent command is already complete with a space', async () 
=> { - // SCENARIO: /memory -> Tab (to accept 'add') + it('should append a sub-command when the parent command is already complete', async () => { + // SCENARIO: /memory -> Tab (to accept 'add') mockedUseCompletion.mockReturnValue({ ...mockCompletion, showSuggestions: true, @@ -380,13 +500,12 @@ describe('InputPrompt', () => { stdin.write('\t'); // Press Tab await wait(); - expect(props.buffer.setText).toHaveBeenCalledWith('/memory add '); + expect(mockCompletion.handleAutocomplete).toHaveBeenCalledWith(1); unmount(); }); it('should handle the "backspace" edge case correctly', async () => { - // SCENARIO: /memory -> Backspace -> /memory -> Tab (to accept 'show') - // This is the critical bug we fixed. + // SCENARIO: /memory -> Backspace -> /memory -> Tab (to accept 'show') mockedUseCompletion.mockReturnValue({ ...mockCompletion, showSuggestions: true, @@ -405,8 +524,8 @@ describe('InputPrompt', () => { stdin.write('\t'); // Press Tab await wait(); - // It should NOT become '/show '. It should correctly become '/memory show '. - expect(props.buffer.setText).toHaveBeenCalledWith('/memory show '); + // It should NOT become '/show'. It should correctly become '/memory show'. + expect(mockCompletion.handleAutocomplete).toHaveBeenCalledWith(0); unmount(); }); @@ -426,7 +545,7 @@ describe('InputPrompt', () => { stdin.write('\t'); // Press Tab await wait(); - expect(props.buffer.setText).toHaveBeenCalledWith('/chat resume fix-foo '); + expect(mockCompletion.handleAutocomplete).toHaveBeenCalledWith(0); unmount(); }); @@ -446,19 +565,21 @@ describe('InputPrompt', () => { await wait(); // The app should autocomplete the text, NOT submit. 
- expect(props.buffer.setText).toHaveBeenCalledWith('/memory '); + expect(mockCompletion.handleAutocomplete).toHaveBeenCalledWith(0); expect(props.onSubmit).not.toHaveBeenCalled(); unmount(); }); - it('should complete a command based on its altName', async () => { - // Add a command with an altName to our mock for this test - props.slashCommands.push({ - name: 'help', - altName: '?', - description: '...', - }); + it('should complete a command based on its altNames', async () => { + props.slashCommands = [ + { + name: 'help', + altNames: ['?'], + kind: CommandKind.BUILT_IN, + description: '...', + }, + ]; mockedUseCompletion.mockReturnValue({ ...mockCompletion, @@ -471,10 +592,10 @@ describe('InputPrompt', () => { const { stdin, unmount } = render(); await wait(); - stdin.write('\t'); // Press Tab + stdin.write('\t'); // Press Tab for autocomplete await wait(); - expect(props.buffer.setText).toHaveBeenCalledWith('/help '); + expect(mockCompletion.handleAutocomplete).toHaveBeenCalledWith(0); unmount(); }); @@ -491,10 +612,29 @@ describe('InputPrompt', () => { unmount(); }); - it('should submit directly on Enter when a complete leaf command is typed', async () => { + it('should submit directly on Enter when isPerfectMatch is true', async () => { mockedUseCompletion.mockReturnValue({ ...mockCompletion, showSuggestions: false, + isPerfectMatch: true, + }); + props.buffer.setText('/clear'); + + const { stdin, unmount } = render(); + await wait(); + + stdin.write('\r'); + await wait(); + + expect(props.onSubmit).toHaveBeenCalledWith('/clear'); + unmount(); + }); + + it('should submit directly on Enter when a complete leaf command is typed', async () => { + mockedUseCompletion.mockReturnValue({ + ...mockCompletion, + showSuggestions: false, + isPerfectMatch: false, // Added explicit isPerfectMatch false }); props.buffer.setText('/clear'); @@ -505,7 +645,6 @@ describe('InputPrompt', () => { await wait(); expect(props.onSubmit).toHaveBeenCalledWith('/clear'); - 
expect(props.buffer.setText).not.toHaveBeenCalledWith('/clear '); unmount(); }); @@ -524,13 +663,16 @@ describe('InputPrompt', () => { stdin.write('\r'); await wait(); - expect(props.buffer.replaceRangeByOffset).toHaveBeenCalled(); + expect(mockCompletion.handleAutocomplete).toHaveBeenCalledWith(0); expect(props.onSubmit).not.toHaveBeenCalled(); unmount(); }); it('should add a newline on enter when the line ends with a backslash', async () => { - props.buffer.setText('first line\\'); + // This test simulates multi-line input, not submission + mockBuffer.text = 'first line\\'; + mockBuffer.cursor = [0, 11]; + mockBuffer.lines = ['first line\\']; const { stdin, unmount } = render(); await wait(); @@ -543,4 +685,471 @@ describe('InputPrompt', () => { expect(props.buffer.newline).toHaveBeenCalled(); unmount(); }); + + it('should clear the buffer on Ctrl+C if it has text', async () => { + props.buffer.setText('some text to clear'); + const { stdin, unmount } = render(); + await wait(); + + stdin.write('\x03'); // Ctrl+C character + await wait(); + + expect(props.buffer.setText).toHaveBeenCalledWith(''); + expect(mockCompletion.resetCompletionState).toHaveBeenCalled(); + expect(props.onSubmit).not.toHaveBeenCalled(); + unmount(); + }); + + it('should NOT clear the buffer on Ctrl+C if it is empty', async () => { + props.buffer.text = ''; + const { stdin, unmount } = render(); + await wait(); + + stdin.write('\x03'); // Ctrl+C character + await wait(); + + expect(props.buffer.setText).not.toHaveBeenCalled(); + unmount(); + }); + + describe('cursor-based completion trigger', () => { + it('should trigger completion when cursor is after @ without spaces', async () => { + // Set up buffer state + mockBuffer.text = '@src/components'; + mockBuffer.lines = ['@src/components']; + mockBuffer.cursor = [0, 15]; + + mockedUseCompletion.mockReturnValue({ + ...mockCompletion, + showSuggestions: true, + suggestions: [{ label: 'Button.tsx', value: 'Button.tsx' }], + }); + + const { 
unmount } = render(); + await wait(); + + // Verify useCompletion was called with correct signature + expect(mockedUseCompletion).toHaveBeenCalledWith( + mockBuffer, + path.join('test', 'project', 'src'), + mockSlashCommands, + mockCommandContext, + expect.any(Object), + ); + + unmount(); + }); + + it('should trigger completion when cursor is after / without spaces', async () => { + mockBuffer.text = '/memory'; + mockBuffer.lines = ['/memory']; + mockBuffer.cursor = [0, 7]; + + mockedUseCompletion.mockReturnValue({ + ...mockCompletion, + showSuggestions: true, + suggestions: [{ label: 'show', value: 'show' }], + }); + + const { unmount } = render(); + await wait(); + + expect(mockedUseCompletion).toHaveBeenCalledWith( + mockBuffer, + path.join('test', 'project', 'src'), + mockSlashCommands, + mockCommandContext, + expect.any(Object), + ); + + unmount(); + }); + + it('should NOT trigger completion when cursor is after space following @', async () => { + mockBuffer.text = '@src/file.ts hello'; + mockBuffer.lines = ['@src/file.ts hello']; + mockBuffer.cursor = [0, 18]; + + mockedUseCompletion.mockReturnValue({ + ...mockCompletion, + showSuggestions: false, + suggestions: [], + }); + + const { unmount } = render(); + await wait(); + + expect(mockedUseCompletion).toHaveBeenCalledWith( + mockBuffer, + path.join('test', 'project', 'src'), + mockSlashCommands, + mockCommandContext, + expect.any(Object), + ); + + unmount(); + }); + + it('should NOT trigger completion when cursor is after space following /', async () => { + mockBuffer.text = '/memory add'; + mockBuffer.lines = ['/memory add']; + mockBuffer.cursor = [0, 11]; + + mockedUseCompletion.mockReturnValue({ + ...mockCompletion, + showSuggestions: false, + suggestions: [], + }); + + const { unmount } = render(); + await wait(); + + expect(mockedUseCompletion).toHaveBeenCalledWith( + mockBuffer, + path.join('test', 'project', 'src'), + mockSlashCommands, + mockCommandContext, + expect.any(Object), + ); + + unmount(); + 
}); + + it('should NOT trigger completion when cursor is not after @ or /', async () => { + mockBuffer.text = 'hello world'; + mockBuffer.lines = ['hello world']; + mockBuffer.cursor = [0, 5]; + + mockedUseCompletion.mockReturnValue({ + ...mockCompletion, + showSuggestions: false, + suggestions: [], + }); + + const { unmount } = render(); + await wait(); + + expect(mockedUseCompletion).toHaveBeenCalledWith( + mockBuffer, + path.join('test', 'project', 'src'), + mockSlashCommands, + mockCommandContext, + expect.any(Object), + ); + + unmount(); + }); + + it('should handle multiline text correctly', async () => { + mockBuffer.text = 'first line\n/memory'; + mockBuffer.lines = ['first line', '/memory']; + mockBuffer.cursor = [1, 7]; + + mockedUseCompletion.mockReturnValue({ + ...mockCompletion, + showSuggestions: false, + suggestions: [], + }); + + const { unmount } = render(); + await wait(); + + // Verify useCompletion was called with the buffer + expect(mockedUseCompletion).toHaveBeenCalledWith( + mockBuffer, + path.join('test', 'project', 'src'), + mockSlashCommands, + mockCommandContext, + expect.any(Object), + ); + + unmount(); + }); + + it('should handle single line slash command correctly', async () => { + mockBuffer.text = '/memory'; + mockBuffer.lines = ['/memory']; + mockBuffer.cursor = [0, 7]; + + mockedUseCompletion.mockReturnValue({ + ...mockCompletion, + showSuggestions: true, + suggestions: [{ label: 'show', value: 'show' }], + }); + + const { unmount } = render(); + await wait(); + + expect(mockedUseCompletion).toHaveBeenCalledWith( + mockBuffer, + path.join('test', 'project', 'src'), + mockSlashCommands, + mockCommandContext, + expect.any(Object), + ); + + unmount(); + }); + + it('should handle Unicode characters (emojis) correctly in paths', async () => { + // Test with emoji in path after @ + mockBuffer.text = '@src/file👍.txt'; + mockBuffer.lines = ['@src/file👍.txt']; + mockBuffer.cursor = [0, 14]; // After the emoji character + + 
mockedUseCompletion.mockReturnValue({ + ...mockCompletion, + showSuggestions: true, + suggestions: [{ label: 'file👍.txt', value: 'file👍.txt' }], + }); + + const { unmount } = render(); + await wait(); + + expect(mockedUseCompletion).toHaveBeenCalledWith( + mockBuffer, + path.join('test', 'project', 'src'), + mockSlashCommands, + mockCommandContext, + expect.any(Object), + ); + + unmount(); + }); + + it('should handle Unicode characters with spaces after them', async () => { + // Test with emoji followed by space - should NOT trigger completion + mockBuffer.text = '@src/file👍.txt hello'; + mockBuffer.lines = ['@src/file👍.txt hello']; + mockBuffer.cursor = [0, 20]; // After the space + + mockedUseCompletion.mockReturnValue({ + ...mockCompletion, + showSuggestions: false, + suggestions: [], + }); + + const { unmount } = render(); + await wait(); + + expect(mockedUseCompletion).toHaveBeenCalledWith( + mockBuffer, + path.join('test', 'project', 'src'), + mockSlashCommands, + mockCommandContext, + expect.any(Object), + ); + + unmount(); + }); + + it('should handle escaped spaces in paths correctly', async () => { + // Test with escaped space in path - should trigger completion + mockBuffer.text = '@src/my\\ file.txt'; + mockBuffer.lines = ['@src/my\\ file.txt']; + mockBuffer.cursor = [0, 16]; // After the escaped space and filename + + mockedUseCompletion.mockReturnValue({ + ...mockCompletion, + showSuggestions: true, + suggestions: [{ label: 'my file.txt', value: 'my file.txt' }], + }); + + const { unmount } = render(); + await wait(); + + expect(mockedUseCompletion).toHaveBeenCalledWith( + mockBuffer, + path.join('test', 'project', 'src'), + mockSlashCommands, + mockCommandContext, + expect.any(Object), + ); + + unmount(); + }); + + it('should NOT trigger completion after unescaped space following escaped space', async () => { + // Test: @path/my\ file.txt hello (unescaped space after escaped space) + mockBuffer.text = '@path/my\\ file.txt hello'; + mockBuffer.lines = 
['@path/my\\ file.txt hello']; + mockBuffer.cursor = [0, 24]; // After "hello" + + mockedUseCompletion.mockReturnValue({ + ...mockCompletion, + showSuggestions: false, + suggestions: [], + }); + + const { unmount } = render(); + await wait(); + + expect(mockedUseCompletion).toHaveBeenCalledWith( + mockBuffer, + path.join('test', 'project', 'src'), + mockSlashCommands, + mockCommandContext, + expect.any(Object), + ); + + unmount(); + }); + + it('should handle multiple escaped spaces in paths', async () => { + // Test with multiple escaped spaces + mockBuffer.text = '@docs/my\\ long\\ file\\ name.md'; + mockBuffer.lines = ['@docs/my\\ long\\ file\\ name.md']; + mockBuffer.cursor = [0, 29]; // At the end + + mockedUseCompletion.mockReturnValue({ + ...mockCompletion, + showSuggestions: true, + suggestions: [ + { label: 'my long file name.md', value: 'my long file name.md' }, + ], + }); + + const { unmount } = render(); + await wait(); + + expect(mockedUseCompletion).toHaveBeenCalledWith( + mockBuffer, + path.join('test', 'project', 'src'), + mockSlashCommands, + mockCommandContext, + expect.any(Object), + ); + + unmount(); + }); + + it('should handle escaped spaces in slash commands', async () => { + // Test escaped spaces with slash commands (though less common) + mockBuffer.text = '/memory\\ test'; + mockBuffer.lines = ['/memory\\ test']; + mockBuffer.cursor = [0, 13]; // At the end + + mockedUseCompletion.mockReturnValue({ + ...mockCompletion, + showSuggestions: true, + suggestions: [{ label: 'test-command', value: 'test-command' }], + }); + + const { unmount } = render(); + await wait(); + + expect(mockedUseCompletion).toHaveBeenCalledWith( + mockBuffer, + path.join('test', 'project', 'src'), + mockSlashCommands, + mockCommandContext, + expect.any(Object), + ); + + unmount(); + }); + + it('should handle Unicode characters with escaped spaces', async () => { + // Test combining Unicode and escaped spaces + mockBuffer.text = '@' + path.join('files', 'emoji\\ 👍\\ 
test.txt'); + mockBuffer.lines = ['@' + path.join('files', 'emoji\\ 👍\\ test.txt')]; + mockBuffer.cursor = [0, 25]; // After the escaped space and emoji + + mockedUseCompletion.mockReturnValue({ + ...mockCompletion, + showSuggestions: true, + suggestions: [ + { label: 'emoji 👍 test.txt', value: 'emoji 👍 test.txt' }, + ], + }); + + const { unmount } = render(); + await wait(); + + expect(mockedUseCompletion).toHaveBeenCalledWith( + mockBuffer, + path.join('test', 'project', 'src'), + mockSlashCommands, + mockCommandContext, + expect.any(Object), + ); + + unmount(); + }); + }); + + describe('vim mode', () => { + it('should not call buffer.handleInput when vim mode is enabled and vim handles the input', async () => { + props.vimModeEnabled = true; + props.vimHandleInput = vi.fn().mockReturnValue(true); // Mock that vim handled it. + const { stdin, unmount } = render(); + await wait(); + + stdin.write('i'); + await wait(); + + expect(props.vimHandleInput).toHaveBeenCalled(); + expect(mockBuffer.handleInput).not.toHaveBeenCalled(); + unmount(); + }); + + it('should call buffer.handleInput when vim mode is enabled but vim does not handle the input', async () => { + props.vimModeEnabled = true; + props.vimHandleInput = vi.fn().mockReturnValue(false); // Mock that vim did NOT handle it. 
+ const { stdin, unmount } = render(); + await wait(); + + stdin.write('i'); + await wait(); + + expect(props.vimHandleInput).toHaveBeenCalled(); + expect(mockBuffer.handleInput).toHaveBeenCalled(); + unmount(); + }); + + it('should call handleInput when vim mode is disabled', async () => { + // Mock vimHandleInput to return false (vim didn't handle the input) + props.vimHandleInput = vi.fn().mockReturnValue(false); + const { stdin, unmount } = render(); + await wait(); + + stdin.write('i'); + await wait(); + + expect(props.vimHandleInput).toHaveBeenCalled(); + expect(mockBuffer.handleInput).toHaveBeenCalled(); + unmount(); + }); + }); + + describe('unfocused paste', () => { + it('should handle bracketed paste when not focused', async () => { + props.focus = false; + const { stdin, unmount } = render(); + await wait(); + + stdin.write('\x1B[200~pasted text\x1B[201~'); + await wait(); + + expect(mockBuffer.handleInput).toHaveBeenCalledWith( + expect.objectContaining({ + paste: true, + sequence: 'pasted text', + }), + ); + unmount(); + }); + + it('should ignore regular keypresses when not focused', async () => { + props.focus = false; + const { stdin, unmount } = render(); + await wait(); + + stdin.write('a'); + await wait(); + + expect(mockBuffer.handleInput).not.toHaveBeenCalled(); + unmount(); + }); + }); }); diff --git a/packages/cli/src/ui/components/InputPrompt.tsx b/packages/cli/src/ui/components/InputPrompt.tsx index 50e64fa88..00b6ac81d 100644 --- a/packages/cli/src/ui/components/InputPrompt.tsx +++ b/packages/cli/src/ui/components/InputPrompt.tsx @@ -16,7 +16,6 @@ import stringWidth from 'string-width'; import { useShellHistory } from '../hooks/useShellHistory.js'; import { useCompletion } from '../hooks/useCompletion.js'; import { useKeypress, Key } from '../hooks/useKeypress.js'; -import { isAtCommand, isSlashCommand } from '../utils/commandUtils.js'; import { CommandContext, SlashCommand } from '../commands/types.js'; import { Config } from 
'@qwen-code/qwen-code-core'; import { @@ -32,7 +31,7 @@ export interface InputPromptProps { userMessages: readonly string[]; onClearScreen: () => void; config: Config; - slashCommands: SlashCommand[]; + slashCommands: readonly SlashCommand[]; commandContext: CommandContext; placeholder?: string; focus?: boolean; @@ -40,6 +39,7 @@ export interface InputPromptProps { suggestionsWidth: number; shellModeActive: boolean; setShellModeActive: (value: boolean) => void; + vimHandleInput?: (key: Key) => boolean; } export const InputPrompt: React.FC = ({ @@ -56,12 +56,13 @@ export const InputPrompt: React.FC = ({ suggestionsWidth, shellModeActive, setShellModeActive, + vimHandleInput, }) => { const [justNavigatedHistory, setJustNavigatedHistory] = useState(false); + const completion = useCompletion( - buffer.text, + buffer, config.getTargetDir(), - isAtCommand(buffer.text) || isSlashCommand(buffer.text), slashCommands, commandContext, config, @@ -95,7 +96,9 @@ export const InputPrompt: React.FC = ({ const inputHistory = useInputHistory({ userMessages, onSubmit: handleSubmitAndClear, - isActive: !completion.showSuggestions && !shellModeActive, + isActive: + (!completion.showSuggestions || completion.suggestions.length === 1) && + !shellModeActive, currentQuery: buffer.text, onChange: customSetTextAndResetCompletionSignal, }); @@ -113,76 +116,6 @@ export const InputPrompt: React.FC = ({ setJustNavigatedHistory, ]); - const completionSuggestions = completion.suggestions; - const handleAutocomplete = useCallback( - (indexToUse: number) => { - if (indexToUse < 0 || indexToUse >= completionSuggestions.length) { - return; - } - const query = buffer.text; - const suggestion = completionSuggestions[indexToUse].value; - - if (query.trimStart().startsWith('/')) { - const hasTrailingSpace = query.endsWith(' '); - const parts = query - .trimStart() - .substring(1) - .split(/\s+/) - .filter(Boolean); - - let isParentPath = false; - // If there's no trailing space, we need to check if the 
current query - // is already a complete path to a parent command. - if (!hasTrailingSpace) { - let currentLevel: SlashCommand[] | undefined = slashCommands; - for (let i = 0; i < parts.length; i++) { - const part = parts[i]; - const found: SlashCommand | undefined = currentLevel?.find( - (cmd) => cmd.name === part || cmd.altName === part, - ); - - if (found) { - if (i === parts.length - 1 && found.subCommands) { - isParentPath = true; - } - currentLevel = found.subCommands; - } else { - // Path is invalid, so it can't be a parent path. - currentLevel = undefined; - break; - } - } - } - - // Determine the base path of the command. - // - If there's a trailing space, the whole command is the base. - // - If it's a known parent path, the whole command is the base. - // - Otherwise, the base is everything EXCEPT the last partial part. - const basePath = - hasTrailingSpace || isParentPath ? parts : parts.slice(0, -1); - const newValue = `/${[...basePath, suggestion].join(' ')} `; - - buffer.setText(newValue); - } else { - const atIndex = query.lastIndexOf('@'); - if (atIndex === -1) return; - const pathPart = query.substring(atIndex + 1); - const lastSlashIndexInPath = pathPart.lastIndexOf('/'); - let autoCompleteStartIndex = atIndex + 1; - if (lastSlashIndexInPath !== -1) { - autoCompleteStartIndex += lastSlashIndexInPath + 1; - } - buffer.replaceRangeByOffset( - autoCompleteStartIndex, - buffer.text.length, - suggestion, - ); - } - resetCompletionState(); - }, - [resetCompletionState, buffer, completionSuggestions, slashCommands], - ); - // Handle clipboard image pasting with Ctrl+V const handleClipboardImage = useCallback(async () => { try { @@ -233,7 +166,12 @@ export const InputPrompt: React.FC = ({ const handleInput = useCallback( (key: Key) => { - if (!focus) { + /// We want to handle paste even when not focused to support drag and drop. 
+ if (!focus && !key.paste) { + return; + } + + if (vimHandleInput && vimHandleInput(key)) { return; } @@ -264,14 +202,22 @@ export const InputPrompt: React.FC = ({ return; } + // If the command is a perfect match, pressing enter should execute it. + if (completion.isPerfectMatch && key.name === 'return') { + handleSubmitAndClear(buffer.text); + return; + } + if (completion.showSuggestions) { - if (key.name === 'up') { - completion.navigateUp(); - return; - } - if (key.name === 'down') { - completion.navigateDown(); - return; + if (completion.suggestions.length > 1) { + if (key.name === 'up' || (key.ctrl && key.name === 'p')) { + completion.navigateUp(); + return; + } + if (key.name === 'down' || (key.ctrl && key.name === 'n')) { + completion.navigateDown(); + return; + } } if (key.name === 'tab' || (key.name === 'return' && !key.ctrl)) { @@ -281,66 +227,66 @@ export const InputPrompt: React.FC = ({ ? 0 // Default to the first if none is active : completion.activeSuggestionIndex; if (targetIndex < completion.suggestions.length) { - handleAutocomplete(targetIndex); + completion.handleAutocomplete(targetIndex); } } return; } + } + + if (!shellModeActive) { + if (key.ctrl && key.name === 'p') { + inputHistory.navigateUp(); + return; + } + if (key.ctrl && key.name === 'n') { + inputHistory.navigateDown(); + return; + } + // Handle arrow-up/down for history on single-line or at edges + if ( + key.name === 'up' && + (buffer.allVisualLines.length === 1 || + (buffer.visualCursor[0] === 0 && buffer.visualScrollRow === 0)) + ) { + inputHistory.navigateUp(); + return; + } + if ( + key.name === 'down' && + (buffer.allVisualLines.length === 1 || + buffer.visualCursor[0] === buffer.allVisualLines.length - 1) + ) { + inputHistory.navigateDown(); + return; + } } else { - if (!shellModeActive) { - if (key.ctrl && key.name === 'p') { - inputHistory.navigateUp(); - return; - } - if (key.ctrl && key.name === 'n') { - inputHistory.navigateDown(); - return; - } - // Handle arrow-up/down 
for history on single-line or at edges - if ( - key.name === 'up' && - (buffer.allVisualLines.length === 1 || - (buffer.visualCursor[0] === 0 && buffer.visualScrollRow === 0)) - ) { - inputHistory.navigateUp(); - return; - } - if ( - key.name === 'down' && - (buffer.allVisualLines.length === 1 || - buffer.visualCursor[0] === buffer.allVisualLines.length - 1) - ) { - inputHistory.navigateDown(); - return; - } - } else { - // Shell History Navigation - if (key.name === 'up') { - const prevCommand = shellHistory.getPreviousCommand(); - if (prevCommand !== null) buffer.setText(prevCommand); - return; - } - if (key.name === 'down') { - const nextCommand = shellHistory.getNextCommand(); - if (nextCommand !== null) buffer.setText(nextCommand); - return; - } - } - - if (key.name === 'return' && !key.ctrl && !key.meta && !key.paste) { - if (buffer.text.trim()) { - const [row, col] = buffer.cursor; - const line = buffer.lines[row]; - const charBefore = col > 0 ? cpSlice(line, col - 1, col) : ''; - if (charBefore === '\\') { - buffer.backspace(); - buffer.newline(); - } else { - handleSubmitAndClear(buffer.text); - } - } + // Shell History Navigation + if (key.name === 'up') { + const prevCommand = shellHistory.getPreviousCommand(); + if (prevCommand !== null) buffer.setText(prevCommand); return; } + if (key.name === 'down') { + const nextCommand = shellHistory.getNextCommand(); + if (nextCommand !== null) buffer.setText(nextCommand); + return; + } + } + + if (key.name === 'return' && !key.ctrl && !key.meta && !key.paste) { + if (buffer.text.trim()) { + const [row, col] = buffer.cursor; + const line = buffer.lines[row]; + const charBefore = col > 0 ? 
cpSlice(line, col - 1, col) : ''; + if (charBefore === '\\') { + buffer.backspace(); + buffer.newline(); + } else { + handleSubmitAndClear(buffer.text); + } + } + return; } // Newline insertion @@ -356,6 +302,16 @@ export const InputPrompt: React.FC = ({ } if (key.ctrl && key.name === 'e') { buffer.move('end'); + buffer.moveToOffset(cpLen(buffer.text)); + return; + } + // Ctrl+C (Clear input) + if (key.ctrl && key.name === 'c') { + if (buffer.text.length > 0) { + buffer.setText(''); + resetCompletionState(); + return; + } return; } @@ -382,7 +338,7 @@ export const InputPrompt: React.FC = ({ return; } - // Fallback to the text buffer's default input handling for all other keys + // Fall back to the text buffer's default input handling for all other keys buffer.handleInput(key); }, [ @@ -393,14 +349,15 @@ export const InputPrompt: React.FC = ({ setShellModeActive, onClearScreen, inputHistory, - handleAutocomplete, handleSubmitAndClear, shellHistory, handleClipboardImage, + resetCompletionState, + vimHandleInput, ], ); - useKeypress(handleInput, { isActive: focus }); + useKeypress(handleInput, { isActive: true }); const linesToRender = buffer.viewportVisualLines; const [cursorVisualRowAbsolute, cursorVisualColAbsolute] = @@ -438,7 +395,7 @@ export const InputPrompt: React.FC = ({ display = display + ' '.repeat(inputWidth - currentVisualWidth); } - if (visualIdxInRenderedSet === cursorVisualRow) { + if (focus && visualIdxInRenderedSet === cursorVisualRow) { const relativeVisualColForHighlight = cursorVisualColAbsolute; if (relativeVisualColForHighlight >= 0) { diff --git a/packages/cli/src/ui/components/ShellConfirmationDialog.test.tsx b/packages/cli/src/ui/components/ShellConfirmationDialog.test.tsx new file mode 100644 index 000000000..35783d44e --- /dev/null +++ b/packages/cli/src/ui/components/ShellConfirmationDialog.test.tsx @@ -0,0 +1,45 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { render } from 
'ink-testing-library'; +import { describe, it, expect, vi } from 'vitest'; +import { ShellConfirmationDialog } from './ShellConfirmationDialog.js'; + +describe('ShellConfirmationDialog', () => { + const onConfirm = vi.fn(); + + const request = { + commands: ['ls -la', 'echo "hello"'], + onConfirm, + }; + + it('renders correctly', () => { + const { lastFrame } = render(); + expect(lastFrame()).toMatchSnapshot(); + }); + + it('calls onConfirm with ProceedOnce when "Yes, allow once" is selected', () => { + const { lastFrame } = render(); + const select = lastFrame()!.toString(); + // Simulate selecting the first option + // This is a simplified way to test the selection + expect(select).toContain('Yes, allow once'); + }); + + it('calls onConfirm with ProceedAlways when "Yes, allow always for this session" is selected', () => { + const { lastFrame } = render(); + const select = lastFrame()!.toString(); + // Simulate selecting the second option + expect(select).toContain('Yes, allow always for this session'); + }); + + it('calls onConfirm with Cancel when "No (esc)" is selected', () => { + const { lastFrame } = render(); + const select = lastFrame()!.toString(); + // Simulate selecting the third option + expect(select).toContain('No (esc)'); + }); +}); diff --git a/packages/cli/src/ui/components/ShellConfirmationDialog.tsx b/packages/cli/src/ui/components/ShellConfirmationDialog.tsx new file mode 100644 index 000000000..5a6dcf661 --- /dev/null +++ b/packages/cli/src/ui/components/ShellConfirmationDialog.tsx @@ -0,0 +1,98 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { ToolConfirmationOutcome } from '@qwen-code/qwen-code-core'; +import { Box, Text, useInput } from 'ink'; +import React from 'react'; +import { Colors } from '../colors.js'; +import { + RadioButtonSelect, + RadioSelectItem, +} from './shared/RadioButtonSelect.js'; + +export interface ShellConfirmationRequest { + commands: string[]; + onConfirm: ( + 
outcome: ToolConfirmationOutcome, + approvedCommands?: string[], + ) => void; +} + +export interface ShellConfirmationDialogProps { + request: ShellConfirmationRequest; +} + +export const ShellConfirmationDialog: React.FC< + ShellConfirmationDialogProps +> = ({ request }) => { + const { commands, onConfirm } = request; + + useInput((_, key) => { + if (key.escape) { + onConfirm(ToolConfirmationOutcome.Cancel); + } + }); + + const handleSelect = (item: ToolConfirmationOutcome) => { + if (item === ToolConfirmationOutcome.Cancel) { + onConfirm(item); + } else { + // For both ProceedOnce and ProceedAlways, we approve all the + // commands that were requested. + onConfirm(item, commands); + } + }; + + const options: Array> = [ + { + label: 'Yes, allow once', + value: ToolConfirmationOutcome.ProceedOnce, + }, + { + label: 'Yes, allow always for this session', + value: ToolConfirmationOutcome.ProceedAlways, + }, + { + label: 'No (esc)', + value: ToolConfirmationOutcome.Cancel, + }, + ]; + + return ( + + + Shell Command Execution + A custom command wants to run the following shell commands: + + {commands.map((cmd) => ( + + {cmd} + + ))} + + + + + Do you want to proceed? + + + + + ); +}; diff --git a/packages/cli/src/ui/components/ThemeDialog.tsx b/packages/cli/src/ui/components/ThemeDialog.tsx index 0ca176cba..7c38bb4b9 100644 --- a/packages/cli/src/ui/components/ThemeDialog.tsx +++ b/packages/cli/src/ui/components/ThemeDialog.tsx @@ -36,23 +36,45 @@ export function ThemeDialog({ SettingScope.User, ); + // Track the currently highlighted theme name + const [highlightedThemeName, setHighlightedThemeName] = useState< + string | undefined + >(settings.merged.theme || DEFAULT_THEME.name); + + // Generate theme items filtered by selected scope + const customThemes = + selectedScope === SettingScope.User + ? 
settings.user.settings.customThemes || {} + : settings.merged.customThemes || {}; + const builtInThemes = themeManager + .getAvailableThemes() + .filter((theme) => theme.type !== 'custom'); + const customThemeNames = Object.keys(customThemes); + const capitalize = (s: string) => s.charAt(0).toUpperCase() + s.slice(1); // Generate theme items - const themeItems = themeManager.getAvailableThemes().map((theme) => { - const typeString = theme.type.charAt(0).toUpperCase() + theme.type.slice(1); - return { + const themeItems = [ + ...builtInThemes.map((theme) => ({ label: theme.name, value: theme.name, themeNameDisplay: theme.name, - themeTypeDisplay: typeString, - }; - }); + themeTypeDisplay: capitalize(theme.type), + })), + ...customThemeNames.map((name) => ({ + label: name, + value: name, + themeNameDisplay: name, + themeTypeDisplay: 'Custom', + })), + ]; const [selectInputKey, setSelectInputKey] = useState(Date.now()); - // Determine which radio button should be initially selected in the theme list - // This should reflect the theme *saved* for the selected scope, or the default + // Find the index of the selected theme, but only if it exists in the list + const selectedThemeName = settings.merged.theme || DEFAULT_THEME.name; const initialThemeIndex = themeItems.findIndex( - (item) => item.value === (settings.merged.theme || DEFAULT_THEME.name), + (item) => item.value === selectedThemeName, ); + // If not found, fall back to the first theme + const safeInitialThemeIndex = initialThemeIndex >= 0 ? 
initialThemeIndex : 0; const scopeItems = [ { label: 'User Settings', value: SettingScope.User }, @@ -67,6 +89,11 @@ export function ThemeDialog({ [onSelect, selectedScope], ); + const handleThemeHighlight = (themeName: string) => { + setHighlightedThemeName(themeName); + onHighlight(themeName); + }; + const handleScopeHighlight = useCallback((scope: SettingScope) => { setSelectedScope(scope); setSelectInputKey(Date.now()); @@ -158,7 +185,7 @@ export function ThemeDialog({ } // Don't focus the scope selection if it is hidden due to height constraints. - const currenFocusedSection = !showScopeSelection ? 'theme' : focusedSection; + const currentFocusedSection = !showScopeSelection ? 'theme' : focusedSection; // Vertical space taken by elements other than the two code blocks in the preview pane. // Includes "Preview" title, borders, and margin between blocks. @@ -173,10 +200,16 @@ export function ThemeDialog({ availableTerminalHeight - PREVIEW_PANE_FIXED_VERTICAL_SPACE - (includePadding ? 2 : 0) * 2; - // Give slightly more space to the code block as it is 3 lines longer. - const diffHeight = Math.floor(availableTerminalHeightCodeBlock / 2) - 1; - const codeBlockHeight = Math.ceil(availableTerminalHeightCodeBlock / 2) + 1; + // Subtract margin between code blocks from available height. + const availableHeightForPanes = Math.max( + 0, + availableTerminalHeightCodeBlock - 1, + ); + + // The code block is slightly longer than the diff, so give it more space. + const codeBlockHeight = Math.ceil(availableHeightForPanes * 0.6); + const diffHeight = Math.floor(availableHeightForPanes * 0.4); return ( {/* Left Column: Selection */} - - {currenFocusedSection === 'theme' ? '> ' : ' '}Select Theme{' '} + + {currentFocusedSection === 'theme' ? '> ' : ' '}Select Theme{' '} {otherScopeModifiedMessage} {/* Scope Selection */} {showScopeSelection && ( - - {currenFocusedSection === 'scope' ? '> ' : ' '}Apply To + + {currentFocusedSection === 'scope' ? 
'> ' : ' '}Apply To )} @@ -226,39 +261,48 @@ export function ThemeDialog({ {/* Right Column: Preview */} Preview - - {colorizeCode( - `# function --def fibonacci(n): -- a, b = 0, 1 -- for _ in range(n): -- a, b = b, a + b -- return a`, - 'python', - codeBlockHeight, - colorizeCodeWidth, - )} - - - + {/* Get the Theme object for the highlighted theme, fall back to default if not found */} + {(() => { + const previewTheme = + themeManager.getTheme( + highlightedThemeName || DEFAULT_THEME.name, + ) || DEFAULT_THEME; + return ( + + {colorizeCode( + `# function +def fibonacci(n): + a, b = 0, 1 + for _ in range(n): + a, b = b, a + b + return a`, + 'python', + codeBlockHeight, + colorizeCodeWidth, + )} + + + + ); + })()} diff --git a/packages/cli/src/ui/components/Tips.tsx b/packages/cli/src/ui/components/Tips.tsx index 3f0f8bbc9..17944bf86 100644 --- a/packages/cli/src/ui/components/Tips.tsx +++ b/packages/cli/src/ui/components/Tips.tsx @@ -16,7 +16,7 @@ interface TipsProps { export const Tips: React.FC = ({ config }) => { const geminiMdFileCount = config.getGeminiMdFileCount(); return ( - + Tips for getting started: 1. Ask questions, edit files, or run commands. 
diff --git a/packages/cli/src/ui/components/__snapshots__/ShellConfirmationDialog.test.tsx.snap b/packages/cli/src/ui/components/__snapshots__/ShellConfirmationDialog.test.tsx.snap new file mode 100644 index 000000000..8c9ceb298 --- /dev/null +++ b/packages/cli/src/ui/components/__snapshots__/ShellConfirmationDialog.test.tsx.snap @@ -0,0 +1,21 @@ +// Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html + +exports[`ShellConfirmationDialog > renders correctly 1`] = ` +" ╭──────────────────────────────────────────────────────────────────────────────────────────────────╮ + │ │ + │ Shell Command Execution │ + │ A custom command wants to run the following shell commands: │ + │ │ + │ ╭──────────────────────────────────────────────────────────────────────────────────────────────╮ │ + │ │ ls -la │ │ + │ │ echo "hello" │ │ + │ ╰──────────────────────────────────────────────────────────────────────────────────────────────╯ │ + │ │ + │ Do you want to proceed? │ + │ │ + │ ● 1. Yes, allow once │ + │ 2. Yes, allow always for this session │ + │ 3. 
No (esc) │ + │ │ + ╰──────────────────────────────────────────────────────────────────────────────────────────────────╯" +`; diff --git a/packages/cli/src/ui/components/messages/DiffRenderer.test.tsx b/packages/cli/src/ui/components/messages/DiffRenderer.test.tsx index a6f906a6d..9629b94ba 100644 --- a/packages/cli/src/ui/components/messages/DiffRenderer.test.tsx +++ b/packages/cli/src/ui/components/messages/DiffRenderer.test.tsx @@ -44,6 +44,7 @@ index 0000000..e69de29 'python', undefined, 80, + undefined, ); }); @@ -71,6 +72,7 @@ index 0000000..e69de29 null, undefined, 80, + undefined, ); }); @@ -94,6 +96,7 @@ index 0000000..e69de29 null, undefined, 80, + undefined, ); }); @@ -127,8 +130,8 @@ index 0000001..0000002 100644 ); const output = lastFrame(); const lines = output!.split('\n'); - expect(lines[0]).toBe('1 - old line'); - expect(lines[1]).toBe('1 + new line'); + expect(lines[0]).toBe('1 - old line'); + expect(lines[1]).toBe('1 + new line'); }); it('should handle diff with only header and no changes', () => { @@ -250,35 +253,35 @@ index 123..789 100644 { terminalWidth: 80, height: undefined, - expected: `1 console.log('first hunk'); -2 - const oldVar = 1; -2 + const newVar = 1; -3 console.log('end of first hunk'); + expected: ` 1 console.log('first hunk'); + 2 - const oldVar = 1; + 2 + const newVar = 1; + 3 console.log('end of first hunk'); ════════════════════════════════════════════════════════════════════════════════ -20 console.log('second hunk'); -21 - const anotherOld = 'test'; -21 + const anotherNew = 'test'; -22 console.log('end of second hunk');`, +20 console.log('second hunk'); +21 - const anotherOld = 'test'; +21 + const anotherNew = 'test'; +22 console.log('end of second hunk');`, }, { terminalWidth: 80, height: 6, expected: `... first 4 lines hidden ... 
════════════════════════════════════════════════════════════════════════════════ -20 console.log('second hunk'); -21 - const anotherOld = 'test'; -21 + const anotherNew = 'test'; -22 console.log('end of second hunk');`, +20 console.log('second hunk'); +21 - const anotherOld = 'test'; +21 + const anotherNew = 'test'; +22 console.log('end of second hunk');`, }, { terminalWidth: 30, height: 6, expected: `... first 10 lines hidden ... - 'test'; -21 + const anotherNew = - 'test'; -22 console.log('end of - second hunk');`, + ; +21 + const anotherNew = 'test' + ; +22 console.log('end of + second hunk');`, }, ])( 'with terminalWidth $terminalWidth and height $height', @@ -326,11 +329,11 @@ fileDiff Index: file.txt ); const output = lastFrame(); - expect(output).toEqual(`1 - const oldVar = 1; -1 + const newVar = 1; + expect(output).toEqual(` 1 - const oldVar = 1; + 1 + const newVar = 1; ════════════════════════════════════════════════════════════════════════════════ -20 - const anotherOld = 'test'; -20 + const anotherNew = 'test';`); +20 - const anotherOld = 'test'; +20 + const anotherNew = 'test';`); }); it('should correctly render a new file with no file extension correctly', () => { diff --git a/packages/cli/src/ui/components/messages/DiffRenderer.tsx b/packages/cli/src/ui/components/messages/DiffRenderer.tsx index 25fb293e5..7f130b3f9 100644 --- a/packages/cli/src/ui/components/messages/DiffRenderer.tsx +++ b/packages/cli/src/ui/components/messages/DiffRenderer.tsx @@ -8,7 +8,7 @@ import React from 'react'; import { Box, Text } from 'ink'; import { Colors } from '../../colors.js'; import crypto from 'crypto'; -import { colorizeCode } from '../../utils/CodeColorizer.js'; +import { colorizeCode, colorizeLine } from '../../utils/CodeColorizer.js'; import { MaxSizedBox } from '../shared/MaxSizedBox.js'; interface DiffLine { @@ -93,6 +93,7 @@ interface DiffRendererProps { tabWidth?: number; availableTerminalHeight?: number; terminalWidth: number; + theme?: 
import('../../themes/theme.js').Theme; } const DEFAULT_TAB_WIDTH = 4; // Spaces per tab for normalization @@ -103,6 +104,7 @@ export const DiffRenderer: React.FC = ({ tabWidth = DEFAULT_TAB_WIDTH, availableTerminalHeight, terminalWidth, + theme, }) => { if (!diffContent || typeof diffContent !== 'string') { return No diff content.; @@ -146,6 +148,7 @@ export const DiffRenderer: React.FC = ({ language, availableTerminalHeight, terminalWidth, + theme, ); } else { renderedOutput = renderDiffContent( @@ -186,6 +189,18 @@ const renderDiffContent = ( ); } + const maxLineNumber = Math.max( + 0, + ...displayableLines.map((l) => l.oldLine ?? 0), + ...displayableLines.map((l) => l.newLine ?? 0), + ); + const gutterWidth = Math.max(1, maxLineNumber.toString().length); + + const fileExtension = filename?.split('.').pop() || null; + const language = fileExtension + ? getLanguageFromExtension(fileExtension) + : null; + // Calculate the minimum indentation across all displayable lines let baseIndentation = Infinity; // Start high to find the minimum for (const line of displayableLines) { @@ -232,27 +247,25 @@ const renderDiffContent = ( ) { acc.push( - {'═'.repeat(terminalWidth)} + + {'═'.repeat(terminalWidth)} + , ); } const lineKey = `diff-line-${index}`; let gutterNumStr = ''; - let color: string | undefined = undefined; let prefixSymbol = ' '; - let dim = false; switch (line.type) { case 'add': gutterNumStr = (line.newLine ?? '').toString(); - color = 'green'; prefixSymbol = '+'; lastLineNumber = line.newLine ?? null; break; case 'del': gutterNumStr = (line.oldLine ?? '').toString(); - color = 'red'; prefixSymbol = '-'; // For deletions, update lastLineNumber based on oldLine if it's advancing. // This helps manage gaps correctly if there are multiple consecutive deletions @@ -263,7 +276,6 @@ const renderDiffContent = ( break; case 'context': gutterNumStr = (line.newLine ?? '').toString(); - dim = true; prefixSymbol = ' '; lastLineNumber = line.newLine ?? 
null; break; @@ -275,13 +287,26 @@ const renderDiffContent = ( acc.push( - {gutterNumStr.padEnd(4)} - - {prefixSymbol}{' '} - - - {displayContent} + + {gutterNumStr.padStart(gutterWidth)}{' '} + {line.type === 'context' ? ( + <> + {prefixSymbol} + + {colorizeLine(displayContent, language)} + + + ) : ( + + {prefixSymbol} {colorizeLine(displayContent, language)} + + )} , ); return acc; diff --git a/packages/cli/src/ui/components/messages/ToolConfirmationMessage.tsx b/packages/cli/src/ui/components/messages/ToolConfirmationMessage.tsx index fae8c852c..e6f718c0c 100644 --- a/packages/cli/src/ui/components/messages/ToolConfirmationMessage.tsx +++ b/packages/cli/src/ui/components/messages/ToolConfirmationMessage.tsx @@ -132,19 +132,20 @@ export const ToolConfirmationMessage: React.FC< const executionProps = confirmationDetails as ToolExecuteConfirmationDetails; - question = `Allow execution?`; + question = `Allow execution of: '${executionProps.rootCommand}'?`; options.push( { - label: 'Yes, allow once', + label: `Yes, allow once`, value: ToolConfirmationOutcome.ProceedOnce, }, { - label: `Yes, allow always "${executionProps.rootCommand} ..."`, + label: `Yes, allow always ...`, value: ToolConfirmationOutcome.ProceedAlways, }, - { label: 'No (esc)', value: ToolConfirmationOutcome.Cancel }, ); + options.push({ label: 'No (esc)', value: ToolConfirmationOutcome.Cancel }); + let bodyContentHeight = availableBodyContentHeight(); if (bodyContentHeight !== undefined) { bodyContentHeight -= 2; // Account for padding; diff --git a/packages/cli/src/ui/components/messages/ToolGroupMessage.tsx b/packages/cli/src/ui/components/messages/ToolGroupMessage.tsx index 2eb80431b..cd233188b 100644 --- a/packages/cli/src/ui/components/messages/ToolGroupMessage.tsx +++ b/packages/cli/src/ui/components/messages/ToolGroupMessage.tsx @@ -11,6 +11,7 @@ import { ToolMessage } from './ToolMessage.js'; import { ToolConfirmationMessage } from './ToolConfirmationMessage.js'; import { Colors } from 
'../../colors.js'; import { Config } from '@qwen-code/qwen-code-core'; +import { SHELL_COMMAND_NAME } from '../../constants.js'; interface ToolGroupMessageProps { groupId: number; @@ -32,7 +33,9 @@ export const ToolGroupMessage: React.FC = ({ const hasPending = !toolCalls.every( (t) => t.status === ToolCallStatus.Success, ); - const borderColor = hasPending ? Colors.AccentYellow : Colors.Gray; + const isShellCommand = toolCalls.some((t) => t.name === SHELL_COMMAND_NAME); + const borderColor = + hasPending || isShellCommand ? Colors.AccentYellow : Colors.Gray; const staticHeight = /* border */ 2 + /* marginBottom */ 1; // This is a bit of a magic number, but it accounts for the border and diff --git a/packages/cli/src/ui/components/messages/ToolMessage.test.tsx b/packages/cli/src/ui/components/messages/ToolMessage.test.tsx index 7b9de92e4..c9bed003f 100644 --- a/packages/cli/src/ui/components/messages/ToolMessage.test.tsx +++ b/packages/cli/src/ui/components/messages/ToolMessage.test.tsx @@ -152,6 +152,8 @@ describe('', () => { const diffResult = { fileDiff: '--- a/file.txt\n+++ b/file.txt\n@@ -1 +1 @@\n-old\n+new', fileName: 'file.txt', + originalContent: 'old', + newContent: 'new', }; const { lastFrame } = renderWithContext( , diff --git a/packages/cli/src/ui/components/shared/MaxSizedBox.test.tsx b/packages/cli/src/ui/components/shared/MaxSizedBox.test.tsx index 50951b4fa..92147d3c5 100644 --- a/packages/cli/src/ui/components/shared/MaxSizedBox.test.tsx +++ b/packages/cli/src/ui/components/shared/MaxSizedBox.test.tsx @@ -248,6 +248,89 @@ Line 3`); 🐶`); }); + it('falls back to an ellipsis when width is extremely small', () => { + const { lastFrame } = render( + + + + No + wrap + + + , + ); + + expect(lastFrame()).equals('N…'); + }); + + it('truncates long non-wrapping text with ellipsis', () => { + const { lastFrame } = render( + + + + ABCDE + wrap + + + , + ); + + expect(lastFrame()).equals('AB…'); + }); + + it('truncates non-wrapping text containing line 
breaks', () => { + const { lastFrame } = render( + + + + {'A\nBCDE'} + wrap + + + , + ); + + expect(lastFrame()).equals(`A\n…`); + }); + + it('truncates emoji characters correctly with ellipsis', () => { + const { lastFrame } = render( + + + + 🐶🐶🐶 + wrap + + + , + ); + + expect(lastFrame()).equals(`🐶…`); + }); + + it('shows ellipsis for multiple rows with long non-wrapping text', () => { + const { lastFrame } = render( + + + + AAA + first + + + BBB + second + + + CCC + third + + + , + ); + + expect(lastFrame()).equals(`AA…\nBB…\nCC…`); + }); + it('accounts for additionalHiddenLinesCount', () => { const { lastFrame } = render( diff --git a/packages/cli/src/ui/components/shared/MaxSizedBox.tsx b/packages/cli/src/ui/components/shared/MaxSizedBox.tsx index eb5ef6b44..346472bf0 100644 --- a/packages/cli/src/ui/components/shared/MaxSizedBox.tsx +++ b/packages/cli/src/ui/components/shared/MaxSizedBox.tsx @@ -432,8 +432,85 @@ function layoutInkElementAsStyledText( const availableWidth = maxWidth - noWrappingWidth; if (availableWidth < 1) { - // No room to render the wrapping segments. TODO(jacob314): consider an alternative fallback strategy. - output.push(nonWrappingContent); + // No room to render the wrapping segments. Truncate the non-wrapping + // content and append an ellipsis so the line always fits within maxWidth. 
+ + // Handle line breaks in non-wrapping content when truncating + const lines: StyledText[][] = []; + let currentLine: StyledText[] = []; + let currentLineWidth = 0; + + for (const segment of nonWrappingContent) { + const textLines = segment.text.split('\n'); + textLines.forEach((text, index) => { + if (index > 0) { + // New line encountered, finish current line and start new one + lines.push(currentLine); + currentLine = []; + currentLineWidth = 0; + } + + if (text) { + const textWidth = stringWidth(text); + + // When there's no room for wrapping content, be very conservative + // For lines after the first line break, show only ellipsis if the text would be truncated + if (index > 0 && textWidth > 0) { + // This is content after a line break - just show ellipsis to indicate truncation + currentLine.push({ text: '…', props: {} }); + currentLineWidth = stringWidth('…'); + } else { + // This is the first line or a continuation, try to fit what we can + const maxContentWidth = Math.max(0, maxWidth - stringWidth('…')); + + if (textWidth <= maxContentWidth && currentLineWidth === 0) { + // Text fits completely on this line + currentLine.push({ text, props: segment.props }); + currentLineWidth += textWidth; + } else { + // Text needs truncation + const codePoints = toCodePoints(text); + let truncatedWidth = currentLineWidth; + let sliceEndIndex = 0; + + for (const char of codePoints) { + const charWidth = stringWidth(char); + if (truncatedWidth + charWidth > maxContentWidth) { + break; + } + truncatedWidth += charWidth; + sliceEndIndex++; + } + + const slice = codePoints.slice(0, sliceEndIndex).join(''); + if (slice) { + currentLine.push({ text: slice, props: segment.props }); + } + currentLine.push({ text: '…', props: {} }); + currentLineWidth = truncatedWidth + stringWidth('…'); + } + } + } + }); + } + + // Add the last line if it has content or if the last segment ended with \n + if ( + currentLine.length > 0 || + (nonWrappingContent.length > 0 && + 
nonWrappingContent[nonWrappingContent.length - 1].text.endsWith('\n')) + ) { + lines.push(currentLine); + } + + // If we don't have any lines yet, add an ellipsis line + if (lines.length === 0) { + lines.push([{ text: '…', props: {} }]); + } + + for (const line of lines) { + output.push(line); + } return; } diff --git a/packages/cli/src/ui/components/shared/RadioButtonSelect.test.tsx b/packages/cli/src/ui/components/shared/RadioButtonSelect.test.tsx new file mode 100644 index 000000000..4b36fe3cc --- /dev/null +++ b/packages/cli/src/ui/components/shared/RadioButtonSelect.test.tsx @@ -0,0 +1,115 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { render } from 'ink-testing-library'; +import { + RadioButtonSelect, + type RadioSelectItem, +} from './RadioButtonSelect.js'; +import { describe, it, expect } from 'vitest'; + +const ITEMS: Array> = [ + { label: 'Option 1', value: 'one' }, + { label: 'Option 2', value: 'two' }, + { label: 'Option 3', value: 'three', disabled: true }, +]; + +describe('', () => { + it('renders a list of items and matches snapshot', () => { + const { lastFrame } = render( + {}} isFocused={true} />, + ); + expect(lastFrame()).toMatchSnapshot(); + }); + + it('renders with the second item selected and matches snapshot', () => { + const { lastFrame } = render( + {}} + isFocused={true} + />, + ); + expect(lastFrame()).toMatchSnapshot(); + }); + + it('renders with numbers hidden and matches snapshot', () => { + const { lastFrame } = render( + {}} + isFocused={true} + showNumbers={false} + />, + ); + expect(lastFrame()).toMatchSnapshot(); + }); + + it('renders with scroll arrows and matches snapshot', () => { + const manyItems = Array.from({ length: 20 }, (_, i) => ({ + label: `Item ${i + 1}`, + value: `item-${i + 1}`, + })); + const { lastFrame } = render( + {}} + isFocused={true} + showScrollArrows={true} + maxItemsToShow={5} + />, + ); + expect(lastFrame()).toMatchSnapshot(); + }); + + 
it('renders with special theme display and matches snapshot', () => { + const themeItems: Array> = [ + { + label: 'Theme A (Light)', + value: 'a-light', + themeNameDisplay: 'Theme A', + themeTypeDisplay: '(Light)', + }, + { + label: 'Theme B (Dark)', + value: 'b-dark', + themeNameDisplay: 'Theme B', + themeTypeDisplay: '(Dark)', + }, + ]; + const { lastFrame } = render( + {}} + isFocused={true} + />, + ); + expect(lastFrame()).toMatchSnapshot(); + }); + + it('renders a list with >10 items and matches snapshot', () => { + const manyItems = Array.from({ length: 12 }, (_, i) => ({ + label: `Item ${i + 1}`, + value: `item-${i + 1}`, + })); + const { lastFrame } = render( + {}} + isFocused={true} + />, + ); + expect(lastFrame()).toMatchSnapshot(); + }); + + it('renders nothing when no items are provided', () => { + const { lastFrame } = render( + {}} isFocused={true} />, + ); + expect(lastFrame()).toBe(''); + }); +}); diff --git a/packages/cli/src/ui/components/shared/RadioButtonSelect.tsx b/packages/cli/src/ui/components/shared/RadioButtonSelect.tsx index d680a825c..8b0057ca0 100644 --- a/packages/cli/src/ui/components/shared/RadioButtonSelect.tsx +++ b/packages/cli/src/ui/components/shared/RadioButtonSelect.tsx @@ -4,7 +4,7 @@ * SPDX-License-Identifier: Apache-2.0 */ -import React, { useEffect, useState } from 'react'; +import React, { useEffect, useState, useRef } from 'react'; import { Text, Box, useInput } from 'ink'; import { Colors } from '../../colors.js'; @@ -39,6 +39,8 @@ export interface RadioButtonSelectProps { showScrollArrows?: boolean; /** The maximum number of items to show at once. */ maxItemsToShow?: number; + /** Whether to show numbers next to items. 
*/ + showNumbers?: boolean; } /** @@ -55,23 +57,12 @@ export function RadioButtonSelect({ isFocused, showScrollArrows = false, maxItemsToShow = 10, + showNumbers = true, }: RadioButtonSelectProps): React.JSX.Element { - // Ensure initialIndex is within bounds - const safeInitialIndex = - items.length > 0 - ? Math.max(0, Math.min(initialIndex, items.length - 1)) - : 0; - const [activeIndex, setActiveIndex] = useState(safeInitialIndex); + const [activeIndex, setActiveIndex] = useState(initialIndex); const [scrollOffset, setScrollOffset] = useState(0); - - // Ensure activeIndex is always within bounds when items change - useEffect(() => { - if (items.length === 0) { - setActiveIndex(0); - } else if (activeIndex >= items.length) { - setActiveIndex(Math.max(0, items.length - 1)); - } - }, [items.length, activeIndex]); + const [numberInput, setNumberInput] = useState(''); + const numberInputTimer = useRef(null); useEffect(() => { const newScrollOffset = Math.max( @@ -85,55 +76,85 @@ export function RadioButtonSelect({ } }, [activeIndex, items.length, scrollOffset, maxItemsToShow]); + useEffect( + () => () => { + if (numberInputTimer.current) { + clearTimeout(numberInputTimer.current); + } + }, + [], + ); + useInput( (input, key) => { - if (input === 'k' || key.upArrow) { - if (items.length > 0) { - const newIndex = activeIndex > 0 ? activeIndex - 1 : items.length - 1; - setActiveIndex(newIndex); - if (items[newIndex]) { - onHighlight?.(items[newIndex].value); - } - } - } - if (input === 'j' || key.downArrow) { - if (items.length > 0) { - const newIndex = activeIndex < items.length - 1 ? 
activeIndex + 1 : 0; - setActiveIndex(newIndex); - if (items[newIndex]) { - onHighlight?.(items[newIndex].value); - } - } - } - if (key.return) { - // Add bounds check before accessing items[activeIndex] - if ( - activeIndex >= 0 && - activeIndex < items.length && - items[activeIndex] - ) { - onSelect(items[activeIndex].value); - } + const isNumeric = showNumbers && /^[0-9]$/.test(input); + + // Any key press that is not a digit should clear the number input buffer. + if (!isNumeric && numberInputTimer.current) { + clearTimeout(numberInputTimer.current); + setNumberInput(''); } - // Enable selection directly from number keys. - if (/^[1-9]$/.test(input)) { - const targetIndex = Number.parseInt(input, 10) - 1; - if (targetIndex >= 0 && targetIndex < visibleItems.length) { - const selectedItem = visibleItems[targetIndex]; - if (selectedItem) { - onSelect?.(selectedItem.value); + if (input === 'k' || key.upArrow) { + const newIndex = activeIndex > 0 ? activeIndex - 1 : items.length - 1; + setActiveIndex(newIndex); + onHighlight?.(items[newIndex]!.value); + return; + } + + if (input === 'j' || key.downArrow) { + const newIndex = activeIndex < items.length - 1 ? activeIndex + 1 : 0; + setActiveIndex(newIndex); + onHighlight?.(items[newIndex]!.value); + return; + } + + if (key.return) { + onSelect(items[activeIndex]!.value); + return; + } + + // Handle numeric input for selection. + if (isNumeric) { + if (numberInputTimer.current) { + clearTimeout(numberInputTimer.current); + } + + const newNumberInput = numberInput + input; + setNumberInput(newNumberInput); + + const targetIndex = Number.parseInt(newNumberInput, 10) - 1; + + // A single '0' is not a valid selection since items are 1-indexed. 
+ if (newNumberInput === '0') { + numberInputTimer.current = setTimeout(() => setNumberInput(''), 350); + return; + } + + if (targetIndex >= 0 && targetIndex < items.length) { + const targetItem = items[targetIndex]!; + setActiveIndex(targetIndex); + onHighlight?.(targetItem.value); + + // If the typed number can't be a prefix for another valid number, + // select it immediately. Otherwise, wait for more input. + const potentialNextNumber = Number.parseInt(newNumberInput + '0', 10); + if (potentialNextNumber > items.length) { + onSelect(targetItem.value); + setNumberInput(''); + } else { + numberInputTimer.current = setTimeout(() => { + onSelect(targetItem.value); + setNumberInput(''); + }, 350); // Debounce time for multi-digit input. } + } else { + // The typed number is out of bounds, clear the buffer + setNumberInput(''); } } }, - { - isActive: - isFocused && - items.length > 0 && - activeIndex >= 0 && - activeIndex < items.length, - }, + { isActive: isFocused && items.length > 0 }, ); const visibleItems = items.slice(scrollOffset, scrollOffset + maxItemsToShow); @@ -150,19 +171,38 @@ export function RadioButtonSelect({ const isSelected = activeIndex === itemIndex; let textColor = Colors.Foreground; + let numberColor = Colors.Foreground; if (isSelected) { textColor = Colors.AccentGreen; + numberColor = Colors.AccentGreen; } else if (item.disabled) { textColor = Colors.Gray; + numberColor = Colors.Gray; } + if (!showNumbers) { + numberColor = Colors.Gray; + } + + const numberColumnWidth = String(items.length).length; + const itemNumberText = `${String(itemIndex + 1).padStart( + numberColumnWidth, + )}.`; + return ( - + - {isSelected ? '●' : '○'} + {isSelected ? '●' : ' '} + + {itemNumberText} + {item.themeNameDisplay && item.themeTypeDisplay ? 
( {item.themeNameDisplay}{' '} diff --git a/packages/cli/src/ui/components/shared/__snapshots__/RadioButtonSelect.test.tsx.snap b/packages/cli/src/ui/components/shared/__snapshots__/RadioButtonSelect.test.tsx.snap new file mode 100644 index 000000000..aeb4ac16e --- /dev/null +++ b/packages/cli/src/ui/components/shared/__snapshots__/RadioButtonSelect.test.tsx.snap @@ -0,0 +1,47 @@ +// Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html + +exports[` > renders a list of items and matches snapshot 1`] = ` +"● 1. Option 1 + 2. Option 2 + 3. Option 3" +`; + +exports[` > renders a list with >10 items and matches snapshot 1`] = ` +"● 1. Item 1 + 2. Item 2 + 3. Item 3 + 4. Item 4 + 5. Item 5 + 6. Item 6 + 7. Item 7 + 8. Item 8 + 9. Item 9 + 10. Item 10" +`; + +exports[` > renders with numbers hidden and matches snapshot 1`] = ` +"● 1. Option 1 + 2. Option 2 + 3. Option 3" +`; + +exports[` > renders with scroll arrows and matches snapshot 1`] = ` +"▲ +● 1. Item 1 + 2. Item 2 + 3. Item 3 + 4. Item 4 + 5. Item 5 +▼" +`; + +exports[` > renders with special theme display and matches snapshot 1`] = ` +"● 1. Theme A (Light) + 2. Theme B (Dark)" +`; + +exports[` > renders with the second item selected and matches snapshot 1`] = ` +" 1. Option 1 +● 2. Option 2 + 3. 
Option 3" +`; diff --git a/packages/cli/src/ui/components/shared/text-buffer.test.ts b/packages/cli/src/ui/components/shared/text-buffer.test.ts index 89930c184..807c33df6 100644 --- a/packages/cli/src/ui/components/shared/text-buffer.test.ts +++ b/packages/cli/src/ui/components/shared/text-buffer.test.ts @@ -11,6 +11,7 @@ import { Viewport, TextBuffer, offsetToLogicalPos, + logicalPosToOffset, textBufferReducer, TextBufferState, TextBufferAction, @@ -407,8 +408,8 @@ describe('useTextBuffer', () => { useTextBuffer({ viewport, isValidPath: () => true }), ); const filePath = '/path/to/a/valid/file.txt'; - act(() => result.current.insert(filePath)); - expect(getBufferState(result).text).toBe(`@${filePath}`); + act(() => result.current.insert(filePath, { paste: true })); + expect(getBufferState(result).text).toBe(`@${filePath} `); }); it('should not prepend @ to an invalid file path on insert', () => { @@ -416,7 +417,7 @@ describe('useTextBuffer', () => { useTextBuffer({ viewport, isValidPath: () => false }), ); const notAPath = 'this is just some long text'; - act(() => result.current.insert(notAPath)); + act(() => result.current.insert(notAPath, { paste: true })); expect(getBufferState(result).text).toBe(notAPath); }); @@ -425,8 +426,8 @@ describe('useTextBuffer', () => { useTextBuffer({ viewport, isValidPath: () => true }), ); const filePath = "'/path/to/a/valid/file.txt'"; - act(() => result.current.insert(filePath)); - expect(getBufferState(result).text).toBe(`@/path/to/a/valid/file.txt`); + act(() => result.current.insert(filePath, { paste: true })); + expect(getBufferState(result).text).toBe(`@/path/to/a/valid/file.txt `); }); it('should not prepend @ to short text that is not a path', () => { @@ -434,7 +435,7 @@ describe('useTextBuffer', () => { useTextBuffer({ viewport, isValidPath: () => true }), ); const shortText = 'ab'; - act(() => result.current.insert(shortText)); + act(() => result.current.insert(shortText, { paste: true })); 
expect(getBufferState(result).text).toBe(shortText); }); }); @@ -449,7 +450,7 @@ describe('useTextBuffer', () => { }), ); const filePath = '/path/to/a/valid/file.txt'; - act(() => result.current.insert(filePath)); + act(() => result.current.insert(filePath, { paste: true })); expect(getBufferState(result).text).toBe(filePath); // No @ prefix }); @@ -462,7 +463,7 @@ describe('useTextBuffer', () => { }), ); const quotedFilePath = "'/path/to/a/valid/file.txt'"; - act(() => result.current.insert(quotedFilePath)); + act(() => result.current.insert(quotedFilePath, { paste: true })); expect(getBufferState(result).text).toBe(quotedFilePath); // No @ prefix, keeps quotes }); @@ -475,7 +476,7 @@ describe('useTextBuffer', () => { }), ); const notAPath = 'this is just some text'; - act(() => result.current.insert(notAPath)); + act(() => result.current.insert(notAPath, { paste: true })); expect(getBufferState(result).text).toBe(notAPath); }); @@ -488,7 +489,7 @@ describe('useTextBuffer', () => { }), ); const shortText = 'ls'; - act(() => result.current.insert(shortText)); + act(() => result.current.insert(shortText, { paste: true })); expect(getBufferState(result).text).toBe(shortText); // No @ prefix for short text }); }); @@ -849,6 +850,7 @@ describe('useTextBuffer', () => { ctrl: false, meta: false, shift: false, + paste: false, sequence: '\x7f', }); result.current.handleInput({ @@ -856,6 +858,7 @@ describe('useTextBuffer', () => { ctrl: false, meta: false, shift: false, + paste: false, sequence: '\x7f', }); result.current.handleInput({ @@ -863,6 +866,7 @@ describe('useTextBuffer', () => { ctrl: false, meta: false, shift: false, + paste: false, sequence: '\x7f', }); }); @@ -990,9 +994,9 @@ Contrary to popular belief, Lorem Ipsum is not simply random text. 
It has roots // Simulate pasting the long text multiple times act(() => { - result.current.insert(longText); - result.current.insert(longText); - result.current.insert(longText); + result.current.insert(longText, { paste: true }); + result.current.insert(longText, { paste: true }); + result.current.insert(longText, { paste: true }); }); const state = getBufferState(result); @@ -1338,3 +1342,216 @@ describe('offsetToLogicalPos', () => { expect(offsetToLogicalPos(text, 2)).toEqual([0, 2]); // After 🐱 }); }); + +describe('logicalPosToOffset', () => { + it('should convert row/col position to offset correctly', () => { + const lines = ['hello', 'world', '123']; + + // Line 0: "hello" (5 chars) + expect(logicalPosToOffset(lines, 0, 0)).toBe(0); // Start of 'hello' + expect(logicalPosToOffset(lines, 0, 3)).toBe(3); // 'l' in 'hello' + expect(logicalPosToOffset(lines, 0, 5)).toBe(5); // End of 'hello' + + // Line 1: "world" (5 chars), offset starts at 6 (5 + 1 for newline) + expect(logicalPosToOffset(lines, 1, 0)).toBe(6); // Start of 'world' + expect(logicalPosToOffset(lines, 1, 2)).toBe(8); // 'r' in 'world' + expect(logicalPosToOffset(lines, 1, 5)).toBe(11); // End of 'world' + + // Line 2: "123" (3 chars), offset starts at 12 (5 + 1 + 5 + 1) + expect(logicalPosToOffset(lines, 2, 0)).toBe(12); // Start of '123' + expect(logicalPosToOffset(lines, 2, 1)).toBe(13); // '2' in '123' + expect(logicalPosToOffset(lines, 2, 3)).toBe(15); // End of '123' + }); + + it('should handle empty lines', () => { + const lines = ['a', '', 'c']; + + expect(logicalPosToOffset(lines, 0, 0)).toBe(0); // 'a' + expect(logicalPosToOffset(lines, 0, 1)).toBe(1); // End of 'a' + expect(logicalPosToOffset(lines, 1, 0)).toBe(2); // Empty line + expect(logicalPosToOffset(lines, 2, 0)).toBe(3); // 'c' + expect(logicalPosToOffset(lines, 2, 1)).toBe(4); // End of 'c' + }); + + it('should handle single empty line', () => { + const lines = ['']; + + expect(logicalPosToOffset(lines, 0, 0)).toBe(0); + }); + + 
it('should be inverse of offsetToLogicalPos', () => { + const lines = ['hello', 'world', '123']; + const text = lines.join('\n'); + + // Test round-trip conversion + for (let offset = 0; offset <= text.length; offset++) { + const [row, col] = offsetToLogicalPos(text, offset); + const convertedOffset = logicalPosToOffset(lines, row, col); + expect(convertedOffset).toBe(offset); + } + }); + + it('should handle out-of-bounds positions', () => { + const lines = ['hello']; + + // Beyond end of line + expect(logicalPosToOffset(lines, 0, 10)).toBe(5); // Clamps to end of line + + // Beyond array bounds - should clamp to the last line + expect(logicalPosToOffset(lines, 5, 0)).toBe(0); // Clamps to start of last line (row 0) + expect(logicalPosToOffset(lines, 5, 10)).toBe(5); // Clamps to end of last line + }); +}); + +describe('textBufferReducer vim operations', () => { + describe('vim_delete_line', () => { + it('should delete a single line including newline in multi-line text', () => { + const initialState: TextBufferState = { + lines: ['line1', 'line2', 'line3'], + cursorRow: 1, + cursorCol: 2, + preferredCol: null, + visualLines: [['line1'], ['line2'], ['line3']], + visualScrollRow: 0, + visualCursor: { row: 1, col: 2 }, + viewport: { width: 10, height: 5 }, + undoStack: [], + redoStack: [], + }; + + const action: TextBufferAction = { + type: 'vim_delete_line', + payload: { count: 1 }, + }; + + const result = textBufferReducer(initialState, action); + + // After deleting line2, we should have line1 and line3, with cursor on line3 (now at index 1) + expect(result.lines).toEqual(['line1', 'line3']); + expect(result.cursorRow).toBe(1); + expect(result.cursorCol).toBe(0); + }); + + it('should delete multiple lines when count > 1', () => { + const initialState: TextBufferState = { + lines: ['line1', 'line2', 'line3', 'line4'], + cursorRow: 1, + cursorCol: 0, + preferredCol: null, + visualLines: [['line1'], ['line2'], ['line3'], ['line4']], + visualScrollRow: 0, + 
visualCursor: { row: 1, col: 0 }, + viewport: { width: 10, height: 5 }, + undoStack: [], + redoStack: [], + }; + + const action: TextBufferAction = { + type: 'vim_delete_line', + payload: { count: 2 }, + }; + + const result = textBufferReducer(initialState, action); + + // Should delete line2 and line3, leaving line1 and line4 + expect(result.lines).toEqual(['line1', 'line4']); + expect(result.cursorRow).toBe(1); + expect(result.cursorCol).toBe(0); + }); + + it('should clear single line content when only one line exists', () => { + const initialState: TextBufferState = { + lines: ['only line'], + cursorRow: 0, + cursorCol: 5, + preferredCol: null, + visualLines: [['only line']], + visualScrollRow: 0, + visualCursor: { row: 0, col: 5 }, + viewport: { width: 10, height: 5 }, + undoStack: [], + redoStack: [], + }; + + const action: TextBufferAction = { + type: 'vim_delete_line', + payload: { count: 1 }, + }; + + const result = textBufferReducer(initialState, action); + + // Should clear the line content but keep the line + expect(result.lines).toEqual(['']); + expect(result.cursorRow).toBe(0); + expect(result.cursorCol).toBe(0); + }); + + it('should handle deleting the last line properly', () => { + const initialState: TextBufferState = { + lines: ['line1', 'line2'], + cursorRow: 1, + cursorCol: 0, + preferredCol: null, + visualLines: [['line1'], ['line2']], + visualScrollRow: 0, + visualCursor: { row: 1, col: 0 }, + viewport: { width: 10, height: 5 }, + undoStack: [], + redoStack: [], + }; + + const action: TextBufferAction = { + type: 'vim_delete_line', + payload: { count: 1 }, + }; + + const result = textBufferReducer(initialState, action); + + // Should delete the last line completely, not leave empty line + expect(result.lines).toEqual(['line1']); + expect(result.cursorRow).toBe(0); + expect(result.cursorCol).toBe(0); + }); + + it('should handle deleting all lines and maintain valid state for subsequent paste', () => { + const initialState: TextBufferState = { + 
lines: ['line1', 'line2', 'line3', 'line4'], + cursorRow: 0, + cursorCol: 0, + preferredCol: null, + visualLines: [['line1'], ['line2'], ['line3'], ['line4']], + visualScrollRow: 0, + visualCursor: { row: 0, col: 0 }, + viewport: { width: 10, height: 5 }, + undoStack: [], + redoStack: [], + }; + + // Delete all 4 lines with 4dd + const deleteAction: TextBufferAction = { + type: 'vim_delete_line', + payload: { count: 4 }, + }; + + const afterDelete = textBufferReducer(initialState, deleteAction); + + // After deleting all lines, should have one empty line + expect(afterDelete.lines).toEqual(['']); + expect(afterDelete.cursorRow).toBe(0); + expect(afterDelete.cursorCol).toBe(0); + + // Now paste multiline content - this should work correctly + const pasteAction: TextBufferAction = { + type: 'insert', + payload: 'new1\nnew2\nnew3\nnew4', + }; + + const afterPaste = textBufferReducer(afterDelete, pasteAction); + + // All lines including the first one should be present + expect(afterPaste.lines).toEqual(['new1', 'new2', 'new3', 'new4']); + expect(afterPaste.cursorRow).toBe(3); + expect(afterPaste.cursorCol).toBe(4); + }); + }); +}); diff --git a/packages/cli/src/ui/components/shared/text-buffer.ts b/packages/cli/src/ui/components/shared/text-buffer.ts index 899f37ab4..9ed742d88 100644 --- a/packages/cli/src/ui/components/shared/text-buffer.ts +++ b/packages/cli/src/ui/components/shared/text-buffer.ts @@ -13,6 +13,7 @@ import { useState, useCallback, useEffect, useMemo, useReducer } from 'react'; import stringWidth from 'string-width'; import { unescapePath } from '@qwen-code/qwen-code-core'; import { toCodePoints, cpLen, cpSlice } from '../../utils/textUtils.js'; +import { handleVimAction, VimAction } from './vim-buffer-actions.js'; export type Direction = | 'left' @@ -32,6 +33,283 @@ function isWordChar(ch: string | undefined): boolean { return !/[\s,.;!?]/.test(ch); } +// Vim-specific word boundary functions +export const findNextWordStart = ( + text: string, + 
currentOffset: number, +): number => { + let i = currentOffset; + + if (i >= text.length) return i; + + const currentChar = text[i]; + + // Skip current word/sequence based on character type + if (/\w/.test(currentChar)) { + // Skip current word characters + while (i < text.length && /\w/.test(text[i])) { + i++; + } + } else if (!/\s/.test(currentChar)) { + // Skip current non-word, non-whitespace characters (like "/", ".", etc.) + while (i < text.length && !/\w/.test(text[i]) && !/\s/.test(text[i])) { + i++; + } + } + + // Skip whitespace + while (i < text.length && /\s/.test(text[i])) { + i++; + } + + // If we reached the end of text and there's no next word, + // vim behavior for dw is to delete to the end of the current word + if (i >= text.length) { + // Go back to find the end of the last word + let endOfLastWord = text.length - 1; + while (endOfLastWord >= 0 && /\s/.test(text[endOfLastWord])) { + endOfLastWord--; + } + // For dw on last word, return position AFTER the last character to delete entire word + return Math.max(currentOffset + 1, endOfLastWord + 1); + } + + return i; +}; + +export const findPrevWordStart = ( + text: string, + currentOffset: number, +): number => { + let i = currentOffset; + + // If at beginning of text, return current position + if (i <= 0) { + return currentOffset; + } + + // Move back one character to start searching + i--; + + // Skip whitespace moving backwards + while (i >= 0 && (text[i] === ' ' || text[i] === '\t' || text[i] === '\n')) { + i--; + } + + if (i < 0) { + return 0; // Reached beginning of text + } + + const charAtI = text[i]; + + if (/\w/.test(charAtI)) { + // We're in a word, move to its beginning + while (i >= 0 && /\w/.test(text[i])) { + i--; + } + return i + 1; // Return first character of word + } else { + // We're in punctuation, move to its beginning + while ( + i >= 0 && + !/\w/.test(text[i]) && + text[i] !== ' ' && + text[i] !== '\t' && + text[i] !== '\n' + ) { + i--; + } + return i + 1; // Return first 
character of punctuation sequence + } +}; + +export const findWordEnd = (text: string, currentOffset: number): number => { + let i = currentOffset; + + // If we're already at the end of a word, advance to next word + if ( + i < text.length && + /\w/.test(text[i]) && + (i + 1 >= text.length || !/\w/.test(text[i + 1])) + ) { + // We're at the end of a word, move forward to find next word + i++; + // Skip whitespace/punctuation to find next word + while (i < text.length && !/\w/.test(text[i])) { + i++; + } + } + + // If we're not on a word character, find the next word + if (i < text.length && !/\w/.test(text[i])) { + while (i < text.length && !/\w/.test(text[i])) { + i++; + } + } + + // Move to end of current word + while (i < text.length && /\w/.test(text[i])) { + i++; + } + + // Move back one to be on the last character of the word + return Math.max(currentOffset, i - 1); +}; + +// Helper functions for vim operations +export const getOffsetFromPosition = ( + row: number, + col: number, + lines: string[], +): number => { + let offset = 0; + for (let i = 0; i < row; i++) { + offset += lines[i].length + 1; // +1 for newline + } + offset += col; + return offset; +}; + +export const getPositionFromOffsets = ( + startOffset: number, + endOffset: number, + lines: string[], +) => { + let offset = 0; + let startRow = 0; + let startCol = 0; + let endRow = 0; + let endCol = 0; + + // Find start position + for (let i = 0; i < lines.length; i++) { + const lineLength = lines[i].length + 1; // +1 for newline + if (offset + lineLength > startOffset) { + startRow = i; + startCol = startOffset - offset; + break; + } + offset += lineLength; + } + + // Find end position + offset = 0; + for (let i = 0; i < lines.length; i++) { + const lineLength = lines[i].length + (i < lines.length - 1 ? 
1 : 0); // +1 for newline except last line + if (offset + lineLength >= endOffset) { + endRow = i; + endCol = endOffset - offset; + break; + } + offset += lineLength; + } + + return { startRow, startCol, endRow, endCol }; +}; + +export const getLineRangeOffsets = ( + startRow: number, + lineCount: number, + lines: string[], +) => { + let startOffset = 0; + + // Calculate start offset + for (let i = 0; i < startRow; i++) { + startOffset += lines[i].length + 1; // +1 for newline + } + + // Calculate end offset + let endOffset = startOffset; + for (let i = 0; i < lineCount; i++) { + const lineIndex = startRow + i; + if (lineIndex < lines.length) { + endOffset += lines[lineIndex].length; + if (lineIndex < lines.length - 1) { + endOffset += 1; // +1 for newline + } + } + } + + return { startOffset, endOffset }; +}; + +export const replaceRangeInternal = ( + state: TextBufferState, + startRow: number, + startCol: number, + endRow: number, + endCol: number, + text: string, +): TextBufferState => { + const currentLine = (row: number) => state.lines[row] || ''; + const currentLineLen = (row: number) => cpLen(currentLine(row)); + const clamp = (value: number, min: number, max: number) => + Math.min(Math.max(value, min), max); + + if ( + startRow > endRow || + (startRow === endRow && startCol > endCol) || + startRow < 0 || + startCol < 0 || + endRow >= state.lines.length || + (endRow < state.lines.length && endCol > currentLineLen(endRow)) + ) { + return state; // Invalid range + } + + const newLines = [...state.lines]; + + const sCol = clamp(startCol, 0, currentLineLen(startRow)); + const eCol = clamp(endCol, 0, currentLineLen(endRow)); + + const prefix = cpSlice(currentLine(startRow), 0, sCol); + const suffix = cpSlice(currentLine(endRow), eCol); + + const normalisedReplacement = text + .replace(/\r\n/g, '\n') + .replace(/\r/g, '\n'); + const replacementParts = normalisedReplacement.split('\n'); + + // Replace the content + if (startRow === endRow) { + newLines[startRow] = 
prefix + normalisedReplacement + suffix; + } else { + const firstLine = prefix + replacementParts[0]; + if (replacementParts.length === 1) { + // Single line of replacement text, but spanning multiple original lines + newLines.splice(startRow, endRow - startRow + 1, firstLine + suffix); + } else { + // Multi-line replacement text + const lastLine = replacementParts[replacementParts.length - 1] + suffix; + const middleLines = replacementParts.slice(1, -1); + newLines.splice( + startRow, + endRow - startRow + 1, + firstLine, + ...middleLines, + lastLine, + ); + } + } + + const finalCursorRow = startRow + replacementParts.length - 1; + const finalCursorCol = + (replacementParts.length > 1 ? 0 : sCol) + + cpLen(replacementParts[replacementParts.length - 1]); + + return { + ...state, + lines: newLines, + cursorRow: Math.min(Math.max(finalCursorRow, 0), newLines.length - 1), + cursorCol: Math.max( + 0, + Math.min(finalCursorCol, cpLen(newLines[finalCursorRow] || '')), + ), + preferredCol: null, + }; +}; + /** * Strip characters that can break terminal rendering. 
* @@ -40,7 +318,7 @@ function isWordChar(ch: string | undefined): boolean { */ function stripUnsafeCharacters(str: string): string { const stripped = stripAnsi(str); - return toCodePoints(stripAnsi(stripped)) + return toCodePoints(stripped) .filter((char) => { if (char.length > 1) return false; const code = char.codePointAt(0); @@ -158,6 +436,33 @@ export function offsetToLogicalPos( return [row, col]; } +/** + * Converts logical row/col position to absolute text offset + * Inverse operation of offsetToLogicalPos + */ +export function logicalPosToOffset( + lines: string[], + row: number, + col: number, +): number { + let offset = 0; + + // Clamp row to valid range + const actualRow = Math.min(row, lines.length - 1); + + // Add lengths of all lines before the target row + for (let i = 0; i < actualRow; i++) { + offset += cpLen(lines[i]) + 1; // +1 for newline + } + + // Add column offset within the target row + if (actualRow >= 0 && actualRow < lines.length) { + offset += Math.min(col, cpLen(lines[actualRow])); + } + + return offset; +} + // Helper to calculate visual lines and map cursor positions function calculateVisualLayout( logicalLines: string[], @@ -376,7 +681,7 @@ function calculateVisualLayout( // --- Start of reducer logic --- -interface TextBufferState { +export interface TextBufferState { lines: string[]; cursorRow: number; cursorCol: number; @@ -390,7 +695,20 @@ interface TextBufferState { const historyLimit = 100; -type TextBufferAction = +export const pushUndo = (currentState: TextBufferState): TextBufferState => { + const snapshot = { + lines: [...currentState.lines], + cursorRow: currentState.cursorRow, + cursorCol: currentState.cursorCol, + }; + const newStack = [...currentState.undoStack, snapshot]; + if (newStack.length > historyLimit) { + newStack.shift(); + } + return { ...currentState, undoStack: newStack, redoStack: [] }; +}; + +export type TextBufferAction = | { type: 'set_text'; payload: string; pushToUndo?: boolean } | { type: 'insert'; 
payload: string } | { type: 'backspace' } @@ -419,24 +737,49 @@ type TextBufferAction = } | { type: 'move_to_offset'; payload: { offset: number } } | { type: 'create_undo_snapshot' } - | { type: 'set_viewport_width'; payload: number }; + | { type: 'set_viewport_width'; payload: number } + | { type: 'vim_delete_word_forward'; payload: { count: number } } + | { type: 'vim_delete_word_backward'; payload: { count: number } } + | { type: 'vim_delete_word_end'; payload: { count: number } } + | { type: 'vim_change_word_forward'; payload: { count: number } } + | { type: 'vim_change_word_backward'; payload: { count: number } } + | { type: 'vim_change_word_end'; payload: { count: number } } + | { type: 'vim_delete_line'; payload: { count: number } } + | { type: 'vim_change_line'; payload: { count: number } } + | { type: 'vim_delete_to_end_of_line' } + | { type: 'vim_change_to_end_of_line' } + | { + type: 'vim_change_movement'; + payload: { movement: 'h' | 'j' | 'k' | 'l'; count: number }; + } + // New vim actions for stateless command handling + | { type: 'vim_move_left'; payload: { count: number } } + | { type: 'vim_move_right'; payload: { count: number } } + | { type: 'vim_move_up'; payload: { count: number } } + | { type: 'vim_move_down'; payload: { count: number } } + | { type: 'vim_move_word_forward'; payload: { count: number } } + | { type: 'vim_move_word_backward'; payload: { count: number } } + | { type: 'vim_move_word_end'; payload: { count: number } } + | { type: 'vim_delete_char'; payload: { count: number } } + | { type: 'vim_insert_at_cursor' } + | { type: 'vim_append_at_cursor' } + | { type: 'vim_open_line_below' } + | { type: 'vim_open_line_above' } + | { type: 'vim_append_at_line_end' } + | { type: 'vim_insert_at_line_start' } + | { type: 'vim_move_to_line_start' } + | { type: 'vim_move_to_line_end' } + | { type: 'vim_move_to_first_nonwhitespace' } + | { type: 'vim_move_to_first_line' } + | { type: 'vim_move_to_last_line' } + | { type: 'vim_move_to_line'; 
payload: { lineNumber: number } } + | { type: 'vim_escape_insert_mode' }; export function textBufferReducer( state: TextBufferState, action: TextBufferAction, ): TextBufferState { - const pushUndo = (currentState: TextBufferState): TextBufferState => { - const snapshot = { - lines: [...currentState.lines], - cursorRow: currentState.cursorRow, - cursorCol: currentState.cursorCol, - }; - const newStack = [...currentState.undoStack, snapshot]; - if (newStack.length > historyLimit) { - newStack.shift(); - } - return { ...currentState, undoStack: newStack, redoStack: [] }; - }; + const pushUndoLocal = pushUndo; const currentLine = (r: number): string => state.lines[r] ?? ''; const currentLineLen = (r: number): number => cpLen(currentLine(r)); @@ -445,7 +788,7 @@ export function textBufferReducer( case 'set_text': { let nextState = state; if (action.pushToUndo !== false) { - nextState = pushUndo(state); + nextState = pushUndoLocal(state); } const newContentLines = action.payload .replace(/\r\n?/g, '\n') @@ -462,7 +805,7 @@ export function textBufferReducer( } case 'insert': { - const nextState = pushUndo(state); + const nextState = pushUndoLocal(state); const newLines = [...nextState.lines]; let newCursorRow = nextState.cursorRow; let newCursorCol = nextState.cursorCol; @@ -504,7 +847,7 @@ export function textBufferReducer( } case 'backspace': { - const nextState = pushUndo(state); + const nextState = pushUndoLocal(state); const newLines = [...nextState.lines]; let newCursorRow = nextState.cursorRow; let newCursorCol = nextState.cursorCol; @@ -700,14 +1043,14 @@ export function textBufferReducer( const { cursorRow, cursorCol, lines } = state; const lineContent = currentLine(cursorRow); if (cursorCol < currentLineLen(cursorRow)) { - const nextState = pushUndo(state); + const nextState = pushUndoLocal(state); const newLines = [...nextState.lines]; newLines[cursorRow] = cpSlice(lineContent, 0, cursorCol) + cpSlice(lineContent, cursorCol + 1); return { ...nextState, lines: 
newLines, preferredCol: null }; } else if (cursorRow < lines.length - 1) { - const nextState = pushUndo(state); + const nextState = pushUndoLocal(state); const nextLineContent = currentLine(cursorRow + 1); const newLines = [...nextState.lines]; newLines[cursorRow] = lineContent + nextLineContent; @@ -722,7 +1065,7 @@ export function textBufferReducer( if (cursorCol === 0 && cursorRow === 0) return state; if (cursorCol === 0) { // Act as a backspace - const nextState = pushUndo(state); + const nextState = pushUndoLocal(state); const prevLineContent = currentLine(cursorRow - 1); const currentLineContentVal = currentLine(cursorRow); const newCol = cpLen(prevLineContent); @@ -737,7 +1080,7 @@ export function textBufferReducer( preferredCol: null, }; } - const nextState = pushUndo(state); + const nextState = pushUndoLocal(state); const lineContent = currentLine(cursorRow); const arr = toCodePoints(lineContent); let start = cursorCol; @@ -773,14 +1116,14 @@ export function textBufferReducer( return state; if (cursorCol >= arr.length) { // Act as a delete - const nextState = pushUndo(state); + const nextState = pushUndoLocal(state); const nextLineContent = currentLine(cursorRow + 1); const newLines = [...nextState.lines]; newLines[cursorRow] = lineContent + nextLineContent; newLines.splice(cursorRow + 1, 1); return { ...nextState, lines: newLines, preferredCol: null }; } - const nextState = pushUndo(state); + const nextState = pushUndoLocal(state); let end = cursorCol; while (end < arr.length && !isWordChar(arr[end])) end++; while (end < arr.length && isWordChar(arr[end])) end++; @@ -794,13 +1137,13 @@ export function textBufferReducer( const { cursorRow, cursorCol, lines } = state; const lineContent = currentLine(cursorRow); if (cursorCol < currentLineLen(cursorRow)) { - const nextState = pushUndo(state); + const nextState = pushUndoLocal(state); const newLines = [...nextState.lines]; newLines[cursorRow] = cpSlice(lineContent, 0, cursorCol); return { ...nextState, lines: 
newLines }; } else if (cursorRow < lines.length - 1) { // Act as a delete - const nextState = pushUndo(state); + const nextState = pushUndoLocal(state); const nextLineContent = currentLine(cursorRow + 1); const newLines = [...nextState.lines]; newLines[cursorRow] = lineContent + nextLineContent; @@ -813,7 +1156,7 @@ export function textBufferReducer( case 'kill_line_left': { const { cursorRow, cursorCol } = state; if (cursorCol > 0) { - const nextState = pushUndo(state); + const nextState = pushUndoLocal(state); const lineContent = currentLine(cursorRow); const newLines = [...nextState.lines]; newLines[cursorRow] = cpSlice(lineContent, cursorCol); @@ -863,66 +1206,15 @@ export function textBufferReducer( case 'replace_range': { const { startRow, startCol, endRow, endCol, text } = action.payload; - if ( - startRow > endRow || - (startRow === endRow && startCol > endCol) || - startRow < 0 || - startCol < 0 || - endRow >= state.lines.length || - (endRow < state.lines.length && endCol > currentLineLen(endRow)) - ) { - return state; // Invalid range - } - - const nextState = pushUndo(state); - const newLines = [...nextState.lines]; - - const sCol = clamp(startCol, 0, currentLineLen(startRow)); - const eCol = clamp(endCol, 0, currentLineLen(endRow)); - - const prefix = cpSlice(currentLine(startRow), 0, sCol); - const suffix = cpSlice(currentLine(endRow), eCol); - - const normalisedReplacement = text - .replace(/\r\n/g, '\n') - .replace(/\r/g, '\n'); - const replacementParts = normalisedReplacement.split('\n'); - - // Replace the content - if (startRow === endRow) { - newLines[startRow] = prefix + normalisedReplacement + suffix; - } else { - const firstLine = prefix + replacementParts[0]; - if (replacementParts.length === 1) { - // Single line of replacement text, but spanning multiple original lines - newLines.splice(startRow, endRow - startRow + 1, firstLine + suffix); - } else { - // Multi-line replacement text - const lastLine = - 
replacementParts[replacementParts.length - 1] + suffix; - const middleLines = replacementParts.slice(1, -1); - newLines.splice( - startRow, - endRow - startRow + 1, - firstLine, - ...middleLines, - lastLine, - ); - } - } - - const finalCursorRow = startRow + replacementParts.length - 1; - const finalCursorCol = - (replacementParts.length > 1 ? 0 : sCol) + - cpLen(replacementParts[replacementParts.length - 1]); - - return { - ...nextState, - lines: newLines, - cursorRow: finalCursorRow, - cursorCol: finalCursorCol, - preferredCol: null, - }; + const nextState = pushUndoLocal(state); + return replaceRangeInternal( + nextState, + startRow, + startCol, + endRow, + endCol, + text, + ); } case 'move_to_offset': { @@ -940,9 +1232,44 @@ export function textBufferReducer( } case 'create_undo_snapshot': { - return pushUndo(state); + return pushUndoLocal(state); } + // Vim-specific operations + case 'vim_delete_word_forward': + case 'vim_delete_word_backward': + case 'vim_delete_word_end': + case 'vim_change_word_forward': + case 'vim_change_word_backward': + case 'vim_change_word_end': + case 'vim_delete_line': + case 'vim_change_line': + case 'vim_delete_to_end_of_line': + case 'vim_change_to_end_of_line': + case 'vim_change_movement': + case 'vim_move_left': + case 'vim_move_right': + case 'vim_move_up': + case 'vim_move_down': + case 'vim_move_word_forward': + case 'vim_move_word_backward': + case 'vim_move_word_end': + case 'vim_delete_char': + case 'vim_insert_at_cursor': + case 'vim_append_at_cursor': + case 'vim_open_line_below': + case 'vim_open_line_above': + case 'vim_append_at_line_end': + case 'vim_insert_at_line_start': + case 'vim_move_to_line_start': + case 'vim_move_to_line_end': + case 'vim_move_to_first_nonwhitespace': + case 'vim_move_to_first_line': + case 'vim_move_to_last_line': + case 'vim_move_to_line': + case 'vim_escape_insert_mode': + return handleVimAction(state, action as VimAction); + default: { const exhaustiveCheck: never = action; 
console.error(`Unknown action encountered: ${exhaustiveCheck}`); @@ -1023,26 +1350,27 @@ export function useTextBuffer({ }, [visualCursor, visualScrollRow, viewport]); const insert = useCallback( - (ch: string): void => { + (ch: string, { paste = false }: { paste?: boolean } = {}): void => { if (/[\n\r]/.test(ch)) { dispatch({ type: 'insert', payload: ch }); return; } const minLengthToInferAsDragDrop = 3; - if (ch.length >= minLengthToInferAsDragDrop && !shellModeActive) { - let potentialPath = ch; - if ( - potentialPath.length > 2 && - potentialPath.startsWith("'") && - potentialPath.endsWith("'") - ) { - potentialPath = ch.slice(1, -1); + if ( + ch.length >= minLengthToInferAsDragDrop && + !shellModeActive && + paste + ) { + let potentialPath = ch.trim(); + const quoteMatch = potentialPath.match(/^'(.*)'$/); + if (quoteMatch) { + potentialPath = quoteMatch[1]; } potentialPath = potentialPath.trim(); if (isValidPath(unescapePath(potentialPath))) { - ch = `@${potentialPath}`; + ch = `@${potentialPath} `; } } @@ -1109,6 +1437,139 @@ export function useTextBuffer({ dispatch({ type: 'kill_line_left' }); }, []); + // Vim-specific operations + const vimDeleteWordForward = useCallback((count: number): void => { + dispatch({ type: 'vim_delete_word_forward', payload: { count } }); + }, []); + + const vimDeleteWordBackward = useCallback((count: number): void => { + dispatch({ type: 'vim_delete_word_backward', payload: { count } }); + }, []); + + const vimDeleteWordEnd = useCallback((count: number): void => { + dispatch({ type: 'vim_delete_word_end', payload: { count } }); + }, []); + + const vimChangeWordForward = useCallback((count: number): void => { + dispatch({ type: 'vim_change_word_forward', payload: { count } }); + }, []); + + const vimChangeWordBackward = useCallback((count: number): void => { + dispatch({ type: 'vim_change_word_backward', payload: { count } }); + }, []); + + const vimChangeWordEnd = useCallback((count: number): void => { + dispatch({ type: 
'vim_change_word_end', payload: { count } }); + }, []); + + const vimDeleteLine = useCallback((count: number): void => { + dispatch({ type: 'vim_delete_line', payload: { count } }); + }, []); + + const vimChangeLine = useCallback((count: number): void => { + dispatch({ type: 'vim_change_line', payload: { count } }); + }, []); + + const vimDeleteToEndOfLine = useCallback((): void => { + dispatch({ type: 'vim_delete_to_end_of_line' }); + }, []); + + const vimChangeToEndOfLine = useCallback((): void => { + dispatch({ type: 'vim_change_to_end_of_line' }); + }, []); + + const vimChangeMovement = useCallback( + (movement: 'h' | 'j' | 'k' | 'l', count: number): void => { + dispatch({ type: 'vim_change_movement', payload: { movement, count } }); + }, + [], + ); + + // New vim navigation and operation methods + const vimMoveLeft = useCallback((count: number): void => { + dispatch({ type: 'vim_move_left', payload: { count } }); + }, []); + + const vimMoveRight = useCallback((count: number): void => { + dispatch({ type: 'vim_move_right', payload: { count } }); + }, []); + + const vimMoveUp = useCallback((count: number): void => { + dispatch({ type: 'vim_move_up', payload: { count } }); + }, []); + + const vimMoveDown = useCallback((count: number): void => { + dispatch({ type: 'vim_move_down', payload: { count } }); + }, []); + + const vimMoveWordForward = useCallback((count: number): void => { + dispatch({ type: 'vim_move_word_forward', payload: { count } }); + }, []); + + const vimMoveWordBackward = useCallback((count: number): void => { + dispatch({ type: 'vim_move_word_backward', payload: { count } }); + }, []); + + const vimMoveWordEnd = useCallback((count: number): void => { + dispatch({ type: 'vim_move_word_end', payload: { count } }); + }, []); + + const vimDeleteChar = useCallback((count: number): void => { + dispatch({ type: 'vim_delete_char', payload: { count } }); + }, []); + + const vimInsertAtCursor = useCallback((): void => { + dispatch({ type: 
'vim_insert_at_cursor' }); + }, []); + + const vimAppendAtCursor = useCallback((): void => { + dispatch({ type: 'vim_append_at_cursor' }); + }, []); + + const vimOpenLineBelow = useCallback((): void => { + dispatch({ type: 'vim_open_line_below' }); + }, []); + + const vimOpenLineAbove = useCallback((): void => { + dispatch({ type: 'vim_open_line_above' }); + }, []); + + const vimAppendAtLineEnd = useCallback((): void => { + dispatch({ type: 'vim_append_at_line_end' }); + }, []); + + const vimInsertAtLineStart = useCallback((): void => { + dispatch({ type: 'vim_insert_at_line_start' }); + }, []); + + const vimMoveToLineStart = useCallback((): void => { + dispatch({ type: 'vim_move_to_line_start' }); + }, []); + + const vimMoveToLineEnd = useCallback((): void => { + dispatch({ type: 'vim_move_to_line_end' }); + }, []); + + const vimMoveToFirstNonWhitespace = useCallback((): void => { + dispatch({ type: 'vim_move_to_first_nonwhitespace' }); + }, []); + + const vimMoveToFirstLine = useCallback((): void => { + dispatch({ type: 'vim_move_to_first_line' }); + }, []); + + const vimMoveToLastLine = useCallback((): void => { + dispatch({ type: 'vim_move_to_last_line' }); + }, []); + + const vimMoveToLine = useCallback((lineNumber: number): void => { + dispatch({ type: 'vim_move_to_line', payload: { lineNumber } }); + }, []); + + const vimEscapeInsertMode = useCallback((): void => { + dispatch({ type: 'vim_escape_insert_mode' }); + }, []); + const openInExternalEditor = useCallback( async (opts: { editor?: string } = {}): Promise => { const editor = @@ -1203,7 +1664,7 @@ export function useTextBuffer({ backspace(); else if (key.name === 'delete' || (key.ctrl && key.name === 'd')) del(); else if (input && !key.ctrl && !key.meta) { - insert(input); + insert(input, { paste: key.paste }); } }, [newline, move, deleteWordLeft, deleteWordRight, backspace, del, insert], @@ -1272,6 +1733,39 @@ export function useTextBuffer({ killLineLeft, handleInput, openInExternalEditor, + // 
Vim-specific operations + vimDeleteWordForward, + vimDeleteWordBackward, + vimDeleteWordEnd, + vimChangeWordForward, + vimChangeWordBackward, + vimChangeWordEnd, + vimDeleteLine, + vimChangeLine, + vimDeleteToEndOfLine, + vimChangeToEndOfLine, + vimChangeMovement, + vimMoveLeft, + vimMoveRight, + vimMoveUp, + vimMoveDown, + vimMoveWordForward, + vimMoveWordBackward, + vimMoveWordEnd, + vimDeleteChar, + vimInsertAtCursor, + vimAppendAtCursor, + vimOpenLineBelow, + vimOpenLineAbove, + vimAppendAtLineEnd, + vimInsertAtLineStart, + vimMoveToLineStart, + vimMoveToLineEnd, + vimMoveToFirstNonWhitespace, + vimMoveToFirstLine, + vimMoveToLastLine, + vimMoveToLine, + vimEscapeInsertMode, }; return returnValue; } @@ -1306,7 +1800,7 @@ export interface TextBuffer { /** * Insert a single character or string without newlines. */ - insert: (ch: string) => void; + insert: (ch: string, opts?: { paste?: boolean }) => void; newline: () => void; backspace: () => void; del: () => void; @@ -1386,4 +1880,134 @@ export interface TextBuffer { replacementText: string, ) => void; moveToOffset(offset: number): void; + + // Vim-specific operations + /** + * Delete N words forward from cursor position (vim 'dw' command) + */ + vimDeleteWordForward: (count: number) => void; + /** + * Delete N words backward from cursor position (vim 'db' command) + */ + vimDeleteWordBackward: (count: number) => void; + /** + * Delete to end of N words from cursor position (vim 'de' command) + */ + vimDeleteWordEnd: (count: number) => void; + /** + * Change N words forward from cursor position (vim 'cw' command) + */ + vimChangeWordForward: (count: number) => void; + /** + * Change N words backward from cursor position (vim 'cb' command) + */ + vimChangeWordBackward: (count: number) => void; + /** + * Change to end of N words from cursor position (vim 'ce' command) + */ + vimChangeWordEnd: (count: number) => void; + /** + * Delete N lines from cursor position (vim 'dd' command) + */ + vimDeleteLine: (count: 
number) => void; + /** + * Change N lines from cursor position (vim 'cc' command) + */ + vimChangeLine: (count: number) => void; + /** + * Delete from cursor to end of line (vim 'D' command) + */ + vimDeleteToEndOfLine: () => void; + /** + * Change from cursor to end of line (vim 'C' command) + */ + vimChangeToEndOfLine: () => void; + /** + * Change movement operations (vim 'ch', 'cj', 'ck', 'cl' commands) + */ + vimChangeMovement: (movement: 'h' | 'j' | 'k' | 'l', count: number) => void; + /** + * Move cursor left N times (vim 'h' command) + */ + vimMoveLeft: (count: number) => void; + /** + * Move cursor right N times (vim 'l' command) + */ + vimMoveRight: (count: number) => void; + /** + * Move cursor up N times (vim 'k' command) + */ + vimMoveUp: (count: number) => void; + /** + * Move cursor down N times (vim 'j' command) + */ + vimMoveDown: (count: number) => void; + /** + * Move cursor forward N words (vim 'w' command) + */ + vimMoveWordForward: (count: number) => void; + /** + * Move cursor backward N words (vim 'b' command) + */ + vimMoveWordBackward: (count: number) => void; + /** + * Move cursor to end of Nth word (vim 'e' command) + */ + vimMoveWordEnd: (count: number) => void; + /** + * Delete N characters at cursor (vim 'x' command) + */ + vimDeleteChar: (count: number) => void; + /** + * Enter insert mode at cursor (vim 'i' command) + */ + vimInsertAtCursor: () => void; + /** + * Enter insert mode after cursor (vim 'a' command) + */ + vimAppendAtCursor: () => void; + /** + * Open new line below and enter insert mode (vim 'o' command) + */ + vimOpenLineBelow: () => void; + /** + * Open new line above and enter insert mode (vim 'O' command) + */ + vimOpenLineAbove: () => void; + /** + * Move to end of line and enter insert mode (vim 'A' command) + */ + vimAppendAtLineEnd: () => void; + /** + * Move to first non-whitespace and enter insert mode (vim 'I' command) + */ + vimInsertAtLineStart: () => void; + /** + * Move cursor to beginning of line (vim '0' 
command) + */ + vimMoveToLineStart: () => void; + /** + * Move cursor to end of line (vim '$' command) + */ + vimMoveToLineEnd: () => void; + /** + * Move cursor to first non-whitespace character (vim '^' command) + */ + vimMoveToFirstNonWhitespace: () => void; + /** + * Move cursor to first line (vim 'gg' command) + */ + vimMoveToFirstLine: () => void; + /** + * Move cursor to last line (vim 'G' command) + */ + vimMoveToLastLine: () => void; + /** + * Move cursor to specific line number (vim '[N]G' command) + */ + vimMoveToLine: (lineNumber: number) => void; + /** + * Handle escape from insert mode (moves cursor left if not at line start) + */ + vimEscapeInsertMode: () => void; } diff --git a/packages/cli/src/ui/components/shared/vim-buffer-actions.test.ts b/packages/cli/src/ui/components/shared/vim-buffer-actions.test.ts new file mode 100644 index 000000000..f268bb1ef --- /dev/null +++ b/packages/cli/src/ui/components/shared/vim-buffer-actions.test.ts @@ -0,0 +1,796 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect } from 'vitest'; +import { handleVimAction } from './vim-buffer-actions.js'; +import type { TextBufferState } from './text-buffer.js'; + +// Helper to create test state +const createTestState = ( + lines: string[] = ['hello world'], + cursorRow = 0, + cursorCol = 0, +): TextBufferState => ({ + lines, + cursorRow, + cursorCol, + preferredCol: null, + undoStack: [], + redoStack: [], + clipboard: null, + selectionAnchor: null, + viewportWidth: 80, +}); + +describe('vim-buffer-actions', () => { + describe('Movement commands', () => { + describe('vim_move_left', () => { + it('should move cursor left by count', () => { + const state = createTestState(['hello world'], 0, 5); + const action = { + type: 'vim_move_left' as const, + payload: { count: 3 }, + }; + + const result = handleVimAction(state, action); + + expect(result.cursorCol).toBe(2); + 
expect(result.preferredCol).toBeNull(); + }); + + it('should not move past beginning of line', () => { + const state = createTestState(['hello'], 0, 2); + const action = { + type: 'vim_move_left' as const, + payload: { count: 5 }, + }; + + const result = handleVimAction(state, action); + + expect(result.cursorCol).toBe(0); + }); + + it('should wrap to previous line when at beginning', () => { + const state = createTestState(['line1', 'line2'], 1, 0); + const action = { + type: 'vim_move_left' as const, + payload: { count: 1 }, + }; + + const result = handleVimAction(state, action); + + expect(result.cursorRow).toBe(0); + expect(result.cursorCol).toBe(4); // On last character '1' of 'line1' + }); + + it('should handle multiple line wrapping', () => { + const state = createTestState(['abc', 'def', 'ghi'], 2, 0); + const action = { + type: 'vim_move_left' as const, + payload: { count: 5 }, + }; + + const result = handleVimAction(state, action); + + expect(result.cursorRow).toBe(0); + expect(result.cursorCol).toBe(1); // On 'b' after 5 left movements + }); + + it('should correctly handle h/l movement between lines', () => { + // Start at end of first line at 'd' (position 10) + let state = createTestState(['hello world', 'foo bar'], 0, 10); + + // Move right - should go to beginning of next line + state = handleVimAction(state, { + type: 'vim_move_right' as const, + payload: { count: 1 }, + }); + expect(state.cursorRow).toBe(1); + expect(state.cursorCol).toBe(0); // Should be on 'f' + + // Move left - should go back to end of previous line on 'd' + state = handleVimAction(state, { + type: 'vim_move_left' as const, + payload: { count: 1 }, + }); + expect(state.cursorRow).toBe(0); + expect(state.cursorCol).toBe(10); // Should be on 'd', not past it + }); + }); + + describe('vim_move_right', () => { + it('should move cursor right by count', () => { + const state = createTestState(['hello world'], 0, 2); + const action = { + type: 'vim_move_right' as const, + payload: { 
count: 3 }, + }; + + const result = handleVimAction(state, action); + + expect(result.cursorCol).toBe(5); + }); + + it('should not move past last character of line', () => { + const state = createTestState(['hello'], 0, 3); + const action = { + type: 'vim_move_right' as const, + payload: { count: 5 }, + }; + + const result = handleVimAction(state, action); + + expect(result.cursorCol).toBe(4); // Last character of 'hello' + }); + + it('should wrap to next line when at end', () => { + const state = createTestState(['line1', 'line2'], 0, 4); // At end of 'line1' + const action = { + type: 'vim_move_right' as const, + payload: { count: 1 }, + }; + + const result = handleVimAction(state, action); + + expect(result.cursorRow).toBe(1); + expect(result.cursorCol).toBe(0); + }); + }); + + describe('vim_move_up', () => { + it('should move cursor up by count', () => { + const state = createTestState(['line1', 'line2', 'line3'], 2, 3); + const action = { type: 'vim_move_up' as const, payload: { count: 2 } }; + + const result = handleVimAction(state, action); + + expect(result.cursorRow).toBe(0); + expect(result.cursorCol).toBe(3); + }); + + it('should not move past first line', () => { + const state = createTestState(['line1', 'line2'], 1, 3); + const action = { type: 'vim_move_up' as const, payload: { count: 5 } }; + + const result = handleVimAction(state, action); + + expect(result.cursorRow).toBe(0); + }); + + it('should adjust column for shorter lines', () => { + const state = createTestState(['short', 'very long line'], 1, 10); + const action = { type: 'vim_move_up' as const, payload: { count: 1 } }; + + const result = handleVimAction(state, action); + + expect(result.cursorRow).toBe(0); + expect(result.cursorCol).toBe(5); // End of 'short' + }); + }); + + describe('vim_move_down', () => { + it('should move cursor down by count', () => { + const state = createTestState(['line1', 'line2', 'line3'], 0, 2); + const action = { + type: 'vim_move_down' as const, + payload: { 
count: 2 }, + }; + + const result = handleVimAction(state, action); + + expect(result.cursorRow).toBe(2); + expect(result.cursorCol).toBe(2); + }); + + it('should not move past last line', () => { + const state = createTestState(['line1', 'line2'], 0, 2); + const action = { + type: 'vim_move_down' as const, + payload: { count: 5 }, + }; + + const result = handleVimAction(state, action); + + expect(result.cursorRow).toBe(1); + }); + }); + + describe('vim_move_word_forward', () => { + it('should move to start of next word', () => { + const state = createTestState(['hello world test'], 0, 0); + const action = { + type: 'vim_move_word_forward' as const, + payload: { count: 1 }, + }; + + const result = handleVimAction(state, action); + + expect(result.cursorCol).toBe(6); // Start of 'world' + }); + + it('should handle multiple words', () => { + const state = createTestState(['hello world test'], 0, 0); + const action = { + type: 'vim_move_word_forward' as const, + payload: { count: 2 }, + }; + + const result = handleVimAction(state, action); + + expect(result.cursorCol).toBe(12); // Start of 'test' + }); + + it('should handle punctuation correctly', () => { + const state = createTestState(['hello, world!'], 0, 0); + const action = { + type: 'vim_move_word_forward' as const, + payload: { count: 1 }, + }; + + const result = handleVimAction(state, action); + + expect(result.cursorCol).toBe(5); // Start of ',' + }); + }); + + describe('vim_move_word_backward', () => { + it('should move to start of previous word', () => { + const state = createTestState(['hello world test'], 0, 12); + const action = { + type: 'vim_move_word_backward' as const, + payload: { count: 1 }, + }; + + const result = handleVimAction(state, action); + + expect(result.cursorCol).toBe(6); // Start of 'world' + }); + + it('should handle multiple words', () => { + const state = createTestState(['hello world test'], 0, 12); + const action = { + type: 'vim_move_word_backward' as const, + payload: { count: 2 
}, + }; + + const result = handleVimAction(state, action); + + expect(result.cursorCol).toBe(0); // Start of 'hello' + }); + }); + + describe('vim_move_word_end', () => { + it('should move to end of current word', () => { + const state = createTestState(['hello world'], 0, 0); + const action = { + type: 'vim_move_word_end' as const, + payload: { count: 1 }, + }; + + const result = handleVimAction(state, action); + + expect(result.cursorCol).toBe(4); // End of 'hello' + }); + + it('should move to end of next word if already at word end', () => { + const state = createTestState(['hello world'], 0, 4); + const action = { + type: 'vim_move_word_end' as const, + payload: { count: 1 }, + }; + + const result = handleVimAction(state, action); + + expect(result.cursorCol).toBe(10); // End of 'world' + }); + }); + + describe('Position commands', () => { + it('vim_move_to_line_start should move to column 0', () => { + const state = createTestState(['hello world'], 0, 5); + const action = { type: 'vim_move_to_line_start' as const }; + + const result = handleVimAction(state, action); + + expect(result.cursorCol).toBe(0); + }); + + it('vim_move_to_line_end should move to last character', () => { + const state = createTestState(['hello world'], 0, 0); + const action = { type: 'vim_move_to_line_end' as const }; + + const result = handleVimAction(state, action); + + expect(result.cursorCol).toBe(10); // Last character of 'hello world' + }); + + it('vim_move_to_first_nonwhitespace should skip leading whitespace', () => { + const state = createTestState([' hello world'], 0, 0); + const action = { type: 'vim_move_to_first_nonwhitespace' as const }; + + const result = handleVimAction(state, action); + + expect(result.cursorCol).toBe(3); // Position of 'h' + }); + + it('vim_move_to_first_line should move to row 0', () => { + const state = createTestState(['line1', 'line2', 'line3'], 2, 5); + const action = { type: 'vim_move_to_first_line' as const }; + + const result = 
handleVimAction(state, action); + + expect(result.cursorRow).toBe(0); + expect(result.cursorCol).toBe(0); + }); + + it('vim_move_to_last_line should move to last row', () => { + const state = createTestState(['line1', 'line2', 'line3'], 0, 5); + const action = { type: 'vim_move_to_last_line' as const }; + + const result = handleVimAction(state, action); + + expect(result.cursorRow).toBe(2); + expect(result.cursorCol).toBe(0); + }); + + it('vim_move_to_line should move to specific line', () => { + const state = createTestState(['line1', 'line2', 'line3'], 0, 5); + const action = { + type: 'vim_move_to_line' as const, + payload: { lineNumber: 2 }, + }; + + const result = handleVimAction(state, action); + + expect(result.cursorRow).toBe(1); // 0-indexed + expect(result.cursorCol).toBe(0); + }); + + it('vim_move_to_line should clamp to valid range', () => { + const state = createTestState(['line1', 'line2'], 0, 0); + const action = { + type: 'vim_move_to_line' as const, + payload: { lineNumber: 10 }, + }; + + const result = handleVimAction(state, action); + + expect(result.cursorRow).toBe(1); // Last line + }); + }); + }); + + describe('Edit commands', () => { + describe('vim_delete_char', () => { + it('should delete single character', () => { + const state = createTestState(['hello'], 0, 1); + const action = { + type: 'vim_delete_char' as const, + payload: { count: 1 }, + }; + + const result = handleVimAction(state, action); + + expect(result.lines[0]).toBe('hllo'); + expect(result.cursorCol).toBe(1); + }); + + it('should delete multiple characters', () => { + const state = createTestState(['hello'], 0, 1); + const action = { + type: 'vim_delete_char' as const, + payload: { count: 3 }, + }; + + const result = handleVimAction(state, action); + + expect(result.lines[0]).toBe('ho'); + expect(result.cursorCol).toBe(1); + }); + + it('should not delete past end of line', () => { + const state = createTestState(['hello'], 0, 3); + const action = { + type: 'vim_delete_char' 
as const, + payload: { count: 5 }, + }; + + const result = handleVimAction(state, action); + + expect(result.lines[0]).toBe('hel'); + expect(result.cursorCol).toBe(3); + }); + + it('should do nothing at end of line', () => { + const state = createTestState(['hello'], 0, 5); + const action = { + type: 'vim_delete_char' as const, + payload: { count: 1 }, + }; + + const result = handleVimAction(state, action); + + expect(result.lines[0]).toBe('hello'); + expect(result.cursorCol).toBe(5); + }); + }); + + describe('vim_delete_word_forward', () => { + it('should delete from cursor to next word start', () => { + const state = createTestState(['hello world test'], 0, 0); + const action = { + type: 'vim_delete_word_forward' as const, + payload: { count: 1 }, + }; + + const result = handleVimAction(state, action); + + expect(result.lines[0]).toBe('world test'); + expect(result.cursorCol).toBe(0); + }); + + it('should delete multiple words', () => { + const state = createTestState(['hello world test'], 0, 0); + const action = { + type: 'vim_delete_word_forward' as const, + payload: { count: 2 }, + }; + + const result = handleVimAction(state, action); + + expect(result.lines[0]).toBe('test'); + expect(result.cursorCol).toBe(0); + }); + + it('should delete to end if no more words', () => { + const state = createTestState(['hello world'], 0, 6); + const action = { + type: 'vim_delete_word_forward' as const, + payload: { count: 2 }, + }; + + const result = handleVimAction(state, action); + + expect(result.lines[0]).toBe('hello '); + expect(result.cursorCol).toBe(6); + }); + }); + + describe('vim_delete_word_backward', () => { + it('should delete from cursor to previous word start', () => { + const state = createTestState(['hello world test'], 0, 12); + const action = { + type: 'vim_delete_word_backward' as const, + payload: { count: 1 }, + }; + + const result = handleVimAction(state, action); + + expect(result.lines[0]).toBe('hello test'); + expect(result.cursorCol).toBe(6); + 
}); + + it('should delete multiple words backward', () => { + const state = createTestState(['hello world test'], 0, 12); + const action = { + type: 'vim_delete_word_backward' as const, + payload: { count: 2 }, + }; + + const result = handleVimAction(state, action); + + expect(result.lines[0]).toBe('test'); + expect(result.cursorCol).toBe(0); + }); + }); + + describe('vim_delete_line', () => { + it('should delete current line', () => { + const state = createTestState(['line1', 'line2', 'line3'], 1, 2); + const action = { + type: 'vim_delete_line' as const, + payload: { count: 1 }, + }; + + const result = handleVimAction(state, action); + + expect(result.lines).toEqual(['line1', 'line3']); + expect(result.cursorRow).toBe(1); + expect(result.cursorCol).toBe(0); + }); + + it('should delete multiple lines', () => { + const state = createTestState(['line1', 'line2', 'line3'], 0, 2); + const action = { + type: 'vim_delete_line' as const, + payload: { count: 2 }, + }; + + const result = handleVimAction(state, action); + + expect(result.lines).toEqual(['line3']); + expect(result.cursorRow).toBe(0); + expect(result.cursorCol).toBe(0); + }); + + it('should leave empty line when deleting all lines', () => { + const state = createTestState(['only line'], 0, 0); + const action = { + type: 'vim_delete_line' as const, + payload: { count: 1 }, + }; + + const result = handleVimAction(state, action); + + expect(result.lines).toEqual(['']); + expect(result.cursorRow).toBe(0); + expect(result.cursorCol).toBe(0); + }); + }); + + describe('vim_delete_to_end_of_line', () => { + it('should delete from cursor to end of line', () => { + const state = createTestState(['hello world'], 0, 5); + const action = { type: 'vim_delete_to_end_of_line' as const }; + + const result = handleVimAction(state, action); + + expect(result.lines[0]).toBe('hello'); + expect(result.cursorCol).toBe(5); + }); + + it('should do nothing at end of line', () => { + const state = createTestState(['hello'], 0, 5); + 
const action = { type: 'vim_delete_to_end_of_line' as const }; + + const result = handleVimAction(state, action); + + expect(result.lines[0]).toBe('hello'); + }); + }); + }); + + describe('Insert mode commands', () => { + describe('vim_insert_at_cursor', () => { + it('should not change cursor position', () => { + const state = createTestState(['hello'], 0, 2); + const action = { type: 'vim_insert_at_cursor' as const }; + + const result = handleVimAction(state, action); + + expect(result.cursorRow).toBe(0); + expect(result.cursorCol).toBe(2); + }); + }); + + describe('vim_append_at_cursor', () => { + it('should move cursor right by one', () => { + const state = createTestState(['hello'], 0, 2); + const action = { type: 'vim_append_at_cursor' as const }; + + const result = handleVimAction(state, action); + + expect(result.cursorCol).toBe(3); + }); + + it('should not move past end of line', () => { + const state = createTestState(['hello'], 0, 5); + const action = { type: 'vim_append_at_cursor' as const }; + + const result = handleVimAction(state, action); + + expect(result.cursorCol).toBe(5); + }); + }); + + describe('vim_append_at_line_end', () => { + it('should move cursor to end of line', () => { + const state = createTestState(['hello world'], 0, 3); + const action = { type: 'vim_append_at_line_end' as const }; + + const result = handleVimAction(state, action); + + expect(result.cursorCol).toBe(11); + }); + }); + + describe('vim_insert_at_line_start', () => { + it('should move to first non-whitespace character', () => { + const state = createTestState([' hello world'], 0, 5); + const action = { type: 'vim_insert_at_line_start' as const }; + + const result = handleVimAction(state, action); + + expect(result.cursorCol).toBe(2); + }); + + it('should move to column 0 for line with only whitespace', () => { + const state = createTestState([' '], 0, 1); + const action = { type: 'vim_insert_at_line_start' as const }; + + const result = handleVimAction(state, action); + 
+ expect(result.cursorCol).toBe(3); + }); + }); + + describe('vim_open_line_below', () => { + it('should insert newline at end of current line', () => { + const state = createTestState(['hello world'], 0, 5); + const action = { type: 'vim_open_line_below' as const }; + + const result = handleVimAction(state, action); + + // The implementation inserts newline at end of current line and cursor moves to column 0 + expect(result.lines[0]).toBe('hello world\n'); + expect(result.cursorRow).toBe(0); + expect(result.cursorCol).toBe(0); // Cursor position after replaceRangeInternal + }); + }); + + describe('vim_open_line_above', () => { + it('should insert newline before current line', () => { + const state = createTestState(['hello', 'world'], 1, 2); + const action = { type: 'vim_open_line_above' as const }; + + const result = handleVimAction(state, action); + + // The implementation inserts newline at beginning of current line + expect(result.lines).toEqual(['hello', '\nworld']); + expect(result.cursorRow).toBe(1); + expect(result.cursorCol).toBe(0); + }); + }); + + describe('vim_escape_insert_mode', () => { + it('should move cursor left', () => { + const state = createTestState(['hello'], 0, 3); + const action = { type: 'vim_escape_insert_mode' as const }; + + const result = handleVimAction(state, action); + + expect(result.cursorCol).toBe(2); + }); + + it('should not move past beginning of line', () => { + const state = createTestState(['hello'], 0, 0); + const action = { type: 'vim_escape_insert_mode' as const }; + + const result = handleVimAction(state, action); + + expect(result.cursorCol).toBe(0); + }); + }); + }); + + describe('Change commands', () => { + describe('vim_change_word_forward', () => { + it('should delete from cursor to next word start', () => { + const state = createTestState(['hello world test'], 0, 0); + const action = { + type: 'vim_change_word_forward' as const, + payload: { count: 1 }, + }; + + const result = handleVimAction(state, action); + + 
expect(result.lines[0]).toBe('world test'); + expect(result.cursorCol).toBe(0); + }); + }); + + describe('vim_change_line', () => { + it('should delete entire line content', () => { + const state = createTestState(['hello world'], 0, 5); + const action = { + type: 'vim_change_line' as const, + payload: { count: 1 }, + }; + + const result = handleVimAction(state, action); + + expect(result.lines[0]).toBe(''); + expect(result.cursorCol).toBe(0); + }); + }); + + describe('vim_change_movement', () => { + it('should change characters to the left', () => { + const state = createTestState(['hello world'], 0, 5); + const action = { + type: 'vim_change_movement' as const, + payload: { movement: 'h', count: 2 }, + }; + + const result = handleVimAction(state, action); + + expect(result.lines[0]).toBe('hel world'); + expect(result.cursorCol).toBe(3); + }); + + it('should change characters to the right', () => { + const state = createTestState(['hello world'], 0, 5); + const action = { + type: 'vim_change_movement' as const, + payload: { movement: 'l', count: 3 }, + }; + + const result = handleVimAction(state, action); + + expect(result.lines[0]).toBe('hellorld'); // Deletes ' wo' (3 chars to the right) + expect(result.cursorCol).toBe(5); + }); + + it('should change multiple lines down', () => { + const state = createTestState(['line1', 'line2', 'line3'], 0, 2); + const action = { + type: 'vim_change_movement' as const, + payload: { movement: 'j', count: 2 }, + }; + + const result = handleVimAction(state, action); + + // The movement 'j' with count 2 changes 2 lines starting from cursor row + // Since we're at cursor position 2, it changes lines starting from current row + expect(result.lines).toEqual(['line1', 'line2', 'line3']); // No change because count > available lines + expect(result.cursorRow).toBe(0); + expect(result.cursorCol).toBe(2); + }); + }); + }); + + describe('Edge cases', () => { + it('should handle empty text', () => { + const state = createTestState([''], 0, 
0); + const action = { + type: 'vim_move_word_forward' as const, + payload: { count: 1 }, + }; + + const result = handleVimAction(state, action); + + expect(result.cursorRow).toBe(0); + expect(result.cursorCol).toBe(0); + }); + + it('should handle single character line', () => { + const state = createTestState(['a'], 0, 0); + const action = { type: 'vim_move_to_line_end' as const }; + + const result = handleVimAction(state, action); + + expect(result.cursorCol).toBe(0); // Should be last character position + }); + + it('should handle empty lines in multi-line text', () => { + const state = createTestState(['line1', '', 'line3'], 1, 0); + const action = { + type: 'vim_move_word_forward' as const, + payload: { count: 1 }, + }; + + const result = handleVimAction(state, action); + + // Should move to next line with content + expect(result.cursorRow).toBe(2); + expect(result.cursorCol).toBe(0); + }); + + it('should preserve undo stack in operations', () => { + const state = createTestState(['hello'], 0, 0); + state.undoStack = [{ lines: ['previous'], cursorRow: 0, cursorCol: 0 }]; + + const action = { + type: 'vim_delete_char' as const, + payload: { count: 1 }, + }; + + const result = handleVimAction(state, action); + + expect(result.undoStack).toHaveLength(2); // Original plus new snapshot + }); + }); +}); diff --git a/packages/cli/src/ui/components/shared/vim-buffer-actions.ts b/packages/cli/src/ui/components/shared/vim-buffer-actions.ts new file mode 100644 index 000000000..ab52e9911 --- /dev/null +++ b/packages/cli/src/ui/components/shared/vim-buffer-actions.ts @@ -0,0 +1,887 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { + TextBufferState, + TextBufferAction, + findNextWordStart, + findPrevWordStart, + findWordEnd, + getOffsetFromPosition, + getPositionFromOffsets, + getLineRangeOffsets, + replaceRangeInternal, + pushUndo, +} from './text-buffer.js'; +import { cpLen } from '../../utils/textUtils.js'; + 
+export type VimAction = Extract< + TextBufferAction, + | { type: 'vim_delete_word_forward' } + | { type: 'vim_delete_word_backward' } + | { type: 'vim_delete_word_end' } + | { type: 'vim_change_word_forward' } + | { type: 'vim_change_word_backward' } + | { type: 'vim_change_word_end' } + | { type: 'vim_delete_line' } + | { type: 'vim_change_line' } + | { type: 'vim_delete_to_end_of_line' } + | { type: 'vim_change_to_end_of_line' } + | { type: 'vim_change_movement' } + | { type: 'vim_move_left' } + | { type: 'vim_move_right' } + | { type: 'vim_move_up' } + | { type: 'vim_move_down' } + | { type: 'vim_move_word_forward' } + | { type: 'vim_move_word_backward' } + | { type: 'vim_move_word_end' } + | { type: 'vim_delete_char' } + | { type: 'vim_insert_at_cursor' } + | { type: 'vim_append_at_cursor' } + | { type: 'vim_open_line_below' } + | { type: 'vim_open_line_above' } + | { type: 'vim_append_at_line_end' } + | { type: 'vim_insert_at_line_start' } + | { type: 'vim_move_to_line_start' } + | { type: 'vim_move_to_line_end' } + | { type: 'vim_move_to_first_nonwhitespace' } + | { type: 'vim_move_to_first_line' } + | { type: 'vim_move_to_last_line' } + | { type: 'vim_move_to_line' } + | { type: 'vim_escape_insert_mode' } +>; + +export function handleVimAction( + state: TextBufferState, + action: VimAction, +): TextBufferState { + const { lines, cursorRow, cursorCol } = state; + // Cache text join to avoid repeated calculations for word operations + let text: string | null = null; + const getText = () => text ?? 
(text = lines.join('\n')); + + switch (action.type) { + case 'vim_delete_word_forward': { + const { count } = action.payload; + const currentOffset = getOffsetFromPosition(cursorRow, cursorCol, lines); + + let endOffset = currentOffset; + let searchOffset = currentOffset; + + for (let i = 0; i < count; i++) { + const nextWordOffset = findNextWordStart(getText(), searchOffset); + if (nextWordOffset > searchOffset) { + searchOffset = nextWordOffset; + endOffset = nextWordOffset; + } else { + // If no next word, delete to end of current word + const wordEndOffset = findWordEnd(getText(), searchOffset); + endOffset = Math.min(wordEndOffset + 1, getText().length); + break; + } + } + + if (endOffset > currentOffset) { + const nextState = pushUndo(state); + const { startRow, startCol, endRow, endCol } = getPositionFromOffsets( + currentOffset, + endOffset, + nextState.lines, + ); + return replaceRangeInternal( + nextState, + startRow, + startCol, + endRow, + endCol, + '', + ); + } + return state; + } + + case 'vim_delete_word_backward': { + const { count } = action.payload; + const currentOffset = getOffsetFromPosition(cursorRow, cursorCol, lines); + + let startOffset = currentOffset; + let searchOffset = currentOffset; + + for (let i = 0; i < count; i++) { + const prevWordOffset = findPrevWordStart(getText(), searchOffset); + if (prevWordOffset < searchOffset) { + searchOffset = prevWordOffset; + startOffset = prevWordOffset; + } else { + break; + } + } + + if (startOffset < currentOffset) { + const nextState = pushUndo(state); + const { startRow, startCol, endRow, endCol } = getPositionFromOffsets( + startOffset, + currentOffset, + nextState.lines, + ); + const newState = replaceRangeInternal( + nextState, + startRow, + startCol, + endRow, + endCol, + '', + ); + // Cursor is already at the correct position after deletion + return newState; + } + return state; + } + + case 'vim_delete_word_end': { + const { count } = action.payload; + const currentOffset = 
getOffsetFromPosition(cursorRow, cursorCol, lines); + + let offset = currentOffset; + let endOffset = currentOffset; + + for (let i = 0; i < count; i++) { + const wordEndOffset = findWordEnd(getText(), offset); + if (wordEndOffset >= offset) { + endOffset = wordEndOffset + 1; // Include the character at word end + // For next iteration, move to start of next word + if (i < count - 1) { + const nextWordStart = findNextWordStart( + getText(), + wordEndOffset + 1, + ); + offset = nextWordStart; + if (nextWordStart <= wordEndOffset) { + break; // No more words + } + } + } else { + break; + } + } + + endOffset = Math.min(endOffset, getText().length); + + if (endOffset > currentOffset) { + const nextState = pushUndo(state); + const { startRow, startCol, endRow, endCol } = getPositionFromOffsets( + currentOffset, + endOffset, + nextState.lines, + ); + return replaceRangeInternal( + nextState, + startRow, + startCol, + endRow, + endCol, + '', + ); + } + return state; + } + + case 'vim_change_word_forward': { + const { count } = action.payload; + const currentOffset = getOffsetFromPosition(cursorRow, cursorCol, lines); + + let searchOffset = currentOffset; + let endOffset = currentOffset; + + for (let i = 0; i < count; i++) { + const nextWordOffset = findNextWordStart(getText(), searchOffset); + if (nextWordOffset > searchOffset) { + searchOffset = nextWordOffset; + endOffset = nextWordOffset; + } else { + // If no next word, change to end of current word + const wordEndOffset = findWordEnd(getText(), searchOffset); + endOffset = Math.min(wordEndOffset + 1, getText().length); + break; + } + } + + if (endOffset > currentOffset) { + const nextState = pushUndo(state); + const { startRow, startCol, endRow, endCol } = getPositionFromOffsets( + currentOffset, + endOffset, + nextState.lines, + ); + return replaceRangeInternal( + nextState, + startRow, + startCol, + endRow, + endCol, + '', + ); + } + return state; + } + + case 'vim_change_word_backward': { + const { count } = 
action.payload; + const currentOffset = getOffsetFromPosition(cursorRow, cursorCol, lines); + + let startOffset = currentOffset; + let searchOffset = currentOffset; + + for (let i = 0; i < count; i++) { + const prevWordOffset = findPrevWordStart(getText(), searchOffset); + if (prevWordOffset < searchOffset) { + searchOffset = prevWordOffset; + startOffset = prevWordOffset; + } else { + break; + } + } + + if (startOffset < currentOffset) { + const nextState = pushUndo(state); + const { startRow, startCol, endRow, endCol } = getPositionFromOffsets( + startOffset, + currentOffset, + nextState.lines, + ); + return replaceRangeInternal( + nextState, + startRow, + startCol, + endRow, + endCol, + '', + ); + } + return state; + } + + case 'vim_change_word_end': { + const { count } = action.payload; + const currentOffset = getOffsetFromPosition(cursorRow, cursorCol, lines); + + let offset = currentOffset; + let endOffset = currentOffset; + + for (let i = 0; i < count; i++) { + const wordEndOffset = findWordEnd(getText(), offset); + if (wordEndOffset >= offset) { + endOffset = wordEndOffset + 1; // Include the character at word end + // For next iteration, move to start of next word + if (i < count - 1) { + const nextWordStart = findNextWordStart( + getText(), + wordEndOffset + 1, + ); + offset = nextWordStart; + if (nextWordStart <= wordEndOffset) { + break; // No more words + } + } + } else { + break; + } + } + + endOffset = Math.min(endOffset, getText().length); + + if (endOffset !== currentOffset) { + const nextState = pushUndo(state); + const { startRow, startCol, endRow, endCol } = getPositionFromOffsets( + Math.min(currentOffset, endOffset), + Math.max(currentOffset, endOffset), + nextState.lines, + ); + return replaceRangeInternal( + nextState, + startRow, + startCol, + endRow, + endCol, + '', + ); + } + return state; + } + + case 'vim_delete_line': { + const { count } = action.payload; + if (lines.length === 0) return state; + + const linesToDelete = Math.min(count, 
lines.length - cursorRow); + const totalLines = lines.length; + + if (totalLines === 1 || linesToDelete >= totalLines) { + // If there's only one line, or we're deleting all remaining lines, + // clear the content but keep one empty line (text editors should never be completely empty) + const nextState = pushUndo(state); + return { + ...nextState, + lines: [''], + cursorRow: 0, + cursorCol: 0, + preferredCol: null, + }; + } + + const nextState = pushUndo(state); + const newLines = [...nextState.lines]; + newLines.splice(cursorRow, linesToDelete); + + // Adjust cursor position + const newCursorRow = Math.min(cursorRow, newLines.length - 1); + const newCursorCol = 0; // Vim places cursor at beginning of line after dd + + return { + ...nextState, + lines: newLines, + cursorRow: newCursorRow, + cursorCol: newCursorCol, + preferredCol: null, + }; + } + + case 'vim_change_line': { + const { count } = action.payload; + if (lines.length === 0) return state; + + const linesToChange = Math.min(count, lines.length - cursorRow); + const nextState = pushUndo(state); + + const { startOffset, endOffset } = getLineRangeOffsets( + cursorRow, + linesToChange, + nextState.lines, + ); + const { startRow, startCol, endRow, endCol } = getPositionFromOffsets( + startOffset, + endOffset, + nextState.lines, + ); + return replaceRangeInternal( + nextState, + startRow, + startCol, + endRow, + endCol, + '', + ); + } + + case 'vim_delete_to_end_of_line': { + const currentLine = lines[cursorRow] || ''; + if (cursorCol < currentLine.length) { + const nextState = pushUndo(state); + return replaceRangeInternal( + nextState, + cursorRow, + cursorCol, + cursorRow, + currentLine.length, + '', + ); + } + return state; + } + + case 'vim_change_to_end_of_line': { + const currentLine = lines[cursorRow] || ''; + if (cursorCol < currentLine.length) { + const nextState = pushUndo(state); + return replaceRangeInternal( + nextState, + cursorRow, + cursorCol, + cursorRow, + currentLine.length, + '', + ); + } + 
return state; + } + + case 'vim_change_movement': { + const { movement, count } = action.payload; + const totalLines = lines.length; + + switch (movement) { + case 'h': { + // Left + // Change N characters to the left + const startCol = Math.max(0, cursorCol - count); + return replaceRangeInternal( + pushUndo(state), + cursorRow, + startCol, + cursorRow, + cursorCol, + '', + ); + } + + case 'j': { + // Down + const linesToChange = Math.min(count, totalLines - cursorRow); + if (linesToChange > 0) { + if (totalLines === 1) { + const currentLine = state.lines[0] || ''; + return replaceRangeInternal( + pushUndo(state), + 0, + 0, + 0, + cpLen(currentLine), + '', + ); + } else { + const nextState = pushUndo(state); + const { startOffset, endOffset } = getLineRangeOffsets( + cursorRow, + linesToChange, + nextState.lines, + ); + const { startRow, startCol, endRow, endCol } = + getPositionFromOffsets(startOffset, endOffset, nextState.lines); + return replaceRangeInternal( + nextState, + startRow, + startCol, + endRow, + endCol, + '', + ); + } + } + return state; + } + + case 'k': { + // Up + const upLines = Math.min(count, cursorRow + 1); + if (upLines > 0) { + if (state.lines.length === 1) { + const currentLine = state.lines[0] || ''; + return replaceRangeInternal( + pushUndo(state), + 0, + 0, + 0, + cpLen(currentLine), + '', + ); + } else { + const startRow = Math.max(0, cursorRow - count + 1); + const linesToChange = cursorRow - startRow + 1; + const nextState = pushUndo(state); + const { startOffset, endOffset } = getLineRangeOffsets( + startRow, + linesToChange, + nextState.lines, + ); + const { + startRow: newStartRow, + startCol, + endRow, + endCol, + } = getPositionFromOffsets( + startOffset, + endOffset, + nextState.lines, + ); + const resultState = replaceRangeInternal( + nextState, + newStartRow, + startCol, + endRow, + endCol, + '', + ); + return { + ...resultState, + cursorRow: startRow, + cursorCol: 0, + }; + } + } + return state; + } + + case 'l': { + // 
Right + // Change N characters to the right + return replaceRangeInternal( + pushUndo(state), + cursorRow, + cursorCol, + cursorRow, + Math.min(cpLen(lines[cursorRow] || ''), cursorCol + count), + '', + ); + } + + default: + return state; + } + } + + case 'vim_move_left': { + const { count } = action.payload; + const { cursorRow, cursorCol, lines } = state; + let newRow = cursorRow; + let newCol = cursorCol; + + for (let i = 0; i < count; i++) { + if (newCol > 0) { + newCol--; + } else if (newRow > 0) { + // Move to end of previous line + newRow--; + const prevLine = lines[newRow] || ''; + const prevLineLength = cpLen(prevLine); + // Position on last character, or column 0 for empty lines + newCol = prevLineLength === 0 ? 0 : prevLineLength - 1; + } + } + + return { + ...state, + cursorRow: newRow, + cursorCol: newCol, + preferredCol: null, + }; + } + + case 'vim_move_right': { + const { count } = action.payload; + const { cursorRow, cursorCol, lines } = state; + let newRow = cursorRow; + let newCol = cursorCol; + + for (let i = 0; i < count; i++) { + const currentLine = lines[newRow] || ''; + const lineLength = cpLen(currentLine); + // Don't move past the last character of the line + // For empty lines, stay at column 0; for non-empty lines, don't go past last character + if (lineLength === 0) { + // Empty line - try to move to next line + if (newRow < lines.length - 1) { + newRow++; + newCol = 0; + } + } else if (newCol < lineLength - 1) { + newCol++; + } else if (newRow < lines.length - 1) { + // At end of line - move to beginning of next line + newRow++; + newCol = 0; + } + } + + return { + ...state, + cursorRow: newRow, + cursorCol: newCol, + preferredCol: null, + }; + } + + case 'vim_move_up': { + const { count } = action.payload; + const { cursorRow, cursorCol, lines } = state; + const newRow = Math.max(0, cursorRow - count); + const newCol = Math.min(cursorCol, cpLen(lines[newRow] || '')); + + return { + ...state, + cursorRow: newRow, + cursorCol: newCol, + 
preferredCol: null, + }; + } + + case 'vim_move_down': { + const { count } = action.payload; + const { cursorRow, cursorCol, lines } = state; + const newRow = Math.min(lines.length - 1, cursorRow + count); + const newCol = Math.min(cursorCol, cpLen(lines[newRow] || '')); + + return { + ...state, + cursorRow: newRow, + cursorCol: newCol, + preferredCol: null, + }; + } + + case 'vim_move_word_forward': { + const { count } = action.payload; + let offset = getOffsetFromPosition(cursorRow, cursorCol, lines); + + for (let i = 0; i < count; i++) { + const nextWordOffset = findNextWordStart(getText(), offset); + if (nextWordOffset > offset) { + offset = nextWordOffset; + } else { + // No more words to move to + break; + } + } + + const { startRow, startCol } = getPositionFromOffsets( + offset, + offset, + lines, + ); + return { + ...state, + cursorRow: startRow, + cursorCol: startCol, + preferredCol: null, + }; + } + + case 'vim_move_word_backward': { + const { count } = action.payload; + let offset = getOffsetFromPosition(cursorRow, cursorCol, lines); + + for (let i = 0; i < count; i++) { + offset = findPrevWordStart(getText(), offset); + } + + const { startRow, startCol } = getPositionFromOffsets( + offset, + offset, + lines, + ); + return { + ...state, + cursorRow: startRow, + cursorCol: startCol, + preferredCol: null, + }; + } + + case 'vim_move_word_end': { + const { count } = action.payload; + let offset = getOffsetFromPosition(cursorRow, cursorCol, lines); + + for (let i = 0; i < count; i++) { + offset = findWordEnd(getText(), offset); + } + + const { startRow, startCol } = getPositionFromOffsets( + offset, + offset, + lines, + ); + return { + ...state, + cursorRow: startRow, + cursorCol: startCol, + preferredCol: null, + }; + } + + case 'vim_delete_char': { + const { count } = action.payload; + const { cursorRow, cursorCol, lines } = state; + const currentLine = lines[cursorRow] || ''; + const lineLength = cpLen(currentLine); + + if (cursorCol < lineLength) { + 
const deleteCount = Math.min(count, lineLength - cursorCol); + const nextState = pushUndo(state); + return replaceRangeInternal( + nextState, + cursorRow, + cursorCol, + cursorRow, + cursorCol + deleteCount, + '', + ); + } + return state; + } + + case 'vim_insert_at_cursor': { + // Just return state - mode change is handled elsewhere + return state; + } + + case 'vim_append_at_cursor': { + const { cursorRow, cursorCol, lines } = state; + const currentLine = lines[cursorRow] || ''; + const newCol = cursorCol < cpLen(currentLine) ? cursorCol + 1 : cursorCol; + + return { + ...state, + cursorCol: newCol, + preferredCol: null, + }; + } + + case 'vim_open_line_below': { + const { cursorRow, lines } = state; + const nextState = pushUndo(state); + + // Insert newline at end of current line + const endOfLine = cpLen(lines[cursorRow] || ''); + return replaceRangeInternal( + nextState, + cursorRow, + endOfLine, + cursorRow, + endOfLine, + '\n', + ); + } + + case 'vim_open_line_above': { + const { cursorRow } = state; + const nextState = pushUndo(state); + + // Insert newline at beginning of current line + const resultState = replaceRangeInternal( + nextState, + cursorRow, + 0, + cursorRow, + 0, + '\n', + ); + + // Move cursor to the new line above + return { + ...resultState, + cursorRow, + cursorCol: 0, + }; + } + + case 'vim_append_at_line_end': { + const { cursorRow, lines } = state; + const lineLength = cpLen(lines[cursorRow] || ''); + + return { + ...state, + cursorCol: lineLength, + preferredCol: null, + }; + } + + case 'vim_insert_at_line_start': { + const { cursorRow, lines } = state; + const currentLine = lines[cursorRow] || ''; + let col = 0; + + // Find first non-whitespace character using proper Unicode handling + const lineCodePoints = [...currentLine]; // Proper Unicode iteration + while (col < lineCodePoints.length && /\s/.test(lineCodePoints[col])) { + col++; + } + + return { + ...state, + cursorCol: col, + preferredCol: null, + }; + } + + case 
'vim_move_to_line_start': { + return { + ...state, + cursorCol: 0, + preferredCol: null, + }; + } + + case 'vim_move_to_line_end': { + const { cursorRow, lines } = state; + const lineLength = cpLen(lines[cursorRow] || ''); + + return { + ...state, + cursorCol: lineLength > 0 ? lineLength - 1 : 0, + preferredCol: null, + }; + } + + case 'vim_move_to_first_nonwhitespace': { + const { cursorRow, lines } = state; + const currentLine = lines[cursorRow] || ''; + let col = 0; + + // Find first non-whitespace character using proper Unicode handling + const lineCodePoints = [...currentLine]; // Proper Unicode iteration + while (col < lineCodePoints.length && /\s/.test(lineCodePoints[col])) { + col++; + } + + return { + ...state, + cursorCol: col, + preferredCol: null, + }; + } + + case 'vim_move_to_first_line': { + return { + ...state, + cursorRow: 0, + cursorCol: 0, + preferredCol: null, + }; + } + + case 'vim_move_to_last_line': { + const { lines } = state; + const lastRow = lines.length - 1; + + return { + ...state, + cursorRow: lastRow, + cursorCol: 0, + preferredCol: null, + }; + } + + case 'vim_move_to_line': { + const { lineNumber } = action.payload; + const { lines } = state; + const targetRow = Math.min(Math.max(0, lineNumber - 1), lines.length - 1); + + return { + ...state, + cursorRow: targetRow, + cursorCol: 0, + preferredCol: null, + }; + } + + case 'vim_escape_insert_mode': { + // Move cursor left if not at beginning of line (vim behavior when exiting insert mode) + const { cursorCol } = state; + const newCol = cursorCol > 0 ? 
cursorCol - 1 : 0; + + return { + ...state, + cursorCol: newCol, + preferredCol: null, + }; + } + + default: { + // This should never happen if TypeScript is working correctly + const _exhaustiveCheck: never = action; + return state; + } + } +} diff --git a/packages/cli/src/ui/constants.ts b/packages/cli/src/ui/constants.ts index 6a0a93758..6a77631cd 100644 --- a/packages/cli/src/ui/constants.ts +++ b/packages/cli/src/ui/constants.ts @@ -13,3 +13,5 @@ export const UI_WIDTH = EstimatedArtWidth + BOX_PADDING_X * 2 + BoxBorderWidth * 2; // ~63 export const STREAM_DEBOUNCE_MS = 100; + +export const SHELL_COMMAND_NAME = 'Shell Command'; diff --git a/packages/cli/src/ui/contexts/SessionContext.tsx b/packages/cli/src/ui/contexts/SessionContext.tsx index 001badf7c..0a588c0b8 100644 --- a/packages/cli/src/ui/contexts/SessionContext.tsx +++ b/packages/cli/src/ui/contexts/SessionContext.tsx @@ -50,7 +50,6 @@ interface SessionStatsContextValue { stats: SessionStatsState; startNewPrompt: () => void; getPromptCount: () => number; - resetSession: () => void; } // --- Context Definition --- @@ -110,23 +109,13 @@ export const SessionStatsProvider: React.FC<{ children: React.ReactNode }> = ({ [stats.promptCount], ); - const resetSession = useCallback(() => { - setStats({ - sessionStartTime: new Date(), - metrics: uiTelemetryService.getMetrics(), - lastPromptTokenCount: uiTelemetryService.getLastPromptTokenCount(), - promptCount: 0, - }); - }, []); - const value = useMemo( () => ({ stats, startNewPrompt, getPromptCount, - resetSession, }), - [stats, startNewPrompt, getPromptCount, resetSession], + [stats, startNewPrompt, getPromptCount], ); return ( diff --git a/packages/cli/src/ui/contexts/VimModeContext.tsx b/packages/cli/src/ui/contexts/VimModeContext.tsx new file mode 100644 index 000000000..b27034efd --- /dev/null +++ b/packages/cli/src/ui/contexts/VimModeContext.tsx @@ -0,0 +1,79 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + 
+import { + createContext, + useCallback, + useContext, + useEffect, + useState, +} from 'react'; +import { LoadedSettings, SettingScope } from '../../config/settings.js'; + +export type VimMode = 'NORMAL' | 'INSERT'; + +interface VimModeContextType { + vimEnabled: boolean; + vimMode: VimMode; + toggleVimEnabled: () => Promise; + setVimMode: (mode: VimMode) => void; +} + +const VimModeContext = createContext(undefined); + +export const VimModeProvider = ({ + children, + settings, +}: { + children: React.ReactNode; + settings: LoadedSettings; +}) => { + const initialVimEnabled = settings.merged.vimMode ?? false; + const [vimEnabled, setVimEnabled] = useState(initialVimEnabled); + const [vimMode, setVimMode] = useState( + initialVimEnabled ? 'NORMAL' : 'INSERT', + ); + + useEffect(() => { + // Initialize vimEnabled from settings on mount + const enabled = settings.merged.vimMode ?? false; + setVimEnabled(enabled); + // When vim mode is enabled, always start in NORMAL mode + if (enabled) { + setVimMode('NORMAL'); + } + }, [settings.merged.vimMode]); + + const toggleVimEnabled = useCallback(async () => { + const newValue = !vimEnabled; + setVimEnabled(newValue); + // When enabling vim mode, start in NORMAL mode + if (newValue) { + setVimMode('NORMAL'); + } + await settings.setValue(SettingScope.User, 'vimMode', newValue); + return newValue; + }, [vimEnabled, settings]); + + const value = { + vimEnabled, + vimMode, + toggleVimEnabled, + setVimMode, + }; + + return ( + {children} + ); +}; + +export const useVimMode = () => { + const context = useContext(VimModeContext); + if (context === undefined) { + throw new Error('useVimMode must be used within a VimModeProvider'); + } + return context; +}; diff --git a/packages/cli/src/ui/hooks/atCommandProcessor.test.ts b/packages/cli/src/ui/hooks/atCommandProcessor.test.ts index 5bd90f75c..10ec608d8 100644 --- a/packages/cli/src/ui/hooks/atCommandProcessor.test.ts +++ b/packages/cli/src/ui/hooks/atCommandProcessor.test.ts @@ 
-5,112 +5,74 @@ */ import { describe, it, expect, vi, beforeEach, afterEach, Mock } from 'vitest'; -import type { Mocked } from 'vitest'; import { handleAtCommand } from './atCommandProcessor.js'; -import { Config, FileDiscoveryService } from '@qwen-code/qwen-code-core'; +import { + Config, + FileDiscoveryService, + GlobTool, + ReadManyFilesTool, + ToolRegistry, +} from '@qwen-code/qwen-code-core'; +import * as os from 'os'; import { ToolCallStatus } from '../types.js'; import { UseHistoryManagerReturn } from './useHistoryManager.js'; import * as fsPromises from 'fs/promises'; -import type { Stats } from 'fs'; - -const mockGetToolRegistry = vi.fn(); -const mockGetTargetDir = vi.fn(); -const mockConfig = { - getToolRegistry: mockGetToolRegistry, - getTargetDir: mockGetTargetDir, - isSandboxed: vi.fn(() => false), - getFileService: vi.fn(), - getFileFilteringRespectGitIgnore: vi.fn(() => true), - getEnableRecursiveFileSearch: vi.fn(() => true), -} as unknown as Config; - -const mockReadManyFilesExecute = vi.fn(); -const mockReadManyFilesTool = { - name: 'read_many_files', - displayName: 'Read Many Files', - description: 'Reads multiple files.', - execute: mockReadManyFilesExecute, - getDescription: vi.fn((params) => `Read files: ${params.paths.join(', ')}`), -}; - -const mockGlobExecute = vi.fn(); -const mockGlobTool = { - name: 'glob', - displayName: 'Glob Tool', - execute: mockGlobExecute, - getDescription: vi.fn(() => 'Glob tool description'), -}; - -const mockAddItem: Mock = vi.fn(); -const mockOnDebugMessage: Mock<(message: string) => void> = vi.fn(); - -vi.mock('fs/promises', async () => { - const actual = await vi.importActual('fs/promises'); - return { - ...actual, - stat: vi.fn(), - }; -}); - -vi.mock('@qwen-code/qwen-code-core', async () => { - const actual = await vi.importActual('@qwen-code/qwen-code-core'); - return { - ...actual, - FileDiscoveryService: vi.fn(), - }; -}); +import * as path from 'path'; describe('handleAtCommand', () => { + let 
testRootDir: string; + let mockConfig: Config; + + const mockAddItem: Mock = vi.fn(); + const mockOnDebugMessage: Mock<(message: string) => void> = vi.fn(); + let abortController: AbortController; - let mockFileDiscoveryService: Mocked; - beforeEach(() => { + async function createTestFile(fullPath: string, fileContents: string) { + await fsPromises.mkdir(path.dirname(fullPath), { recursive: true }); + await fsPromises.writeFile(fullPath, fileContents); + return path.resolve(testRootDir, fullPath); + } + + beforeEach(async () => { vi.resetAllMocks(); - abortController = new AbortController(); - mockGetTargetDir.mockReturnValue('/test/dir'); - mockGetToolRegistry.mockReturnValue({ - getTool: vi.fn((toolName: string) => { - if (toolName === 'read_many_files') return mockReadManyFilesTool; - if (toolName === 'glob') return mockGlobTool; - return undefined; - }), - }); - vi.mocked(fsPromises.stat).mockResolvedValue({ - isDirectory: () => false, - } as Stats); - mockReadManyFilesExecute.mockResolvedValue({ - llmContent: '', - returnDisplay: '', - }); - mockGlobExecute.mockResolvedValue({ - llmContent: 'No files found', - returnDisplay: '', - }); - // Mock FileDiscoveryService - mockFileDiscoveryService = { - initialize: vi.fn(), - shouldIgnoreFile: vi.fn(() => false), - filterFiles: vi.fn((files) => files), - getIgnoreInfo: vi.fn(() => ({ gitIgnored: [] })), - isGitRepository: vi.fn(() => true), - }; - vi.mocked(FileDiscoveryService).mockImplementation( - () => mockFileDiscoveryService, + testRootDir = await fsPromises.mkdtemp( + path.join(os.tmpdir(), 'folder-structure-test-'), ); - // Mock getFileService to return the mocked FileDiscoveryService - mockConfig.getFileService = vi - .fn() - .mockReturnValue(mockFileDiscoveryService); + abortController = new AbortController(); + + const getToolRegistry = vi.fn(); + + mockConfig = { + getToolRegistry, + getTargetDir: () => testRootDir, + isSandboxed: () => false, + getFileService: () => new 
FileDiscoveryService(testRootDir), + getFileFilteringRespectGitIgnore: () => true, + getFileFilteringRespectGeminiIgnore: () => true, + getFileFilteringOptions: () => ({ + respectGitIgnore: true, + respectGeminiIgnore: true, + }), + getEnableRecursiveFileSearch: vi.fn(() => true), + } as unknown as Config; + + const registry = new ToolRegistry(mockConfig); + registry.registerTool(new ReadManyFilesTool(mockConfig)); + registry.registerTool(new GlobTool(mockConfig)); + getToolRegistry.mockReturnValue(registry); }); - afterEach(() => { + afterEach(async () => { abortController.abort(); + await fsPromises.rm(testRootDir, { recursive: true, force: true }); }); it('should pass through query if no @ command is present', async () => { const query = 'regular user query'; + const result = await handleAtCommand({ query, config: mockConfig, @@ -119,17 +81,20 @@ describe('handleAtCommand', () => { messageId: 123, signal: abortController.signal, }); + + expect(result).toEqual({ + processedQuery: [{ text: query }], + shouldProceed: true, + }); expect(mockAddItem).toHaveBeenCalledWith( { type: 'user', text: query }, 123, ); - expect(result.processedQuery).toEqual([{ text: query }]); - expect(result.shouldProceed).toBe(true); - expect(mockReadManyFilesExecute).not.toHaveBeenCalled(); }); it('should pass through original query if only a lone @ symbol is present', async () => { const queryWithSpaces = ' @ '; + const result = await handleAtCommand({ query: queryWithSpaces, config: mockConfig, @@ -138,25 +103,27 @@ describe('handleAtCommand', () => { messageId: 124, signal: abortController.signal, }); + + expect(result).toEqual({ + processedQuery: [{ text: queryWithSpaces }], + shouldProceed: true, + }); expect(mockAddItem).toHaveBeenCalledWith( { type: 'user', text: queryWithSpaces }, 124, ); - expect(result.processedQuery).toEqual([{ text: queryWithSpaces }]); - expect(result.shouldProceed).toBe(true); expect(mockOnDebugMessage).toHaveBeenCalledWith( 'Lone @ detected, will be treated 
as text in the modified query.', ); }); it('should process a valid text file path', async () => { - const filePath = 'path/to/file.txt'; - const query = `@${filePath}`; const fileContent = 'This is the file content.'; - mockReadManyFilesExecute.mockResolvedValue({ - llmContent: [`--- ${filePath} ---\n\n${fileContent}\n\n`], - returnDisplay: 'Read 1 file.', - }); + const filePath = await createTestFile( + path.join(testRootDir, 'path', 'to', 'file.txt'), + fileContent, + ); + const query = `@${filePath}`; const result = await handleAtCommand({ query, @@ -166,14 +133,21 @@ describe('handleAtCommand', () => { messageId: 125, signal: abortController.signal, }); + + expect(result).toEqual({ + processedQuery: [ + { text: `@${filePath}` }, + { text: '\n--- Content from referenced files ---' }, + { text: `\nContent from @${filePath}:\n` }, + { text: fileContent }, + { text: '\n--- End of content ---' }, + ], + shouldProceed: true, + }); expect(mockAddItem).toHaveBeenCalledWith( { type: 'user', text: query }, 125, ); - expect(mockReadManyFilesExecute).toHaveBeenCalledWith( - { paths: [filePath], respect_git_ignore: true }, - abortController.signal, - ); expect(mockAddItem).toHaveBeenCalledWith( expect.objectContaining({ type: 'tool_group', @@ -181,28 +155,17 @@ describe('handleAtCommand', () => { }), 125, ); - expect(result.processedQuery).toEqual([ - { text: `@${filePath}` }, - { text: '\n--- Content from referenced files ---' }, - { text: `\nContent from @${filePath}:\n` }, - { text: fileContent }, - { text: '\n--- End of content ---' }, - ]); - expect(result.shouldProceed).toBe(true); }); it('should process a valid directory path and convert to glob', async () => { - const dirPath = 'path/to/dir'; + const fileContent = 'This is the file content.'; + const filePath = await createTestFile( + path.join(testRootDir, 'path', 'to', 'file.txt'), + fileContent, + ); + const dirPath = path.dirname(filePath); const query = `@${dirPath}`; const resolvedGlob = `${dirPath}/**`; - 
const fileContent = 'Directory content.'; - vi.mocked(fsPromises.stat).mockResolvedValue({ - isDirectory: () => true, - } as Stats); - mockReadManyFilesExecute.mockResolvedValue({ - llmContent: [`--- ${resolvedGlob} ---\n\n${fileContent}\n\n`], - returnDisplay: 'Read directory contents.', - }); const result = await handleAtCommand({ query, @@ -212,70 +175,35 @@ describe('handleAtCommand', () => { messageId: 126, signal: abortController.signal, }); + + expect(result).toEqual({ + processedQuery: [ + { text: `@${resolvedGlob}` }, + { text: '\n--- Content from referenced files ---' }, + { text: `\nContent from @${filePath}:\n` }, + { text: fileContent }, + { text: '\n--- End of content ---' }, + ], + shouldProceed: true, + }); expect(mockAddItem).toHaveBeenCalledWith( { type: 'user', text: query }, 126, ); - expect(mockReadManyFilesExecute).toHaveBeenCalledWith( - { paths: [resolvedGlob], respect_git_ignore: true }, - abortController.signal, - ); expect(mockOnDebugMessage).toHaveBeenCalledWith( `Path ${dirPath} resolved to directory, using glob: ${resolvedGlob}`, ); - expect(result.processedQuery).toEqual([ - { text: `@${resolvedGlob}` }, - { text: '\n--- Content from referenced files ---' }, - { text: `\nContent from @${resolvedGlob}:\n` }, - { text: fileContent }, - { text: '\n--- End of content ---' }, - ]); - expect(result.shouldProceed).toBe(true); - }); - - it('should process a valid image file path (as text content for now)', async () => { - const imagePath = 'path/to/image.png'; - const query = `@${imagePath}`; - // For @-commands, read_many_files is expected to return text or structured text. - // If it were to return actual image Part, the test and handling would be different. - // Current implementation of read_many_files for images returns base64 in text. 
- const imageFileTextContent = '[base64 image data for path/to/image.png]'; - const imagePart = { - mimeType: 'image/png', - inlineData: imageFileTextContent, - }; - mockReadManyFilesExecute.mockResolvedValue({ - llmContent: [imagePart], - returnDisplay: 'Read 1 image.', - }); - - const result = await handleAtCommand({ - query, - config: mockConfig, - addItem: mockAddItem, - onDebugMessage: mockOnDebugMessage, - messageId: 127, - signal: abortController.signal, - }); - expect(result.processedQuery).toEqual([ - { text: `@${imagePath}` }, - { text: '\n--- Content from referenced files ---' }, - imagePart, - { text: '\n--- End of content ---' }, - ]); - expect(result.shouldProceed).toBe(true); }); it('should handle query with text before and after @command', async () => { + const fileContent = 'Markdown content.'; + const filePath = await createTestFile( + path.join(testRootDir, 'doc.md'), + fileContent, + ); const textBefore = 'Explain this: '; - const filePath = 'doc.md'; const textAfter = ' in detail.'; const query = `${textBefore}@${filePath}${textAfter}`; - const fileContent = 'Markdown content.'; - mockReadManyFilesExecute.mockResolvedValue({ - llmContent: [`--- ${filePath} ---\n\n${fileContent}\n\n`], - returnDisplay: 'Read 1 doc.', - }); const result = await handleAtCommand({ query, @@ -285,58 +213,76 @@ describe('handleAtCommand', () => { messageId: 128, signal: abortController.signal, }); + + expect(result).toEqual({ + processedQuery: [ + { text: `${textBefore}@${filePath}${textAfter}` }, + { text: '\n--- Content from referenced files ---' }, + { text: `\nContent from @${filePath}:\n` }, + { text: fileContent }, + { text: '\n--- End of content ---' }, + ], + shouldProceed: true, + }); expect(mockAddItem).toHaveBeenCalledWith( { type: 'user', text: query }, 128, ); - expect(result.processedQuery).toEqual([ - { text: `${textBefore}@${filePath}${textAfter}` }, - { text: '\n--- Content from referenced files ---' }, - { text: `\nContent from @${filePath}:\n` }, - 
{ text: fileContent }, - { text: '\n--- End of content ---' }, - ]); - expect(result.shouldProceed).toBe(true); }); it('should correctly unescape paths with escaped spaces', async () => { - const rawPath = 'path/to/my\\ file.txt'; - const unescapedPath = 'path/to/my file.txt'; - const query = `@${rawPath}`; - const fileContent = 'Content of file with space.'; - mockReadManyFilesExecute.mockResolvedValue({ - llmContent: [`--- ${unescapedPath} ---\n\n${fileContent}\n\n`], - returnDisplay: 'Read 1 file.', - }); + const fileContent = 'This is the file content.'; + const filePath = await createTestFile( + path.join(testRootDir, 'path', 'to', 'my file.txt'), + fileContent, + ); + const escapedpath = path.join(testRootDir, 'path', 'to', 'my\\ file.txt'); + const query = `@${escapedpath}`; - await handleAtCommand({ + const result = await handleAtCommand({ query, config: mockConfig, addItem: mockAddItem, onDebugMessage: mockOnDebugMessage, - messageId: 129, + messageId: 125, signal: abortController.signal, }); - expect(mockReadManyFilesExecute).toHaveBeenCalledWith( - { paths: [unescapedPath], respect_git_ignore: true }, - abortController.signal, + + expect(result).toEqual({ + processedQuery: [ + { text: `@${filePath}` }, + { text: '\n--- Content from referenced files ---' }, + { text: `\nContent from @${filePath}:\n` }, + { text: fileContent }, + { text: '\n--- End of content ---' }, + ], + shouldProceed: true, + }); + expect(mockAddItem).toHaveBeenCalledWith( + { type: 'user', text: query }, + 125, + ); + expect(mockAddItem).toHaveBeenCalledWith( + expect.objectContaining({ + type: 'tool_group', + tools: [expect.objectContaining({ status: ToolCallStatus.Success })], + }), + 125, ); }); it('should handle multiple @file references', async () => { - const file1 = 'file1.txt'; const content1 = 'Content file1'; - const file2 = 'file2.md'; + const file1Path = await createTestFile( + path.join(testRootDir, 'file1.txt'), + content1, + ); const content2 = 'Content file2'; - const 
query = `@${file1} @${file2}`; - - mockReadManyFilesExecute.mockResolvedValue({ - llmContent: [ - `--- ${file1} ---\n\n${content1}\n\n`, - `--- ${file2} ---\n\n${content2}\n\n`, - ], - returnDisplay: 'Read 2 files.', - }); + const file2Path = await createTestFile( + path.join(testRootDir, 'file2.md'), + content2, + ); + const query = `@${file1Path} @${file2Path}`; const result = await handleAtCommand({ query, @@ -346,39 +292,36 @@ describe('handleAtCommand', () => { messageId: 130, signal: abortController.signal, }); - expect(mockReadManyFilesExecute).toHaveBeenCalledWith( - { paths: [file1, file2], respect_git_ignore: true }, - abortController.signal, - ); - expect(result.processedQuery).toEqual([ - { text: `@${file1} @${file2}` }, - { text: '\n--- Content from referenced files ---' }, - { text: `\nContent from @${file1}:\n` }, - { text: content1 }, - { text: `\nContent from @${file2}:\n` }, - { text: content2 }, - { text: '\n--- End of content ---' }, - ]); - expect(result.shouldProceed).toBe(true); + + expect(result).toEqual({ + processedQuery: [ + { text: query }, + { text: '\n--- Content from referenced files ---' }, + { text: `\nContent from @${file1Path}:\n` }, + { text: content1 }, + { text: `\nContent from @${file2Path}:\n` }, + { text: content2 }, + { text: '\n--- End of content ---' }, + ], + shouldProceed: true, + }); }); it('should handle multiple @file references with interleaved text', async () => { const text1 = 'Check '; - const file1 = 'f1.txt'; const content1 = 'C1'; + const file1Path = await createTestFile( + path.join(testRootDir, 'f1.txt'), + content1, + ); const text2 = ' and '; - const file2 = 'f2.md'; const content2 = 'C2'; + const file2Path = await createTestFile( + path.join(testRootDir, 'f2.md'), + content2, + ); const text3 = ' please.'; - const query = `${text1}@${file1}${text2}@${file2}${text3}`; - - mockReadManyFilesExecute.mockResolvedValue({ - llmContent: [ - `--- ${file1} ---\n\n${content1}\n\n`, - `--- ${file2} 
---\n\n${content2}\n\n`, - ], - returnDisplay: 'Read 2 files.', - }); + const query = `${text1}@${file1Path}${text2}@${file2Path}${text3}`; const result = await handleAtCommand({ query, @@ -388,61 +331,34 @@ describe('handleAtCommand', () => { messageId: 131, signal: abortController.signal, }); - expect(mockReadManyFilesExecute).toHaveBeenCalledWith( - { paths: [file1, file2], respect_git_ignore: true }, - abortController.signal, - ); - expect(result.processedQuery).toEqual([ - { text: `${text1}@${file1}${text2}@${file2}${text3}` }, - { text: '\n--- Content from referenced files ---' }, - { text: `\nContent from @${file1}:\n` }, - { text: content1 }, - { text: `\nContent from @${file2}:\n` }, - { text: content2 }, - { text: '\n--- End of content ---' }, - ]); - expect(result.shouldProceed).toBe(true); + + expect(result).toEqual({ + processedQuery: [ + { text: query }, + { text: '\n--- Content from referenced files ---' }, + { text: `\nContent from @${file1Path}:\n` }, + { text: content1 }, + { text: `\nContent from @${file2Path}:\n` }, + { text: content2 }, + { text: '\n--- End of content ---' }, + ], + shouldProceed: true, + }); }); it('should handle a mix of valid, invalid, and lone @ references', async () => { - const file1 = 'valid1.txt'; const content1 = 'Valid content 1'; + const file1Path = await createTestFile( + path.join(testRootDir, 'valid1.txt'), + content1, + ); const invalidFile = 'nonexistent.txt'; - const query = `Look at @${file1} then @${invalidFile} and also just @ symbol, then @valid2.glob`; - const file2Glob = 'valid2.glob'; - const resolvedFile2 = 'resolved/valid2.actual'; const content2 = 'Globbed content'; - - // Mock fs.stat for file1 (valid) - vi.mocked(fsPromises.stat).mockImplementation(async (p) => { - if (p.toString().endsWith(file1)) - return { isDirectory: () => false } as Stats; - if (p.toString().endsWith(invalidFile)) - throw Object.assign(new Error('ENOENT'), { code: 'ENOENT' }); - // For valid2.glob, stat will fail, triggering 
glob - if (p.toString().endsWith(file2Glob)) - throw Object.assign(new Error('ENOENT'), { code: 'ENOENT' }); - return { isDirectory: () => false } as Stats; // Default - }); - - // Mock glob to find resolvedFile2 for valid2.glob - mockGlobExecute.mockImplementation(async (params) => { - if (params.pattern.includes('valid2.glob')) { - return { - llmContent: `Found files:\n${mockGetTargetDir()}/${resolvedFile2}`, - returnDisplay: 'Found 1 file', - }; - } - return { llmContent: 'No files found', returnDisplay: '' }; - }); - - mockReadManyFilesExecute.mockResolvedValue({ - llmContent: [ - `--- ${file1} ---\n\n${content1}\n\n`, - `--- ${resolvedFile2} ---\n\n${content2}\n\n`, - ], - returnDisplay: 'Read 2 files.', - }); + const file2Path = await createTestFile( + path.join(testRootDir, 'resolved', 'valid2.actual'), + content2, + ); + const query = `Look at @${file1Path} then @${invalidFile} and also just @ symbol, then @${file2Path}`; const result = await handleAtCommand({ query, @@ -453,23 +369,20 @@ describe('handleAtCommand', () => { signal: abortController.signal, }); - expect(mockReadManyFilesExecute).toHaveBeenCalledWith( - { paths: [file1, resolvedFile2], respect_git_ignore: true }, - abortController.signal, - ); - expect(result.processedQuery).toEqual([ - // Original query has @nonexistent.txt and @, but resolved has @resolved/valid2.actual - { - text: `Look at @${file1} then @${invalidFile} and also just @ symbol, then @${resolvedFile2}`, - }, - { text: '\n--- Content from referenced files ---' }, - { text: `\nContent from @${file1}:\n` }, - { text: content1 }, - { text: `\nContent from @${resolvedFile2}:\n` }, - { text: content2 }, - { text: '\n--- End of content ---' }, - ]); - expect(result.shouldProceed).toBe(true); + expect(result).toEqual({ + processedQuery: [ + { + text: `Look at @${file1Path} then @${invalidFile} and also just @ symbol, then @${file2Path}`, + }, + { text: '\n--- Content from referenced files ---' }, + { text: `\nContent from 
@${file2Path}:\n` }, + { text: content2 }, + { text: `\nContent from @${file1Path}:\n` }, + { text: content1 }, + { text: '\n--- End of content ---' }, + ], + shouldProceed: true, + }); expect(mockOnDebugMessage).toHaveBeenCalledWith( `Path ${invalidFile} not found directly, attempting glob search.`, ); @@ -483,13 +396,6 @@ describe('handleAtCommand', () => { it('should return original query if all @paths are invalid or lone @', async () => { const query = 'Check @nonexistent.txt and @ also'; - vi.mocked(fsPromises.stat).mockRejectedValue( - Object.assign(new Error('ENOENT'), { code: 'ENOENT' }), - ); - mockGlobExecute.mockResolvedValue({ - llmContent: 'No files found', - returnDisplay: '', - }); const result = await handleAtCommand({ query, @@ -499,93 +405,31 @@ describe('handleAtCommand', () => { messageId: 133, signal: abortController.signal, }); - expect(mockReadManyFilesExecute).not.toHaveBeenCalled(); - // The modified query string will be "Check @nonexistent.txt and @ also" because no paths were resolved for reading. 
- expect(result.processedQuery).toEqual([ - { text: 'Check @nonexistent.txt and @ also' }, - ]); - expect(result.shouldProceed).toBe(true); - }); - - it('should process a file path case-insensitively', async () => { - // const actualFilePath = 'path/to/MyFile.txt'; // Unused, path in llmContent should match queryPath - const queryPath = 'path/to/myfile.txt'; // Different case - const query = `@${queryPath}`; - const fileContent = 'This is the case-insensitive file content.'; - - // Mock fs.stat to "find" MyFile.txt when looking for myfile.txt - // This simulates a case-insensitive file system or resolution - vi.mocked(fsPromises.stat).mockImplementation(async (p) => { - if (p.toString().toLowerCase().endsWith('myfile.txt')) { - return { - isDirectory: () => false, - // You might need to add other Stats properties if your code uses them - } as Stats; - } - throw Object.assign(new Error('ENOENT'), { code: 'ENOENT' }); + expect(result).toEqual({ + processedQuery: [{ text: 'Check @nonexistent.txt and @ also' }], + shouldProceed: true, }); - - mockReadManyFilesExecute.mockResolvedValue({ - llmContent: [`--- ${queryPath} ---\n\n${fileContent}\n\n`], - returnDisplay: 'Read 1 file.', - }); - - const result = await handleAtCommand({ - query, - config: mockConfig, - addItem: mockAddItem, - onDebugMessage: mockOnDebugMessage, - messageId: 134, // New messageId - signal: abortController.signal, - }); - - expect(mockAddItem).toHaveBeenCalledWith( - { type: 'user', text: query }, - 134, - ); - // The atCommandProcessor resolves the path before calling read_many_files. - // We expect it to be called with the path that fs.stat "found". - // In a real case-insensitive FS, stat(myfile.txt) might return info for MyFile.txt. - // The key is that *a* valid path that points to the content is used. - expect(mockReadManyFilesExecute).toHaveBeenCalledWith( - // Depending on how path resolution and fs.stat mock interact, - // this could be queryPath or actualFilePath. 
- // For this test, we'll assume the processor uses the path that stat "succeeded" with. - // If the underlying fs/stat is truly case-insensitive, it might resolve to actualFilePath. - // If the mock is simpler, it might use queryPath if stat(queryPath) succeeds. - // The most important part is that *some* version of the path that leads to the content is used. - // Let's assume it uses the path from the query if stat confirms it exists (even if different case on disk) - { paths: [queryPath], respect_git_ignore: true }, - abortController.signal, - ); - expect(mockAddItem).toHaveBeenCalledWith( - expect.objectContaining({ - type: 'tool_group', - tools: [expect.objectContaining({ status: ToolCallStatus.Success })], - }), - 134, - ); - expect(result.processedQuery).toEqual([ - { text: `@${queryPath}` }, // Query uses the input path - { text: '\n--- Content from referenced files ---' }, - { text: `\nContent from @${queryPath}:\n` }, // Content display also uses input path - { text: fileContent }, - { text: '\n--- End of content ---' }, - ]); - expect(result.shouldProceed).toBe(true); }); describe('git-aware filtering', () => { - it('should skip git-ignored files in @ commands', async () => { - const gitIgnoredFile = 'node_modules/package.json'; - const query = `@${gitIgnoredFile}`; + beforeEach(async () => { + await fsPromises.mkdir(path.join(testRootDir, '.git'), { + recursive: true, + }); + }); - // Mock the file discovery service to report this file as git-ignored - mockFileDiscoveryService.shouldIgnoreFile.mockImplementation( - (path: string, options?: { respectGitIgnore?: boolean }) => - path === gitIgnoredFile && options?.respectGitIgnore !== false, + it('should skip git-ignored files in @ commands', async () => { + await createTestFile( + path.join(testRootDir, '.gitignore'), + 'node_modules/package.json', ); + const gitIgnoredFile = await createTestFile( + path.join(testRootDir, 'node_modules', 'package.json'), + 'the file contents', + ); + + const query = 
`@${gitIgnoredFile}`; const result = await handleAtCommand({ query, @@ -596,31 +440,29 @@ describe('handleAtCommand', () => { signal: abortController.signal, }); - expect(mockFileDiscoveryService.shouldIgnoreFile).toHaveBeenCalledWith( - gitIgnoredFile, - { respectGitIgnore: true }, - ); + expect(result).toEqual({ + processedQuery: [{ text: query }], + shouldProceed: true, + }); expect(mockOnDebugMessage).toHaveBeenCalledWith( `Path ${gitIgnoredFile} is git-ignored and will be skipped.`, ); expect(mockOnDebugMessage).toHaveBeenCalledWith( - 'Ignored 1 git-ignored files: node_modules/package.json', + `Ignored 1 files:\nGit-ignored: ${gitIgnoredFile}`, ); - expect(mockReadManyFilesExecute).not.toHaveBeenCalled(); - expect(result.processedQuery).toEqual([{ text: query }]); - expect(result.shouldProceed).toBe(true); }); it('should process non-git-ignored files normally', async () => { - const validFile = 'src/index.ts'; - const query = `@${validFile}`; - const fileContent = 'console.log("Hello world");'; + await createTestFile( + path.join(testRootDir, '.gitignore'), + 'node_modules/package.json', + ); - mockFileDiscoveryService.shouldIgnoreFile.mockReturnValue(false); - mockReadManyFilesExecute.mockResolvedValue({ - llmContent: [`--- ${validFile} ---\n\n${fileContent}\n\n`], - returnDisplay: 'Read 1 file.', - }); + const validFile = await createTestFile( + path.join(testRootDir, 'src', 'index.ts'), + 'console.log("Hello world");', + ); + const query = `@${validFile}`; const result = await handleAtCommand({ query, @@ -631,38 +473,29 @@ describe('handleAtCommand', () => { signal: abortController.signal, }); - expect(mockFileDiscoveryService.shouldIgnoreFile).toHaveBeenCalledWith( - validFile, - { respectGitIgnore: true }, - ); - expect(mockReadManyFilesExecute).toHaveBeenCalledWith( - { paths: [validFile], respect_git_ignore: true }, - abortController.signal, - ); - expect(result.processedQuery).toEqual([ - { text: `@${validFile}` }, - { text: '\n--- Content from 
referenced files ---' }, - { text: `\nContent from @${validFile}:\n` }, - { text: fileContent }, - { text: '\n--- End of content ---' }, - ]); - expect(result.shouldProceed).toBe(true); + expect(result).toEqual({ + processedQuery: [ + { text: `@${validFile}` }, + { text: '\n--- Content from referenced files ---' }, + { text: `\nContent from @${validFile}:\n` }, + { text: 'console.log("Hello world");' }, + { text: '\n--- End of content ---' }, + ], + shouldProceed: true, + }); }); it('should handle mixed git-ignored and valid files', async () => { - const validFile = 'README.md'; - const gitIgnoredFile = '.env'; - const query = `@${validFile} @${gitIgnoredFile}`; - const fileContent = '# Project README'; - - mockFileDiscoveryService.shouldIgnoreFile.mockImplementation( - (path: string, options?: { respectGitIgnore?: boolean }) => - path === gitIgnoredFile && options?.respectGitIgnore !== false, + await createTestFile(path.join(testRootDir, '.gitignore'), '.env'); + const validFile = await createTestFile( + path.join(testRootDir, 'README.md'), + '# Project README', ); - mockReadManyFilesExecute.mockResolvedValue({ - llmContent: [`--- ${validFile} ---\n\n${fileContent}\n\n`], - returnDisplay: 'Read 1 file.', - }); + const gitIgnoredFile = await createTestFile( + path.join(testRootDir, '.env'), + 'SECRET=123', + ); + const query = `@${validFile} @${gitIgnoredFile}`; const result = await handleAtCommand({ query, @@ -673,40 +506,31 @@ describe('handleAtCommand', () => { signal: abortController.signal, }); - expect(mockFileDiscoveryService.shouldIgnoreFile).toHaveBeenCalledWith( - validFile, - { respectGitIgnore: true }, - ); - expect(mockFileDiscoveryService.shouldIgnoreFile).toHaveBeenCalledWith( - gitIgnoredFile, - { respectGitIgnore: true }, - ); + expect(result).toEqual({ + processedQuery: [ + { text: `@${validFile} @${gitIgnoredFile}` }, + { text: '\n--- Content from referenced files ---' }, + { text: `\nContent from @${validFile}:\n` }, + { text: '# Project README' 
}, + { text: '\n--- End of content ---' }, + ], + shouldProceed: true, + }); expect(mockOnDebugMessage).toHaveBeenCalledWith( `Path ${gitIgnoredFile} is git-ignored and will be skipped.`, ); expect(mockOnDebugMessage).toHaveBeenCalledWith( - 'Ignored 1 git-ignored files: .env', + `Ignored 1 files:\nGit-ignored: ${gitIgnoredFile}`, ); - expect(mockReadManyFilesExecute).toHaveBeenCalledWith( - { paths: [validFile], respect_git_ignore: true }, - abortController.signal, - ); - expect(result.processedQuery).toEqual([ - { text: `@${validFile} @${gitIgnoredFile}` }, - { text: '\n--- Content from referenced files ---' }, - { text: `\nContent from @${validFile}:\n` }, - { text: fileContent }, - { text: '\n--- End of content ---' }, - ]); - expect(result.shouldProceed).toBe(true); }); it('should always ignore .git directory files', async () => { - const gitFile = '.git/config'; + const gitFile = await createTestFile( + path.join(testRootDir, '.git', 'config'), + '[core]\n\trepositoryformatversion = 0\n', + ); const query = `@${gitFile}`; - mockFileDiscoveryService.shouldIgnoreFile.mockReturnValue(true); - const result = await handleAtCommand({ query, config: mockConfig, @@ -716,16 +540,16 @@ describe('handleAtCommand', () => { signal: abortController.signal, }); - expect(mockFileDiscoveryService.shouldIgnoreFile).toHaveBeenCalledWith( - gitFile, - { respectGitIgnore: true }, - ); + expect(result).toEqual({ + processedQuery: [{ text: query }], + shouldProceed: true, + }); expect(mockOnDebugMessage).toHaveBeenCalledWith( `Path ${gitFile} is git-ignored and will be skipped.`, ); - expect(mockReadManyFilesExecute).not.toHaveBeenCalled(); - expect(result.processedQuery).toEqual([{ text: query }]); - expect(result.shouldProceed).toBe(true); + expect(mockOnDebugMessage).toHaveBeenCalledWith( + `Ignored 1 files:\nGit-ignored: ${gitFile}`, + ); }); }); @@ -738,10 +562,6 @@ describe('handleAtCommand', () => { const invalidFile = 'nonexistent.txt'; const query = `@${invalidFile}`; - 
vi.mocked(fsPromises.stat).mockRejectedValue( - Object.assign(new Error('ENOENT'), { code: 'ENOENT' }), - ); - const result = await handleAtCommand({ query, config: mockConfig, @@ -751,7 +571,6 @@ describe('handleAtCommand', () => { signal: abortController.signal, }); - expect(mockGlobExecute).not.toHaveBeenCalled(); expect(mockOnDebugMessage).toHaveBeenCalledWith( `Glob tool not found. Path ${invalidFile} will be skipped.`, ); @@ -759,4 +578,112 @@ describe('handleAtCommand', () => { expect(result.shouldProceed).toBe(true); }); }); + + describe('gemini-ignore filtering', () => { + it('should skip gemini-ignored files in @ commands', async () => { + await createTestFile( + path.join(testRootDir, '.geminiignore'), + 'build/output.js', + ); + const geminiIgnoredFile = await createTestFile( + path.join(testRootDir, 'build', 'output.js'), + 'console.log("Hello");', + ); + const query = `@${geminiIgnoredFile}`; + + const result = await handleAtCommand({ + query, + config: mockConfig, + addItem: mockAddItem, + onDebugMessage: mockOnDebugMessage, + messageId: 204, + signal: abortController.signal, + }); + + expect(result).toEqual({ + processedQuery: [{ text: query }], + shouldProceed: true, + }); + expect(mockOnDebugMessage).toHaveBeenCalledWith( + `Path ${geminiIgnoredFile} is gemini-ignored and will be skipped.`, + ); + expect(mockOnDebugMessage).toHaveBeenCalledWith( + `Ignored 1 files:\nGemini-ignored: ${geminiIgnoredFile}`, + ); + }); + }); + it('should process non-ignored files when .geminiignore is present', async () => { + await createTestFile( + path.join(testRootDir, '.geminiignore'), + 'build/output.js', + ); + const validFile = await createTestFile( + path.join(testRootDir, 'src', 'index.ts'), + 'console.log("Hello world");', + ); + const query = `@${validFile}`; + + const result = await handleAtCommand({ + query, + config: mockConfig, + addItem: mockAddItem, + onDebugMessage: mockOnDebugMessage, + messageId: 205, + signal: abortController.signal, + }); + + 
expect(result).toEqual({ + processedQuery: [ + { text: `@${validFile}` }, + { text: '\n--- Content from referenced files ---' }, + { text: `\nContent from @${validFile}:\n` }, + { text: 'console.log("Hello world");' }, + { text: '\n--- End of content ---' }, + ], + shouldProceed: true, + }); + }); + + it('should handle mixed gemini-ignored and valid files', async () => { + await createTestFile( + path.join(testRootDir, '.geminiignore'), + 'dist/bundle.js', + ); + const validFile = await createTestFile( + path.join(testRootDir, 'src', 'main.ts'), + '// Main application entry', + ); + const geminiIgnoredFile = await createTestFile( + path.join(testRootDir, 'dist', 'bundle.js'), + 'console.log("bundle");', + ); + const query = `@${validFile} @${geminiIgnoredFile}`; + + const result = await handleAtCommand({ + query, + config: mockConfig, + addItem: mockAddItem, + onDebugMessage: mockOnDebugMessage, + messageId: 206, + signal: abortController.signal, + }); + + expect(result).toEqual({ + processedQuery: [ + { text: `@${validFile} @${geminiIgnoredFile}` }, + { text: '\n--- Content from referenced files ---' }, + { text: `\nContent from @${validFile}:\n` }, + { text: '// Main application entry' }, + { text: '\n--- End of content ---' }, + ], + shouldProceed: true, + }); + expect(mockOnDebugMessage).toHaveBeenCalledWith( + `Path ${geminiIgnoredFile} is gemini-ignored and will be skipped.`, + ); + expect(mockOnDebugMessage).toHaveBeenCalledWith( + `Ignored 1 files:\nGemini-ignored: ${geminiIgnoredFile}`, + ); + }); + // }); }); diff --git a/packages/cli/src/ui/hooks/atCommandProcessor.ts b/packages/cli/src/ui/hooks/atCommandProcessor.ts index eff284a65..25d576993 100644 --- a/packages/cli/src/ui/hooks/atCommandProcessor.ts +++ b/packages/cli/src/ui/hooks/atCommandProcessor.ts @@ -136,12 +136,17 @@ export async function handleAtCommand({ // Get centralized file discovery service const fileDiscovery = config.getFileService(); - const respectGitIgnore = 
config.getFileFilteringRespectGitIgnore(); + + const respectFileIgnore = config.getFileFilteringOptions(); const pathSpecsToRead: string[] = []; const atPathToResolvedSpecMap = new Map(); const contentLabelsForDisplay: string[] = []; - const ignoredPaths: string[] = []; + const ignoredByReason: Record = { + git: [], + gemini: [], + both: [], + }; const toolRegistry = await config.getToolRegistry(); const readManyFilesTool = toolRegistry.getTool('read_many_files'); @@ -182,10 +187,31 @@ export async function handleAtCommand({ } // Check if path should be ignored based on filtering options - if (fileDiscovery.shouldIgnoreFile(pathName, { respectGitIgnore })) { - const reason = respectGitIgnore ? 'git-ignored' : 'custom-ignored'; - onDebugMessage(`Path ${pathName} is ${reason} and will be skipped.`); - ignoredPaths.push(pathName); + + const gitIgnored = + respectFileIgnore.respectGitIgnore && + fileDiscovery.shouldIgnoreFile(pathName, { + respectGitIgnore: true, + respectGeminiIgnore: false, + }); + const geminiIgnored = + respectFileIgnore.respectGeminiIgnore && + fileDiscovery.shouldIgnoreFile(pathName, { + respectGitIgnore: false, + respectGeminiIgnore: true, + }); + + if (gitIgnored || geminiIgnored) { + const reason = + gitIgnored && geminiIgnored ? 'both' : gitIgnored ? 'git' : 'gemini'; + ignoredByReason[reason].push(pathName); + const reasonText = + reason === 'both' + ? 'ignored by both git and gemini' + : reason === 'git' + ? 'git-ignored' + : 'gemini-ignored'; + onDebugMessage(`Path ${pathName} is ${reasonText} and will be skipped.`); continue; } @@ -196,14 +222,13 @@ export async function handleAtCommand({ const absolutePath = path.resolve(config.getTargetDir(), pathName); const stats = await fs.stat(absolutePath); if (stats.isDirectory()) { - currentPathSpec = pathName.endsWith('/') - ? `${pathName}**` - : `${pathName}/**`; + currentPathSpec = + pathName + (pathName.endsWith(path.sep) ? 
`**` : `/**`); onDebugMessage( `Path ${pathName} resolved to directory, using glob: ${currentPathSpec}`, ); } else { - onDebugMessage(`Path ${pathName} resolved to file: ${currentPathSpec}`); + onDebugMessage(`Path ${pathName} resolved to file: ${absolutePath}`); } resolvedSuccessfully = true; } catch (error) { @@ -214,7 +239,10 @@ export async function handleAtCommand({ ); try { const globResult = await globTool.execute( - { pattern: `**/*${pathName}*`, path: config.getTargetDir() }, + { + pattern: `**/*${pathName}*`, + path: config.getTargetDir(), + }, signal, ); if ( @@ -319,11 +347,26 @@ export async function handleAtCommand({ initialQueryText = initialQueryText.trim(); // Inform user about ignored paths - if (ignoredPaths.length > 0) { - const ignoreType = respectGitIgnore ? 'git-ignored' : 'custom-ignored'; - onDebugMessage( - `Ignored ${ignoredPaths.length} ${ignoreType} files: ${ignoredPaths.join(', ')}`, - ); + const totalIgnored = + ignoredByReason.git.length + + ignoredByReason.gemini.length + + ignoredByReason.both.length; + + if (totalIgnored > 0) { + const messages = []; + if (ignoredByReason.git.length) { + messages.push(`Git-ignored: ${ignoredByReason.git.join(', ')}`); + } + if (ignoredByReason.gemini.length) { + messages.push(`Gemini-ignored: ${ignoredByReason.gemini.join(', ')}`); + } + if (ignoredByReason.both.length) { + messages.push(`Ignored by both: ${ignoredByReason.both.join(', ')}`); + } + + const message = `Ignored ${totalIgnored} files:\n${messages.join('\n')}`; + console.log(message); + onDebugMessage(message); } // Fallback for lone "@" or completely invalid @-commands resulting in empty initialQueryText @@ -347,7 +390,11 @@ export async function handleAtCommand({ const toolArgs = { paths: pathSpecsToRead, - respect_git_ignore: respectGitIgnore, // Use configuration setting + file_filtering_options: { + respect_git_ignore: respectFileIgnore.respectGitIgnore, + respect_gemini_ignore: respectFileIgnore.respectGeminiIgnore, + }, + // Use 
configuration setting }; let toolCallDisplay: IndividualToolCallDisplay; diff --git a/packages/cli/src/ui/hooks/shellCommandProcessor.test.ts b/packages/cli/src/ui/hooks/shellCommandProcessor.test.ts index 1094eb86b..14b71dc09 100644 --- a/packages/cli/src/ui/hooks/shellCommandProcessor.test.ts +++ b/packages/cli/src/ui/hooks/shellCommandProcessor.test.ts @@ -5,64 +5,86 @@ */ import { act, renderHook } from '@testing-library/react'; -import { vi } from 'vitest'; -import { useShellCommandProcessor } from './shellCommandProcessor'; -import { Config, GeminiClient } from '@qwen-code/qwen-code-core'; -import * as fs from 'fs'; -import EventEmitter from 'events'; +import { + vi, + describe, + it, + expect, + beforeEach, + afterEach, + type Mock, +} from 'vitest'; -// Mock dependencies -vi.mock('child_process'); +const mockIsBinary = vi.hoisted(() => vi.fn()); +const mockShellExecutionService = vi.hoisted(() => vi.fn()); +vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => { + const original = + await importOriginal(); + return { + ...original, + ShellExecutionService: { execute: mockShellExecutionService }, + isBinary: mockIsBinary, + }; +}); vi.mock('fs'); -vi.mock('os', () => ({ - default: { - platform: () => 'linux', - tmpdir: () => '/tmp', - }, - platform: () => 'linux', - tmpdir: () => '/tmp', -})); -vi.mock('@qwen-code/qwen-code-core'); -vi.mock('../utils/textUtils.js', () => ({ - isBinary: vi.fn(), -})); +vi.mock('os'); +vi.mock('crypto'); +vi.mock('../utils/textUtils.js'); + +import { + useShellCommandProcessor, + OUTPUT_UPDATE_INTERVAL_MS, +} from './shellCommandProcessor.js'; +import { + type Config, + type GeminiClient, + type ShellExecutionResult, + type ShellOutputEvent, +} from '@qwen-code/qwen-code-core'; +import * as fs from 'fs'; +import * as os from 'os'; +import * as path from 'path'; +import * as crypto from 'crypto'; +import { ToolCallStatus } from '../types.js'; describe('useShellCommandProcessor', () => { - let spawnEmitter: 
EventEmitter; - let addItemToHistoryMock: vi.Mock; - let setPendingHistoryItemMock: vi.Mock; - let onExecMock: vi.Mock; - let onDebugMessageMock: vi.Mock; - let configMock: Config; - let geminiClientMock: GeminiClient; + let addItemToHistoryMock: Mock; + let setPendingHistoryItemMock: Mock; + let onExecMock: Mock; + let onDebugMessageMock: Mock; + let mockConfig: Config; + let mockGeminiClient: GeminiClient; - beforeEach(async () => { - const { spawn } = await import('child_process'); - spawnEmitter = new EventEmitter(); - spawnEmitter.stdout = new EventEmitter(); - spawnEmitter.stderr = new EventEmitter(); - (spawn as vi.Mock).mockReturnValue(spawnEmitter); + let mockShellOutputCallback: (event: ShellOutputEvent) => void; + let resolveExecutionPromise: (result: ShellExecutionResult) => void; - vi.spyOn(fs, 'existsSync').mockReturnValue(false); - vi.spyOn(fs, 'readFileSync').mockReturnValue(''); - vi.spyOn(fs, 'unlinkSync').mockReturnValue(undefined); + beforeEach(() => { + vi.clearAllMocks(); addItemToHistoryMock = vi.fn(); setPendingHistoryItemMock = vi.fn(); onExecMock = vi.fn(); onDebugMessageMock = vi.fn(); + mockConfig = { getTargetDir: () => '/test/dir' } as Config; + mockGeminiClient = { addHistory: vi.fn() } as unknown as GeminiClient; - configMock = { - getTargetDir: () => '/test/dir', - } as unknown as Config; + vi.mocked(os.platform).mockReturnValue('linux'); + vi.mocked(os.tmpdir).mockReturnValue('/tmp'); + (vi.mocked(crypto.randomBytes) as Mock).mockReturnValue( + Buffer.from('abcdef', 'hex'), + ); + mockIsBinary.mockReturnValue(false); + vi.mocked(fs.existsSync).mockReturnValue(false); - geminiClientMock = { - addHistory: vi.fn(), - } as unknown as GeminiClient; - }); - - afterEach(() => { - vi.restoreAllMocks(); + mockShellExecutionService.mockImplementation((_cmd, _cwd, callback) => { + mockShellOutputCallback = callback; + return { + pid: 12345, + result: new Promise((resolve) => { + resolveExecutionPromise = resolve; + }), + }; + }); }); const 
renderProcessorHook = () => @@ -72,108 +94,386 @@ describe('useShellCommandProcessor', () => { setPendingHistoryItemMock, onExecMock, onDebugMessageMock, - configMock, - geminiClientMock, + mockConfig, + mockGeminiClient, ), ); - it('should execute a command and update history on success', async () => { - const { result } = renderProcessorHook(); - const abortController = new AbortController(); - - act(() => { - result.current.handleShellCommand('ls -l', abortController.signal); - }); - - expect(onExecMock).toHaveBeenCalledTimes(1); - const execPromise = onExecMock.mock.calls[0][0]; - - // Simulate stdout - act(() => { - spawnEmitter.stdout.emit('data', Buffer.from('file1.txt\nfile2.txt')); - }); - - // Simulate process exit - act(() => { - spawnEmitter.emit('exit', 0, null); - }); - - await act(async () => { - await execPromise; - }); - - expect(addItemToHistoryMock).toHaveBeenCalledTimes(2); - expect(addItemToHistoryMock.mock.calls[1][0]).toEqual({ - type: 'info', - text: 'file1.txt\nfile2.txt', - }); - expect(geminiClientMock.addHistory).toHaveBeenCalledTimes(1); + const createMockServiceResult = ( + overrides: Partial = {}, + ): ShellExecutionResult => ({ + rawOutput: Buffer.from(overrides.output || ''), + output: 'Success', + stdout: 'Success', + stderr: '', + exitCode: 0, + signal: null, + error: null, + aborted: false, + pid: 12345, + ...overrides, }); - it('should handle binary output', async () => { + it('should initiate command execution and set pending state', async () => { + const { result } = renderProcessorHook(); + + act(() => { + result.current.handleShellCommand('ls -l', new AbortController().signal); + }); + + expect(addItemToHistoryMock).toHaveBeenCalledWith( + { type: 'user_shell', text: 'ls -l' }, + expect.any(Number), + ); + expect(setPendingHistoryItemMock).toHaveBeenCalledWith({ + type: 'tool_group', + tools: [ + expect.objectContaining({ + name: 'Shell Command', + status: ToolCallStatus.Executing, + }), + ], + }); + const tmpFile = 
path.join(os.tmpdir(), 'shell_pwd_abcdef.tmp'); + const wrappedCommand = `{ ls -l; }; __code=$?; pwd > "${tmpFile}"; exit $__code`; + expect(mockShellExecutionService).toHaveBeenCalledWith( + wrappedCommand, + '/test/dir', + expect.any(Function), + expect.any(Object), + ); + expect(onExecMock).toHaveBeenCalledWith(expect.any(Promise)); + }); + + it('should handle successful execution and update history correctly', async () => { const { result } = renderProcessorHook(); - const abortController = new AbortController(); - const { isBinary } = await import('../utils/textUtils.js'); - (isBinary as vi.Mock).mockReturnValue(true); act(() => { result.current.handleShellCommand( - 'cat myimage.png', - abortController.signal, + 'echo "ok"', + new AbortController().signal, ); }); - - expect(onExecMock).toHaveBeenCalledTimes(1); const execPromise = onExecMock.mock.calls[0][0]; act(() => { - spawnEmitter.stdout.emit('data', Buffer.from([0x89, 0x50, 0x4e, 0x47])); + resolveExecutionPromise(createMockServiceResult({ output: 'ok' })); }); + await act(async () => await execPromise); + + expect(setPendingHistoryItemMock).toHaveBeenCalledWith(null); + expect(addItemToHistoryMock).toHaveBeenCalledTimes(2); // Initial + final + expect(addItemToHistoryMock.mock.calls[1][0]).toEqual( + expect.objectContaining({ + tools: [ + expect.objectContaining({ + status: ToolCallStatus.Success, + resultDisplay: 'ok', + }), + ], + }), + ); + expect(mockGeminiClient.addHistory).toHaveBeenCalled(); + }); + + it('should handle command failure and display error status', async () => { + const { result } = renderProcessorHook(); act(() => { - spawnEmitter.emit('exit', 0, null); + result.current.handleShellCommand( + 'bad-cmd', + new AbortController().signal, + ); + }); + const execPromise = onExecMock.mock.calls[0][0]; + + act(() => { + resolveExecutionPromise( + createMockServiceResult({ exitCode: 127, output: 'not found' }), + ); + }); + await act(async () => await execPromise); + + const 
finalHistoryItem = addItemToHistoryMock.mock.calls[1][0]; + expect(finalHistoryItem.tools[0].status).toBe(ToolCallStatus.Error); + expect(finalHistoryItem.tools[0].resultDisplay).toContain( + 'Command exited with code 127', + ); + expect(finalHistoryItem.tools[0].resultDisplay).toContain('not found'); + }); + + describe('UI Streaming and Throttling', () => { + beforeEach(() => { + vi.useFakeTimers({ toFake: ['Date'] }); + }); + afterEach(() => { + vi.useRealTimers(); }); - await act(async () => { - await execPromise; + it('should throttle pending UI updates for text streams', async () => { + const { result } = renderProcessorHook(); + act(() => { + result.current.handleShellCommand( + 'stream', + new AbortController().signal, + ); + }); + + // Simulate rapid output + act(() => { + mockShellOutputCallback({ + type: 'data', + stream: 'stdout', + chunk: 'hello', + }); + }); + + // Should not have updated the UI yet + expect(setPendingHistoryItemMock).toHaveBeenCalledTimes(1); // Only the initial call + + // Advance time and send another event to trigger the throttled update + await act(async () => { + await vi.advanceTimersByTimeAsync(OUTPUT_UPDATE_INTERVAL_MS + 1); + }); + act(() => { + mockShellOutputCallback({ + type: 'data', + stream: 'stdout', + chunk: ' world', + }); + }); + + // Should now have been called with the cumulative output + expect(setPendingHistoryItemMock).toHaveBeenCalledTimes(2); + expect(setPendingHistoryItemMock).toHaveBeenLastCalledWith( + expect.objectContaining({ + tools: [expect.objectContaining({ resultDisplay: 'hello world' })], + }), + ); }); - expect(addItemToHistoryMock).toHaveBeenCalledTimes(2); - expect(addItemToHistoryMock.mock.calls[1][0]).toEqual({ - type: 'info', - text: '[Command produced binary output, which is not shown.]', + it('should show binary progress messages correctly', async () => { + const { result } = renderProcessorHook(); + act(() => { + result.current.handleShellCommand( + 'cat img', + new 
AbortController().signal, + ); + }); + + // Should immediately show the detection message + act(() => { + mockShellOutputCallback({ type: 'binary_detected' }); + }); + await act(async () => { + await vi.advanceTimersByTimeAsync(OUTPUT_UPDATE_INTERVAL_MS + 1); + }); + // Send another event to trigger the update + act(() => { + mockShellOutputCallback({ type: 'binary_progress', bytesReceived: 0 }); + }); + + expect(setPendingHistoryItemMock).toHaveBeenLastCalledWith( + expect.objectContaining({ + tools: [ + expect.objectContaining({ + resultDisplay: '[Binary output detected. Halting stream...]', + }), + ], + }), + ); + + // Now test progress updates + await act(async () => { + await vi.advanceTimersByTimeAsync(OUTPUT_UPDATE_INTERVAL_MS + 1); + }); + act(() => { + mockShellOutputCallback({ + type: 'binary_progress', + bytesReceived: 2048, + }); + }); + + expect(setPendingHistoryItemMock).toHaveBeenLastCalledWith( + expect.objectContaining({ + tools: [ + expect.objectContaining({ + resultDisplay: '[Receiving binary output... 
2.0 KB received]', + }), + ], + }), + ); }); }); - it('should handle command failure', async () => { + it('should not wrap the command on Windows', async () => { + vi.mocked(os.platform).mockReturnValue('win32'); + const { result } = renderProcessorHook(); + + act(() => { + result.current.handleShellCommand('dir', new AbortController().signal); + }); + + expect(mockShellExecutionService).toHaveBeenCalledWith( + 'dir', + '/test/dir', + expect.any(Function), + expect.any(Object), + ); + }); + + it('should handle command abort and display cancelled status', async () => { const { result } = renderProcessorHook(); const abortController = new AbortController(); act(() => { - result.current.handleShellCommand( - 'a-bad-command', - abortController.signal, - ); + result.current.handleShellCommand('sleep 5', abortController.signal); }); - const execPromise = onExecMock.mock.calls[0][0]; act(() => { - spawnEmitter.stderr.emit('data', Buffer.from('command not found')); + abortController.abort(); + resolveExecutionPromise( + createMockServiceResult({ aborted: true, output: 'Canceled' }), + ); }); + await act(async () => await execPromise); + + const finalHistoryItem = addItemToHistoryMock.mock.calls[1][0]; + expect(finalHistoryItem.tools[0].status).toBe(ToolCallStatus.Canceled); + expect(finalHistoryItem.tools[0].resultDisplay).toContain( + 'Command was cancelled.', + ); + }); + + it('should handle binary output result correctly', async () => { + const { result } = renderProcessorHook(); + const binaryBuffer = Buffer.from([0x89, 0x50, 0x4e, 0x47]); + mockIsBinary.mockReturnValue(true); act(() => { - spawnEmitter.emit('exit', 127, null); + result.current.handleShellCommand( + 'cat image.png', + new AbortController().signal, + ); }); + const execPromise = onExecMock.mock.calls[0][0]; - await act(async () => { - await execPromise; + act(() => { + resolveExecutionPromise( + createMockServiceResult({ rawOutput: binaryBuffer }), + ); }); + await act(async () => await execPromise); + 
const finalHistoryItem = addItemToHistoryMock.mock.calls[1][0]; + expect(finalHistoryItem.tools[0].status).toBe(ToolCallStatus.Success); + expect(finalHistoryItem.tools[0].resultDisplay).toBe( + '[Command produced binary output, which is not shown.]', + ); + }); + + it('should handle promise rejection and show an error', async () => { + const { result } = renderProcessorHook(); + const testError = new Error('Unexpected failure'); + mockShellExecutionService.mockImplementation(() => ({ + pid: 12345, + result: Promise.reject(testError), + })); + + act(() => { + result.current.handleShellCommand( + 'a-command', + new AbortController().signal, + ); + }); + const execPromise = onExecMock.mock.calls[0][0]; + + await act(async () => await execPromise); + + expect(setPendingHistoryItemMock).toHaveBeenCalledWith(null); expect(addItemToHistoryMock).toHaveBeenCalledTimes(2); expect(addItemToHistoryMock.mock.calls[1][0]).toEqual({ type: 'error', - text: 'Command exited with code 127.\ncommand not found', + text: 'An unexpected error occurred: Unexpected failure', + }); + }); + + it('should handle synchronous errors during execution and clean up resources', async () => { + const testError = new Error('Synchronous spawn error'); + mockShellExecutionService.mockImplementation(() => { + throw testError; + }); + // Mock that the temp file was created before the error was thrown + vi.mocked(fs.existsSync).mockReturnValue(true); + + const { result } = renderProcessorHook(); + + act(() => { + result.current.handleShellCommand( + 'a-command', + new AbortController().signal, + ); + }); + const execPromise = onExecMock.mock.calls[0][0]; + + await act(async () => await execPromise); + + expect(setPendingHistoryItemMock).toHaveBeenCalledWith(null); + expect(addItemToHistoryMock).toHaveBeenCalledTimes(2); + expect(addItemToHistoryMock.mock.calls[1][0]).toEqual({ + type: 'error', + text: 'An unexpected error occurred: Synchronous spawn error', + }); + const tmpFile = path.join(os.tmpdir(), 
'shell_pwd_abcdef.tmp'); + // Verify that the temporary file was cleaned up + expect(vi.mocked(fs.unlinkSync)).toHaveBeenCalledWith(tmpFile); + }); + + describe('Directory Change Warning', () => { + it('should show a warning if the working directory changes', async () => { + const tmpFile = path.join(os.tmpdir(), 'shell_pwd_abcdef.tmp'); + vi.mocked(fs.existsSync).mockReturnValue(true); + vi.mocked(fs.readFileSync).mockReturnValue('/test/dir/new'); // A different directory + + const { result } = renderProcessorHook(); + act(() => { + result.current.handleShellCommand( + 'cd new', + new AbortController().signal, + ); + }); + const execPromise = onExecMock.mock.calls[0][0]; + + act(() => { + resolveExecutionPromise(createMockServiceResult()); + }); + await act(async () => await execPromise); + + const finalHistoryItem = addItemToHistoryMock.mock.calls[1][0]; + expect(finalHistoryItem.tools[0].resultDisplay).toContain( + "WARNING: shell mode is stateless; the directory change to '/test/dir/new' will not persist.", + ); + expect(vi.mocked(fs.unlinkSync)).toHaveBeenCalledWith(tmpFile); + }); + + it('should NOT show a warning if the directory does not change', async () => { + vi.mocked(fs.existsSync).mockReturnValue(true); + vi.mocked(fs.readFileSync).mockReturnValue('/test/dir'); // The same directory + + const { result } = renderProcessorHook(); + act(() => { + result.current.handleShellCommand('ls', new AbortController().signal); + }); + const execPromise = onExecMock.mock.calls[0][0]; + + act(() => { + resolveExecutionPromise(createMockServiceResult()); + }); + await act(async () => await execPromise); + + const finalHistoryItem = addItemToHistoryMock.mock.calls[1][0]; + expect(finalHistoryItem.tools[0].resultDisplay).not.toContain('WARNING'); }); }); }); diff --git a/packages/cli/src/ui/hooks/shellCommandProcessor.ts b/packages/cli/src/ui/hooks/shellCommandProcessor.ts index bc67eedc5..b7de2d896 100644 --- a/packages/cli/src/ui/hooks/shellCommandProcessor.ts +++ 
b/packages/cli/src/ui/hooks/shellCommandProcessor.ts @@ -4,174 +4,31 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { spawn } from 'child_process'; -import { StringDecoder } from 'string_decoder'; -import type { HistoryItemWithoutId } from '../types.js'; +import { + HistoryItemWithoutId, + IndividualToolCallDisplay, + ToolCallStatus, +} from '../types.js'; import { useCallback } from 'react'; -import { Config, GeminiClient } from '@qwen-code/qwen-code-core'; +import { + Config, + GeminiClient, + isBinary, + ShellExecutionResult, + ShellExecutionService, +} from '@qwen-code/qwen-code-core'; import { type PartListUnion } from '@google/genai'; -import { formatMemoryUsage } from '../utils/formatters.js'; -import { isBinary } from '../utils/textUtils.js'; import { UseHistoryManagerReturn } from './useHistoryManager.js'; +import { SHELL_COMMAND_NAME } from '../constants.js'; +import { formatMemoryUsage } from '../utils/formatters.js'; import crypto from 'crypto'; import path from 'path'; import os from 'os'; import fs from 'fs'; -import stripAnsi from 'strip-ansi'; -const OUTPUT_UPDATE_INTERVAL_MS = 1000; +export const OUTPUT_UPDATE_INTERVAL_MS = 1000; const MAX_OUTPUT_LENGTH = 10000; -/** - * A structured result from a shell command execution. - */ -interface ShellExecutionResult { - rawOutput: Buffer; - output: string; - exitCode: number | null; - signal: NodeJS.Signals | null; - error: Error | null; - aborted: boolean; -} - -/** - * Executes a shell command using `spawn`, capturing all output and lifecycle events. - * This is the single, unified implementation for shell execution. - * - * @param commandToExecute The exact command string to run. - * @param cwd The working directory to execute the command in. - * @param abortSignal An AbortSignal to terminate the process. - * @param onOutputChunk A callback for streaming real-time output. - * @param onDebugMessage A callback for logging debug information. 
- * @returns A promise that resolves with the complete execution result. - */ -function executeShellCommand( - commandToExecute: string, - cwd: string, - abortSignal: AbortSignal, - onOutputChunk: (chunk: string) => void, - onDebugMessage: (message: string) => void, -): Promise { - return new Promise((resolve) => { - const isWindows = os.platform() === 'win32'; - const shell = isWindows ? 'cmd.exe' : 'bash'; - const shellArgs = isWindows - ? ['/c', commandToExecute] - : ['-c', commandToExecute]; - - const child = spawn(shell, shellArgs, { - cwd, - stdio: ['ignore', 'pipe', 'pipe'], - detached: !isWindows, // Use process groups on non-Windows for robust killing - }); - - // Use decoders to handle multi-byte characters safely (for streaming output). - const stdoutDecoder = new StringDecoder('utf8'); - const stderrDecoder = new StringDecoder('utf8'); - - let stdout = ''; - let stderr = ''; - const outputChunks: Buffer[] = []; - let error: Error | null = null; - let exited = false; - - let streamToUi = true; - const MAX_SNIFF_SIZE = 4096; - let sniffedBytes = 0; - - const handleOutput = (data: Buffer, stream: 'stdout' | 'stderr') => { - outputChunks.push(data); - - if (streamToUi && sniffedBytes < MAX_SNIFF_SIZE) { - // Use a limited-size buffer for the check to avoid performance issues. - const sniffBuffer = Buffer.concat(outputChunks.slice(0, 20)); - sniffedBytes = sniffBuffer.length; - - if (isBinary(sniffBuffer)) { - streamToUi = false; - // Overwrite any garbled text that may have streamed with a clear message. - onOutputChunk('[Binary output detected. Halting stream...]'); - } - } - - const decodedChunk = - stream === 'stdout' - ? stdoutDecoder.write(data) - : stderrDecoder.write(data); - if (stream === 'stdout') { - stdout += stripAnsi(decodedChunk); - } else { - stderr += stripAnsi(decodedChunk); - } - - if (!exited && streamToUi) { - // Send only the new chunk to avoid re-rendering the whole output. - const combinedOutput = stdout + (stderr ? 
`\n${stderr}` : ''); - onOutputChunk(combinedOutput); - } else if (!exited && !streamToUi) { - // Send progress updates for the binary stream - const totalBytes = outputChunks.reduce( - (sum, chunk) => sum + chunk.length, - 0, - ); - onOutputChunk( - `[Receiving binary output... ${formatMemoryUsage(totalBytes)} received]`, - ); - } - }; - - child.stdout.on('data', (data) => handleOutput(data, 'stdout')); - child.stderr.on('data', (data) => handleOutput(data, 'stderr')); - child.on('error', (err) => { - error = err; - }); - - const abortHandler = async () => { - if (child.pid && !exited) { - onDebugMessage(`Aborting shell command (PID: ${child.pid})`); - if (isWindows) { - spawn('taskkill', ['/pid', child.pid.toString(), '/f', '/t']); - } else { - try { - // Kill the entire process group (negative PID). - // SIGTERM first, then SIGKILL if it doesn't die. - process.kill(-child.pid, 'SIGTERM'); - await new Promise((res) => setTimeout(res, 200)); - if (!exited) { - process.kill(-child.pid, 'SIGKILL'); - } - } catch (_e) { - // Fallback to killing just the main process if group kill fails. - if (!exited) child.kill('SIGKILL'); - } - } - } - }; - - abortSignal.addEventListener('abort', abortHandler, { once: true }); - - child.on('exit', (code, signal) => { - exited = true; - abortSignal.removeEventListener('abort', abortHandler); - - // Handle any final bytes lingering in the decoders - stdout += stdoutDecoder.end(); - stderr += stderrDecoder.end(); - - const finalBuffer = Buffer.concat(outputChunks); - - resolve({ - rawOutput: finalBuffer, - output: stdout + (stderr ? 
`\n${stderr}` : ''), - exitCode: code, - signal, - error, - aborted: abortSignal.aborted, - }); - }); - }); -} - function addShellCommandToGeminiHistory( geminiClient: GeminiClient, rawQuery: string, @@ -221,6 +78,7 @@ export const useShellCommandProcessor = ( } const userMessageTimestamp = Date.now(); + const callId = `shell-${userMessageTimestamp}`; addItemToHistory( { type: 'user_shell', text: rawQuery }, userMessageTimestamp, @@ -244,95 +102,203 @@ export const useShellCommandProcessor = ( } const execPromise = new Promise((resolve) => { - let lastUpdateTime = 0; + let lastUpdateTime = Date.now(); + let cumulativeStdout = ''; + let cumulativeStderr = ''; + let isBinaryStream = false; + let binaryBytesReceived = 0; + + const initialToolDisplay: IndividualToolCallDisplay = { + callId, + name: SHELL_COMMAND_NAME, + description: rawQuery, + status: ToolCallStatus.Executing, + resultDisplay: '', + confirmationDetails: undefined, + }; + + setPendingHistoryItem({ + type: 'tool_group', + tools: [initialToolDisplay], + }); + + let executionPid: number | undefined; + + const abortHandler = () => { + onDebugMessage( + `Aborting shell command (PID: ${executionPid ?? 'unknown'})`, + ); + }; + abortSignal.addEventListener('abort', abortHandler, { once: true }); onDebugMessage(`Executing in ${targetDir}: ${commandToExecute}`); - executeShellCommand( - commandToExecute, - targetDir, - abortSignal, - (streamedOutput) => { - // Throttle pending UI updates to avoid excessive re-renders. - if (Date.now() - lastUpdateTime > OUTPUT_UPDATE_INTERVAL_MS) { - setPendingHistoryItem({ type: 'info', text: streamedOutput }); - lastUpdateTime = Date.now(); - } - }, - onDebugMessage, - ) - .then((result) => { - // TODO(abhipatel12) - Consider updating pending item and using timeout to ensure - // there is no jump where intermediate output is skipped. 
- setPendingHistoryItem(null); - let historyItemType: HistoryItemWithoutId['type'] = 'info'; - let mainContent: string; - - // The context sent to the model utilizes a text tokenizer which means raw binary data is - // cannot be parsed and understood and thus would only pollute the context window and waste - // tokens. - if (isBinary(result.rawOutput)) { - mainContent = - '[Command produced binary output, which is not shown.]'; - } else { - mainContent = - result.output.trim() || '(Command produced no output)'; - } - - let finalOutput = mainContent; - - if (result.error) { - historyItemType = 'error'; - finalOutput = `${result.error.message}\n${finalOutput}`; - } else if (result.aborted) { - finalOutput = `Command was cancelled.\n${finalOutput}`; - } else if (result.signal) { - historyItemType = 'error'; - finalOutput = `Command terminated by signal: ${result.signal}.\n${finalOutput}`; - } else if (result.exitCode !== 0) { - historyItemType = 'error'; - finalOutput = `Command exited with code ${result.exitCode}.\n${finalOutput}`; - } - - if (pwdFilePath && fs.existsSync(pwdFilePath)) { - const finalPwd = fs.readFileSync(pwdFilePath, 'utf8').trim(); - if (finalPwd && finalPwd !== targetDir) { - const warning = `WARNING: shell mode is stateless; the directory change to '${finalPwd}' will not persist.`; - finalOutput = `${warning}\n\n${finalOutput}`; + try { + const { pid, result } = ShellExecutionService.execute( + commandToExecute, + targetDir, + (event) => { + switch (event.type) { + case 'data': + // Do not process text data if we've already switched to binary mode. 
+ if (isBinaryStream) break; + if (event.stream === 'stdout') { + cumulativeStdout += event.chunk; + } else { + cumulativeStderr += event.chunk; + } + break; + case 'binary_detected': + isBinaryStream = true; + break; + case 'binary_progress': + isBinaryStream = true; + binaryBytesReceived = event.bytesReceived; + break; + default: { + throw new Error('An unhandled ShellOutputEvent was found.'); + } } - } - // Add the complete, contextual result to the local UI history. - addItemToHistory( - { type: historyItemType, text: finalOutput }, - userMessageTimestamp, - ); + // Compute the display string based on the *current* state. + let currentDisplayOutput: string; + if (isBinaryStream) { + if (binaryBytesReceived > 0) { + currentDisplayOutput = `[Receiving binary output... ${formatMemoryUsage( + binaryBytesReceived, + )} received]`; + } else { + currentDisplayOutput = + '[Binary output detected. Halting stream...]'; + } + } else { + currentDisplayOutput = + cumulativeStdout + + (cumulativeStderr ? `\n${cumulativeStderr}` : ''); + } - // Add the same complete, contextual result to the LLM's history. - addShellCommandToGeminiHistory(geminiClient, rawQuery, finalOutput); - }) - .catch((err) => { - setPendingHistoryItem(null); - const errorMessage = - err instanceof Error ? err.message : String(err); - addItemToHistory( - { - type: 'error', - text: `An unexpected error occurred: ${errorMessage}`, - }, - userMessageTimestamp, - ); - }) - .finally(() => { - if (pwdFilePath && fs.existsSync(pwdFilePath)) { - fs.unlinkSync(pwdFilePath); - } - resolve(); - }); + // Throttle pending UI updates to avoid excessive re-renders. 
+ if (Date.now() - lastUpdateTime > OUTPUT_UPDATE_INTERVAL_MS) { + setPendingHistoryItem({ + type: 'tool_group', + tools: [ + { + ...initialToolDisplay, + resultDisplay: currentDisplayOutput, + }, + ], + }); + lastUpdateTime = Date.now(); + } + }, + abortSignal, + ); + + executionPid = pid; + + result + .then((result: ShellExecutionResult) => { + setPendingHistoryItem(null); + + let mainContent: string; + + if (isBinary(result.rawOutput)) { + mainContent = + '[Command produced binary output, which is not shown.]'; + } else { + mainContent = + result.output.trim() || '(Command produced no output)'; + } + + let finalOutput = mainContent; + let finalStatus = ToolCallStatus.Success; + + if (result.error) { + finalStatus = ToolCallStatus.Error; + finalOutput = `${result.error.message}\n${finalOutput}`; + } else if (result.aborted) { + finalStatus = ToolCallStatus.Canceled; + finalOutput = `Command was cancelled.\n${finalOutput}`; + } else if (result.signal) { + finalStatus = ToolCallStatus.Error; + finalOutput = `Command terminated by signal: ${result.signal}.\n${finalOutput}`; + } else if (result.exitCode !== 0) { + finalStatus = ToolCallStatus.Error; + finalOutput = `Command exited with code ${result.exitCode}.\n${finalOutput}`; + } + + if (pwdFilePath && fs.existsSync(pwdFilePath)) { + const finalPwd = fs.readFileSync(pwdFilePath, 'utf8').trim(); + if (finalPwd && finalPwd !== targetDir) { + const warning = `WARNING: shell mode is stateless; the directory change to '${finalPwd}' will not persist.`; + finalOutput = `${warning}\n\n${finalOutput}`; + } + } + + const finalToolDisplay: IndividualToolCallDisplay = { + ...initialToolDisplay, + status: finalStatus, + resultDisplay: finalOutput, + }; + + // Add the complete, contextual result to the local UI history. + addItemToHistory( + { + type: 'tool_group', + tools: [finalToolDisplay], + } as HistoryItemWithoutId, + userMessageTimestamp, + ); + + // Add the same complete, contextual result to the LLM's history. 
+ addShellCommandToGeminiHistory( + geminiClient, + rawQuery, + finalOutput, + ); + }) + .catch((err) => { + setPendingHistoryItem(null); + const errorMessage = + err instanceof Error ? err.message : String(err); + addItemToHistory( + { + type: 'error', + text: `An unexpected error occurred: ${errorMessage}`, + }, + userMessageTimestamp, + ); + }) + .finally(() => { + abortSignal.removeEventListener('abort', abortHandler); + if (pwdFilePath && fs.existsSync(pwdFilePath)) { + fs.unlinkSync(pwdFilePath); + } + resolve(); + }); + } catch (err) { + // This block handles synchronous errors from `execute` + setPendingHistoryItem(null); + const errorMessage = err instanceof Error ? err.message : String(err); + addItemToHistory( + { + type: 'error', + text: `An unexpected error occurred: ${errorMessage}`, + }, + userMessageTimestamp, + ); + + // Perform cleanup here as well + if (pwdFilePath && fs.existsSync(pwdFilePath)) { + fs.unlinkSync(pwdFilePath); + } + + resolve(); // Resolve the promise to unblock `onExec` + } }); onExec(execPromise); - return true; // Command was initiated + return true; }, [ config, diff --git a/packages/cli/src/ui/hooks/slashCommandProcessor.test.ts b/packages/cli/src/ui/hooks/slashCommandProcessor.test.ts index 7ce3a0bc7..99e40fd72 100644 --- a/packages/cli/src/ui/hooks/slashCommandProcessor.test.ts +++ b/packages/cli/src/ui/hooks/slashCommandProcessor.test.ts @@ -11,1292 +11,806 @@ const { mockProcessExit } = vi.hoisted(() => ({ vi.mock('node:process', () => ({ default: { exit: mockProcessExit, - cwd: vi.fn(() => '/mock/cwd'), - get env() { - return process.env; - }, // Use a getter to ensure current process.env is used - platform: 'test-platform', - version: 'test-node-version', - memoryUsage: vi.fn(() => ({ - rss: 12345678, - heapTotal: 23456789, - heapUsed: 10234567, - external: 1234567, - arrayBuffers: 123456, - })), }, - // Provide top-level exports as well for compatibility - exit: mockProcessExit, - cwd: vi.fn(() => '/mock/cwd'), - get 
env() { - return process.env; - }, // Use a getter here too - platform: 'test-platform', - version: 'test-node-version', - memoryUsage: vi.fn(() => ({ - rss: 12345678, - heapTotal: 23456789, - heapUsed: 10234567, - external: 1234567, - arrayBuffers: 123456, +})); + +const mockBuiltinLoadCommands = vi.fn(); +vi.mock('../../services/BuiltinCommandLoader.js', () => ({ + BuiltinCommandLoader: vi.fn().mockImplementation(() => ({ + loadCommands: mockBuiltinLoadCommands, })), })); -vi.mock('node:fs/promises', () => ({ - readFile: vi.fn(), - writeFile: vi.fn(), - mkdir: vi.fn(), +const mockFileLoadCommands = vi.fn(); +vi.mock('../../services/FileCommandLoader.js', () => ({ + FileCommandLoader: vi.fn().mockImplementation(() => ({ + loadCommands: mockFileLoadCommands, + })), })); -const mockGetCliVersionFn = vi.fn(() => Promise.resolve('0.1.0')); -vi.mock('../../utils/version.js', () => ({ - getCliVersion: (...args: []) => mockGetCliVersionFn(...args), +const mockMcpLoadCommands = vi.fn(); +vi.mock('../../services/McpPromptLoader.js', () => ({ + McpPromptLoader: vi.fn().mockImplementation(() => ({ + loadCommands: mockMcpLoadCommands, + })), })); -import { act, renderHook } from '@testing-library/react'; -import { vi, describe, it, expect, beforeEach, afterEach, Mock } from 'vitest'; -import open from 'open'; -import { useSlashCommandProcessor } from './slashCommandProcessor.js'; -import { MessageType, SlashCommandProcessorResult } from '../types.js'; -import { - Config, - MCPDiscoveryState, - MCPServerStatus, - getMCPDiscoveryState, - getMCPServerStatus, - GeminiClient, -} from '@qwen-code/qwen-code-core'; -import { useSessionStats } from '../contexts/SessionContext.js'; -import { LoadedSettings } from '../../config/settings.js'; -import * as ShowMemoryCommandModule from './useShowMemoryCommand.js'; -import { GIT_COMMIT_INFO } from '../../generated/git-commit.js'; -import { CommandService } from '../../services/CommandService.js'; -import { SlashCommand } from 
'../commands/types.js'; - vi.mock('../contexts/SessionContext.js', () => ({ - useSessionStats: vi.fn(), + useSessionStats: vi.fn(() => ({ stats: {} })), })); -vi.mock('../../services/CommandService.js'); +import { act, renderHook, waitFor } from '@testing-library/react'; +import { vi, describe, it, expect, beforeEach, type Mock } from 'vitest'; +import { useSlashCommandProcessor } from './slashCommandProcessor.js'; +import { + CommandContext, + CommandKind, + ConfirmShellCommandsActionReturn, + SlashCommand, +} from '../commands/types.js'; +import { Config, ToolConfirmationOutcome } from '@qwen-code/qwen-code-core'; +import { LoadedSettings } from '../../config/settings.js'; +import { MessageType } from '../types.js'; +import { BuiltinCommandLoader } from '../../services/BuiltinCommandLoader.js'; +import { FileCommandLoader } from '../../services/FileCommandLoader.js'; +import { McpPromptLoader } from '../../services/McpPromptLoader.js'; -vi.mock('./useShowMemoryCommand.js', () => ({ - SHOW_MEMORY_COMMAND_NAME: '/memory show', - createShowMemoryAction: vi.fn(() => vi.fn()), -})); - -vi.mock('open', () => ({ - default: vi.fn(), -})); - -vi.mock('@qwen-code/qwen-code-core', async (importOriginal) => { - const actual = - await importOriginal(); - return { - ...actual, - getMCPServerStatus: vi.fn(), - getMCPDiscoveryState: vi.fn(), - }; +const createTestCommand = ( + overrides: Partial, + kind: CommandKind = CommandKind.BUILT_IN, +): SlashCommand => ({ + name: 'test', + description: 'a test command', + kind, + ...overrides, }); describe('useSlashCommandProcessor', () => { - let mockAddItem: ReturnType; - let mockClearItems: ReturnType; - let mockLoadHistory: ReturnType; - let mockRefreshStatic: ReturnType; - let mockSetShowHelp: ReturnType; - let mockOnDebugMessage: ReturnType; - let mockOpenThemeDialog: ReturnType; - let mockOpenAuthDialog: ReturnType; - let mockOpenEditorDialog: ReturnType; - let mockSetQuittingMessages: ReturnType; - let mockTryCompressChat: 
ReturnType; - let mockGeminiClient: GeminiClient; - let mockConfig: Config; - let mockCorgiMode: ReturnType; - const mockUseSessionStats = useSessionStats as Mock; + const mockAddItem = vi.fn(); + const mockClearItems = vi.fn(); + const mockLoadHistory = vi.fn(); + const mockSetShowHelp = vi.fn(); + const mockOpenAuthDialog = vi.fn(); + const mockSetQuittingMessages = vi.fn(); + + const mockConfig = { + getProjectRoot: () => '/mock/cwd', + getSessionId: () => 'test-session', + getGeminiClient: () => ({ + setHistory: vi.fn().mockResolvedValue(undefined), + }), + } as unknown as Config; + + const mockSettings = {} as LoadedSettings; beforeEach(() => { - // Reset all mocks to clear any previous state or calls. vi.clearAllMocks(); - - // Default mock setup for CommandService for all the OLD tests. - // This makes them pass again by simulating the original behavior where - // the service is constructed but doesn't do much yet. - vi.mocked(CommandService).mockImplementation( - () => - ({ - loadCommands: vi.fn().mockResolvedValue(undefined), - getCommands: vi.fn().mockReturnValue([]), // Return an empty array by default - }) as unknown as CommandService, - ); - - mockAddItem = vi.fn(); - mockClearItems = vi.fn(); - mockLoadHistory = vi.fn(); - mockRefreshStatic = vi.fn(); - mockSetShowHelp = vi.fn(); - mockOnDebugMessage = vi.fn(); - mockOpenThemeDialog = vi.fn(); - mockOpenAuthDialog = vi.fn(); - mockOpenEditorDialog = vi.fn(); - mockSetQuittingMessages = vi.fn(); - mockTryCompressChat = vi.fn(); - mockGeminiClient = { - tryCompressChat: mockTryCompressChat, - } as unknown as GeminiClient; - mockConfig = { - getDebugMode: vi.fn(() => false), - getGeminiClient: () => mockGeminiClient, - getSandbox: vi.fn(() => 'test-sandbox'), - getModel: vi.fn(() => 'test-model'), - getProjectRoot: vi.fn(() => '/test/dir'), - getCheckpointingEnabled: vi.fn(() => true), - getBugCommand: vi.fn(() => undefined), - getSessionId: vi.fn(() => 'test-session-id'), - } as unknown as Config; - 
mockCorgiMode = vi.fn(); - mockUseSessionStats.mockReturnValue({ - stats: { - sessionStartTime: new Date('2025-01-01T00:00:00.000Z'), - cumulative: { - promptCount: 0, - promptTokenCount: 0, - candidatesTokenCount: 0, - totalTokenCount: 0, - cachedContentTokenCount: 0, - toolUsePromptTokenCount: 0, - thoughtsTokenCount: 0, - }, - }, - }); - - (open as Mock).mockClear(); - mockProcessExit.mockClear(); - (ShowMemoryCommandModule.createShowMemoryAction as Mock).mockClear(); - process.env = { ...globalThis.process.env }; + (vi.mocked(BuiltinCommandLoader) as Mock).mockClear(); + mockBuiltinLoadCommands.mockResolvedValue([]); + mockFileLoadCommands.mockResolvedValue([]); + mockMcpLoadCommands.mockResolvedValue([]); }); - const getProcessorHook = (showToolDescriptions: boolean = false) => { - const settings = { - merged: { - contextFileName: 'QWEN.md', - }, - } as unknown as LoadedSettings; - return renderHook(() => + const setupProcessorHook = ( + builtinCommands: SlashCommand[] = [], + fileCommands: SlashCommand[] = [], + mcpCommands: SlashCommand[] = [], + setIsProcessing = vi.fn(), + ) => { + mockBuiltinLoadCommands.mockResolvedValue(Object.freeze(builtinCommands)); + mockFileLoadCommands.mockResolvedValue(Object.freeze(fileCommands)); + mockMcpLoadCommands.mockResolvedValue(Object.freeze(mcpCommands)); + + const { result } = renderHook(() => useSlashCommandProcessor( mockConfig, - settings, - [], + mockSettings, mockAddItem, mockClearItems, mockLoadHistory, - mockRefreshStatic, + vi.fn(), // refreshStatic mockSetShowHelp, - mockOnDebugMessage, - mockOpenThemeDialog, + vi.fn(), // onDebugMessage + vi.fn(), // openThemeDialog mockOpenAuthDialog, - mockOpenEditorDialog, - mockCorgiMode, - showToolDescriptions, + vi.fn(), // openEditorDialog + vi.fn(), // toggleCorgiMode mockSetQuittingMessages, - vi.fn(), // mockOpenPrivacyNotice + vi.fn(), // openPrivacyNotice + vi.fn(), // toggleVimEnabled + setIsProcessing, ), ); + + return result; }; - const getProcessor = 
(showToolDescriptions: boolean = false) => - getProcessorHook(showToolDescriptions).result.current; + describe('Initialization and Command Loading', () => { + it('should initialize CommandService with all required loaders', () => { + setupProcessorHook(); + expect(BuiltinCommandLoader).toHaveBeenCalledWith(mockConfig); + expect(FileCommandLoader).toHaveBeenCalledWith(mockConfig); + expect(McpPromptLoader).toHaveBeenCalledWith(mockConfig); + }); - describe('/stats command', () => { - it('should show detailed session statistics', async () => { - // Arrange - mockUseSessionStats.mockReturnValue({ - stats: { - sessionStartTime: new Date('2025-01-01T00:00:00.000Z'), + it('should call loadCommands and populate state after mounting', async () => { + const testCommand = createTestCommand({ name: 'test' }); + const result = setupProcessorHook([testCommand]); + + await waitFor(() => { + expect(result.current.slashCommands).toHaveLength(1); + }); + + expect(result.current.slashCommands[0]?.name).toBe('test'); + expect(mockBuiltinLoadCommands).toHaveBeenCalledTimes(1); + expect(mockFileLoadCommands).toHaveBeenCalledTimes(1); + expect(mockMcpLoadCommands).toHaveBeenCalledTimes(1); + }); + + it('should provide an immutable array of commands to consumers', async () => { + const testCommand = createTestCommand({ name: 'test' }); + const result = setupProcessorHook([testCommand]); + + await waitFor(() => { + expect(result.current.slashCommands).toHaveLength(1); + }); + + const commands = result.current.slashCommands; + + expect(() => { + // @ts-expect-error - We are intentionally testing a violation of the readonly type. 
+ commands.push(createTestCommand({ name: 'rogue' })); + }).toThrow(TypeError); + }); + + it('should override built-in commands with file-based commands of the same name', async () => { + const builtinAction = vi.fn(); + const fileAction = vi.fn(); + + const builtinCommand = createTestCommand({ + name: 'override', + description: 'builtin', + action: builtinAction, + }); + const fileCommand = createTestCommand( + { name: 'override', description: 'file', action: fileAction }, + CommandKind.FILE, + ); + + const result = setupProcessorHook([builtinCommand], [fileCommand]); + + await waitFor(() => { + // The service should only return one command with the name 'override' + expect(result.current.slashCommands).toHaveLength(1); + }); + + await act(async () => { + await result.current.handleSlashCommand('/override'); + }); + + // Only the file-based command's action should be called. + expect(fileAction).toHaveBeenCalledTimes(1); + expect(builtinAction).not.toHaveBeenCalled(); + }); + }); + + describe('Command Execution Logic', () => { + it('should display an error for an unknown command', async () => { + const result = setupProcessorHook(); + await waitFor(() => expect(result.current.slashCommands).toBeDefined()); + + await act(async () => { + await result.current.handleSlashCommand('/nonexistent'); + }); + + // Expect 2 calls: one for the user's input, one for the error message. 
+ expect(mockAddItem).toHaveBeenCalledTimes(2); + expect(mockAddItem).toHaveBeenLastCalledWith( + { + type: MessageType.ERROR, + text: 'Unknown command: /nonexistent', }, - }); - - const { handleSlashCommand } = getProcessor(); - const mockDate = new Date('2025-01-01T01:02:03.000Z'); // 1h 2m 3s duration - vi.setSystemTime(mockDate); - - // Act - await act(async () => { - handleSlashCommand('/stats'); - }); - - // Assert - expect(mockAddItem).toHaveBeenNthCalledWith( - 2, // Called after the user message - expect.objectContaining({ - type: MessageType.STATS, - duration: '1h 2m 3s', - }), - expect.any(Number), - ); - - vi.useRealTimers(); - }); - - it('should show model-specific statistics when using /stats model', async () => { - // Arrange - const { handleSlashCommand } = getProcessor(); - - // Act - await act(async () => { - handleSlashCommand('/stats model'); - }); - - // Assert - expect(mockAddItem).toHaveBeenNthCalledWith( - 2, // Called after the user message - expect.objectContaining({ - type: MessageType.MODEL_STATS, - }), expect.any(Number), ); }); - it('should show tool-specific statistics when using /stats tools', async () => { - // Arrange - const { handleSlashCommand } = getProcessor(); - - // Act - await act(async () => { - handleSlashCommand('/stats tools'); - }); - - // Assert - expect(mockAddItem).toHaveBeenNthCalledWith( - 2, // Called after the user message - expect.objectContaining({ - type: MessageType.TOOL_STATS, - }), - expect.any(Number), - ); - }); - }); - - describe('Other commands', () => { - it('/editor should open editor dialog and return handled', async () => { - const { handleSlashCommand } = getProcessor(); - let commandResult: SlashCommandProcessorResult | false = false; - await act(async () => { - commandResult = await handleSlashCommand('/editor'); - }); - expect(mockOpenEditorDialog).toHaveBeenCalled(); - expect(commandResult).toEqual({ type: 'handled' }); - }); - }); - - describe('New command registry', () => { - let 
ActualCommandService: typeof CommandService; - - beforeAll(async () => { - const actual = (await vi.importActual( - '../../services/CommandService.js', - )) as { CommandService: typeof CommandService }; - ActualCommandService = actual.CommandService; - }); - - beforeEach(() => { - vi.clearAllMocks(); - }); - - it('should execute a command from the new registry', async () => { - const mockAction = vi.fn(); - const newCommand: SlashCommand = { name: 'test', action: mockAction }; - const mockLoader = async () => [newCommand]; - - // We create the instance outside the mock implementation. - const commandServiceInstance = new ActualCommandService(mockLoader); - - // This mock ensures the hook uses our pre-configured instance. - vi.mocked(CommandService).mockImplementation( - () => commandServiceInstance, - ); - - const { result } = getProcessorHook(); - - await vi.waitFor(() => { - // We check that the `slashCommands` array, which is the public API - // of our hook, eventually contains the command we injected. 
- expect( - result.current.slashCommands.some((c) => c.name === 'test'), - ).toBe(true); - }); - - let commandResult: SlashCommandProcessorResult | false = false; - await act(async () => { - commandResult = await result.current.handleSlashCommand('/test'); - }); - - expect(mockAction).toHaveBeenCalledTimes(1); - expect(commandResult).toEqual({ type: 'handled' }); - }); - - it('should return "schedule_tool" when a new command returns a tool action', async () => { - const mockAction = vi.fn().mockResolvedValue({ - type: 'tool', - toolName: 'my_tool', - toolArgs: { arg1: 'value1' }, - }); - const newCommand: SlashCommand = { name: 'test', action: mockAction }; - const mockLoader = async () => [newCommand]; - const commandServiceInstance = new ActualCommandService(mockLoader); - vi.mocked(CommandService).mockImplementation( - () => commandServiceInstance, - ); - - const { result } = getProcessorHook(); - await vi.waitFor(() => { - expect( - result.current.slashCommands.some((c) => c.name === 'test'), - ).toBe(true); - }); - - const commandResult = await result.current.handleSlashCommand('/test'); - - expect(mockAction).toHaveBeenCalledTimes(1); - expect(commandResult).toEqual({ - type: 'schedule_tool', - toolName: 'my_tool', - toolArgs: { arg1: 'value1' }, - }); - }); - - it('should return "handled" when a new command returns a message action', async () => { - const mockAction = vi.fn().mockResolvedValue({ - type: 'message', - messageType: 'info', - content: 'This is a message', - }); - const newCommand: SlashCommand = { name: 'test', action: mockAction }; - const mockLoader = async () => [newCommand]; - const commandServiceInstance = new ActualCommandService(mockLoader); - vi.mocked(CommandService).mockImplementation( - () => commandServiceInstance, - ); - - const { result } = getProcessorHook(); - await vi.waitFor(() => { - expect( - result.current.slashCommands.some((c) => c.name === 'test'), - ).toBe(true); - }); - - const commandResult = await 
result.current.handleSlashCommand('/test'); - - expect(mockAction).toHaveBeenCalledTimes(1); - expect(mockAddItem).toHaveBeenCalledWith( - expect.objectContaining({ - type: 'info', - text: 'This is a message', - }), - expect.any(Number), - ); - expect(commandResult).toEqual({ type: 'handled' }); - }); - - it('should return "handled" when a new command returns a dialog action', async () => { - const mockAction = vi.fn().mockResolvedValue({ - type: 'dialog', - dialog: 'help', - }); - const newCommand: SlashCommand = { name: 'test', action: mockAction }; - const mockLoader = async () => [newCommand]; - const commandServiceInstance = new ActualCommandService(mockLoader); - vi.mocked(CommandService).mockImplementation( - () => commandServiceInstance, - ); - - const { result } = getProcessorHook(); - await vi.waitFor(() => { - expect( - result.current.slashCommands.some((c) => c.name === 'test'), - ).toBe(true); - }); - - const commandResult = await result.current.handleSlashCommand('/test'); - - expect(mockAction).toHaveBeenCalledTimes(1); - expect(mockSetShowHelp).toHaveBeenCalledWith(true); - expect(commandResult).toEqual({ type: 'handled' }); - }); - - it('should open the auth dialog when a new command returns an auth dialog action', async () => { - const mockAction = vi.fn().mockResolvedValue({ - type: 'dialog', - dialog: 'auth', - }); - const newAuthCommand: SlashCommand = { name: 'auth', action: mockAction }; - - const mockLoader = async () => [newAuthCommand]; - const commandServiceInstance = new ActualCommandService(mockLoader); - vi.mocked(CommandService).mockImplementation( - () => commandServiceInstance, - ); - - const { result } = getProcessorHook(); - await vi.waitFor(() => { - expect( - result.current.slashCommands.some((c) => c.name === 'auth'), - ).toBe(true); - }); - - const commandResult = await result.current.handleSlashCommand('/auth'); - - expect(mockAction).toHaveBeenCalledTimes(1); - expect(mockOpenAuthDialog).toHaveBeenCalledWith(); - 
expect(commandResult).toEqual({ type: 'handled' }); - }); - - it('should open the theme dialog when a new command returns a theme dialog action', async () => { - const mockAction = vi.fn().mockResolvedValue({ - type: 'dialog', - dialog: 'theme', - }); - const newCommand: SlashCommand = { name: 'test', action: mockAction }; - const mockLoader = async () => [newCommand]; - const commandServiceInstance = new ActualCommandService(mockLoader); - vi.mocked(CommandService).mockImplementation( - () => commandServiceInstance, - ); - - const { result } = getProcessorHook(); - await vi.waitFor(() => { - expect( - result.current.slashCommands.some((c) => c.name === 'test'), - ).toBe(true); - }); - - const commandResult = await result.current.handleSlashCommand('/test'); - - expect(mockAction).toHaveBeenCalledTimes(1); - expect(mockOpenThemeDialog).toHaveBeenCalledWith(); - expect(commandResult).toEqual({ type: 'handled' }); - }); - - it('should show help for a parent command with no action', async () => { + it('should display help for a parent command invoked without a subcommand', async () => { const parentCommand: SlashCommand = { name: 'parent', + description: 'a parent command', + kind: CommandKind.BUILT_IN, subCommands: [ - { name: 'child', description: 'A child.', action: vi.fn() }, + { + name: 'child1', + description: 'First child.', + kind: CommandKind.BUILT_IN, + }, ], }; - - const mockLoader = async () => [parentCommand]; - const commandServiceInstance = new ActualCommandService(mockLoader); - vi.mocked(CommandService).mockImplementation( - () => commandServiceInstance, - ); - - const { result } = getProcessorHook(); - - await vi.waitFor(() => { - expect( - result.current.slashCommands.some((c) => c.name === 'parent'), - ).toBe(true); - }); + const result = setupProcessorHook([parentCommand]); + await waitFor(() => expect(result.current.slashCommands).toHaveLength(1)); await act(async () => { await result.current.handleSlashCommand('/parent'); }); - 
expect(mockAddItem).toHaveBeenCalledWith( - expect.objectContaining({ - type: 'info', + expect(mockAddItem).toHaveBeenCalledTimes(2); + expect(mockAddItem).toHaveBeenLastCalledWith( + { + type: MessageType.INFO, text: expect.stringContaining( "Command '/parent' requires a subcommand.", ), - }), + }, expect.any(Number), ); }); - }); - describe('/bug command', () => { - const originalEnv = process.env; - beforeEach(() => { - vi.resetModules(); - mockGetCliVersionFn.mockResolvedValue('0.1.0'); - process.env = { ...originalEnv }; - }); - - afterEach(() => { - vi.useRealTimers(); - process.env = originalEnv; - }); - - const getExpectedUrl = ( - description?: string, - sandboxEnvVar?: string, - seatbeltProfileVar?: string, - cliVersion?: string, - ) => { - const osVersion = 'test-platform test-node-version'; - let sandboxEnvStr = 'no sandbox'; - if (sandboxEnvVar && sandboxEnvVar !== 'sandbox-exec') { - sandboxEnvStr = sandboxEnvVar.replace(/^gemini-(?:code-)?/, ''); - } else if (sandboxEnvVar === 'sandbox-exec') { - sandboxEnvStr = `sandbox-exec (${seatbeltProfileVar || 'unknown'})`; - } - const modelVersion = 'test-model'; - // Use the mocked memoryUsage value - const memoryUsage = '11.8 MB'; - - const info = ` -* **CLI Version:** ${cliVersion} -* **Git Commit:** ${GIT_COMMIT_INFO} -* **Operating System:** ${osVersion} -* **Sandbox Environment:** ${sandboxEnvStr} -* **Model Version:** ${modelVersion} -* **Memory Usage:** ${memoryUsage} -`; - let url = - 'https://github.com/QwenLM/Qwen-Code/issues/new?template=bug_report.yml'; - if (description) { - url += `&title=${encodeURIComponent(description)}`; - } - url += `&info=${encodeURIComponent(info)}`; - return url; - }; - - it('should call open with the correct GitHub issue URL and return true', async () => { - mockGetCliVersionFn.mockResolvedValue('test-version'); - process.env.SANDBOX = 'gemini-sandbox'; - process.env.SEATBELT_PROFILE = 'test_profile'; - const { handleSlashCommand } = getProcessor(); - const 
bugDescription = 'This is a test bug'; - const expectedUrl = getExpectedUrl( - bugDescription, - process.env.SANDBOX, - process.env.SEATBELT_PROFILE, - 'test-version', - ); - let commandResult: SlashCommandProcessorResult | false = false; - await act(async () => { - commandResult = await handleSlashCommand(`/bug ${bugDescription}`); - }); - - expect(mockAddItem).toHaveBeenCalledTimes(2); - expect(open).toHaveBeenCalledWith(expectedUrl); - expect(commandResult).toEqual({ type: 'handled' }); - }); - - it('should use the custom bug command URL from config if available', async () => { - process.env.CLI_VERSION = '0.1.0'; - process.env.SANDBOX = 'sandbox-exec'; - process.env.SEATBELT_PROFILE = 'permissive-open'; - const bugCommand = { - urlTemplate: - 'https://custom-bug-tracker.com/new?title={title}&info={info}', - }; - mockConfig = { - ...mockConfig, - getBugCommand: vi.fn(() => bugCommand), - } as unknown as Config; - process.env.CLI_VERSION = '0.1.0'; - - const { handleSlashCommand } = getProcessor(); - const bugDescription = 'This is a custom bug'; - const info = ` -* **CLI Version:** 0.1.0 -* **Git Commit:** ${GIT_COMMIT_INFO} -* **Operating System:** test-platform test-node-version -* **Sandbox Environment:** sandbox-exec (permissive-open) -* **Model Version:** test-model -* **Memory Usage:** 11.8 MB -`; - const expectedUrl = bugCommand.urlTemplate - .replace('{title}', encodeURIComponent(bugDescription)) - .replace('{info}', encodeURIComponent(info)); - - let commandResult: SlashCommandProcessorResult | false = false; - await act(async () => { - commandResult = await handleSlashCommand(`/bug ${bugDescription}`); - }); - - expect(mockAddItem).toHaveBeenCalledTimes(2); - expect(open).toHaveBeenCalledWith(expectedUrl); - expect(commandResult).toEqual({ type: 'handled' }); - }); - }); - - describe('/quit and /exit commands', () => { - beforeEach(() => { - vi.useFakeTimers(); - }); - - afterEach(() => { - vi.useRealTimers(); - }); - - it.each([['/quit'], ['/exit']])( 
- 'should handle %s, set quitting messages, and exit the process', - async (command) => { - const { handleSlashCommand } = getProcessor(); - const mockDate = new Date('2025-01-01T01:02:03.000Z'); - vi.setSystemTime(mockDate); - - await act(async () => { - handleSlashCommand(command); - }); - - expect(mockAddItem).not.toHaveBeenCalled(); - expect(mockSetQuittingMessages).toHaveBeenCalledWith([ + it('should correctly find and execute a nested subcommand', async () => { + const childAction = vi.fn(); + const parentCommand: SlashCommand = { + name: 'parent', + description: 'a parent command', + kind: CommandKind.BUILT_IN, + subCommands: [ { - type: 'user', - text: command, - id: expect.any(Number), + name: 'child', + description: 'a child command', + kind: CommandKind.BUILT_IN, + action: childAction, }, - { - type: 'quit', - duration: '1h 2m 3s', - id: expect.any(Number), - }, - ]); + ], + }; + const result = setupProcessorHook([parentCommand]); + await waitFor(() => expect(result.current.slashCommands).toHaveLength(1)); - // Fast-forward timers to trigger process.exit - await act(async () => { - vi.advanceTimersByTime(100); + await act(async () => { + await result.current.handleSlashCommand('/parent child with args'); + }); + + expect(childAction).toHaveBeenCalledTimes(1); + + expect(childAction).toHaveBeenCalledWith( + expect.objectContaining({ + services: expect.objectContaining({ + config: mockConfig, + }), + ui: expect.objectContaining({ + addItem: mockAddItem, + }), + }), + 'with args', + ); + }); + + it('should set isProcessing to true during execution and false afterwards', async () => { + const mockSetIsProcessing = vi.fn(); + const command = createTestCommand({ + name: 'long-running', + action: () => new Promise((resolve) => setTimeout(resolve, 50)), + }); + + const result = setupProcessorHook([command], [], [], mockSetIsProcessing); + await waitFor(() => expect(result.current.slashCommands).toHaveLength(1)); + + const executionPromise = act(async () => { + 
await result.current.handleSlashCommand('/long-running'); + }); + + // It should be true immediately after starting + expect(mockSetIsProcessing).toHaveBeenCalledWith(true); + // It should not have been called with false yet + expect(mockSetIsProcessing).not.toHaveBeenCalledWith(false); + + await executionPromise; + + // After the promise resolves, it should be called with false + expect(mockSetIsProcessing).toHaveBeenCalledWith(false); + expect(mockSetIsProcessing).toHaveBeenCalledTimes(2); + }); + }); + + describe('Action Result Handling', () => { + it('should handle "dialog: help" action', async () => { + const command = createTestCommand({ + name: 'helpcmd', + action: vi.fn().mockResolvedValue({ type: 'dialog', dialog: 'help' }), + }); + const result = setupProcessorHook([command]); + await waitFor(() => expect(result.current.slashCommands).toHaveLength(1)); + + await act(async () => { + await result.current.handleSlashCommand('/helpcmd'); + }); + + expect(mockSetShowHelp).toHaveBeenCalledWith(true); + }); + + it('should handle "load_history" action', async () => { + const command = createTestCommand({ + name: 'load', + action: vi.fn().mockResolvedValue({ + type: 'load_history', + history: [{ type: MessageType.USER, text: 'old prompt' }], + clientHistory: [{ role: 'user', parts: [{ text: 'old prompt' }] }], + }), + }); + const result = setupProcessorHook([command]); + await waitFor(() => expect(result.current.slashCommands).toHaveLength(1)); + + await act(async () => { + await result.current.handleSlashCommand('/load'); + }); + + expect(mockClearItems).toHaveBeenCalledTimes(1); + expect(mockAddItem).toHaveBeenCalledWith( + { type: 'user', text: 'old prompt' }, + expect.any(Number), + ); + }); + + describe('with fake timers', () => { + // This test needs to let the async `waitFor` complete with REAL timers + // before switching to FAKE timers to test setTimeout. 
+ it('should handle a "quit" action', async () => { + const quitAction = vi + .fn() + .mockResolvedValue({ type: 'quit', messages: [] }); + const command = createTestCommand({ + name: 'exit', + action: quitAction, }); - expect(mockProcessExit).toHaveBeenCalledWith(0); - }, - ); - }); + const result = setupProcessorHook([command]); - describe('Unknown command', () => { - it('should show an error and return handled for a general unknown command', async () => { - const { handleSlashCommand } = getProcessor(); - let commandResult: SlashCommandProcessorResult | false = false; - await act(async () => { - commandResult = await handleSlashCommand('/unknowncommand'); + await waitFor(() => + expect(result.current.slashCommands).toHaveLength(1), + ); + + vi.useFakeTimers(); + + try { + await act(async () => { + await result.current.handleSlashCommand('/exit'); + }); + + await act(async () => { + await vi.advanceTimersByTimeAsync(200); + }); + + expect(mockSetQuittingMessages).toHaveBeenCalledWith([]); + expect(mockProcessExit).toHaveBeenCalledWith(0); + } finally { + vi.useRealTimers(); + } }); - expect(mockAddItem).toHaveBeenNthCalledWith( - 2, - expect.objectContaining({ - type: MessageType.ERROR, - text: 'Unknown command: /unknowncommand', - }), - expect.any(Number), - ); - expect(commandResult).toEqual({ type: 'handled' }); - }); - }); - - describe('/tools command', () => { - it('should show an error if tool registry is not available', async () => { - mockConfig = { - ...mockConfig, - getToolRegistry: vi.fn().mockResolvedValue(undefined), - } as unknown as Config; - const { handleSlashCommand } = getProcessor(); - let commandResult: SlashCommandProcessorResult | false = false; - await act(async () => { - commandResult = await handleSlashCommand('/tools'); - }); - - expect(mockAddItem).toHaveBeenNthCalledWith( - 2, - expect.objectContaining({ - type: MessageType.ERROR, - text: 'Could not retrieve tools.', - }), - expect.any(Number), - ); - expect(commandResult).toEqual({ 
type: 'handled' }); }); - it('should show an error if getAllTools returns undefined', async () => { - mockConfig = { - ...mockConfig, - getToolRegistry: vi.fn().mockResolvedValue({ - getAllTools: vi.fn().mockReturnValue(undefined), - }), - } as unknown as Config; - const { handleSlashCommand } = getProcessor(); - let commandResult: SlashCommandProcessorResult | false = false; - await act(async () => { - commandResult = await handleSlashCommand('/tools'); - }); - - expect(mockAddItem).toHaveBeenNthCalledWith( - 2, - expect.objectContaining({ - type: MessageType.ERROR, - text: 'Could not retrieve tools.', - }), - expect.any(Number), - ); - expect(commandResult).toEqual({ type: 'handled' }); - }); - - it('should display only Gemini CLI tools (filtering out MCP tools)', async () => { - // Create mock tools - some with serverName property (MCP tools) and some without (Gemini CLI tools) - const mockTools = [ - { name: 'tool1', displayName: 'Tool1' }, - { name: 'tool2', displayName: 'Tool2' }, - { name: 'mcp_tool1', serverName: 'mcp-server1' }, - { name: 'mcp_tool2', serverName: 'mcp-server1' }, - ]; - - mockConfig = { - ...mockConfig, - getToolRegistry: vi.fn().mockResolvedValue({ - getAllTools: vi.fn().mockReturnValue(mockTools), - }), - } as unknown as Config; - - const { handleSlashCommand } = getProcessor(); - let commandResult: SlashCommandProcessorResult | false = false; - await act(async () => { - commandResult = await handleSlashCommand('/tools'); - }); - - // Should only show tool1 and tool2, not the MCP tools - const message = mockAddItem.mock.calls[1][0].text; - expect(message).toContain('Tool1'); - expect(message).toContain('Tool2'); - expect(commandResult).toEqual({ type: 'handled' }); - }); - - it('should display a message when no Gemini CLI tools are available', async () => { - // Only MCP tools available - const mockTools = [ - { name: 'mcp_tool1', serverName: 'mcp-server1' }, - { name: 'mcp_tool2', serverName: 'mcp-server1' }, - ]; - - mockConfig = { - 
...mockConfig, - getToolRegistry: vi.fn().mockResolvedValue({ - getAllTools: vi.fn().mockReturnValue(mockTools), - }), - } as unknown as Config; - - const { handleSlashCommand } = getProcessor(); - let commandResult: SlashCommandProcessorResult | false = false; - await act(async () => { - commandResult = await handleSlashCommand('/tools'); - }); - - const message = mockAddItem.mock.calls[1][0].text; - expect(message).toContain('No tools available'); - expect(commandResult).toEqual({ type: 'handled' }); - }); - - it('should display tool descriptions when /tools desc is used', async () => { - const mockTools = [ + it('should handle "submit_prompt" action returned from a file-based command', async () => { + const fileCommand = createTestCommand( { - name: 'tool1', - displayName: 'Tool1', - description: 'Description for Tool1', + name: 'filecmd', + description: 'A command from a file', + action: async () => ({ + type: 'submit_prompt', + content: 'The actual prompt from the TOML file.', + }), }, - { - name: 'tool2', - displayName: 'Tool2', - description: 'Description for Tool2', - }, - ]; + CommandKind.FILE, + ); - mockConfig = { - ...mockConfig, - getToolRegistry: vi.fn().mockResolvedValue({ - getAllTools: vi.fn().mockReturnValue(mockTools), - }), - } as unknown as Config; + const result = setupProcessorHook([], [fileCommand]); + await waitFor(() => expect(result.current.slashCommands).toHaveLength(1)); - const { handleSlashCommand } = getProcessor(); - let commandResult: SlashCommandProcessorResult | false = false; + let actionResult; await act(async () => { - commandResult = await handleSlashCommand('/tools desc'); + actionResult = await result.current.handleSlashCommand('/filecmd'); }); - const message = mockAddItem.mock.calls[1][0].text; - expect(message).toContain('Tool1'); - expect(message).toContain('Description for Tool1'); - expect(message).toContain('Tool2'); - expect(message).toContain('Description for Tool2'); - expect(commandResult).toEqual({ type: 
'handled' }); + expect(actionResult).toEqual({ + type: 'submit_prompt', + content: 'The actual prompt from the TOML file.', + }); + + expect(mockAddItem).toHaveBeenCalledWith( + { type: MessageType.USER, text: '/filecmd' }, + expect.any(Number), + ); + }); + + it('should handle "submit_prompt" action returned from a mcp-based command', async () => { + const mcpCommand = createTestCommand( + { + name: 'mcpcmd', + description: 'A command from mcp', + action: async () => ({ + type: 'submit_prompt', + content: 'The actual prompt from the mcp command.', + }), + }, + CommandKind.MCP_PROMPT, + ); + + const result = setupProcessorHook([], [], [mcpCommand]); + await waitFor(() => expect(result.current.slashCommands).toHaveLength(1)); + + let actionResult; + await act(async () => { + actionResult = await result.current.handleSlashCommand('/mcpcmd'); + }); + + expect(actionResult).toEqual({ + type: 'submit_prompt', + content: 'The actual prompt from the mcp command.', + }); + + expect(mockAddItem).toHaveBeenCalledWith( + { type: MessageType.USER, text: '/mcpcmd' }, + expect.any(Number), + ); }); }); - describe('/mcp command', () => { - it('should show an error if tool registry is not available', async () => { - mockConfig = { - ...mockConfig, - getToolRegistry: vi.fn().mockResolvedValue(undefined), - } as unknown as Config; - const { handleSlashCommand } = getProcessor(); - let commandResult: SlashCommandProcessorResult | false = false; - await act(async () => { - commandResult = await handleSlashCommand('/mcp'); + describe('Shell Command Confirmation Flow', () => { + // Use a generic vi.fn() for the action. We will change its behavior in each test. 
+ const mockCommandAction = vi.fn(); + + const shellCommand = createTestCommand({ + name: 'shellcmd', + action: mockCommandAction, + }); + + beforeEach(() => { + // Reset the mock before each test + mockCommandAction.mockClear(); + + // Default behavior: request confirmation + mockCommandAction.mockResolvedValue({ + type: 'confirm_shell_commands', + commandsToConfirm: ['rm -rf /'], + originalInvocation: { raw: '/shellcmd' }, + } as ConfirmShellCommandsActionReturn); + }); + + it('should set confirmation request when action returns confirm_shell_commands', async () => { + const result = setupProcessorHook([shellCommand]); + await waitFor(() => expect(result.current.slashCommands).toHaveLength(1)); + + // This is intentionally not awaited, because the promise it returns + // will not resolve until the user responds to the confirmation. + act(() => { + result.current.handleSlashCommand('/shellcmd'); }); - expect(mockAddItem).toHaveBeenNthCalledWith( - 2, - expect.objectContaining({ + // We now wait for the state to be updated with the request. + await waitFor(() => { + expect(result.current.shellConfirmationRequest).not.toBeNull(); + }); + + expect(result.current.shellConfirmationRequest?.commands).toEqual([ + 'rm -rf /', + ]); + }); + + it('should do nothing if user cancels confirmation', async () => { + const result = setupProcessorHook([shellCommand]); + await waitFor(() => expect(result.current.slashCommands).toHaveLength(1)); + + act(() => { + result.current.handleSlashCommand('/shellcmd'); + }); + + // Wait for the confirmation dialog to be set + await waitFor(() => { + expect(result.current.shellConfirmationRequest).not.toBeNull(); + }); + + const onConfirm = result.current.shellConfirmationRequest?.onConfirm; + expect(onConfirm).toBeDefined(); + + // Change the mock action's behavior for a potential second run. + // If the test is flawed, this will be called, and we can detect it. 
+ mockCommandAction.mockResolvedValue({ + type: 'message', + messageType: 'info', + content: 'This should not be called', + }); + + await act(async () => { + onConfirm!(ToolConfirmationOutcome.Cancel, []); // Pass empty array for safety + }); + + expect(result.current.shellConfirmationRequest).toBeNull(); + // Verify the action was only called the initial time. + expect(mockCommandAction).toHaveBeenCalledTimes(1); + }); + + it('should re-run command with one-time allowlist on "Proceed Once"', async () => { + const result = setupProcessorHook([shellCommand]); + await waitFor(() => expect(result.current.slashCommands).toHaveLength(1)); + + act(() => { + result.current.handleSlashCommand('/shellcmd'); + }); + await waitFor(() => { + expect(result.current.shellConfirmationRequest).not.toBeNull(); + }); + + const onConfirm = result.current.shellConfirmationRequest?.onConfirm; + + // **Change the mock's behavior for the SECOND run.** + // This is the key to testing the outcome. + mockCommandAction.mockResolvedValue({ + type: 'message', + messageType: 'info', + content: 'Success!', + }); + + await act(async () => { + onConfirm!(ToolConfirmationOutcome.ProceedOnce, ['rm -rf /']); + }); + + expect(result.current.shellConfirmationRequest).toBeNull(); + + // The action should have been called twice (initial + re-run). + await waitFor(() => { + expect(mockCommandAction).toHaveBeenCalledTimes(2); + }); + + // We can inspect the context of the second call to ensure the one-time list was used. + const secondCallContext = mockCommandAction.mock + .calls[1][0] as CommandContext; + expect( + secondCallContext.session.sessionShellAllowlist.has('rm -rf /'), + ).toBe(true); + + // Verify the final success message was added. + expect(mockAddItem).toHaveBeenCalledWith( + { type: MessageType.INFO, text: 'Success!' }, + expect.any(Number), + ); + + // Verify the session-wide allowlist was NOT permanently updated. + // Re-render the hook by calling a no-op command to get the latest context. 
+ await act(async () => { + result.current.handleSlashCommand('/no-op'); + }); + const finalContext = result.current.commandContext; + expect(finalContext.session.sessionShellAllowlist.size).toBe(0); + }); + + it('should re-run command and update session allowlist on "Proceed Always"', async () => { + const result = setupProcessorHook([shellCommand]); + await waitFor(() => expect(result.current.slashCommands).toHaveLength(1)); + + act(() => { + result.current.handleSlashCommand('/shellcmd'); + }); + await waitFor(() => { + expect(result.current.shellConfirmationRequest).not.toBeNull(); + }); + + const onConfirm = result.current.shellConfirmationRequest?.onConfirm; + mockCommandAction.mockResolvedValue({ + type: 'message', + messageType: 'info', + content: 'Success!', + }); + + await act(async () => { + onConfirm!(ToolConfirmationOutcome.ProceedAlways, ['rm -rf /']); + }); + + expect(result.current.shellConfirmationRequest).toBeNull(); + await waitFor(() => { + expect(mockCommandAction).toHaveBeenCalledTimes(2); + }); + + expect(mockAddItem).toHaveBeenCalledWith( + { type: MessageType.INFO, text: 'Success!' }, + expect.any(Number), + ); + + // Check that the session-wide allowlist WAS updated. 
+ await waitFor(() => { + const finalContext = result.current.commandContext; + expect(finalContext.session.sessionShellAllowlist.has('rm -rf /')).toBe( + true, + ); + }); + }); + }); + + describe('Command Parsing and Matching', () => { + it('should be case-sensitive', async () => { + const command = createTestCommand({ name: 'test' }); + const result = setupProcessorHook([command]); + await waitFor(() => expect(result.current.slashCommands).toHaveLength(1)); + + await act(async () => { + // Use uppercase when command is lowercase + await result.current.handleSlashCommand('/Test'); + }); + + // It should fail and call addItem with an error + expect(mockAddItem).toHaveBeenCalledWith( + { type: MessageType.ERROR, - text: 'Could not retrieve tool registry.', - }), - expect.any(Number), - ); - expect(commandResult).toEqual({ type: 'handled' }); - }); - - it('should display a message with a URL when no MCP servers are configured in a sandbox', async () => { - process.env.SANDBOX = 'sandbox'; - mockConfig = { - ...mockConfig, - getToolRegistry: vi.fn().mockResolvedValue({ - getToolsByServer: vi.fn().mockReturnValue([]), - }), - getMcpServers: vi.fn().mockReturnValue({}), - } as unknown as Config; - - const { handleSlashCommand } = getProcessor(); - let commandResult: SlashCommandProcessorResult | false = false; - await act(async () => { - commandResult = await handleSlashCommand('/mcp'); - }); - - expect(mockAddItem).toHaveBeenNthCalledWith( - 2, - expect.objectContaining({ - type: MessageType.INFO, - text: `No MCP servers configured. 
Please open the following URL in your browser to view documentation:\nhttps://goo.gle/gemini-cli-docs-mcp`, - }), - expect.any(Number), - ); - expect(commandResult).toEqual({ type: 'handled' }); - delete process.env.SANDBOX; - }); - - it('should display a message and open a URL when no MCP servers are configured outside a sandbox', async () => { - mockConfig = { - ...mockConfig, - getToolRegistry: vi.fn().mockResolvedValue({ - getToolsByServer: vi.fn().mockReturnValue([]), - }), - getMcpServers: vi.fn().mockReturnValue({}), - } as unknown as Config; - - const { handleSlashCommand } = getProcessor(); - let commandResult: SlashCommandProcessorResult | false = false; - await act(async () => { - commandResult = await handleSlashCommand('/mcp'); - }); - - expect(mockAddItem).toHaveBeenNthCalledWith( - 2, - expect.objectContaining({ - type: MessageType.INFO, - text: 'No MCP servers configured. Opening documentation in your browser: https://goo.gle/gemini-cli-docs-mcp', - }), - expect.any(Number), - ); - expect(open).toHaveBeenCalledWith('https://goo.gle/gemini-cli-docs-mcp'); - expect(commandResult).toEqual({ type: 'handled' }); - }); - - it('should display configured MCP servers with status indicators and their tools', async () => { - // Mock MCP servers configuration - const mockMcpServers = { - server1: { command: 'cmd1' }, - server2: { command: 'cmd2' }, - server3: { command: 'cmd3' }, - }; - - // Setup getMCPServerStatus mock implementation - use all CONNECTED to avoid startup message in this test - vi.mocked(getMCPServerStatus).mockImplementation((serverName) => { - if (serverName === 'server1') return MCPServerStatus.CONNECTED; - if (serverName === 'server2') return MCPServerStatus.CONNECTED; - return MCPServerStatus.DISCONNECTED; // Default for server3 and others - }); - - // Setup getMCPDiscoveryState mock to return completed so no startup message is shown - vi.mocked(getMCPDiscoveryState).mockReturnValue( - MCPDiscoveryState.COMPLETED, - ); - - // Mock tools 
from each server - const mockServer1Tools = [ - { name: 'server1_tool1' }, - { name: 'server1_tool2' }, - ]; - - const mockServer2Tools = [{ name: 'server2_tool1' }]; - - const mockServer3Tools = [{ name: 'server3_tool1' }]; - - const mockGetToolsByServer = vi.fn().mockImplementation((serverName) => { - if (serverName === 'server1') return mockServer1Tools; - if (serverName === 'server2') return mockServer2Tools; - if (serverName === 'server3') return mockServer3Tools; - return []; - }); - - mockConfig = { - ...mockConfig, - getToolRegistry: vi.fn().mockResolvedValue({ - getToolsByServer: mockGetToolsByServer, - }), - getMcpServers: vi.fn().mockReturnValue(mockMcpServers), - } as unknown as Config; - - const { handleSlashCommand } = getProcessor(); - let commandResult: SlashCommandProcessorResult | false = false; - await act(async () => { - commandResult = await handleSlashCommand('/mcp'); - }); - - expect(mockAddItem).toHaveBeenNthCalledWith( - 2, - expect.objectContaining({ - type: MessageType.INFO, - text: expect.stringContaining('Configured MCP servers:'), - }), - expect.any(Number), - ); - - // Check that the message contains details about servers and their tools - const message = mockAddItem.mock.calls[1][0].text; - // Server 1 - Connected - expect(message).toContain( - '🟢 \u001b[1mserver1\u001b[0m - Ready (2 tools)', - ); - expect(message).toContain('\u001b[36mserver1_tool1\u001b[0m'); - expect(message).toContain('\u001b[36mserver1_tool2\u001b[0m'); - - // Server 2 - Connected - expect(message).toContain( - '🟢 \u001b[1mserver2\u001b[0m - Ready (1 tools)', - ); - expect(message).toContain('\u001b[36mserver2_tool1\u001b[0m'); - - // Server 3 - Disconnected - expect(message).toContain( - '🔴 \u001b[1mserver3\u001b[0m - Disconnected (1 tools cached)', - ); - expect(message).toContain('\u001b[36mserver3_tool1\u001b[0m'); - - expect(commandResult).toEqual({ type: 'handled' }); - }); - - it('should display tool descriptions when showToolDescriptions is true', async 
() => { - // Mock MCP servers configuration with server description - const mockMcpServers = { - server1: { - command: 'cmd1', - description: 'This is a server description', + text: 'Unknown command: /Test', }, - }; - - // Setup getMCPServerStatus mock implementation - vi.mocked(getMCPServerStatus).mockImplementation((serverName) => { - if (serverName === 'server1') return MCPServerStatus.CONNECTED; - return MCPServerStatus.DISCONNECTED; - }); - - // Setup getMCPDiscoveryState mock to return completed - vi.mocked(getMCPDiscoveryState).mockReturnValue( - MCPDiscoveryState.COMPLETED, - ); - - // Mock tools from server with descriptions - const mockServerTools = [ - { name: 'tool1', description: 'This is tool 1 description' }, - { name: 'tool2', description: 'This is tool 2 description' }, - ]; - - mockConfig = { - ...mockConfig, - getToolRegistry: vi.fn().mockResolvedValue({ - getToolsByServer: vi.fn().mockReturnValue(mockServerTools), - }), - getMcpServers: vi.fn().mockReturnValue(mockMcpServers), - } as unknown as Config; - - const { handleSlashCommand } = getProcessor(true); - let commandResult: SlashCommandProcessorResult | false = false; - await act(async () => { - commandResult = await handleSlashCommand('/mcp'); - }); - - expect(mockAddItem).toHaveBeenNthCalledWith( - 2, - expect.objectContaining({ - type: MessageType.INFO, - text: expect.stringContaining('Configured MCP servers:'), - }), expect.any(Number), ); - - const message = mockAddItem.mock.calls[1][0].text; - - // Check that server description is included (with ANSI color codes) - expect(message).toContain('\u001b[1mserver1\u001b[0m - Ready (2 tools)'); - expect(message).toContain( - '\u001b[32mThis is a server description\u001b[0m', - ); - - // Check that tool descriptions are included (with ANSI color codes) - expect(message).toContain('\u001b[36mtool1\u001b[0m'); - expect(message).toContain( - '\u001b[32mThis is tool 1 description\u001b[0m', - ); - 
expect(message).toContain('\u001b[36mtool2\u001b[0m'); - expect(message).toContain( - '\u001b[32mThis is tool 2 description\u001b[0m', - ); - - expect(commandResult).toEqual({ type: 'handled' }); }); - it('should indicate when a server has no tools', async () => { - // Mock MCP servers configuration - const mockMcpServers = { - server1: { command: 'cmd1' }, - server2: { command: 'cmd2' }, - }; - - // Setup getMCPServerStatus mock implementation - vi.mocked(getMCPServerStatus).mockImplementation((serverName) => { - if (serverName === 'server1') return MCPServerStatus.CONNECTED; - if (serverName === 'server2') return MCPServerStatus.DISCONNECTED; - return MCPServerStatus.DISCONNECTED; + it('should correctly match an altName', async () => { + const action = vi.fn(); + const command = createTestCommand({ + name: 'main', + altNames: ['alias'], + description: 'a command with an alias', + action, }); + const result = setupProcessorHook([command]); + await waitFor(() => expect(result.current.slashCommands).toHaveLength(1)); - // Setup getMCPDiscoveryState mock to return completed - vi.mocked(getMCPDiscoveryState).mockReturnValue( - MCPDiscoveryState.COMPLETED, - ); - - // Mock tools from each server - server2 has no tools - const mockServer1Tools = [{ name: 'server1_tool1' }]; - - const mockServer2Tools: Array<{ name: string }> = []; - - const mockGetToolsByServer = vi.fn().mockImplementation((serverName) => { - if (serverName === 'server1') return mockServer1Tools; - if (serverName === 'server2') return mockServer2Tools; - return []; - }); - - mockConfig = { - ...mockConfig, - getToolRegistry: vi.fn().mockResolvedValue({ - getToolsByServer: mockGetToolsByServer, - }), - getMcpServers: vi.fn().mockReturnValue(mockMcpServers), - } as unknown as Config; - - const { handleSlashCommand } = getProcessor(); - let commandResult: SlashCommandProcessorResult | false = false; await act(async () => { - commandResult = await handleSlashCommand('/mcp'); + await 
result.current.handleSlashCommand('/alias'); }); - expect(mockAddItem).toHaveBeenNthCalledWith( - 2, - expect.objectContaining({ - type: MessageType.INFO, - text: expect.stringContaining('Configured MCP servers:'), - }), - expect.any(Number), + expect(action).toHaveBeenCalledTimes(1); + expect(mockAddItem).not.toHaveBeenCalledWith( + expect.objectContaining({ type: MessageType.ERROR }), ); - - // Check that the message contains details about both servers and their tools - const message = mockAddItem.mock.calls[1][0].text; - expect(message).toContain( - '🟢 \u001b[1mserver1\u001b[0m - Ready (1 tools)', - ); - expect(message).toContain('\u001b[36mserver1_tool1\u001b[0m'); - expect(message).toContain( - '🔴 \u001b[1mserver2\u001b[0m - Disconnected (0 tools cached)', - ); - expect(message).toContain('No tools available'); - - expect(commandResult).toEqual({ type: 'handled' }); }); - it('should show startup indicator when servers are connecting', async () => { - // Mock MCP servers configuration - const mockMcpServers = { - server1: { command: 'cmd1' }, - server2: { command: 'cmd2' }, - }; + it('should handle extra whitespace around the command', async () => { + const action = vi.fn(); + const command = createTestCommand({ name: 'test', action }); + const result = setupProcessorHook([command]); + await waitFor(() => expect(result.current.slashCommands).toHaveLength(1)); - // Setup getMCPServerStatus mock implementation with one server connecting - vi.mocked(getMCPServerStatus).mockImplementation((serverName) => { - if (serverName === 'server1') return MCPServerStatus.CONNECTED; - if (serverName === 'server2') return MCPServerStatus.CONNECTING; - return MCPServerStatus.DISCONNECTED; - }); - - // Setup getMCPDiscoveryState mock to return in progress - vi.mocked(getMCPDiscoveryState).mockReturnValue( - MCPDiscoveryState.IN_PROGRESS, - ); - - // Mock tools from each server - const mockServer1Tools = [{ name: 'server1_tool1' }]; - const mockServer2Tools = [{ name: 
'server2_tool1' }]; - - const mockGetToolsByServer = vi.fn().mockImplementation((serverName) => { - if (serverName === 'server1') return mockServer1Tools; - if (serverName === 'server2') return mockServer2Tools; - return []; - }); - - mockConfig = { - ...mockConfig, - getToolRegistry: vi.fn().mockResolvedValue({ - getToolsByServer: mockGetToolsByServer, - }), - getMcpServers: vi.fn().mockReturnValue(mockMcpServers), - } as unknown as Config; - - const { handleSlashCommand } = getProcessor(); - let commandResult: SlashCommandProcessorResult | false = false; await act(async () => { - commandResult = await handleSlashCommand('/mcp'); + await result.current.handleSlashCommand(' /test with-args '); }); - const message = mockAddItem.mock.calls[1][0].text; + expect(action).toHaveBeenCalledWith(expect.anything(), 'with-args'); + }); - // Check that startup indicator is shown - expect(message).toContain( - '⏳ MCP servers are starting up (1 initializing)...', - ); - expect(message).toContain( - 'Note: First startup may take longer. Tool availability will update automatically.', - ); + it('should handle `?` as a command prefix', async () => { + const action = vi.fn(); + const command = createTestCommand({ name: 'help', action }); + const result = setupProcessorHook([command]); + await waitFor(() => expect(result.current.slashCommands).toHaveLength(1)); - // Check server statuses - expect(message).toContain( - '🟢 \u001b[1mserver1\u001b[0m - Ready (1 tools)', - ); - expect(message).toContain( - '🔄 \u001b[1mserver2\u001b[0m - Starting... 
(first startup may take longer) (tools will appear when ready)', - ); + await act(async () => { + await result.current.handleSlashCommand('?help'); + }); - expect(commandResult).toEqual({ type: 'handled' }); + expect(action).toHaveBeenCalledTimes(1); }); }); - describe('/mcp schema', () => { - it('should display tool schemas and descriptions', async () => { - // Mock MCP servers configuration with server description - const mockMcpServers = { - server1: { - command: 'cmd1', - description: 'This is a server description', + describe('Command Precedence', () => { + it('should override mcp-based commands with file-based commands of the same name', async () => { + const mcpAction = vi.fn(); + const fileAction = vi.fn(); + + const mcpCommand = createTestCommand( + { + name: 'override', + description: 'mcp', + action: mcpAction, }, - }; - - // Setup getMCPServerStatus mock implementation - vi.mocked(getMCPServerStatus).mockImplementation((serverName) => { - if (serverName === 'server1') return MCPServerStatus.CONNECTED; - return MCPServerStatus.DISCONNECTED; - }); - - // Setup getMCPDiscoveryState mock to return completed - vi.mocked(getMCPDiscoveryState).mockReturnValue( - MCPDiscoveryState.COMPLETED, + CommandKind.MCP_PROMPT, + ); + const fileCommand = createTestCommand( + { name: 'override', description: 'file', action: fileAction }, + CommandKind.FILE, ); - // Mock tools from server with descriptions - const mockServerTools = [ - { - name: 'tool1', - description: 'This is tool 1 description', - schema: { - parameters: [{ name: 'param1', type: 'string' }], - }, - }, - { - name: 'tool2', - description: 'This is tool 2 description', - schema: { - parameters: [{ name: 'param2', type: 'number' }], - }, - }, - ]; + const result = setupProcessorHook([], [fileCommand], [mcpCommand]); - mockConfig = { - ...mockConfig, - getToolRegistry: vi.fn().mockResolvedValue({ - getToolsByServer: vi.fn().mockReturnValue(mockServerTools), - }), - getMcpServers: 
vi.fn().mockReturnValue(mockMcpServers), - } as unknown as Config; - - const { handleSlashCommand } = getProcessor(true); - let commandResult: SlashCommandProcessorResult | false = false; - await act(async () => { - commandResult = await handleSlashCommand('/mcp schema'); + await waitFor(() => { + // The service should only return one command with the name 'override' + expect(result.current.slashCommands).toHaveLength(1); }); - expect(mockAddItem).toHaveBeenNthCalledWith( - 2, - expect.objectContaining({ - type: MessageType.INFO, - text: expect.stringContaining('Configured MCP servers:'), - }), + await act(async () => { + await result.current.handleSlashCommand('/override'); + }); + + // Only the file-based command's action should be called. + expect(fileAction).toHaveBeenCalledTimes(1); + expect(mcpAction).not.toHaveBeenCalled(); + }); + + it('should prioritize a command with a primary name over a command with a matching alias', async () => { + const quitAction = vi.fn(); + const exitAction = vi.fn(); + + const quitCommand = createTestCommand({ + name: 'quit', + altNames: ['exit'], + action: quitAction, + }); + + const exitCommand = createTestCommand( + { + name: 'exit', + action: exitAction, + }, + CommandKind.FILE, + ); + + // The order of commands in the final loaded array is not guaranteed, + // so the test must work regardless of which comes first. + const result = setupProcessorHook([quitCommand], [exitCommand]); + + await waitFor(() => { + expect(result.current.slashCommands).toHaveLength(2); + }); + + await act(async () => { + await result.current.handleSlashCommand('/exit'); + }); + + // The action for the command whose primary name is 'exit' should be called. + expect(exitAction).toHaveBeenCalledTimes(1); + // The action for the command that has 'exit' as an alias should NOT be called. 
+ expect(quitAction).not.toHaveBeenCalled(); + }); + + it('should add an overridden command to the history', async () => { + const quitCommand = createTestCommand({ + name: 'quit', + altNames: ['exit'], + action: vi.fn(), + }); + const exitCommand = createTestCommand( + { name: 'exit', action: vi.fn() }, + CommandKind.FILE, + ); + + const result = setupProcessorHook([quitCommand], [exitCommand]); + await waitFor(() => expect(result.current.slashCommands).toHaveLength(2)); + + await act(async () => { + await result.current.handleSlashCommand('/exit'); + }); + + // It should be added to the history. + expect(mockAddItem).toHaveBeenCalledWith( + { type: MessageType.USER, text: '/exit' }, expect.any(Number), ); - - const message = mockAddItem.mock.calls[1][0].text; - - // Check that server description is included - expect(message).toContain('Ready (2 tools)'); - expect(message).toContain('This is a server description'); - - // Check that tool schemas are included - expect(message).toContain('tool 1 description'); - expect(message).toContain('param1'); - expect(message).toContain('string'); - expect(message).toContain('tool 2 description'); - expect(message).toContain('param2'); - expect(message).toContain('number'); - - expect(commandResult).toEqual({ type: 'handled' }); }); }); - describe('/compress command', () => { - it('should call tryCompressChat(true)', async () => { - const hook = getProcessorHook(); - mockTryCompressChat.mockResolvedValue({ - originalTokenCount: 100, - newTokenCount: 50, - }); + describe('Lifecycle', () => { + it('should abort command loading when the hook unmounts', () => { + const abortSpy = vi.spyOn(AbortController.prototype, 'abort'); + const { unmount } = renderHook(() => + useSlashCommandProcessor( + mockConfig, + mockSettings, + mockAddItem, + mockClearItems, + mockLoadHistory, + vi.fn(), // refreshStatic + mockSetShowHelp, + vi.fn(), // onDebugMessage + vi.fn(), // openThemeDialog + mockOpenAuthDialog, + vi.fn(), // openEditorDialog, + 
vi.fn(), // toggleCorgiMode + mockSetQuittingMessages, + vi.fn(), // openPrivacyNotice + vi.fn(), // toggleVimEnabled + ), + ); - await act(async () => { - hook.result.current.handleSlashCommand('/compress'); - }); - await act(async () => { - hook.rerender(); - }); - expect(hook.result.current.pendingHistoryItems).toEqual([]); - expect(mockGeminiClient.tryCompressChat).toHaveBeenCalledWith( - 'Prompt Id not set', - true, - ); - expect(mockAddItem).toHaveBeenNthCalledWith( - 2, - expect.objectContaining({ - type: MessageType.COMPRESSION, - compression: { - isPending: false, - originalTokenCount: 100, - newTokenCount: 50, - }, - }), - expect.any(Number), - ); + unmount(); + + expect(abortSpy).toHaveBeenCalledTimes(1); }); }); }); diff --git a/packages/cli/src/ui/hooks/slashCommandProcessor.ts b/packages/cli/src/ui/hooks/slashCommandProcessor.ts index c87be1d79..16c25635c 100644 --- a/packages/cli/src/ui/hooks/slashCommandProcessor.ts +++ b/packages/cli/src/ui/hooks/slashCommandProcessor.ts @@ -6,7 +6,6 @@ import { useCallback, useMemo, useEffect, useState } from 'react'; import { type PartListUnion } from '@google/genai'; -import open from 'open'; import process from 'node:process'; import { UseHistoryManagerReturn } from './useHistoryManager.js'; import { useStateAndRef } from './useStateAndRef.js'; @@ -14,10 +13,7 @@ import { Config, GitService, Logger, - MCPDiscoveryState, - MCPServerStatus, - getMCPDiscoveryState, - getMCPServerStatus, + ToolConfirmationOutcome, } from '@qwen-code/qwen-code-core'; import { useSessionStats } from '../contexts/SessionContext.js'; import { @@ -27,35 +23,12 @@ import { HistoryItem, SlashCommandProcessorResult, } from '../types.js'; -import { promises as fs } from 'fs'; -import path from 'path'; -import { GIT_COMMIT_INFO } from '../../generated/git-commit.js'; -import { formatDuration, formatMemoryUsage } from '../utils/formatters.js'; -import { getCliVersion } from '../../utils/version.js'; import { LoadedSettings } from 
'../../config/settings.js'; -import { - type CommandContext, - type SlashCommandActionReturn, - type SlashCommand, -} from '../commands/types.js'; +import { type CommandContext, type SlashCommand } from '../commands/types.js'; import { CommandService } from '../../services/CommandService.js'; - -// This interface is for the old, inline command definitions. -// It will be removed once all commands are migrated to the new system. -export interface LegacySlashCommand { - name: string; - altName?: string; - description?: string; - completion?: () => Promise; - action: ( - mainCommand: string, - subCommand?: string, - args?: string, - ) => - | void - | SlashCommandActionReturn - | Promise; -} +import { BuiltinCommandLoader } from '../../services/BuiltinCommandLoader.js'; +import { FileCommandLoader } from '../../services/FileCommandLoader.js'; +import { McpPromptLoader } from '../../services/McpPromptLoader.js'; /** * Hook to define and process slash commands (e.g., /help, /clear). @@ -63,7 +36,6 @@ export interface LegacySlashCommand { export const useSlashCommandProcessor = ( config: Config | null, settings: LoadedSettings, - history: HistoryItem[], addItem: UseHistoryManagerReturn['addItem'], clearItems: UseHistoryManagerReturn['clearItems'], loadHistory: UseHistoryManagerReturn['loadHistory'], @@ -74,12 +46,24 @@ export const useSlashCommandProcessor = ( openAuthDialog: () => void, openEditorDialog: () => void, toggleCorgiMode: () => void, - showToolDescriptions: boolean = false, setQuittingMessages: (message: HistoryItem[]) => void, openPrivacyNotice: () => void, + toggleVimEnabled: () => Promise, + setIsProcessing: (isProcessing: boolean) => void, ) => { const session = useSessionStats(); - const [commands, setCommands] = useState([]); + const [commands, setCommands] = useState([]); + const [shellConfirmationRequest, setShellConfirmationRequest] = + useState void; + }>(null); + const [sessionShellAllowlist, setSessionShellAllowlist] = useState( + new Set(), + ); 
const gitService = useMemo(() => { if (!config?.getProjectRoot()) { return; @@ -168,11 +152,16 @@ export const useSlashCommandProcessor = ( console.clear(); refreshStatic(); }, + loadHistory, setDebugMessage: onDebugMessage, + pendingItem: pendingCompressionItemRef.current, + setPendingItem: setPendingCompressionItem, + toggleCorgiMode, + toggleVimEnabled, }, session: { stats: session.stats, - resetSession: session.resetSession, + sessionShellAllowlist, }, }), [ @@ -180,1041 +169,302 @@ export const useSlashCommandProcessor = ( settings, gitService, logger, + loadHistory, addItem, clearItems, refreshStatic, session.stats, - session.resetSession, onDebugMessage, + pendingCompressionItemRef, + setPendingCompressionItem, + toggleCorgiMode, + toggleVimEnabled, + sessionShellAllowlist, ], ); - const commandService = useMemo(() => new CommandService(), []); - useEffect(() => { + const controller = new AbortController(); const load = async () => { - await commandService.loadCommands(); + const loaders = [ + new McpPromptLoader(config), + new BuiltinCommandLoader(config), + new FileCommandLoader(config), + ]; + const commandService = await CommandService.create( + loaders, + controller.signal, + ); setCommands(commandService.getCommands()); }; load(); - }, [commandService]); - const savedChatTags = useCallback(async () => { - const geminiDir = config?.getProjectTempDir(); - if (!geminiDir) { - return []; - } - try { - const files = await fs.readdir(geminiDir); - return files - .filter( - (file) => file.startsWith('checkpoint-') && file.endsWith('.json'), - ) - .map((file) => file.replace('checkpoint-', '').replace('.json', '')); - } catch (_err) { - return []; - } + return () => { + controller.abort(); + }; }, [config]); - // Define legacy commands - // This list contains all commands that have NOT YET been migrated to the - // new system. As commands are migrated, they are removed from this list. 
- const legacyCommands: LegacySlashCommand[] = useMemo(() => { - const commands: LegacySlashCommand[] = [ - // `/help` and `/clear` have been migrated and REMOVED from this list. - { - name: 'docs', - description: 'open full Qwen Code documentation in your browser', - action: async (_mainCommand, _subCommand, _args) => { - const docsUrl = - 'https://github.com/QwenLM/Qwen3-Coder/blob/main/README.md'; - if (process.env.SANDBOX && process.env.SANDBOX !== 'sandbox-exec') { - addMessage({ - type: MessageType.INFO, - content: `Please open the following URL in your browser to view the documentation:\n${docsUrl}`, - timestamp: new Date(), - }); - } else { - addMessage({ - type: MessageType.INFO, - content: `Opening documentation in your browser: ${docsUrl}`, - timestamp: new Date(), - }); - await open(docsUrl); - } - }, - }, - { - name: 'editor', - description: 'set external editor preference', - action: (_mainCommand, _subCommand, _args) => openEditorDialog(), - }, - { - name: 'stats', - altName: 'usage', - description: 'check session stats. 
Usage: /stats [model|tools]', - action: (_mainCommand, subCommand, _args) => { - if (subCommand === 'model') { - addMessage({ - type: MessageType.MODEL_STATS, - timestamp: new Date(), - }); - return; - } else if (subCommand === 'tools') { - addMessage({ - type: MessageType.TOOL_STATS, - timestamp: new Date(), - }); - return; - } - - const now = new Date(); - const { sessionStartTime } = session.stats; - const wallDuration = now.getTime() - sessionStartTime.getTime(); - - addMessage({ - type: MessageType.STATS, - duration: formatDuration(wallDuration), - timestamp: new Date(), - }); - }, - }, - { - name: 'mcp', - description: 'list configured MCP servers and tools', - action: async (_mainCommand, _subCommand, _args) => { - // Check if the _subCommand includes a specific flag to control description visibility - let useShowDescriptions = showToolDescriptions; - if (_subCommand === 'desc' || _subCommand === 'descriptions') { - useShowDescriptions = true; - } else if ( - _subCommand === 'nodesc' || - _subCommand === 'nodescriptions' - ) { - useShowDescriptions = false; - } else if (_args === 'desc' || _args === 'descriptions') { - useShowDescriptions = true; - } else if (_args === 'nodesc' || _args === 'nodescriptions') { - useShowDescriptions = false; - } - // Check if the _subCommand includes a specific flag to show detailed tool schema - let useShowSchema = false; - if (_subCommand === 'schema' || _args === 'schema') { - useShowSchema = true; - } - - const toolRegistry = await config?.getToolRegistry(); - if (!toolRegistry) { - addMessage({ - type: MessageType.ERROR, - content: 'Could not retrieve tool registry.', - timestamp: new Date(), - }); - return; - } - - const mcpServers = config?.getMcpServers() || {}; - const serverNames = Object.keys(mcpServers); - - if (serverNames.length === 0) { - const docsUrl = 'https://goo.gle/gemini-cli-docs-mcp'; - if (process.env.SANDBOX && process.env.SANDBOX !== 'sandbox-exec') { - addMessage({ - type: MessageType.INFO, - 
content: `No MCP servers configured. Please open the following URL in your browser to view documentation:\n${docsUrl}`, - timestamp: new Date(), - }); - } else { - addMessage({ - type: MessageType.INFO, - content: `No MCP servers configured. Opening documentation in your browser: ${docsUrl}`, - timestamp: new Date(), - }); - await open(docsUrl); - } - return; - } - - // Check if any servers are still connecting - const connectingServers = serverNames.filter( - (name) => getMCPServerStatus(name) === MCPServerStatus.CONNECTING, - ); - const discoveryState = getMCPDiscoveryState(); - - let message = ''; - - // Add overall discovery status message if needed - if ( - discoveryState === MCPDiscoveryState.IN_PROGRESS || - connectingServers.length > 0 - ) { - message += `\u001b[33m⏳ MCP servers are starting up (${connectingServers.length} initializing)...\u001b[0m\n`; - message += `\u001b[90mNote: First startup may take longer. Tool availability will update automatically.\u001b[0m\n\n`; - } - - message += 'Configured MCP servers:\n\n'; - - for (const serverName of serverNames) { - const serverTools = toolRegistry.getToolsByServer(serverName); - const status = getMCPServerStatus(serverName); - - // Add status indicator with descriptive text - let statusIndicator = ''; - let statusText = ''; - switch (status) { - case MCPServerStatus.CONNECTED: - statusIndicator = '🟢'; - statusText = 'Ready'; - break; - case MCPServerStatus.CONNECTING: - statusIndicator = '🔄'; - statusText = 'Starting... 
(first startup may take longer)'; - break; - case MCPServerStatus.DISCONNECTED: - default: - statusIndicator = '🔴'; - statusText = 'Disconnected'; - break; - } - - // Get server description if available - const server = mcpServers[serverName]; - - // Format server header with bold formatting and status - message += `${statusIndicator} \u001b[1m${serverName}\u001b[0m - ${statusText}`; - - // Add tool count with conditional messaging - if (status === MCPServerStatus.CONNECTED) { - message += ` (${serverTools.length} tools)`; - } else if (status === MCPServerStatus.CONNECTING) { - message += ` (tools will appear when ready)`; - } else { - message += ` (${serverTools.length} tools cached)`; - } - - // Add server description with proper handling of multi-line descriptions - if ((useShowDescriptions || useShowSchema) && server?.description) { - const greenColor = '\u001b[32m'; - const resetColor = '\u001b[0m'; - - const descLines = server.description.trim().split('\n'); - if (descLines) { - message += ':\n'; - for (const descLine of descLines) { - message += ` ${greenColor}${descLine}${resetColor}\n`; - } - } else { - message += '\n'; - } - } else { - message += '\n'; - } - - // Reset formatting after server entry - message += '\u001b[0m'; - - if (serverTools.length > 0) { - serverTools.forEach((tool) => { - if ( - (useShowDescriptions || useShowSchema) && - tool.description - ) { - // Format tool name in cyan using simple ANSI cyan color - message += ` - \u001b[36m${tool.name}\u001b[0m`; - - // Apply green color to the description text - const greenColor = '\u001b[32m'; - const resetColor = '\u001b[0m'; - - // Handle multi-line descriptions by properly indenting and preserving formatting - const descLines = tool.description.trim().split('\n'); - if (descLines) { - message += ':\n'; - for (const descLine of descLines) { - message += ` ${greenColor}${descLine}${resetColor}\n`; - } - } else { - message += '\n'; - } - // Reset is handled inline with each line now - } else { 
- // Use cyan color for the tool name even when not showing descriptions - message += ` - \u001b[36m${tool.name}\u001b[0m\n`; - } - if (useShowSchema) { - // Prefix the parameters in cyan - message += ` \u001b[36mParameters:\u001b[0m\n`; - // Apply green color to the parameter text - const greenColor = '\u001b[32m'; - const resetColor = '\u001b[0m'; - - const paramsLines = JSON.stringify( - tool.schema.parameters, - null, - 2, - ) - .trim() - .split('\n'); - if (paramsLines) { - for (const paramsLine of paramsLines) { - message += ` ${greenColor}${paramsLine}${resetColor}\n`; - } - } - } - }); - } else { - message += ' No tools available\n'; - } - message += '\n'; - } - - // Make sure to reset any ANSI formatting at the end to prevent it from affecting the terminal - message += '\u001b[0m'; - - addMessage({ - type: MessageType.INFO, - content: message, - timestamp: new Date(), - }); - }, - }, - { - name: 'extensions', - description: 'list active extensions', - action: async () => { - const activeExtensions = config?.getActiveExtensions(); - if (!activeExtensions || activeExtensions.length === 0) { - addMessage({ - type: MessageType.INFO, - content: 'No active extensions.', - timestamp: new Date(), - }); - return; - } - - let message = 'Active extensions:\n\n'; - for (const ext of activeExtensions) { - message += ` - \u001b[36m${ext.name} (v${ext.version})\u001b[0m\n`; - } - // Make sure to reset any ANSI formatting at the end to prevent it from affecting the terminal - message += '\u001b[0m'; - - addMessage({ - type: MessageType.INFO, - content: message, - timestamp: new Date(), - }); - }, - }, - { - name: 'tools', - description: 'list available Qwen Code tools', - action: async (_mainCommand, _subCommand, _args) => { - // Check if the _subCommand includes a specific flag to control description visibility - let useShowDescriptions = showToolDescriptions; - if (_subCommand === 'desc' || _subCommand === 'descriptions') { - useShowDescriptions = true; - } else if ( - 
_subCommand === 'nodesc' || - _subCommand === 'nodescriptions' - ) { - useShowDescriptions = false; - } else if (_args === 'desc' || _args === 'descriptions') { - useShowDescriptions = true; - } else if (_args === 'nodesc' || _args === 'nodescriptions') { - useShowDescriptions = false; - } - - const toolRegistry = await config?.getToolRegistry(); - const tools = toolRegistry?.getAllTools(); - if (!tools) { - addMessage({ - type: MessageType.ERROR, - content: 'Could not retrieve tools.', - timestamp: new Date(), - }); - return; - } - - // Filter out MCP tools by checking if they have a serverName property - const geminiTools = tools.filter((tool) => !('serverName' in tool)); - - let message = 'Available Qwen Code tools:\n\n'; - - if (geminiTools.length > 0) { - geminiTools.forEach((tool) => { - if (useShowDescriptions && tool.description) { - // Format tool name in cyan using simple ANSI cyan color - message += ` - \u001b[36m${tool.displayName} (${tool.name})\u001b[0m:\n`; - - // Apply green color to the description text - const greenColor = '\u001b[32m'; - const resetColor = '\u001b[0m'; - - // Handle multi-line descriptions by properly indenting and preserving formatting - const descLines = tool.description.trim().split('\n'); - - // If there are multiple lines, add proper indentation for each line - if (descLines) { - for (const descLine of descLines) { - message += ` ${greenColor}${descLine}${resetColor}\n`; - } - } - } else { - // Use cyan color for the tool name even when not showing descriptions - message += ` - \u001b[36m${tool.displayName}\u001b[0m\n`; - } - }); - } else { - message += ' No tools available\n'; - } - message += '\n'; - - // Make sure to reset any ANSI formatting at the end to prevent it from affecting the terminal - message += '\u001b[0m'; - - addMessage({ - type: MessageType.INFO, - content: message, - timestamp: new Date(), - }); - }, - }, - { - name: 'corgi', - action: (_mainCommand, _subCommand, _args) => { - toggleCorgiMode(); - }, - }, 
- { - name: 'bug', - description: 'submit a bug report', - action: async (_mainCommand, _subCommand, args) => { - let bugDescription = _subCommand || ''; - if (args) { - bugDescription += ` ${args}`; - } - bugDescription = bugDescription.trim(); - - const osVersion = `${process.platform} ${process.version}`; - let sandboxEnv = 'no sandbox'; - if (process.env.SANDBOX && process.env.SANDBOX !== 'sandbox-exec') { - sandboxEnv = process.env.SANDBOX.replace(/^gemini-(?:code-)?/, ''); - } else if (process.env.SANDBOX === 'sandbox-exec') { - sandboxEnv = `sandbox-exec (${ - process.env.SEATBELT_PROFILE || 'unknown' - })`; - } - const modelVersion = config?.getModel() || 'Unknown'; - const cliVersion = await getCliVersion(); - const memoryUsage = formatMemoryUsage(process.memoryUsage().rss); - - const info = ` -* **CLI Version:** ${cliVersion} -* **Git Commit:** ${GIT_COMMIT_INFO} -* **Operating System:** ${osVersion} -* **Sandbox Environment:** ${sandboxEnv} -* **Model Version:** ${modelVersion} -* **Memory Usage:** ${memoryUsage} -`; - - let bugReportUrl = - 'https://github.com/QwenLM/Qwen-Code/issues/new?template=bug_report.yml&title={title}&info={info}'; - const bugCommand = config?.getBugCommand(); - if (bugCommand?.urlTemplate) { - bugReportUrl = bugCommand.urlTemplate; - } - bugReportUrl = bugReportUrl - .replace('{title}', encodeURIComponent(bugDescription)) - .replace('{info}', encodeURIComponent(info)); - - addMessage({ - type: MessageType.INFO, - content: `To submit your bug report, please open the following URL in your browser:\n${bugReportUrl}`, - timestamp: new Date(), - }); - (async () => { - try { - await open(bugReportUrl); - } catch (error) { - const errorMessage = - error instanceof Error ? error.message : String(error); - addMessage({ - type: MessageType.ERROR, - content: `Could not open URL in browser: ${errorMessage}`, - timestamp: new Date(), - }); - } - })(); - }, - }, - { - name: 'chat', - description: - 'Manage conversation history. 
Usage: /chat ', - action: async (_mainCommand, subCommand, args) => { - const tag = (args || '').trim(); - const logger = new Logger(config?.getSessionId() || ''); - await logger.initialize(); - const chat = await config?.getGeminiClient()?.getChat(); - if (!chat) { - addMessage({ - type: MessageType.ERROR, - content: 'No chat client available for conversation status.', - timestamp: new Date(), - }); - return; - } - if (!subCommand) { - addMessage({ - type: MessageType.ERROR, - content: 'Missing command\nUsage: /chat ', - timestamp: new Date(), - }); - return; - } - switch (subCommand) { - case 'save': { - if (!tag) { - addMessage({ - type: MessageType.ERROR, - content: 'Missing tag. Usage: /chat save ', - timestamp: new Date(), - }); - return; - } - const history = chat.getHistory(); - if (history.length > 0) { - await logger.saveCheckpoint(chat?.getHistory() || [], tag); - addMessage({ - type: MessageType.INFO, - content: `Conversation checkpoint saved with tag: ${tag}.`, - timestamp: new Date(), - }); - } else { - addMessage({ - type: MessageType.INFO, - content: 'No conversation found to save.', - timestamp: new Date(), - }); - } - return; - } - case 'resume': - case 'restore': - case 'load': { - if (!tag) { - addMessage({ - type: MessageType.ERROR, - content: 'Missing tag. Usage: /chat resume ', - timestamp: new Date(), - }); - return; - } - const conversation = await logger.loadCheckpoint(tag); - if (conversation.length === 0) { - addMessage({ - type: MessageType.INFO, - content: `No saved checkpoint found with tag: ${tag}.`, - timestamp: new Date(), - }); - return; - } - - clearItems(); - chat.clearHistory(); - const rolemap: { [key: string]: MessageType } = { - user: MessageType.USER, - model: MessageType.GEMINI, - }; - let hasSystemPrompt = false; - let i = 0; - for (const item of conversation) { - i += 1; - - // Add each item to history regardless of whether we display - // it. 
- chat.addHistory(item); - - const text = - item.parts - ?.filter((m) => !!m.text) - .map((m) => m.text) - .join('') || ''; - if (!text) { - // Parsing Part[] back to various non-text output not yet implemented. - continue; - } - if (i === 1 && text.match(/context for our chat/)) { - hasSystemPrompt = true; - } - if (i > 2 || !hasSystemPrompt) { - addItem( - { - type: - (item.role && rolemap[item.role]) || MessageType.GEMINI, - text, - } as HistoryItemWithoutId, - i, - ); - } - } - console.clear(); - refreshStatic(); - return; - } - case 'list': - addMessage({ - type: MessageType.INFO, - content: - 'list of saved conversations: ' + - (await savedChatTags()).join(', '), - timestamp: new Date(), - }); - return; - default: - addMessage({ - type: MessageType.ERROR, - content: `Unknown /chat command: ${subCommand}. Available: list, save, resume`, - timestamp: new Date(), - }); - return; - } - }, - completion: async () => - (await savedChatTags()).map((tag) => 'resume ' + tag), - }, - { - name: 'quit', - altName: 'exit', - description: 'exit the cli', - action: async (mainCommand, _subCommand, _args) => { - const now = new Date(); - const { sessionStartTime } = session.stats; - const wallDuration = now.getTime() - sessionStartTime.getTime(); - - setQuittingMessages([ - { - type: 'user', - text: `/${mainCommand}`, - id: now.getTime() - 1, - }, - { - type: 'quit', - duration: formatDuration(wallDuration), - id: now.getTime(), - }, - ]); - - setTimeout(() => { - process.exit(0); - }, 100); - }, - }, - { - name: 'compress', - altName: 'summarize', - description: 'Compresses the context by replacing it with a summary.', - action: async (_mainCommand, _subCommand, _args) => { - if (pendingCompressionItemRef.current !== null) { - addMessage({ - type: MessageType.ERROR, - content: - 'Already compressing, wait for previous request to complete', - timestamp: new Date(), - }); - return; - } - setPendingCompressionItem({ - type: MessageType.COMPRESSION, - compression: { - isPending: 
true, - originalTokenCount: null, - newTokenCount: null, - }, - }); - try { - const compressed = await config! - .getGeminiClient()! - // TODO: Set Prompt id for CompressChat from SlashCommandProcessor. - .tryCompressChat('Prompt Id not set', true); - if (compressed) { - addMessage({ - type: MessageType.COMPRESSION, - compression: { - isPending: false, - originalTokenCount: compressed.originalTokenCount, - newTokenCount: compressed.newTokenCount, - }, - timestamp: new Date(), - }); - } else { - addMessage({ - type: MessageType.ERROR, - content: 'Failed to compress chat history.', - timestamp: new Date(), - }); - } - } catch (e) { - addMessage({ - type: MessageType.ERROR, - content: `Failed to compress chat history: ${e instanceof Error ? e.message : String(e)}`, - timestamp: new Date(), - }); - } - setPendingCompressionItem(null); - }, - }, - ]; - - if (config?.getCheckpointingEnabled()) { - commands.push({ - name: 'restore', - description: - 'restore a tool call. This will reset the conversation and file history to the state it was in when the tool call was suggested', - completion: async () => { - const checkpointDir = config?.getProjectTempDir() - ? path.join(config.getProjectTempDir(), 'checkpoints') - : undefined; - if (!checkpointDir) { - return []; - } - try { - const files = await fs.readdir(checkpointDir); - return files - .filter((file) => file.endsWith('.json')) - .map((file) => file.replace('.json', '')); - } catch (_err) { - return []; - } - }, - action: async (_mainCommand, subCommand, _args) => { - const checkpointDir = config?.getProjectTempDir() - ? path.join(config.getProjectTempDir(), 'checkpoints') - : undefined; - - if (!checkpointDir) { - addMessage({ - type: MessageType.ERROR, - content: 'Could not determine the .gemini directory path.', - timestamp: new Date(), - }); - return; - } - - try { - // Ensure the directory exists before trying to read it. 
- await fs.mkdir(checkpointDir, { recursive: true }); - const files = await fs.readdir(checkpointDir); - const jsonFiles = files.filter((file) => file.endsWith('.json')); - - if (!subCommand) { - if (jsonFiles.length === 0) { - addMessage({ - type: MessageType.INFO, - content: 'No restorable tool calls found.', - timestamp: new Date(), - }); - return; - } - const truncatedFiles = jsonFiles.map((file) => { - const components = file.split('.'); - if (components.length <= 1) { - return file; - } - components.pop(); - return components.join('.'); - }); - const fileList = truncatedFiles.join('\n'); - addMessage({ - type: MessageType.INFO, - content: `Available tool calls to restore:\n\n${fileList}`, - timestamp: new Date(), - }); - return; - } - - const selectedFile = subCommand.endsWith('.json') - ? subCommand - : `${subCommand}.json`; - - if (!jsonFiles.includes(selectedFile)) { - addMessage({ - type: MessageType.ERROR, - content: `File not found: ${selectedFile}`, - timestamp: new Date(), - }); - return; - } - - const filePath = path.join(checkpointDir, selectedFile); - const data = await fs.readFile(filePath, 'utf-8'); - const toolCallData = JSON.parse(data); - - if (toolCallData.history) { - loadHistory(toolCallData.history); - } - - if (toolCallData.clientHistory) { - await config - ?.getGeminiClient() - ?.setHistory(toolCallData.clientHistory); - } - - if (toolCallData.commitHash) { - await gitService?.restoreProjectFromSnapshot( - toolCallData.commitHash, - ); - addMessage({ - type: MessageType.INFO, - content: `Restored project to the state before the tool call.`, - timestamp: new Date(), - }); - } - - return { - type: 'tool', - toolName: toolCallData.toolCall.name, - toolArgs: toolCallData.toolCall.args, - }; - } catch (error) { - addMessage({ - type: MessageType.ERROR, - content: `Could not read restorable tool calls. 
This is the error: ${error}`, - timestamp: new Date(), - }); - } - }, - }); - } - return commands; - }, [ - addMessage, - openEditorDialog, - toggleCorgiMode, - savedChatTags, - config, - showToolDescriptions, - session, - gitService, - loadHistory, - addItem, - setQuittingMessages, - pendingCompressionItemRef, - setPendingCompressionItem, - clearItems, - refreshStatic, - ]); - const handleSlashCommand = useCallback( async ( rawQuery: PartListUnion, + oneTimeShellAllowlist?: Set, ): Promise => { - if (typeof rawQuery !== 'string') { - return false; - } + setIsProcessing(true); + try { + if (typeof rawQuery !== 'string') { + return false; + } - const trimmed = rawQuery.trim(); - if (!trimmed.startsWith('/') && !trimmed.startsWith('?')) { - return false; - } + const trimmed = rawQuery.trim(); + if (!trimmed.startsWith('/') && !trimmed.startsWith('?')) { + return false; + } - const userMessageTimestamp = Date.now(); - if (trimmed !== '/quit' && trimmed !== '/exit') { + const userMessageTimestamp = Date.now(); addItem( { type: MessageType.USER, text: trimmed }, userMessageTimestamp, ); - } - const parts = trimmed.substring(1).trim().split(/\s+/); - const commandPath = parts.filter((p) => p); // The parts of the command, e.g., ['memory', 'add'] + const parts = trimmed.substring(1).trim().split(/\s+/); + const commandPath = parts.filter((p) => p); // The parts of the command, e.g., ['memory', 'add'] - // --- Start of New Tree Traversal Logic --- + let currentCommands = commands; + let commandToExecute: SlashCommand | undefined; + let pathIndex = 0; - let currentCommands = commands; - let commandToExecute: SlashCommand | undefined; - let pathIndex = 0; + for (const part of commandPath) { + // TODO: For better performance and architectural clarity, this two-pass + // search could be replaced. A more optimal approach would be to + // pre-compute a single lookup map in `CommandService.ts` that resolves + // all name and alias conflicts during the initial loading phase. 
The + // processor would then perform a single, fast lookup on that map. - for (const part of commandPath) { - const foundCommand = currentCommands.find( - (cmd) => cmd.name === part || cmd.altName === part, - ); + // First pass: check for an exact match on the primary command name. + let foundCommand = currentCommands.find((cmd) => cmd.name === part); - if (foundCommand) { - commandToExecute = foundCommand; - pathIndex++; - if (foundCommand.subCommands) { - currentCommands = foundCommand.subCommands; + // Second pass: if no primary name matches, check for an alias. + if (!foundCommand) { + foundCommand = currentCommands.find((cmd) => + cmd.altNames?.includes(part), + ); + } + + if (foundCommand) { + commandToExecute = foundCommand; + pathIndex++; + if (foundCommand.subCommands) { + currentCommands = foundCommand.subCommands; + } else { + break; + } } else { break; } - } else { - break; } - } - if (commandToExecute) { - const args = parts.slice(pathIndex).join(' '); + if (commandToExecute) { + const args = parts.slice(pathIndex).join(' '); - if (commandToExecute.action) { - const result = await commandToExecute.action(commandContext, args); + if (commandToExecute.action) { + const fullCommandContext: CommandContext = { + ...commandContext, + invocation: { + raw: trimmed, + name: commandToExecute.name, + args, + }, + }; - if (result) { - switch (result.type) { - case 'tool': - return { - type: 'schedule_tool', - toolName: result.toolName, - toolArgs: result.toolArgs, - }; - case 'message': - addItem( - { - type: - result.messageType === 'error' - ? MessageType.ERROR - : MessageType.INFO, - text: result.content, - }, - Date.now(), - ); - return { type: 'handled' }; - case 'dialog': - switch (result.dialog) { - case 'help': - setShowHelp(true); + // If a one-time list is provided for a "Proceed" action, temporarily + // augment the session allowlist for this single execution. 
+ if (oneTimeShellAllowlist && oneTimeShellAllowlist.size > 0) { + fullCommandContext.session = { + ...fullCommandContext.session, + sessionShellAllowlist: new Set([ + ...fullCommandContext.session.sessionShellAllowlist, + ...oneTimeShellAllowlist, + ]), + }; + } + + const result = await commandToExecute.action( + fullCommandContext, + args, + ); + + if (result) { + switch (result.type) { + case 'tool': + return { + type: 'schedule_tool', + toolName: result.toolName, + toolArgs: result.toolArgs, + }; + case 'message': + addItem( + { + type: + result.messageType === 'error' + ? MessageType.ERROR + : MessageType.INFO, + text: result.content, + }, + Date.now(), + ); + return { type: 'handled' }; + case 'dialog': + switch (result.dialog) { + case 'help': + setShowHelp(true); + return { type: 'handled' }; + case 'auth': + openAuthDialog(); + return { type: 'handled' }; + case 'theme': + openThemeDialog(); + return { type: 'handled' }; + case 'editor': + openEditorDialog(); + return { type: 'handled' }; + case 'privacy': + openPrivacyNotice(); + return { type: 'handled' }; + default: { + const unhandled: never = result.dialog; + throw new Error( + `Unhandled slash command result: ${unhandled}`, + ); + } + } + case 'load_history': { + await config + ?.getGeminiClient() + ?.setHistory(result.clientHistory); + fullCommandContext.ui.clear(); + result.history.forEach((item, index) => { + fullCommandContext.ui.addItem(item, index); + }); + return { type: 'handled' }; + } + case 'quit': + setQuittingMessages(result.messages); + setTimeout(() => { + process.exit(0); + }, 100); + return { type: 'handled' }; + + case 'submit_prompt': + return { + type: 'submit_prompt', + content: result.content, + }; + case 'confirm_shell_commands': { + const { outcome, approvedCommands } = await new Promise<{ + outcome: ToolConfirmationOutcome; + approvedCommands?: string[]; + }>((resolve) => { + setShellConfirmationRequest({ + commands: result.commandsToConfirm, + onConfirm: ( + resolvedOutcome, 
+ resolvedApprovedCommands, + ) => { + setShellConfirmationRequest(null); // Close the dialog + resolve({ + outcome: resolvedOutcome, + approvedCommands: resolvedApprovedCommands, + }); + }, + }); + }); + + if ( + outcome === ToolConfirmationOutcome.Cancel || + !approvedCommands || + approvedCommands.length === 0 + ) { return { type: 'handled' }; - case 'auth': - openAuthDialog(); - return { type: 'handled' }; - case 'theme': - openThemeDialog(); - return { type: 'handled' }; - case 'privacy': - openPrivacyNotice(); - return { type: 'handled' }; - default: { - const unhandled: never = result.dialog; - throw new Error( - `Unhandled slash command result: ${unhandled}`, + } + + if (outcome === ToolConfirmationOutcome.ProceedAlways) { + setSessionShellAllowlist( + (prev) => new Set([...prev, ...approvedCommands]), ); } + + return await handleSlashCommand( + result.originalInvocation.raw, + // Pass the approved commands as a one-time grant for this execution. + new Set(approvedCommands), + ); + } + default: { + const unhandled: never = result; + throw new Error( + `Unhandled slash command result: ${unhandled}`, + ); } - default: { - const unhandled: never = result; - throw new Error(`Unhandled slash command result: ${unhandled}`); } } - } - return { type: 'handled' }; - } else if (commandToExecute.subCommands) { - const helpText = `Command '/${commandToExecute.name}' requires a subcommand. Available:\n${commandToExecute.subCommands - .map((sc) => ` - ${sc.name}: ${sc.description || ''}`) - .join('\n')}`; - addMessage({ - type: MessageType.INFO, - content: helpText, - timestamp: new Date(), - }); - return { type: 'handled' }; + return { type: 'handled' }; + } else if (commandToExecute.subCommands) { + const helpText = `Command '/${commandToExecute.name}' requires a subcommand. 
Available:\n${commandToExecute.subCommands + .map((sc) => ` - ${sc.name}: ${sc.description || ''}`) + .join('\n')}`; + addMessage({ + type: MessageType.INFO, + content: helpText, + timestamp: new Date(), + }); + return { type: 'handled' }; + } } + + addMessage({ + type: MessageType.ERROR, + content: `Unknown command: ${trimmed}`, + timestamp: new Date(), + }); + return { type: 'handled' }; + } catch (e) { + addItem( + { + type: MessageType.ERROR, + text: e instanceof Error ? e.message : String(e), + }, + Date.now(), + ); + return { type: 'handled' }; + } finally { + setIsProcessing(false); } - - // --- End of New Tree Traversal Logic --- - - // --- Legacy Fallback Logic (for commands not yet migrated) --- - - const mainCommand = parts[0]; - const subCommand = parts[1]; - const legacyArgs = parts.slice(2).join(' '); - - for (const cmd of legacyCommands) { - if (mainCommand === cmd.name || mainCommand === cmd.altName) { - const actionResult = await cmd.action( - mainCommand, - subCommand, - legacyArgs, - ); - - if (actionResult?.type === 'tool') { - return { - type: 'schedule_tool', - toolName: actionResult.toolName, - toolArgs: actionResult.toolArgs, - }; - } - if (actionResult?.type === 'message') { - addItem( - { - type: - actionResult.messageType === 'error' - ? 
MessageType.ERROR - : MessageType.INFO, - text: actionResult.content, - }, - Date.now(), - ); - } - return { type: 'handled' }; - } - } - - addMessage({ - type: MessageType.ERROR, - content: `Unknown command: ${trimmed}`, - timestamp: new Date(), - }); - return { type: 'handled' }; }, [ + config, addItem, setShowHelp, openAuthDialog, commands, - legacyCommands, commandContext, addMessage, openThemeDialog, openPrivacyNotice, + openEditorDialog, + setQuittingMessages, + setShellConfirmationRequest, + setSessionShellAllowlist, + setIsProcessing, ], ); - const allCommands = useMemo(() => { - // Adapt legacy commands to the new SlashCommand interface - const adaptedLegacyCommands: SlashCommand[] = legacyCommands.map( - (legacyCmd) => ({ - name: legacyCmd.name, - altName: legacyCmd.altName, - description: legacyCmd.description, - action: async (_context: CommandContext, args: string) => { - const parts = args.split(/\s+/); - const subCommand = parts[0] || undefined; - const restOfArgs = parts.slice(1).join(' ') || undefined; - - return legacyCmd.action(legacyCmd.name, subCommand, restOfArgs); - }, - completion: legacyCmd.completion - ? 
async (_context: CommandContext, _partialArg: string) => - legacyCmd.completion!() - : undefined, - }), - ); - - const newCommandNames = new Set(commands.map((c) => c.name)); - const filteredAdaptedLegacy = adaptedLegacyCommands.filter( - (c) => !newCommandNames.has(c.name), - ); - - return [...commands, ...filteredAdaptedLegacy]; - }, [commands, legacyCommands]); - return { handleSlashCommand, - slashCommands: allCommands, + slashCommands: commands, pendingHistoryItems, commandContext, + shellConfirmationRequest, }; }; diff --git a/packages/cli/src/ui/hooks/useAuthCommand.ts b/packages/cli/src/ui/hooks/useAuthCommand.ts index 0f51d3e8c..f300ac369 100644 --- a/packages/cli/src/ui/hooks/useAuthCommand.ts +++ b/packages/cli/src/ui/hooks/useAuthCommand.ts @@ -55,8 +55,12 @@ export const useAuthCommand = ( async (authType: AuthType | undefined, scope: SettingScope) => { if (authType) { await clearCachedCredentialFile(); + settings.setValue(scope, 'selectedAuthType', authType); - if (authType === AuthType.LOGIN_WITH_GOOGLE && config.getNoBrowser()) { + if ( + authType === AuthType.LOGIN_WITH_GOOGLE && + config.isBrowserLaunchSuppressed() + ) { runExitCleanup(); console.log( ` diff --git a/packages/cli/src/ui/hooks/useCompletion.integration.test.ts b/packages/cli/src/ui/hooks/useCompletion.integration.test.ts deleted file mode 100644 index c1eb1fcca..000000000 --- a/packages/cli/src/ui/hooks/useCompletion.integration.test.ts +++ /dev/null @@ -1,755 +0,0 @@ -/** - * @license - * Copyright 2025 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import { describe, it, expect, beforeEach, vi, afterEach } from 'vitest'; -import type { Mocked } from 'vitest'; -import { renderHook, act } from '@testing-library/react'; -import { useCompletion } from './useCompletion.js'; -import * as fs from 'fs/promises'; -import { glob } from 'glob'; -import { CommandContext, SlashCommand } from '../commands/types.js'; -import { Config, FileDiscoveryService } from 
'@qwen-code/qwen-code-core'; - -interface MockConfig { - getFileFilteringRespectGitIgnore: () => boolean; - getEnableRecursiveFileSearch: () => boolean; - getFileService: () => FileDiscoveryService | null; -} - -// Mock dependencies -vi.mock('fs/promises'); -vi.mock('@qwen-code/qwen-code-core', async () => { - const actual = await vi.importActual('@qwen-code/qwen-code-core'); - return { - ...actual, - FileDiscoveryService: vi.fn(), - isNodeError: vi.fn((error) => error.code === 'ENOENT'), - escapePath: vi.fn((path) => path), - unescapePath: vi.fn((path) => path), - getErrorMessage: vi.fn((error) => error.message), - }; -}); -vi.mock('glob'); - -describe('useCompletion git-aware filtering integration', () => { - let mockFileDiscoveryService: Mocked; - let mockConfig: MockConfig; - - const testCwd = '/test/project'; - const slashCommands = [ - { name: 'help', description: 'Show help', action: vi.fn() }, - { name: 'clear', description: 'Clear screen', action: vi.fn() }, - ]; - - // A minimal mock is sufficient for these tests. - const mockCommandContext = {} as CommandContext; - - const mockSlashCommands: SlashCommand[] = [ - { - name: 'help', - altName: '?', - description: 'Show help', - action: vi.fn(), - }, - { - name: 'clear', - description: 'Clear the screen', - action: vi.fn(), - }, - { - name: 'memory', - description: 'Manage memory', - // This command is a parent, no action. 
- subCommands: [ - { - name: 'show', - description: 'Show memory', - action: vi.fn(), - }, - { - name: 'add', - description: 'Add to memory', - action: vi.fn(), - }, - ], - }, - { - name: 'chat', - description: 'Manage chat history', - subCommands: [ - { - name: 'save', - description: 'Save chat', - action: vi.fn(), - }, - { - name: 'resume', - description: 'Resume a saved chat', - action: vi.fn(), - // This command provides its own argument completions - completion: vi - .fn() - .mockResolvedValue([ - 'my-chat-tag-1', - 'my-chat-tag-2', - 'my-channel', - ]), - }, - ], - }, - ]; - - beforeEach(() => { - mockFileDiscoveryService = { - shouldGitIgnoreFile: vi.fn(), - shouldGeminiIgnoreFile: vi.fn(), - shouldIgnoreFile: vi.fn(), - filterFiles: vi.fn(), - getGeminiIgnorePatterns: vi.fn(), - projectRoot: '', - gitIgnoreFilter: null, - geminiIgnoreFilter: null, - } as unknown as Mocked; - - mockConfig = { - getFileFilteringRespectGitIgnore: vi.fn(() => true), - getFileService: vi.fn().mockReturnValue(mockFileDiscoveryService), - getEnableRecursiveFileSearch: vi.fn(() => true), - }; - - vi.mocked(FileDiscoveryService).mockImplementation( - () => mockFileDiscoveryService, - ); - vi.clearAllMocks(); - }); - - afterEach(() => { - vi.restoreAllMocks(); - }); - - it('should filter git-ignored entries from @ completions', async () => { - const globResults = [`${testCwd}/data`, `${testCwd}/dist`]; - vi.mocked(glob).mockResolvedValue(globResults); - - // Mock git ignore service to ignore certain files - mockFileDiscoveryService.shouldGitIgnoreFile.mockImplementation( - (path: string) => path.includes('dist'), - ); - mockFileDiscoveryService.shouldIgnoreFile.mockImplementation( - (path: string, options) => { - if (options?.respectGitIgnore !== false) { - return mockFileDiscoveryService.shouldGitIgnoreFile(path); - } - return false; - }, - ); - - const { result } = renderHook(() => - useCompletion( - '@d', - testCwd, - true, - slashCommands, - mockCommandContext, - mockConfig as 
Config, - ), - ); - - // Wait for async operations to complete - await act(async () => { - await new Promise((resolve) => setTimeout(resolve, 150)); // Account for debounce - }); - - expect(result.current.suggestions).toHaveLength(1); - expect(result.current.suggestions).toEqual( - expect.arrayContaining([{ label: 'data', value: 'data' }]), - ); - expect(result.current.showSuggestions).toBe(true); - }); - - it('should filter git-ignored directories from @ completions', async () => { - // Mock fs.readdir to return both regular and git-ignored directories - vi.mocked(fs.readdir).mockResolvedValue([ - { name: 'src', isDirectory: () => true }, - { name: 'node_modules', isDirectory: () => true }, - { name: 'dist', isDirectory: () => true }, - { name: 'README.md', isDirectory: () => false }, - { name: '.env', isDirectory: () => false }, - ] as unknown as Awaited>); - - // Mock git ignore service to ignore certain files - mockFileDiscoveryService.shouldGitIgnoreFile.mockImplementation( - (path: string) => - path.includes('node_modules') || - path.includes('dist') || - path.includes('.env'), - ); - mockFileDiscoveryService.shouldIgnoreFile.mockImplementation( - (path: string, options) => { - if (options?.respectGitIgnore !== false) { - return mockFileDiscoveryService.shouldGitIgnoreFile(path); - } - return false; - }, - ); - - const { result } = renderHook(() => - useCompletion( - '@', - testCwd, - true, - slashCommands, - mockCommandContext, - mockConfig as Config, - ), - ); - - // Wait for async operations to complete - await act(async () => { - await new Promise((resolve) => setTimeout(resolve, 150)); // Account for debounce - }); - - expect(result.current.suggestions).toHaveLength(2); - expect(result.current.suggestions).toEqual( - expect.arrayContaining([ - { label: 'src/', value: 'src/' }, - { label: 'README.md', value: 'README.md' }, - ]), - ); - expect(result.current.showSuggestions).toBe(true); - }); - - it('should handle recursive search with git-aware 
filtering', async () => { - // Mock the recursive file search scenario - vi.mocked(fs.readdir).mockImplementation( - async (dirPath: string | Buffer | URL) => { - if (dirPath === testCwd) { - return [ - { name: 'src', isDirectory: () => true }, - { name: 'node_modules', isDirectory: () => true }, - { name: 'temp', isDirectory: () => true }, - ] as Array<{ name: string; isDirectory: () => boolean }>; - } - if (dirPath.endsWith('/src')) { - return [ - { name: 'index.ts', isDirectory: () => false }, - { name: 'components', isDirectory: () => true }, - ] as Array<{ name: string; isDirectory: () => boolean }>; - } - if (dirPath.endsWith('/temp')) { - return [{ name: 'temp.log', isDirectory: () => false }] as Array<{ - name: string; - isDirectory: () => boolean; - }>; - } - return [] as Array<{ name: string; isDirectory: () => boolean }>; - }, - ); - - // Mock git ignore service - mockFileDiscoveryService.shouldGitIgnoreFile.mockImplementation( - (path: string) => path.includes('node_modules') || path.includes('temp'), - ); - mockFileDiscoveryService.shouldIgnoreFile.mockImplementation( - (path: string, options) => { - if (options?.respectGitIgnore !== false) { - return mockFileDiscoveryService.shouldGitIgnoreFile(path); - } - return false; - }, - ); - - const { result } = renderHook(() => - useCompletion( - '@t', - testCwd, - true, - slashCommands, - mockCommandContext, - mockConfig as Config, - ), - ); - - // Wait for async operations to complete - await act(async () => { - await new Promise((resolve) => setTimeout(resolve, 150)); - }); - - // Should not include anything from node_modules or dist - const suggestionLabels = result.current.suggestions.map((s) => s.label); - expect(suggestionLabels).not.toContain('temp/'); - expect(suggestionLabels.some((l) => l.includes('node_modules'))).toBe( - false, - ); - }); - - it('should not perform recursive search when disabled in config', async () => { - const globResults = [`${testCwd}/data`, `${testCwd}/dist`]; - 
vi.mocked(glob).mockResolvedValue(globResults); - - // Disable recursive search in the mock config - const mockConfigNoRecursive = { - ...mockConfig, - getEnableRecursiveFileSearch: vi.fn(() => false), - } as unknown as Config; - - vi.mocked(fs.readdir).mockResolvedValue([ - { name: 'data', isDirectory: () => true }, - { name: 'dist', isDirectory: () => true }, - ] as unknown as Awaited>); - - renderHook(() => - useCompletion( - '@d', - testCwd, - true, - slashCommands, - mockCommandContext, - mockConfigNoRecursive, - ), - ); - - await act(async () => { - await new Promise((resolve) => setTimeout(resolve, 150)); - }); - - // `glob` should not be called because recursive search is disabled - expect(glob).not.toHaveBeenCalled(); - // `fs.readdir` should be called for the top-level directory instead - expect(fs.readdir).toHaveBeenCalledWith(testCwd, { withFileTypes: true }); - }); - - it('should work without config (fallback behavior)', async () => { - vi.mocked(fs.readdir).mockResolvedValue([ - { name: 'src', isDirectory: () => true }, - { name: 'node_modules', isDirectory: () => true }, - { name: 'README.md', isDirectory: () => false }, - ] as unknown as Awaited>); - - const { result } = renderHook(() => - useCompletion( - '@', - testCwd, - true, - slashCommands, - mockCommandContext, - undefined, - ), - ); - - await act(async () => { - await new Promise((resolve) => setTimeout(resolve, 150)); - }); - - // Without config, should include all files - expect(result.current.suggestions).toHaveLength(3); - expect(result.current.suggestions).toEqual( - expect.arrayContaining([ - { label: 'src/', value: 'src/' }, - { label: 'node_modules/', value: 'node_modules/' }, - { label: 'README.md', value: 'README.md' }, - ]), - ); - }); - - it('should handle git discovery service initialization failure gracefully', async () => { - vi.mocked(fs.readdir).mockResolvedValue([ - { name: 'src', isDirectory: () => true }, - { name: 'README.md', isDirectory: () => false }, - ] as unknown 
as Awaited>); - - const consoleSpy = vi.spyOn(console, 'warn').mockImplementation(() => {}); - - const { result } = renderHook(() => - useCompletion( - '@', - testCwd, - true, - slashCommands, - mockCommandContext, - mockConfig as Config, - ), - ); - - await act(async () => { - await new Promise((resolve) => setTimeout(resolve, 150)); - }); - - // Since we use centralized service, initialization errors are handled at config level - // This test should verify graceful fallback behavior - expect(result.current.suggestions.length).toBeGreaterThanOrEqual(0); - // Should still show completions even if git discovery fails - expect(result.current.suggestions.length).toBeGreaterThan(0); - - consoleSpy.mockRestore(); - }); - - it('should handle directory-specific completions with git filtering', async () => { - vi.mocked(fs.readdir).mockResolvedValue([ - { name: 'component.tsx', isDirectory: () => false }, - { name: 'temp.log', isDirectory: () => false }, - { name: 'index.ts', isDirectory: () => false }, - ] as unknown as Awaited>); - - mockFileDiscoveryService.shouldGitIgnoreFile.mockImplementation( - (path: string) => path.includes('.log'), - ); - mockFileDiscoveryService.shouldIgnoreFile.mockImplementation( - (path: string, options) => { - if (options?.respectGitIgnore !== false) { - return mockFileDiscoveryService.shouldGitIgnoreFile(path); - } - return false; - }, - ); - - const { result } = renderHook(() => - useCompletion( - '@src/comp', - testCwd, - true, - slashCommands, - mockCommandContext, - mockConfig as Config, - ), - ); - - await act(async () => { - await new Promise((resolve) => setTimeout(resolve, 150)); - }); - - // Should filter out .log files but include matching .tsx files - expect(result.current.suggestions).toEqual([ - { label: 'component.tsx', value: 'component.tsx' }, - ]); - }); - - it('should use glob for top-level @ completions when available', async () => { - const globResults = [`${testCwd}/src/index.ts`, `${testCwd}/README.md`]; - 
vi.mocked(glob).mockResolvedValue(globResults); - - const { result } = renderHook(() => - useCompletion( - '@s', - testCwd, - true, - slashCommands, - mockCommandContext, - mockConfig as Config, - ), - ); - - await act(async () => { - await new Promise((resolve) => setTimeout(resolve, 150)); - }); - - expect(glob).toHaveBeenCalledWith('**/s*', { - cwd: testCwd, - dot: false, - nocase: true, - }); - expect(fs.readdir).not.toHaveBeenCalled(); // Ensure glob is used instead of readdir - expect(result.current.suggestions).toEqual([ - { label: 'README.md', value: 'README.md' }, - { label: 'src/index.ts', value: 'src/index.ts' }, - ]); - }); - - it('should include dotfiles in glob search when input starts with a dot', async () => { - const globResults = [ - `${testCwd}/.env`, - `${testCwd}/.gitignore`, - `${testCwd}/src/index.ts`, - ]; - vi.mocked(glob).mockResolvedValue(globResults); - - const { result } = renderHook(() => - useCompletion( - '@.', - testCwd, - true, - slashCommands, - mockCommandContext, - mockConfig as Config, - ), - ); - - await act(async () => { - await new Promise((resolve) => setTimeout(resolve, 150)); - }); - - expect(glob).toHaveBeenCalledWith('**/.*', { - cwd: testCwd, - dot: true, - nocase: true, - }); - expect(fs.readdir).not.toHaveBeenCalled(); - expect(result.current.suggestions).toEqual([ - { label: '.env', value: '.env' }, - { label: '.gitignore', value: '.gitignore' }, - { label: 'src/index.ts', value: 'src/index.ts' }, - ]); - }); - - it('should suggest top-level command names based on partial input', async () => { - const { result } = renderHook(() => - useCompletion( - '/mem', - '/test/cwd', - true, - mockSlashCommands, - mockCommandContext, - ), - ); - - expect(result.current.suggestions).toEqual([ - { label: 'memory', value: 'memory', description: 'Manage memory' }, - ]); - expect(result.current.showSuggestions).toBe(true); - }); - - it('should suggest commands based on altName', async () => { - const { result } = renderHook(() => - 
useCompletion( - '/?', - '/test/cwd', - true, - mockSlashCommands, - mockCommandContext, - ), - ); - - expect(result.current.suggestions).toEqual([ - { label: 'help', value: 'help', description: 'Show help' }, - ]); - }); - - it('should suggest sub-command names for a parent command', async () => { - const { result } = renderHook(() => - useCompletion( - '/memory a', - '/test/cwd', - true, - mockSlashCommands, - mockCommandContext, - ), - ); - - expect(result.current.suggestions).toEqual([ - { label: 'add', value: 'add', description: 'Add to memory' }, - ]); - }); - - it('should suggest all sub-commands when the query ends with the parent command and a space', async () => { - const { result } = renderHook(() => - useCompletion( - '/memory ', - '/test/cwd', - true, - mockSlashCommands, - mockCommandContext, - ), - ); - - expect(result.current.suggestions).toHaveLength(2); - expect(result.current.suggestions).toEqual( - expect.arrayContaining([ - { label: 'show', value: 'show', description: 'Show memory' }, - { label: 'add', value: 'add', description: 'Add to memory' }, - ]), - ); - }); - - it('should call the command.completion function for argument suggestions', async () => { - const availableTags = ['my-chat-tag-1', 'my-chat-tag-2', 'another-channel']; - const mockCompletionFn = vi - .fn() - .mockImplementation(async (context: CommandContext, partialArg: string) => - availableTags.filter((tag) => tag.startsWith(partialArg)), - ); - - const mockCommandsWithFiltering = JSON.parse( - JSON.stringify(mockSlashCommands), - ) as SlashCommand[]; - - const chatCmd = mockCommandsWithFiltering.find( - (cmd) => cmd.name === 'chat', - ); - if (!chatCmd || !chatCmd.subCommands) { - throw new Error( - "Test setup error: Could not find the 'chat' command with subCommands in the mock data.", - ); - } - - const resumeCmd = chatCmd.subCommands.find((sc) => sc.name === 'resume'); - if (!resumeCmd) { - throw new Error( - "Test setup error: Could not find the 'resume' sub-command in 
the mock data.", - ); - } - - resumeCmd.completion = mockCompletionFn; - - const { result } = renderHook(() => - useCompletion( - '/chat resume my-ch', - '/test/cwd', - true, - mockCommandsWithFiltering, - mockCommandContext, - ), - ); - - await act(async () => { - await new Promise((resolve) => setTimeout(resolve, 150)); - }); - - expect(mockCompletionFn).toHaveBeenCalledWith(mockCommandContext, 'my-ch'); - - expect(result.current.suggestions).toEqual([ - { label: 'my-chat-tag-1', value: 'my-chat-tag-1' }, - { label: 'my-chat-tag-2', value: 'my-chat-tag-2' }, - ]); - }); - - it('should not provide suggestions for a fully typed command that has no sub-commands or argument completion', async () => { - const { result } = renderHook(() => - useCompletion( - '/clear ', - '/test/cwd', - true, - mockSlashCommands, - mockCommandContext, - ), - ); - - expect(result.current.suggestions).toHaveLength(0); - expect(result.current.showSuggestions).toBe(false); - }); - - it('should not provide suggestions for an unknown command', async () => { - const { result } = renderHook(() => - useCompletion( - '/unknown-command', - '/test/cwd', - true, - mockSlashCommands, - mockCommandContext, - ), - ); - - expect(result.current.suggestions).toHaveLength(0); - expect(result.current.showSuggestions).toBe(false); - }); - - it('should suggest sub-commands for a fully typed parent command without a trailing space', async () => { - const { result } = renderHook(() => - useCompletion( - '/memory', // Note: no trailing space - '/test/cwd', - true, - mockSlashCommands, - mockCommandContext, - ), - ); - - // Assert that suggestions for sub-commands are shown immediately - expect(result.current.suggestions).toHaveLength(2); - expect(result.current.suggestions).toEqual( - expect.arrayContaining([ - { label: 'show', value: 'show', description: 'Show memory' }, - { label: 'add', value: 'add', description: 'Add to memory' }, - ]), - ); - expect(result.current.showSuggestions).toBe(true); - }); - - 
it('should NOT provide suggestions for a perfectly typed command that is a leaf node', async () => { - const { result } = renderHook(() => - useCompletion( - '/clear', // No trailing space - '/test/cwd', - true, - mockSlashCommands, - mockCommandContext, - ), - ); - - expect(result.current.suggestions).toHaveLength(0); - expect(result.current.showSuggestions).toBe(false); - }); - - it('should call command.completion with an empty string when args start with a space', async () => { - const mockCompletionFn = vi - .fn() - .mockResolvedValue(['my-chat-tag-1', 'my-chat-tag-2', 'my-channel']); - - const isolatedMockCommands = JSON.parse( - JSON.stringify(mockSlashCommands), - ) as SlashCommand[]; - - const resumeCommand = isolatedMockCommands - .find((cmd) => cmd.name === 'chat') - ?.subCommands?.find((cmd) => cmd.name === 'resume'); - - if (!resumeCommand) { - throw new Error( - 'Test setup failed: could not find resume command in mock', - ); - } - resumeCommand.completion = mockCompletionFn; - - const { result } = renderHook(() => - useCompletion( - '/chat resume ', // Trailing space, no partial argument - '/test/cwd', - true, - isolatedMockCommands, - mockCommandContext, - ), - ); - - await act(async () => { - await new Promise((resolve) => setTimeout(resolve, 150)); - }); - - expect(mockCompletionFn).toHaveBeenCalledWith(mockCommandContext, ''); - expect(result.current.suggestions).toHaveLength(3); - expect(result.current.showSuggestions).toBe(true); - }); - - it('should suggest all top-level commands for the root slash', async () => { - const { result } = renderHook(() => - useCompletion( - '/', - '/test/cwd', - true, - mockSlashCommands, - mockCommandContext, - ), - ); - - expect(result.current.suggestions.length).toBe(mockSlashCommands.length); - expect(result.current.suggestions.map((s) => s.label)).toEqual( - expect.arrayContaining(['help', 'clear', 'memory', 'chat']), - ); - }); - - it('should provide no suggestions for an invalid sub-command', async () => { - 
const { result } = renderHook(() => - useCompletion( - '/memory dothisnow', - '/test/cwd', - true, - mockSlashCommands, - mockCommandContext, - ), - ); - - expect(result.current.suggestions).toHaveLength(0); - expect(result.current.showSuggestions).toBe(false); - }); -}); diff --git a/packages/cli/src/ui/hooks/useCompletion.test.ts b/packages/cli/src/ui/hooks/useCompletion.test.ts index b2790808a..d12f185be 100644 --- a/packages/cli/src/ui/hooks/useCompletion.test.ts +++ b/packages/cli/src/ui/hooks/useCompletion.test.ts @@ -4,941 +4,1244 @@ * SPDX-License-Identifier: Apache-2.0 */ +/** @vitest-environment jsdom */ + import { describe, it, expect, beforeEach, vi, afterEach } from 'vitest'; -import type { Mocked } from 'vitest'; import { renderHook, act } from '@testing-library/react'; import { useCompletion } from './useCompletion.js'; import * as fs from 'fs/promises'; -import { glob } from 'glob'; +import * as path from 'path'; +import * as os from 'os'; import { CommandContext, SlashCommand } from '../commands/types.js'; -import { Config, FileDiscoveryService } from '@google/gemini-cli-core'; - -// Mock dependencies -vi.mock('fs/promises'); -vi.mock('glob'); -vi.mock('@google/gemini-cli-core', async () => { - const actual = await vi.importActual('@google/gemini-cli-core'); - return { - ...actual, - FileDiscoveryService: vi.fn(), - isNodeError: vi.fn((error) => error.code === 'ENOENT'), - escapePath: vi.fn((path) => path), - unescapePath: vi.fn((path) => path), - getErrorMessage: vi.fn((error) => error.message), - }; -}); -vi.mock('glob'); +import { Config, FileDiscoveryService } from '@qwen-code/qwen-code-core'; +import { useTextBuffer, TextBuffer } from '../components/shared/text-buffer.js'; describe('useCompletion', () => { - let mockFileDiscoveryService: Mocked; - let mockConfig: Mocked; - let mockCommandContext: CommandContext; - let mockSlashCommands: SlashCommand[]; + let testRootDir: string; + let mockConfig: Config; - const testCwd = '/test/project'; + // 
A minimal mock is sufficient for these tests. + const mockCommandContext = {} as CommandContext; - beforeEach(() => { - mockFileDiscoveryService = { - shouldGitIgnoreFile: vi.fn(), - shouldGeminiIgnoreFile: vi.fn(), - shouldIgnoreFile: vi.fn(), - filterFiles: vi.fn(), - getGeminiIgnorePatterns: vi.fn(), - projectRoot: '', - gitIgnoreFilter: null, - geminiIgnoreFilter: null, - } as unknown as Mocked; + async function createEmptyDir(...pathSegments: string[]) { + const fullPath = path.join(testRootDir, ...pathSegments); + await fs.mkdir(fullPath, { recursive: true }); + return fullPath; + } + async function createTestFile(content: string, ...pathSegments: string[]) { + const fullPath = path.join(testRootDir, ...pathSegments); + await fs.mkdir(path.dirname(fullPath), { recursive: true }); + await fs.writeFile(fullPath, content); + return fullPath; + } + + // Helper to create real TextBuffer objects within renderHook + function useTextBufferForTest(text: string) { + return useTextBuffer({ + initialText: text, + initialCursorOffset: text.length, + viewport: { width: 80, height: 20 }, + isValidPath: () => false, + onChange: () => {}, + }); + } + + beforeEach(async () => { + testRootDir = await fs.mkdtemp( + path.join(os.tmpdir(), 'completion-unit-test-'), + ); mockConfig = { - getFileFilteringRespectGitIgnore: vi.fn(() => true), - getFileService: vi.fn().mockReturnValue(mockFileDiscoveryService), + getTargetDir: () => testRootDir, + getProjectRoot: () => testRootDir, + getFileFilteringOptions: vi.fn(() => ({ + respectGitIgnore: true, + respectGeminiIgnore: true, + })), getEnableRecursiveFileSearch: vi.fn(() => true), - } as unknown as Mocked; - - mockCommandContext = {} as CommandContext; - - mockSlashCommands = [ - { - name: 'help', - altName: '?', - description: 'Show help', - action: vi.fn(), - }, - { - name: 'clear', - description: 'Clear the screen', - action: vi.fn(), - }, - { - name: 'memory', - description: 'Manage memory', - subCommands: [ - { - name: 'show', - 
description: 'Show memory', - action: vi.fn(), - }, - { - name: 'add', - description: 'Add to memory', - action: vi.fn(), - }, - ], - }, - { - name: 'chat', - description: 'Manage chat history', - subCommands: [ - { - name: 'save', - description: 'Save chat', - action: vi.fn(), - }, - { - name: 'resume', - description: 'Resume a saved chat', - action: vi.fn(), - completion: vi.fn().mockResolvedValue(['chat1', 'chat2']), - }, - ], - }, - ]; + getFileService: vi.fn(() => new FileDiscoveryService(testRootDir)), + } as unknown as Config; vi.clearAllMocks(); }); - afterEach(() => { + afterEach(async () => { vi.restoreAllMocks(); + await fs.rm(testRootDir, { recursive: true, force: true }); }); - describe('Hook initialization and state', () => { - it('should initialize with default state', () => { - const { result } = renderHook(() => - useCompletion( - '', - testCwd, - false, - mockSlashCommands, - mockCommandContext, - mockConfig, - ), - ); - - expect(result.current.suggestions).toEqual([]); - expect(result.current.activeSuggestionIndex).toBe(-1); - expect(result.current.visibleStartIndex).toBe(0); - expect(result.current.showSuggestions).toBe(false); - expect(result.current.isLoadingSuggestions).toBe(false); - }); - - it('should reset state when isActive becomes false', () => { - const { result, rerender } = renderHook( - ({ isActive }) => + describe('Core Hook Behavior', () => { + describe('State Management', () => { + it('should initialize with default state', () => { + const slashCommands = [ + { name: 'dummy', description: 'dummy' }, + ] as unknown as SlashCommand[]; + const { result } = renderHook(() => useCompletion( - '/help', - testCwd, - isActive, - mockSlashCommands, + useTextBufferForTest(''), + testRootDir, + slashCommands, mockCommandContext, mockConfig, ), - { initialProps: { isActive: true } }, - ); + ); - rerender({ isActive: false }); - - expect(result.current.suggestions).toEqual([]); - expect(result.current.activeSuggestionIndex).toBe(-1); - 
expect(result.current.visibleStartIndex).toBe(0); - expect(result.current.showSuggestions).toBe(false); - expect(result.current.isLoadingSuggestions).toBe(false); - }); - - it('should provide required functions', () => { - const { result } = renderHook(() => - useCompletion( - '', - testCwd, - true, - mockSlashCommands, - mockCommandContext, - mockConfig, - ), - ); - - expect(typeof result.current.setActiveSuggestionIndex).toBe('function'); - expect(typeof result.current.setShowSuggestions).toBe('function'); - expect(typeof result.current.resetCompletionState).toBe('function'); - expect(typeof result.current.navigateUp).toBe('function'); - expect(typeof result.current.navigateDown).toBe('function'); - }); - }); - - describe('resetCompletionState', () => { - it('should reset all state to default values', () => { - const { result } = renderHook(() => - useCompletion( - '/help', - testCwd, - true, - mockSlashCommands, - mockCommandContext, - mockConfig, - ), - ); - - act(() => { - result.current.setActiveSuggestionIndex(5); - result.current.setShowSuggestions(true); + expect(result.current.suggestions).toEqual([]); + expect(result.current.activeSuggestionIndex).toBe(-1); + expect(result.current.visibleStartIndex).toBe(0); + expect(result.current.showSuggestions).toBe(false); + expect(result.current.isLoadingSuggestions).toBe(false); }); - act(() => { - result.current.resetCompletionState(); + it('should reset state when isActive becomes false', () => { + const slashCommands = [ + { + name: 'help', + altNames: ['?'], + description: 'Show help', + action: vi.fn(), + }, + ] as unknown as SlashCommand[]; + + const { result, rerender } = renderHook( + ({ text }) => { + const textBuffer = useTextBufferForTest(text); + return useCompletion( + textBuffer, + testRootDir, + slashCommands, + mockCommandContext, + mockConfig, + ); + }, + { initialProps: { text: '/help' } }, + ); + + // Inactive because of the leading space + rerender({ text: ' /help' }); + + 
expect(result.current.suggestions).toEqual([]); + expect(result.current.activeSuggestionIndex).toBe(-1); + expect(result.current.visibleStartIndex).toBe(0); + expect(result.current.showSuggestions).toBe(false); + expect(result.current.isLoadingSuggestions).toBe(false); }); - expect(result.current.suggestions).toEqual([]); - expect(result.current.activeSuggestionIndex).toBe(-1); - expect(result.current.visibleStartIndex).toBe(0); - expect(result.current.showSuggestions).toBe(false); - expect(result.current.isLoadingSuggestions).toBe(false); - }); - }); - - describe('Navigation functions', () => { - it('should handle navigateUp with no suggestions', () => { - const { result } = renderHook(() => - useCompletion( - '', - testCwd, - true, - mockSlashCommands, - mockCommandContext, - mockConfig, - ), - ); - - act(() => { - result.current.navigateUp(); - }); - - expect(result.current.activeSuggestionIndex).toBe(-1); - }); - - it('should handle navigateDown with no suggestions', () => { - const { result } = renderHook(() => - useCompletion( - '', - testCwd, - true, - mockSlashCommands, - mockCommandContext, - mockConfig, - ), - ); - - act(() => { - result.current.navigateDown(); - }); - - expect(result.current.activeSuggestionIndex).toBe(-1); - }); - - it('should navigate up through suggestions with wrap-around', () => { - const { result } = renderHook(() => - useCompletion( - '/h', - testCwd, - true, - mockSlashCommands, - mockCommandContext, - mockConfig, - ), - ); - - expect(result.current.suggestions.length).toBe(1); - expect(result.current.activeSuggestionIndex).toBe(0); - - act(() => { - result.current.navigateUp(); - }); - - expect(result.current.activeSuggestionIndex).toBe(0); - }); - - it('should navigate down through suggestions with wrap-around', () => { - const { result } = renderHook(() => - useCompletion( - '/h', - testCwd, - true, - mockSlashCommands, - mockCommandContext, - mockConfig, - ), - ); - - expect(result.current.suggestions.length).toBe(1); - 
expect(result.current.activeSuggestionIndex).toBe(0); - - act(() => { - result.current.navigateDown(); - }); - - expect(result.current.activeSuggestionIndex).toBe(0); - }); - - it('should handle navigation with multiple suggestions', () => { - const { result } = renderHook(() => - useCompletion( - '/', - testCwd, - true, - mockSlashCommands, - mockCommandContext, - mockConfig, - ), - ); - - expect(result.current.suggestions.length).toBe(4); - expect(result.current.activeSuggestionIndex).toBe(0); - - act(() => { - result.current.navigateDown(); - }); - expect(result.current.activeSuggestionIndex).toBe(1); - - act(() => { - result.current.navigateDown(); - }); - expect(result.current.activeSuggestionIndex).toBe(2); - - act(() => { - result.current.navigateUp(); - }); - expect(result.current.activeSuggestionIndex).toBe(1); - - act(() => { - result.current.navigateUp(); - }); - expect(result.current.activeSuggestionIndex).toBe(0); - - act(() => { - result.current.navigateUp(); - }); - expect(result.current.activeSuggestionIndex).toBe(3); - }); - - it('should handle navigation with large suggestion lists and scrolling', () => { - const largeMockCommands = Array.from({ length: 15 }, (_, i) => ({ - name: `command${i}`, - description: `Command ${i}`, - action: vi.fn(), - })); - - const { result } = renderHook(() => - useCompletion( - '/command', - testCwd, - true, - largeMockCommands, - mockCommandContext, - mockConfig, - ), - ); - - expect(result.current.suggestions.length).toBe(15); - expect(result.current.activeSuggestionIndex).toBe(0); - expect(result.current.visibleStartIndex).toBe(0); - - act(() => { - result.current.navigateUp(); - }); - - expect(result.current.activeSuggestionIndex).toBe(14); - expect(result.current.visibleStartIndex).toBe(Math.max(0, 15 - 8)); - }); - }); - - describe('Slash command completion', () => { - it('should show all commands for root slash', () => { - const { result } = renderHook(() => - useCompletion( - '/', - testCwd, - true, - 
mockSlashCommands, - mockCommandContext, - mockConfig, - ), - ); - - expect(result.current.suggestions).toHaveLength(4); - expect(result.current.suggestions.map((s) => s.label)).toEqual( - expect.arrayContaining(['help', 'clear', 'memory', 'chat']), - ); - expect(result.current.showSuggestions).toBe(true); - expect(result.current.activeSuggestionIndex).toBe(0); - }); - - it('should filter commands by prefix', () => { - const { result } = renderHook(() => - useCompletion( - '/h', - testCwd, - true, - mockSlashCommands, - mockCommandContext, - mockConfig, - ), - ); - - expect(result.current.suggestions).toHaveLength(1); - expect(result.current.suggestions[0].label).toBe('help'); - expect(result.current.suggestions[0].description).toBe('Show help'); - }); - - it('should suggest commands by altName', () => { - const { result } = renderHook(() => - useCompletion( - '/?', - testCwd, - true, - mockSlashCommands, - mockCommandContext, - mockConfig, - ), - ); - - expect(result.current.suggestions).toHaveLength(1); - expect(result.current.suggestions[0].label).toBe('help'); - }); - - it('should not show suggestions for exact leaf command match', () => { - const { result } = renderHook(() => - useCompletion( - '/clear', - testCwd, - true, - mockSlashCommands, - mockCommandContext, - mockConfig, - ), - ); - - expect(result.current.suggestions).toHaveLength(0); - expect(result.current.showSuggestions).toBe(false); - }); - - it('should show sub-commands for parent commands', () => { - const { result } = renderHook(() => - useCompletion( - '/memory', - testCwd, - true, - mockSlashCommands, - mockCommandContext, - mockConfig, - ), - ); - - expect(result.current.suggestions).toHaveLength(2); - expect(result.current.suggestions.map((s) => s.label)).toEqual( - expect.arrayContaining(['show', 'add']), - ); - }); - - it('should show all sub-commands after parent command with space', () => { - const { result } = renderHook(() => - useCompletion( - '/memory ', - testCwd, - true, - 
mockSlashCommands, - mockCommandContext, - mockConfig, - ), - ); - - expect(result.current.suggestions).toHaveLength(2); - expect(result.current.suggestions.map((s) => s.label)).toEqual( - expect.arrayContaining(['show', 'add']), - ); - }); - - it('should filter sub-commands by prefix', () => { - const { result } = renderHook(() => - useCompletion( - '/memory a', - testCwd, - true, - mockSlashCommands, - mockCommandContext, - mockConfig, - ), - ); - - expect(result.current.suggestions).toHaveLength(1); - expect(result.current.suggestions[0].label).toBe('add'); - }); - - it('should handle unknown command gracefully', () => { - const { result } = renderHook(() => - useCompletion( - '/unknown', - testCwd, - true, - mockSlashCommands, - mockCommandContext, - mockConfig, - ), - ); - - expect(result.current.suggestions).toHaveLength(0); - expect(result.current.showSuggestions).toBe(false); - }); - }); - - describe('Command argument completion', () => { - it('should call completion function for command arguments', async () => { - const completionFn = vi.fn().mockResolvedValue(['arg1', 'arg2']); - const commandsWithCompletion = [...mockSlashCommands]; - const chatCommand = commandsWithCompletion.find( - (cmd) => cmd.name === 'chat', - ); - const resumeCommand = chatCommand?.subCommands?.find( - (cmd) => cmd.name === 'resume', - ); - if (resumeCommand) { - resumeCommand.completion = completionFn; - } - - const { result } = renderHook(() => - useCompletion( - '/chat resume ', - testCwd, - true, - commandsWithCompletion, - mockCommandContext, - mockConfig, - ), - ); - - await act(async () => { - await new Promise((resolve) => setTimeout(resolve, 150)); - }); - - expect(completionFn).toHaveBeenCalledWith(mockCommandContext, ''); - expect(result.current.suggestions).toHaveLength(2); - expect(result.current.suggestions.map((s) => s.label)).toEqual([ - 'arg1', - 'arg2', - ]); - }); - - it('should call completion function with partial argument', async () => { - const completionFn 
= vi.fn().mockResolvedValue(['arg1', 'arg2']); - const commandsWithCompletion = [...mockSlashCommands]; - const chatCommand = commandsWithCompletion.find( - (cmd) => cmd.name === 'chat', - ); - const resumeCommand = chatCommand?.subCommands?.find( - (cmd) => cmd.name === 'resume', - ); - if (resumeCommand) { - resumeCommand.completion = completionFn; - } - - renderHook(() => - useCompletion( - '/chat resume ar', - testCwd, - true, - commandsWithCompletion, - mockCommandContext, - mockConfig, - ), - ); - - await act(async () => { - await new Promise((resolve) => setTimeout(resolve, 150)); - }); - - expect(completionFn).toHaveBeenCalledWith(mockCommandContext, 'ar'); - }); - - it('should handle completion function that returns null', async () => { - const completionFn = vi.fn().mockResolvedValue(null); - const commandsWithCompletion = [...mockSlashCommands]; - const chatCommand = commandsWithCompletion.find( - (cmd) => cmd.name === 'chat', - ); - const resumeCommand = chatCommand?.subCommands?.find( - (cmd) => cmd.name === 'resume', - ); - if (resumeCommand) { - resumeCommand.completion = completionFn; - } - - const { result } = renderHook(() => - useCompletion( - '/chat resume ', - testCwd, - true, - commandsWithCompletion, - mockCommandContext, - mockConfig, - ), - ); - - await act(async () => { - await new Promise((resolve) => setTimeout(resolve, 150)); - }); - - expect(result.current.suggestions).toHaveLength(0); - expect(result.current.showSuggestions).toBe(false); - }); - }); - - describe('File path completion (@-syntax)', () => { - beforeEach(() => { - vi.mocked(fs.readdir).mockResolvedValue([ - { name: 'file1.txt', isDirectory: () => false }, - { name: 'file2.js', isDirectory: () => false }, - { name: 'folder1', isDirectory: () => true }, - { name: '.hidden', isDirectory: () => false }, - ] as unknown as Awaited>); - }); - - it('should show file completions for @ prefix', async () => { - const { result } = renderHook(() => - useCompletion( - '@', - testCwd, - 
true, - mockSlashCommands, - mockCommandContext, - mockConfig, - ), - ); - - await act(async () => { - await new Promise((resolve) => setTimeout(resolve, 150)); - }); - - expect(result.current.suggestions).toHaveLength(3); - expect(result.current.suggestions.map((s) => s.label)).toEqual( - expect.arrayContaining(['file1.txt', 'file2.js', 'folder1/']), - ); - }); - - it('should filter files by prefix', async () => { - // Mock for recursive search since enableRecursiveFileSearch is true - vi.mocked(glob).mockResolvedValue([ - `${testCwd}/file1.txt`, - `${testCwd}/file2.js`, - ]); - - const { result } = renderHook(() => - useCompletion( - '@file', - testCwd, - true, - mockSlashCommands, - mockCommandContext, - mockConfig, - ), - ); - - await act(async () => { - await new Promise((resolve) => setTimeout(resolve, 150)); - }); - - expect(result.current.suggestions).toHaveLength(2); - expect(result.current.suggestions.map((s) => s.label)).toEqual( - expect.arrayContaining(['file1.txt', 'file2.js']), - ); - }); - - it('should include hidden files when prefix starts with dot', async () => { - // Mock for recursive search since enableRecursiveFileSearch is true - vi.mocked(glob).mockResolvedValue([`${testCwd}/.hidden`]); - - const { result } = renderHook(() => - useCompletion( - '@.', - testCwd, - true, - mockSlashCommands, - mockCommandContext, - mockConfig, - ), - ); - - await act(async () => { - await new Promise((resolve) => setTimeout(resolve, 150)); - }); - - expect(result.current.suggestions).toHaveLength(1); - expect(result.current.suggestions[0].label).toBe('.hidden'); - }); - - it('should handle ENOENT error gracefully', async () => { - const enoentError = new Error('No such file or directory'); - (enoentError as Error & { code: string }).code = 'ENOENT'; - vi.mocked(fs.readdir).mockRejectedValue(enoentError); - - const { result } = renderHook(() => - useCompletion( - '@nonexistent', - testCwd, - true, - mockSlashCommands, - mockCommandContext, - mockConfig, - ), - 
); - - await act(async () => { - await new Promise((resolve) => setTimeout(resolve, 150)); - }); - - expect(result.current.suggestions).toHaveLength(0); - expect(result.current.showSuggestions).toBe(false); - }); - - it('should handle other errors by resetting state', async () => { - const consoleErrorSpy = vi - .spyOn(console, 'error') - .mockImplementation(() => {}); - vi.mocked(fs.readdir).mockRejectedValue(new Error('Permission denied')); - - const { result } = renderHook(() => - useCompletion( - '@', - testCwd, - true, - mockSlashCommands, - mockCommandContext, - mockConfig, - ), - ); - - await act(async () => { - await new Promise((resolve) => setTimeout(resolve, 150)); - }); - - expect(consoleErrorSpy).toHaveBeenCalled(); - expect(result.current.suggestions).toHaveLength(0); - expect(result.current.showSuggestions).toBe(false); - expect(result.current.isLoadingSuggestions).toBe(false); - - consoleErrorSpy.mockRestore(); - }); - }); - - describe('Debouncing', () => { - it('should debounce file completion requests', async () => { - // Mock for recursive search since enableRecursiveFileSearch is true - vi.mocked(glob).mockResolvedValue([`${testCwd}/file1.txt`]); - - const { rerender } = renderHook( - ({ query }) => + it('should reset all state to default values', () => { + const slashCommands = [ + { + name: 'help', + description: 'Show help', + }, + ] as unknown as SlashCommand[]; + + const { result } = renderHook(() => useCompletion( - query, - testCwd, - true, - mockSlashCommands, + useTextBufferForTest('/help'), + testRootDir, + slashCommands, mockCommandContext, mockConfig, ), - { initialProps: { query: '@f' } }, - ); + ); - rerender({ query: '@fi' }); - rerender({ query: '@fil' }); - rerender({ query: '@file' }); + act(() => { + result.current.setActiveSuggestionIndex(5); + result.current.setShowSuggestions(true); + }); - await act(async () => { - await new Promise((resolve) => setTimeout(resolve, 150)); + act(() => { + 
result.current.resetCompletionState(); + }); + + expect(result.current.suggestions).toEqual([]); + expect(result.current.activeSuggestionIndex).toBe(-1); + expect(result.current.visibleStartIndex).toBe(0); + expect(result.current.showSuggestions).toBe(false); + expect(result.current.isLoadingSuggestions).toBe(false); + }); + }); + + describe('Navigation', () => { + it('should handle navigateUp with no suggestions', () => { + const slashCommands = [ + { name: 'dummy', description: 'dummy' }, + ] as unknown as SlashCommand[]; + const { result } = renderHook(() => + useCompletion( + useTextBufferForTest(''), + testRootDir, + slashCommands, + mockCommandContext, + mockConfig, + ), + ); + + act(() => { + result.current.navigateUp(); + }); + + expect(result.current.activeSuggestionIndex).toBe(-1); }); - expect(glob).toHaveBeenCalledTimes(1); + it('should handle navigateDown with no suggestions', () => { + const slashCommands = [ + { name: 'dummy', description: 'dummy' }, + ] as unknown as SlashCommand[]; + const { result } = renderHook(() => + useCompletion( + useTextBufferForTest(''), + testRootDir, + slashCommands, + mockCommandContext, + mockConfig, + ), + ); + + act(() => { + result.current.navigateDown(); + }); + + expect(result.current.activeSuggestionIndex).toBe(-1); + }); + + it('should navigate up through suggestions with wrap-around', () => { + const slashCommands = [ + { + name: 'help', + description: 'Show help', + }, + ] as unknown as SlashCommand[]; + const { result } = renderHook(() => + useCompletion( + useTextBufferForTest('/h'), + testRootDir, + slashCommands, + mockCommandContext, + mockConfig, + ), + ); + + expect(result.current.suggestions.length).toBe(1); + expect(result.current.activeSuggestionIndex).toBe(0); + + act(() => { + result.current.navigateUp(); + }); + + expect(result.current.activeSuggestionIndex).toBe(0); + }); + + it('should navigate down through suggestions with wrap-around', () => { + const slashCommands = [ + { + name: 'help', + 
description: 'Show help', + }, + ] as unknown as SlashCommand[]; + const { result } = renderHook(() => + useCompletion( + useTextBufferForTest('/h'), + testRootDir, + slashCommands, + mockCommandContext, + mockConfig, + ), + ); + + expect(result.current.suggestions.length).toBe(1); + expect(result.current.activeSuggestionIndex).toBe(0); + + act(() => { + result.current.navigateDown(); + }); + + expect(result.current.activeSuggestionIndex).toBe(0); + }); + + it('should handle navigation with multiple suggestions', () => { + const slashCommands = [ + { name: 'help', description: 'Show help' }, + { name: 'stats', description: 'Show stats' }, + { name: 'clear', description: 'Clear screen' }, + { name: 'memory', description: 'Manage memory' }, + { name: 'chat', description: 'Manage chat' }, + ] as unknown as SlashCommand[]; + const { result } = renderHook(() => + useCompletion( + useTextBufferForTest('/'), + testRootDir, + slashCommands, + mockCommandContext, + mockConfig, + ), + ); + + expect(result.current.suggestions.length).toBe(5); + expect(result.current.activeSuggestionIndex).toBe(0); + + act(() => { + result.current.navigateDown(); + }); + expect(result.current.activeSuggestionIndex).toBe(1); + + act(() => { + result.current.navigateDown(); + }); + expect(result.current.activeSuggestionIndex).toBe(2); + + act(() => { + result.current.navigateUp(); + }); + expect(result.current.activeSuggestionIndex).toBe(1); + + act(() => { + result.current.navigateUp(); + }); + expect(result.current.activeSuggestionIndex).toBe(0); + + act(() => { + result.current.navigateUp(); + }); + expect(result.current.activeSuggestionIndex).toBe(4); + }); + + it('should handle navigation with large suggestion lists and scrolling', () => { + const largeMockCommands = Array.from({ length: 15 }, (_, i) => ({ + name: `command${i}`, + description: `Command ${i}`, + })) as unknown as SlashCommand[]; + + const { result } = renderHook(() => + useCompletion( + useTextBufferForTest('/command'), + 
testRootDir, + largeMockCommands, + mockCommandContext, + mockConfig, + ), + ); + + expect(result.current.suggestions.length).toBe(15); + expect(result.current.activeSuggestionIndex).toBe(0); + expect(result.current.visibleStartIndex).toBe(0); + + act(() => { + result.current.navigateUp(); + }); + + expect(result.current.activeSuggestionIndex).toBe(14); + expect(result.current.visibleStartIndex).toBe(Math.max(0, 15 - 8)); + }); }); }); - describe('Query handling edge cases', () => { - it('should handle empty query', () => { - const { result } = renderHook(() => - useCompletion( - '', - testCwd, - true, - mockSlashCommands, - mockCommandContext, - mockConfig, - ), - ); + describe('Slash Command Completion (`/`)', () => { + describe('Top-Level Commands', () => { + it('should suggest all top-level commands for the root slash', async () => { + const slashCommands = [ + { + name: 'help', + altNames: ['?'], + description: 'Show help', + }, + { + name: 'stats', + altNames: ['usage'], + description: 'check session stats. 
Usage: /stats [model|tools]', + }, + { + name: 'clear', + description: 'Clear the screen', + }, + { + name: 'memory', + description: 'Manage memory', + subCommands: [ + { + name: 'show', + description: 'Show memory', + }, + ], + }, + { + name: 'chat', + description: 'Manage chat history', + }, + ] as unknown as SlashCommand[]; + const { result } = renderHook(() => + useCompletion( + useTextBufferForTest('/'), + testRootDir, + slashCommands, + mockCommandContext, + ), + ); - expect(result.current.suggestions).toHaveLength(0); - expect(result.current.showSuggestions).toBe(false); - }); - - it('should handle query without slash or @', () => { - const { result } = renderHook(() => - useCompletion( - 'regular text', - testCwd, - true, - mockSlashCommands, - mockCommandContext, - mockConfig, - ), - ); - - expect(result.current.suggestions).toHaveLength(0); - expect(result.current.showSuggestions).toBe(false); - }); - - it('should handle query with whitespace', () => { - const { result } = renderHook(() => - useCompletion( - ' /hel', - testCwd, - true, - mockSlashCommands, - mockCommandContext, - mockConfig, - ), - ); - - expect(result.current.suggestions).toHaveLength(1); - expect(result.current.suggestions[0].label).toBe('help'); - }); - - it('should handle @ at the end of query', async () => { - // Mock for recursive search since enableRecursiveFileSearch is true - vi.mocked(glob).mockResolvedValue([`${testCwd}/file1.txt`]); - - const { result } = renderHook(() => - useCompletion( - 'some text @', - testCwd, - true, - mockSlashCommands, - mockCommandContext, - mockConfig, - ), - ); - - // Wait for completion - await act(async () => { - await new Promise((resolve) => setTimeout(resolve, 150)); + expect(result.current.suggestions.length).toBe(slashCommands.length); + expect(result.current.suggestions.map((s) => s.label)).toEqual( + expect.arrayContaining(['help', 'clear', 'memory', 'chat', 'stats']), + ); }); - // Should process the @ query and get suggestions - 
expect(result.current.isLoadingSuggestions).toBe(false); - expect(result.current.suggestions.length).toBeGreaterThanOrEqual(0); + it('should filter commands based on partial input', async () => { + const slashCommands = [ + { + name: 'memory', + description: 'Manage memory', + }, + ] as unknown as SlashCommand[]; + const { result } = renderHook(() => + useCompletion( + useTextBufferForTest('/mem'), + testRootDir, + slashCommands, + mockCommandContext, + ), + ); + + expect(result.current.suggestions).toEqual([ + { label: 'memory', value: 'memory', description: 'Manage memory' }, + ]); + expect(result.current.showSuggestions).toBe(true); + }); + + it('should suggest commands based on partial altNames', async () => { + const slashCommands = [ + { + name: 'stats', + altNames: ['usage'], + description: 'check session stats. Usage: /stats [model|tools]', + }, + ] as unknown as SlashCommand[]; + const { result } = renderHook(() => + useCompletion( + useTextBufferForTest('/usag'), // part of the word "usage" + testRootDir, + slashCommands, + mockCommandContext, + ), + ); + + expect(result.current.suggestions).toEqual([ + { + label: 'stats', + value: 'stats', + description: 'check session stats. 
Usage: /stats [model|tools]', + }, + ]); + }); + + it('should NOT provide suggestions for a perfectly typed command that is a leaf node', async () => { + const slashCommands = [ + { + name: 'clear', + description: 'Clear the screen', + action: vi.fn(), + }, + ] as unknown as SlashCommand[]; + const { result } = renderHook(() => + useCompletion( + useTextBufferForTest('/clear'), // No trailing space + testRootDir, + slashCommands, + mockCommandContext, + ), + ); + + expect(result.current.suggestions).toHaveLength(0); + expect(result.current.showSuggestions).toBe(false); + }); + + it.each([['/?'], ['/usage']])( + 'should not suggest commands when altNames is fully typed', + async (query) => { + const mockSlashCommands = [ + { + name: 'help', + altNames: ['?'], + description: 'Show help', + action: vi.fn(), + }, + { + name: 'stats', + altNames: ['usage'], + description: 'check session stats. Usage: /stats [model|tools]', + action: vi.fn(), + }, + ] as unknown as SlashCommand[]; + + const { result } = renderHook(() => + useCompletion( + useTextBufferForTest(query), + testRootDir, + mockSlashCommands, + mockCommandContext, + ), + ); + + expect(result.current.suggestions).toHaveLength(0); + }, + ); + + it('should not provide suggestions for a fully typed command that has no sub-commands or argument completion', async () => { + const slashCommands = [ + { + name: 'clear', + description: 'Clear the screen', + }, + ] as unknown as SlashCommand[]; + const { result } = renderHook(() => + useCompletion( + useTextBufferForTest('/clear '), + testRootDir, + slashCommands, + mockCommandContext, + ), + ); + + expect(result.current.suggestions).toHaveLength(0); + expect(result.current.showSuggestions).toBe(false); + }); + + it('should not provide suggestions for an unknown command', async () => { + const slashCommands = [ + { + name: 'help', + description: 'Show help', + }, + ] as unknown as SlashCommand[]; + const { result } = renderHook(() => + useCompletion( + 
useTextBufferForTest('/unknown-command'), + testRootDir, + slashCommands, + mockCommandContext, + ), + ); + + expect(result.current.suggestions).toHaveLength(0); + expect(result.current.showSuggestions).toBe(false); + }); + }); + + describe('Sub-Commands', () => { + it('should suggest sub-commands for a parent command', async () => { + const slashCommands = [ + { + name: 'memory', + description: 'Manage memory', + subCommands: [ + { + name: 'show', + description: 'Show memory', + }, + { + name: 'add', + description: 'Add to memory', + }, + ], + }, + ] as unknown as SlashCommand[]; + + const { result } = renderHook(() => + useCompletion( + useTextBufferForTest('/memory'), // Note: no trailing space + testRootDir, + slashCommands, + mockCommandContext, + ), + ); + + // Assert that suggestions for sub-commands are shown immediately + expect(result.current.suggestions).toHaveLength(2); + expect(result.current.suggestions).toEqual( + expect.arrayContaining([ + { label: 'show', value: 'show', description: 'Show memory' }, + { label: 'add', value: 'add', description: 'Add to memory' }, + ]), + ); + expect(result.current.showSuggestions).toBe(true); + }); + + it('should suggest all sub-commands when the query ends with the parent command and a space', async () => { + const slashCommands = [ + { + name: 'memory', + description: 'Manage memory', + subCommands: [ + { + name: 'show', + description: 'Show memory', + }, + { + name: 'add', + description: 'Add to memory', + }, + ], + }, + ] as unknown as SlashCommand[]; + const { result } = renderHook(() => + useCompletion( + useTextBufferForTest('/memory'), + testRootDir, + slashCommands, + mockCommandContext, + ), + ); + + expect(result.current.suggestions).toHaveLength(2); + expect(result.current.suggestions).toEqual( + expect.arrayContaining([ + { label: 'show', value: 'show', description: 'Show memory' }, + { label: 'add', value: 'add', description: 'Add to memory' }, + ]), + ); + }); + + it('should filter sub-commands by 
prefix', async () => { + const slashCommands = [ + { + name: 'memory', + description: 'Manage memory', + subCommands: [ + { + name: 'show', + description: 'Show memory', + }, + { + name: 'add', + description: 'Add to memory', + }, + ], + }, + ] as unknown as SlashCommand[]; + const { result } = renderHook(() => + useCompletion( + useTextBufferForTest('/memory a'), + testRootDir, + slashCommands, + mockCommandContext, + ), + ); + + expect(result.current.suggestions).toEqual([ + { label: 'add', value: 'add', description: 'Add to memory' }, + ]); + }); + + it('should provide no suggestions for an invalid sub-command', async () => { + const slashCommands = [ + { + name: 'memory', + description: 'Manage memory', + subCommands: [ + { + name: 'show', + description: 'Show memory', + }, + { + name: 'add', + description: 'Add to memory', + }, + ], + }, + ] as unknown as SlashCommand[]; + const { result } = renderHook(() => + useCompletion( + useTextBufferForTest('/memory dothisnow'), + testRootDir, + slashCommands, + mockCommandContext, + ), + ); + + expect(result.current.suggestions).toHaveLength(0); + expect(result.current.showSuggestions).toBe(false); + }); + }); + + describe('Argument Completion', () => { + it('should call the command.completion function for argument suggestions', async () => { + const availableTags = [ + 'my-chat-tag-1', + 'my-chat-tag-2', + 'another-channel', + ]; + const mockCompletionFn = vi + .fn() + .mockImplementation( + async (_context: CommandContext, partialArg: string) => + availableTags.filter((tag) => tag.startsWith(partialArg)), + ); + + const slashCommands = [ + { + name: 'chat', + description: 'Manage chat history', + subCommands: [ + { + name: 'resume', + description: 'Resume a saved chat', + completion: mockCompletionFn, + }, + ], + }, + ] as unknown as SlashCommand[]; + + const { result } = renderHook(() => + useCompletion( + useTextBufferForTest('/chat resume my-ch'), + testRootDir, + slashCommands, + mockCommandContext, + ), + ); + + 
await act(async () => { + await new Promise((resolve) => setTimeout(resolve, 150)); + }); + + expect(mockCompletionFn).toHaveBeenCalledWith( + mockCommandContext, + 'my-ch', + ); + + expect(result.current.suggestions).toEqual([ + { label: 'my-chat-tag-1', value: 'my-chat-tag-1' }, + { label: 'my-chat-tag-2', value: 'my-chat-tag-2' }, + ]); + }); + + it('should call command.completion with an empty string when args start with a space', async () => { + const mockCompletionFn = vi + .fn() + .mockResolvedValue(['my-chat-tag-1', 'my-chat-tag-2', 'my-channel']); + + const slashCommands = [ + { + name: 'chat', + description: 'Manage chat history', + subCommands: [ + { + name: 'resume', + description: 'Resume a saved chat', + completion: mockCompletionFn, + }, + ], + }, + ] as unknown as SlashCommand[]; + + const { result } = renderHook(() => + useCompletion( + useTextBufferForTest('/chat resume '), + testRootDir, + slashCommands, + mockCommandContext, + ), + ); + + await act(async () => { + await new Promise((resolve) => setTimeout(resolve, 150)); + }); + + expect(mockCompletionFn).toHaveBeenCalledWith(mockCommandContext, ''); + expect(result.current.suggestions).toHaveLength(3); + expect(result.current.showSuggestions).toBe(true); + }); + + it('should handle completion function that returns null', async () => { + const completionFn = vi.fn().mockResolvedValue(null); + const slashCommands = [ + { + name: 'chat', + description: 'Manage chat history', + subCommands: [ + { + name: 'resume', + description: 'Resume a saved chat', + completion: completionFn, + }, + ], + }, + ] as unknown as SlashCommand[]; + + const { result } = renderHook(() => + useCompletion( + useTextBufferForTest('/chat resume '), + testRootDir, + slashCommands, + mockCommandContext, + mockConfig, + ), + ); + + await act(async () => { + await new Promise((resolve) => setTimeout(resolve, 150)); + }); + + expect(result.current.suggestions).toHaveLength(0); + 
expect(result.current.showSuggestions).toBe(false); + }); }); }); - describe('File sorting behavior', () => { - it('should prioritize source files over test files with same base name', async () => { - // Mock glob to return files with same base name but different extensions - vi.mocked(glob).mockResolvedValue([ - `${testCwd}/component.test.ts`, - `${testCwd}/component.ts`, - `${testCwd}/utils.spec.js`, - `${testCwd}/utils.js`, - `${testCwd}/api.test.tsx`, - `${testCwd}/api.tsx`, + describe('File Path Completion (`@`)', () => { + describe('Basic Completion', () => { + it('should use glob for top-level @ completions when available', async () => { + await createTestFile('', 'src', 'index.ts'); + await createTestFile('', 'derp', 'script.ts'); + await createTestFile('', 'README.md'); + + const { result } = renderHook(() => + useCompletion( + useTextBufferForTest('@s'), + testRootDir, + [], + mockCommandContext, + mockConfig, + ), + ); + + await act(async () => { + await new Promise((resolve) => setTimeout(resolve, 150)); + }); + + expect(result.current.suggestions).toHaveLength(2); + expect(result.current.suggestions).toEqual( + expect.arrayContaining([ + { + label: 'derp/script.ts', + value: 'derp/script.ts', + }, + { label: 'src', value: 'src' }, + ]), + ); + }); + + it('should handle directory-specific completions with git filtering', async () => { + await createEmptyDir('.git'); + await createTestFile('*.log', '.gitignore'); + await createTestFile('', 'src', 'component.tsx'); + await createTestFile('', 'src', 'temp.log'); + await createTestFile('', 'src', 'index.ts'); + + const { result } = renderHook(() => + useCompletion( + useTextBufferForTest('@src/comp'), + testRootDir, + [], + mockCommandContext, + mockConfig, + ), + ); + + await act(async () => { + await new Promise((resolve) => setTimeout(resolve, 150)); + }); + + // Should filter out .log files but include matching .tsx files + expect(result.current.suggestions).toEqual([ + { label: 'component.tsx', value: 
'component.tsx' }, + ]); + }); + + it('should include dotfiles in glob search when input starts with a dot', async () => { + await createTestFile('', '.env'); + await createTestFile('', '.gitignore'); + await createTestFile('', 'src', 'index.ts'); + + const { result } = renderHook(() => + useCompletion( + useTextBufferForTest('@.'), + testRootDir, + [], + mockCommandContext, + mockConfig, + ), + ); + + await act(async () => { + await new Promise((resolve) => setTimeout(resolve, 150)); + }); + + expect(result.current.suggestions).toEqual([ + { label: '.env', value: '.env' }, + { label: '.gitignore', value: '.gitignore' }, + ]); + }); + }); + + describe('Configuration-based Behavior', () => { + it('should not perform recursive search when disabled in config', async () => { + const mockConfigNoRecursive = { + ...mockConfig, + getEnableRecursiveFileSearch: vi.fn(() => false), + } as unknown as Config; + + await createEmptyDir('data'); + await createEmptyDir('dist'); + + const { result } = renderHook(() => + useCompletion( + useTextBufferForTest('@d'), + testRootDir, + [], + mockCommandContext, + mockConfigNoRecursive, + ), + ); + + await act(async () => { + await new Promise((resolve) => setTimeout(resolve, 150)); + }); + + expect(result.current.suggestions).toEqual([ + { label: 'data/', value: 'data/' }, + { label: 'dist/', value: 'dist/' }, + ]); + }); + + it('should work without config (fallback behavior)', async () => { + await createEmptyDir('src'); + await createEmptyDir('node_modules'); + await createTestFile('', 'README.md'); + + const { result } = renderHook(() => + useCompletion( + useTextBufferForTest('@'), + testRootDir, + [], + mockCommandContext, + undefined, + ), + ); + + await act(async () => { + await new Promise((resolve) => setTimeout(resolve, 150)); + }); + + // Without config, should include all files + expect(result.current.suggestions).toHaveLength(3); + expect(result.current.suggestions).toEqual( + expect.arrayContaining([ + { label: 'src/', 
value: 'src/' }, + { label: 'node_modules/', value: 'node_modules/' }, + { label: 'README.md', value: 'README.md' }, + ]), + ); + }); + + it('should handle git discovery service initialization failure gracefully', async () => { + // Intentionally don't create a .git directory to cause an initialization failure. + await createEmptyDir('src'); + await createTestFile('', 'README.md'); + + const consoleSpy = vi + .spyOn(console, 'warn') + .mockImplementation(() => {}); + + const { result } = renderHook(() => + useCompletion( + useTextBufferForTest('@'), + testRootDir, + [], + mockCommandContext, + mockConfig, + ), + ); + + await act(async () => { + await new Promise((resolve) => setTimeout(resolve, 150)); + }); + + // Since we use centralized service, initialization errors are handled at config level + // This test should verify graceful fallback behavior + expect(result.current.suggestions.length).toBeGreaterThanOrEqual(0); + // Should still show completions even if git discovery fails + expect(result.current.suggestions.length).toBeGreaterThan(0); + + consoleSpy.mockRestore(); + }); + }); + + describe('Git-Aware Filtering', () => { + it('should filter git-ignored entries from @ completions', async () => { + await createEmptyDir('.git'); + await createTestFile('dist', '.gitignore'); + await createEmptyDir('data'); + + const { result } = renderHook(() => + useCompletion( + useTextBufferForTest('@d'), + testRootDir, + [], + mockCommandContext, + mockConfig, + ), + ); + + // Wait for async operations to complete + await act(async () => { + await new Promise((resolve) => setTimeout(resolve, 150)); // Account for debounce + }); + + expect(result.current.suggestions).toEqual( + expect.arrayContaining([{ label: 'data', value: 'data' }]), + ); + expect(result.current.showSuggestions).toBe(true); + }); + + it('should filter git-ignored directories from @ completions', async () => { + await createEmptyDir('.git'); + await createTestFile('node_modules\ndist\n.env', 
'.gitignore'); + // gitignored entries + await createEmptyDir('node_modules'); + await createEmptyDir('dist'); + await createTestFile('', '.env'); + + // visible + await createEmptyDir('src'); + await createTestFile('', 'README.md'); + + const { result } = renderHook(() => + useCompletion( + useTextBufferForTest('@'), + testRootDir, + [], + mockCommandContext, + mockConfig, + ), + ); + + // Wait for async operations to complete + await act(async () => { + await new Promise((resolve) => setTimeout(resolve, 150)); // Account for debounce + }); + + expect(result.current.suggestions).toEqual([ + { label: 'README.md', value: 'README.md' }, + { label: 'src/', value: 'src/' }, + ]); + expect(result.current.showSuggestions).toBe(true); + }); + + it('should handle recursive search with git-aware filtering', async () => { + await createEmptyDir('.git'); + await createTestFile('node_modules/\ntemp/', '.gitignore'); + await createTestFile('', 'data', 'test.txt'); + await createEmptyDir('dist'); + await createEmptyDir('node_modules'); + await createTestFile('', 'src', 'index.ts'); + await createEmptyDir('src', 'components'); + await createTestFile('', 'temp', 'temp.log'); + + const { result } = renderHook(() => + useCompletion( + useTextBufferForTest('@t'), + testRootDir, + [], + mockCommandContext, + mockConfig, + ), + ); + + await act(async () => { + await new Promise((resolve) => setTimeout(resolve, 150)); + }); + + // Should not include anything from node_modules or dist + const suggestionLabels = result.current.suggestions.map((s) => s.label); + expect(suggestionLabels).not.toContain('temp/'); + expect(suggestionLabels).not.toContain('node_modules/'); + }); + }); + }); + + describe('handleAutocomplete', () => { + it('should complete a partial command', () => { + const slashCommands = [ + { + name: 'memory', + description: 'Manage memory', + subCommands: [ + { + name: 'show', + description: 'Show memory', + }, + { + name: 'add', + description: 'Add to memory', + }, + ], + 
}, + ] as unknown as SlashCommand[]; + // Create a mock buffer that we can spy on directly + const mockBuffer = { + text: '/mem', + setText: vi.fn(), + } as unknown as TextBuffer; + + const { result } = renderHook(() => + useCompletion( + mockBuffer, + testRootDir, + slashCommands, + mockCommandContext, + mockConfig, + ), + ); + + expect(result.current.suggestions.map((s) => s.value)).toEqual([ + 'memory', ]); - mockFileDiscoveryService.shouldIgnoreFile.mockReturnValue(false); + act(() => { + result.current.handleAutocomplete(0); + }); + + expect(mockBuffer.setText).toHaveBeenCalledWith('/memory '); + }); + + it('should append a sub-command when the parent is complete', () => { + const mockBuffer = { + text: '/memory', + setText: vi.fn(), + } as unknown as TextBuffer; + const slashCommands = [ + { + name: 'memory', + description: 'Manage memory', + subCommands: [ + { + name: 'show', + description: 'Show memory', + }, + { + name: 'add', + description: 'Add to memory', + }, + ], + }, + ] as unknown as SlashCommand[]; const { result } = renderHook(() => useCompletion( - '@comp', - testCwd, - true, - mockSlashCommands, + mockBuffer, + testRootDir, + slashCommands, mockCommandContext, mockConfig, ), ); - await act(async () => { - await new Promise((resolve) => setTimeout(resolve, 150)); - }); - - expect(result.current.suggestions).toHaveLength(6); - - // Extract labels for easier testing - const labels = result.current.suggestions.map((s) => s.label); - - // Verify the exact sorted order: source files should come before their test counterparts - expect(labels).toEqual([ - 'api.tsx', - 'api.test.tsx', - 'component.ts', - 'component.test.ts', - 'utils.js', - 'utils.spec.js', + // Suggestions are populated by useEffect + expect(result.current.suggestions.map((s) => s.value)).toEqual([ + 'show', + 'add', ]); - }); - }); - describe('Config and FileDiscoveryService integration', () => { - it('should work without config', async () => { - 
vi.mocked(fs.readdir).mockResolvedValue([ - { name: 'file1.txt', isDirectory: () => false }, - ] as unknown as Awaited>); - - const { result } = renderHook(() => - useCompletion( - '@', - testCwd, - true, - mockSlashCommands, - mockCommandContext, - undefined, - ), - ); - - await act(async () => { - await new Promise((resolve) => setTimeout(resolve, 150)); + act(() => { + result.current.handleAutocomplete(1); // index 1 is 'add' }); - expect(result.current.suggestions).toHaveLength(1); - expect(result.current.suggestions[0].label).toBe('file1.txt'); + expect(mockBuffer.setText).toHaveBeenCalledWith('/memory add '); }); - it('should respect file filtering when config is provided', async () => { - vi.mocked(fs.readdir).mockResolvedValue([ - { name: 'file1.txt', isDirectory: () => false }, - { name: 'ignored.log', isDirectory: () => false }, - ] as unknown as Awaited>); - - mockFileDiscoveryService.shouldIgnoreFile.mockImplementation( - (path: string) => path.includes('.log'), - ); + it('should complete a command with an alternative name', () => { + const mockBuffer = { + text: '/?', + setText: vi.fn(), + } as unknown as TextBuffer; + const slashCommands = [ + { + name: 'memory', + description: 'Manage memory', + subCommands: [ + { + name: 'show', + description: 'Show memory', + }, + { + name: 'add', + description: 'Add to memory', + }, + ], + }, + ] as unknown as SlashCommand[]; const { result } = renderHook(() => useCompletion( - '@', - testCwd, - true, - mockSlashCommands, + mockBuffer, + testRootDir, + slashCommands, mockCommandContext, mockConfig, ), ); - await act(async () => { - await new Promise((resolve) => setTimeout(resolve, 150)); + result.current.suggestions.push({ + label: 'help', + value: 'help', + description: 'Show help', }); - expect(result.current.suggestions).toHaveLength(1); - expect(result.current.suggestions[0].label).toBe('file1.txt'); + act(() => { + result.current.handleAutocomplete(0); + }); + + 
expect(mockBuffer.setText).toHaveBeenCalledWith('/help '); + }); + + it('should complete a file path', async () => { + const mockBuffer = { + text: '@src/fi', + lines: ['@src/fi'], + cursor: [0, 7], + setText: vi.fn(), + replaceRangeByOffset: vi.fn(), + } as unknown as TextBuffer; + const slashCommands = [ + { + name: 'memory', + description: 'Manage memory', + subCommands: [ + { + name: 'show', + description: 'Show memory', + }, + { + name: 'add', + description: 'Add to memory', + }, + ], + }, + ] as unknown as SlashCommand[]; + + const { result } = renderHook(() => + useCompletion( + mockBuffer, + testRootDir, + slashCommands, + mockCommandContext, + mockConfig, + ), + ); + + result.current.suggestions.push({ + label: 'file1.txt', + value: 'file1.txt', + }); + + act(() => { + result.current.handleAutocomplete(0); + }); + + expect(mockBuffer.replaceRangeByOffset).toHaveBeenCalledWith( + 5, // after '@src/' + mockBuffer.text.length, + 'file1.txt', + ); }); }); }); diff --git a/packages/cli/src/ui/hooks/useCompletion.ts b/packages/cli/src/ui/hooks/useCompletion.ts index 21cf057de..672448282 100644 --- a/packages/cli/src/ui/hooks/useCompletion.ts +++ b/packages/cli/src/ui/hooks/useCompletion.ts @@ -4,7 +4,7 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { useState, useEffect, useCallback } from 'react'; +import { useState, useEffect, useCallback, useMemo } from 'react'; import * as fs from 'fs/promises'; import * as path from 'path'; import { glob } from 'glob'; @@ -15,12 +15,16 @@ import { getErrorMessage, Config, FileDiscoveryService, + DEFAULT_FILE_FILTERING_OPTIONS, } from '@qwen-code/qwen-code-core'; import { MAX_SUGGESTIONS_TO_SHOW, Suggestion, } from '../components/SuggestionsDisplay.js'; import { CommandContext, SlashCommand } from '../commands/types.js'; +import { TextBuffer } from '../components/shared/text-buffer.js'; +import { isSlashCommand } from '../utils/commandUtils.js'; +import { toCodePoints } from '../utils/textUtils.js'; export interface 
UseCompletionReturn { suggestions: Suggestion[]; @@ -28,18 +32,19 @@ export interface UseCompletionReturn { visibleStartIndex: number; showSuggestions: boolean; isLoadingSuggestions: boolean; + isPerfectMatch: boolean; setActiveSuggestionIndex: React.Dispatch>; setShowSuggestions: React.Dispatch>; resetCompletionState: () => void; navigateUp: () => void; navigateDown: () => void; + handleAutocomplete: (indexToUse: number) => void; } export function useCompletion( - query: string, + buffer: TextBuffer, cwd: string, - isActive: boolean, - slashCommands: SlashCommand[], + slashCommands: readonly SlashCommand[], commandContext: CommandContext, config?: Config, ): UseCompletionReturn { @@ -50,6 +55,7 @@ export function useCompletion( const [showSuggestions, setShowSuggestions] = useState(false); const [isLoadingSuggestions, setIsLoadingSuggestions] = useState(false); + const [isPerfectMatch, setIsPerfectMatch] = useState(false); const resetCompletionState = useCallback(() => { setSuggestions([]); @@ -57,6 +63,7 @@ export function useCompletion( setVisibleStartIndex(0); setShowSuggestions(false); setIsLoadingSuggestions(false); + setIsPerfectMatch(false); }, []); const navigateUp = useCallback(() => { @@ -118,15 +125,50 @@ export function useCompletion( }); }, [suggestions.length]); + // Check if cursor is after @ or / without unescaped spaces + const isActive = useMemo(() => { + if (isSlashCommand(buffer.text.trim())) { + return true; + } + + // For other completions like '@', we search backwards from the cursor. + const [row, col] = buffer.cursor; + const currentLine = buffer.lines[row] || ''; + const codePoints = toCodePoints(currentLine); + + for (let i = col - 1; i >= 0; i--) { + const char = codePoints[i]; + + if (char === ' ') { + // Check for unescaped spaces. + let backslashCount = 0; + for (let j = i - 1; j >= 0 && codePoints[j] === '\\'; j--) { + backslashCount++; + } + if (backslashCount % 2 === 0) { + return false; // Inactive on unescaped space. 
+ } + } else if (char === '@') { + // Active if we find an '@' before any unescaped space. + return true; + } + } + + return false; + }, [buffer.text, buffer.cursor, buffer.lines]); + useEffect(() => { if (!isActive) { resetCompletionState(); return; } - const trimmedQuery = query.trimStart(); + const trimmedQuery = buffer.text.trimStart(); if (trimmedQuery.startsWith('/')) { + // Always reset perfect match at the beginning of processing. + setIsPerfectMatch(false); + const fullPath = trimmedQuery.substring(1); const hasTrailingSpace = trimmedQuery.endsWith(' '); @@ -144,7 +186,7 @@ export function useCompletion( } // Traverse the Command Tree using the tentative completed path - let currentLevel: SlashCommand[] | undefined = slashCommands; + let currentLevel: readonly SlashCommand[] | undefined = slashCommands; let leafCommand: SlashCommand | null = null; for (const part of commandPathParts) { @@ -154,11 +196,13 @@ export function useCompletion( break; } const found: SlashCommand | undefined = currentLevel.find( - (cmd) => cmd.name === part || cmd.altName === part, + (cmd) => cmd.name === part || cmd.altNames?.includes(part), ); if (found) { leafCommand = found; - currentLevel = found.subCommands; + currentLevel = found.subCommands as + | readonly SlashCommand[] + | undefined; } else { leafCommand = null; currentLevel = []; @@ -170,7 +214,7 @@ export function useCompletion( if (!hasTrailingSpace && currentLevel) { const exactMatchAsParent = currentLevel.find( (cmd) => - (cmd.name === partial || cmd.altName === partial) && + (cmd.name === partial || cmd.altNames?.includes(partial)) && cmd.subCommands, ); @@ -183,6 +227,24 @@ export function useCompletion( } } + // Check for perfect, executable match + if (!hasTrailingSpace) { + if (leafCommand && partial === '' && leafCommand.action) { + // Case: /command - command has action, no sub-commands were suggested + setIsPerfectMatch(true); + } else if (currentLevel) { + // Case: /command subcommand + const perfectMatch = 
currentLevel.find( + (cmd) => + (cmd.name === partial || cmd.altNames?.includes(partial)) && + cmd.action, + ); + if (perfectMatch) { + setIsPerfectMatch(true); + } + } + } + const depth = commandPathParts.length; // Provide Suggestions based on the now-corrected context @@ -214,16 +276,17 @@ export function useCompletion( let potentialSuggestions = commandsToSearch.filter( (cmd) => cmd.description && - (cmd.name.startsWith(partial) || cmd.altName?.startsWith(partial)), + (cmd.name.startsWith(partial) || + cmd.altNames?.some((alt) => alt.startsWith(partial))), ); // If a user's input is an exact match and it is a leaf command, // enter should submit immediately. if (potentialSuggestions.length > 0 && !hasTrailingSpace) { const perfectMatch = potentialSuggestions.find( - (s) => s.name === partial, + (s) => s.name === partial || s.altNames?.includes(partial), ); - if (perfectMatch && !perfectMatch.subCommands) { + if (perfectMatch && perfectMatch.action) { potentialSuggestions = []; } } @@ -247,13 +310,13 @@ export function useCompletion( } // Handle At Command Completion - const atIndex = query.lastIndexOf('@'); + const atIndex = buffer.text.lastIndexOf('@'); if (atIndex === -1) { resetCompletionState(); return; } - const partialPath = query.substring(atIndex + 1); + const partialPath = buffer.text.substring(atIndex + 1); const lastSlashIndex = partialPath.lastIndexOf('/'); const baseDirRelative = lastSlashIndex === -1 @@ -364,13 +427,10 @@ export function useCompletion( }); const suggestions: Suggestion[] = files - .map((file: string) => { - const relativePath = path.relative(cwd, file); - return { - label: relativePath, - value: escapePath(relativePath), - }; - }) + .map((file: string) => ({ + label: file, + value: escapePath(file), + })) .filter((s) => { if (fileDiscoveryService) { return !fileDiscoveryService.shouldIgnoreFile( @@ -392,10 +452,8 @@ export function useCompletion( const fileDiscoveryService = config ? 
config.getFileService() : null; const enableRecursiveSearch = config?.getEnableRecursiveFileSearch() ?? true; - const filterOptions = { - respectGitIgnore: config?.getFileFilteringRespectGitIgnore() ?? true, - respectGeminiIgnore: true, - }; + const filterOptions = + config?.getFileFilteringOptions() ?? DEFAULT_FILE_FILTERING_OPTIONS; try { // If there's no slash, or it's the root, do a recursive search from cwd @@ -414,7 +472,7 @@ export function useCompletion( fetchedSuggestions = await findFilesRecursively( cwd, prefix, - fileDiscoveryService, + null, filterOptions, ); } @@ -457,6 +515,13 @@ export function useCompletion( }); } + // Like glob, we always return forwardslashes, even in windows. + fetchedSuggestions = fetchedSuggestions.map((suggestion) => ({ + ...suggestion, + label: suggestion.label.replace(/\\/g, '/'), + value: suggestion.value.replace(/\\/g, '/'), + })); + // Sort by depth, then directories first, then alphabetically fetchedSuggestions.sort((a, b) => { const depthA = (a.label.match(/\//g) || []).length; @@ -519,7 +584,7 @@ export function useCompletion( clearTimeout(debounceTimeout); }; }, [ - query, + buffer.text, cwd, isActive, resetCompletionState, @@ -528,16 +593,96 @@ export function useCompletion( config, ]); + const handleAutocomplete = useCallback( + (indexToUse: number) => { + if (indexToUse < 0 || indexToUse >= suggestions.length) { + return; + } + const query = buffer.text; + const suggestion = suggestions[indexToUse].value; + + if (query.trimStart().startsWith('/')) { + const hasTrailingSpace = query.endsWith(' '); + const parts = query + .trimStart() + .substring(1) + .split(/\s+/) + .filter(Boolean); + + let isParentPath = false; + // If there's no trailing space, we need to check if the current query + // is already a complete path to a parent command. 
+ if (!hasTrailingSpace) { + let currentLevel: readonly SlashCommand[] | undefined = slashCommands; + for (let i = 0; i < parts.length; i++) { + const part = parts[i]; + const found: SlashCommand | undefined = currentLevel?.find( + (cmd) => cmd.name === part || cmd.altNames?.includes(part), + ); + + if (found) { + if (i === parts.length - 1 && found.subCommands) { + isParentPath = true; + } + currentLevel = found.subCommands as + | readonly SlashCommand[] + | undefined; + } else { + // Path is invalid, so it can't be a parent path. + currentLevel = undefined; + break; + } + } + } + + // Determine the base path of the command. + // - If there's a trailing space, the whole command is the base. + // - If it's a known parent path, the whole command is the base. + // - If the last part is a complete argument, the whole command is the base. + // - Otherwise, the base is everything EXCEPT the last partial part. + const lastPart = parts.length > 0 ? parts[parts.length - 1] : ''; + const isLastPartACompleteArg = + lastPart.startsWith('--') && lastPart.includes('='); + + const basePath = + hasTrailingSpace || isParentPath || isLastPartACompleteArg + ? 
parts + : parts.slice(0, -1); + const newValue = `/${[...basePath, suggestion].join(' ')} `; + + buffer.setText(newValue); + } else { + const atIndex = query.lastIndexOf('@'); + if (atIndex === -1) return; + const pathPart = query.substring(atIndex + 1); + const lastSlashIndexInPath = pathPart.lastIndexOf('/'); + let autoCompleteStartIndex = atIndex + 1; + if (lastSlashIndexInPath !== -1) { + autoCompleteStartIndex += lastSlashIndexInPath + 1; + } + buffer.replaceRangeByOffset( + autoCompleteStartIndex, + buffer.text.length, + suggestion, + ); + } + resetCompletionState(); + }, + [resetCompletionState, buffer, suggestions, slashCommands], + ); + return { suggestions, activeSuggestionIndex, visibleStartIndex, showSuggestions, isLoadingSuggestions, + isPerfectMatch, setActiveSuggestionIndex, setShowSuggestions, resetCompletionState, navigateUp, navigateDown, + handleAutocomplete, }; } diff --git a/packages/cli/src/ui/hooks/useConsoleMessages.test.ts b/packages/cli/src/ui/hooks/useConsoleMessages.test.ts index 3b225ecf4..b1d1acd66 100644 --- a/packages/cli/src/ui/hooks/useConsoleMessages.test.ts +++ b/packages/cli/src/ui/hooks/useConsoleMessages.test.ts @@ -5,127 +5,105 @@ */ import { act, renderHook } from '@testing-library/react'; -import { useConsoleMessages } from './useConsoleMessages.js'; -import { ConsoleMessageItem } from '../types.js'; - -// Mock setTimeout and clearTimeout -vi.useFakeTimers(); +import { vi } from 'vitest'; +import { useConsoleMessages } from './useConsoleMessages'; +import { useCallback } from 'react'; describe('useConsoleMessages', () => { + beforeEach(() => { + vi.useFakeTimers(); + }); + + afterEach(() => { + vi.runOnlyPendingTimers(); + vi.useRealTimers(); + }); + + const useTestableConsoleMessages = () => { + const { handleNewMessage, ...rest } = useConsoleMessages(); + const log = useCallback( + (content: string) => handleNewMessage({ type: 'log', content, count: 1 }), + [handleNewMessage], + ); + const error = useCallback( + (content: 
string) => + handleNewMessage({ type: 'error', content, count: 1 }), + [handleNewMessage], + ); + return { + ...rest, + log, + error, + clearConsoleMessages: rest.clearConsoleMessages, + }; + }; + it('should initialize with an empty array of console messages', () => { - const { result } = renderHook(() => useConsoleMessages()); + const { result } = renderHook(() => useTestableConsoleMessages()); expect(result.current.consoleMessages).toEqual([]); }); - it('should add a new message', () => { - const { result } = renderHook(() => useConsoleMessages()); - const message: ConsoleMessageItem = { - type: 'log', - content: 'Test message', - count: 1, - }; + it('should add a new message when log is called', async () => { + const { result } = renderHook(() => useTestableConsoleMessages()); act(() => { - result.current.handleNewMessage(message); + result.current.log('Test message'); }); - act(() => { - vi.runAllTimers(); // Process the queue - }); - - expect(result.current.consoleMessages).toEqual([{ ...message, count: 1 }]); - }); - - it('should consolidate identical consecutive messages', () => { - const { result } = renderHook(() => useConsoleMessages()); - const message: ConsoleMessageItem = { - type: 'log', - content: 'Test message', - count: 1, - }; - - act(() => { - result.current.handleNewMessage(message); - result.current.handleNewMessage(message); - }); - - act(() => { - vi.runAllTimers(); - }); - - expect(result.current.consoleMessages).toEqual([{ ...message, count: 2 }]); - }); - - it('should not consolidate different messages', () => { - const { result } = renderHook(() => useConsoleMessages()); - const message1: ConsoleMessageItem = { - type: 'log', - content: 'Test message 1', - count: 1, - }; - const message2: ConsoleMessageItem = { - type: 'error', - content: 'Test message 2', - count: 1, - }; - - act(() => { - result.current.handleNewMessage(message1); - result.current.handleNewMessage(message2); - }); - - act(() => { - vi.runAllTimers(); + await act(async 
() => { + await vi.advanceTimersByTimeAsync(20); }); expect(result.current.consoleMessages).toEqual([ - { ...message1, count: 1 }, - { ...message2, count: 1 }, + { type: 'log', content: 'Test message', count: 1 }, ]); }); - it('should not consolidate messages if type is different', () => { - const { result } = renderHook(() => useConsoleMessages()); - const message1: ConsoleMessageItem = { - type: 'log', - content: 'Test message', - count: 1, - }; - const message2: ConsoleMessageItem = { - type: 'error', - content: 'Test message', - count: 1, - }; + it('should batch and count identical consecutive messages', async () => { + const { result } = renderHook(() => useTestableConsoleMessages()); act(() => { - result.current.handleNewMessage(message1); - result.current.handleNewMessage(message2); + result.current.log('Test message'); + result.current.log('Test message'); + result.current.log('Test message'); }); - act(() => { - vi.runAllTimers(); + await act(async () => { + await vi.advanceTimersByTimeAsync(20); }); expect(result.current.consoleMessages).toEqual([ - { ...message1, count: 1 }, - { ...message2, count: 1 }, + { type: 'log', content: 'Test message', count: 3 }, ]); }); - it('should clear console messages', () => { - const { result } = renderHook(() => useConsoleMessages()); - const message: ConsoleMessageItem = { - type: 'log', - content: 'Test message', - count: 1, - }; + it('should not batch different messages', async () => { + const { result } = renderHook(() => useTestableConsoleMessages()); act(() => { - result.current.handleNewMessage(message); + result.current.log('First message'); + result.current.error('Second message'); }); + await act(async () => { + await vi.advanceTimersByTimeAsync(20); + }); + + expect(result.current.consoleMessages).toEqual([ + { type: 'log', content: 'First message', count: 1 }, + { type: 'error', content: 'Second message', count: 1 }, + ]); + }); + + it('should clear all messages when clearConsoleMessages is called', async () 
=> { + const { result } = renderHook(() => useTestableConsoleMessages()); + act(() => { - vi.runAllTimers(); + result.current.log('A message'); + }); + + await act(async () => { + await vi.advanceTimersByTimeAsync(20); }); expect(result.current.consoleMessages).toHaveLength(1); @@ -134,79 +112,36 @@ describe('useConsoleMessages', () => { result.current.clearConsoleMessages(); }); - expect(result.current.consoleMessages).toEqual([]); + expect(result.current.consoleMessages).toHaveLength(0); }); - it('should clear pending timeout on clearConsoleMessages', () => { - const { result } = renderHook(() => useConsoleMessages()); - const message: ConsoleMessageItem = { - type: 'log', - content: 'Test message', - count: 1, - }; + it('should clear the pending timeout when clearConsoleMessages is called', () => { + const { result } = renderHook(() => useTestableConsoleMessages()); + const clearTimeoutSpy = vi.spyOn(global, 'clearTimeout'); act(() => { - result.current.handleNewMessage(message); // This schedules a timeout + result.current.log('A message'); }); act(() => { result.current.clearConsoleMessages(); }); - // Ensure the queue is empty and no more messages are processed - act(() => { - vi.runAllTimers(); // If timeout wasn't cleared, this would process the queue - }); - - expect(result.current.consoleMessages).toEqual([]); + expect(clearTimeoutSpy).toHaveBeenCalled(); + clearTimeoutSpy.mockRestore(); }); - it('should clear message queue on clearConsoleMessages', () => { - const { result } = renderHook(() => useConsoleMessages()); - const message: ConsoleMessageItem = { - type: 'log', - content: 'Test message', - count: 1, - }; + it('should clean up the timeout on unmount', () => { + const { result, unmount } = renderHook(() => useTestableConsoleMessages()); + const clearTimeoutSpy = vi.spyOn(global, 'clearTimeout'); act(() => { - // Add a message but don't process the queue yet - result.current.handleNewMessage(message); - }); - - act(() => { - 
result.current.clearConsoleMessages(); - }); - - // Process any pending timeouts (should be none related to message queue) - act(() => { - vi.runAllTimers(); - }); - - // The consoleMessages should be empty because the queue was cleared before processing - expect(result.current.consoleMessages).toEqual([]); - }); - - it('should cleanup timeout on unmount', () => { - const { result, unmount } = renderHook(() => useConsoleMessages()); - const message: ConsoleMessageItem = { - type: 'log', - content: 'Test message', - count: 1, - }; - - act(() => { - result.current.handleNewMessage(message); + result.current.log('A message'); }); unmount(); - // This is a bit indirect. We check that clearTimeout was called. - // If clearTimeout was not called, and we run timers, an error might occur - // or the state might change, which it shouldn't after unmount. - // Vitest's vi.clearAllTimers() or specific checks for clearTimeout calls - // would be more direct if available and easy to set up here. - // For now, we rely on the useEffect cleanup pattern. 
- expect(vi.getTimerCount()).toBe(0); // Check if all timers are cleared + expect(clearTimeoutSpy).toHaveBeenCalled(); + clearTimeoutSpy.mockRestore(); }); }); diff --git a/packages/cli/src/ui/hooks/useConsoleMessages.ts b/packages/cli/src/ui/hooks/useConsoleMessages.ts index 52ffbd399..3b71560e4 100644 --- a/packages/cli/src/ui/hooks/useConsoleMessages.ts +++ b/packages/cli/src/ui/hooks/useConsoleMessages.ts @@ -4,7 +4,13 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { useCallback, useEffect, useRef, useState } from 'react'; +import { + useCallback, + useEffect, + useReducer, + useRef, + useTransition, +} from 'react'; import { ConsoleMessageItem } from '../types.js'; export interface UseConsoleMessagesReturn { @@ -13,75 +19,90 @@ export interface UseConsoleMessagesReturn { clearConsoleMessages: () => void; } -export function useConsoleMessages(): UseConsoleMessagesReturn { - const [consoleMessages, setConsoleMessages] = useState( - [], - ); - const messageQueueRef = useRef([]); - const messageQueueTimeoutRef = useRef(null); +type Action = + | { type: 'ADD_MESSAGES'; payload: ConsoleMessageItem[] } + | { type: 'CLEAR' }; - const processMessageQueue = useCallback(() => { - if (messageQueueRef.current.length === 0) { - return; - } - - const newMessagesToAdd = messageQueueRef.current; - messageQueueRef.current = []; - - setConsoleMessages((prevMessages) => { - const newMessages = [...prevMessages]; - newMessagesToAdd.forEach((queuedMessage) => { +function consoleMessagesReducer( + state: ConsoleMessageItem[], + action: Action, +): ConsoleMessageItem[] { + switch (action.type) { + case 'ADD_MESSAGES': { + const newMessages = [...state]; + for (const queuedMessage of action.payload) { + const lastMessage = newMessages[newMessages.length - 1]; if ( - newMessages.length > 0 && - newMessages[newMessages.length - 1].type === queuedMessage.type && - newMessages[newMessages.length - 1].content === queuedMessage.content + lastMessage && + lastMessage.type === 
queuedMessage.type && + lastMessage.content === queuedMessage.content ) { - newMessages[newMessages.length - 1].count = - (newMessages[newMessages.length - 1].count || 1) + 1; + // Create a new object for the last message to ensure React detects + // the change, preventing mutation of the existing state object. + newMessages[newMessages.length - 1] = { + ...lastMessage, + count: lastMessage.count + 1, + }; } else { newMessages.push({ ...queuedMessage, count: 1 }); } - }); + } return newMessages; - }); - - messageQueueTimeoutRef.current = null; // Allow next scheduling - }, []); - - const scheduleQueueProcessing = useCallback(() => { - if (messageQueueTimeoutRef.current === null) { - messageQueueTimeoutRef.current = setTimeout( - processMessageQueue, - 0, - ) as unknown as number; } - }, [processMessageQueue]); + case 'CLEAR': + return []; + default: + return state; + } +} + +export function useConsoleMessages(): UseConsoleMessagesReturn { + const [consoleMessages, dispatch] = useReducer(consoleMessagesReducer, []); + const messageQueueRef = useRef([]); + const timeoutRef = useRef(null); + const [, startTransition] = useTransition(); + + const processQueue = useCallback(() => { + if (messageQueueRef.current.length > 0) { + const messagesToProcess = messageQueueRef.current; + messageQueueRef.current = []; + startTransition(() => { + dispatch({ type: 'ADD_MESSAGES', payload: messagesToProcess }); + }); + } + timeoutRef.current = null; + }, []); const handleNewMessage = useCallback( (message: ConsoleMessageItem) => { messageQueueRef.current.push(message); - scheduleQueueProcessing(); + if (!timeoutRef.current) { + // Batch updates using a timeout. 16ms is a reasonable delay to batch + // rapid-fire messages without noticeable lag. 
+ timeoutRef.current = setTimeout(processQueue, 16); + } }, - [scheduleQueueProcessing], + [processQueue], ); const clearConsoleMessages = useCallback(() => { - setConsoleMessages([]); - if (messageQueueTimeoutRef.current !== null) { - clearTimeout(messageQueueTimeoutRef.current); - messageQueueTimeoutRef.current = null; + if (timeoutRef.current) { + clearTimeout(timeoutRef.current); + timeoutRef.current = null; } messageQueueRef.current = []; + startTransition(() => { + dispatch({ type: 'CLEAR' }); + }); }, []); + // Cleanup on unmount useEffect( - () => - // Cleanup on unmount - () => { - if (messageQueueTimeoutRef.current !== null) { - clearTimeout(messageQueueTimeoutRef.current); - } - }, + () => () => { + if (timeoutRef.current) { + clearTimeout(timeoutRef.current); + } + }, [], ); diff --git a/packages/cli/src/ui/hooks/useFocus.test.ts b/packages/cli/src/ui/hooks/useFocus.test.ts new file mode 100644 index 000000000..5e17951e9 --- /dev/null +++ b/packages/cli/src/ui/hooks/useFocus.test.ts @@ -0,0 +1,119 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { renderHook, act } from '@testing-library/react'; +import { EventEmitter } from 'events'; +import { useFocus } from './useFocus.js'; +import { vi } from 'vitest'; +import { useStdin, useStdout } from 'ink'; + +// Mock the ink hooks +vi.mock('ink', async (importOriginal) => { + const original = await importOriginal(); + return { + ...original, + useStdin: vi.fn(), + useStdout: vi.fn(), + }; +}); + +const mockedUseStdin = vi.mocked(useStdin); +const mockedUseStdout = vi.mocked(useStdout); + +describe('useFocus', () => { + let stdin: EventEmitter; + let stdout: { write: vi.Func }; + + beforeEach(() => { + stdin = new EventEmitter(); + stdout = { write: vi.fn() }; + mockedUseStdin.mockReturnValue({ stdin } as ReturnType); + mockedUseStdout.mockReturnValue({ stdout } as unknown as ReturnType< + typeof useStdout + >); + }); + + afterEach(() => { + 
vi.clearAllMocks(); + }); + + it('should initialize with focus and enable focus reporting', () => { + const { result } = renderHook(() => useFocus()); + + expect(result.current).toBe(true); + expect(stdout.write).toHaveBeenCalledWith('\x1b[?1004h'); + }); + + it('should set isFocused to false when a focus-out event is received', () => { + const { result } = renderHook(() => useFocus()); + + // Initial state is focused + expect(result.current).toBe(true); + + // Simulate focus-out event + act(() => { + stdin.emit('data', Buffer.from('\x1b[O')); + }); + + // State should now be unfocused + expect(result.current).toBe(false); + }); + + it('should set isFocused to true when a focus-in event is received', () => { + const { result } = renderHook(() => useFocus()); + + // Simulate focus-out to set initial state to false + act(() => { + stdin.emit('data', Buffer.from('\x1b[O')); + }); + expect(result.current).toBe(false); + + // Simulate focus-in event + act(() => { + stdin.emit('data', Buffer.from('\x1b[I')); + }); + + // State should now be focused + expect(result.current).toBe(true); + }); + + it('should clean up and disable focus reporting on unmount', () => { + const { unmount } = renderHook(() => useFocus()); + + // Ensure listener was attached + expect(stdin.listenerCount('data')).toBe(1); + + unmount(); + + // Assert that the cleanup function was called + expect(stdout.write).toHaveBeenCalledWith('\x1b[?1004l'); + expect(stdin.listenerCount('data')).toBe(0); + }); + + it('should handle multiple focus events correctly', () => { + const { result } = renderHook(() => useFocus()); + + act(() => { + stdin.emit('data', Buffer.from('\x1b[O')); + }); + expect(result.current).toBe(false); + + act(() => { + stdin.emit('data', Buffer.from('\x1b[O')); + }); + expect(result.current).toBe(false); + + act(() => { + stdin.emit('data', Buffer.from('\x1b[I')); + }); + expect(result.current).toBe(true); + + act(() => { + stdin.emit('data', Buffer.from('\x1b[I')); + }); + 
expect(result.current).toBe(true); + }); +}); diff --git a/packages/cli/src/ui/hooks/useFocus.ts b/packages/cli/src/ui/hooks/useFocus.ts new file mode 100644 index 000000000..6c9a6daa9 --- /dev/null +++ b/packages/cli/src/ui/hooks/useFocus.ts @@ -0,0 +1,48 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { useStdin, useStdout } from 'ink'; +import { useEffect, useState } from 'react'; + +// ANSI escape codes to enable/disable terminal focus reporting +const ENABLE_FOCUS_REPORTING = '\x1b[?1004h'; +const DISABLE_FOCUS_REPORTING = '\x1b[?1004l'; + +// ANSI escape codes for focus events +const FOCUS_IN = '\x1b[I'; +const FOCUS_OUT = '\x1b[O'; + +export const useFocus = () => { + const { stdin } = useStdin(); + const { stdout } = useStdout(); + const [isFocused, setIsFocused] = useState(true); + + useEffect(() => { + const handleData = (data: Buffer) => { + const sequence = data.toString(); + const lastFocusIn = sequence.lastIndexOf(FOCUS_IN); + const lastFocusOut = sequence.lastIndexOf(FOCUS_OUT); + + if (lastFocusIn > lastFocusOut) { + setIsFocused(true); + } else if (lastFocusOut > lastFocusIn) { + setIsFocused(false); + } + }; + + // Enable focus reporting + stdout?.write(ENABLE_FOCUS_REPORTING); + stdin?.on('data', handleData); + + return () => { + // Disable focus reporting on cleanup + stdout?.write(DISABLE_FOCUS_REPORTING); + stdin?.removeListener('data', handleData); + }; + }, [stdin, stdout]); + + return isFocused; +}; diff --git a/packages/cli/src/ui/hooks/useGeminiStream.test.tsx b/packages/cli/src/ui/hooks/useGeminiStream.test.tsx index 2a92c4907..89de9da25 100644 --- a/packages/cli/src/ui/hooks/useGeminiStream.test.tsx +++ b/packages/cli/src/ui/hooks/useGeminiStream.test.tsx @@ -16,7 +16,12 @@ import { TrackedExecutingToolCall, TrackedCancelledToolCall, } from './useReactToolScheduler.js'; -import { Config, EditorType, AuthType } from '@qwen-code/qwen-code-core'; +import { + Config, + EditorType, + 
AuthType, + GeminiEventType as ServerGeminiEventType, +} from '@qwen-code/qwen-code-core'; import { Part, PartListUnion } from '@google/genai'; import { UseHistoryManagerReturn } from './useHistoryManager.js'; import { @@ -1053,6 +1058,65 @@ describe('useGeminiStream', () => { expect(mockSendMessageStream).not.toHaveBeenCalled(); // No LLM call made }); }); + + it('should call Gemini with prompt content when slash command returns a `submit_prompt` action', async () => { + const customCommandResult: SlashCommandProcessorResult = { + type: 'submit_prompt', + content: 'This is the actual prompt from the command file.', + }; + mockHandleSlashCommand.mockResolvedValue(customCommandResult); + + const { result, mockSendMessageStream: localMockSendMessageStream } = + renderTestHook(); + + await act(async () => { + await result.current.submitQuery('/my-custom-command'); + }); + + await waitFor(() => { + expect(mockHandleSlashCommand).toHaveBeenCalledWith( + '/my-custom-command', + ); + + expect(localMockSendMessageStream).not.toHaveBeenCalledWith( + '/my-custom-command', + expect.anything(), + expect.anything(), + ); + + expect(localMockSendMessageStream).toHaveBeenCalledWith( + 'This is the actual prompt from the command file.', + expect.any(AbortSignal), + expect.any(String), + ); + + expect(mockScheduleToolCalls).not.toHaveBeenCalled(); + }); + }); + + it('should correctly handle a submit_prompt action with empty content', async () => { + const emptyPromptResult: SlashCommandProcessorResult = { + type: 'submit_prompt', + content: '', + }; + mockHandleSlashCommand.mockResolvedValue(emptyPromptResult); + + const { result, mockSendMessageStream: localMockSendMessageStream } = + renderTestHook(); + + await act(async () => { + await result.current.submitQuery('/emptycmd'); + }); + + await waitFor(() => { + expect(mockHandleSlashCommand).toHaveBeenCalledWith('/emptycmd'); + expect(localMockSendMessageStream).toHaveBeenCalledWith( + '', + expect.any(AbortSignal), + 
expect.any(String), + ); + }); + }); }); describe('Memory Refresh on save_memory', () => { @@ -1178,4 +1242,235 @@ describe('useGeminiStream', () => { }); }); }); + + describe('handleFinishedEvent', () => { + it('should add info message for MAX_TOKENS finish reason', async () => { + // Setup mock to return a stream with MAX_TOKENS finish reason + mockSendMessageStream.mockReturnValue( + (async function* () { + yield { + type: ServerGeminiEventType.Content, + value: 'This is a truncated response...', + }; + yield { type: ServerGeminiEventType.Finished, value: 'MAX_TOKENS' }; + })(), + ); + + const { result } = renderHook(() => + useGeminiStream( + new MockedGeminiClientClass(mockConfig), + [], + mockAddItem, + mockSetShowHelp, + mockConfig, + mockOnDebugMessage, + mockHandleSlashCommand, + false, + () => 'vscode' as EditorType, + () => {}, + () => Promise.resolve(), + false, + () => {}, + ), + ); + + // Submit a query + await act(async () => { + await result.current.submitQuery('Generate long text'); + }); + + // Check that the info message was added + await waitFor(() => { + expect(mockAddItem).toHaveBeenCalledWith( + { + type: 'info', + text: '⚠️ Response truncated due to token limits.', + }, + expect.any(Number), + ); + }); + }); + + it('should not add message for STOP finish reason', async () => { + // Setup mock to return a stream with STOP finish reason + mockSendMessageStream.mockReturnValue( + (async function* () { + yield { + type: ServerGeminiEventType.Content, + value: 'Complete response', + }; + yield { type: ServerGeminiEventType.Finished, value: 'STOP' }; + })(), + ); + + const { result } = renderHook(() => + useGeminiStream( + new MockedGeminiClientClass(mockConfig), + [], + mockAddItem, + mockSetShowHelp, + mockConfig, + mockOnDebugMessage, + mockHandleSlashCommand, + false, + () => 'vscode' as EditorType, + () => {}, + () => Promise.resolve(), + false, + () => {}, + ), + ); + + // Submit a query + await act(async () => { + await 
result.current.submitQuery('Test normal completion'); + }); + + // Wait a bit to ensure no message is added + await new Promise((resolve) => setTimeout(resolve, 100)); + + // Check that no info message was added for STOP + const infoMessages = mockAddItem.mock.calls.filter( + (call) => call[0].type === 'info', + ); + expect(infoMessages).toHaveLength(0); + }); + + it('should not add message for FINISH_REASON_UNSPECIFIED', async () => { + // Setup mock to return a stream with FINISH_REASON_UNSPECIFIED + mockSendMessageStream.mockReturnValue( + (async function* () { + yield { + type: ServerGeminiEventType.Content, + value: 'Response with unspecified finish', + }; + yield { + type: ServerGeminiEventType.Finished, + value: 'FINISH_REASON_UNSPECIFIED', + }; + })(), + ); + + const { result } = renderHook(() => + useGeminiStream( + new MockedGeminiClientClass(mockConfig), + [], + mockAddItem, + mockSetShowHelp, + mockConfig, + mockOnDebugMessage, + mockHandleSlashCommand, + false, + () => 'vscode' as EditorType, + () => {}, + () => Promise.resolve(), + false, + () => {}, + ), + ); + + // Submit a query + await act(async () => { + await result.current.submitQuery('Test unspecified finish'); + }); + + // Wait a bit to ensure no message is added + await new Promise((resolve) => setTimeout(resolve, 100)); + + // Check that no info message was added + const infoMessages = mockAddItem.mock.calls.filter( + (call) => call[0].type === 'info', + ); + expect(infoMessages).toHaveLength(0); + }); + + it('should add appropriate messages for other finish reasons', async () => { + const testCases = [ + { + reason: 'SAFETY', + message: '⚠️ Response stopped due to safety reasons.', + }, + { + reason: 'RECITATION', + message: '⚠️ Response stopped due to recitation policy.', + }, + { + reason: 'LANGUAGE', + message: '⚠️ Response stopped due to unsupported language.', + }, + { + reason: 'BLOCKLIST', + message: '⚠️ Response stopped due to forbidden terms.', + }, + { + reason: 
'PROHIBITED_CONTENT', + message: '⚠️ Response stopped due to prohibited content.', + }, + { + reason: 'SPII', + message: + '⚠️ Response stopped due to sensitive personally identifiable information.', + }, + { reason: 'OTHER', message: '⚠️ Response stopped for other reasons.' }, + { + reason: 'MALFORMED_FUNCTION_CALL', + message: '⚠️ Response stopped due to malformed function call.', + }, + { + reason: 'IMAGE_SAFETY', + message: '⚠️ Response stopped due to image safety violations.', + }, + { + reason: 'UNEXPECTED_TOOL_CALL', + message: '⚠️ Response stopped due to unexpected tool call.', + }, + ]; + + for (const { reason, message } of testCases) { + // Reset mocks for each test case + mockAddItem.mockClear(); + mockSendMessageStream.mockReturnValue( + (async function* () { + yield { + type: ServerGeminiEventType.Content, + value: `Response for ${reason}`, + }; + yield { type: ServerGeminiEventType.Finished, value: reason }; + })(), + ); + + const { result } = renderHook(() => + useGeminiStream( + new MockedGeminiClientClass(mockConfig), + [], + mockAddItem, + mockSetShowHelp, + mockConfig, + mockOnDebugMessage, + mockHandleSlashCommand, + false, + () => 'vscode' as EditorType, + () => {}, + () => Promise.resolve(), + false, + () => {}, + ), + ); + + await act(async () => { + await result.current.submitQuery(`Test ${reason}`); + }); + + await waitFor(() => { + expect(mockAddItem).toHaveBeenCalledWith( + { + type: 'info', + text: message, + }, + expect.any(Number), + ); + }); + } + }); + }); }); diff --git a/packages/cli/src/ui/hooks/useGeminiStream.ts b/packages/cli/src/ui/hooks/useGeminiStream.ts index 7f52dcf57..a5e3f4f60 100644 --- a/packages/cli/src/ui/hooks/useGeminiStream.ts +++ b/packages/cli/src/ui/hooks/useGeminiStream.ts @@ -14,6 +14,7 @@ import { ServerGeminiContentEvent as ContentEvent, ServerGeminiErrorEvent as ErrorEvent, ServerGeminiChatCompressedEvent, + ServerGeminiFinishedEvent, getErrorMessage, isNodeError, MessageSenderType, @@ -26,7 +27,7 @@ 
import { UserPromptEvent, DEFAULT_GEMINI_FLASH_MODEL, } from '@qwen-code/qwen-code-core'; -import { type Part, type PartListUnion } from '@google/genai'; +import { type Part, type PartListUnion, FinishReason } from '@google/genai'; import { StreamingState, HistoryItem, @@ -239,19 +240,37 @@ export const useGeminiStream = ( const slashCommandResult = await handleSlashCommand(trimmedQuery); if (slashCommandResult) { - if (slashCommandResult.type === 'schedule_tool') { - const { toolName, toolArgs } = slashCommandResult; - const toolCallRequest: ToolCallRequestInfo = { - callId: `${toolName}-${Date.now()}-${Math.random().toString(16).slice(2)}`, - name: toolName, - args: toolArgs, - isClientInitiated: true, - prompt_id, - }; - scheduleToolCalls([toolCallRequest], abortSignal); - } + switch (slashCommandResult.type) { + case 'schedule_tool': { + const { toolName, toolArgs } = slashCommandResult; + const toolCallRequest: ToolCallRequestInfo = { + callId: `${toolName}-${Date.now()}-${Math.random().toString(16).slice(2)}`, + name: toolName, + args: toolArgs, + isClientInitiated: true, + prompt_id, + }; + scheduleToolCalls([toolCallRequest], abortSignal); + return { queryToSend: null, shouldProceed: false }; + } + case 'submit_prompt': { + localQueryToSendToGemini = slashCommandResult.content; - return { queryToSend: null, shouldProceed: false }; + return { + queryToSend: localQueryToSendToGemini, + shouldProceed: true, + }; + } + case 'handled': { + return { queryToSend: null, shouldProceed: false }; + } + default: { + const unreachable: never = slashCommandResult; + throw new Error( + `Unhandled slash command result type: ${unreachable}`, + ); + } + } } if (shellModeActive && handleShellCommand(trimmedQuery, abortSignal)) { @@ -422,6 +441,46 @@ export const useGeminiStream = ( [addItem, pendingHistoryItemRef, setPendingHistoryItem, config], ); + const handleFinishedEvent = useCallback( + (event: ServerGeminiFinishedEvent, userMessageTimestamp: number) => { + const 
finishReason = event.value; + + const finishReasonMessages: Record = { + [FinishReason.FINISH_REASON_UNSPECIFIED]: undefined, + [FinishReason.STOP]: undefined, + [FinishReason.MAX_TOKENS]: 'Response truncated due to token limits.', + [FinishReason.SAFETY]: 'Response stopped due to safety reasons.', + [FinishReason.RECITATION]: 'Response stopped due to recitation policy.', + [FinishReason.LANGUAGE]: + 'Response stopped due to unsupported language.', + [FinishReason.BLOCKLIST]: 'Response stopped due to forbidden terms.', + [FinishReason.PROHIBITED_CONTENT]: + 'Response stopped due to prohibited content.', + [FinishReason.SPII]: + 'Response stopped due to sensitive personally identifiable information.', + [FinishReason.OTHER]: 'Response stopped for other reasons.', + [FinishReason.MALFORMED_FUNCTION_CALL]: + 'Response stopped due to malformed function call.', + [FinishReason.IMAGE_SAFETY]: + 'Response stopped due to image safety violations.', + [FinishReason.UNEXPECTED_TOOL_CALL]: + 'Response stopped due to unexpected tool call.', + }; + + const message = finishReasonMessages[finishReason]; + if (message) { + addItem( + { + type: 'info', + text: `⚠️ ${message}`, + }, + userMessageTimestamp, + ); + } + }, + [addItem], + ); + const handleChatCompressionEvent = useCallback( (eventValue: ServerGeminiChatCompressedEvent['value']) => addItem( @@ -452,23 +511,6 @@ export const useGeminiStream = ( [addItem, config], ); - const handleSessionTokenLimitExceededEvent = useCallback( - (value: { currentTokens: number; limit: number; message: string }) => - addItem( - { - type: 'error', - text: - `🚫 Session token limit exceeded: ${value.currentTokens.toLocaleString()} tokens > ${value.limit.toLocaleString()} limit.\n\n` + - `💡 Solutions:\n` + - ` • Start a new session: Use /clear command\n` + - ` • Increase limit: Add "sessionTokenLimit": (e.g., 128000) to your settings.json\n` + - ` • Compress history: Use /compress command to compress history`, - }, - Date.now(), - ), - [addItem], 
- ); - const handleLoopDetectedEvent = useCallback(() => { addItem( { @@ -518,8 +560,11 @@ export const useGeminiStream = ( case ServerGeminiEventType.MaxSessionTurns: handleMaxSessionTurnsEvent(); break; - case ServerGeminiEventType.SessionTokenLimitExceeded: - handleSessionTokenLimitExceededEvent(event.value); + case ServerGeminiEventType.Finished: + handleFinishedEvent( + event as ServerGeminiFinishedEvent, + userMessageTimestamp, + ); break; case ServerGeminiEventType.LoopDetected: // handle later because we want to move pending history to history @@ -544,8 +589,8 @@ export const useGeminiStream = ( handleErrorEvent, scheduleToolCalls, handleChatCompressionEvent, + handleFinishedEvent, handleMaxSessionTurnsEvent, - handleSessionTokenLimitExceededEvent, ], ); diff --git a/packages/cli/src/ui/hooks/useHistoryManager.test.ts b/packages/cli/src/ui/hooks/useHistoryManager.test.ts index c7f925e2c..b3245eb00 100644 --- a/packages/cli/src/ui/hooks/useHistoryManager.test.ts +++ b/packages/cli/src/ui/hooks/useHistoryManager.test.ts @@ -92,7 +92,7 @@ describe('useHistoryManager', () => { }); }); - it('should not change history if updateHistoryItem is called with a non-existent ID', () => { + it('should not change history if updateHistoryItem is called with a nonexistent ID', () => { const { result } = renderHook(() => useHistory()); const timestamp = Date.now(); const itemData: Omit = { @@ -107,7 +107,7 @@ describe('useHistoryManager', () => { const originalHistory = [...result.current.history]; // Clone before update attempt act(() => { - result.current.updateItem(99999, { text: 'Should not apply' }); // Non-existent ID + result.current.updateItem(99999, { text: 'Should not apply' }); // Nonexistent ID }); expect(result.current.history).toEqual(originalHistory); diff --git a/packages/cli/src/ui/hooks/useInputHistory.ts b/packages/cli/src/ui/hooks/useInputHistory.ts index 8225d4fcc..58fc9d4a6 100644 --- a/packages/cli/src/ui/hooks/useInputHistory.ts +++ 
b/packages/cli/src/ui/hooks/useInputHistory.ts @@ -14,7 +14,7 @@ interface UseInputHistoryProps { onChange: (value: string) => void; } -interface UseInputHistoryReturn { +export interface UseInputHistoryReturn { handleSubmit: (value: string) => void; navigateUp: () => boolean; navigateDown: () => boolean; diff --git a/packages/cli/src/ui/hooks/useKeypress.ts b/packages/cli/src/ui/hooks/useKeypress.ts index d3e3df5c4..6c2b7e8f1 100644 --- a/packages/cli/src/ui/hooks/useKeypress.ts +++ b/packages/cli/src/ui/hooks/useKeypress.ts @@ -147,12 +147,15 @@ export function useKeypress( let rl: readline.Interface; if (usePassthrough) { - rl = readline.createInterface({ input: keypressStream }); + rl = readline.createInterface({ + input: keypressStream, + escapeCodeTimeout: 0, + }); readline.emitKeypressEvents(keypressStream, rl); keypressStream.on('keypress', handleKeypress); stdin.on('data', handleRawKeypress); } else { - rl = readline.createInterface({ input: stdin }); + rl = readline.createInterface({ input: stdin, escapeCodeTimeout: 0 }); readline.emitKeypressEvents(stdin, rl); stdin.on('keypress', handleKeypress); } diff --git a/packages/cli/src/ui/hooks/usePrivacySettings.ts b/packages/cli/src/ui/hooks/usePrivacySettings.ts index 663bfad08..964545d99 100644 --- a/packages/cli/src/ui/hooks/usePrivacySettings.ts +++ b/packages/cli/src/ui/hooks/usePrivacySettings.ts @@ -4,7 +4,6 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { GaxiosError } from 'gaxios'; import { useState, useEffect, useCallback } from 'react'; import { Config, @@ -117,13 +116,18 @@ async function getRemoteDataCollectionOptIn( try { const resp = await server.getCodeAssistGlobalUserSetting(); return resp.freeTierDataCollectionOptin; - } catch (e) { - if (e instanceof GaxiosError) { - if (e.response?.status === 404) { + } catch (error: unknown) { + if (error && typeof error === 'object' && 'response' in error) { + const gaxiosError = error as { + response?: { + status?: unknown; + }; + }; + if 
(gaxiosError.response?.status === 404) { return true; } } - throw e; + throw error; } } diff --git a/packages/cli/src/ui/hooks/useShellHistory.test.ts b/packages/cli/src/ui/hooks/useShellHistory.test.ts index e960ff23c..3f3a0ada6 100644 --- a/packages/cli/src/ui/hooks/useShellHistory.test.ts +++ b/packages/cli/src/ui/hooks/useShellHistory.test.ts @@ -67,7 +67,7 @@ describe('useShellHistory', () => { expect(command).toBe('cmd2'); }); - it('should handle a non-existent history file gracefully', async () => { + it('should handle a nonexistent history file gracefully', async () => { const error = new Error('File not found') as NodeJS.ErrnoException; error.code = 'ENOENT'; mockedFs.readFile.mockRejectedValue(error); diff --git a/packages/cli/src/ui/hooks/useShellHistory.ts b/packages/cli/src/ui/hooks/useShellHistory.ts index 2eddcbb45..5701de575 100644 --- a/packages/cli/src/ui/hooks/useShellHistory.ts +++ b/packages/cli/src/ui/hooks/useShellHistory.ts @@ -12,6 +12,13 @@ import { isNodeError, getProjectTempDir } from '@qwen-code/qwen-code-core'; const HISTORY_FILE = 'shell_history'; const MAX_HISTORY_LENGTH = 100; +export interface UseShellHistoryReturn { + addCommandToHistory: (command: string) => void; + getPreviousCommand: () => string | null; + getNextCommand: () => string | null; + resetHistoryPosition: () => void; +} + async function getHistoryFilePath(projectRoot: string): Promise { const historyDir = getProjectTempDir(projectRoot); return path.join(historyDir, HISTORY_FILE); @@ -42,7 +49,7 @@ async function writeHistoryFile( } } -export function useShellHistory(projectRoot: string) { +export function useShellHistory(projectRoot: string): UseShellHistoryReturn { const [history, setHistory] = useState([]); const [historyIndex, setHistoryIndex] = useState(-1); const [historyFilePath, setHistoryFilePath] = useState(null); diff --git a/packages/cli/src/ui/hooks/useThemeCommand.ts b/packages/cli/src/ui/hooks/useThemeCommand.ts index c258b0e33..6c9e60d8a 100644 --- 
a/packages/cli/src/ui/hooks/useThemeCommand.ts +++ b/packages/cli/src/ui/hooks/useThemeCommand.ts @@ -25,39 +25,18 @@ export const useThemeCommand = ( setThemeError: (error: string | null) => void, addItem: (item: Omit, timestamp: number) => void, ): UseThemeCommandReturn => { - // Determine the effective theme - const effectiveTheme = loadedSettings.merged.theme; + const [isThemeDialogOpen, setIsThemeDialogOpen] = useState(false); - // Initial state: Open dialog if no theme is set in either user or workspace settings - const [isThemeDialogOpen, setIsThemeDialogOpen] = useState( - effectiveTheme === undefined && !process.env.NO_COLOR, - ); - // TODO: refactor how theme's are accessed to avoid requiring a forced render. - const [, setForceRender] = useState(0); - - // Apply initial theme on component mount + // Check for invalid theme configuration on startup useEffect(() => { - if (effectiveTheme === undefined) { - if (process.env.NO_COLOR) { - addItem( - { - type: MessageType.INFO, - text: 'Theme configuration unavailable due to NO_COLOR env variable.', - }, - Date.now(), - ); - } - // If no theme is set and NO_COLOR is not set, the dialog is already open. 
- return; - } - - if (!themeManager.setActiveTheme(effectiveTheme)) { + const effectiveTheme = loadedSettings.merged.theme; + if (effectiveTheme && !themeManager.findThemeByName(effectiveTheme)) { setIsThemeDialogOpen(true); setThemeError(`Theme "${effectiveTheme}" not found.`); } else { setThemeError(null); } - }, [effectiveTheme, setThemeError, addItem]); // Re-run if effectiveTheme or setThemeError changes + }, [loadedSettings.merged.theme, setThemeError]); const openThemeDialog = useCallback(() => { if (process.env.NO_COLOR) { @@ -80,11 +59,10 @@ export const useThemeCommand = ( setIsThemeDialogOpen(true); setThemeError(`Theme "${themeName}" not found.`); } else { - setForceRender((v) => v + 1); // Trigger potential re-render setThemeError(null); // Clear any previous theme error on success } }, - [setForceRender, setThemeError], + [setThemeError], ); const handleThemeHighlight = useCallback( @@ -96,15 +74,31 @@ export const useThemeCommand = ( const handleThemeSelect = useCallback( (themeName: string | undefined, scope: SettingScope) => { - // Added scope parameter try { + // Merge user and workspace custom themes (workspace takes precedence) + const mergedCustomThemes = { + ...(loadedSettings.user.settings.customThemes || {}), + ...(loadedSettings.workspace.settings.customThemes || {}), + }; + // Only allow selecting themes available in the merged custom themes or built-in themes + const isBuiltIn = themeManager.findThemeByName(themeName); + const isCustom = themeName && mergedCustomThemes[themeName]; + if (!isBuiltIn && !isCustom) { + setThemeError(`Theme "${themeName}" not found in selected scope.`); + setIsThemeDialogOpen(true); + return; + } loadedSettings.setValue(scope, 'theme', themeName); // Update the merged settings + if (loadedSettings.merged.customThemes) { + themeManager.loadCustomThemes(loadedSettings.merged.customThemes); + } applyTheme(loadedSettings.merged.theme); // Apply the current theme + setThemeError(null); } finally { 
setIsThemeDialogOpen(false); // Close the dialog } }, - [applyTheme, loadedSettings], + [applyTheme, loadedSettings, setThemeError], ); return { diff --git a/packages/cli/src/ui/hooks/useToolScheduler.test.ts b/packages/cli/src/ui/hooks/useToolScheduler.test.ts index befec526f..915f9c55f 100644 --- a/packages/cli/src/ui/hooks/useToolScheduler.test.ts +++ b/packages/cli/src/ui/hooks/useToolScheduler.test.ts @@ -23,7 +23,8 @@ import { ToolCallResponseInfo, ToolCall, // Import from core Status as ToolCallStatusType, - ApprovalMode, // Import from core + ApprovalMode, + Icon, } from '@qwen-code/qwen-code-core'; import { HistoryItemWithoutId, @@ -56,6 +57,8 @@ const mockTool: Tool = { name: 'mockTool', displayName: 'Mock Tool', description: 'A mock tool for testing', + icon: Icon.Hammer, + toolLocations: vi.fn(), isOutputMarkdown: false, canUpdateOutput: false, schema: {}, @@ -85,6 +88,8 @@ const mockToolRequiresConfirmation: Tool = { onConfirm: mockOnUserConfirmForToolConfirmation, fileName: 'mockToolRequiresConfirmation.ts', fileDiff: 'Mock tool requires confirmation', + originalContent: 'Original content', + newContent: 'New content', }), ), }; @@ -336,7 +341,7 @@ describe('useReactToolScheduler', () => { const schedule = result.current[1]; const request: ToolCallRequestInfo = { callId: 'call1', - name: 'nonExistentTool', + name: 'nonexistentTool', args: {}, }; @@ -356,7 +361,7 @@ describe('useReactToolScheduler', () => { request, response: expect.objectContaining({ error: expect.objectContaining({ - message: 'Tool "nonExistentTool" not found in registry.', + message: 'Tool "nonexistentTool" not found in registry.', }), }), }), @@ -807,6 +812,8 @@ describe('mapToDisplay', () => { isOutputMarkdown: false, canUpdateOutput: false, schema: {}, + icon: Icon.Hammer, + toolLocations: vi.fn(), validateToolParams: vi.fn(), execute: vi.fn(), shouldConfirmExecute: vi.fn(), @@ -885,6 +892,8 @@ describe('mapToDisplay', () => { toolDisplayName: 'Test Tool Display', fileName: 
'test.ts', fileDiff: 'Test diff', + originalContent: 'Original content', + newContent: 'New content', } as ToolCallConfirmationDetails, }, expectedStatus: ToolCallStatus.Confirming, diff --git a/packages/cli/src/ui/hooks/vim.test.ts b/packages/cli/src/ui/hooks/vim.test.ts new file mode 100644 index 000000000..f939982fd --- /dev/null +++ b/packages/cli/src/ui/hooks/vim.test.ts @@ -0,0 +1,1626 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect, vi, beforeEach } from 'vitest'; +import { renderHook, act } from '@testing-library/react'; +import React from 'react'; +import { useVim } from './vim.js'; +import type { TextBuffer } from '../components/shared/text-buffer.js'; +import { textBufferReducer } from '../components/shared/text-buffer.js'; + +// Mock the VimModeContext +const mockVimContext = { + vimEnabled: true, + vimMode: 'NORMAL' as const, + toggleVimEnabled: vi.fn(), + setVimMode: vi.fn(), +}; + +vi.mock('../contexts/VimModeContext.js', () => ({ + useVimMode: () => mockVimContext, + VimModeProvider: ({ children }: { children: React.ReactNode }) => children, +})); + +// Test constants +const TEST_SEQUENCES = { + ESCAPE: { sequence: '\u001b', name: 'escape' }, + LEFT: { sequence: 'h' }, + RIGHT: { sequence: 'l' }, + UP: { sequence: 'k' }, + DOWN: { sequence: 'j' }, + INSERT: { sequence: 'i' }, + APPEND: { sequence: 'a' }, + DELETE_CHAR: { sequence: 'x' }, + DELETE: { sequence: 'd' }, + CHANGE: { sequence: 'c' }, + WORD_FORWARD: { sequence: 'w' }, + WORD_BACKWARD: { sequence: 'b' }, + WORD_END: { sequence: 'e' }, + LINE_START: { sequence: '0' }, + LINE_END: { sequence: '$' }, + REPEAT: { sequence: '.' 
}, +} as const; + +describe('useVim hook', () => { + let mockBuffer: Partial<TextBuffer>; + let mockHandleFinalSubmit: vi.Mock; + + const createMockBuffer = ( + text = 'hello world', + cursor: [number, number] = [0, 5], + ) => { + const cursorState = { pos: cursor }; + const lines = text.split('\n'); + + return { + lines, + get cursor() { + return cursorState.pos; + }, + set cursor(newPos: [number, number]) { + cursorState.pos = newPos; + }, + text, + move: vi.fn().mockImplementation((direction: string) => { + let [row, col] = cursorState.pos; + const line = lines[row] || ''; + if (direction === 'left') { + col = Math.max(0, col - 1); + } else if (direction === 'right') { + col = Math.min(line.length, col + 1); + } else if (direction === 'home') { + col = 0; + } else if (direction === 'end') { + col = line.length; + } + cursorState.pos = [row, col]; + }), + del: vi.fn(), + moveToOffset: vi.fn(), + insert: vi.fn(), + newline: vi.fn(), + replaceRangeByOffset: vi.fn(), + handleInput: vi.fn(), + setText: vi.fn(), + // Vim-specific methods + vimDeleteWordForward: vi.fn(), + vimDeleteWordBackward: vi.fn(), + vimDeleteWordEnd: vi.fn(), + vimChangeWordForward: vi.fn(), + vimChangeWordBackward: vi.fn(), + vimChangeWordEnd: vi.fn(), + vimDeleteLine: vi.fn(), + vimChangeLine: vi.fn(), + vimDeleteToEndOfLine: vi.fn(), + vimChangeToEndOfLine: vi.fn(), + vimChangeMovement: vi.fn(), + vimMoveLeft: vi.fn(), + vimMoveRight: vi.fn(), + vimMoveUp: vi.fn(), + vimMoveDown: vi.fn(), + vimMoveWordForward: vi.fn(), + vimMoveWordBackward: vi.fn(), + vimMoveWordEnd: vi.fn(), + vimDeleteChar: vi.fn(), + vimInsertAtCursor: vi.fn(), + vimAppendAtCursor: vi.fn().mockImplementation(() => { + // Append moves cursor right (vim 'a' behavior - position after current char) + const [row, col] = cursorState.pos; + const _line = lines[row] || ''; + // In vim, 'a' moves cursor to position after current character + // This allows inserting at the end of the line + cursorState.pos = [row, col + 1]; + }), + 
vimOpenLineBelow: vi.fn(), + vimOpenLineAbove: vi.fn(), + vimAppendAtLineEnd: vi.fn(), + vimInsertAtLineStart: vi.fn(), + vimMoveToLineStart: vi.fn(), + vimMoveToLineEnd: vi.fn(), + vimMoveToFirstNonWhitespace: vi.fn(), + vimMoveToFirstLine: vi.fn(), + vimMoveToLastLine: vi.fn(), + vimMoveToLine: vi.fn(), + vimEscapeInsertMode: vi.fn().mockImplementation(() => { + // Escape moves cursor left unless at beginning of line + const [row, col] = cursorState.pos; + if (col > 0) { + cursorState.pos = [row, col - 1]; + } + }), + }; + }; + + const _createMockSettings = (vimMode = true) => ({ + getValue: vi.fn().mockReturnValue(vimMode), + setValue: vi.fn(), + merged: { vimMode }, + }); + + const renderVimHook = (buffer?: Partial) => + renderHook(() => + useVim((buffer || mockBuffer) as TextBuffer, mockHandleFinalSubmit), + ); + + const exitInsertMode = (result: { + current: { + handleInput: (input: { sequence: string; name: string }) => void; + }; + }) => { + act(() => { + result.current.handleInput({ sequence: '\u001b', name: 'escape' }); + }); + }; + + beforeEach(() => { + vi.clearAllMocks(); + mockHandleFinalSubmit = vi.fn(); + mockBuffer = createMockBuffer(); + // Reset mock context to default state + mockVimContext.vimEnabled = true; + mockVimContext.vimMode = 'NORMAL'; + mockVimContext.toggleVimEnabled.mockClear(); + mockVimContext.setVimMode.mockClear(); + }); + + describe('Mode switching', () => { + it('should start in NORMAL mode', () => { + const { result } = renderVimHook(); + expect(result.current.mode).toBe('NORMAL'); + }); + + it('should switch to INSERT mode with i command', () => { + const { result } = renderVimHook(); + + act(() => { + result.current.handleInput(TEST_SEQUENCES.INSERT); + }); + + expect(result.current.mode).toBe('INSERT'); + expect(mockVimContext.setVimMode).toHaveBeenCalledWith('INSERT'); + }); + + it('should switch back to NORMAL mode with Escape', () => { + const { result } = renderVimHook(); + + act(() => { + 
result.current.handleInput(TEST_SEQUENCES.INSERT); + }); + expect(result.current.mode).toBe('INSERT'); + + exitInsertMode(result); + expect(result.current.mode).toBe('NORMAL'); + }); + + it('should properly handle escape followed immediately by a command', () => { + const testBuffer = createMockBuffer('hello world test', [0, 6]); + const { result } = renderVimHook(testBuffer); + + act(() => { + result.current.handleInput({ sequence: 'i' }); + }); + expect(result.current.mode).toBe('INSERT'); + + vi.clearAllMocks(); + + exitInsertMode(result); + expect(result.current.mode).toBe('NORMAL'); + + act(() => { + result.current.handleInput({ sequence: 'b' }); + }); + + expect(testBuffer.vimMoveWordBackward).toHaveBeenCalledWith(1); + }); + }); + + describe('Navigation commands', () => { + it('should handle h (left movement)', () => { + const { result } = renderVimHook(); + + act(() => { + result.current.handleInput({ sequence: 'h' }); + }); + + expect(mockBuffer.vimMoveLeft).toHaveBeenCalledWith(1); + }); + + it('should handle l (right movement)', () => { + const { result } = renderVimHook(); + + act(() => { + result.current.handleInput({ sequence: 'l' }); + }); + + expect(mockBuffer.vimMoveRight).toHaveBeenCalledWith(1); + }); + + it('should handle j (down movement)', () => { + const testBuffer = createMockBuffer('first line\nsecond line'); + const { result } = renderVimHook(testBuffer); + + act(() => { + result.current.handleInput({ sequence: 'j' }); + }); + + expect(testBuffer.vimMoveDown).toHaveBeenCalledWith(1); + }); + + it('should handle k (up movement)', () => { + const testBuffer = createMockBuffer('first line\nsecond line'); + const { result } = renderVimHook(testBuffer); + + act(() => { + result.current.handleInput({ sequence: 'k' }); + }); + + expect(testBuffer.vimMoveUp).toHaveBeenCalledWith(1); + }); + + it('should handle 0 (move to start of line)', () => { + const { result } = renderVimHook(); + + act(() => { + result.current.handleInput({ sequence: '0' }); 
+ }); + + expect(mockBuffer.vimMoveToLineStart).toHaveBeenCalled(); + }); + + it('should handle $ (move to end of line)', () => { + const { result } = renderVimHook(); + + act(() => { + result.current.handleInput({ sequence: '$' }); + }); + + expect(mockBuffer.vimMoveToLineEnd).toHaveBeenCalled(); + }); + }); + + describe('Mode switching commands', () => { + it('should handle a (append after cursor)', () => { + const { result } = renderVimHook(); + + act(() => { + result.current.handleInput({ sequence: 'a' }); + }); + + expect(mockBuffer.vimAppendAtCursor).toHaveBeenCalled(); + expect(result.current.mode).toBe('INSERT'); + }); + + it('should handle A (append at end of line)', () => { + const { result } = renderVimHook(); + + act(() => { + result.current.handleInput({ sequence: 'A' }); + }); + + expect(mockBuffer.vimAppendAtLineEnd).toHaveBeenCalled(); + expect(result.current.mode).toBe('INSERT'); + }); + + it('should handle o (open line below)', () => { + const { result } = renderVimHook(); + + act(() => { + result.current.handleInput({ sequence: 'o' }); + }); + + expect(mockBuffer.vimOpenLineBelow).toHaveBeenCalled(); + expect(result.current.mode).toBe('INSERT'); + }); + + it('should handle O (open line above)', () => { + const { result } = renderVimHook(); + + act(() => { + result.current.handleInput({ sequence: 'O' }); + }); + + expect(mockBuffer.vimOpenLineAbove).toHaveBeenCalled(); + expect(result.current.mode).toBe('INSERT'); + }); + }); + + describe('Edit commands', () => { + it('should handle x (delete character)', () => { + const { result } = renderVimHook(); + vi.clearAllMocks(); + + act(() => { + result.current.handleInput({ sequence: 'x' }); + }); + + expect(mockBuffer.vimDeleteChar).toHaveBeenCalledWith(1); + }); + + it('should move cursor left when deleting last character on line (vim behavior)', () => { + const testBuffer = createMockBuffer('hello', [0, 4]); + const { result } = renderVimHook(testBuffer); + + act(() => { + 
result.current.handleInput({ sequence: 'x' }); + }); + + expect(testBuffer.vimDeleteChar).toHaveBeenCalledWith(1); + }); + + it('should handle first d key (sets pending state)', () => { + const { result } = renderVimHook(); + + act(() => { + result.current.handleInput({ sequence: 'd' }); + }); + + expect(mockBuffer.replaceRangeByOffset).not.toHaveBeenCalled(); + }); + }); + + describe('Count handling', () => { + it('should handle count input and return to count 0 after command', () => { + const { result } = renderVimHook(); + + act(() => { + const handled = result.current.handleInput({ sequence: '3' }); + expect(handled).toBe(true); + }); + + act(() => { + const handled = result.current.handleInput({ sequence: 'h' }); + expect(handled).toBe(true); + }); + + expect(mockBuffer.vimMoveLeft).toHaveBeenCalledWith(3); + }); + + it('should only delete 1 character with x command when no count is specified', () => { + const testBuffer = createMockBuffer(); + const { result } = renderVimHook(testBuffer); + + act(() => { + result.current.handleInput({ sequence: 'x' }); + }); + + expect(testBuffer.vimDeleteChar).toHaveBeenCalledWith(1); + }); + }); + + describe('Word movement', () => { + it('should properly initialize vim hook with word movement support', () => { + const testBuffer = createMockBuffer('cat elephant mouse', [0, 0]); + const { result } = renderVimHook(testBuffer); + + expect(result.current.vimModeEnabled).toBe(true); + expect(result.current.mode).toBe('NORMAL'); + expect(result.current.handleInput).toBeDefined(); + }); + + it('should support vim mode and basic operations across multiple lines', () => { + const testBuffer = createMockBuffer( + 'first line word\nsecond line word', + [0, 11], + ); + const { result } = renderVimHook(testBuffer); + + expect(result.current.vimModeEnabled).toBe(true); + expect(result.current.mode).toBe('NORMAL'); + expect(result.current.handleInput).toBeDefined(); + expect(testBuffer.replaceRangeByOffset).toBeDefined(); + 
expect(testBuffer.moveToOffset).toBeDefined(); + }); + + it('should handle w (next word)', () => { + const testBuffer = createMockBuffer('hello world test'); + const { result } = renderVimHook(testBuffer); + + act(() => { + result.current.handleInput({ sequence: 'w' }); + }); + + expect(testBuffer.vimMoveWordForward).toHaveBeenCalledWith(1); + }); + + it('should handle b (previous word)', () => { + const testBuffer = createMockBuffer('hello world test', [0, 6]); + const { result } = renderVimHook(testBuffer); + + act(() => { + result.current.handleInput({ sequence: 'b' }); + }); + + expect(testBuffer.vimMoveWordBackward).toHaveBeenCalledWith(1); + }); + + it('should handle e (end of word)', () => { + const testBuffer = createMockBuffer('hello world test'); + const { result } = renderVimHook(testBuffer); + + act(() => { + result.current.handleInput({ sequence: 'e' }); + }); + + expect(testBuffer.vimMoveWordEnd).toHaveBeenCalledWith(1); + }); + + it('should handle w when cursor is on the last word', () => { + const testBuffer = createMockBuffer('hello world', [0, 8]); + const { result } = renderVimHook(testBuffer); + + act(() => { + result.current.handleInput({ sequence: 'w' }); + }); + + expect(testBuffer.vimMoveWordForward).toHaveBeenCalledWith(1); + }); + + it('should handle first c key (sets pending change state)', () => { + const { result } = renderVimHook(); + + act(() => { + result.current.handleInput({ sequence: 'c' }); + }); + + expect(result.current.mode).toBe('NORMAL'); + expect(mockBuffer.del).not.toHaveBeenCalled(); + }); + + it('should clear pending state on invalid command sequence (df)', () => { + const { result } = renderVimHook(); + + act(() => { + result.current.handleInput({ sequence: 'd' }); + result.current.handleInput({ sequence: 'f' }); + }); + + expect(mockBuffer.replaceRangeByOffset).not.toHaveBeenCalled(); + expect(mockBuffer.del).not.toHaveBeenCalled(); + }); + + it('should clear pending state with Escape in NORMAL mode', () => { + const { 
result } = renderVimHook(); + + act(() => { + result.current.handleInput({ sequence: 'd' }); + }); + + exitInsertMode(result); + + expect(mockBuffer.replaceRangeByOffset).not.toHaveBeenCalled(); + }); + }); + + describe('Disabled vim mode', () => { + it('should not respond to vim commands when disabled', () => { + mockVimContext.vimEnabled = false; + const { result } = renderVimHook(mockBuffer); + + act(() => { + result.current.handleInput({ sequence: 'h' }); + }); + + expect(mockBuffer.move).not.toHaveBeenCalled(); + }); + }); + + // These tests are no longer applicable at the hook level + + describe('Command repeat system', () => { + it('should repeat x command from current cursor position', () => { + const testBuffer = createMockBuffer('abcd\nefgh\nijkl', [0, 1]); + const { result } = renderVimHook(testBuffer); + + act(() => { + result.current.handleInput({ sequence: 'x' }); + }); + expect(testBuffer.vimDeleteChar).toHaveBeenCalledWith(1); + + testBuffer.cursor = [1, 2]; + + act(() => { + result.current.handleInput({ sequence: '.' }); + }); + expect(testBuffer.vimDeleteChar).toHaveBeenCalledWith(1); + }); + + it('should repeat dd command from current position', () => { + const testBuffer = createMockBuffer('line1\nline2\nline3', [1, 0]); + const { result } = renderVimHook(testBuffer); + + act(() => { + result.current.handleInput({ sequence: 'd' }); + }); + act(() => { + result.current.handleInput({ sequence: 'd' }); + }); + expect(testBuffer.vimDeleteLine).toHaveBeenCalledTimes(1); + + testBuffer.cursor = [0, 0]; + + act(() => { + result.current.handleInput({ sequence: '.' 
}); + }); + + expect(testBuffer.vimDeleteLine).toHaveBeenCalledTimes(2); + }); + + it('should repeat ce command from current position', () => { + const testBuffer = createMockBuffer('word', [0, 0]); + const { result } = renderVimHook(testBuffer); + + act(() => { + result.current.handleInput({ sequence: 'c' }); + }); + act(() => { + result.current.handleInput({ sequence: 'e' }); + }); + expect(testBuffer.vimChangeWordEnd).toHaveBeenCalledTimes(1); + + // Exit INSERT mode to complete the command + exitInsertMode(result); + + testBuffer.cursor = [0, 2]; + + act(() => { + result.current.handleInput({ sequence: '.' }); + }); + + expect(testBuffer.vimChangeWordEnd).toHaveBeenCalledTimes(2); + }); + + it('should repeat cc command from current position', () => { + const testBuffer = createMockBuffer('line1\nline2\nline3', [1, 2]); + const { result } = renderVimHook(testBuffer); + + act(() => { + result.current.handleInput({ sequence: 'c' }); + }); + act(() => { + result.current.handleInput({ sequence: 'c' }); + }); + expect(testBuffer.vimChangeLine).toHaveBeenCalledTimes(1); + + // Exit INSERT mode to complete the command + exitInsertMode(result); + + testBuffer.cursor = [0, 1]; + + act(() => { + result.current.handleInput({ sequence: '.' }); + }); + + expect(testBuffer.vimChangeLine).toHaveBeenCalledTimes(2); + }); + + it('should repeat cw command from current position', () => { + const testBuffer = createMockBuffer('hello world test', [0, 6]); + const { result } = renderVimHook(testBuffer); + + act(() => { + result.current.handleInput({ sequence: 'c' }); + }); + act(() => { + result.current.handleInput({ sequence: 'w' }); + }); + expect(testBuffer.vimChangeWordForward).toHaveBeenCalledTimes(1); + + // Exit INSERT mode to complete the command + exitInsertMode(result); + + testBuffer.cursor = [0, 0]; + + act(() => { + result.current.handleInput({ sequence: '.' 
}); + }); + + expect(testBuffer.vimChangeWordForward).toHaveBeenCalledTimes(2); + }); + + it('should repeat D command from current position', () => { + const testBuffer = createMockBuffer('hello world test', [0, 6]); + const { result } = renderVimHook(testBuffer); + + act(() => { + result.current.handleInput({ sequence: 'D' }); + }); + expect(testBuffer.vimDeleteToEndOfLine).toHaveBeenCalledTimes(1); + + testBuffer.cursor = [0, 2]; + vi.clearAllMocks(); // Clear all mocks instead of just one method + + act(() => { + result.current.handleInput({ sequence: '.' }); + }); + + expect(testBuffer.vimDeleteToEndOfLine).toHaveBeenCalledTimes(1); + }); + + it('should repeat C command from current position', () => { + const testBuffer = createMockBuffer('hello world test', [0, 6]); + const { result } = renderVimHook(testBuffer); + + act(() => { + result.current.handleInput({ sequence: 'C' }); + }); + expect(testBuffer.vimChangeToEndOfLine).toHaveBeenCalledTimes(1); + + // Exit INSERT mode to complete the command + exitInsertMode(result); + + testBuffer.cursor = [0, 2]; + + act(() => { + result.current.handleInput({ sequence: '.' }); + }); + + expect(testBuffer.vimChangeToEndOfLine).toHaveBeenCalledTimes(2); + }); + + it('should repeat command after cursor movement', () => { + const testBuffer = createMockBuffer('test text', [0, 0]); + const { result } = renderVimHook(testBuffer); + + act(() => { + result.current.handleInput({ sequence: 'x' }); + }); + expect(testBuffer.vimDeleteChar).toHaveBeenCalledWith(1); + + testBuffer.cursor = [0, 2]; + + act(() => { + result.current.handleInput({ sequence: '.' 
}); + }); + expect(testBuffer.vimDeleteChar).toHaveBeenCalledWith(1); + }); + + it('should move cursor to the correct position after exiting INSERT mode with "a"', () => { + const testBuffer = createMockBuffer('hello world', [0, 10]); + const { result } = renderVimHook(testBuffer); + + act(() => { + result.current.handleInput({ sequence: 'a' }); + }); + expect(result.current.mode).toBe('INSERT'); + expect(testBuffer.cursor).toEqual([0, 11]); + + exitInsertMode(result); + expect(result.current.mode).toBe('NORMAL'); + expect(testBuffer.cursor).toEqual([0, 10]); + }); + }); + + describe('Special characters and edge cases', () => { + it('should handle ^ (move to first non-whitespace character)', () => { + const testBuffer = createMockBuffer(' hello world', [0, 5]); + const { result } = renderVimHook(testBuffer); + + act(() => { + result.current.handleInput({ sequence: '^' }); + }); + + expect(testBuffer.vimMoveToFirstNonWhitespace).toHaveBeenCalled(); + }); + + it('should handle G without count (go to last line)', () => { + const testBuffer = createMockBuffer('line1\nline2\nline3', [0, 0]); + const { result } = renderVimHook(testBuffer); + + act(() => { + result.current.handleInput({ sequence: 'G' }); + }); + + expect(testBuffer.vimMoveToLastLine).toHaveBeenCalled(); + }); + + it('should handle gg (go to first line)', () => { + const testBuffer = createMockBuffer('line1\nline2\nline3', [2, 0]); + const { result } = renderVimHook(testBuffer); + + // First 'g' sets pending state + act(() => { + result.current.handleInput({ sequence: 'g' }); + }); + + // Second 'g' executes the command + act(() => { + result.current.handleInput({ sequence: 'g' }); + }); + + expect(testBuffer.vimMoveToFirstLine).toHaveBeenCalled(); + }); + + it('should handle count with movement commands', () => { + const testBuffer = createMockBuffer('hello world test', [0, 0]); + const { result } = renderVimHook(testBuffer); + + act(() => { + result.current.handleInput({ sequence: '3' }); + }); + + 
act(() => { + result.current.handleInput(TEST_SEQUENCES.WORD_FORWARD); + }); + + expect(testBuffer.vimMoveWordForward).toHaveBeenCalledWith(3); + }); + }); + + describe('Vim word operations', () => { + describe('dw (delete word forward)', () => { + it('should delete from cursor to start of next word', () => { + const testBuffer = createMockBuffer('hello world test', [0, 0]); + const { result } = renderVimHook(testBuffer); + + act(() => { + result.current.handleInput({ sequence: 'd' }); + }); + act(() => { + result.current.handleInput({ sequence: 'w' }); + }); + + expect(testBuffer.vimDeleteWordForward).toHaveBeenCalledWith(1); + }); + + it('should actually delete the complete word including trailing space', () => { + // This test uses the real text-buffer reducer instead of mocks + const initialState = { + lines: ['hello world test'], + cursorRow: 0, + cursorCol: 0, + preferredCol: null, + undoStack: [], + redoStack: [], + clipboard: null, + selectionAnchor: null, + }; + + const result = textBufferReducer(initialState, { + type: 'vim_delete_word_forward', + payload: { count: 1 }, + }); + + // Should delete "hello " (word + space), leaving "world test" + expect(result.lines).toEqual(['world test']); + expect(result.cursorRow).toBe(0); + expect(result.cursorCol).toBe(0); + }); + + it('should delete word from middle of word correctly', () => { + const initialState = { + lines: ['hello world test'], + cursorRow: 0, + cursorCol: 2, // cursor on 'l' in "hello" + preferredCol: null, + undoStack: [], + redoStack: [], + clipboard: null, + selectionAnchor: null, + }; + + const result = textBufferReducer(initialState, { + type: 'vim_delete_word_forward', + payload: { count: 1 }, + }); + + // Should delete "llo " (rest of word + space), leaving "he world test" + expect(result.lines).toEqual(['heworld test']); + expect(result.cursorRow).toBe(0); + expect(result.cursorCol).toBe(2); + }); + + it('should handle dw at end of line', () => { + const initialState = { + lines: ['hello 
world'], + cursorRow: 0, + cursorCol: 6, // cursor on 'w' in "world" + preferredCol: null, + undoStack: [], + redoStack: [], + clipboard: null, + selectionAnchor: null, + }; + + const result = textBufferReducer(initialState, { + type: 'vim_delete_word_forward', + payload: { count: 1 }, + }); + + // Should delete "world" (no trailing space at end), leaving "hello " + expect(result.lines).toEqual(['hello ']); + expect(result.cursorRow).toBe(0); + expect(result.cursorCol).toBe(6); + }); + + it('should delete multiple words with count', () => { + const testBuffer = createMockBuffer('one two three four', [0, 0]); + const { result } = renderVimHook(testBuffer); + + act(() => { + result.current.handleInput({ sequence: '2' }); + }); + act(() => { + result.current.handleInput({ sequence: 'd' }); + }); + act(() => { + result.current.handleInput({ sequence: 'w' }); + }); + + expect(testBuffer.vimDeleteWordForward).toHaveBeenCalledWith(2); + }); + + it('should record command for repeat with dot', () => { + const testBuffer = createMockBuffer('hello world test', [0, 0]); + const { result } = renderVimHook(testBuffer); + + // Execute dw + act(() => { + result.current.handleInput({ sequence: 'd' }); + }); + act(() => { + result.current.handleInput({ sequence: 'w' }); + }); + + vi.clearAllMocks(); + + // Execute dot repeat + act(() => { + result.current.handleInput({ sequence: '.' 
}); + }); + + expect(testBuffer.vimDeleteWordForward).toHaveBeenCalledWith(1); + }); + }); + + describe('de (delete word end)', () => { + it('should delete from cursor to end of current word', () => { + const testBuffer = createMockBuffer('hello world test', [0, 1]); + const { result } = renderVimHook(testBuffer); + + act(() => { + result.current.handleInput({ sequence: 'd' }); + }); + act(() => { + result.current.handleInput({ sequence: 'e' }); + }); + + expect(testBuffer.vimDeleteWordEnd).toHaveBeenCalledWith(1); + }); + + it('should handle count with de', () => { + const testBuffer = createMockBuffer('one two three four', [0, 0]); + const { result } = renderVimHook(testBuffer); + + act(() => { + result.current.handleInput({ sequence: '3' }); + }); + act(() => { + result.current.handleInput({ sequence: 'd' }); + }); + act(() => { + result.current.handleInput({ sequence: 'e' }); + }); + + expect(testBuffer.vimDeleteWordEnd).toHaveBeenCalledWith(3); + }); + }); + + describe('cw (change word forward)', () => { + it('should change from cursor to start of next word and enter INSERT mode', () => { + const testBuffer = createMockBuffer('hello world test', [0, 0]); + const { result } = renderVimHook(testBuffer); + + act(() => { + result.current.handleInput({ sequence: 'c' }); + }); + act(() => { + result.current.handleInput({ sequence: 'w' }); + }); + + expect(testBuffer.vimChangeWordForward).toHaveBeenCalledWith(1); + expect(result.current.mode).toBe('INSERT'); + expect(mockVimContext.setVimMode).toHaveBeenCalledWith('INSERT'); + }); + + it('should handle count with cw', () => { + const testBuffer = createMockBuffer('one two three four', [0, 0]); + const { result } = renderVimHook(testBuffer); + + act(() => { + result.current.handleInput({ sequence: '2' }); + }); + act(() => { + result.current.handleInput({ sequence: 'c' }); + }); + act(() => { + result.current.handleInput({ sequence: 'w' }); + }); + + expect(testBuffer.vimChangeWordForward).toHaveBeenCalledWith(2); + 
expect(result.current.mode).toBe('INSERT'); + }); + + it('should be repeatable with dot', () => { + const testBuffer = createMockBuffer('hello world test more', [0, 0]); + const { result } = renderVimHook(testBuffer); + + // Execute cw + act(() => { + result.current.handleInput({ sequence: 'c' }); + }); + act(() => { + result.current.handleInput({ sequence: 'w' }); + }); + + // Exit INSERT mode + exitInsertMode(result); + + vi.clearAllMocks(); + mockVimContext.setVimMode.mockClear(); + + // Execute dot repeat + act(() => { + result.current.handleInput({ sequence: '.' }); + }); + + expect(testBuffer.vimChangeWordForward).toHaveBeenCalledWith(1); + expect(result.current.mode).toBe('INSERT'); + }); + }); + + describe('ce (change word end)', () => { + it('should change from cursor to end of word and enter INSERT mode', () => { + const testBuffer = createMockBuffer('hello world test', [0, 1]); + const { result } = renderVimHook(testBuffer); + + act(() => { + result.current.handleInput({ sequence: 'c' }); + }); + act(() => { + result.current.handleInput({ sequence: 'e' }); + }); + + expect(testBuffer.vimChangeWordEnd).toHaveBeenCalledWith(1); + expect(result.current.mode).toBe('INSERT'); + }); + + it('should handle count with ce', () => { + const testBuffer = createMockBuffer('one two three four', [0, 0]); + const { result } = renderVimHook(testBuffer); + + act(() => { + result.current.handleInput({ sequence: '2' }); + }); + act(() => { + result.current.handleInput({ sequence: 'c' }); + }); + act(() => { + result.current.handleInput({ sequence: 'e' }); + }); + + expect(testBuffer.vimChangeWordEnd).toHaveBeenCalledWith(2); + expect(result.current.mode).toBe('INSERT'); + }); + }); + + describe('cc (change line)', () => { + it('should change entire line and enter INSERT mode', () => { + const testBuffer = createMockBuffer('hello world\nsecond line', [0, 5]); + const { result } = renderVimHook(testBuffer); + + act(() => { + result.current.handleInput({ sequence: 'c' }); + 
}); + act(() => { + result.current.handleInput({ sequence: 'c' }); + }); + + expect(testBuffer.vimChangeLine).toHaveBeenCalledWith(1); + expect(result.current.mode).toBe('INSERT'); + }); + + it('should change multiple lines with count', () => { + const testBuffer = createMockBuffer( + 'line1\nline2\nline3\nline4', + [1, 0], + ); + const { result } = renderVimHook(testBuffer); + + act(() => { + result.current.handleInput({ sequence: '3' }); + }); + act(() => { + result.current.handleInput({ sequence: 'c' }); + }); + act(() => { + result.current.handleInput({ sequence: 'c' }); + }); + + expect(testBuffer.vimChangeLine).toHaveBeenCalledWith(3); + expect(result.current.mode).toBe('INSERT'); + }); + + it('should be repeatable with dot', () => { + const testBuffer = createMockBuffer('line1\nline2\nline3', [0, 0]); + const { result } = renderVimHook(testBuffer); + + // Execute cc + act(() => { + result.current.handleInput({ sequence: 'c' }); + }); + act(() => { + result.current.handleInput({ sequence: 'c' }); + }); + + // Exit INSERT mode + exitInsertMode(result); + + vi.clearAllMocks(); + mockVimContext.setVimMode.mockClear(); + + // Execute dot repeat + act(() => { + result.current.handleInput({ sequence: '.' 
}); + }); + + expect(testBuffer.vimChangeLine).toHaveBeenCalledWith(1); + expect(result.current.mode).toBe('INSERT'); + }); + }); + + describe('db (delete word backward)', () => { + it('should delete from cursor to start of previous word', () => { + const testBuffer = createMockBuffer('hello world test', [0, 11]); + const { result } = renderVimHook(testBuffer); + + act(() => { + result.current.handleInput({ sequence: 'd' }); + }); + act(() => { + result.current.handleInput({ sequence: 'b' }); + }); + + expect(testBuffer.vimDeleteWordBackward).toHaveBeenCalledWith(1); + }); + + it('should handle count with db', () => { + const testBuffer = createMockBuffer('one two three four', [0, 18]); + const { result } = renderVimHook(testBuffer); + + act(() => { + result.current.handleInput({ sequence: '2' }); + }); + act(() => { + result.current.handleInput({ sequence: 'd' }); + }); + act(() => { + result.current.handleInput({ sequence: 'b' }); + }); + + expect(testBuffer.vimDeleteWordBackward).toHaveBeenCalledWith(2); + }); + }); + + describe('cb (change word backward)', () => { + it('should change from cursor to start of previous word and enter INSERT mode', () => { + const testBuffer = createMockBuffer('hello world test', [0, 11]); + const { result } = renderVimHook(testBuffer); + + act(() => { + result.current.handleInput({ sequence: 'c' }); + }); + act(() => { + result.current.handleInput({ sequence: 'b' }); + }); + + expect(testBuffer.vimChangeWordBackward).toHaveBeenCalledWith(1); + expect(result.current.mode).toBe('INSERT'); + }); + + it('should handle count with cb', () => { + const testBuffer = createMockBuffer('one two three four', [0, 18]); + const { result } = renderVimHook(testBuffer); + + act(() => { + result.current.handleInput({ sequence: '3' }); + }); + act(() => { + result.current.handleInput({ sequence: 'c' }); + }); + act(() => { + result.current.handleInput({ sequence: 'b' }); + }); + + expect(testBuffer.vimChangeWordBackward).toHaveBeenCalledWith(3); + 
expect(result.current.mode).toBe('INSERT'); + }); + }); + + describe('Pending state handling', () => { + it('should clear pending delete state after dw', () => { + const testBuffer = createMockBuffer('hello world', [0, 0]); + const { result } = renderVimHook(testBuffer); + + // Press 'd' to enter pending delete state + act(() => { + result.current.handleInput({ sequence: 'd' }); + }); + + // Complete with 'w' + act(() => { + result.current.handleInput({ sequence: 'w' }); + }); + + // Next 'd' should start a new pending state, not continue the previous one + act(() => { + result.current.handleInput({ sequence: 'd' }); + }); + + // This should trigger dd (delete line), not an error + act(() => { + result.current.handleInput({ sequence: 'd' }); + }); + + expect(testBuffer.vimDeleteLine).toHaveBeenCalledWith(1); + }); + + it('should clear pending change state after cw', () => { + const testBuffer = createMockBuffer('hello world', [0, 0]); + const { result } = renderVimHook(testBuffer); + + // Execute cw + act(() => { + result.current.handleInput({ sequence: 'c' }); + }); + act(() => { + result.current.handleInput({ sequence: 'w' }); + }); + + // Exit INSERT mode + exitInsertMode(result); + + // Next 'c' should start a new pending state + act(() => { + result.current.handleInput({ sequence: 'c' }); + }); + act(() => { + result.current.handleInput({ sequence: 'c' }); + }); + + expect(testBuffer.vimChangeLine).toHaveBeenCalledWith(1); + }); + + it('should clear pending state with escape', () => { + const testBuffer = createMockBuffer('hello world', [0, 0]); + const { result } = renderVimHook(testBuffer); + + // Enter pending delete state + act(() => { + result.current.handleInput({ sequence: 'd' }); + }); + + // Press escape to clear pending state + exitInsertMode(result); + + // Now 'w' should just move cursor, not delete + act(() => { + result.current.handleInput({ sequence: 'w' }); + }); + + expect(testBuffer.vimDeleteWordForward).not.toHaveBeenCalled(); + // w should 
move to next word after clearing pending state + expect(testBuffer.vimMoveWordForward).toHaveBeenCalledWith(1); + }); + }); + }); + + // Line operations (dd, cc) are tested in text-buffer.test.ts + + describe('Reducer-based integration tests', () => { + describe('de (delete word end)', () => { + it('should delete from cursor to end of current word', () => { + const initialState = { + lines: ['hello world test'], + cursorRow: 0, + cursorCol: 1, // cursor on 'e' in "hello" + preferredCol: null, + undoStack: [], + redoStack: [], + clipboard: null, + selectionAnchor: null, + }; + + const result = textBufferReducer(initialState, { + type: 'vim_delete_word_end', + payload: { count: 1 }, + }); + + // Should delete "ello" (from cursor to end of word), leaving "h world test" + expect(result.lines).toEqual(['h world test']); + expect(result.cursorRow).toBe(0); + expect(result.cursorCol).toBe(1); + }); + + it('should delete multiple word ends with count', () => { + const initialState = { + lines: ['hello world test more'], + cursorRow: 0, + cursorCol: 1, // cursor on 'e' in "hello" + preferredCol: null, + undoStack: [], + redoStack: [], + clipboard: null, + selectionAnchor: null, + }; + + const result = textBufferReducer(initialState, { + type: 'vim_delete_word_end', + payload: { count: 2 }, + }); + + // Should delete "ello world" (to end of second word), leaving "h test more" + expect(result.lines).toEqual(['h test more']); + expect(result.cursorRow).toBe(0); + expect(result.cursorCol).toBe(1); + }); + }); + + describe('db (delete word backward)', () => { + it('should delete from cursor to start of previous word', () => { + const initialState = { + lines: ['hello world test'], + cursorRow: 0, + cursorCol: 11, // cursor on 't' in "test" + preferredCol: null, + undoStack: [], + redoStack: [], + clipboard: null, + selectionAnchor: null, + }; + + const result = textBufferReducer(initialState, { + type: 'vim_delete_word_backward', + payload: { count: 1 }, + }); + + // Should 
delete "world" (previous word only), leaving "hello test" + expect(result.lines).toEqual(['hello test']); + expect(result.cursorRow).toBe(0); + expect(result.cursorCol).toBe(6); + }); + + it('should delete multiple words backward with count', () => { + const initialState = { + lines: ['hello world test more'], + cursorRow: 0, + cursorCol: 17, // cursor on 'm' in "more" + preferredCol: null, + undoStack: [], + redoStack: [], + clipboard: null, + selectionAnchor: null, + }; + + const result = textBufferReducer(initialState, { + type: 'vim_delete_word_backward', + payload: { count: 2 }, + }); + + // Should delete "world test " (two words backward), leaving "hello more" + expect(result.lines).toEqual(['hello more']); + expect(result.cursorRow).toBe(0); + expect(result.cursorCol).toBe(6); + }); + }); + + describe('cw (change word forward)', () => { + it('should delete from cursor to start of next word', () => { + const initialState = { + lines: ['hello world test'], + cursorRow: 0, + cursorCol: 0, // cursor on 'h' in "hello" + preferredCol: null, + undoStack: [], + redoStack: [], + clipboard: null, + selectionAnchor: null, + }; + + const result = textBufferReducer(initialState, { + type: 'vim_change_word_forward', + payload: { count: 1 }, + }); + + // Should delete "hello " (word + space), leaving "world test" + expect(result.lines).toEqual(['world test']); + expect(result.cursorRow).toBe(0); + expect(result.cursorCol).toBe(0); + }); + + it('should change multiple words with count', () => { + const initialState = { + lines: ['hello world test more'], + cursorRow: 0, + cursorCol: 0, + preferredCol: null, + undoStack: [], + redoStack: [], + clipboard: null, + selectionAnchor: null, + }; + + const result = textBufferReducer(initialState, { + type: 'vim_change_word_forward', + payload: { count: 2 }, + }); + + // Should delete "hello world " (two words), leaving "test more" + expect(result.lines).toEqual(['test more']); + expect(result.cursorRow).toBe(0); + 
expect(result.cursorCol).toBe(0); + }); + }); + + describe('ce (change word end)', () => { + it('should change from cursor to end of current word', () => { + const initialState = { + lines: ['hello world test'], + cursorRow: 0, + cursorCol: 1, // cursor on 'e' in "hello" + preferredCol: null, + undoStack: [], + redoStack: [], + clipboard: null, + selectionAnchor: null, + }; + + const result = textBufferReducer(initialState, { + type: 'vim_change_word_end', + payload: { count: 1 }, + }); + + // Should delete "ello" (from cursor to end of word), leaving "h world test" + expect(result.lines).toEqual(['h world test']); + expect(result.cursorRow).toBe(0); + expect(result.cursorCol).toBe(1); + }); + + it('should change multiple word ends with count', () => { + const initialState = { + lines: ['hello world test'], + cursorRow: 0, + cursorCol: 1, // cursor on 'e' in "hello" + preferredCol: null, + undoStack: [], + redoStack: [], + clipboard: null, + selectionAnchor: null, + }; + + const result = textBufferReducer(initialState, { + type: 'vim_change_word_end', + payload: { count: 2 }, + }); + + // Should delete "ello world" (to end of second word), leaving "h test" + expect(result.lines).toEqual(['h test']); + expect(result.cursorRow).toBe(0); + expect(result.cursorCol).toBe(1); + }); + }); + + describe('cb (change word backward)', () => { + it('should change from cursor to start of previous word', () => { + const initialState = { + lines: ['hello world test'], + cursorRow: 0, + cursorCol: 11, // cursor on 't' in "test" + preferredCol: null, + undoStack: [], + redoStack: [], + clipboard: null, + selectionAnchor: null, + }; + + const result = textBufferReducer(initialState, { + type: 'vim_change_word_backward', + payload: { count: 1 }, + }); + + // Should delete "world" (previous word only), leaving "hello test" + expect(result.lines).toEqual(['hello test']); + expect(result.cursorRow).toBe(0); + expect(result.cursorCol).toBe(6); + }); + }); + + describe('cc (change line)', 
() => { + it('should clear the line and place cursor at the start', () => { + const initialState = { + lines: [' hello world'], + cursorRow: 0, + cursorCol: 5, // cursor on 'o' + preferredCol: null, + undoStack: [], + redoStack: [], + clipboard: null, + selectionAnchor: null, + }; + + const result = textBufferReducer(initialState, { + type: 'vim_change_line', + payload: { count: 1 }, + }); + + expect(result.lines).toEqual(['']); + expect(result.cursorRow).toBe(0); + expect(result.cursorCol).toBe(0); + }); + }); + + describe('dd (delete line)', () => { + it('should delete the current line', () => { + const initialState = { + lines: ['line1', 'line2', 'line3'], + cursorRow: 1, + cursorCol: 2, + preferredCol: null, + undoStack: [], + redoStack: [], + clipboard: null, + selectionAnchor: null, + }; + + const result = textBufferReducer(initialState, { + type: 'vim_delete_line', + payload: { count: 1 }, + }); + + expect(result.lines).toEqual(['line1', 'line3']); + expect(result.cursorRow).toBe(1); + expect(result.cursorCol).toBe(0); + }); + + it('should delete multiple lines with count', () => { + const initialState = { + lines: ['line1', 'line2', 'line3', 'line4'], + cursorRow: 1, + cursorCol: 2, + preferredCol: null, + undoStack: [], + redoStack: [], + clipboard: null, + selectionAnchor: null, + }; + + const result = textBufferReducer(initialState, { + type: 'vim_delete_line', + payload: { count: 2 }, + }); + + // Should delete lines 1 and 2 + expect(result.lines).toEqual(['line1', 'line4']); + expect(result.cursorRow).toBe(1); + expect(result.cursorCol).toBe(0); + }); + + it('should handle deleting last line', () => { + const initialState = { + lines: ['only line'], + cursorRow: 0, + cursorCol: 3, + preferredCol: null, + undoStack: [], + redoStack: [], + clipboard: null, + selectionAnchor: null, + }; + + const result = textBufferReducer(initialState, { + type: 'vim_delete_line', + payload: { count: 1 }, + }); + + // Should leave an empty line when deleting the only 
line + expect(result.lines).toEqual(['']); + expect(result.cursorRow).toBe(0); + expect(result.cursorCol).toBe(0); + }); + }); + + describe('D (delete to end of line)', () => { + it('should delete from cursor to end of line', () => { + const initialState = { + lines: ['hello world test'], + cursorRow: 0, + cursorCol: 6, // cursor on 'w' in "world" + preferredCol: null, + undoStack: [], + redoStack: [], + clipboard: null, + selectionAnchor: null, + }; + + const result = textBufferReducer(initialState, { + type: 'vim_delete_to_end_of_line', + }); + + // Should delete "world test", leaving "hello " + expect(result.lines).toEqual(['hello ']); + expect(result.cursorRow).toBe(0); + expect(result.cursorCol).toBe(6); + }); + + it('should handle D at end of line', () => { + const initialState = { + lines: ['hello world'], + cursorRow: 0, + cursorCol: 11, // cursor at end + preferredCol: null, + undoStack: [], + redoStack: [], + clipboard: null, + selectionAnchor: null, + }; + + const result = textBufferReducer(initialState, { + type: 'vim_delete_to_end_of_line', + }); + + // Should not change anything when at end of line + expect(result.lines).toEqual(['hello world']); + expect(result.cursorRow).toBe(0); + expect(result.cursorCol).toBe(11); + }); + }); + + describe('C (change to end of line)', () => { + it('should change from cursor to end of line', () => { + const initialState = { + lines: ['hello world test'], + cursorRow: 0, + cursorCol: 6, // cursor on 'w' in "world" + preferredCol: null, + undoStack: [], + redoStack: [], + clipboard: null, + selectionAnchor: null, + }; + + const result = textBufferReducer(initialState, { + type: 'vim_change_to_end_of_line', + }); + + // Should delete "world test", leaving "hello " + expect(result.lines).toEqual(['hello ']); + expect(result.cursorRow).toBe(0); + expect(result.cursorCol).toBe(6); + }); + + it('should handle C at beginning of line', () => { + const initialState = { + lines: ['hello world'], + cursorRow: 0, + cursorCol: 0, 
+ preferredCol: null, + undoStack: [], + redoStack: [], + clipboard: null, + selectionAnchor: null, + }; + + const result = textBufferReducer(initialState, { + type: 'vim_change_to_end_of_line', + }); + + // Should delete entire line content + expect(result.lines).toEqual(['']); + expect(result.cursorRow).toBe(0); + expect(result.cursorCol).toBe(0); + }); + }); + }); +}); diff --git a/packages/cli/src/ui/hooks/vim.ts b/packages/cli/src/ui/hooks/vim.ts new file mode 100644 index 000000000..cb65e1ee9 --- /dev/null +++ b/packages/cli/src/ui/hooks/vim.ts @@ -0,0 +1,774 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { useCallback, useReducer, useEffect } from 'react'; +import type { Key } from './useKeypress.js'; +import type { TextBuffer } from '../components/shared/text-buffer.js'; +import { useVimMode } from '../contexts/VimModeContext.js'; + +export type VimMode = 'NORMAL' | 'INSERT'; + +// Constants +const DIGIT_MULTIPLIER = 10; +const DEFAULT_COUNT = 1; +const DIGIT_1_TO_9 = /^[1-9]$/; + +// Command types +const CMD_TYPES = { + DELETE_WORD_FORWARD: 'dw', + DELETE_WORD_BACKWARD: 'db', + DELETE_WORD_END: 'de', + CHANGE_WORD_FORWARD: 'cw', + CHANGE_WORD_BACKWARD: 'cb', + CHANGE_WORD_END: 'ce', + DELETE_CHAR: 'x', + DELETE_LINE: 'dd', + CHANGE_LINE: 'cc', + DELETE_TO_EOL: 'D', + CHANGE_TO_EOL: 'C', + CHANGE_MOVEMENT: { + LEFT: 'ch', + DOWN: 'cj', + UP: 'ck', + RIGHT: 'cl', + }, +} as const; + +// Helper function to clear pending state +const createClearPendingState = () => ({ + count: 0, + pendingOperator: null as 'g' | 'd' | 'c' | null, +}); + +// State and action types for useReducer +type VimState = { + mode: VimMode; + count: number; + pendingOperator: 'g' | 'd' | 'c' | null; + lastCommand: { type: string; count: number } | null; +}; + +type VimAction = + | { type: 'SET_MODE'; mode: VimMode } + | { type: 'SET_COUNT'; count: number } + | { type: 'INCREMENT_COUNT'; digit: number } + | { type: 'CLEAR_COUNT' } 
+ | { type: 'SET_PENDING_OPERATOR'; operator: 'g' | 'd' | 'c' | null } + | { + type: 'SET_LAST_COMMAND'; + command: { type: string; count: number } | null; + } + | { type: 'CLEAR_PENDING_STATES' } + | { type: 'ESCAPE_TO_NORMAL' }; + +const initialVimState: VimState = { + mode: 'NORMAL', + count: 0, + pendingOperator: null, + lastCommand: null, +}; + +// Reducer function +const vimReducer = (state: VimState, action: VimAction): VimState => { + switch (action.type) { + case 'SET_MODE': + return { ...state, mode: action.mode }; + + case 'SET_COUNT': + return { ...state, count: action.count }; + + case 'INCREMENT_COUNT': + return { ...state, count: state.count * DIGIT_MULTIPLIER + action.digit }; + + case 'CLEAR_COUNT': + return { ...state, count: 0 }; + + case 'SET_PENDING_OPERATOR': + return { ...state, pendingOperator: action.operator }; + + case 'SET_LAST_COMMAND': + return { ...state, lastCommand: action.command }; + + case 'CLEAR_PENDING_STATES': + return { + ...state, + ...createClearPendingState(), + }; + + case 'ESCAPE_TO_NORMAL': + // Handle escape - clear all pending states (mode is updated via context) + return { + ...state, + ...createClearPendingState(), + }; + + default: + return state; + } +}; + +/** + * React hook that provides vim-style editing functionality for text input. + * + * Features: + * - Modal editing (INSERT/NORMAL modes) + * - Navigation: h,j,k,l,w,b,e,0,$,^,gg,G with count prefixes + * - Editing: x,a,i,o,O,A,I,d,c,D,C with count prefixes + * - Complex operations: dd,cc,dw,cw,db,cb,de,ce + * - Command repetition (.) 
+ * - Settings persistence + * + * @param buffer - TextBuffer instance for text manipulation + * @param onSubmit - Optional callback for command submission + * @returns Object with vim state and input handler + */ +export function useVim(buffer: TextBuffer, onSubmit?: (value: string) => void) { + const { vimEnabled, vimMode, setVimMode } = useVimMode(); + const [state, dispatch] = useReducer(vimReducer, initialVimState); + + // Sync vim mode from context to local state + useEffect(() => { + dispatch({ type: 'SET_MODE', mode: vimMode }); + }, [vimMode]); + + // Helper to update mode in both reducer and context + const updateMode = useCallback( + (mode: VimMode) => { + setVimMode(mode); + dispatch({ type: 'SET_MODE', mode }); + }, + [setVimMode], + ); + + // Helper functions using the reducer state + const getCurrentCount = useCallback( + () => state.count || DEFAULT_COUNT, + [state.count], + ); + + /** Executes common commands to eliminate duplication in dot (.) repeat command */ + const executeCommand = useCallback( + (cmdType: string, count: number) => { + switch (cmdType) { + case CMD_TYPES.DELETE_WORD_FORWARD: { + buffer.vimDeleteWordForward(count); + break; + } + + case CMD_TYPES.DELETE_WORD_BACKWARD: { + buffer.vimDeleteWordBackward(count); + break; + } + + case CMD_TYPES.DELETE_WORD_END: { + buffer.vimDeleteWordEnd(count); + break; + } + + case CMD_TYPES.CHANGE_WORD_FORWARD: { + buffer.vimChangeWordForward(count); + updateMode('INSERT'); + break; + } + + case CMD_TYPES.CHANGE_WORD_BACKWARD: { + buffer.vimChangeWordBackward(count); + updateMode('INSERT'); + break; + } + + case CMD_TYPES.CHANGE_WORD_END: { + buffer.vimChangeWordEnd(count); + updateMode('INSERT'); + break; + } + + case CMD_TYPES.DELETE_CHAR: { + buffer.vimDeleteChar(count); + break; + } + + case CMD_TYPES.DELETE_LINE: { + buffer.vimDeleteLine(count); + break; + } + + case CMD_TYPES.CHANGE_LINE: { + buffer.vimChangeLine(count); + updateMode('INSERT'); + break; + } + + case 
CMD_TYPES.CHANGE_MOVEMENT.LEFT: + case CMD_TYPES.CHANGE_MOVEMENT.DOWN: + case CMD_TYPES.CHANGE_MOVEMENT.UP: + case CMD_TYPES.CHANGE_MOVEMENT.RIGHT: { + const movementMap: Record = { + [CMD_TYPES.CHANGE_MOVEMENT.LEFT]: 'h', + [CMD_TYPES.CHANGE_MOVEMENT.DOWN]: 'j', + [CMD_TYPES.CHANGE_MOVEMENT.UP]: 'k', + [CMD_TYPES.CHANGE_MOVEMENT.RIGHT]: 'l', + }; + const movementType = movementMap[cmdType]; + if (movementType) { + buffer.vimChangeMovement(movementType, count); + updateMode('INSERT'); + } + break; + } + + case CMD_TYPES.DELETE_TO_EOL: { + buffer.vimDeleteToEndOfLine(); + break; + } + + case CMD_TYPES.CHANGE_TO_EOL: { + buffer.vimChangeToEndOfLine(); + updateMode('INSERT'); + break; + } + + default: + return false; + } + return true; + }, + [buffer, updateMode], + ); + + /** + * Handles key input in INSERT mode + * @param normalizedKey - The normalized key input + * @returns boolean indicating if the key was handled + */ + const handleInsertModeInput = useCallback( + (normalizedKey: Key): boolean => { + // Handle escape key immediately - switch to NORMAL mode on any escape + if (normalizedKey.name === 'escape') { + // Vim behavior: move cursor left when exiting insert mode (unless at beginning of line) + buffer.vimEscapeInsertMode(); + dispatch({ type: 'ESCAPE_TO_NORMAL' }); + updateMode('NORMAL'); + return true; + } + + // In INSERT mode, let InputPrompt handle completion keys and special commands + if ( + normalizedKey.name === 'tab' || + (normalizedKey.name === 'return' && !normalizedKey.ctrl) || + normalizedKey.name === 'up' || + normalizedKey.name === 'down' + ) { + return false; // Let InputPrompt handle completion + } + + // Let InputPrompt handle Ctrl+V for clipboard image pasting + if (normalizedKey.ctrl && normalizedKey.name === 'v') { + return false; // Let InputPrompt handle clipboard functionality + } + + // Special handling for Enter key to allow command submission (lower priority than completion) + if ( + normalizedKey.name === 'return' && + 
!normalizedKey.ctrl && + !normalizedKey.meta + ) { + if (buffer.text.trim() && onSubmit) { + // Handle command submission directly + const submittedValue = buffer.text; + buffer.setText(''); + onSubmit(submittedValue); + return true; + } + return true; // Handled by vim (even if no onSubmit callback) + } + + // useKeypress already provides the correct format for TextBuffer + buffer.handleInput(normalizedKey); + return true; // Handled by vim + }, + [buffer, dispatch, updateMode, onSubmit], + ); + + /** + * Normalizes key input to ensure all required properties are present + * @param key - Raw key input + * @returns Normalized key with all properties + */ + const normalizeKey = useCallback( + (key: Key): Key => ({ + name: key.name || '', + sequence: key.sequence || '', + ctrl: key.ctrl || false, + meta: key.meta || false, + shift: key.shift || false, + paste: key.paste || false, + }), + [], + ); + + /** + * Handles change movement commands (ch, cj, ck, cl) + * @param movement - The movement direction + * @returns boolean indicating if command was handled + */ + const handleChangeMovement = useCallback( + (movement: 'h' | 'j' | 'k' | 'l'): boolean => { + const count = getCurrentCount(); + dispatch({ type: 'CLEAR_COUNT' }); + buffer.vimChangeMovement(movement, count); + updateMode('INSERT'); + + const cmdTypeMap = { + h: CMD_TYPES.CHANGE_MOVEMENT.LEFT, + j: CMD_TYPES.CHANGE_MOVEMENT.DOWN, + k: CMD_TYPES.CHANGE_MOVEMENT.UP, + l: CMD_TYPES.CHANGE_MOVEMENT.RIGHT, + }; + + dispatch({ + type: 'SET_LAST_COMMAND', + command: { type: cmdTypeMap[movement], count }, + }); + dispatch({ type: 'SET_PENDING_OPERATOR', operator: null }); + return true; + }, + [getCurrentCount, dispatch, buffer, updateMode], + ); + + /** + * Handles operator-motion commands (dw/cw, db/cb, de/ce) + * @param operator - The operator type ('d' for delete, 'c' for change) + * @param motion - The motion type ('w', 'b', 'e') + * @returns boolean indicating if command was handled + */ + const 
handleOperatorMotion = useCallback( + (operator: 'd' | 'c', motion: 'w' | 'b' | 'e'): boolean => { + const count = getCurrentCount(); + + const commandMap = { + d: { + w: CMD_TYPES.DELETE_WORD_FORWARD, + b: CMD_TYPES.DELETE_WORD_BACKWARD, + e: CMD_TYPES.DELETE_WORD_END, + }, + c: { + w: CMD_TYPES.CHANGE_WORD_FORWARD, + b: CMD_TYPES.CHANGE_WORD_BACKWARD, + e: CMD_TYPES.CHANGE_WORD_END, + }, + }; + + const cmdType = commandMap[operator][motion]; + executeCommand(cmdType, count); + + dispatch({ + type: 'SET_LAST_COMMAND', + command: { type: cmdType, count }, + }); + dispatch({ type: 'CLEAR_COUNT' }); + dispatch({ type: 'SET_PENDING_OPERATOR', operator: null }); + + return true; + }, + [getCurrentCount, executeCommand, dispatch], + ); + + const handleInput = useCallback( + (key: Key): boolean => { + if (!vimEnabled) { + return false; // Let InputPrompt handle it + } + + let normalizedKey: Key; + try { + normalizedKey = normalizeKey(key); + } catch (error) { + // Handle malformed key inputs gracefully + console.warn('Malformed key input in vim mode:', key, error); + return false; + } + + // Handle INSERT mode + if (state.mode === 'INSERT') { + return handleInsertModeInput(normalizedKey); + } + + // Handle NORMAL mode + if (state.mode === 'NORMAL') { + // Handle Escape key in NORMAL mode - clear all pending states + if (normalizedKey.name === 'escape') { + dispatch({ type: 'CLEAR_PENDING_STATES' }); + return true; // Handled by vim + } + + // Handle count input (numbers 1-9, and 0 if count > 0) + if ( + DIGIT_1_TO_9.test(normalizedKey.sequence) || + (normalizedKey.sequence === '0' && state.count > 0) + ) { + dispatch({ + type: 'INCREMENT_COUNT', + digit: parseInt(normalizedKey.sequence, 10), + }); + return true; // Handled by vim + } + + const repeatCount = getCurrentCount(); + + switch (normalizedKey.sequence) { + case 'h': { + // Check if this is part of a change command (ch) + if (state.pendingOperator === 'c') { + return handleChangeMovement('h'); + } + + // Normal 
left movement + buffer.vimMoveLeft(repeatCount); + dispatch({ type: 'CLEAR_COUNT' }); + return true; + } + + case 'j': { + // Check if this is part of a change command (cj) + if (state.pendingOperator === 'c') { + return handleChangeMovement('j'); + } + + // Normal down movement + buffer.vimMoveDown(repeatCount); + dispatch({ type: 'CLEAR_COUNT' }); + return true; + } + + case 'k': { + // Check if this is part of a change command (ck) + if (state.pendingOperator === 'c') { + return handleChangeMovement('k'); + } + + // Normal up movement + buffer.vimMoveUp(repeatCount); + dispatch({ type: 'CLEAR_COUNT' }); + return true; + } + + case 'l': { + // Check if this is part of a change command (cl) + if (state.pendingOperator === 'c') { + return handleChangeMovement('l'); + } + + // Normal right movement + buffer.vimMoveRight(repeatCount); + dispatch({ type: 'CLEAR_COUNT' }); + return true; + } + + case 'w': { + // Check if this is part of a delete or change command (dw/cw) + if (state.pendingOperator === 'd') { + return handleOperatorMotion('d', 'w'); + } + if (state.pendingOperator === 'c') { + return handleOperatorMotion('c', 'w'); + } + + // Normal word movement + buffer.vimMoveWordForward(repeatCount); + dispatch({ type: 'CLEAR_COUNT' }); + return true; + } + + case 'b': { + // Check if this is part of a delete or change command (db/cb) + if (state.pendingOperator === 'd') { + return handleOperatorMotion('d', 'b'); + } + if (state.pendingOperator === 'c') { + return handleOperatorMotion('c', 'b'); + } + + // Normal backward word movement + buffer.vimMoveWordBackward(repeatCount); + dispatch({ type: 'CLEAR_COUNT' }); + return true; + } + + case 'e': { + // Check if this is part of a delete or change command (de/ce) + if (state.pendingOperator === 'd') { + return handleOperatorMotion('d', 'e'); + } + if (state.pendingOperator === 'c') { + return handleOperatorMotion('c', 'e'); + } + + // Normal word end movement + buffer.vimMoveWordEnd(repeatCount); + dispatch({ type: 
'CLEAR_COUNT' }); + return true; + } + + case 'x': { + // Delete character under cursor + buffer.vimDeleteChar(repeatCount); + dispatch({ + type: 'SET_LAST_COMMAND', + command: { type: CMD_TYPES.DELETE_CHAR, count: repeatCount }, + }); + dispatch({ type: 'CLEAR_COUNT' }); + return true; + } + + case 'i': { + // Enter INSERT mode at current position + buffer.vimInsertAtCursor(); + updateMode('INSERT'); + dispatch({ type: 'CLEAR_COUNT' }); + return true; + } + + case 'a': { + // Enter INSERT mode after current position + buffer.vimAppendAtCursor(); + updateMode('INSERT'); + dispatch({ type: 'CLEAR_COUNT' }); + return true; + } + + case 'o': { + // Insert new line after current line and enter INSERT mode + buffer.vimOpenLineBelow(); + updateMode('INSERT'); + dispatch({ type: 'CLEAR_COUNT' }); + return true; + } + + case 'O': { + // Insert new line before current line and enter INSERT mode + buffer.vimOpenLineAbove(); + updateMode('INSERT'); + dispatch({ type: 'CLEAR_COUNT' }); + return true; + } + + case '0': { + // Move to start of line + buffer.vimMoveToLineStart(); + dispatch({ type: 'CLEAR_COUNT' }); + return true; + } + + case '$': { + // Move to end of line + buffer.vimMoveToLineEnd(); + dispatch({ type: 'CLEAR_COUNT' }); + return true; + } + + case '^': { + // Move to first non-whitespace character + buffer.vimMoveToFirstNonWhitespace(); + dispatch({ type: 'CLEAR_COUNT' }); + return true; + } + + case 'g': { + if (state.pendingOperator === 'g') { + // Second 'g' - go to first line (gg command) + buffer.vimMoveToFirstLine(); + dispatch({ type: 'SET_PENDING_OPERATOR', operator: null }); + } else { + // First 'g' - wait for second g + dispatch({ type: 'SET_PENDING_OPERATOR', operator: 'g' }); + } + dispatch({ type: 'CLEAR_COUNT' }); + return true; + } + + case 'G': { + if (state.count > 0) { + // Go to specific line number (1-based) when a count was provided + buffer.vimMoveToLine(state.count); + } else { + // Go to last line when no count was provided + 
buffer.vimMoveToLastLine(); + } + dispatch({ type: 'CLEAR_COUNT' }); + return true; + } + + case 'I': { + // Enter INSERT mode at start of line (first non-whitespace) + buffer.vimInsertAtLineStart(); + updateMode('INSERT'); + dispatch({ type: 'CLEAR_COUNT' }); + return true; + } + + case 'A': { + // Enter INSERT mode at end of line + buffer.vimAppendAtLineEnd(); + updateMode('INSERT'); + dispatch({ type: 'CLEAR_COUNT' }); + return true; + } + + case 'd': { + if (state.pendingOperator === 'd') { + // Second 'd' - delete N lines (dd command) + const repeatCount = getCurrentCount(); + executeCommand(CMD_TYPES.DELETE_LINE, repeatCount); + dispatch({ + type: 'SET_LAST_COMMAND', + command: { type: CMD_TYPES.DELETE_LINE, count: repeatCount }, + }); + dispatch({ type: 'CLEAR_COUNT' }); + dispatch({ type: 'SET_PENDING_OPERATOR', operator: null }); + } else { + // First 'd' - wait for movement command + dispatch({ type: 'SET_PENDING_OPERATOR', operator: 'd' }); + } + return true; + } + + case 'c': { + if (state.pendingOperator === 'c') { + // Second 'c' - change N entire lines (cc command) + const repeatCount = getCurrentCount(); + executeCommand(CMD_TYPES.CHANGE_LINE, repeatCount); + dispatch({ + type: 'SET_LAST_COMMAND', + command: { type: CMD_TYPES.CHANGE_LINE, count: repeatCount }, + }); + dispatch({ type: 'CLEAR_COUNT' }); + dispatch({ type: 'SET_PENDING_OPERATOR', operator: null }); + } else { + // First 'c' - wait for movement command + dispatch({ type: 'SET_PENDING_OPERATOR', operator: 'c' }); + } + return true; + } + + case 'D': { + // Delete from cursor to end of line (equivalent to d$) + executeCommand(CMD_TYPES.DELETE_TO_EOL, 1); + dispatch({ + type: 'SET_LAST_COMMAND', + command: { type: CMD_TYPES.DELETE_TO_EOL, count: 1 }, + }); + dispatch({ type: 'CLEAR_COUNT' }); + return true; + } + + case 'C': { + // Change from cursor to end of line (equivalent to c$) + executeCommand(CMD_TYPES.CHANGE_TO_EOL, 1); + dispatch({ + type: 'SET_LAST_COMMAND', + command: { type: 
CMD_TYPES.CHANGE_TO_EOL, count: 1 }, + }); + dispatch({ type: 'CLEAR_COUNT' }); + return true; + } + + case '.': { + // Repeat last command + if (state.lastCommand) { + const cmdData = state.lastCommand; + + // All repeatable commands are now handled by executeCommand + executeCommand(cmdData.type, cmdData.count); + } + + dispatch({ type: 'CLEAR_COUNT' }); + return true; + } + + default: { + // Check for arrow keys (they have different sequences but known names) + if (normalizedKey.name === 'left') { + // Left arrow - same as 'h' + if (state.pendingOperator === 'c') { + return handleChangeMovement('h'); + } + + // Normal left movement (same as 'h') + buffer.vimMoveLeft(repeatCount); + dispatch({ type: 'CLEAR_COUNT' }); + return true; + } + + if (normalizedKey.name === 'down') { + // Down arrow - same as 'j' + if (state.pendingOperator === 'c') { + return handleChangeMovement('j'); + } + + // Normal down movement (same as 'j') + buffer.vimMoveDown(repeatCount); + dispatch({ type: 'CLEAR_COUNT' }); + return true; + } + + if (normalizedKey.name === 'up') { + // Up arrow - same as 'k' + if (state.pendingOperator === 'c') { + return handleChangeMovement('k'); + } + + // Normal up movement (same as 'k') + buffer.vimMoveUp(repeatCount); + dispatch({ type: 'CLEAR_COUNT' }); + return true; + } + + if (normalizedKey.name === 'right') { + // Right arrow - same as 'l' + if (state.pendingOperator === 'c') { + return handleChangeMovement('l'); + } + + // Normal right movement (same as 'l') + buffer.vimMoveRight(repeatCount); + dispatch({ type: 'CLEAR_COUNT' }); + return true; + } + + // Unknown command, clear count and pending states + dispatch({ type: 'CLEAR_PENDING_STATES' }); + return true; // Still handled by vim to prevent other handlers + } + } + } + + return false; // Not handled by vim + }, + [ + vimEnabled, + normalizeKey, + handleInsertModeInput, + state.mode, + state.count, + state.pendingOperator, + state.lastCommand, + dispatch, + getCurrentCount, + 
handleChangeMovement, + handleOperatorMotion, + buffer, + executeCommand, + updateMode, + ], + ); + + return { + mode: state.mode, + vimModeEnabled: vimEnabled, + handleInput, // Expose the input handler for InputPrompt to use + }; +} diff --git a/packages/cli/src/ui/themes/ansi-light.ts b/packages/cli/src/ui/themes/ansi-light.ts index 31af6aca9..00f9bbccb 100644 --- a/packages/cli/src/ui/themes/ansi-light.ts +++ b/packages/cli/src/ui/themes/ansi-light.ts @@ -9,7 +9,7 @@ import { type ColorsTheme, Theme } from './theme.js'; const ansiLightColors: ColorsTheme = { type: 'light', Background: 'white', - Foreground: 'black', + Foreground: '#444', LightBlue: 'blue', AccentBlue: 'blue', AccentPurple: 'purple', @@ -17,6 +17,8 @@ const ansiLightColors: ColorsTheme = { AccentGreen: 'green', AccentYellow: 'orange', AccentRed: 'red', + DiffAdded: '#E5F2E5', + DiffRemoved: '#FFE5E5', Comment: 'gray', Gray: 'gray', GradientColors: ['blue', 'green'], diff --git a/packages/cli/src/ui/themes/ansi.ts b/packages/cli/src/ui/themes/ansi.ts index 4ef694544..2afc135c1 100644 --- a/packages/cli/src/ui/themes/ansi.ts +++ b/packages/cli/src/ui/themes/ansi.ts @@ -17,6 +17,8 @@ const ansiColors: ColorsTheme = { AccentGreen: 'green', AccentYellow: 'yellow', AccentRed: 'red', + DiffAdded: '#003300', + DiffRemoved: '#4D0000', Comment: 'gray', Gray: 'gray', GradientColors: ['cyan', 'green'], diff --git a/packages/cli/src/ui/themes/atom-one-dark.ts b/packages/cli/src/ui/themes/atom-one-dark.ts index 951b88984..5545971e0 100644 --- a/packages/cli/src/ui/themes/atom-one-dark.ts +++ b/packages/cli/src/ui/themes/atom-one-dark.ts @@ -17,6 +17,8 @@ const atomOneDarkColors: ColorsTheme = { AccentGreen: '#98c379', AccentYellow: '#e6c07b', AccentRed: '#e06c75', + DiffAdded: '#39544E', + DiffRemoved: '#562B2F', Comment: '#5c6370', Gray: '#5c6370', GradientColors: ['#61aeee', '#98c379'], diff --git a/packages/cli/src/ui/themes/ayu-light.ts b/packages/cli/src/ui/themes/ayu-light.ts index 450041072..8410cfb27 
100644 --- a/packages/cli/src/ui/themes/ayu-light.ts +++ b/packages/cli/src/ui/themes/ayu-light.ts @@ -17,8 +17,10 @@ const ayuLightColors: ColorsTheme = { AccentGreen: '#86b300', AccentYellow: '#f2ae49', AccentRed: '#f07171', + DiffAdded: '#C6EAD8', + DiffRemoved: '#FFCCCC', Comment: '#ABADB1', - Gray: '#CCCFD3', + Gray: '#a6aaaf', GradientColors: ['#399ee6', '#86b300'], }; diff --git a/packages/cli/src/ui/themes/ayu.ts b/packages/cli/src/ui/themes/ayu.ts index a5cfc7dba..1d1fc7d0e 100644 --- a/packages/cli/src/ui/themes/ayu.ts +++ b/packages/cli/src/ui/themes/ayu.ts @@ -17,8 +17,10 @@ const ayuDarkColors: ColorsTheme = { AccentGreen: '#AAD94C', AccentYellow: '#FFB454', AccentRed: '#F26D78', + DiffAdded: '#293022', + DiffRemoved: '#3D1215', Comment: '#646A71', - Gray: '##3D4149', + Gray: '#3D4149', GradientColors: ['#FFB454', '#F26D78'], }; diff --git a/packages/cli/src/ui/themes/color-utils.test.ts b/packages/cli/src/ui/themes/color-utils.test.ts new file mode 100644 index 000000000..cafc28dda --- /dev/null +++ b/packages/cli/src/ui/themes/color-utils.test.ts @@ -0,0 +1,221 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect } from 'vitest'; +import { + isValidColor, + resolveColor, + CSS_NAME_TO_HEX_MAP, + INK_SUPPORTED_NAMES, +} from './color-utils.js'; + +describe('Color Utils', () => { + describe('isValidColor', () => { + it('should validate hex colors', () => { + expect(isValidColor('#ff0000')).toBe(true); + expect(isValidColor('#00ff00')).toBe(true); + expect(isValidColor('#0000ff')).toBe(true); + expect(isValidColor('#fff')).toBe(true); + expect(isValidColor('#000')).toBe(true); + expect(isValidColor('#FF0000')).toBe(true); // Case insensitive + }); + + it('should validate Ink-supported color names', () => { + expect(isValidColor('black')).toBe(true); + expect(isValidColor('red')).toBe(true); + expect(isValidColor('green')).toBe(true); + expect(isValidColor('yellow')).toBe(true); 
+ expect(isValidColor('blue')).toBe(true); + expect(isValidColor('cyan')).toBe(true); + expect(isValidColor('magenta')).toBe(true); + expect(isValidColor('white')).toBe(true); + expect(isValidColor('gray')).toBe(true); + expect(isValidColor('grey')).toBe(true); + expect(isValidColor('blackbright')).toBe(true); + expect(isValidColor('redbright')).toBe(true); + expect(isValidColor('greenbright')).toBe(true); + expect(isValidColor('yellowbright')).toBe(true); + expect(isValidColor('bluebright')).toBe(true); + expect(isValidColor('cyanbright')).toBe(true); + expect(isValidColor('magentabright')).toBe(true); + expect(isValidColor('whitebright')).toBe(true); + }); + + it('should validate Ink-supported color names case insensitive', () => { + expect(isValidColor('BLACK')).toBe(true); + expect(isValidColor('Red')).toBe(true); + expect(isValidColor('GREEN')).toBe(true); + }); + + it('should validate CSS color names', () => { + expect(isValidColor('darkkhaki')).toBe(true); + expect(isValidColor('coral')).toBe(true); + expect(isValidColor('teal')).toBe(true); + expect(isValidColor('tomato')).toBe(true); + expect(isValidColor('turquoise')).toBe(true); + expect(isValidColor('violet')).toBe(true); + expect(isValidColor('wheat')).toBe(true); + expect(isValidColor('whitesmoke')).toBe(true); + expect(isValidColor('yellowgreen')).toBe(true); + }); + + it('should validate CSS color names case insensitive', () => { + expect(isValidColor('DARKKHAKI')).toBe(true); + expect(isValidColor('Coral')).toBe(true); + expect(isValidColor('TEAL')).toBe(true); + }); + + it('should reject invalid color names', () => { + expect(isValidColor('invalidcolor')).toBe(false); + expect(isValidColor('notacolor')).toBe(false); + expect(isValidColor('')).toBe(false); + }); + }); + + describe('resolveColor', () => { + it('should resolve hex colors', () => { + expect(resolveColor('#ff0000')).toBe('#ff0000'); + expect(resolveColor('#00ff00')).toBe('#00ff00'); + expect(resolveColor('#0000ff')).toBe('#0000ff'); + 
expect(resolveColor('#fff')).toBe('#fff'); + expect(resolveColor('#000')).toBe('#000'); + }); + + it('should resolve Ink-supported color names', () => { + expect(resolveColor('black')).toBe('black'); + expect(resolveColor('red')).toBe('red'); + expect(resolveColor('green')).toBe('green'); + expect(resolveColor('yellow')).toBe('yellow'); + expect(resolveColor('blue')).toBe('blue'); + expect(resolveColor('cyan')).toBe('cyan'); + expect(resolveColor('magenta')).toBe('magenta'); + expect(resolveColor('white')).toBe('white'); + expect(resolveColor('gray')).toBe('gray'); + expect(resolveColor('grey')).toBe('grey'); + }); + + it('should resolve CSS color names to hex', () => { + expect(resolveColor('darkkhaki')).toBe('#bdb76b'); + expect(resolveColor('coral')).toBe('#ff7f50'); + expect(resolveColor('teal')).toBe('#008080'); + expect(resolveColor('tomato')).toBe('#ff6347'); + expect(resolveColor('turquoise')).toBe('#40e0d0'); + expect(resolveColor('violet')).toBe('#ee82ee'); + expect(resolveColor('wheat')).toBe('#f5deb3'); + expect(resolveColor('whitesmoke')).toBe('#f5f5f5'); + expect(resolveColor('yellowgreen')).toBe('#9acd32'); + }); + + it('should handle case insensitive color names', () => { + expect(resolveColor('DARKKHAKI')).toBe('#bdb76b'); + expect(resolveColor('Coral')).toBe('#ff7f50'); + expect(resolveColor('TEAL')).toBe('#008080'); + }); + + it('should return undefined for invalid colors', () => { + expect(resolveColor('invalidcolor')).toBeUndefined(); + expect(resolveColor('notacolor')).toBeUndefined(); + expect(resolveColor('')).toBeUndefined(); + }); + }); + + describe('CSS_NAME_TO_HEX_MAP', () => { + it('should contain expected CSS color mappings', () => { + expect(CSS_NAME_TO_HEX_MAP.darkkhaki).toBe('#bdb76b'); + expect(CSS_NAME_TO_HEX_MAP.coral).toBe('#ff7f50'); + expect(CSS_NAME_TO_HEX_MAP.teal).toBe('#008080'); + expect(CSS_NAME_TO_HEX_MAP.tomato).toBe('#ff6347'); + expect(CSS_NAME_TO_HEX_MAP.turquoise).toBe('#40e0d0'); + }); + + it('should not contain 
Ink-supported color names', () => { + expect(CSS_NAME_TO_HEX_MAP.black).toBeUndefined(); + expect(CSS_NAME_TO_HEX_MAP.red).toBeUndefined(); + expect(CSS_NAME_TO_HEX_MAP.green).toBeUndefined(); + expect(CSS_NAME_TO_HEX_MAP.blue).toBeUndefined(); + }); + }); + + describe('INK_SUPPORTED_NAMES', () => { + it('should contain all Ink-supported color names', () => { + expect(INK_SUPPORTED_NAMES.has('black')).toBe(true); + expect(INK_SUPPORTED_NAMES.has('red')).toBe(true); + expect(INK_SUPPORTED_NAMES.has('green')).toBe(true); + expect(INK_SUPPORTED_NAMES.has('yellow')).toBe(true); + expect(INK_SUPPORTED_NAMES.has('blue')).toBe(true); + expect(INK_SUPPORTED_NAMES.has('cyan')).toBe(true); + expect(INK_SUPPORTED_NAMES.has('magenta')).toBe(true); + expect(INK_SUPPORTED_NAMES.has('white')).toBe(true); + expect(INK_SUPPORTED_NAMES.has('gray')).toBe(true); + expect(INK_SUPPORTED_NAMES.has('grey')).toBe(true); + expect(INK_SUPPORTED_NAMES.has('blackbright')).toBe(true); + expect(INK_SUPPORTED_NAMES.has('redbright')).toBe(true); + expect(INK_SUPPORTED_NAMES.has('greenbright')).toBe(true); + expect(INK_SUPPORTED_NAMES.has('yellowbright')).toBe(true); + expect(INK_SUPPORTED_NAMES.has('bluebright')).toBe(true); + expect(INK_SUPPORTED_NAMES.has('cyanbright')).toBe(true); + expect(INK_SUPPORTED_NAMES.has('magentabright')).toBe(true); + expect(INK_SUPPORTED_NAMES.has('whitebright')).toBe(true); + }); + + it('should not contain CSS color names', () => { + expect(INK_SUPPORTED_NAMES.has('darkkhaki')).toBe(false); + expect(INK_SUPPORTED_NAMES.has('coral')).toBe(false); + expect(INK_SUPPORTED_NAMES.has('teal')).toBe(false); + }); + }); + + describe('Consistency between validation and resolution', () => { + it('should have consistent behavior between isValidColor and resolveColor', () => { + // Test that any color that isValidColor returns true for can be resolved + const testColors = [ + '#ff0000', + '#00ff00', + '#0000ff', + '#fff', + '#000', + 'black', + 'red', + 'green', + 'yellow', + 
'blue', + 'cyan', + 'magenta', + 'white', + 'gray', + 'grey', + 'darkkhaki', + 'coral', + 'teal', + 'tomato', + 'turquoise', + 'violet', + 'wheat', + 'whitesmoke', + 'yellowgreen', + ]; + + for (const color of testColors) { + expect(isValidColor(color)).toBe(true); + expect(resolveColor(color)).toBeDefined(); + } + + // Test that invalid colors are consistently rejected + const invalidColors = [ + 'invalidcolor', + 'notacolor', + '', + '#gg0000', + '#ff00', + ]; + + for (const color of invalidColors) { + expect(isValidColor(color)).toBe(false); + expect(resolveColor(color)).toBeUndefined(); + } + }); + }); +}); diff --git a/packages/cli/src/ui/themes/color-utils.ts b/packages/cli/src/ui/themes/color-utils.ts new file mode 100644 index 000000000..a861ee321 --- /dev/null +++ b/packages/cli/src/ui/themes/color-utils.ts @@ -0,0 +1,231 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +// Mapping from common CSS color names (lowercase) to hex codes (lowercase) +// Excludes names directly supported by Ink +export const CSS_NAME_TO_HEX_MAP: Readonly> = { + aliceblue: '#f0f8ff', + antiquewhite: '#faebd7', + aqua: '#00ffff', + aquamarine: '#7fffd4', + azure: '#f0ffff', + beige: '#f5f5dc', + bisque: '#ffe4c4', + blanchedalmond: '#ffebcd', + blueviolet: '#8a2be2', + brown: '#a52a2a', + burlywood: '#deb887', + cadetblue: '#5f9ea0', + chartreuse: '#7fff00', + chocolate: '#d2691e', + coral: '#ff7f50', + cornflowerblue: '#6495ed', + cornsilk: '#fff8dc', + crimson: '#dc143c', + darkblue: '#00008b', + darkcyan: '#008b8b', + darkgoldenrod: '#b8860b', + darkgray: '#a9a9a9', + darkgrey: '#a9a9a9', + darkgreen: '#006400', + darkkhaki: '#bdb76b', + darkmagenta: '#8b008b', + darkolivegreen: '#556b2f', + darkorange: '#ff8c00', + darkorchid: '#9932cc', + darkred: '#8b0000', + darksalmon: '#e9967a', + darkseagreen: '#8fbc8f', + darkslateblue: '#483d8b', + darkslategray: '#2f4f4f', + darkslategrey: '#2f4f4f', + darkturquoise: '#00ced1', + 
darkviolet: '#9400d3', + deeppink: '#ff1493', + deepskyblue: '#00bfff', + dimgray: '#696969', + dimgrey: '#696969', + dodgerblue: '#1e90ff', + firebrick: '#b22222', + floralwhite: '#fffaf0', + forestgreen: '#228b22', + fuchsia: '#ff00ff', + gainsboro: '#dcdcdc', + ghostwhite: '#f8f8ff', + gold: '#ffd700', + goldenrod: '#daa520', + greenyellow: '#adff2f', + honeydew: '#f0fff0', + hotpink: '#ff69b4', + indianred: '#cd5c5c', + indigo: '#4b0082', + ivory: '#fffff0', + khaki: '#f0e68c', + lavender: '#e6e6fa', + lavenderblush: '#fff0f5', + lawngreen: '#7cfc00', + lemonchiffon: '#fffacd', + lightblue: '#add8e6', + lightcoral: '#f08080', + lightcyan: '#e0ffff', + lightgoldenrodyellow: '#fafad2', + lightgray: '#d3d3d3', + lightgrey: '#d3d3d3', + lightgreen: '#90ee90', + lightpink: '#ffb6c1', + lightsalmon: '#ffa07a', + lightseagreen: '#20b2aa', + lightskyblue: '#87cefa', + lightslategray: '#778899', + lightslategrey: '#778899', + lightsteelblue: '#b0c4de', + lightyellow: '#ffffe0', + lime: '#00ff00', + limegreen: '#32cd32', + linen: '#faf0e6', + maroon: '#800000', + mediumaquamarine: '#66cdaa', + mediumblue: '#0000cd', + mediumorchid: '#ba55d3', + mediumpurple: '#9370db', + mediumseagreen: '#3cb371', + mediumslateblue: '#7b68ee', + mediumspringgreen: '#00fa9a', + mediumturquoise: '#48d1cc', + mediumvioletred: '#c71585', + midnightblue: '#191970', + mintcream: '#f5fffa', + mistyrose: '#ffe4e1', + moccasin: '#ffe4b5', + navajowhite: '#ffdead', + navy: '#000080', + oldlace: '#fdf5e6', + olive: '#808000', + olivedrab: '#6b8e23', + orange: '#ffa500', + orangered: '#ff4500', + orchid: '#da70d6', + palegoldenrod: '#eee8aa', + palegreen: '#98fb98', + paleturquoise: '#afeeee', + palevioletred: '#db7093', + papayawhip: '#ffefd5', + peachpuff: '#ffdab9', + peru: '#cd853f', + pink: '#ffc0cb', + plum: '#dda0dd', + powderblue: '#b0e0e6', + purple: '#800080', + rebeccapurple: '#663399', + rosybrown: '#bc8f8f', + royalblue: '#4169e1', + saddlebrown: '#8b4513', + salmon: '#fa8072', + 
sandybrown: '#f4a460', + seagreen: '#2e8b57', + seashell: '#fff5ee', + sienna: '#a0522d', + silver: '#c0c0c0', + skyblue: '#87ceeb', + slateblue: '#6a5acd', + slategray: '#708090', + slategrey: '#708090', + snow: '#fffafa', + springgreen: '#00ff7f', + steelblue: '#4682b4', + tan: '#d2b48c', + teal: '#008080', + thistle: '#d8bfd8', + tomato: '#ff6347', + turquoise: '#40e0d0', + violet: '#ee82ee', + wheat: '#f5deb3', + whitesmoke: '#f5f5f5', + yellowgreen: '#9acd32', +}; + +// Define the set of Ink's named colors for quick lookup +export const INK_SUPPORTED_NAMES = new Set([ + 'black', + 'red', + 'green', + 'yellow', + 'blue', + 'cyan', + 'magenta', + 'white', + 'gray', + 'grey', + 'blackbright', + 'redbright', + 'greenbright', + 'yellowbright', + 'bluebright', + 'cyanbright', + 'magentabright', + 'whitebright', +]); + +/** + * Checks if a color string is valid (hex, Ink-supported color name, or CSS color name). + * This function uses the same validation logic as the Theme class's _resolveColor method + * to ensure consistency between validation and resolution. + * @param color The color string to validate. + * @returns True if the color is valid. + */ +export function isValidColor(color: string): boolean { + const lowerColor = color.toLowerCase(); + + // 1. Check if it's a hex code + if (lowerColor.startsWith('#')) { + return /^#[0-9A-Fa-f]{3}([0-9A-Fa-f]{3})?$/.test(color); + } + + // 2. Check if it's an Ink supported name + if (INK_SUPPORTED_NAMES.has(lowerColor)) { + return true; + } + + // 3. Check if it's a known CSS name we can map to hex + if (CSS_NAME_TO_HEX_MAP[lowerColor]) { + return true; + } + + // 4. Not a valid color + return false; +} + +/** + * Resolves a CSS color value (name or hex) into an Ink-compatible color string. + * @param colorValue The raw color string (e.g., 'blue', '#ff0000', 'darkkhaki'). + * @returns An Ink-compatible color string (hex or name), or undefined if not resolvable. 
+ */ +export function resolveColor(colorValue: string): string | undefined { + const lowerColor = colorValue.toLowerCase(); + + // 1. Check if it's already a hex code and valid + if (lowerColor.startsWith('#')) { + if (/^#[0-9A-Fa-f]{3}([0-9A-Fa-f]{3})?$/.test(colorValue)) { + return lowerColor; + } else { + return undefined; + } + } + // 2. Check if it's an Ink supported name (lowercase) + else if (INK_SUPPORTED_NAMES.has(lowerColor)) { + return lowerColor; // Use Ink name directly + } + // 3. Check if it's a known CSS name we can map to hex + else if (CSS_NAME_TO_HEX_MAP[lowerColor]) { + return CSS_NAME_TO_HEX_MAP[lowerColor]; // Use mapped hex + } + + // 4. Could not resolve + console.warn( + `[ColorUtils] Could not resolve color "${colorValue}" to an Ink-compatible format.`, + ); + return undefined; +} diff --git a/packages/cli/src/ui/themes/dracula.ts b/packages/cli/src/ui/themes/dracula.ts index d754deedd..e746d8e8c 100644 --- a/packages/cli/src/ui/themes/dracula.ts +++ b/packages/cli/src/ui/themes/dracula.ts @@ -17,6 +17,8 @@ const draculaColors: ColorsTheme = { AccentGreen: '#50fa7b', AccentYellow: '#f1fa8c', AccentRed: '#ff5555', + DiffAdded: '#11431d', + DiffRemoved: '#6e1818', Comment: '#6272a4', Gray: '#6272a4', GradientColors: ['#ff79c6', '#8be9fd'], diff --git a/packages/cli/src/ui/themes/github-dark.ts b/packages/cli/src/ui/themes/github-dark.ts index f69128216..e93c8c6a8 100644 --- a/packages/cli/src/ui/themes/github-dark.ts +++ b/packages/cli/src/ui/themes/github-dark.ts @@ -17,6 +17,8 @@ const githubDarkColors: ColorsTheme = { AccentGreen: '#85E89D', AccentYellow: '#FFAB70', AccentRed: '#F97583', + DiffAdded: '#3C4636', + DiffRemoved: '#502125', Comment: '#6A737D', Gray: '#6A737D', GradientColors: ['#79B8FF', '#85E89D'], diff --git a/packages/cli/src/ui/themes/github-light.ts b/packages/cli/src/ui/themes/github-light.ts index f1393e707..dcb4bbf00 100644 --- a/packages/cli/src/ui/themes/github-light.ts +++ 
b/packages/cli/src/ui/themes/github-light.ts @@ -17,6 +17,8 @@ const githubLightColors: ColorsTheme = { AccentGreen: '#008080', AccentYellow: '#990073', AccentRed: '#d14', + DiffAdded: '#C6EAD8', + DiffRemoved: '#FFCCCC', Comment: '#998', Gray: '#999', GradientColors: ['#458', '#008080'], diff --git a/packages/cli/src/ui/themes/googlecode.ts b/packages/cli/src/ui/themes/googlecode.ts index 5e4f02fb1..38b719a3c 100644 --- a/packages/cli/src/ui/themes/googlecode.ts +++ b/packages/cli/src/ui/themes/googlecode.ts @@ -9,7 +9,7 @@ import { lightTheme, Theme, type ColorsTheme } from './theme.js'; const googleCodeColors: ColorsTheme = { type: 'light', Background: 'white', - Foreground: 'black', + Foreground: '#444', LightBlue: '#066', AccentBlue: '#008', AccentPurple: '#606', @@ -17,6 +17,8 @@ const googleCodeColors: ColorsTheme = { AccentGreen: '#080', AccentYellow: '#660', AccentRed: '#800', + DiffAdded: '#C6EAD8', + DiffRemoved: '#FEDEDE', Comment: '#5f6368', Gray: lightTheme.Gray, GradientColors: ['#066', '#606'], diff --git a/packages/cli/src/ui/themes/no-color.ts b/packages/cli/src/ui/themes/no-color.ts index 8ddb57fdb..a6efb454c 100644 --- a/packages/cli/src/ui/themes/no-color.ts +++ b/packages/cli/src/ui/themes/no-color.ts @@ -17,12 +17,14 @@ const noColorColorsTheme: ColorsTheme = { AccentGreen: '', AccentYellow: '', AccentRed: '', + DiffAdded: '', + DiffRemoved: '', Comment: '', Gray: '', }; export const NoColorTheme: Theme = new Theme( - 'No Color', + 'NoColor', 'dark', { hljs: { diff --git a/packages/cli/src/ui/themes/qwen-dark.ts b/packages/cli/src/ui/themes/qwen-dark.ts index 4516fbee4..b8cdf4459 100644 --- a/packages/cli/src/ui/themes/qwen-dark.ts +++ b/packages/cli/src/ui/themes/qwen-dark.ts @@ -17,6 +17,8 @@ const qwenDarkColors: ColorsTheme = { AccentGreen: '#AAD94C', AccentYellow: '#FFD700', AccentRed: '#F26D78', + DiffAdded: '#AAD94C', + DiffRemoved: '#F26D78', Comment: '#646A71', Gray: '#3D4149', GradientColors: ['#FFD700', '#da7959'], diff --git 
a/packages/cli/src/ui/themes/qwen-light.ts b/packages/cli/src/ui/themes/qwen-light.ts index 7a9e48f54..6b038856b 100644 --- a/packages/cli/src/ui/themes/qwen-light.ts +++ b/packages/cli/src/ui/themes/qwen-light.ts @@ -17,6 +17,8 @@ const qwenLightColors: ColorsTheme = { AccentGreen: '#86b300', AccentYellow: '#f2ae49', AccentRed: '#f07171', + DiffAdded: '#86b300', + DiffRemoved: '#f07171', Comment: '#ABADB1', Gray: '#CCCFD3', GradientColors: ['#399ee6', '#86b300'], diff --git a/packages/cli/src/ui/themes/shades-of-purple.ts b/packages/cli/src/ui/themes/shades-of-purple.ts index 83eeb7863..6e20240f9 100644 --- a/packages/cli/src/ui/themes/shades-of-purple.ts +++ b/packages/cli/src/ui/themes/shades-of-purple.ts @@ -5,7 +5,7 @@ */ /** - * Shades of Purple Theme — for Highlightjs. + * Shades of Purple Theme — for Highlight.js. * @author Ahmad Awais */ import { type ColorsTheme, Theme } from './theme.js'; @@ -22,6 +22,8 @@ const shadesOfPurpleColors: ColorsTheme = { AccentGreen: '#A5FF90', // Strings and many others AccentYellow: '#fad000', // Title, main yellow AccentRed: '#ff628c', // Error/deletion accent + DiffAdded: '#383E45', + DiffRemoved: '#572244', Comment: '#B362FF', // Comment color (same as AccentPurple) Gray: '#726c86', // Gray color GradientColors: ['#4d21fc', '#847ace', '#ff628c'], diff --git a/packages/cli/src/ui/themes/theme-manager.test.ts b/packages/cli/src/ui/themes/theme-manager.test.ts new file mode 100644 index 000000000..6f9565a52 --- /dev/null +++ b/packages/cli/src/ui/themes/theme-manager.test.ts @@ -0,0 +1,108 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +// Patch: Unset NO_COLOR at the very top before any imports +if (process.env.NO_COLOR !== undefined) { + delete process.env.NO_COLOR; +} + +import { describe, it, expect, beforeEach } from 'vitest'; +import { themeManager, DEFAULT_THEME } from './theme-manager.js'; +import { CustomTheme } from './theme.js'; + +const validCustomTheme: 
CustomTheme = { + type: 'custom', + name: 'MyCustomTheme', + Background: '#000000', + Foreground: '#ffffff', + LightBlue: '#89BDCD', + AccentBlue: '#3B82F6', + AccentPurple: '#8B5CF6', + AccentCyan: '#06B6D4', + AccentGreen: '#3CA84B', + AccentYellow: 'yellow', + AccentRed: 'red', + DiffAdded: 'green', + DiffRemoved: 'red', + Comment: 'gray', + Gray: 'gray', +}; + +describe('ThemeManager', () => { + beforeEach(() => { + // Reset themeManager state + themeManager.loadCustomThemes({}); + themeManager.setActiveTheme(DEFAULT_THEME.name); + }); + + it('should load valid custom themes', () => { + themeManager.loadCustomThemes({ MyCustomTheme: validCustomTheme }); + expect(themeManager.getCustomThemeNames()).toContain('MyCustomTheme'); + expect(themeManager.isCustomTheme('MyCustomTheme')).toBe(true); + }); + + it('should not load invalid custom themes', () => { + const invalidTheme = { ...validCustomTheme, Background: 'not-a-color' }; + themeManager.loadCustomThemes({ + InvalidTheme: invalidTheme as unknown as CustomTheme, + }); + expect(themeManager.getCustomThemeNames()).not.toContain('InvalidTheme'); + expect(themeManager.isCustomTheme('InvalidTheme')).toBe(false); + }); + + it('should set and get the active theme', () => { + expect(themeManager.getActiveTheme().name).toBe(DEFAULT_THEME.name); + themeManager.setActiveTheme('Ayu'); + expect(themeManager.getActiveTheme().name).toBe('Ayu'); + }); + + it('should set and get a custom active theme', () => { + themeManager.loadCustomThemes({ MyCustomTheme: validCustomTheme }); + themeManager.setActiveTheme('MyCustomTheme'); + expect(themeManager.getActiveTheme().name).toBe('MyCustomTheme'); + }); + + it('should return false when setting a non-existent theme', () => { + expect(themeManager.setActiveTheme('NonExistentTheme')).toBe(false); + expect(themeManager.getActiveTheme().name).toBe(DEFAULT_THEME.name); + }); + + it('should list available themes including custom themes', () => { + themeManager.loadCustomThemes({ 
MyCustomTheme: validCustomTheme }); + const available = themeManager.getAvailableThemes(); + expect( + available.some( + (t: { name: string; isCustom?: boolean }) => + t.name === 'MyCustomTheme' && t.isCustom, + ), + ).toBe(true); + }); + + it('should get a theme by name', () => { + expect(themeManager.getTheme('Ayu')).toBeDefined(); + themeManager.loadCustomThemes({ MyCustomTheme: validCustomTheme }); + expect(themeManager.getTheme('MyCustomTheme')).toBeDefined(); + }); + + it('should fall back to default theme if active theme is invalid', () => { + (themeManager as unknown as { activeTheme: unknown }).activeTheme = { + name: 'NonExistent', + type: 'custom', + }; + expect(themeManager.getActiveTheme().name).toBe(DEFAULT_THEME.name); + }); + + it('should return NoColorTheme if NO_COLOR is set', () => { + const original = process.env.NO_COLOR; + process.env.NO_COLOR = '1'; + expect(themeManager.getActiveTheme().name).toBe('NoColor'); + if (original === undefined) { + delete process.env.NO_COLOR; + } else { + process.env.NO_COLOR = original; + } + }); +}); diff --git a/packages/cli/src/ui/themes/theme-manager.ts b/packages/cli/src/ui/themes/theme-manager.ts index 91c942d94..972d8cb83 100644 --- a/packages/cli/src/ui/themes/theme-manager.ts +++ b/packages/cli/src/ui/themes/theme-manager.ts @@ -17,7 +17,13 @@ import { ShadesOfPurple } from './shades-of-purple.js'; import { XCode } from './xcode.js'; import { QwenLight } from './qwen-light.js'; import { QwenDark } from './qwen-dark.js'; -import { Theme, ThemeType } from './theme.js'; +import { + Theme, + ThemeType, + CustomTheme, + createCustomTheme, + validateCustomTheme, +} from './theme.js'; import { ANSI } from './ansi.js'; import { ANSILight } from './ansi-light.js'; import { NoColorTheme } from './no-color.js'; @@ -26,6 +32,7 @@ import process from 'node:process'; export interface ThemeDisplay { name: string; type: ThemeType; + isCustom?: boolean; } export const DEFAULT_THEME: Theme = QwenDark; @@ -33,6 +40,7 @@ 
export const DEFAULT_THEME: Theme = QwenDark; class ThemeManager { private readonly availableThemes: Theme[]; private activeTheme: Theme; + private customThemes: Map = new Map(); constructor() { this.availableThemes = [ @@ -56,84 +64,177 @@ class ThemeManager { } /** - * Returns a list of available theme names. + * Loads custom themes from settings. + * @param customThemesSettings Custom themes from settings. */ - getAvailableThemes(): ThemeDisplay[] { - // Separate Qwen themes - const qwenThemes = this.availableThemes.filter( - (theme) => theme.name === QwenLight.name || theme.name === QwenDark.name, - ); - const otherThemes = this.availableThemes.filter( - (theme) => theme.name !== QwenLight.name && theme.name !== QwenDark.name, - ); + loadCustomThemes(customThemesSettings?: Record): void { + this.customThemes.clear(); - // Sort other themes by type and then name - const sortedOtherThemes = otherThemes.sort((a, b) => { - const typeOrder = (type: ThemeType): number => { - switch (type) { - case 'dark': - return 1; - case 'light': - return 2; - default: - return 3; + if (!customThemesSettings) { + return; + } + + for (const [name, customThemeConfig] of Object.entries( + customThemesSettings, + )) { + const validation = validateCustomTheme(customThemeConfig); + if (validation.isValid) { + if (validation.warning) { + console.warn(`Theme "${name}": ${validation.warning}`); } - }; + const themeWithDefaults: CustomTheme = { + ...DEFAULT_THEME.colors, + ...customThemeConfig, + name: customThemeConfig.name || name, + type: 'custom', + }; - const typeComparison = typeOrder(a.type) - typeOrder(b.type); - if (typeComparison !== 0) { - return typeComparison; + try { + const theme = createCustomTheme(themeWithDefaults); + this.customThemes.set(name, theme); + } catch (error) { + console.warn(`Failed to load custom theme "${name}":`, error); + } + } else { + console.warn(`Invalid custom theme "${name}": ${validation.error}`); } - return a.name.localeCompare(b.name); - }); - - 
// Combine Qwen themes first, then sorted others - const sortedThemes = [...qwenThemes, ...sortedOtherThemes]; - - return sortedThemes.map((theme) => ({ - name: theme.name, - type: theme.type, - })); + } + // If the current active theme is a custom theme, keep it if still valid + if ( + this.activeTheme && + this.activeTheme.type === 'custom' && + this.customThemes.has(this.activeTheme.name) + ) { + this.activeTheme = this.customThemes.get(this.activeTheme.name)!; + } } /** * Sets the active theme. - * @param themeName The name of the theme to activate. + * @param themeName The name of the theme to set as active. * @returns True if the theme was successfully set, false otherwise. */ setActiveTheme(themeName: string | undefined): boolean { - const foundTheme = this.findThemeByName(themeName); - - if (foundTheme) { - this.activeTheme = foundTheme; - return true; - } else { - // If themeName is undefined, it means we want to set the default theme. - // If findThemeByName returns undefined (e.g. default theme is also not found for some reason) - // then this will return false. - if (themeName === undefined) { - this.activeTheme = DEFAULT_THEME; - return true; - } + const theme = this.findThemeByName(themeName); + if (!theme) { return false; } + this.activeTheme = theme; + return true; + } + + /** + * Gets the currently active theme. + * @returns The active theme. + */ + getActiveTheme(): Theme { + if (process.env.NO_COLOR) { + return NoColorTheme; + } + // Ensure the active theme is always valid (fall back to default if not) + if (!this.activeTheme || !this.findThemeByName(this.activeTheme.name)) { + this.activeTheme = DEFAULT_THEME; + } + return this.activeTheme; + } + + /** + * Gets a list of custom theme names. + * @returns Array of custom theme names. + */ + getCustomThemeNames(): string[] { + return Array.from(this.customThemes.keys()); + } + + /** + * Checks if a theme name is a custom theme. + * @param themeName The theme name to check. 
+ * @returns True if the theme is custom. + */ + isCustomTheme(themeName: string): boolean { + return this.customThemes.has(themeName); + } + + /** + * Returns a list of available theme names. + */ + getAvailableThemes(): ThemeDisplay[] { + const builtInThemes = this.availableThemes.map((theme) => ({ + name: theme.name, + type: theme.type, + isCustom: false, + })); + + const customThemes = Array.from(this.customThemes.values()).map( + (theme) => ({ + name: theme.name, + type: theme.type, + isCustom: true, + }), + ); + + // Separate Qwen themes + const qwenThemes = builtInThemes.filter( + (theme) => theme.name === QwenLight.name || theme.name === QwenDark.name, + ); + const otherBuiltInThemes = builtInThemes.filter( + (theme) => theme.name !== QwenLight.name && theme.name !== QwenDark.name, + ); + + // Sort other themes by type and then name + const sortedOtherThemes = [...otherBuiltInThemes, ...customThemes].sort( + (a, b) => { + const typeOrder = (type: ThemeType): number => { + switch (type) { + case 'dark': + return 1; + case 'light': + return 2; + case 'ansi': + return 3; + case 'custom': + return 4; // Custom themes at the end + default: + return 5; + } + }; + + const typeComparison = typeOrder(a.type) - typeOrder(b.type); + if (typeComparison !== 0) { + return typeComparison; + } + return a.name.localeCompare(b.name); + }, + ); + + // Combine Qwen themes first, then sorted others + return [...qwenThemes, ...sortedOtherThemes]; + } + + /** + * Gets a theme by name. + * @param themeName The name of the theme to get. + * @returns The theme if found, undefined otherwise. + */ + getTheme(themeName: string): Theme | undefined { + return this.findThemeByName(themeName); } findThemeByName(themeName: string | undefined): Theme | undefined { if (!themeName) { return DEFAULT_THEME; } - return this.availableThemes.find((theme) => theme.name === themeName); - } - /** - * Returns the currently active theme object. 
- */ - getActiveTheme(): Theme { - if (process.env.NO_COLOR) { - return NoColorTheme; + // First check built-in themes + const builtInTheme = this.availableThemes.find( + (theme) => theme.name === themeName, + ); + if (builtInTheme) { + return builtInTheme; } - return this.activeTheme; + + // Then check custom themes + return this.customThemes.get(themeName); } } diff --git a/packages/cli/src/ui/themes/theme.test.ts b/packages/cli/src/ui/themes/theme.test.ts new file mode 100644 index 000000000..51486d4af --- /dev/null +++ b/packages/cli/src/ui/themes/theme.test.ts @@ -0,0 +1,147 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect } from 'vitest'; +import * as themeModule from './theme.js'; +import { themeManager } from './theme-manager.js'; + +const { validateCustomTheme } = themeModule; +type CustomTheme = themeModule.CustomTheme; + +describe('validateCustomTheme', () => { + const validTheme: CustomTheme = { + type: 'custom', + name: 'My Custom Theme', + Background: '#FFFFFF', + Foreground: '#000000', + LightBlue: '#ADD8E6', + AccentBlue: '#0000FF', + AccentPurple: '#800080', + AccentCyan: '#00FFFF', + AccentGreen: '#008000', + AccentYellow: '#FFFF00', + AccentRed: '#FF0000', + DiffAdded: '#00FF00', + DiffRemoved: '#FF0000', + Comment: '#808080', + Gray: '#808080', + }; + + it('should return isValid: true for a valid theme', () => { + const result = validateCustomTheme(validTheme); + expect(result.isValid).toBe(true); + expect(result.error).toBeUndefined(); + }); + + it('should return isValid: false for a theme with a missing required field', () => { + const invalidTheme = { + ...validTheme, + name: undefined as unknown as string, + }; + const result = validateCustomTheme(invalidTheme); + expect(result.isValid).toBe(false); + expect(result.error).toBe('Missing required field: name'); + }); + + it('should return isValid: false for a theme with an invalid color format', () => { + const 
invalidTheme = { ...validTheme, Background: 'not-a-color' }; + const result = validateCustomTheme(invalidTheme); + expect(result.isValid).toBe(false); + expect(result.error).toBe( + 'Invalid color format for Background: not-a-color', + ); + }); + + it('should return isValid: false for a theme with an invalid name', () => { + const invalidTheme = { ...validTheme, name: ' ' }; + const result = validateCustomTheme(invalidTheme); + expect(result.isValid).toBe(false); + expect(result.error).toBe('Invalid theme name: '); + }); + + it('should return isValid: true for a theme missing optional DiffAdded and DiffRemoved colors', () => { + const legacyTheme: Partial = { ...validTheme }; + delete legacyTheme.DiffAdded; + delete legacyTheme.DiffRemoved; + const result = validateCustomTheme(legacyTheme); + expect(result.isValid).toBe(true); + expect(result.error).toBeUndefined(); + }); + + it('should return a warning if DiffAdded and DiffRemoved are missing', () => { + const legacyTheme: Partial = { ...validTheme }; + delete legacyTheme.DiffAdded; + delete legacyTheme.DiffRemoved; + const result = validateCustomTheme(legacyTheme); + expect(result.isValid).toBe(true); + expect(result.warning).toBe('Missing field(s) DiffAdded, DiffRemoved'); + }); + + it('should return a warning if only DiffRemoved is missing', () => { + const legacyTheme: Partial = { ...validTheme }; + delete legacyTheme.DiffRemoved; + const result = validateCustomTheme(legacyTheme); + expect(result.isValid).toBe(true); + expect(result.warning).toBe('Missing field(s) DiffRemoved'); + }); + + it('should return isValid: false for a theme with an invalid DiffAdded color', () => { + const invalidTheme = { ...validTheme, DiffAdded: 'invalid' }; + const result = validateCustomTheme(invalidTheme); + expect(result.isValid).toBe(false); + expect(result.error).toBe('Invalid color format for DiffAdded: invalid'); + }); + + it('should return isValid: false for a theme with an invalid DiffRemoved color', () => { + const 
invalidTheme = { ...validTheme, DiffRemoved: 'invalid' }; + const result = validateCustomTheme(invalidTheme); + expect(result.isValid).toBe(false); + expect(result.error).toBe('Invalid color format for DiffRemoved: invalid'); + }); + + it('should return isValid: false for a theme with a very long name', () => { + const invalidTheme = { ...validTheme, name: 'a'.repeat(51) }; + const result = validateCustomTheme(invalidTheme); + expect(result.isValid).toBe(false); + expect(result.error).toBe(`Invalid theme name: ${'a'.repeat(51)}`); + }); +}); + +describe('themeManager.loadCustomThemes', () => { + const baseTheme: Omit & { + DiffAdded?: string; + DiffRemoved?: string; + } = { + type: 'custom', + name: 'Test Theme', + Background: '#FFF', + Foreground: '#000', + LightBlue: '#ADD8E6', + AccentBlue: '#00F', + AccentPurple: '#808', + AccentCyan: '#0FF', + AccentGreen: '#080', + AccentYellow: '#FF0', + AccentRed: '#F00', + Comment: '#888', + Gray: '#888', + }; + + it('should use values from DEFAULT_THEME when DiffAdded and DiffRemoved are not provided', () => { + const legacyTheme: Partial = { ...baseTheme }; + delete legacyTheme.DiffAdded; + delete legacyTheme.DiffRemoved; + + themeManager.loadCustomThemes({ 'Legacy Custom Theme': legacyTheme }); + const result = themeManager.getTheme('Legacy Custom Theme')!; + + // Should use DEFAULT_THEME (QwenDark) values for missing fields + expect(result.colors.DiffAdded).toBe('#AAD94C'); + expect(result.colors.DiffRemoved).toBe('#F26D78'); + expect(result.colors.AccentBlue).toBe(legacyTheme.AccentBlue); + expect(result.name).toBe(legacyTheme.name); + }); +}); diff --git a/packages/cli/src/ui/themes/theme.ts b/packages/cli/src/ui/themes/theme.ts index 9b04da526..7d21af1db 100644 --- a/packages/cli/src/ui/themes/theme.ts +++ b/packages/cli/src/ui/themes/theme.ts @@ -5,8 +5,9 @@ */ import type { CSSProperties } from 'react'; +import { isValidColor, resolveColor } from './color-utils.js'; -export type ThemeType = 'light' | 'dark' | 
'ansi'; +export type ThemeType = 'light' | 'dark' | 'ansi' | 'custom'; export interface ColorsTheme { type: ThemeType; @@ -19,11 +20,18 @@ export interface ColorsTheme { AccentGreen: string; AccentYellow: string; AccentRed: string; + DiffAdded: string; + DiffRemoved: string; Comment: string; Gray: string; GradientColors?: string[]; } +export interface CustomTheme extends ColorsTheme { + type: 'custom'; + name: string; +} + export const lightTheme: ColorsTheme = { type: 'light', Background: '#FAFAFA', @@ -35,8 +43,10 @@ export const lightTheme: ColorsTheme = { AccentGreen: '#3CA84B', AccentYellow: '#D5A40A', AccentRed: '#DD4C4C', + DiffAdded: '#C6EAD8', + DiffRemoved: '#FFCCCC', Comment: '#008000', - Gray: '#B7BECC', + Gray: '#97a0b0', GradientColors: ['#4796E4', '#847ACE', '#C3677F'], }; @@ -51,6 +61,8 @@ export const darkTheme: ColorsTheme = { AccentGreen: '#A6E3A1', AccentYellow: '#F9E2AF', AccentRed: '#F38BA8', + DiffAdded: '#28350B', + DiffRemoved: '#430000', Comment: '#6C7086', Gray: '#6C7086', GradientColors: ['#4796E4', '#847ACE', '#C3677F'], @@ -67,6 +79,8 @@ export const ansiTheme: ColorsTheme = { AccentGreen: 'green', AccentYellow: 'yellow', AccentRed: 'red', + DiffAdded: 'green', + DiffRemoved: 'red', Comment: 'gray', Gray: 'gray', }; @@ -83,173 +97,6 @@ export class Theme { */ protected readonly _colorMap: Readonly>; - // --- Static Helper Data --- - - // Mapping from common CSS color names (lowercase) to hex codes (lowercase) - // Excludes names directly supported by Ink - private static readonly cssNameToHexMap: Readonly> = { - aliceblue: '#f0f8ff', - antiquewhite: '#faebd7', - aqua: '#00ffff', - aquamarine: '#7fffd4', - azure: '#f0ffff', - beige: '#f5f5dc', - bisque: '#ffe4c4', - blanchedalmond: '#ffebcd', - blueviolet: '#8a2be2', - brown: '#a52a2a', - burlywood: '#deb887', - cadetblue: '#5f9ea0', - chartreuse: '#7fff00', - chocolate: '#d2691e', - coral: '#ff7f50', - cornflowerblue: '#6495ed', - cornsilk: '#fff8dc', - crimson: '#dc143c', - darkblue: 
'#00008b', - darkcyan: '#008b8b', - darkgoldenrod: '#b8860b', - darkgray: '#a9a9a9', - darkgrey: '#a9a9a9', - darkgreen: '#006400', - darkkhaki: '#bdb76b', - darkmagenta: '#8b008b', - darkolivegreen: '#556b2f', - darkorange: '#ff8c00', - darkorchid: '#9932cc', - darkred: '#8b0000', - darksalmon: '#e9967a', - darkseagreen: '#8fbc8f', - darkslateblue: '#483d8b', - darkslategray: '#2f4f4f', - darkslategrey: '#2f4f4f', - darkturquoise: '#00ced1', - darkviolet: '#9400d3', - deeppink: '#ff1493', - deepskyblue: '#00bfff', - dimgray: '#696969', - dimgrey: '#696969', - dodgerblue: '#1e90ff', - firebrick: '#b22222', - floralwhite: '#fffaf0', - forestgreen: '#228b22', - fuchsia: '#ff00ff', - gainsboro: '#dcdcdc', - ghostwhite: '#f8f8ff', - gold: '#ffd700', - goldenrod: '#daa520', - greenyellow: '#adff2f', - honeydew: '#f0fff0', - hotpink: '#ff69b4', - indianred: '#cd5c5c', - indigo: '#4b0082', - ivory: '#fffff0', - khaki: '#f0e68c', - lavender: '#e6e6fa', - lavenderblush: '#fff0f5', - lawngreen: '#7cfc00', - lemonchiffon: '#fffacd', - lightblue: '#add8e6', - lightcoral: '#f08080', - lightcyan: '#e0ffff', - lightgoldenrodyellow: '#fafad2', - lightgray: '#d3d3d3', - lightgrey: '#d3d3d3', - lightgreen: '#90ee90', - lightpink: '#ffb6c1', - lightsalmon: '#ffa07a', - lightseagreen: '#20b2aa', - lightskyblue: '#87cefa', - lightslategray: '#778899', - lightslategrey: '#778899', - lightsteelblue: '#b0c4de', - lightyellow: '#ffffe0', - lime: '#00ff00', - limegreen: '#32cd32', - linen: '#faf0e6', - maroon: '#800000', - mediumaquamarine: '#66cdaa', - mediumblue: '#0000cd', - mediumorchid: '#ba55d3', - mediumpurple: '#9370db', - mediumseagreen: '#3cb371', - mediumslateblue: '#7b68ee', - mediumspringgreen: '#00fa9a', - mediumturquoise: '#48d1cc', - mediumvioletred: '#c71585', - midnightblue: '#191970', - mintcream: '#f5fffa', - mistyrose: '#ffe4e1', - moccasin: '#ffe4b5', - navajowhite: '#ffdead', - navy: '#000080', - oldlace: '#fdf5e6', - olive: '#808000', - olivedrab: '#6b8e23', - 
orange: '#ffa500', - orangered: '#ff4500', - orchid: '#da70d6', - palegoldenrod: '#eee8aa', - palegreen: '#98fb98', - paleturquoise: '#afeeee', - palevioletred: '#db7093', - papayawhip: '#ffefd5', - peachpuff: '#ffdab9', - peru: '#cd853f', - pink: '#ffc0cb', - plum: '#dda0dd', - powderblue: '#b0e0e6', - purple: '#800080', - rebeccapurple: '#663399', - rosybrown: '#bc8f8f', - royalblue: '#4169e1', - saddlebrown: '#8b4513', - salmon: '#fa8072', - sandybrown: '#f4a460', - seagreen: '#2e8b57', - seashell: '#fff5ee', - sienna: '#a0522d', - silver: '#c0c0c0', - skyblue: '#87ceeb', - slateblue: '#6a5acd', - slategray: '#708090', - slategrey: '#708090', - snow: '#fffafa', - springgreen: '#00ff7f', - steelblue: '#4682b4', - tan: '#d2b48c', - teal: '#008080', - thistle: '#d8bfd8', - tomato: '#ff6347', - turquoise: '#40e0d0', - violet: '#ee82ee', - wheat: '#f5deb3', - whitesmoke: '#f5f5f5', - yellowgreen: '#9acd32', - }; - - // Define the set of Ink's named colors for quick lookup - private static readonly inkSupportedNames = new Set([ - 'black', - 'red', - 'green', - 'yellow', - 'blue', - 'cyan', - 'magenta', - 'white', - 'gray', - 'grey', - 'blackbright', - 'redbright', - 'greenbright', - 'yellowbright', - 'bluebright', - 'cyanbright', - 'magentabright', - 'whitebright', - ]); - /** * Creates a new Theme instance. * @param name The name of the theme. @@ -285,26 +132,7 @@ export class Theme { * @returns An Ink-compatible color string (hex or name), or undefined if not resolvable. */ private static _resolveColor(colorValue: string): string | undefined { - const lowerColor = colorValue.toLowerCase(); - - // 1. Check if it's already a hex code - if (lowerColor.startsWith('#')) { - return lowerColor; // Use hex directly - } - // 2. Check if it's an Ink supported name (lowercase) - else if (Theme.inkSupportedNames.has(lowerColor)) { - return lowerColor; // Use Ink name directly - } - // 3. 
Check if it's a known CSS name we can map to hex - else if (Theme.cssNameToHexMap[lowerColor]) { - return Theme.cssNameToHexMap[lowerColor]; // Use mapped hex - } - - // 4. Could not resolve - console.warn( - `[Theme] Could not resolve color "${colorValue}" to an Ink-compatible format.`, - ); - return undefined; + return resolveColor(colorValue); } /** @@ -331,7 +159,7 @@ export class Theme { inkTheme[key] = resolvedColor; } // If color is not resolvable, it's omitted from the map, - // allowing fallback to the default foreground color. + // this enables falling back to the default foreground color. } // We currently only care about the 'color' property for Ink rendering. // Other properties like background, fontStyle, etc., are ignored. @@ -339,3 +167,254 @@ export class Theme { return inkTheme; } } + +/** + * Creates a Theme instance from a custom theme configuration. + * @param customTheme The custom theme configuration. + * @returns A new Theme instance. + */ +export function createCustomTheme(customTheme: CustomTheme): Theme { + // Generate CSS properties mappings based on the custom theme colors + const rawMappings: Record = { + hljs: { + display: 'block', + overflowX: 'auto', + padding: '0.5em', + background: customTheme.Background, + color: customTheme.Foreground, + }, + 'hljs-keyword': { + color: customTheme.AccentBlue, + }, + 'hljs-literal': { + color: customTheme.AccentBlue, + }, + 'hljs-symbol': { + color: customTheme.AccentBlue, + }, + 'hljs-name': { + color: customTheme.AccentBlue, + }, + 'hljs-link': { + color: customTheme.AccentBlue, + textDecoration: 'underline', + }, + 'hljs-built_in': { + color: customTheme.AccentCyan, + }, + 'hljs-type': { + color: customTheme.AccentCyan, + }, + 'hljs-number': { + color: customTheme.AccentGreen, + }, + 'hljs-class': { + color: customTheme.AccentGreen, + }, + 'hljs-string': { + color: customTheme.AccentYellow, + }, + 'hljs-meta-string': { + color: customTheme.AccentYellow, + }, + 'hljs-regexp': { + color: 
customTheme.AccentRed, + }, + 'hljs-template-tag': { + color: customTheme.AccentRed, + }, + 'hljs-subst': { + color: customTheme.Foreground, + }, + 'hljs-function': { + color: customTheme.Foreground, + }, + 'hljs-title': { + color: customTheme.Foreground, + }, + 'hljs-params': { + color: customTheme.Foreground, + }, + 'hljs-formula': { + color: customTheme.Foreground, + }, + 'hljs-comment': { + color: customTheme.Comment, + fontStyle: 'italic', + }, + 'hljs-quote': { + color: customTheme.Comment, + fontStyle: 'italic', + }, + 'hljs-doctag': { + color: customTheme.Comment, + }, + 'hljs-meta': { + color: customTheme.Gray, + }, + 'hljs-meta-keyword': { + color: customTheme.Gray, + }, + 'hljs-tag': { + color: customTheme.Gray, + }, + 'hljs-variable': { + color: customTheme.AccentPurple, + }, + 'hljs-template-variable': { + color: customTheme.AccentPurple, + }, + 'hljs-attr': { + color: customTheme.LightBlue, + }, + 'hljs-attribute': { + color: customTheme.LightBlue, + }, + 'hljs-builtin-name': { + color: customTheme.LightBlue, + }, + 'hljs-section': { + color: customTheme.AccentYellow, + }, + 'hljs-emphasis': { + fontStyle: 'italic', + }, + 'hljs-strong': { + fontWeight: 'bold', + }, + 'hljs-bullet': { + color: customTheme.AccentYellow, + }, + 'hljs-selector-tag': { + color: customTheme.AccentYellow, + }, + 'hljs-selector-id': { + color: customTheme.AccentYellow, + }, + 'hljs-selector-class': { + color: customTheme.AccentYellow, + }, + 'hljs-selector-attr': { + color: customTheme.AccentYellow, + }, + 'hljs-selector-pseudo': { + color: customTheme.AccentYellow, + }, + 'hljs-addition': { + backgroundColor: customTheme.AccentGreen, + display: 'inline-block', + width: '100%', + }, + 'hljs-deletion': { + backgroundColor: customTheme.AccentRed, + display: 'inline-block', + width: '100%', + }, + }; + + return new Theme(customTheme.name, 'custom', rawMappings, customTheme); +} + +/** + * Validates a custom theme configuration. 
+ * @param customTheme The custom theme to validate. + * @returns An object with isValid boolean and error message if invalid. + */ +export function validateCustomTheme(customTheme: Partial): { + isValid: boolean; + error?: string; + warning?: string; +} { + // Check required fields + const requiredFields: Array = [ + 'name', + 'Background', + 'Foreground', + 'LightBlue', + 'AccentBlue', + 'AccentPurple', + 'AccentCyan', + 'AccentGreen', + 'AccentYellow', + 'AccentRed', + // 'DiffAdded' and 'DiffRemoved' are not required as they were added after + // the theme format was defined. + 'Comment', + 'Gray', + ]; + + const recommendedFields: Array = [ + 'DiffAdded', + 'DiffRemoved', + ]; + + for (const field of requiredFields) { + if (!customTheme[field]) { + return { + isValid: false, + error: `Missing required field: ${field}`, + }; + } + } + + const missingFields: string[] = []; + + for (const field of recommendedFields) { + if (!customTheme[field]) { + missingFields.push(field); + } + } + + // Validate color format (basic hex validation) + const colorFields: Array = [ + 'Background', + 'Foreground', + 'LightBlue', + 'AccentBlue', + 'AccentPurple', + 'AccentCyan', + 'AccentGreen', + 'AccentYellow', + 'AccentRed', + 'DiffAdded', + 'DiffRemoved', + 'Comment', + 'Gray', + ]; + + for (const field of colorFields) { + const color = customTheme[field] as string | undefined; + if (color !== undefined && !isValidColor(color)) { + return { + isValid: false, + error: `Invalid color format for ${field}: ${color}`, + }; + } + } + + // Validate theme name + if (customTheme.name && !isValidThemeName(customTheme.name)) { + return { + isValid: false, + error: `Invalid theme name: ${customTheme.name}`, + }; + } + + return { + isValid: true, + warning: + missingFields.length > 0 + ? `Missing field(s) ${missingFields.join(', ')}` + : undefined, + }; +} + +/** + * Checks if a theme name is valid. + * @param name The theme name to validate. + * @returns True if the theme name is valid. 
+ */ +function isValidThemeName(name: string): boolean { + // Theme name should be non-empty and not contain invalid characters + return name.trim().length > 0 && name.trim().length <= 50; +} diff --git a/packages/cli/src/ui/themes/xcode.ts b/packages/cli/src/ui/themes/xcode.ts index 15012288e..690d23863 100644 --- a/packages/cli/src/ui/themes/xcode.ts +++ b/packages/cli/src/ui/themes/xcode.ts @@ -9,7 +9,7 @@ import { type ColorsTheme, Theme } from './theme.js'; const xcodeColors: ColorsTheme = { type: 'light', Background: '#fff', - Foreground: 'black', + Foreground: '#444', LightBlue: '#0E0EFF', AccentBlue: '#1c00cf', AccentPurple: '#aa0d91', @@ -17,6 +17,8 @@ const xcodeColors: ColorsTheme = { AccentGreen: '#007400', AccentYellow: '#836C28', AccentRed: '#c41a16', + DiffAdded: '#C6EAD8', + DiffRemoved: '#FEDEDE', Comment: '#007400', Gray: '#c0c0c0', GradientColors: ['#1c00cf', '#007400'], diff --git a/packages/cli/src/ui/types.ts b/packages/cli/src/ui/types.ts index 78f8ea6dc..c67eaa020 100644 --- a/packages/cli/src/ui/types.ts +++ b/packages/cli/src/ui/types.ts @@ -217,6 +217,15 @@ export interface ConsoleMessageItem { count: number; } +/** + * Result type for a slash command that should immediately result in a prompt + * being submitted to the Gemini model. + */ +export interface SubmitPromptResult { + type: 'submit_prompt'; + content: string; +} + /** * Defines the result of the slash command processor for its consumer (useGeminiStream). */ @@ -228,4 +237,5 @@ export type SlashCommandProcessorResult = } | { type: 'handled'; // Indicates the command was processed and no further action is needed. 
- }; + } + | SubmitPromptResult; diff --git a/packages/cli/src/ui/utils/CodeColorizer.tsx b/packages/cli/src/ui/utils/CodeColorizer.tsx index aaa183abb..58b32c7e9 100644 --- a/packages/cli/src/ui/utils/CodeColorizer.tsx +++ b/packages/cli/src/ui/utils/CodeColorizer.tsx @@ -50,7 +50,7 @@ function renderHastNode( } // Determine the color to pass down: Use this element's specific color - // if found, otherwise, continue passing down the already inherited color. + // if found; otherwise, continue passing down the already inherited color. const colorToPassDown = elementColor || inheritedColor; // Recursively render children, passing the determined color down @@ -68,9 +68,9 @@ function renderHastNode( return {children}; } - // Handle Root Node: Start recursion with initial inherited color + // Handle Root Node: Start recursion with initially inherited color if (node.type === 'root') { - // Check if children array is empty - this happens when lowlight can't detect language – fallback to plain text + // Check if children array is empty - this happens when lowlight can't detect language – fall back to plain text if (!node.children || node.children.length === 0) { return null; } @@ -88,6 +88,34 @@ function renderHastNode( return null; } +function highlightAndRenderLine( + line: string, + language: string | null, + theme: Theme, +): React.ReactNode { + try { + const getHighlightedLine = () => + !language || !lowlight.registered(language) + ? lowlight.highlightAuto(line) + : lowlight.highlight(language, line); + + const renderedNode = renderHastNode(getHighlightedLine(), theme, undefined); + + return renderedNode !== null ? 
renderedNode : line; + } catch (_error) { + return line; + } +} + +export function colorizeLine( + line: string, + language: string | null, + theme?: Theme, +): React.ReactNode { + const activeTheme = theme || themeManager.getActiveTheme(); + return highlightAndRenderLine(line, language, activeTheme); +} + /** * Renders syntax-highlighted code for Ink applications using a selected theme. * @@ -100,9 +128,10 @@ export function colorizeCode( language: string | null, availableHeight?: number, maxWidth?: number, + theme?: Theme, ): React.ReactNode { const codeToHighlight = code.replace(/\n$/, ''); - const activeTheme = themeManager.getActiveTheme(); + const activeTheme = theme || themeManager.getActiveTheme(); try { // Render the HAST tree using the adapted theme @@ -122,11 +151,6 @@ export function colorizeCode( } } - const getHighlightedLines = (line: string) => - !language || !lowlight.registered(language) - ? lowlight.highlightAuto(line) - : lowlight.highlight(language, line); - return ( {lines.map((line, index) => { - const renderedNode = renderHastNode( - getHighlightedLines(line), + const contentToRender = highlightAndRenderLine( + line, + language, activeTheme, - undefined, ); - const contentToRender = renderedNode !== null ? 
renderedNode : line; return ( - {`${String(index + 1 + hiddenLinesCount).padStart(padWidth, ' ')} `} + {`${String(index + 1 + hiddenLinesCount).padStart( + padWidth, + ' ', + )} `} {contentToRender} @@ -160,7 +186,7 @@ export function colorizeCode( `[colorizeCode] Error highlighting code for language "${language}":`, error, ); - // Fallback to plain text with default color on error + // Fall back to plain text with default color on error // Also display line numbers in fallback const lines = codeToHighlight.split('\n'); const padWidth = String(lines.length).length; // Calculate padding width based on number of lines diff --git a/packages/cli/src/ui/utils/commandUtils.test.ts b/packages/cli/src/ui/utils/commandUtils.test.ts new file mode 100644 index 000000000..4bd48ceef --- /dev/null +++ b/packages/cli/src/ui/utils/commandUtils.test.ts @@ -0,0 +1,345 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { vi, describe, it, expect, beforeEach, Mock } from 'vitest'; +import { spawn } from 'child_process'; +import { EventEmitter } from 'events'; +import { + isAtCommand, + isSlashCommand, + copyToClipboard, +} from './commandUtils.js'; + +// Mock child_process +vi.mock('child_process'); + +// Mock process.platform for platform-specific tests +const mockProcess = vi.hoisted(() => ({ + platform: 'darwin', +})); + +vi.stubGlobal('process', { + ...process, + get platform() { + return mockProcess.platform; + }, +}); + +interface MockChildProcess extends EventEmitter { + stdin: EventEmitter & { + write: Mock; + end: Mock; + }; + stderr: EventEmitter; +} + +describe('commandUtils', () => { + let mockSpawn: Mock; + let mockChild: MockChildProcess; + + beforeEach(async () => { + vi.clearAllMocks(); + // Dynamically import and set up spawn mock + const { spawn } = await import('child_process'); + mockSpawn = spawn as Mock; + + // Create mock child process with stdout/stderr emitters + mockChild = Object.assign(new 
EventEmitter(), { + stdin: Object.assign(new EventEmitter(), { + write: vi.fn(), + end: vi.fn(), + }), + stderr: new EventEmitter(), + }) as MockChildProcess; + + mockSpawn.mockReturnValue(mockChild as unknown as ReturnType); + }); + + describe('isAtCommand', () => { + it('should return true when query starts with @', () => { + expect(isAtCommand('@file')).toBe(true); + expect(isAtCommand('@path/to/file')).toBe(true); + expect(isAtCommand('@')).toBe(true); + }); + + it('should return true when query contains @ preceded by whitespace', () => { + expect(isAtCommand('hello @file')).toBe(true); + expect(isAtCommand('some text @path/to/file')).toBe(true); + expect(isAtCommand(' @file')).toBe(true); + }); + + it('should return false when query does not start with @ and has no spaced @', () => { + expect(isAtCommand('file')).toBe(false); + expect(isAtCommand('hello')).toBe(false); + expect(isAtCommand('')).toBe(false); + expect(isAtCommand('email@domain.com')).toBe(false); + expect(isAtCommand('user@host')).toBe(false); + }); + + it('should return false when @ is not preceded by whitespace', () => { + expect(isAtCommand('hello@file')).toBe(false); + expect(isAtCommand('text@path')).toBe(false); + }); + }); + + describe('isSlashCommand', () => { + it('should return true when query starts with /', () => { + expect(isSlashCommand('/help')).toBe(true); + expect(isSlashCommand('/memory show')).toBe(true); + expect(isSlashCommand('/clear')).toBe(true); + expect(isSlashCommand('/')).toBe(true); + }); + + it('should return false when query does not start with /', () => { + expect(isSlashCommand('help')).toBe(false); + expect(isSlashCommand('memory show')).toBe(false); + expect(isSlashCommand('')).toBe(false); + expect(isSlashCommand('path/to/file')).toBe(false); + expect(isSlashCommand(' /help')).toBe(false); + }); + }); + + describe('copyToClipboard', () => { + describe('on macOS (darwin)', () => { + beforeEach(() => { + mockProcess.platform = 'darwin'; + }); + + it('should 
successfully copy text to clipboard using pbcopy', async () => { + const testText = 'Hello, world!'; + + // Simulate successful execution + setTimeout(() => { + mockChild.emit('close', 0); + }, 0); + + await copyToClipboard(testText); + + expect(mockSpawn).toHaveBeenCalledWith('pbcopy', []); + expect(mockChild.stdin.write).toHaveBeenCalledWith(testText); + expect(mockChild.stdin.end).toHaveBeenCalled(); + }); + + it('should handle pbcopy command failure', async () => { + const testText = 'Hello, world!'; + + // Simulate command failure + setTimeout(() => { + mockChild.stderr.emit('data', 'Command not found'); + mockChild.emit('close', 1); + }, 0); + + await expect(copyToClipboard(testText)).rejects.toThrow( + "'pbcopy' exited with code 1: Command not found", + ); + }); + + it('should handle spawn error', async () => { + const testText = 'Hello, world!'; + + setTimeout(() => { + mockChild.emit('error', new Error('spawn error')); + }, 0); + + await expect(copyToClipboard(testText)).rejects.toThrow('spawn error'); + }); + + it('should handle stdin write error', async () => { + const testText = 'Hello, world!'; + + setTimeout(() => { + mockChild.stdin.emit('error', new Error('stdin error')); + }, 0); + + await expect(copyToClipboard(testText)).rejects.toThrow('stdin error'); + }); + }); + + describe('on Windows (win32)', () => { + beforeEach(() => { + mockProcess.platform = 'win32'; + }); + + it('should successfully copy text to clipboard using clip', async () => { + const testText = 'Hello, world!'; + + setTimeout(() => { + mockChild.emit('close', 0); + }, 0); + + await copyToClipboard(testText); + + expect(mockSpawn).toHaveBeenCalledWith('clip', []); + expect(mockChild.stdin.write).toHaveBeenCalledWith(testText); + expect(mockChild.stdin.end).toHaveBeenCalled(); + }); + }); + + describe('on Linux', () => { + beforeEach(() => { + mockProcess.platform = 'linux'; + }); + + it('should successfully copy text to clipboard using xclip', async () => { + const testText = 
'Hello, world!'; + + setTimeout(() => { + mockChild.emit('close', 0); + }, 0); + + await copyToClipboard(testText); + + expect(mockSpawn).toHaveBeenCalledWith('xclip', [ + '-selection', + 'clipboard', + ]); + expect(mockChild.stdin.write).toHaveBeenCalledWith(testText); + expect(mockChild.stdin.end).toHaveBeenCalled(); + }); + + it('should fall back to xsel when xclip fails', async () => { + const testText = 'Hello, world!'; + let callCount = 0; + + mockSpawn.mockImplementation(() => { + const child = Object.assign(new EventEmitter(), { + stdin: Object.assign(new EventEmitter(), { + write: vi.fn(), + end: vi.fn(), + }), + stderr: new EventEmitter(), + }) as MockChildProcess; + + setTimeout(() => { + if (callCount === 0) { + // First call (xclip) fails + child.stderr.emit('data', 'xclip not found'); + child.emit('close', 1); + callCount++; + } else { + // Second call (xsel) succeeds + child.emit('close', 0); + } + }, 0); + + return child as unknown as ReturnType; + }); + + await copyToClipboard(testText); + + expect(mockSpawn).toHaveBeenCalledTimes(2); + expect(mockSpawn).toHaveBeenNthCalledWith(1, 'xclip', [ + '-selection', + 'clipboard', + ]); + expect(mockSpawn).toHaveBeenNthCalledWith(2, 'xsel', [ + '--clipboard', + '--input', + ]); + }); + + it('should throw error when both xclip and xsel fail', async () => { + const testText = 'Hello, world!'; + let callCount = 0; + + mockSpawn.mockImplementation(() => { + const child = Object.assign(new EventEmitter(), { + stdin: Object.assign(new EventEmitter(), { + write: vi.fn(), + end: vi.fn(), + }), + stderr: new EventEmitter(), + }); + + setTimeout(() => { + if (callCount === 0) { + // First call (xclip) fails + child.stderr.emit('data', 'xclip command not found'); + child.emit('close', 1); + callCount++; + } else { + // Second call (xsel) fails + child.stderr.emit('data', 'xsel command not found'); + child.emit('close', 1); + } + }, 0); + + return child as unknown as ReturnType; + }); + + await 
expect(copyToClipboard(testText)).rejects.toThrow( + /All copy commands failed/, + ); + + expect(mockSpawn).toHaveBeenCalledTimes(2); + }); + }); + + describe('on unsupported platform', () => { + beforeEach(() => { + mockProcess.platform = 'unsupported'; + }); + + it('should throw error for unsupported platform', async () => { + await expect(copyToClipboard('test')).rejects.toThrow( + 'Unsupported platform: unsupported', + ); + }); + }); + + describe('error handling', () => { + beforeEach(() => { + mockProcess.platform = 'darwin'; + }); + + it('should handle command exit without stderr', async () => { + const testText = 'Hello, world!'; + + setTimeout(() => { + mockChild.emit('close', 1); + }, 0); + + await expect(copyToClipboard(testText)).rejects.toThrow( + "'pbcopy' exited with code 1", + ); + }); + + it('should handle empty text', async () => { + setTimeout(() => { + mockChild.emit('close', 0); + }, 0); + + await copyToClipboard(''); + + expect(mockChild.stdin.write).toHaveBeenCalledWith(''); + }); + + it('should handle multiline text', async () => { + const multilineText = 'Line 1\nLine 2\nLine 3'; + + setTimeout(() => { + mockChild.emit('close', 0); + }, 0); + + await copyToClipboard(multilineText); + + expect(mockChild.stdin.write).toHaveBeenCalledWith(multilineText); + }); + + it('should handle special characters', async () => { + const specialText = 'Special chars: !@#$%^&*()_+-=[]{}|;:,.<>?'; + + setTimeout(() => { + mockChild.emit('close', 0); + }, 0); + + await copyToClipboard(specialText); + + expect(mockChild.stdin.write).toHaveBeenCalledWith(specialText); + }); + }); + }); +}); diff --git a/packages/cli/src/ui/utils/commandUtils.ts b/packages/cli/src/ui/utils/commandUtils.ts index aadd035ea..4280388f9 100644 --- a/packages/cli/src/ui/utils/commandUtils.ts +++ b/packages/cli/src/ui/utils/commandUtils.ts @@ -4,6 +4,8 @@ * SPDX-License-Identifier: Apache-2.0 */ +import { spawn } from 'child_process'; + /** * Checks if a query string potentially 
represents an '@' command. * It triggers if the query starts with '@' or contains '@' preceded by whitespace @@ -24,3 +26,57 @@ export const isAtCommand = (query: string): boolean => * @returns True if the query looks like an '/' command, false otherwise. */ export const isSlashCommand = (query: string): boolean => query.startsWith('/'); + +//Copies a string snippet to the clipboard for different platforms +export const copyToClipboard = async (text: string): Promise => { + const run = (cmd: string, args: string[]) => + new Promise((resolve, reject) => { + const child = spawn(cmd, args); + let stderr = ''; + child.stderr.on('data', (chunk) => (stderr += chunk.toString())); + child.on('error', reject); + child.on('close', (code) => { + if (code === 0) return resolve(); + const errorMsg = stderr.trim(); + reject( + new Error( + `'${cmd}' exited with code ${code}${errorMsg ? `: ${errorMsg}` : ''}`, + ), + ); + }); + child.stdin.on('error', reject); + child.stdin.write(text); + child.stdin.end(); + }); + + switch (process.platform) { + case 'win32': + return run('clip', []); + case 'darwin': + return run('pbcopy', []); + case 'linux': + try { + await run('xclip', ['-selection', 'clipboard']); + } catch (primaryError) { + try { + // If xclip fails for any reason, try xsel as a fallback. + await run('xsel', ['--clipboard', '--input']); + } catch (fallbackError) { + const primaryMsg = + primaryError instanceof Error + ? primaryError.message + : String(primaryError); + const fallbackMsg = + fallbackError instanceof Error + ? fallbackError.message + : String(fallbackError); + throw new Error( + `All copy commands failed. xclip: "${primaryMsg}", xsel: "${fallbackMsg}". 
Please ensure xclip or xsel is installed and configured.`, + ); + } + } + return; + default: + throw new Error(`Unsupported platform: ${process.platform}`); + } +}; diff --git a/packages/cli/src/ui/utils/errorParsing.ts b/packages/cli/src/ui/utils/errorParsing.ts index 6e82f53b3..10616514a 100644 --- a/packages/cli/src/ui/utils/errorParsing.ts +++ b/packages/cli/src/ui/utils/errorParsing.ts @@ -14,6 +14,7 @@ import { isApiError, isStructuredError, } from '@qwen-code/qwen-code-core'; + // Free Tier message functions const getRateLimitErrorMessageGoogleFree = ( fallbackModel: string = DEFAULT_GEMINI_FLASH_MODEL, diff --git a/packages/cli/src/ui/utils/markdownUtilities.ts b/packages/cli/src/ui/utils/markdownUtilities.ts index c328c12a8..6a34f1387 100644 --- a/packages/cli/src/ui/utils/markdownUtilities.ts +++ b/packages/cli/src/ui/utils/markdownUtilities.ts @@ -29,7 +29,7 @@ This function aims to find an *intelligent* or "safe" index within the provided * **Single Line Breaks:** If no double newline is found in a suitable range, it will look for a single newline (`\n`). * Any newline chosen as a split point must also not be inside a code block. -4. **Fallback to `idealMaxLength`:** +4. **Fall back to `idealMaxLength`:** * If no "safer" split point (respecting code blocks or finding suitable newlines) is identified before or at `idealMaxLength`, and `idealMaxLength` itself is not determined to be an unsafe split point (e.g., inside a code block), the function may return a length larger than `idealMaxLength`, again it CANNOT break markdown formatting. This could happen with very long lines of text without Markdown block structures or newlines. 
**In essence, `findSafeSplitPoint` tries to be a good Markdown citizen when forced to divide content, preferring structural boundaries over arbitrary character limits, with a strong emphasis on not corrupting code blocks.** diff --git a/packages/cli/src/ui/utils/textUtils.test.ts b/packages/cli/src/ui/utils/textUtils.test.ts deleted file mode 100644 index 5dd088757..000000000 --- a/packages/cli/src/ui/utils/textUtils.test.ts +++ /dev/null @@ -1,41 +0,0 @@ -/** - * @license - * Copyright 2025 Google LLC - * SPDX-License-Identifier: Apache-2.0 - */ - -import { isBinary } from './textUtils'; - -describe('textUtils', () => { - describe('isBinary', () => { - it('should return true for a buffer containing a null byte', () => { - const buffer = Buffer.from([ - 0x89, 0x50, 0x4e, 0x47, 0x0d, 0x1a, 0x0a, 0x00, - ]); - expect(isBinary(buffer)).toBe(true); - }); - - it('should return false for a buffer containing only text', () => { - const buffer = Buffer.from('This is a test string.'); - expect(isBinary(buffer)).toBe(false); - }); - - it('should return false for an empty buffer', () => { - const buffer = Buffer.from([]); - expect(isBinary(buffer)).toBe(false); - }); - - it('should return false for a null or undefined buffer', () => { - expect(isBinary(null)).toBe(false); - expect(isBinary(undefined)).toBe(false); - }); - - it('should only check the sample size', () => { - const longBufferWithNullByteAtEnd = Buffer.concat([ - Buffer.from('a'.repeat(1024)), - Buffer.from([0x00]), - ]); - expect(isBinary(longBufferWithNullByteAtEnd, 512)).toBe(false); - }); - }); -}); diff --git a/packages/cli/src/ui/utils/textUtils.ts b/packages/cli/src/ui/utils/textUtils.ts index fa0abe9a8..e4d8ea582 100644 --- a/packages/cli/src/ui/utils/textUtils.ts +++ b/packages/cli/src/ui/utils/textUtils.ts @@ -17,35 +17,6 @@ export const getAsciiArtWidth = (asciiArt: string): number => { return Math.max(...lines.map((line) => line.length)); }; -/** - * Checks if a Buffer is likely binary by testing for 
the presence of a NULL byte. - * The presence of a NULL byte is a strong indicator that the data is not plain text. - * @param data The Buffer to check. - * @param sampleSize The number of bytes from the start of the buffer to test. - * @returns True if a NULL byte is found, false otherwise. - */ -export function isBinary( - data: Buffer | null | undefined, - sampleSize = 512, -): boolean { - if (!data) { - return false; - } - - const sample = data.length > sampleSize ? data.subarray(0, sampleSize) : data; - - for (const byte of sample) { - // The presence of a NULL byte (0x00) is one of the most reliable - // indicators of a binary file. Text files should not contain them. - if (byte === 0) { - return true; - } - } - - // If no NULL bytes were found in the sample, we assume it's text. - return false; -} - /* * ------------------------------------------------------------------------- * Unicode‑aware helpers (work at the code‑point level rather than UTF‑16 diff --git a/packages/cli/src/ui/utils/updateCheck.test.ts b/packages/cli/src/ui/utils/updateCheck.test.ts index 6d3c8b779..975c320db 100644 --- a/packages/cli/src/ui/utils/updateCheck.test.ts +++ b/packages/cli/src/ui/utils/updateCheck.test.ts @@ -20,6 +20,23 @@ vi.mock('update-notifier', () => ({ describe('checkForUpdates', () => { beforeEach(() => { vi.resetAllMocks(); + // Clear DEV environment variable before each test + delete process.env.DEV; + }); + + it('should return null when running from source (DEV=true)', async () => { + process.env.DEV = 'true'; + getPackageJson.mockResolvedValue({ + name: 'test-package', + version: '1.0.0', + }); + updateNotifier.mockReturnValue({ + update: { current: '1.0.0', latest: '1.1.0' }, + }); + const result = await checkForUpdates(); + expect(result).toBeNull(); + expect(getPackageJson).not.toHaveBeenCalled(); + expect(updateNotifier).not.toHaveBeenCalled(); }); it('should return null if package.json is missing', async () => { diff --git 
a/packages/cli/src/ui/utils/updateCheck.ts b/packages/cli/src/ui/utils/updateCheck.ts index 6be5effc2..904a98906 100644 --- a/packages/cli/src/ui/utils/updateCheck.ts +++ b/packages/cli/src/ui/utils/updateCheck.ts @@ -10,6 +10,11 @@ import { getPackageJson } from '../../utils/package.js'; export async function checkForUpdates(): Promise { try { + // Skip update check when running from source (development mode) + if (process.env.DEV === 'true') { + return null; + } + const packageJson = await getPackageJson(); if (!packageJson || !packageJson.name || !packageJson.version) { return null; diff --git a/packages/cli/src/utils/events.ts b/packages/cli/src/utils/events.ts new file mode 100644 index 000000000..393643879 --- /dev/null +++ b/packages/cli/src/utils/events.ts @@ -0,0 +1,14 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { EventEmitter } from 'events'; + +export enum AppEvent { + OpenDebugConsole = 'open-debug-console', + LogError = 'log-error', +} + +export const appEvents = new EventEmitter(); diff --git a/packages/cli/src/utils/sandbox.ts b/packages/cli/src/utils/sandbox.ts index d7ea26d17..7dfbc557e 100644 --- a/packages/cli/src/utils/sandbox.ts +++ b/packages/cli/src/utils/sandbox.ts @@ -99,7 +99,7 @@ async function shouldUseCurrentUserInSandbox(): Promise { } // docker does not allow container names to contain ':' or '/', so we -// parse those out and make the name a little shorter +// parse those out to shorten the name function parseImageName(image: string): string { const [fullName, tag] = image.split(':'); const name = fullName.split('/').at(-1) ?? 
'unknown-image'; @@ -187,7 +187,7 @@ export async function start_sandbox( if (config.command === 'sandbox-exec') { // disallow BUILD_SANDBOX if (process.env.BUILD_SANDBOX) { - console.error('ERROR: cannot BUILD_SANDBOX when using MacOS Seatbelt'); + console.error('ERROR: cannot BUILD_SANDBOX when using macOS Seatbelt'); process.exit(1); } const profile = (process.env.SEATBELT_PROFILE ??= 'permissive-open'); @@ -536,6 +536,14 @@ export async function start_sandbox( ); } + // copy GOOGLE_GENAI_USE_GCA + if (process.env.GOOGLE_GENAI_USE_GCA) { + args.push( + '--env', + `GOOGLE_GENAI_USE_GCA=${process.env.GOOGLE_GENAI_USE_GCA}`, + ); + } + // copy GOOGLE_CLOUD_PROJECT if (process.env.GOOGLE_CLOUD_PROJECT) { args.push( @@ -858,7 +866,7 @@ async function ensureSandboxImageIsPresent( console.info(`Sandbox image ${image} not found locally.`); if (image === LOCAL_DEV_SANDBOX_IMAGE_NAME) { - // user needs to build the image themself + // user needs to build the image themselves return false; } diff --git a/packages/cli/src/utils/userStartupWarnings.test.ts b/packages/cli/src/utils/userStartupWarnings.test.ts index 8210d4e36..6d9b89899 100644 --- a/packages/cli/src/utils/userStartupWarnings.test.ts +++ b/packages/cli/src/utils/userStartupWarnings.test.ts @@ -8,113 +8,80 @@ import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; import { getUserStartupWarnings } from './userStartupWarnings.js'; import * as os from 'os'; import fs from 'fs/promises'; -import semver from 'semver'; +import path from 'path'; -vi.mock('os', () => ({ - default: { homedir: vi.fn() }, - homedir: vi.fn(), -})); - -vi.mock('fs/promises', () => ({ - default: { realpath: vi.fn() }, -})); - -vi.mock('semver', () => ({ - default: { - major: vi.fn(), - }, - major: vi.fn(), -})); +// Mock os.homedir to control the home directory in tests +vi.mock('os', async (importOriginal) => { + const actualOs = await importOriginal(); + return { + ...actualOs, + homedir: vi.fn(), + }; +}); 
describe('getUserStartupWarnings', () => { - const homeDir = '/home/user'; + let testRootDir: string; + let homeDir: string; - beforeEach(() => { + beforeEach(async () => { + testRootDir = await fs.mkdtemp(path.join(os.tmpdir(), 'warnings-test-')); + homeDir = path.join(testRootDir, 'home'); + await fs.mkdir(homeDir, { recursive: true }); vi.mocked(os.homedir).mockReturnValue(homeDir); - vi.mocked(fs.realpath).mockImplementation(async (path) => path.toString()); }); - afterEach(() => { + afterEach(async () => { + await fs.rm(testRootDir, { recursive: true, force: true }); vi.clearAllMocks(); }); describe('home directory check', () => { it('should return a warning when running in home directory', async () => { - vi.mocked(fs.realpath) - .mockResolvedValueOnce(homeDir) - .mockResolvedValueOnce(homeDir); - const warnings = await getUserStartupWarnings(homeDir); - expect(warnings).toContainEqual( expect.stringContaining('home directory'), ); }); it('should not return a warning when running in a project directory', async () => { - vi.mocked(fs.realpath) - .mockResolvedValueOnce('/some/project/path') - .mockResolvedValueOnce(homeDir); - - const warnings = await getUserStartupWarnings('/some/project/path'); + const projectDir = path.join(testRootDir, 'project'); + await fs.mkdir(projectDir); + const warnings = await getUserStartupWarnings(projectDir); expect(warnings).not.toContainEqual( expect.stringContaining('home directory'), ); }); + }); - it('should handle errors when checking directory', async () => { - vi.mocked(fs.realpath) - .mockRejectedValueOnce(new Error('FS error')) - .mockResolvedValueOnce(homeDir); - - const warnings = await getUserStartupWarnings('/error/path'); + describe('root directory check', () => { + it('should return a warning when running in a root directory', async () => { + const rootDir = path.parse(testRootDir).root; + const warnings = await getUserStartupWarnings(rootDir); expect(warnings).toContainEqual( - expect.stringContaining('Could not 
verify'), + expect.stringContaining('root directory'), + ); + expect(warnings).toContainEqual( + expect.stringContaining('folder structure will be used'), + ); + }); + + it('should not return a warning when running in a non-root directory', async () => { + const projectDir = path.join(testRootDir, 'project'); + await fs.mkdir(projectDir); + const warnings = await getUserStartupWarnings(projectDir); + expect(warnings).not.toContainEqual( + expect.stringContaining('root directory'), ); }); }); - function setNodeVersionMajor(majorVersion: number) { - vi.mocked(semver.major).mockReturnValue(majorVersion); - } - - describe('node version check', () => { - afterEach(() => { - setNodeVersionMajor(20); - }); - - it('should return a warning if Node.js version is less than minMajor', async () => { - setNodeVersionMajor(18); - const warnings = await getUserStartupWarnings(''); - expect(warnings).toHaveLength(1); - expect(warnings[0]).toContain('Node.js'); - expect(warnings[0]).toContain('requires Node.js 20 or higher'); - }); - - it('should not return a warning if Node.js version is equal to minMajor', async () => { - setNodeVersionMajor(20); - const warnings = await getUserStartupWarnings(''); - expect(warnings).toEqual([]); - }); - - it('should not return a warning if Node.js version is greater than minMajor', async () => { - setNodeVersionMajor(22); - const warnings = await getUserStartupWarnings(''); - expect(warnings).toEqual([]); - }); - - it('should use default minMajor=20 if not provided', async () => { - setNodeVersionMajor(18); - const warnings = await getUserStartupWarnings(''); - expect(warnings).toHaveLength(1); - expect(warnings[0]).toContain('Node.js'); - expect(warnings[0]).toContain('requires Node.js 20 or higher'); + describe('error handling', () => { + it('should handle errors when checking directory', async () => { + const nonExistentPath = path.join(testRootDir, 'non-existent'); + const warnings = await getUserStartupWarnings(nonExistentPath); + const 
expectedWarning = + 'Could not verify the current directory due to a file system error.'; + expect(warnings).toEqual([expectedWarning, expectedWarning]); }); }); - - // // Example of how to add a new check: - // describe('node version check', () => { - // // Tests for node version check would go here - // // This shows how easy it is to add new test sections - // }); }); diff --git a/packages/cli/src/utils/userStartupWarnings.ts b/packages/cli/src/utils/userStartupWarnings.ts index 244bc5894..edb5e92dd 100644 --- a/packages/cli/src/utils/userStartupWarnings.ts +++ b/packages/cli/src/utils/userStartupWarnings.ts @@ -6,7 +6,7 @@ import fs from 'fs/promises'; import * as os from 'os'; -import semver from 'semver'; +import path from 'path'; type WarningCheck = { id: string; @@ -24,7 +24,7 @@ const homeDirectoryCheck: WarningCheck = { ]); if (workspaceRealPath === homeRealPath) { - return 'You are running Qwen Code in your home directory. It is recommended to run in a project-specific directory.'; + return 'You are running Gemini CLI in your home directory. It is recommended to run in a project-specific directory.'; } return null; } catch (_err: unknown) { @@ -33,22 +33,30 @@ const homeDirectoryCheck: WarningCheck = { }, }; -const nodeVersionCheck: WarningCheck = { - id: 'node-version', - check: async (_workspaceRoot: string) => { - const minMajor = 20; - const major = semver.major(process.versions.node); - if (major < minMajor) { - return `You are using Node.js v${process.versions.node}. Gemini CLI requires Node.js ${minMajor} or higher for best results.`; +const rootDirectoryCheck: WarningCheck = { + id: 'root-directory', + check: async (workspaceRoot: string) => { + try { + const workspaceRealPath = await fs.realpath(workspaceRoot); + const errorMessage = + 'Warning: You are running Qwen Code in the root directory. Your entire folder structure will be used for context. 
It is strongly recommended to run in a project-specific directory.'; + + // Check for Unix root directory + if (path.dirname(workspaceRealPath) === workspaceRealPath) { + return errorMessage; + } + + return null; + } catch (_err: unknown) { + return 'Could not verify the current directory due to a file system error.'; } - return null; }, }; // All warning checks const WARNING_CHECKS: readonly WarningCheck[] = [ homeDirectoryCheck, - nodeVersionCheck, + rootDirectoryCheck, ]; export async function getUserStartupWarnings( diff --git a/packages/cli/src/validateNonInterActiveAuth.test.ts b/packages/cli/src/validateNonInterActiveAuth.test.ts new file mode 100644 index 000000000..801f4cce2 --- /dev/null +++ b/packages/cli/src/validateNonInterActiveAuth.test.ts @@ -0,0 +1,178 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; +import { + validateNonInteractiveAuth, + NonInteractiveConfig, +} from './validateNonInterActiveAuth.js'; +import { AuthType } from '@qwen-code/qwen-code-core'; + +describe('validateNonInterActiveAuth', () => { + let originalEnvGeminiApiKey: string | undefined; + let originalEnvVertexAi: string | undefined; + let originalEnvGcp: string | undefined; + let consoleErrorSpy: ReturnType; + let processExitSpy: ReturnType; + let refreshAuthMock: jest.MockedFunction< + (authType: AuthType) => Promise + >; + + beforeEach(() => { + originalEnvGeminiApiKey = process.env.GEMINI_API_KEY; + originalEnvVertexAi = process.env.GOOGLE_GENAI_USE_VERTEXAI; + originalEnvGcp = process.env.GOOGLE_GENAI_USE_GCA; + delete process.env.GEMINI_API_KEY; + delete process.env.GOOGLE_GENAI_USE_VERTEXAI; + delete process.env.GOOGLE_GENAI_USE_GCA; + consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {}); + processExitSpy = vi.spyOn(process, 'exit').mockImplementation((code) => { + throw new Error(`process.exit(${code}) called`); + }); + 
refreshAuthMock = vi.fn().mockResolvedValue('refreshed'); + }); + + afterEach(() => { + if (originalEnvGeminiApiKey !== undefined) { + process.env.GEMINI_API_KEY = originalEnvGeminiApiKey; + } else { + delete process.env.GEMINI_API_KEY; + } + if (originalEnvVertexAi !== undefined) { + process.env.GOOGLE_GENAI_USE_VERTEXAI = originalEnvVertexAi; + } else { + delete process.env.GOOGLE_GENAI_USE_VERTEXAI; + } + if (originalEnvGcp !== undefined) { + process.env.GOOGLE_GENAI_USE_GCA = originalEnvGcp; + } else { + delete process.env.GOOGLE_GENAI_USE_GCA; + } + vi.restoreAllMocks(); + }); + + it('exits if no auth type is configured or env vars set', async () => { + const nonInteractiveConfig: NonInteractiveConfig = { + refreshAuth: refreshAuthMock, + }; + try { + await validateNonInteractiveAuth(undefined, nonInteractiveConfig); + expect.fail('Should have exited'); + } catch (e) { + expect((e as Error).message).toContain('process.exit(1) called'); + } + expect(consoleErrorSpy).toHaveBeenCalledWith( + expect.stringContaining('Please set an Auth method'), + ); + expect(processExitSpy).toHaveBeenCalledWith(1); + }); + + it('uses LOGIN_WITH_GOOGLE if GOOGLE_GENAI_USE_GCA is set', async () => { + process.env.GOOGLE_GENAI_USE_GCA = 'true'; + const nonInteractiveConfig: NonInteractiveConfig = { + refreshAuth: refreshAuthMock, + }; + await validateNonInteractiveAuth(undefined, nonInteractiveConfig); + expect(refreshAuthMock).toHaveBeenCalledWith(AuthType.LOGIN_WITH_GOOGLE); + }); + + it('uses USE_GEMINI if GEMINI_API_KEY is set', async () => { + process.env.GEMINI_API_KEY = 'fake-key'; + const nonInteractiveConfig: NonInteractiveConfig = { + refreshAuth: refreshAuthMock, + }; + await validateNonInteractiveAuth(undefined, nonInteractiveConfig); + expect(refreshAuthMock).toHaveBeenCalledWith(AuthType.USE_GEMINI); + }); + + it('uses USE_VERTEX_AI if GOOGLE_GENAI_USE_VERTEXAI is true (with GOOGLE_CLOUD_PROJECT and GOOGLE_CLOUD_LOCATION)', async () => { + 
process.env.GOOGLE_GENAI_USE_VERTEXAI = 'true'; + process.env.GOOGLE_CLOUD_PROJECT = 'test-project'; + process.env.GOOGLE_CLOUD_LOCATION = 'us-central1'; + const nonInteractiveConfig: NonInteractiveConfig = { + refreshAuth: refreshAuthMock, + }; + await validateNonInteractiveAuth(undefined, nonInteractiveConfig); + expect(refreshAuthMock).toHaveBeenCalledWith(AuthType.USE_VERTEX_AI); + }); + + it('uses USE_VERTEX_AI if GOOGLE_GENAI_USE_VERTEXAI is true and GOOGLE_API_KEY is set', async () => { + process.env.GOOGLE_GENAI_USE_VERTEXAI = 'true'; + process.env.GOOGLE_API_KEY = 'vertex-api-key'; + const nonInteractiveConfig: NonInteractiveConfig = { + refreshAuth: refreshAuthMock, + }; + await validateNonInteractiveAuth(undefined, nonInteractiveConfig); + expect(refreshAuthMock).toHaveBeenCalledWith(AuthType.USE_VERTEX_AI); + }); + + it('uses LOGIN_WITH_GOOGLE if GOOGLE_GENAI_USE_GCA is set, even with other env vars', async () => { + process.env.GOOGLE_GENAI_USE_GCA = 'true'; + process.env.GEMINI_API_KEY = 'fake-key'; + process.env.GOOGLE_GENAI_USE_VERTEXAI = 'true'; + process.env.GOOGLE_CLOUD_PROJECT = 'test-project'; + process.env.GOOGLE_CLOUD_LOCATION = 'us-central1'; + const nonInteractiveConfig: NonInteractiveConfig = { + refreshAuth: refreshAuthMock, + }; + await validateNonInteractiveAuth(undefined, nonInteractiveConfig); + expect(refreshAuthMock).toHaveBeenCalledWith(AuthType.LOGIN_WITH_GOOGLE); + }); + + it('uses USE_VERTEX_AI if both GEMINI_API_KEY and GOOGLE_GENAI_USE_VERTEXAI are set', async () => { + process.env.GEMINI_API_KEY = 'fake-key'; + process.env.GOOGLE_GENAI_USE_VERTEXAI = 'true'; + process.env.GOOGLE_CLOUD_PROJECT = 'test-project'; + process.env.GOOGLE_CLOUD_LOCATION = 'us-central1'; + const nonInteractiveConfig: NonInteractiveConfig = { + refreshAuth: refreshAuthMock, + }; + await validateNonInteractiveAuth(undefined, nonInteractiveConfig); + expect(refreshAuthMock).toHaveBeenCalledWith(AuthType.USE_VERTEX_AI); + }); + + it('uses USE_GEMINI if 
GOOGLE_GENAI_USE_VERTEXAI is false, GEMINI_API_KEY is set, and project/location are available', async () => { + process.env.GOOGLE_GENAI_USE_VERTEXAI = 'false'; + process.env.GEMINI_API_KEY = 'fake-key'; + process.env.GOOGLE_CLOUD_PROJECT = 'test-project'; + process.env.GOOGLE_CLOUD_LOCATION = 'us-central1'; + const nonInteractiveConfig: NonInteractiveConfig = { + refreshAuth: refreshAuthMock, + }; + await validateNonInteractiveAuth(undefined, nonInteractiveConfig); + expect(refreshAuthMock).toHaveBeenCalledWith(AuthType.USE_GEMINI); + }); + + it('uses configuredAuthType if provided', async () => { + // Set required env var for USE_GEMINI + process.env.GEMINI_API_KEY = 'fake-key'; + const nonInteractiveConfig: NonInteractiveConfig = { + refreshAuth: refreshAuthMock, + }; + await validateNonInteractiveAuth(AuthType.USE_GEMINI, nonInteractiveConfig); + expect(refreshAuthMock).toHaveBeenCalledWith(AuthType.USE_GEMINI); + }); + + it('exits if validateAuthMethod returns error', async () => { + // Mock validateAuthMethod to return error + const mod = await import('./config/auth.js'); + vi.spyOn(mod, 'validateAuthMethod').mockReturnValue('Auth error!'); + const nonInteractiveConfig: NonInteractiveConfig = { + refreshAuth: refreshAuthMock, + }; + try { + await validateNonInteractiveAuth( + AuthType.USE_GEMINI, + nonInteractiveConfig, + ); + expect.fail('Should have exited'); + } catch (e) { + expect((e as Error).message).toContain('process.exit(1) called'); + } + expect(consoleErrorSpy).toHaveBeenCalledWith('Auth error!'); + expect(processExitSpy).toHaveBeenCalledWith(1); + }); +}); diff --git a/packages/cli/src/validateNonInterActiveAuth.ts b/packages/cli/src/validateNonInterActiveAuth.ts new file mode 100644 index 000000000..21a0573d0 --- /dev/null +++ b/packages/cli/src/validateNonInterActiveAuth.ts @@ -0,0 +1,45 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { AuthType, Config } from 
'@qwen-code/qwen-code-core'; +import { USER_SETTINGS_PATH } from './config/settings.js'; +import { validateAuthMethod } from './config/auth.js'; + +function getAuthTypeFromEnv(): AuthType | undefined { + if (process.env.GOOGLE_GENAI_USE_GCA === 'true') { + return AuthType.LOGIN_WITH_GOOGLE; + } + if (process.env.GOOGLE_GENAI_USE_VERTEXAI === 'true') { + return AuthType.USE_VERTEX_AI; + } + if (process.env.GEMINI_API_KEY) { + return AuthType.USE_GEMINI; + } + return undefined; +} + +export async function validateNonInteractiveAuth( + configuredAuthType: AuthType | undefined, + nonInteractiveConfig: Config, +) { + const effectiveAuthType = configuredAuthType || getAuthTypeFromEnv(); + + if (!effectiveAuthType) { + console.error( + `Please set an Auth method in your ${USER_SETTINGS_PATH} or specify one of the following environment variables before running: GEMINI_API_KEY, GOOGLE_GENAI_USE_VERTEXAI, GOOGLE_GENAI_USE_GCA`, + ); + process.exit(1); + } + + const err = validateAuthMethod(effectiveAuthType); + if (err != null) { + console.error(err); + process.exit(1); + } + + await nonInteractiveConfig.refreshAuth(effectiveAuthType); + return nonInteractiveConfig; +} diff --git a/packages/cli/tsconfig.json b/packages/cli/tsconfig.json index c0faa1667..55be9a03f 100644 --- a/packages/cli/tsconfig.json +++ b/packages/cli/tsconfig.json @@ -3,7 +3,7 @@ "compilerOptions": { "outDir": "dist", "jsx": "react-jsx", - "lib": ["DOM", "DOM.Iterable", "ES2020"], + "lib": ["DOM", "DOM.Iterable", "ES2022"], "types": ["node", "vitest/globals"] }, "include": [ diff --git a/packages/core/package-lock.json b/packages/core/package-lock.json deleted file mode 100644 index d4d5dcaaa..000000000 --- a/packages/core/package-lock.json +++ /dev/null @@ -1,4583 +0,0 @@ -{ - "name": "@google/gemini-cli-core", - "version": "0.0.3", - "lockfileVersion": 3, - "requires": true, - "packages": { - "": { - "name": "@google/gemini-cli-core", - "version": "0.0.3", - "dependencies": { - "@google/genai": 
"^1.4.0", - "@modelcontextprotocol/sdk": "^1.11.0", - "@opentelemetry/api": "^1.9.0", - "@opentelemetry/exporter-logs-otlp-grpc": "^0.52.0", - "@opentelemetry/exporter-metrics-otlp-grpc": "^0.52.0", - "@opentelemetry/exporter-trace-otlp-grpc": "^0.52.0", - "@opentelemetry/instrumentation-http": "^0.52.0", - "@opentelemetry/sdk-node": "^0.52.0", - "@types/glob": "^8.1.0", - "@types/html-to-text": "^9.0.4", - "diff": "^7.0.0", - "dotenv": "^16.4.7", - "glob": "^10.4.5", - "google-auth-library": "^9.11.0", - "html-to-text": "^9.0.5", - "ignore": "^7.0.0", - "open": "^10.1.2", - "shell-quote": "^1.8.2", - "simple-git": "^3.28.0", - "strip-ansi": "^7.1.0", - "undici": "^7.10.0", - "ws": "^8.18.0" - }, - "devDependencies": { - "@types/diff": "^7.0.2", - "@types/dotenv": "^6.1.1", - "@types/minimatch": "^5.1.2", - "@types/ws": "^8.5.10", - "typescript": "^5.3.3", - "vitest": "^3.1.1" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/aix-ppc64": { - "version": "0.25.5", - "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.25.5.tgz", - "integrity": "sha512-9o3TMmpmftaCMepOdA5k/yDw8SfInyzWWTjYTFCX3kPSDJMROQTb8jg+h9Cnwnmm1vOzvxN7gIfB5V2ewpjtGA==", - "cpu": [ - "ppc64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "aix" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/android-arm": { - "version": "0.25.5", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.25.5.tgz", - "integrity": "sha512-AdJKSPeEHgi7/ZhuIPtcQKr5RQdo6OO2IL87JkianiMYMPbCtot9fxPbrMiBADOWWm3T2si9stAiVsGbTQFkbA==", - "cpu": [ - "arm" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/android-arm64": { - "version": "0.25.5", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.25.5.tgz", - "integrity": 
"sha512-VGzGhj4lJO+TVGV1v8ntCZWJktV7SGCs3Pn1GRWI1SBFtRALoomm8k5E9Pmwg3HOAal2VDc2F9+PM/rEY6oIDg==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/android-x64": { - "version": "0.25.5", - "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.25.5.tgz", - "integrity": "sha512-D2GyJT1kjvO//drbRT3Hib9XPwQeWd9vZoBJn+bu/lVsOZ13cqNdDeqIF/xQ5/VmWvMduP6AmXvylO/PIc2isw==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/darwin-arm64": { - "version": "0.25.5", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.25.5.tgz", - "integrity": "sha512-GtaBgammVvdF7aPIgH2jxMDdivezgFu6iKpmT+48+F8Hhg5J/sfnDieg0aeG/jfSvkYQU2/pceFPDKlqZzwnfQ==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/darwin-x64": { - "version": "0.25.5", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.25.5.tgz", - "integrity": "sha512-1iT4FVL0dJ76/q1wd7XDsXrSW+oLoquptvh4CLR4kITDtqi2e/xwXwdCVH8hVHU43wgJdsq7Gxuzcs6Iq/7bxQ==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/freebsd-arm64": { - "version": "0.25.5", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.25.5.tgz", - "integrity": "sha512-nk4tGP3JThz4La38Uy/gzyXtpkPW8zSAmoUhK9xKKXdBCzKODMc2adkB2+8om9BDYugz+uGV7sLmpTYzvmz6Sw==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "freebsd" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/freebsd-x64": { - "version": "0.25.5", - "resolved": 
"https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.25.5.tgz", - "integrity": "sha512-PrikaNjiXdR2laW6OIjlbeuCPrPaAl0IwPIaRv+SMV8CiM8i2LqVUHFC1+8eORgWyY7yhQY+2U2fA55mBzReaw==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "freebsd" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/linux-arm": { - "version": "0.25.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.25.5.tgz", - "integrity": "sha512-cPzojwW2okgh7ZlRpcBEtsX7WBuqbLrNXqLU89GxWbNt6uIg78ET82qifUy3W6OVww6ZWobWub5oqZOVtwolfw==", - "cpu": [ - "arm" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/linux-arm64": { - "version": "0.25.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.25.5.tgz", - "integrity": "sha512-Z9kfb1v6ZlGbWj8EJk9T6czVEjjq2ntSYLY2cw6pAZl4oKtfgQuS4HOq41M/BcoLPzrUbNd+R4BXFyH//nHxVg==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/linux-ia32": { - "version": "0.25.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.25.5.tgz", - "integrity": "sha512-sQ7l00M8bSv36GLV95BVAdhJ2QsIbCuCjh/uYrWiMQSUuV+LpXwIqhgJDcvMTj+VsQmqAHL2yYaasENvJ7CDKA==", - "cpu": [ - "ia32" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/linux-loong64": { - "version": "0.25.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.25.5.tgz", - "integrity": "sha512-0ur7ae16hDUC4OL5iEnDb0tZHDxYmuQyhKhsPBV8f99f6Z9KQM02g33f93rNH5A30agMS46u2HP6qTdEt6Q1kg==", - "cpu": [ - "loong64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - 
"node_modules/@esbuild/linux-mips64el": { - "version": "0.25.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.25.5.tgz", - "integrity": "sha512-kB/66P1OsHO5zLz0i6X0RxlQ+3cu0mkxS3TKFvkb5lin6uwZ/ttOkP3Z8lfR9mJOBk14ZwZ9182SIIWFGNmqmg==", - "cpu": [ - "mips64el" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/linux-ppc64": { - "version": "0.25.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.25.5.tgz", - "integrity": "sha512-UZCmJ7r9X2fe2D6jBmkLBMQetXPXIsZjQJCjgwpVDz+YMcS6oFR27alkgGv3Oqkv07bxdvw7fyB71/olceJhkQ==", - "cpu": [ - "ppc64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/linux-riscv64": { - "version": "0.25.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.25.5.tgz", - "integrity": "sha512-kTxwu4mLyeOlsVIFPfQo+fQJAV9mh24xL+y+Bm6ej067sYANjyEw1dNHmvoqxJUCMnkBdKpvOn0Ahql6+4VyeA==", - "cpu": [ - "riscv64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/linux-s390x": { - "version": "0.25.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.25.5.tgz", - "integrity": "sha512-K2dSKTKfmdh78uJ3NcWFiqyRrimfdinS5ErLSn3vluHNeHVnBAFWC8a4X5N+7FgVE1EjXS1QDZbpqZBjfrqMTQ==", - "cpu": [ - "s390x" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/linux-x64": { - "version": "0.25.5", - "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.25.5.tgz", - "integrity": "sha512-uhj8N2obKTE6pSZ+aMUbqq+1nXxNjZIIjCjGLfsWvVpy7gKCOL6rsY1MhRh9zLtUtAI7vpgLMK6DxjO8Qm9lJw==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", 
- "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/netbsd-arm64": { - "version": "0.25.5", - "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.25.5.tgz", - "integrity": "sha512-pwHtMP9viAy1oHPvgxtOv+OkduK5ugofNTVDilIzBLpoWAM16r7b/mxBvfpuQDpRQFMfuVr5aLcn4yveGvBZvw==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "netbsd" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/netbsd-x64": { - "version": "0.25.5", - "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.25.5.tgz", - "integrity": "sha512-WOb5fKrvVTRMfWFNCroYWWklbnXH0Q5rZppjq0vQIdlsQKuw6mdSihwSo4RV/YdQ5UCKKvBy7/0ZZYLBZKIbwQ==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "netbsd" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/openbsd-arm64": { - "version": "0.25.5", - "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.25.5.tgz", - "integrity": "sha512-7A208+uQKgTxHd0G0uqZO8UjK2R0DDb4fDmERtARjSHWxqMTye4Erz4zZafx7Di9Cv+lNHYuncAkiGFySoD+Mw==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "openbsd" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/openbsd-x64": { - "version": "0.25.5", - "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.25.5.tgz", - "integrity": "sha512-G4hE405ErTWraiZ8UiSoesH8DaCsMm0Cay4fsFWOOUcz8b8rC6uCvnagr+gnioEjWn0wC+o1/TAHt+It+MpIMg==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "openbsd" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/sunos-x64": { - "version": "0.25.5", - "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.25.5.tgz", - "integrity": 
"sha512-l+azKShMy7FxzY0Rj4RCt5VD/q8mG/e+mDivgspo+yL8zW7qEwctQ6YqKX34DTEleFAvCIUviCFX1SDZRSyMQA==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "sunos" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/win32-arm64": { - "version": "0.25.5", - "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.25.5.tgz", - "integrity": "sha512-O2S7SNZzdcFG7eFKgvwUEZ2VG9D/sn/eIiz8XRZ1Q/DO5a3s76Xv0mdBzVM5j5R639lXQmPmSo0iRpHqUUrsxw==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/win32-ia32": { - "version": "0.25.5", - "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.25.5.tgz", - "integrity": "sha512-onOJ02pqs9h1iMJ1PQphR+VZv8qBMQ77Klcsqv9CNW2w6yLqoURLcgERAIurY6QE63bbLuqgP9ATqajFLK5AMQ==", - "cpu": [ - "ia32" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@esbuild/win32-x64": { - "version": "0.25.5", - "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.25.5.tgz", - "integrity": "sha512-TXv6YnJ8ZMVdX+SXWVBo/0p8LTcrUYngpWjvm91TMjjBQii7Oz11Lw5lbDV5Y0TzuhSJHwiH4hEtC1I42mMS0g==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">=18" - } - }, - "node_modules/@google/genai": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/@google/genai/-/genai-1.5.0.tgz", - "integrity": "sha512-N/UhGPcKwYw4tD/fERrNQMeHh3yiUDXhJ5f94HSMV7LhkU19IMFSqtXjek6nSpgM/YDJGBYw/MoHVLjfWhmdmQ==", - "license": "Apache-2.0", - "dependencies": { - "google-auth-library": "^9.14.2", - "ws": "^8.18.0", - "zod": "^3.22.4", - "zod-to-json-schema": "^3.22.4" - }, - "engines": { - "node": ">=20.0.0" - }, - "peerDependencies": { - "@modelcontextprotocol/sdk": "^1.11.0" - }, - 
"peerDependenciesMeta": { - "@modelcontextprotocol/sdk": { - "optional": true - } - } - }, - "node_modules/@grpc/grpc-js": { - "version": "1.13.4", - "resolved": "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.13.4.tgz", - "integrity": "sha512-GsFaMXCkMqkKIvwCQjCrwH+GHbPKBjhwo/8ZuUkWHqbI73Kky9I+pQltrlT0+MWpedCoosda53lgjYfyEPgxBg==", - "license": "Apache-2.0", - "dependencies": { - "@grpc/proto-loader": "^0.7.13", - "@js-sdsl/ordered-map": "^4.4.2" - }, - "engines": { - "node": ">=12.10.0" - } - }, - "node_modules/@grpc/proto-loader": { - "version": "0.7.15", - "resolved": "https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.7.15.tgz", - "integrity": "sha512-tMXdRCfYVixjuFK+Hk0Q1s38gV9zDiDJfWL3h1rv4Qc39oILCu1TRTDt7+fGUI8K4G1Fj125Hx/ru3azECWTyQ==", - "license": "Apache-2.0", - "dependencies": { - "lodash.camelcase": "^4.3.0", - "long": "^5.0.0", - "protobufjs": "^7.2.5", - "yargs": "^17.7.2" - }, - "bin": { - "proto-loader-gen-types": "build/bin/proto-loader-gen-types.js" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/@isaacs/cliui": { - "version": "8.0.2", - "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz", - "integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==", - "license": "ISC", - "dependencies": { - "string-width": "^5.1.2", - "string-width-cjs": "npm:string-width@^4.2.0", - "strip-ansi": "^7.0.1", - "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", - "wrap-ansi": "^8.1.0", - "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/@jridgewell/sourcemap-codec": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz", - "integrity": "sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==", - "dev": true, - "license": "MIT" - }, - "node_modules/@js-sdsl/ordered-map": { - "version": "4.4.2", - "resolved": 
"https://registry.npmjs.org/@js-sdsl/ordered-map/-/ordered-map-4.4.2.tgz", - "integrity": "sha512-iUKgm52T8HOE/makSxjqoWhe95ZJA1/G1sYsGev2JDKUSS14KAgg1LHb+Ba+IPow0xflbnSkOsZcO08C7w1gYw==", - "license": "MIT", - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/js-sdsl" - } - }, - "node_modules/@kwsites/file-exists": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/@kwsites/file-exists/-/file-exists-1.1.1.tgz", - "integrity": "sha512-m9/5YGR18lIwxSFDwfE3oA7bWuq9kdau6ugN4H2rJeyhFQZcG9AgSHkQtSD15a8WvTgfz9aikZMrKPHvbpqFiw==", - "license": "MIT", - "dependencies": { - "debug": "^4.1.1" - } - }, - "node_modules/@kwsites/promise-deferred": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/@kwsites/promise-deferred/-/promise-deferred-1.1.1.tgz", - "integrity": "sha512-GaHYm+c0O9MjZRu0ongGBRbinu8gVAMd2UZjji6jVmqKtZluZnptXGWhz1E8j8D2HJ3f/yMxKAUC0b+57wncIw==", - "license": "MIT" - }, - "node_modules/@modelcontextprotocol/sdk": { - "version": "1.12.3", - "resolved": "https://registry.npmjs.org/@modelcontextprotocol/sdk/-/sdk-1.12.3.tgz", - "integrity": "sha512-DyVYSOafBvk3/j1Oka4z5BWT8o4AFmoNyZY9pALOm7Lh3GZglR71Co4r4dEUoqDWdDazIZQHBe7J2Nwkg6gHgQ==", - "license": "MIT", - "dependencies": { - "ajv": "^6.12.6", - "content-type": "^1.0.5", - "cors": "^2.8.5", - "cross-spawn": "^7.0.5", - "eventsource": "^3.0.2", - "express": "^5.0.1", - "express-rate-limit": "^7.5.0", - "pkce-challenge": "^5.0.0", - "raw-body": "^3.0.0", - "zod": "^3.23.8", - "zod-to-json-schema": "^3.24.1" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/@opentelemetry/api": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/@opentelemetry/api/-/api-1.9.0.tgz", - "integrity": "sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg==", - "license": "Apache-2.0", - "engines": { - "node": ">=8.0.0" - } - }, - "node_modules/@opentelemetry/api-logs": { - "version": "0.52.1", - "resolved": 
"https://registry.npmjs.org/@opentelemetry/api-logs/-/api-logs-0.52.1.tgz", - "integrity": "sha512-qnSqB2DQ9TPP96dl8cDubDvrUyWc0/sK81xHTK8eSUspzDM3bsewX903qclQFvVhgStjRWdC5bLb3kQqMkfV5A==", - "license": "Apache-2.0", - "dependencies": { - "@opentelemetry/api": "^1.0.0" - }, - "engines": { - "node": ">=14" - } - }, - "node_modules/@opentelemetry/context-async-hooks": { - "version": "1.25.1", - "resolved": "https://registry.npmjs.org/@opentelemetry/context-async-hooks/-/context-async-hooks-1.25.1.tgz", - "integrity": "sha512-UW/ge9zjvAEmRWVapOP0qyCvPulWU6cQxGxDbWEFfGOj1VBBZAuOqTo3X6yWmDTD3Xe15ysCZChHncr2xFMIfQ==", - "license": "Apache-2.0", - "engines": { - "node": ">=14" - }, - "peerDependencies": { - "@opentelemetry/api": ">=1.0.0 <1.10.0" - } - }, - "node_modules/@opentelemetry/core": { - "version": "1.25.1", - "resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-1.25.1.tgz", - "integrity": "sha512-GeT/l6rBYWVQ4XArluLVB6WWQ8flHbdb6r2FCHC3smtdOAbrJBIv35tpV/yp9bmYUJf+xmZpu9DRTIeJVhFbEQ==", - "license": "Apache-2.0", - "dependencies": { - "@opentelemetry/semantic-conventions": "1.25.1" - }, - "engines": { - "node": ">=14" - }, - "peerDependencies": { - "@opentelemetry/api": ">=1.0.0 <1.10.0" - } - }, - "node_modules/@opentelemetry/exporter-logs-otlp-grpc": { - "version": "0.52.1", - "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-logs-otlp-grpc/-/exporter-logs-otlp-grpc-0.52.1.tgz", - "integrity": "sha512-sXgcp4fsL3zCo96A0LmFIGYOj2LSEDI6wD7nBYRhuDDxeRsk18NQgqRVlCf4VIyTBZzGu1M7yOtdFukQPgII1A==", - "license": "Apache-2.0", - "dependencies": { - "@grpc/grpc-js": "^1.7.1", - "@opentelemetry/core": "1.25.1", - "@opentelemetry/otlp-grpc-exporter-base": "0.52.1", - "@opentelemetry/otlp-transformer": "0.52.1", - "@opentelemetry/sdk-logs": "0.52.1" - }, - "engines": { - "node": ">=14" - }, - "peerDependencies": { - "@opentelemetry/api": "^1.0.0" - } - }, - "node_modules/@opentelemetry/exporter-metrics-otlp-grpc": { - "version": "0.52.1", - 
"resolved": "https://registry.npmjs.org/@opentelemetry/exporter-metrics-otlp-grpc/-/exporter-metrics-otlp-grpc-0.52.1.tgz", - "integrity": "sha512-CE0f1IEE1GQj8JWl/BxKvKwx9wBTLR09OpPQHaIs5LGBw3ODu8ek5kcbrHPNsFYh/pWh+pcjbZQoxq3CqvQVnA==", - "license": "Apache-2.0", - "dependencies": { - "@grpc/grpc-js": "^1.7.1", - "@opentelemetry/core": "1.25.1", - "@opentelemetry/exporter-metrics-otlp-http": "0.52.1", - "@opentelemetry/otlp-exporter-base": "0.52.1", - "@opentelemetry/otlp-grpc-exporter-base": "0.52.1", - "@opentelemetry/otlp-transformer": "0.52.1", - "@opentelemetry/resources": "1.25.1", - "@opentelemetry/sdk-metrics": "1.25.1" - }, - "engines": { - "node": ">=14" - }, - "peerDependencies": { - "@opentelemetry/api": "^1.3.0" - } - }, - "node_modules/@opentelemetry/exporter-metrics-otlp-http": { - "version": "0.52.1", - "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-metrics-otlp-http/-/exporter-metrics-otlp-http-0.52.1.tgz", - "integrity": "sha512-oAHPOy1sZi58bwqXaucd19F/v7+qE2EuVslQOEeLQT94CDuZJJ4tbWzx8DpYBTrOSzKqqrMtx9+PMxkrcbxOyQ==", - "license": "Apache-2.0", - "dependencies": { - "@opentelemetry/core": "1.25.1", - "@opentelemetry/otlp-exporter-base": "0.52.1", - "@opentelemetry/otlp-transformer": "0.52.1", - "@opentelemetry/resources": "1.25.1", - "@opentelemetry/sdk-metrics": "1.25.1" - }, - "engines": { - "node": ">=14" - }, - "peerDependencies": { - "@opentelemetry/api": "^1.3.0" - } - }, - "node_modules/@opentelemetry/exporter-trace-otlp-grpc": { - "version": "0.52.1", - "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-trace-otlp-grpc/-/exporter-trace-otlp-grpc-0.52.1.tgz", - "integrity": "sha512-pVkSH20crBwMTqB3nIN4jpQKUEoB0Z94drIHpYyEqs7UBr+I0cpYyOR3bqjA/UasQUMROb3GX8ZX4/9cVRqGBQ==", - "license": "Apache-2.0", - "dependencies": { - "@grpc/grpc-js": "^1.7.1", - "@opentelemetry/core": "1.25.1", - "@opentelemetry/otlp-grpc-exporter-base": "0.52.1", - "@opentelemetry/otlp-transformer": "0.52.1", - "@opentelemetry/resources": 
"1.25.1", - "@opentelemetry/sdk-trace-base": "1.25.1" - }, - "engines": { - "node": ">=14" - }, - "peerDependencies": { - "@opentelemetry/api": "^1.0.0" - } - }, - "node_modules/@opentelemetry/exporter-trace-otlp-http": { - "version": "0.52.1", - "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-trace-otlp-http/-/exporter-trace-otlp-http-0.52.1.tgz", - "integrity": "sha512-05HcNizx0BxcFKKnS5rwOV+2GevLTVIRA0tRgWYyw4yCgR53Ic/xk83toYKts7kbzcI+dswInUg/4s8oyA+tqg==", - "license": "Apache-2.0", - "dependencies": { - "@opentelemetry/core": "1.25.1", - "@opentelemetry/otlp-exporter-base": "0.52.1", - "@opentelemetry/otlp-transformer": "0.52.1", - "@opentelemetry/resources": "1.25.1", - "@opentelemetry/sdk-trace-base": "1.25.1" - }, - "engines": { - "node": ">=14" - }, - "peerDependencies": { - "@opentelemetry/api": "^1.0.0" - } - }, - "node_modules/@opentelemetry/exporter-trace-otlp-proto": { - "version": "0.52.1", - "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-trace-otlp-proto/-/exporter-trace-otlp-proto-0.52.1.tgz", - "integrity": "sha512-pt6uX0noTQReHXNeEslQv7x311/F1gJzMnp1HD2qgypLRPbXDeMzzeTngRTUaUbP6hqWNtPxuLr4DEoZG+TcEQ==", - "license": "Apache-2.0", - "dependencies": { - "@opentelemetry/core": "1.25.1", - "@opentelemetry/otlp-exporter-base": "0.52.1", - "@opentelemetry/otlp-transformer": "0.52.1", - "@opentelemetry/resources": "1.25.1", - "@opentelemetry/sdk-trace-base": "1.25.1" - }, - "engines": { - "node": ">=14" - }, - "peerDependencies": { - "@opentelemetry/api": "^1.0.0" - } - }, - "node_modules/@opentelemetry/exporter-zipkin": { - "version": "1.25.1", - "resolved": "https://registry.npmjs.org/@opentelemetry/exporter-zipkin/-/exporter-zipkin-1.25.1.tgz", - "integrity": "sha512-RmOwSvkimg7ETwJbUOPTMhJm9A9bG1U8s7Zo3ajDh4zM7eYcycQ0dM7FbLD6NXWbI2yj7UY4q8BKinKYBQksyw==", - "license": "Apache-2.0", - "dependencies": { - "@opentelemetry/core": "1.25.1", - "@opentelemetry/resources": "1.25.1", - "@opentelemetry/sdk-trace-base": 
"1.25.1", - "@opentelemetry/semantic-conventions": "1.25.1" - }, - "engines": { - "node": ">=14" - }, - "peerDependencies": { - "@opentelemetry/api": "^1.0.0" - } - }, - "node_modules/@opentelemetry/instrumentation": { - "version": "0.52.1", - "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation/-/instrumentation-0.52.1.tgz", - "integrity": "sha512-uXJbYU/5/MBHjMp1FqrILLRuiJCs3Ofk0MeRDk8g1S1gD47U8X3JnSwcMO1rtRo1x1a7zKaQHaoYu49p/4eSKw==", - "license": "Apache-2.0", - "dependencies": { - "@opentelemetry/api-logs": "0.52.1", - "@types/shimmer": "^1.0.2", - "import-in-the-middle": "^1.8.1", - "require-in-the-middle": "^7.1.1", - "semver": "^7.5.2", - "shimmer": "^1.2.1" - }, - "engines": { - "node": ">=14" - }, - "peerDependencies": { - "@opentelemetry/api": "^1.3.0" - } - }, - "node_modules/@opentelemetry/instrumentation-http": { - "version": "0.52.1", - "resolved": "https://registry.npmjs.org/@opentelemetry/instrumentation-http/-/instrumentation-http-0.52.1.tgz", - "integrity": "sha512-dG/aevWhaP+7OLv4BQQSEKMJv8GyeOp3Wxl31NHqE8xo9/fYMfEljiZphUHIfyg4gnZ9swMyWjfOQs5GUQe54Q==", - "license": "Apache-2.0", - "dependencies": { - "@opentelemetry/core": "1.25.1", - "@opentelemetry/instrumentation": "0.52.1", - "@opentelemetry/semantic-conventions": "1.25.1", - "semver": "^7.5.2" - }, - "engines": { - "node": ">=14" - }, - "peerDependencies": { - "@opentelemetry/api": "^1.3.0" - } - }, - "node_modules/@opentelemetry/otlp-exporter-base": { - "version": "0.52.1", - "resolved": "https://registry.npmjs.org/@opentelemetry/otlp-exporter-base/-/otlp-exporter-base-0.52.1.tgz", - "integrity": "sha512-z175NXOtX5ihdlshtYBe5RpGeBoTXVCKPPLiQlD6FHvpM4Ch+p2B0yWKYSrBfLH24H9zjJiBdTrtD+hLlfnXEQ==", - "license": "Apache-2.0", - "dependencies": { - "@opentelemetry/core": "1.25.1", - "@opentelemetry/otlp-transformer": "0.52.1" - }, - "engines": { - "node": ">=14" - }, - "peerDependencies": { - "@opentelemetry/api": "^1.0.0" - } - }, - 
"node_modules/@opentelemetry/otlp-grpc-exporter-base": { - "version": "0.52.1", - "resolved": "https://registry.npmjs.org/@opentelemetry/otlp-grpc-exporter-base/-/otlp-grpc-exporter-base-0.52.1.tgz", - "integrity": "sha512-zo/YrSDmKMjG+vPeA9aBBrsQM9Q/f2zo6N04WMB3yNldJRsgpRBeLLwvAt/Ba7dpehDLOEFBd1i2JCoaFtpCoQ==", - "license": "Apache-2.0", - "dependencies": { - "@grpc/grpc-js": "^1.7.1", - "@opentelemetry/core": "1.25.1", - "@opentelemetry/otlp-exporter-base": "0.52.1", - "@opentelemetry/otlp-transformer": "0.52.1" - }, - "engines": { - "node": ">=14" - }, - "peerDependencies": { - "@opentelemetry/api": "^1.0.0" - } - }, - "node_modules/@opentelemetry/otlp-transformer": { - "version": "0.52.1", - "resolved": "https://registry.npmjs.org/@opentelemetry/otlp-transformer/-/otlp-transformer-0.52.1.tgz", - "integrity": "sha512-I88uCZSZZtVa0XniRqQWKbjAUm73I8tpEy/uJYPPYw5d7BRdVk0RfTBQw8kSUl01oVWEuqxLDa802222MYyWHg==", - "license": "Apache-2.0", - "dependencies": { - "@opentelemetry/api-logs": "0.52.1", - "@opentelemetry/core": "1.25.1", - "@opentelemetry/resources": "1.25.1", - "@opentelemetry/sdk-logs": "0.52.1", - "@opentelemetry/sdk-metrics": "1.25.1", - "@opentelemetry/sdk-trace-base": "1.25.1", - "protobufjs": "^7.3.0" - }, - "engines": { - "node": ">=14" - }, - "peerDependencies": { - "@opentelemetry/api": ">=1.3.0 <1.10.0" - } - }, - "node_modules/@opentelemetry/propagator-b3": { - "version": "1.25.1", - "resolved": "https://registry.npmjs.org/@opentelemetry/propagator-b3/-/propagator-b3-1.25.1.tgz", - "integrity": "sha512-p6HFscpjrv7//kE+7L+3Vn00VEDUJB0n6ZrjkTYHrJ58QZ8B3ajSJhRbCcY6guQ3PDjTbxWklyvIN2ojVbIb1A==", - "license": "Apache-2.0", - "dependencies": { - "@opentelemetry/core": "1.25.1" - }, - "engines": { - "node": ">=14" - }, - "peerDependencies": { - "@opentelemetry/api": ">=1.0.0 <1.10.0" - } - }, - "node_modules/@opentelemetry/propagator-jaeger": { - "version": "1.25.1", - "resolved": 
"https://registry.npmjs.org/@opentelemetry/propagator-jaeger/-/propagator-jaeger-1.25.1.tgz", - "integrity": "sha512-nBprRf0+jlgxks78G/xq72PipVK+4or9Ypntw0gVZYNTCSK8rg5SeaGV19tV920CMqBD/9UIOiFr23Li/Q8tiA==", - "license": "Apache-2.0", - "dependencies": { - "@opentelemetry/core": "1.25.1" - }, - "engines": { - "node": ">=14" - }, - "peerDependencies": { - "@opentelemetry/api": ">=1.0.0 <1.10.0" - } - }, - "node_modules/@opentelemetry/resources": { - "version": "1.25.1", - "resolved": "https://registry.npmjs.org/@opentelemetry/resources/-/resources-1.25.1.tgz", - "integrity": "sha512-pkZT+iFYIZsVn6+GzM0kSX+u3MSLCY9md+lIJOoKl/P+gJFfxJte/60Usdp8Ce4rOs8GduUpSPNe1ddGyDT1sQ==", - "license": "Apache-2.0", - "dependencies": { - "@opentelemetry/core": "1.25.1", - "@opentelemetry/semantic-conventions": "1.25.1" - }, - "engines": { - "node": ">=14" - }, - "peerDependencies": { - "@opentelemetry/api": ">=1.0.0 <1.10.0" - } - }, - "node_modules/@opentelemetry/sdk-logs": { - "version": "0.52.1", - "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-logs/-/sdk-logs-0.52.1.tgz", - "integrity": "sha512-MBYh+WcPPsN8YpRHRmK1Hsca9pVlyyKd4BxOC4SsgHACnl/bPp4Cri9hWhVm5+2tiQ9Zf4qSc1Jshw9tOLGWQA==", - "license": "Apache-2.0", - "dependencies": { - "@opentelemetry/api-logs": "0.52.1", - "@opentelemetry/core": "1.25.1", - "@opentelemetry/resources": "1.25.1" - }, - "engines": { - "node": ">=14" - }, - "peerDependencies": { - "@opentelemetry/api": ">=1.4.0 <1.10.0" - } - }, - "node_modules/@opentelemetry/sdk-metrics": { - "version": "1.25.1", - "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-metrics/-/sdk-metrics-1.25.1.tgz", - "integrity": "sha512-9Mb7q5ioFL4E4dDrc4wC/A3NTHDat44v4I3p2pLPSxRvqUbDIQyMVr9uK+EU69+HWhlET1VaSrRzwdckWqY15Q==", - "license": "Apache-2.0", - "dependencies": { - "@opentelemetry/core": "1.25.1", - "@opentelemetry/resources": "1.25.1", - "lodash.merge": "^4.6.2" - }, - "engines": { - "node": ">=14" - }, - "peerDependencies": { - "@opentelemetry/api": 
">=1.3.0 <1.10.0" - } - }, - "node_modules/@opentelemetry/sdk-node": { - "version": "0.52.1", - "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-node/-/sdk-node-0.52.1.tgz", - "integrity": "sha512-uEG+gtEr6eKd8CVWeKMhH2olcCHM9dEK68pe0qE0be32BcCRsvYURhHaD1Srngh1SQcnQzZ4TP324euxqtBOJA==", - "license": "Apache-2.0", - "dependencies": { - "@opentelemetry/api-logs": "0.52.1", - "@opentelemetry/core": "1.25.1", - "@opentelemetry/exporter-trace-otlp-grpc": "0.52.1", - "@opentelemetry/exporter-trace-otlp-http": "0.52.1", - "@opentelemetry/exporter-trace-otlp-proto": "0.52.1", - "@opentelemetry/exporter-zipkin": "1.25.1", - "@opentelemetry/instrumentation": "0.52.1", - "@opentelemetry/resources": "1.25.1", - "@opentelemetry/sdk-logs": "0.52.1", - "@opentelemetry/sdk-metrics": "1.25.1", - "@opentelemetry/sdk-trace-base": "1.25.1", - "@opentelemetry/sdk-trace-node": "1.25.1", - "@opentelemetry/semantic-conventions": "1.25.1" - }, - "engines": { - "node": ">=14" - }, - "peerDependencies": { - "@opentelemetry/api": ">=1.3.0 <1.10.0" - } - }, - "node_modules/@opentelemetry/sdk-trace-base": { - "version": "1.25.1", - "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-trace-base/-/sdk-trace-base-1.25.1.tgz", - "integrity": "sha512-C8k4hnEbc5FamuZQ92nTOp8X/diCY56XUTnMiv9UTuJitCzaNNHAVsdm5+HLCdI8SLQsLWIrG38tddMxLVoftw==", - "license": "Apache-2.0", - "dependencies": { - "@opentelemetry/core": "1.25.1", - "@opentelemetry/resources": "1.25.1", - "@opentelemetry/semantic-conventions": "1.25.1" - }, - "engines": { - "node": ">=14" - }, - "peerDependencies": { - "@opentelemetry/api": ">=1.0.0 <1.10.0" - } - }, - "node_modules/@opentelemetry/sdk-trace-node": { - "version": "1.25.1", - "resolved": "https://registry.npmjs.org/@opentelemetry/sdk-trace-node/-/sdk-trace-node-1.25.1.tgz", - "integrity": "sha512-nMcjFIKxnFqoez4gUmihdBrbpsEnAX/Xj16sGvZm+guceYE0NE00vLhpDVK6f3q8Q4VFI5xG8JjlXKMB/SkTTQ==", - "license": "Apache-2.0", - "dependencies": { - 
"@opentelemetry/context-async-hooks": "1.25.1", - "@opentelemetry/core": "1.25.1", - "@opentelemetry/propagator-b3": "1.25.1", - "@opentelemetry/propagator-jaeger": "1.25.1", - "@opentelemetry/sdk-trace-base": "1.25.1", - "semver": "^7.5.2" - }, - "engines": { - "node": ">=14" - }, - "peerDependencies": { - "@opentelemetry/api": ">=1.0.0 <1.10.0" - } - }, - "node_modules/@opentelemetry/semantic-conventions": { - "version": "1.25.1", - "resolved": "https://registry.npmjs.org/@opentelemetry/semantic-conventions/-/semantic-conventions-1.25.1.tgz", - "integrity": "sha512-ZDjMJJQRlyk8A1KZFCc+bCbsyrn1wTwdNt56F7twdfUfnHUZUq77/WfONCj8p72NZOyP7pNTdUWSTYC3GTbuuQ==", - "license": "Apache-2.0", - "engines": { - "node": ">=14" - } - }, - "node_modules/@pkgjs/parseargs": { - "version": "0.11.0", - "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", - "integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==", - "license": "MIT", - "optional": true, - "engines": { - "node": ">=14" - } - }, - "node_modules/@protobufjs/aspromise": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz", - "integrity": "sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ==", - "license": "BSD-3-Clause" - }, - "node_modules/@protobufjs/base64": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@protobufjs/base64/-/base64-1.1.2.tgz", - "integrity": "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==", - "license": "BSD-3-Clause" - }, - "node_modules/@protobufjs/codegen": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/@protobufjs/codegen/-/codegen-2.0.4.tgz", - "integrity": "sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==", - "license": "BSD-3-Clause" - }, - "node_modules/@protobufjs/eventemitter": { 
- "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz", - "integrity": "sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q==", - "license": "BSD-3-Clause" - }, - "node_modules/@protobufjs/fetch": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@protobufjs/fetch/-/fetch-1.1.0.tgz", - "integrity": "sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==", - "license": "BSD-3-Clause", - "dependencies": { - "@protobufjs/aspromise": "^1.1.1", - "@protobufjs/inquire": "^1.1.0" - } - }, - "node_modules/@protobufjs/float": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/@protobufjs/float/-/float-1.0.2.tgz", - "integrity": "sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ==", - "license": "BSD-3-Clause" - }, - "node_modules/@protobufjs/inquire": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@protobufjs/inquire/-/inquire-1.1.0.tgz", - "integrity": "sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q==", - "license": "BSD-3-Clause" - }, - "node_modules/@protobufjs/path": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@protobufjs/path/-/path-1.1.2.tgz", - "integrity": "sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA==", - "license": "BSD-3-Clause" - }, - "node_modules/@protobufjs/pool": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@protobufjs/pool/-/pool-1.1.0.tgz", - "integrity": "sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw==", - "license": "BSD-3-Clause" - }, - "node_modules/@protobufjs/utf8": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.0.tgz", - "integrity": 
"sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==", - "license": "BSD-3-Clause" - }, - "node_modules/@rollup/rollup-android-arm-eabi": { - "version": "4.43.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.43.0.tgz", - "integrity": "sha512-Krjy9awJl6rKbruhQDgivNbD1WuLb8xAclM4IR4cN5pHGAs2oIMMQJEiC3IC/9TZJ+QZkmZhlMO/6MBGxPidpw==", - "cpu": [ - "arm" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "android" - ] - }, - "node_modules/@rollup/rollup-android-arm64": { - "version": "4.43.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.43.0.tgz", - "integrity": "sha512-ss4YJwRt5I63454Rpj+mXCXicakdFmKnUNxr1dLK+5rv5FJgAxnN7s31a5VchRYxCFWdmnDWKd0wbAdTr0J5EA==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "android" - ] - }, - "node_modules/@rollup/rollup-darwin-arm64": { - "version": "4.43.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.43.0.tgz", - "integrity": "sha512-eKoL8ykZ7zz8MjgBenEF2OoTNFAPFz1/lyJ5UmmFSz5jW+7XbH1+MAgCVHy72aG59rbuQLcJeiMrP8qP5d/N0A==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "darwin" - ] - }, - "node_modules/@rollup/rollup-darwin-x64": { - "version": "4.43.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.43.0.tgz", - "integrity": "sha512-SYwXJgaBYW33Wi/q4ubN+ldWC4DzQY62S4Ll2dgfr/dbPoF50dlQwEaEHSKrQdSjC6oIe1WgzosoaNoHCdNuMg==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "darwin" - ] - }, - "node_modules/@rollup/rollup-freebsd-arm64": { - "version": "4.43.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.43.0.tgz", - "integrity": 
"sha512-SV+U5sSo0yujrjzBF7/YidieK2iF6E7MdF6EbYxNz94lA+R0wKl3SiixGyG/9Klab6uNBIqsN7j4Y/Fya7wAjQ==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "freebsd" - ] - }, - "node_modules/@rollup/rollup-freebsd-x64": { - "version": "4.43.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.43.0.tgz", - "integrity": "sha512-J7uCsiV13L/VOeHJBo5SjasKiGxJ0g+nQTrBkAsmQBIdil3KhPnSE9GnRon4ejX1XDdsmK/l30IYLiAaQEO0Cg==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "freebsd" - ] - }, - "node_modules/@rollup/rollup-linux-arm-gnueabihf": { - "version": "4.43.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.43.0.tgz", - "integrity": "sha512-gTJ/JnnjCMc15uwB10TTATBEhK9meBIY+gXP4s0sHD1zHOaIh4Dmy1X9wup18IiY9tTNk5gJc4yx9ctj/fjrIw==", - "cpu": [ - "arm" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-arm-musleabihf": { - "version": "4.43.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.43.0.tgz", - "integrity": "sha512-ZJ3gZynL1LDSIvRfz0qXtTNs56n5DI2Mq+WACWZ7yGHFUEirHBRt7fyIk0NsCKhmRhn7WAcjgSkSVVxKlPNFFw==", - "cpu": [ - "arm" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-arm64-gnu": { - "version": "4.43.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.43.0.tgz", - "integrity": "sha512-8FnkipasmOOSSlfucGYEu58U8cxEdhziKjPD2FIa0ONVMxvl/hmONtX/7y4vGjdUhjcTHlKlDhw3H9t98fPvyA==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-arm64-musl": { - "version": "4.43.0", - "resolved": 
"https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.43.0.tgz", - "integrity": "sha512-KPPyAdlcIZ6S9C3S2cndXDkV0Bb1OSMsX0Eelr2Bay4EsF9yi9u9uzc9RniK3mcUGCLhWY9oLr6er80P5DE6XA==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-loongarch64-gnu": { - "version": "4.43.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loongarch64-gnu/-/rollup-linux-loongarch64-gnu-4.43.0.tgz", - "integrity": "sha512-HPGDIH0/ZzAZjvtlXj6g+KDQ9ZMHfSP553za7o2Odegb/BEfwJcR0Sw0RLNpQ9nC6Gy8s+3mSS9xjZ0n3rhcYg==", - "cpu": [ - "loong64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-powerpc64le-gnu": { - "version": "4.43.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-powerpc64le-gnu/-/rollup-linux-powerpc64le-gnu-4.43.0.tgz", - "integrity": "sha512-gEmwbOws4U4GLAJDhhtSPWPXUzDfMRedT3hFMyRAvM9Mrnj+dJIFIeL7otsv2WF3D7GrV0GIewW0y28dOYWkmw==", - "cpu": [ - "ppc64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-riscv64-gnu": { - "version": "4.43.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.43.0.tgz", - "integrity": "sha512-XXKvo2e+wFtXZF/9xoWohHg+MuRnvO29TI5Hqe9xwN5uN8NKUYy7tXUG3EZAlfchufNCTHNGjEx7uN78KsBo0g==", - "cpu": [ - "riscv64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-riscv64-musl": { - "version": "4.43.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.43.0.tgz", - "integrity": "sha512-ruf3hPWhjw6uDFsOAzmbNIvlXFXlBQ4nk57Sec8E8rUxs/AI4HD6xmiiasOOx/3QxS2f5eQMKTAwk7KHwpzr/Q==", - "cpu": [ - "riscv64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - 
"linux" - ] - }, - "node_modules/@rollup/rollup-linux-s390x-gnu": { - "version": "4.43.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.43.0.tgz", - "integrity": "sha512-QmNIAqDiEMEvFV15rsSnjoSmO0+eJLoKRD9EAa9rrYNwO/XRCtOGM3A5A0X+wmG+XRrw9Fxdsw+LnyYiZWWcVw==", - "cpu": [ - "s390x" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-x64-gnu": { - "version": "4.43.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.43.0.tgz", - "integrity": "sha512-jAHr/S0iiBtFyzjhOkAics/2SrXE092qyqEg96e90L3t9Op8OTzS6+IX0Fy5wCt2+KqeHAkti+eitV0wvblEoQ==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-x64-musl": { - "version": "4.43.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.43.0.tgz", - "integrity": "sha512-3yATWgdeXyuHtBhrLt98w+5fKurdqvs8B53LaoKD7P7H7FKOONLsBVMNl9ghPQZQuYcceV5CDyPfyfGpMWD9mQ==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-win32-arm64-msvc": { - "version": "4.43.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.43.0.tgz", - "integrity": "sha512-wVzXp2qDSCOpcBCT5WRWLmpJRIzv23valvcTwMHEobkjippNf+C3ys/+wf07poPkeNix0paTNemB2XrHr2TnGw==", - "cpu": [ - "arm64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ] - }, - "node_modules/@rollup/rollup-win32-ia32-msvc": { - "version": "4.43.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.43.0.tgz", - "integrity": "sha512-fYCTEyzf8d+7diCw8b+asvWDCLMjsCEA8alvtAutqJOJp/wL5hs1rWSqJ1vkjgW0L2NB4bsYJrpKkiIPRR9dvw==", - "cpu": [ - "ia32" - ], - "dev": true, - "license": 
"MIT", - "optional": true, - "os": [ - "win32" - ] - }, - "node_modules/@rollup/rollup-win32-x64-msvc": { - "version": "4.43.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.43.0.tgz", - "integrity": "sha512-SnGhLiE5rlK0ofq8kzuDkM0g7FN1s5VYY+YSMTibP7CqShxCQvqtNxTARS4xX4PFJfHjG0ZQYX9iGzI3FQh5Aw==", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "win32" - ] - }, - "node_modules/@selderee/plugin-htmlparser2": { - "version": "0.11.0", - "resolved": "https://registry.npmjs.org/@selderee/plugin-htmlparser2/-/plugin-htmlparser2-0.11.0.tgz", - "integrity": "sha512-P33hHGdldxGabLFjPPpaTxVolMrzrcegejx+0GxjrIb9Zv48D8yAIA/QTDR2dFl7Uz7urX8aX6+5bCZslr+gWQ==", - "license": "MIT", - "dependencies": { - "domhandler": "^5.0.3", - "selderee": "^0.11.0" - }, - "funding": { - "url": "https://ko-fi.com/killymxi" - } - }, - "node_modules/@types/chai": { - "version": "5.2.2", - "resolved": "https://registry.npmjs.org/@types/chai/-/chai-5.2.2.tgz", - "integrity": "sha512-8kB30R7Hwqf40JPiKhVzodJs2Qc1ZJ5zuT3uzw5Hq/dhNCl3G3l83jfpdI1e20BP348+fV7VIL/+FxaXkqBmWg==", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/deep-eql": "*" - } - }, - "node_modules/@types/deep-eql": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/@types/deep-eql/-/deep-eql-4.0.2.tgz", - "integrity": "sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw==", - "dev": true, - "license": "MIT" - }, - "node_modules/@types/diff": { - "version": "7.0.2", - "resolved": "https://registry.npmjs.org/@types/diff/-/diff-7.0.2.tgz", - "integrity": "sha512-JSWRMozjFKsGlEjiiKajUjIJVKuKdE3oVy2DNtK+fUo8q82nhFZ2CPQwicAIkXrofahDXrWJ7mjelvZphMS98Q==", - "dev": true, - "license": "MIT" - }, - "node_modules/@types/dotenv": { - "version": "6.1.1", - "resolved": "https://registry.npmjs.org/@types/dotenv/-/dotenv-6.1.1.tgz", - "integrity": 
"sha512-ftQl3DtBvqHl9L16tpqqzA4YzCSXZfi7g8cQceTz5rOlYtk/IZbFjAv3mLOQlNIgOaylCQWQoBdDQHPgEBJPHg==", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@types/estree": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", - "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", - "dev": true, - "license": "MIT" - }, - "node_modules/@types/glob": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/@types/glob/-/glob-8.1.0.tgz", - "integrity": "sha512-IO+MJPVhoqz+28h1qLAcBEH2+xHMK6MTyHJc7MTnnYb6wsoLR29POVGJ7LycmVXIqyy/4/2ShP5sUwTXuOwb/w==", - "license": "MIT", - "dependencies": { - "@types/minimatch": "^5.1.2", - "@types/node": "*" - } - }, - "node_modules/@types/html-to-text": { - "version": "9.0.4", - "resolved": "https://registry.npmjs.org/@types/html-to-text/-/html-to-text-9.0.4.tgz", - "integrity": "sha512-pUY3cKH/Nm2yYrEmDlPR1mR7yszjGx4DrwPjQ702C4/D5CwHuZTgZdIdwPkRbcuhs7BAh2L5rg3CL5cbRiGTCQ==", - "license": "MIT" - }, - "node_modules/@types/minimatch": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/@types/minimatch/-/minimatch-5.1.2.tgz", - "integrity": "sha512-K0VQKziLUWkVKiRVrx4a40iPaxTUefQmjtkQofBkYRcoaaL/8rhwDWww9qWbrgicNOgnpIsMxyNIUM4+n6dUIA==", - "license": "MIT" - }, - "node_modules/@types/node": { - "version": "24.0.1", - "resolved": "https://registry.npmjs.org/@types/node/-/node-24.0.1.tgz", - "integrity": "sha512-MX4Zioh39chHlDJbKmEgydJDS3tspMP/lnQC67G3SWsTnb9NeYVWOjkxpOSy4oMfPs4StcWHwBrvUb4ybfnuaw==", - "license": "MIT", - "dependencies": { - "undici-types": "~7.8.0" - } - }, - "node_modules/@types/shimmer": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/@types/shimmer/-/shimmer-1.2.0.tgz", - "integrity": "sha512-UE7oxhQLLd9gub6JKIAhDq06T0F6FnztwMNRvYgjeQSBeMc1ZG/tA47EwfduvkuQS8apbkM/lpLpWsaCeYsXVg==", - "license": "MIT" - }, - 
"node_modules/@types/ws": { - "version": "8.18.1", - "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.18.1.tgz", - "integrity": "sha512-ThVF6DCVhA8kUGy+aazFQ4kXQ7E1Ty7A3ypFOe0IcJV8O/M511G99AW24irKrW56Wt44yG9+ij8FaqoBGkuBXg==", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/node": "*" - } - }, - "node_modules/@vitest/expect": { - "version": "3.2.3", - "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-3.2.3.tgz", - "integrity": "sha512-W2RH2TPWVHA1o7UmaFKISPvdicFJH+mjykctJFoAkUw+SPTJTGjUNdKscFBrqM7IPnCVu6zihtKYa7TkZS1dkQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/chai": "^5.2.2", - "@vitest/spy": "3.2.3", - "@vitest/utils": "3.2.3", - "chai": "^5.2.0", - "tinyrainbow": "^2.0.0" - }, - "funding": { - "url": "https://opencollective.com/vitest" - } - }, - "node_modules/@vitest/mocker": { - "version": "3.2.3", - "resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-3.2.3.tgz", - "integrity": "sha512-cP6fIun+Zx8he4rbWvi+Oya6goKQDZK+Yq4hhlggwQBbrlOQ4qtZ+G4nxB6ZnzI9lyIb+JnvyiJnPC2AGbKSPA==", - "dev": true, - "license": "MIT", - "dependencies": { - "@vitest/spy": "3.2.3", - "estree-walker": "^3.0.3", - "magic-string": "^0.30.17" - }, - "funding": { - "url": "https://opencollective.com/vitest" - }, - "peerDependencies": { - "msw": "^2.4.9", - "vite": "^5.0.0 || ^6.0.0 || ^7.0.0-0" - }, - "peerDependenciesMeta": { - "msw": { - "optional": true - }, - "vite": { - "optional": true - } - } - }, - "node_modules/@vitest/pretty-format": { - "version": "3.2.3", - "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-3.2.3.tgz", - "integrity": "sha512-yFglXGkr9hW/yEXngO+IKMhP0jxyFw2/qys/CK4fFUZnSltD+MU7dVYGrH8rvPcK/O6feXQA+EU33gjaBBbAng==", - "dev": true, - "license": "MIT", - "dependencies": { - "tinyrainbow": "^2.0.0" - }, - "funding": { - "url": "https://opencollective.com/vitest" - } - }, - "node_modules/@vitest/runner": { - "version": "3.2.3", - "resolved": 
"https://registry.npmjs.org/@vitest/runner/-/runner-3.2.3.tgz", - "integrity": "sha512-83HWYisT3IpMaU9LN+VN+/nLHVBCSIUKJzGxC5RWUOsK1h3USg7ojL+UXQR3b4o4UBIWCYdD2fxuzM7PQQ1u8w==", - "dev": true, - "license": "MIT", - "dependencies": { - "@vitest/utils": "3.2.3", - "pathe": "^2.0.3", - "strip-literal": "^3.0.0" - }, - "funding": { - "url": "https://opencollective.com/vitest" - } - }, - "node_modules/@vitest/snapshot": { - "version": "3.2.3", - "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-3.2.3.tgz", - "integrity": "sha512-9gIVWx2+tysDqUmmM1L0hwadyumqssOL1r8KJipwLx5JVYyxvVRfxvMq7DaWbZZsCqZnu/dZedaZQh4iYTtneA==", - "dev": true, - "license": "MIT", - "dependencies": { - "@vitest/pretty-format": "3.2.3", - "magic-string": "^0.30.17", - "pathe": "^2.0.3" - }, - "funding": { - "url": "https://opencollective.com/vitest" - } - }, - "node_modules/@vitest/spy": { - "version": "3.2.3", - "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-3.2.3.tgz", - "integrity": "sha512-JHu9Wl+7bf6FEejTCREy+DmgWe+rQKbK+y32C/k5f4TBIAlijhJbRBIRIOCEpVevgRsCQR2iHRUH2/qKVM/plw==", - "dev": true, - "license": "MIT", - "dependencies": { - "tinyspy": "^4.0.3" - }, - "funding": { - "url": "https://opencollective.com/vitest" - } - }, - "node_modules/@vitest/utils": { - "version": "3.2.3", - "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-3.2.3.tgz", - "integrity": "sha512-4zFBCU5Pf+4Z6v+rwnZ1HU1yzOKKvDkMXZrymE2PBlbjKJRlrOxbvpfPSvJTGRIwGoahaOGvp+kbCoxifhzJ1Q==", - "dev": true, - "license": "MIT", - "dependencies": { - "@vitest/pretty-format": "3.2.3", - "loupe": "^3.1.3", - "tinyrainbow": "^2.0.0" - }, - "funding": { - "url": "https://opencollective.com/vitest" - } - }, - "node_modules/accepts": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/accepts/-/accepts-2.0.0.tgz", - "integrity": "sha512-5cvg6CtKwfgdmVqY1WIiXKc3Q1bkRqGLi+2W/6ao+6Y7gu/RCwRuAhGEzh5B4KlszSuTLgZYuqFqo5bImjNKng==", - "license": "MIT", - "dependencies": { - "mime-types": 
"^3.0.0", - "negotiator": "^1.0.0" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/acorn": { - "version": "8.15.0", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", - "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", - "license": "MIT", - "bin": { - "acorn": "bin/acorn" - }, - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/acorn-import-attributes": { - "version": "1.9.5", - "resolved": "https://registry.npmjs.org/acorn-import-attributes/-/acorn-import-attributes-1.9.5.tgz", - "integrity": "sha512-n02Vykv5uA3eHGM/Z2dQrcD56kL8TyDb2p1+0P83PClMnC/nc+anbQRhIOWnSq4Ke/KvDPrY3C9hDtC/A3eHnQ==", - "license": "MIT", - "peerDependencies": { - "acorn": "^8" - } - }, - "node_modules/agent-base": { - "version": "7.1.3", - "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.3.tgz", - "integrity": "sha512-jRR5wdylq8CkOe6hei19GGZnxM6rBGwFl3Bg0YItGDimvjGtAvdZk4Pu6Cl4u4Igsws4a1fd1Vq3ezrhn4KmFw==", - "license": "MIT", - "engines": { - "node": ">= 14" - } - }, - "node_modules/ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", - "license": "MIT", - "dependencies": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/ansi-regex": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.1.0.tgz", - "integrity": "sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA==", - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-regex?sponsor=1" - } - }, - "node_modules/ansi-styles": { - 
"version": "6.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", - "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/assertion-error": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-2.0.1.tgz", - "integrity": "sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - } - }, - "node_modules/balanced-match": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", - "license": "MIT" - }, - "node_modules/base64-js": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", - "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "MIT" - }, - "node_modules/bignumber.js": { - "version": "9.3.0", - "resolved": "https://registry.npmjs.org/bignumber.js/-/bignumber.js-9.3.0.tgz", - "integrity": "sha512-EM7aMFTXbptt/wZdMlBv2t8IViwQL+h6SLHosp8Yf0dqJMTnY6iL32opnAB6kAdL0SZPuvcAzFr31o0c/R3/RA==", - "license": "MIT", - "engines": { - "node": "*" - } - }, - "node_modules/body-parser": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-2.2.0.tgz", - "integrity": 
"sha512-02qvAaxv8tp7fBa/mw1ga98OGm+eCbqzJOKoRt70sLmfEEi+jyBYVTDGfCL/k06/4EMk/z01gCe7HoCH/f2LTg==", - "license": "MIT", - "dependencies": { - "bytes": "^3.1.2", - "content-type": "^1.0.5", - "debug": "^4.4.0", - "http-errors": "^2.0.0", - "iconv-lite": "^0.6.3", - "on-finished": "^2.4.1", - "qs": "^6.14.0", - "raw-body": "^3.0.0", - "type-is": "^2.0.0" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/brace-expansion": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", - "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", - "license": "MIT", - "dependencies": { - "balanced-match": "^1.0.0" - } - }, - "node_modules/buffer-equal-constant-time": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/buffer-equal-constant-time/-/buffer-equal-constant-time-1.0.1.tgz", - "integrity": "sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA==", - "license": "BSD-3-Clause" - }, - "node_modules/bundle-name": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/bundle-name/-/bundle-name-4.1.0.tgz", - "integrity": "sha512-tjwM5exMg6BGRI+kNmTntNsvdZS1X8BFYS6tnJ2hdH0kVxM6/eVZ2xy+FqStSWvYmtfFMDLIxurorHwDKfDz5Q==", - "license": "MIT", - "dependencies": { - "run-applescript": "^7.0.0" - }, - "engines": { - "node": ">=18" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/bytes": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", - "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==", - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/cac": { - "version": "6.7.14", - "resolved": "https://registry.npmjs.org/cac/-/cac-6.7.14.tgz", - "integrity": 
"sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/call-bind-apply-helpers": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", - "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", - "license": "MIT", - "dependencies": { - "es-errors": "^1.3.0", - "function-bind": "^1.1.2" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/call-bound": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz", - "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==", - "license": "MIT", - "dependencies": { - "call-bind-apply-helpers": "^1.0.2", - "get-intrinsic": "^1.3.0" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/chai": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/chai/-/chai-5.2.0.tgz", - "integrity": "sha512-mCuXncKXk5iCLhfhwTc0izo0gtEmpz5CtG2y8GiOINBlMVS6v8TMRc5TaLWKS6692m9+dVVfzgeVxR5UxWHTYw==", - "dev": true, - "license": "MIT", - "dependencies": { - "assertion-error": "^2.0.1", - "check-error": "^2.1.1", - "deep-eql": "^5.0.1", - "loupe": "^3.1.0", - "pathval": "^2.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/check-error": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/check-error/-/check-error-2.1.1.tgz", - "integrity": "sha512-OAlb+T7V4Op9OwdkjmguYRqncdlx5JiofwOAUkmTF+jNdHwzTaTs4sRAGpzLF3oOz5xAyDGrPgeIDFQmDOTiJw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 16" - } - }, - "node_modules/cjs-module-lexer": { - "version": "1.4.3", - "resolved": "https://registry.npmjs.org/cjs-module-lexer/-/cjs-module-lexer-1.4.3.tgz", - 
"integrity": "sha512-9z8TZaGM1pfswYeXrUpzPrkx8UnWYdhJclsiYMm6x/w5+nN+8Tf/LnAgfLGQCm59qAOxU8WwHEq2vNwF6i4j+Q==", - "license": "MIT" - }, - "node_modules/cliui": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", - "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", - "license": "ISC", - "dependencies": { - "string-width": "^4.2.0", - "strip-ansi": "^6.0.1", - "wrap-ansi": "^7.0.0" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/cliui/node_modules/ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/cliui/node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "license": "MIT", - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/cliui/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "license": "MIT" - }, - "node_modules/cliui/node_modules/string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "license": "MIT", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": 
"^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/cliui/node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/cliui/node_modules/wrap-ansi": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", - "license": "MIT", - "dependencies": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" - } - }, - "node_modules/color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "license": "MIT", - "dependencies": { - "color-name": "~1.1.4" - }, - "engines": { - "node": ">=7.0.0" - } - }, - "node_modules/color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "license": "MIT" - }, - "node_modules/content-disposition": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-1.0.0.tgz", - "integrity": "sha512-Au9nRL8VNUut/XSzbQA38+M78dzP4D+eqg3gfJHMIHHYa3bg067xj1KxMUWj+VULbiZMowKngFFbKczUrNJ1mg==", - "license": "MIT", - "dependencies": { - "safe-buffer": "5.2.1" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/content-type": { - "version": 
"1.0.5", - "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz", - "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/cookie": { - "version": "0.7.2", - "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.2.tgz", - "integrity": "sha512-yki5XnKuf750l50uGTllt6kKILY4nQ1eNIQatoXEByZ5dWgnKqbnqmTrBE5B4N7lrMJKQ2ytWMiTO2o0v6Ew/w==", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/cookie-signature": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.2.2.tgz", - "integrity": "sha512-D76uU73ulSXrD1UXF4KE2TMxVVwhsnCgfAyTg9k8P6KGZjlXKrOLe4dJQKI3Bxi5wjesZoFXJWElNWBjPZMbhg==", - "license": "MIT", - "engines": { - "node": ">=6.6.0" - } - }, - "node_modules/cors": { - "version": "2.8.5", - "resolved": "https://registry.npmjs.org/cors/-/cors-2.8.5.tgz", - "integrity": "sha512-KIHbLJqu73RGr/hnbrO9uBeixNGuvSQjul/jdFvS/KFSIH1hWVd1ng7zOHx+YrEfInLG7q4n6GHQ9cDtxv/P6g==", - "license": "MIT", - "dependencies": { - "object-assign": "^4", - "vary": "^1" - }, - "engines": { - "node": ">= 0.10" - } - }, - "node_modules/cross-spawn": { - "version": "7.0.6", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", - "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", - "license": "MIT", - "dependencies": { - "path-key": "^3.1.0", - "shebang-command": "^2.0.0", - "which": "^2.0.1" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/debug": { - "version": "4.4.1", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.1.tgz", - "integrity": "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==", - "license": "MIT", - "dependencies": { - "ms": "^2.1.3" - }, - "engines": { - "node": ">=6.0" - }, - 
"peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/deep-eql": { - "version": "5.0.2", - "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-5.0.2.tgz", - "integrity": "sha512-h5k/5U50IJJFpzfL6nO9jaaumfjO/f2NjK/oYB2Djzm4p9L+3T9qWpZqZ2hAbLPuuYq9wrU08WQyBTL5GbPk5Q==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/deepmerge": { - "version": "4.3.1", - "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", - "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/default-browser": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/default-browser/-/default-browser-5.2.1.tgz", - "integrity": "sha512-WY/3TUME0x3KPYdRRxEJJvXRHV4PyPoUsxtZa78lwItwRQRHhd2U9xOscaT/YTf8uCXIAjeJOFBVEh/7FtD8Xg==", - "license": "MIT", - "dependencies": { - "bundle-name": "^4.1.0", - "default-browser-id": "^5.0.0" - }, - "engines": { - "node": ">=18" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/default-browser-id": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/default-browser-id/-/default-browser-id-5.0.0.tgz", - "integrity": "sha512-A6p/pu/6fyBcA1TRz/GqWYPViplrftcW2gZC9q79ngNCKAeR/X3gcEdXQHl4KNXV+3wgIJ1CPkJQ3IHM6lcsyA==", - "license": "MIT", - "engines": { - "node": ">=18" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/define-lazy-prop": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-3.0.0.tgz", - "integrity": "sha512-N+MeXYoqr3pOgn8xfyRPREN7gHakLYjhsHhWGT3fWAiL4IkAt0iDw14QiiEm2bE30c5XX5q0FtAA3CK5f9/BUg==", - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/depd": 
{ - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", - "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==", - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/diff": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/diff/-/diff-7.0.0.tgz", - "integrity": "sha512-PJWHUb1RFevKCwaFA9RlG5tCd+FO5iRh9A8HEtkmBH2Li03iJriB6m6JIN4rGz3K3JLawI7/veA1xzRKP6ISBw==", - "license": "BSD-3-Clause", - "engines": { - "node": ">=0.3.1" - } - }, - "node_modules/dom-serializer": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-2.0.0.tgz", - "integrity": "sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg==", - "license": "MIT", - "dependencies": { - "domelementtype": "^2.3.0", - "domhandler": "^5.0.2", - "entities": "^4.2.0" - }, - "funding": { - "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1" - } - }, - "node_modules/domelementtype": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.3.0.tgz", - "integrity": "sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/fb55" - } - ], - "license": "BSD-2-Clause" - }, - "node_modules/domhandler": { - "version": "5.0.3", - "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-5.0.3.tgz", - "integrity": "sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==", - "license": "BSD-2-Clause", - "dependencies": { - "domelementtype": "^2.3.0" - }, - "engines": { - "node": ">= 4" - }, - "funding": { - "url": "https://github.com/fb55/domhandler?sponsor=1" - } - }, - "node_modules/domutils": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/domutils/-/domutils-3.2.2.tgz", - 
"integrity": "sha512-6kZKyUajlDuqlHKVX1w7gyslj9MPIXzIFiz/rGu35uC1wMi+kMhQwGhl4lt9unC9Vb9INnY9Z3/ZA3+FhASLaw==", - "license": "BSD-2-Clause", - "dependencies": { - "dom-serializer": "^2.0.0", - "domelementtype": "^2.3.0", - "domhandler": "^5.0.3" - }, - "funding": { - "url": "https://github.com/fb55/domutils?sponsor=1" - } - }, - "node_modules/dotenv": { - "version": "16.5.0", - "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.5.0.tgz", - "integrity": "sha512-m/C+AwOAr9/W1UOIZUo232ejMNnJAJtYQjUbHoNTBNTJSvqzzDh7vnrei3o3r3m9blf6ZoDkvcw0VmozNRFJxg==", - "license": "BSD-2-Clause", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://dotenvx.com" - } - }, - "node_modules/dunder-proto": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", - "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", - "license": "MIT", - "dependencies": { - "call-bind-apply-helpers": "^1.0.1", - "es-errors": "^1.3.0", - "gopd": "^1.2.0" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/eastasianwidth": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", - "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==", - "license": "MIT" - }, - "node_modules/ecdsa-sig-formatter": { - "version": "1.0.11", - "resolved": "https://registry.npmjs.org/ecdsa-sig-formatter/-/ecdsa-sig-formatter-1.0.11.tgz", - "integrity": "sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ==", - "license": "Apache-2.0", - "dependencies": { - "safe-buffer": "^5.0.1" - } - }, - "node_modules/ee-first": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz", - "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==", - "license": 
"MIT" - }, - "node_modules/emoji-regex": { - "version": "9.2.2", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", - "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", - "license": "MIT" - }, - "node_modules/encodeurl": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz", - "integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==", - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/entities": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz", - "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==", - "license": "BSD-2-Clause", - "engines": { - "node": ">=0.12" - }, - "funding": { - "url": "https://github.com/fb55/entities?sponsor=1" - } - }, - "node_modules/es-define-property": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", - "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", - "license": "MIT", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/es-errors": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", - "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", - "license": "MIT", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/es-module-lexer": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.7.0.tgz", - "integrity": "sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==", - "dev": true, - "license": "MIT" - }, - "node_modules/es-object-atoms": { - "version": "1.1.1", - "resolved": 
"https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", - "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", - "license": "MIT", - "dependencies": { - "es-errors": "^1.3.0" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/esbuild": { - "version": "0.25.5", - "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.25.5.tgz", - "integrity": "sha512-P8OtKZRv/5J5hhz0cUAdu/cLuPIKXpQl1R9pZtvmHWQvrAUVd0UNIPT4IB4W3rNOqVO0rlqHmCIbSwxh/c9yUQ==", - "dev": true, - "hasInstallScript": true, - "license": "MIT", - "bin": { - "esbuild": "bin/esbuild" - }, - "engines": { - "node": ">=18" - }, - "optionalDependencies": { - "@esbuild/aix-ppc64": "0.25.5", - "@esbuild/android-arm": "0.25.5", - "@esbuild/android-arm64": "0.25.5", - "@esbuild/android-x64": "0.25.5", - "@esbuild/darwin-arm64": "0.25.5", - "@esbuild/darwin-x64": "0.25.5", - "@esbuild/freebsd-arm64": "0.25.5", - "@esbuild/freebsd-x64": "0.25.5", - "@esbuild/linux-arm": "0.25.5", - "@esbuild/linux-arm64": "0.25.5", - "@esbuild/linux-ia32": "0.25.5", - "@esbuild/linux-loong64": "0.25.5", - "@esbuild/linux-mips64el": "0.25.5", - "@esbuild/linux-ppc64": "0.25.5", - "@esbuild/linux-riscv64": "0.25.5", - "@esbuild/linux-s390x": "0.25.5", - "@esbuild/linux-x64": "0.25.5", - "@esbuild/netbsd-arm64": "0.25.5", - "@esbuild/netbsd-x64": "0.25.5", - "@esbuild/openbsd-arm64": "0.25.5", - "@esbuild/openbsd-x64": "0.25.5", - "@esbuild/sunos-x64": "0.25.5", - "@esbuild/win32-arm64": "0.25.5", - "@esbuild/win32-ia32": "0.25.5", - "@esbuild/win32-x64": "0.25.5" - } - }, - "node_modules/escalade": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", - "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/escape-html": { - "version": "1.0.3", - "resolved": 
"https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", - "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==", - "license": "MIT" - }, - "node_modules/estree-walker": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", - "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/estree": "^1.0.0" - } - }, - "node_modules/etag": { - "version": "1.8.1", - "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", - "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/eventsource": { - "version": "3.0.7", - "resolved": "https://registry.npmjs.org/eventsource/-/eventsource-3.0.7.tgz", - "integrity": "sha512-CRT1WTyuQoD771GW56XEZFQ/ZoSfWid1alKGDYMmkt2yl8UXrVR4pspqWNEcqKvVIzg6PAltWjxcSSPrboA4iA==", - "license": "MIT", - "dependencies": { - "eventsource-parser": "^3.0.1" - }, - "engines": { - "node": ">=18.0.0" - } - }, - "node_modules/eventsource-parser": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/eventsource-parser/-/eventsource-parser-3.0.2.tgz", - "integrity": "sha512-6RxOBZ/cYgd8usLwsEl+EC09Au/9BcmCKYF2/xbml6DNczf7nv0MQb+7BA2F+li6//I+28VNlQR37XfQtcAJuA==", - "license": "MIT", - "engines": { - "node": ">=18.0.0" - } - }, - "node_modules/expect-type": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/expect-type/-/expect-type-1.2.1.tgz", - "integrity": "sha512-/kP8CAwxzLVEeFrMm4kMmy4CCDlpipyA7MYLVrdJIkV0fYF0UaigQHRsxHiuY/GEea+bh4KSv3TIlgr+2UL6bw==", - "dev": true, - "license": "Apache-2.0", - "engines": { - "node": ">=12.0.0" - } - }, - "node_modules/express": { - "version": "5.1.0", - "resolved": 
"https://registry.npmjs.org/express/-/express-5.1.0.tgz", - "integrity": "sha512-DT9ck5YIRU+8GYzzU5kT3eHGA5iL+1Zd0EutOmTE9Dtk+Tvuzd23VBU+ec7HPNSTxXYO55gPV/hq4pSBJDjFpA==", - "license": "MIT", - "dependencies": { - "accepts": "^2.0.0", - "body-parser": "^2.2.0", - "content-disposition": "^1.0.0", - "content-type": "^1.0.5", - "cookie": "^0.7.1", - "cookie-signature": "^1.2.1", - "debug": "^4.4.0", - "encodeurl": "^2.0.0", - "escape-html": "^1.0.3", - "etag": "^1.8.1", - "finalhandler": "^2.1.0", - "fresh": "^2.0.0", - "http-errors": "^2.0.0", - "merge-descriptors": "^2.0.0", - "mime-types": "^3.0.0", - "on-finished": "^2.4.1", - "once": "^1.4.0", - "parseurl": "^1.3.3", - "proxy-addr": "^2.0.7", - "qs": "^6.14.0", - "range-parser": "^1.2.1", - "router": "^2.2.0", - "send": "^1.1.0", - "serve-static": "^2.2.0", - "statuses": "^2.0.1", - "type-is": "^2.0.1", - "vary": "^1.1.2" - }, - "engines": { - "node": ">= 18" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/express" - } - }, - "node_modules/express-rate-limit": { - "version": "7.5.0", - "resolved": "https://registry.npmjs.org/express-rate-limit/-/express-rate-limit-7.5.0.tgz", - "integrity": "sha512-eB5zbQh5h+VenMPM3fh+nw1YExi5nMr6HUCR62ELSP11huvxm/Uir1H1QEyTkk5QX6A58pX6NmaTMceKZ0Eodg==", - "license": "MIT", - "engines": { - "node": ">= 16" - }, - "funding": { - "url": "https://github.com/sponsors/express-rate-limit" - }, - "peerDependencies": { - "express": "^4.11 || 5 || ^5.0.0-beta.1" - } - }, - "node_modules/extend": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", - "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==", - "license": "MIT" - }, - "node_modules/fast-deep-equal": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", - "integrity": 
"sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", - "license": "MIT" - }, - "node_modules/fast-json-stable-stringify": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", - "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", - "license": "MIT" - }, - "node_modules/fdir": { - "version": "6.4.6", - "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.4.6.tgz", - "integrity": "sha512-hiFoqpyZcfNm1yc4u8oWCf9A2c4D3QjCrks3zmoVKVxpQRzmPNar1hUJcBG2RQHvEVGDN+Jm81ZheVLAQMK6+w==", - "dev": true, - "license": "MIT", - "peerDependencies": { - "picomatch": "^3 || ^4" - }, - "peerDependenciesMeta": { - "picomatch": { - "optional": true - } - } - }, - "node_modules/finalhandler": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-2.1.0.tgz", - "integrity": "sha512-/t88Ty3d5JWQbWYgaOGCCYfXRwV1+be02WqYYlL6h0lEiUAMPM8o8qKGO01YIkOHzka2up08wvgYD0mDiI+q3Q==", - "license": "MIT", - "dependencies": { - "debug": "^4.4.0", - "encodeurl": "^2.0.0", - "escape-html": "^1.0.3", - "on-finished": "^2.4.1", - "parseurl": "^1.3.3", - "statuses": "^2.0.1" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/foreground-child": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.3.1.tgz", - "integrity": "sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw==", - "license": "ISC", - "dependencies": { - "cross-spawn": "^7.0.6", - "signal-exit": "^4.0.1" - }, - "engines": { - "node": ">=14" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/forwarded": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz", - "integrity": 
"sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/fresh": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/fresh/-/fresh-2.0.0.tgz", - "integrity": "sha512-Rx/WycZ60HOaqLKAi6cHRKKI7zxWbJ31MhntmtwMoaTeF7XFH9hhBp8vITaMidfljRQ6eYWCKkaTK+ykVJHP2A==", - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/fsevents": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", - "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", - "dev": true, - "hasInstallScript": true, - "license": "MIT", - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": "^8.16.0 || ^10.6.0 || >=11.0.0" - } - }, - "node_modules/function-bind": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", - "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", - "license": "MIT", - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/gaxios": { - "version": "6.7.1", - "resolved": "https://registry.npmjs.org/gaxios/-/gaxios-6.7.1.tgz", - "integrity": "sha512-LDODD4TMYx7XXdpwxAVRAIAuB0bzv0s+ywFonY46k126qzQHT9ygyoa9tncmOiQmmDrik65UYsEkv3lbfqQ3yQ==", - "license": "Apache-2.0", - "dependencies": { - "extend": "^3.0.2", - "https-proxy-agent": "^7.0.1", - "is-stream": "^2.0.0", - "node-fetch": "^2.6.9", - "uuid": "^9.0.1" - }, - "engines": { - "node": ">=14" - } - }, - "node_modules/gcp-metadata": { - "version": "6.1.1", - "resolved": "https://registry.npmjs.org/gcp-metadata/-/gcp-metadata-6.1.1.tgz", - "integrity": "sha512-a4tiq7E0/5fTjxPAaH4jpjkSv/uCaU2p5KC6HVGrvl0cDjA8iBZv4vv1gyzlmK0ZUKqwpOyQMKzZQe3lTit77A==", - "license": "Apache-2.0", - "dependencies": { - "gaxios": "^6.1.1", - 
"google-logging-utils": "^0.0.2", - "json-bigint": "^1.0.0" - }, - "engines": { - "node": ">=14" - } - }, - "node_modules/get-caller-file": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", - "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", - "license": "ISC", - "engines": { - "node": "6.* || 8.* || >= 10.*" - } - }, - "node_modules/get-intrinsic": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", - "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", - "license": "MIT", - "dependencies": { - "call-bind-apply-helpers": "^1.0.2", - "es-define-property": "^1.0.1", - "es-errors": "^1.3.0", - "es-object-atoms": "^1.1.1", - "function-bind": "^1.1.2", - "get-proto": "^1.0.1", - "gopd": "^1.2.0", - "has-symbols": "^1.1.0", - "hasown": "^2.0.2", - "math-intrinsics": "^1.1.0" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/get-proto": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", - "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", - "license": "MIT", - "dependencies": { - "dunder-proto": "^1.0.1", - "es-object-atoms": "^1.0.0" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/glob": { - "version": "10.4.5", - "resolved": "https://registry.npmjs.org/glob/-/glob-10.4.5.tgz", - "integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==", - "license": "ISC", - "dependencies": { - "foreground-child": "^3.1.0", - "jackspeak": "^3.1.2", - "minimatch": "^9.0.4", - "minipass": "^7.1.2", - "package-json-from-dist": "^1.0.0", - "path-scurry": "^1.11.1" - }, - "bin": { - "glob": 
"dist/esm/bin.mjs" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/google-auth-library": { - "version": "9.15.1", - "resolved": "https://registry.npmjs.org/google-auth-library/-/google-auth-library-9.15.1.tgz", - "integrity": "sha512-Jb6Z0+nvECVz+2lzSMt9u98UsoakXxA2HGHMCxh+so3n90XgYWkq5dur19JAJV7ONiJY22yBTyJB1TSkvPq9Ng==", - "license": "Apache-2.0", - "dependencies": { - "base64-js": "^1.3.0", - "ecdsa-sig-formatter": "^1.0.11", - "gaxios": "^6.1.1", - "gcp-metadata": "^6.1.0", - "gtoken": "^7.0.0", - "jws": "^4.0.0" - }, - "engines": { - "node": ">=14" - } - }, - "node_modules/google-logging-utils": { - "version": "0.0.2", - "resolved": "https://registry.npmjs.org/google-logging-utils/-/google-logging-utils-0.0.2.tgz", - "integrity": "sha512-NEgUnEcBiP5HrPzufUkBzJOD/Sxsco3rLNo1F1TNf7ieU8ryUzBhqba8r756CjLX7rn3fHl6iLEwPYuqpoKgQQ==", - "license": "Apache-2.0", - "engines": { - "node": ">=14" - } - }, - "node_modules/gopd": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", - "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", - "license": "MIT", - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/gtoken": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/gtoken/-/gtoken-7.1.0.tgz", - "integrity": "sha512-pCcEwRi+TKpMlxAQObHDQ56KawURgyAf6jtIY046fJ5tIv3zDe/LEIubckAO8fj6JnAxLdmWkUfNyulQ2iKdEw==", - "license": "MIT", - "dependencies": { - "gaxios": "^6.0.0", - "jws": "^4.0.0" - }, - "engines": { - "node": ">=14.0.0" - } - }, - "node_modules/has-symbols": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", - "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", - "license": "MIT", - "engines": { - "node": ">= 0.4" - }, - "funding": { - 
"url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/hasown": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", - "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", - "license": "MIT", - "dependencies": { - "function-bind": "^1.1.2" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/html-to-text": { - "version": "9.0.5", - "resolved": "https://registry.npmjs.org/html-to-text/-/html-to-text-9.0.5.tgz", - "integrity": "sha512-qY60FjREgVZL03vJU6IfMV4GDjGBIoOyvuFdpBDIX9yTlDw0TjxVBQp+P8NvpdIXNJvfWBTNul7fsAQJq2FNpg==", - "license": "MIT", - "dependencies": { - "@selderee/plugin-htmlparser2": "^0.11.0", - "deepmerge": "^4.3.1", - "dom-serializer": "^2.0.0", - "htmlparser2": "^8.0.2", - "selderee": "^0.11.0" - }, - "engines": { - "node": ">=14" - } - }, - "node_modules/htmlparser2": { - "version": "8.0.2", - "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-8.0.2.tgz", - "integrity": "sha512-GYdjWKDkbRLkZ5geuHs5NY1puJ+PXwP7+fHPRz06Eirsb9ugf6d8kkXav6ADhcODhFFPMIXyxkxSuMf3D6NCFA==", - "funding": [ - "https://github.com/fb55/htmlparser2?sponsor=1", - { - "type": "github", - "url": "https://github.com/sponsors/fb55" - } - ], - "license": "MIT", - "dependencies": { - "domelementtype": "^2.3.0", - "domhandler": "^5.0.3", - "domutils": "^3.0.1", - "entities": "^4.4.0" - } - }, - "node_modules/http-errors": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz", - "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==", - "license": "MIT", - "dependencies": { - "depd": "2.0.0", - "inherits": "2.0.4", - "setprototypeof": "1.2.0", - "statuses": "2.0.1", - "toidentifier": "1.0.1" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/http-errors/node_modules/statuses": { - "version": "2.0.1", - "resolved": 
"https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz", - "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==", - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/https-proxy-agent": { - "version": "7.0.6", - "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz", - "integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==", - "license": "MIT", - "dependencies": { - "agent-base": "^7.1.2", - "debug": "4" - }, - "engines": { - "node": ">= 14" - } - }, - "node_modules/iconv-lite": { - "version": "0.6.3", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", - "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", - "license": "MIT", - "dependencies": { - "safer-buffer": ">= 2.1.2 < 3.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/ignore": { - "version": "7.0.5", - "license": "MIT", - "engines": { - "node": ">= 4" - } - }, - "node_modules/import-in-the-middle": { - "version": "1.14.2", - "resolved": "https://registry.npmjs.org/import-in-the-middle/-/import-in-the-middle-1.14.2.tgz", - "integrity": "sha512-5tCuY9BV8ujfOpwtAGgsTx9CGUapcFMEEyByLv1B+v2+6DhAcw+Zr0nhQT7uwaZ7DiourxFEscghOR8e1aPLQw==", - "license": "Apache-2.0", - "dependencies": { - "acorn": "^8.14.0", - "acorn-import-attributes": "^1.9.5", - "cjs-module-lexer": "^1.2.2", - "module-details-from-path": "^1.0.3" - } - }, - "node_modules/inherits": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", - "license": "ISC" - }, - "node_modules/ipaddr.js": { - "version": "1.9.1", - "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", - "integrity": 
"sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==", - "license": "MIT", - "engines": { - "node": ">= 0.10" - } - }, - "node_modules/is-core-module": { - "version": "2.16.1", - "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz", - "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==", - "license": "MIT", - "dependencies": { - "hasown": "^2.0.2" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/is-docker": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-3.0.0.tgz", - "integrity": "sha512-eljcgEDlEns/7AXFosB5K/2nCM4P7FQPkGc/DWLy5rmFEWvZayGrik1d9/QIY5nJ4f9YsVvBkA6kJpHn9rISdQ==", - "license": "MIT", - "bin": { - "is-docker": "cli.js" - }, - "engines": { - "node": "^12.20.0 || ^14.13.1 || >=16.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/is-inside-container": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-inside-container/-/is-inside-container-1.0.0.tgz", - "integrity": "sha512-KIYLCCJghfHZxqjYBE7rEy0OBuTd5xCHS7tHVgvCLkx7StIoaxwNW3hCALgEUjFfeRk+MG/Qxmp/vtETEF3tRA==", - "license": "MIT", - "dependencies": { - "is-docker": "^3.0.0" - }, - "bin": { - "is-inside-container": "cli.js" - }, - "engines": { - "node": ">=14.16" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/is-promise": { - "version": "4.0.0", - "resolved": 
"https://registry.npmjs.org/is-promise/-/is-promise-4.0.0.tgz", - "integrity": "sha512-hvpoI6korhJMnej285dSg6nu1+e6uxs7zG3BYAm5byqDsgJNWwxzM6z6iZiAgQR4TJ30JmBTOwqZUw3WlyH3AQ==", - "license": "MIT" - }, - "node_modules/is-stream": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", - "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", - "license": "MIT", - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/is-wsl": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-3.1.0.tgz", - "integrity": "sha512-UcVfVfaK4Sc4m7X3dUSoHoozQGBEFeDC+zVo06t98xe8CzHSZZBekNXH+tu0NalHolcJ/QAGqS46Hef7QXBIMw==", - "license": "MIT", - "dependencies": { - "is-inside-container": "^1.0.0" - }, - "engines": { - "node": ">=16" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/isexe": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", - "license": "ISC" - }, - "node_modules/jackspeak": { - "version": "3.4.3", - "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-3.4.3.tgz", - "integrity": "sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==", - "license": "BlueOak-1.0.0", - "dependencies": { - "@isaacs/cliui": "^8.0.2" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - }, - "optionalDependencies": { - "@pkgjs/parseargs": "^0.11.0" - } - }, - "node_modules/js-tokens": { - "version": "9.0.1", - "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-9.0.1.tgz", - "integrity": "sha512-mxa9E9ITFOt0ban3j6L5MpjwegGz6lBQmM1IJkWeBZGcMxto50+eWdjC/52xDbS2vy0k7vIMK0Fe2wfL9OQSpQ==", - "dev": true, - "license": "MIT" - }, - 
"node_modules/json-bigint": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/json-bigint/-/json-bigint-1.0.0.tgz", - "integrity": "sha512-SiPv/8VpZuWbvLSMtTDU8hEfrZWg/mH/nV/b4o0CYbSxu1UIQPLdwKOCIyLQX+VIPO5vrLX3i8qtqFyhdPSUSQ==", - "license": "MIT", - "dependencies": { - "bignumber.js": "^9.0.0" - } - }, - "node_modules/json-schema-traverse": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", - "license": "MIT" - }, - "node_modules/jwa": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/jwa/-/jwa-2.0.1.tgz", - "integrity": "sha512-hRF04fqJIP8Abbkq5NKGN0Bbr3JxlQ+qhZufXVr0DvujKy93ZCbXZMHDL4EOtodSbCWxOqR8MS1tXA5hwqCXDg==", - "license": "MIT", - "dependencies": { - "buffer-equal-constant-time": "^1.0.1", - "ecdsa-sig-formatter": "1.0.11", - "safe-buffer": "^5.0.1" - } - }, - "node_modules/jws": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/jws/-/jws-4.0.0.tgz", - "integrity": "sha512-KDncfTmOZoOMTFG4mBlG0qUIOlc03fmzH+ru6RgYVZhPkyiy/92Owlt/8UEN+a4TXR1FQetfIpJE8ApdvdVxTg==", - "license": "MIT", - "dependencies": { - "jwa": "^2.0.0", - "safe-buffer": "^5.0.1" - } - }, - "node_modules/leac": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/leac/-/leac-0.6.0.tgz", - "integrity": "sha512-y+SqErxb8h7nE/fiEX07jsbuhrpO9lL8eca7/Y1nuWV2moNlXhyd59iDGcRf6moVyDMbmTNzL40SUyrFU/yDpg==", - "license": "MIT", - "funding": { - "url": "https://ko-fi.com/killymxi" - } - }, - "node_modules/lodash.camelcase": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/lodash.camelcase/-/lodash.camelcase-4.3.0.tgz", - "integrity": "sha512-TwuEnCnxbc3rAvhf/LbG7tJUDzhqXyFnv3dtzLOPgCG/hODL7WFnsbwktkD7yUV0RrreP/l1PALq/YSg6VvjlA==", - "license": "MIT" - }, - "node_modules/lodash.merge": { - "version": "4.6.2", - "resolved": 
"https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", - "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", - "license": "MIT" - }, - "node_modules/long": { - "version": "5.3.2", - "resolved": "https://registry.npmjs.org/long/-/long-5.3.2.tgz", - "integrity": "sha512-mNAgZ1GmyNhD7AuqnTG3/VQ26o760+ZYBPKjPvugO8+nLbYfX6TVpJPseBvopbdY+qpZ/lKUnmEc1LeZYS3QAA==", - "license": "Apache-2.0" - }, - "node_modules/loupe": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/loupe/-/loupe-3.1.3.tgz", - "integrity": "sha512-kkIp7XSkP78ZxJEsSxW3712C6teJVoeHHwgo9zJ380de7IYyJ2ISlxojcH2pC5OFLewESmnRi/+XCDIEEVyoug==", - "dev": true, - "license": "MIT" - }, - "node_modules/lru-cache": { - "version": "10.4.3", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz", - "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==", - "license": "ISC" - }, - "node_modules/magic-string": { - "version": "0.30.17", - "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.17.tgz", - "integrity": "sha512-sNPKHvyjVf7gyjwS4xGTaW/mCnF8wnjtifKBEhxfZ7E/S8tQ0rssrwGNn6q8JH/ohItJfSQp9mBtQYuTlH5QnA==", - "dev": true, - "license": "MIT", - "dependencies": { - "@jridgewell/sourcemap-codec": "^1.5.0" - } - }, - "node_modules/math-intrinsics": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", - "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", - "license": "MIT", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/media-typer": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-1.1.0.tgz", - "integrity": "sha512-aisnrDP4GNe06UcKFnV5bfMNPBUw4jsLGaWwWfnH3v02GnBuXX2MCVn5RbrWo0j3pczUilYblq7fQ7Nw2t5XKw==", - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - 
"node_modules/merge-descriptors": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-2.0.0.tgz", - "integrity": "sha512-Snk314V5ayFLhp3fkUREub6WtjBfPdCPY1Ln8/8munuLuiYhsABgBVWsozAG+MWMbVEvcdcpbi9R7ww22l9Q3g==", - "license": "MIT", - "engines": { - "node": ">=18" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/mime-db": { - "version": "1.54.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.54.0.tgz", - "integrity": "sha512-aU5EJuIN2WDemCcAp2vFBfp/m4EAhWJnUNSSw0ixs7/kXbd6Pg64EmwJkNdFhB8aWt1sH2CTXrLxo/iAGV3oPQ==", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/mime-types": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-3.0.1.tgz", - "integrity": "sha512-xRc4oEhT6eaBpU1XF7AjpOFD+xQmXNB5OVKwp4tqCuBpHLS/ZbBDrc07mYTDqVMg6PfxUjjNp85O6Cd2Z/5HWA==", - "license": "MIT", - "dependencies": { - "mime-db": "^1.54.0" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/minimatch": { - "version": "9.0.5", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", - "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", - "license": "ISC", - "dependencies": { - "brace-expansion": "^2.0.1" - }, - "engines": { - "node": ">=16 || 14 >=14.17" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/minipass": { - "version": "7.1.2", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", - "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", - "license": "ISC", - "engines": { - "node": ">=16 || 14 >=14.17" - } - }, - "node_modules/module-details-from-path": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/module-details-from-path/-/module-details-from-path-1.0.4.tgz", - "integrity": 
"sha512-EGWKgxALGMgzvxYF1UyGTy0HXX/2vHLkw6+NvDKW2jypWbHpjQuj4UMcqQWXHERJhVGKikolT06G3bcKe4fi7w==", - "license": "MIT" - }, - "node_modules/ms": { - "version": "2.1.3", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", - "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", - "license": "MIT" - }, - "node_modules/nanoid": { - "version": "3.3.11", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", - "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], - "license": "MIT", - "bin": { - "nanoid": "bin/nanoid.cjs" - }, - "engines": { - "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" - } - }, - "node_modules/negotiator": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-1.0.0.tgz", - "integrity": "sha512-8Ofs/AUQh8MaEcrlq5xOX0CQ9ypTF5dl78mjlMNfOK08fzpgTHQRQPBxcPlEtIw0yRpws+Zo/3r+5WRby7u3Gg==", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/node-fetch": { - "version": "2.7.0", - "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz", - "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", - "license": "MIT", - "dependencies": { - "whatwg-url": "^5.0.0" - }, - "engines": { - "node": "4.x || >=6.0.0" - }, - "peerDependencies": { - "encoding": "^0.1.0" - }, - "peerDependenciesMeta": { - "encoding": { - "optional": true - } - } - }, - "node_modules/object-assign": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", - "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/object-inspect": { 
- "version": "1.13.4", - "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz", - "integrity": "sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==", - "license": "MIT", - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/on-finished": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz", - "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==", - "license": "MIT", - "dependencies": { - "ee-first": "1.1.1" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/once": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", - "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", - "license": "ISC", - "dependencies": { - "wrappy": "1" - } - }, - "node_modules/open": { - "version": "10.1.2", - "resolved": "https://registry.npmjs.org/open/-/open-10.1.2.tgz", - "integrity": "sha512-cxN6aIDPz6rm8hbebcP7vrQNhvRcveZoJU72Y7vskh4oIm+BZwBECnx5nTmrlres1Qapvx27Qo1Auukpf8PKXw==", - "license": "MIT", - "dependencies": { - "default-browser": "^5.2.1", - "define-lazy-prop": "^3.0.0", - "is-inside-container": "^1.0.0", - "is-wsl": "^3.1.0" - }, - "engines": { - "node": ">=18" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/package-json-from-dist": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz", - "integrity": "sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==", - "license": "BlueOak-1.0.0" - }, - "node_modules/parseley": { - "version": "0.12.1", - "resolved": "https://registry.npmjs.org/parseley/-/parseley-0.12.1.tgz", - "integrity": 
"sha512-e6qHKe3a9HWr0oMRVDTRhKce+bRO8VGQR3NyVwcjwrbhMmFCX9KszEV35+rn4AdilFAq9VPxP/Fe1wC9Qjd2lw==", - "license": "MIT", - "dependencies": { - "leac": "^0.6.0", - "peberminta": "^0.9.0" - }, - "funding": { - "url": "https://ko-fi.com/killymxi" - } - }, - "node_modules/parseurl": { - "version": "1.3.3", - "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", - "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==", - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/path-key": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", - "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/path-parse": { - "version": "1.0.7", - "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", - "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", - "license": "MIT" - }, - "node_modules/path-scurry": { - "version": "1.11.1", - "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.11.1.tgz", - "integrity": "sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==", - "license": "BlueOak-1.0.0", - "dependencies": { - "lru-cache": "^10.2.0", - "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" - }, - "engines": { - "node": ">=16 || 14 >=14.18" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/path-to-regexp": { - "version": "8.2.0", - "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-8.2.0.tgz", - "integrity": "sha512-TdrF7fW9Rphjq4RjrW0Kp2AW0Ahwu9sRGTkS6bvDi0SCwZlEZYmcfDbEsTz8RVk0EHIS/Vd1bv3JhG+1xZuAyQ==", - "license": "MIT", - "engines": { - "node": ">=16" - } - }, - "node_modules/pathe": { - "version": "2.0.3", - "resolved": 
"https://registry.npmjs.org/pathe/-/pathe-2.0.3.tgz", - "integrity": "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==", - "dev": true, - "license": "MIT" - }, - "node_modules/pathval": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/pathval/-/pathval-2.0.0.tgz", - "integrity": "sha512-vE7JKRyES09KiunauX7nd2Q9/L7lhok4smP9RZTDeD4MVs72Dp2qNFVz39Nz5a0FVEW0BJR6C0DYrq6unoziZA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 14.16" - } - }, - "node_modules/peberminta": { - "version": "0.9.0", - "resolved": "https://registry.npmjs.org/peberminta/-/peberminta-0.9.0.tgz", - "integrity": "sha512-XIxfHpEuSJbITd1H3EeQwpcZbTLHc+VVr8ANI9t5sit565tsI4/xK3KWTUFE2e6QiangUkh3B0jihzmGnNrRsQ==", - "license": "MIT", - "funding": { - "url": "https://ko-fi.com/killymxi" - } - }, - "node_modules/picocolors": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", - "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", - "dev": true, - "license": "ISC" - }, - "node_modules/picomatch": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.2.tgz", - "integrity": "sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/jonschlinkert" - } - }, - "node_modules/pkce-challenge": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/pkce-challenge/-/pkce-challenge-5.0.0.tgz", - "integrity": "sha512-ueGLflrrnvwB3xuo/uGob5pd5FN7l0MsLf0Z87o/UQmRtwjvfylfc9MurIxRAWywCYTgrvpXBcqjV4OfCYGCIQ==", - "license": "MIT", - "engines": { - "node": ">=16.20.0" - } - }, - "node_modules/postcss": { - "version": "8.5.5", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.5.tgz", - "integrity": 
"sha512-d/jtm+rdNT8tpXuHY5MMtcbJFBkhXE6593XVR9UoGCH8jSFGci7jGvMGH5RYd5PBJW+00NZQt6gf7CbagJCrhg==", - "dev": true, - "funding": [ - { - "type": "opencollective", - "url": "https://opencollective.com/postcss/" - }, - { - "type": "tidelift", - "url": "https://tidelift.com/funding/github/npm/postcss" - }, - { - "type": "github", - "url": "https://github.com/sponsors/ai" - } - ], - "license": "MIT", - "dependencies": { - "nanoid": "^3.3.11", - "picocolors": "^1.1.1", - "source-map-js": "^1.2.1" - }, - "engines": { - "node": "^10 || ^12 || >=14" - } - }, - "node_modules/protobufjs": { - "version": "7.5.3", - "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-7.5.3.tgz", - "integrity": "sha512-sildjKwVqOI2kmFDiXQ6aEB0fjYTafpEvIBs8tOR8qI4spuL9OPROLVu2qZqi/xgCfsHIwVqlaF8JBjWFHnKbw==", - "hasInstallScript": true, - "license": "BSD-3-Clause", - "dependencies": { - "@protobufjs/aspromise": "^1.1.2", - "@protobufjs/base64": "^1.1.2", - "@protobufjs/codegen": "^2.0.4", - "@protobufjs/eventemitter": "^1.1.0", - "@protobufjs/fetch": "^1.1.0", - "@protobufjs/float": "^1.0.2", - "@protobufjs/inquire": "^1.1.0", - "@protobufjs/path": "^1.1.2", - "@protobufjs/pool": "^1.1.0", - "@protobufjs/utf8": "^1.1.0", - "@types/node": ">=13.7.0", - "long": "^5.0.0" - }, - "engines": { - "node": ">=12.0.0" - } - }, - "node_modules/proxy-addr": { - "version": "2.0.7", - "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", - "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==", - "license": "MIT", - "dependencies": { - "forwarded": "0.2.0", - "ipaddr.js": "1.9.1" - }, - "engines": { - "node": ">= 0.10" - } - }, - "node_modules/punycode": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", - "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - 
"node_modules/qs": { - "version": "6.14.0", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.14.0.tgz", - "integrity": "sha512-YWWTjgABSKcvs/nWBi9PycY/JiPJqOD4JA6o9Sej2AtvSGarXxKC3OQSk4pAarbdQlKAh5D4FCQkJNkW+GAn3w==", - "license": "BSD-3-Clause", - "dependencies": { - "side-channel": "^1.1.0" - }, - "engines": { - "node": ">=0.6" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/range-parser": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz", - "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==", - "license": "MIT", - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/raw-body": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-3.0.0.tgz", - "integrity": "sha512-RmkhL8CAyCRPXCE28MMH0z2PNWQBNk2Q09ZdxM9IOOXwxwZbN+qbWaatPkdkWIKL2ZVDImrN/pK5HTRz2PcS4g==", - "license": "MIT", - "dependencies": { - "bytes": "3.1.2", - "http-errors": "2.0.0", - "iconv-lite": "0.6.3", - "unpipe": "1.0.0" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/require-directory": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", - "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/require-in-the-middle": { - "version": "7.5.2", - "resolved": "https://registry.npmjs.org/require-in-the-middle/-/require-in-the-middle-7.5.2.tgz", - "integrity": "sha512-gAZ+kLqBdHarXB64XpAe2VCjB7rIRv+mU8tfRWziHRJ5umKsIHN2tLLv6EtMw7WCdP19S0ERVMldNvxYCHnhSQ==", - "license": "MIT", - "dependencies": { - "debug": "^4.3.5", - "module-details-from-path": "^1.0.3", - "resolve": "^1.22.8" - }, - "engines": { - "node": ">=8.6.0" - } - }, - "node_modules/resolve": { - "version": "1.22.10", - "resolved": 
"https://registry.npmjs.org/resolve/-/resolve-1.22.10.tgz", - "integrity": "sha512-NPRy+/ncIMeDlTAsuqwKIiferiawhefFJtkNSW0qZJEqMEb+qBt/77B/jGeeek+F0uOeN05CDa6HXbbIgtVX4w==", - "license": "MIT", - "dependencies": { - "is-core-module": "^2.16.0", - "path-parse": "^1.0.7", - "supports-preserve-symlinks-flag": "^1.0.0" - }, - "bin": { - "resolve": "bin/resolve" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/rollup": { - "version": "4.43.0", - "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.43.0.tgz", - "integrity": "sha512-wdN2Kd3Twh8MAEOEJZsuxuLKCsBEo4PVNLK6tQWAn10VhsVewQLzcucMgLolRlhFybGxfclbPeEYBaP6RvUFGg==", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/estree": "1.0.7" - }, - "bin": { - "rollup": "dist/bin/rollup" - }, - "engines": { - "node": ">=18.0.0", - "npm": ">=8.0.0" - }, - "optionalDependencies": { - "@rollup/rollup-android-arm-eabi": "4.43.0", - "@rollup/rollup-android-arm64": "4.43.0", - "@rollup/rollup-darwin-arm64": "4.43.0", - "@rollup/rollup-darwin-x64": "4.43.0", - "@rollup/rollup-freebsd-arm64": "4.43.0", - "@rollup/rollup-freebsd-x64": "4.43.0", - "@rollup/rollup-linux-arm-gnueabihf": "4.43.0", - "@rollup/rollup-linux-arm-musleabihf": "4.43.0", - "@rollup/rollup-linux-arm64-gnu": "4.43.0", - "@rollup/rollup-linux-arm64-musl": "4.43.0", - "@rollup/rollup-linux-loongarch64-gnu": "4.43.0", - "@rollup/rollup-linux-powerpc64le-gnu": "4.43.0", - "@rollup/rollup-linux-riscv64-gnu": "4.43.0", - "@rollup/rollup-linux-riscv64-musl": "4.43.0", - "@rollup/rollup-linux-s390x-gnu": "4.43.0", - "@rollup/rollup-linux-x64-gnu": "4.43.0", - "@rollup/rollup-linux-x64-musl": "4.43.0", - "@rollup/rollup-win32-arm64-msvc": "4.43.0", - "@rollup/rollup-win32-ia32-msvc": "4.43.0", - "@rollup/rollup-win32-x64-msvc": "4.43.0", - "fsevents": "~2.3.2" - } - }, - "node_modules/rollup/node_modules/@types/estree": { - "version": "1.0.7", - "resolved": 
"https://registry.npmjs.org/@types/estree/-/estree-1.0.7.tgz", - "integrity": "sha512-w28IoSUCJpidD/TGviZwwMJckNESJZXFu7NBZ5YJ4mEUnNraUn9Pm8HSZm/jDF1pDWYKspWE7oVphigUPRakIQ==", - "dev": true, - "license": "MIT" - }, - "node_modules/router": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/router/-/router-2.2.0.tgz", - "integrity": "sha512-nLTrUKm2UyiL7rlhapu/Zl45FwNgkZGaCpZbIHajDYgwlJCOzLSk+cIPAnsEqV955GjILJnKbdQC1nVPz+gAYQ==", - "license": "MIT", - "dependencies": { - "debug": "^4.4.0", - "depd": "^2.0.0", - "is-promise": "^4.0.0", - "parseurl": "^1.3.3", - "path-to-regexp": "^8.0.0" - }, - "engines": { - "node": ">= 18" - } - }, - "node_modules/run-applescript": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/run-applescript/-/run-applescript-7.0.0.tgz", - "integrity": "sha512-9by4Ij99JUr/MCFBUkDKLWK3G9HVXmabKz9U5MlIAIuvuzkiOicRYs8XJLxX+xahD+mLiiCYDqF9dKAgtzKP1A==", - "license": "MIT", - "engines": { - "node": ">=18" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/safe-buffer": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "MIT" - }, - "node_modules/safer-buffer": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", - "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", - "license": "MIT" - }, - "node_modules/selderee": { - "version": "0.11.0", - "resolved": "https://registry.npmjs.org/selderee/-/selderee-0.11.0.tgz", - "integrity": 
"sha512-5TF+l7p4+OsnP8BCCvSyZiSPc4x4//p5uPwK8TCnVPJYRmU2aYKMpOXvw8zM5a5JvuuCGN1jmsMwuU2W02ukfA==", - "license": "MIT", - "dependencies": { - "parseley": "^0.12.0" - }, - "funding": { - "url": "https://ko-fi.com/killymxi" - } - }, - "node_modules/semver": { - "version": "7.7.2", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.2.tgz", - "integrity": "sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==", - "license": "ISC", - "bin": { - "semver": "bin/semver.js" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/send": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/send/-/send-1.2.0.tgz", - "integrity": "sha512-uaW0WwXKpL9blXE2o0bRhoL2EGXIrZxQ2ZQ4mgcfoBxdFmQold+qWsD2jLrfZ0trjKL6vOw0j//eAwcALFjKSw==", - "license": "MIT", - "dependencies": { - "debug": "^4.3.5", - "encodeurl": "^2.0.0", - "escape-html": "^1.0.3", - "etag": "^1.8.1", - "fresh": "^2.0.0", - "http-errors": "^2.0.0", - "mime-types": "^3.0.1", - "ms": "^2.1.3", - "on-finished": "^2.4.1", - "range-parser": "^1.2.1", - "statuses": "^2.0.1" - }, - "engines": { - "node": ">= 18" - } - }, - "node_modules/serve-static": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-2.2.0.tgz", - "integrity": "sha512-61g9pCh0Vnh7IutZjtLGGpTA355+OPn2TyDv/6ivP2h/AdAVX9azsoxmg2/M6nZeQZNYBEwIcsne1mJd9oQItQ==", - "license": "MIT", - "dependencies": { - "encodeurl": "^2.0.0", - "escape-html": "^1.0.3", - "parseurl": "^1.3.3", - "send": "^1.2.0" - }, - "engines": { - "node": ">= 18" - } - }, - "node_modules/setprototypeof": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz", - "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==", - "license": "ISC" - }, - "node_modules/shebang-command": { - "version": "2.0.0", - "resolved": 
"https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", - "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", - "license": "MIT", - "dependencies": { - "shebang-regex": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/shebang-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", - "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/shell-quote": { - "version": "1.8.3", - "resolved": "https://registry.npmjs.org/shell-quote/-/shell-quote-1.8.3.tgz", - "integrity": "sha512-ObmnIF4hXNg1BqhnHmgbDETF8dLPCggZWBjkQfhZpbszZnYur5DUljTcCHii5LC3J5E0yeO/1LIMyH+UvHQgyw==", - "license": "MIT", - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/shimmer": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/shimmer/-/shimmer-1.2.1.tgz", - "integrity": "sha512-sQTKC1Re/rM6XyFM6fIAGHRPVGvyXfgzIDvzoq608vM+jeyVD0Tu1E6Np0Kc2zAIFWIj963V2800iF/9LPieQw==", - "license": "BSD-2-Clause" - }, - "node_modules/side-channel": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.1.0.tgz", - "integrity": "sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==", - "license": "MIT", - "dependencies": { - "es-errors": "^1.3.0", - "object-inspect": "^1.13.3", - "side-channel-list": "^1.0.0", - "side-channel-map": "^1.0.1", - "side-channel-weakmap": "^1.0.2" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/side-channel-list": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/side-channel-list/-/side-channel-list-1.0.0.tgz", - "integrity": 
"sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==", - "license": "MIT", - "dependencies": { - "es-errors": "^1.3.0", - "object-inspect": "^1.13.3" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/side-channel-map": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/side-channel-map/-/side-channel-map-1.0.1.tgz", - "integrity": "sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==", - "license": "MIT", - "dependencies": { - "call-bound": "^1.0.2", - "es-errors": "^1.3.0", - "get-intrinsic": "^1.2.5", - "object-inspect": "^1.13.3" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/side-channel-weakmap": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/side-channel-weakmap/-/side-channel-weakmap-1.0.2.tgz", - "integrity": "sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==", - "license": "MIT", - "dependencies": { - "call-bound": "^1.0.2", - "es-errors": "^1.3.0", - "get-intrinsic": "^1.2.5", - "object-inspect": "^1.13.3", - "side-channel-map": "^1.0.1" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/siginfo": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/siginfo/-/siginfo-2.0.0.tgz", - "integrity": "sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==", - "dev": true, - "license": "ISC" - }, - "node_modules/signal-exit": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", - "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", - "license": "ISC", - "engines": { - "node": ">=14" - }, - "funding": 
{ - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/simple-git": { - "version": "3.28.0", - "resolved": "https://registry.npmjs.org/simple-git/-/simple-git-3.28.0.tgz", - "integrity": "sha512-Rs/vQRwsn1ILH1oBUy8NucJlXmnnLeLCfcvbSehkPzbv3wwoFWIdtfd6Ndo6ZPhlPsCZ60CPI4rxurnwAa+a2w==", - "license": "MIT", - "dependencies": { - "@kwsites/file-exists": "^1.1.1", - "@kwsites/promise-deferred": "^1.1.1", - "debug": "^4.4.0" - }, - "funding": { - "type": "github", - "url": "https://github.com/steveukx/git-js?sponsor=1" - } - }, - "node_modules/source-map-js": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", - "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", - "dev": true, - "license": "BSD-3-Clause", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/stackback": { - "version": "0.0.2", - "resolved": "https://registry.npmjs.org/stackback/-/stackback-0.0.2.tgz", - "integrity": "sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==", - "dev": true, - "license": "MIT" - }, - "node_modules/statuses": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.2.tgz", - "integrity": "sha512-DvEy55V3DB7uknRo+4iOGT5fP1slR8wQohVdknigZPMpMstaKJQWhwiYBACJE3Ul2pTnATihhBYnRhZQHGBiRw==", - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/std-env": { - "version": "3.9.0", - "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.9.0.tgz", - "integrity": "sha512-UGvjygr6F6tpH7o2qyqR6QYpwraIjKSdtzyBdyytFOHmPZY917kwdwLG0RbOjWOnKmnm3PeHjaoLLMie7kPLQw==", - "dev": true, - "license": "MIT" - }, - "node_modules/string-width": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", - "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", - 
"license": "MIT", - "dependencies": { - "eastasianwidth": "^0.2.0", - "emoji-regex": "^9.2.2", - "strip-ansi": "^7.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/string-width-cjs": { - "name": "string-width", - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "license": "MIT", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/string-width-cjs/node_modules/ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/string-width-cjs/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "license": "MIT" - }, - "node_modules/string-width-cjs/node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/strip-ansi": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", - "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", - "license": "MIT", - "dependencies": 
{ - "ansi-regex": "^6.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/strip-ansi?sponsor=1" - } - }, - "node_modules/strip-ansi-cjs": { - "name": "strip-ansi", - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/strip-ansi-cjs/node_modules/ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/strip-literal": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/strip-literal/-/strip-literal-3.0.0.tgz", - "integrity": "sha512-TcccoMhJOM3OebGhSBEmp3UZ2SfDMZUEBdRA/9ynfLi8yYajyWX3JiXArcJt4Umh4vISpspkQIY8ZZoCqjbviA==", - "dev": true, - "license": "MIT", - "dependencies": { - "js-tokens": "^9.0.1" - }, - "funding": { - "url": "https://github.com/sponsors/antfu" - } - }, - "node_modules/supports-preserve-symlinks-flag": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", - "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", - "license": "MIT", - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/tinybench": { - "version": "2.9.0", - "resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.9.0.tgz", - "integrity": "sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==", - "dev": true, - "license": "MIT" - }, - 
"node_modules/tinyexec": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/tinyexec/-/tinyexec-0.3.2.tgz", - "integrity": "sha512-KQQR9yN7R5+OSwaK0XQoj22pwHoTlgYqmUscPYoknOoWCWfj/5/ABTMRi69FrKU5ffPVh5QcFikpWJI/P1ocHA==", - "dev": true, - "license": "MIT" - }, - "node_modules/tinyglobby": { - "version": "0.2.14", - "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.14.tgz", - "integrity": "sha512-tX5e7OM1HnYr2+a2C/4V0htOcSQcoSTH9KgJnVvNm5zm/cyEWKJ7j7YutsH9CxMdtOkkLFy2AHrMci9IM8IPZQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "fdir": "^6.4.4", - "picomatch": "^4.0.2" - }, - "engines": { - "node": ">=12.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/SuperchupuDev" - } - }, - "node_modules/tinypool": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/tinypool/-/tinypool-1.1.0.tgz", - "integrity": "sha512-7CotroY9a8DKsKprEy/a14aCCm8jYVmR7aFy4fpkZM8sdpNJbKkixuNjgM50yCmip2ezc8z4N7k3oe2+rfRJCQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": "^18.0.0 || >=20.0.0" - } - }, - "node_modules/tinyrainbow": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/tinyrainbow/-/tinyrainbow-2.0.0.tgz", - "integrity": "sha512-op4nsTR47R6p0vMUUoYl/a+ljLFVtlfaXkLQmqfLR1qHma1h/ysYk4hEXZ880bf2CYgTskvTa/e196Vd5dDQXw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=14.0.0" - } - }, - "node_modules/tinyspy": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/tinyspy/-/tinyspy-4.0.3.tgz", - "integrity": "sha512-t2T/WLB2WRgZ9EpE4jgPJ9w+i66UZfDc8wHh0xrwiRNN+UwH98GIJkTeZqX9rg0i0ptwzqW+uYeIF0T4F8LR7A==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=14.0.0" - } - }, - "node_modules/toidentifier": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz", - "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==", - "license": "MIT", - 
"engines": { - "node": ">=0.6" - } - }, - "node_modules/tr46": { - "version": "0.0.3", - "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz", - "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==", - "license": "MIT" - }, - "node_modules/type-is": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/type-is/-/type-is-2.0.1.tgz", - "integrity": "sha512-OZs6gsjF4vMp32qrCbiVSkrFmXtG/AZhY3t0iAMrMBiAZyV9oALtXO8hsrHbMXF9x6L3grlFuwW2oAz7cav+Gw==", - "license": "MIT", - "dependencies": { - "content-type": "^1.0.5", - "media-typer": "^1.1.0", - "mime-types": "^3.0.0" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/typescript": { - "version": "5.8.3", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.8.3.tgz", - "integrity": "sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ==", - "dev": true, - "license": "Apache-2.0", - "bin": { - "tsc": "bin/tsc", - "tsserver": "bin/tsserver" - }, - "engines": { - "node": ">=14.17" - } - }, - "node_modules/undici": { - "version": "7.10.0", - "resolved": "https://registry.npmjs.org/undici/-/undici-7.10.0.tgz", - "integrity": "sha512-u5otvFBOBZvmdjWLVW+5DAc9Nkq8f24g0O9oY7qw2JVIF1VocIFoyz9JFkuVOS2j41AufeO0xnlweJ2RLT8nGw==", - "license": "MIT", - "engines": { - "node": ">=20.18.1" - } - }, - "node_modules/undici-types": { - "version": "7.8.0", - "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-7.8.0.tgz", - "integrity": "sha512-9UJ2xGDvQ43tYyVMpuHlsgApydB8ZKfVYTsLDhXkFL/6gfkp+U8xTGdh8pMJv1SpZna0zxG1DwsKZsreLbXBxw==", - "license": "MIT" - }, - "node_modules/unpipe": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz", - "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==", - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/uri-js": { - 
"version": "4.4.1", - "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", - "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", - "license": "BSD-2-Clause", - "dependencies": { - "punycode": "^2.1.0" - } - }, - "node_modules/uuid": { - "version": "9.0.1", - "resolved": "https://registry.npmjs.org/uuid/-/uuid-9.0.1.tgz", - "integrity": "sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA==", - "funding": [ - "https://github.com/sponsors/broofa", - "https://github.com/sponsors/ctavan" - ], - "license": "MIT", - "bin": { - "uuid": "dist/bin/uuid" - } - }, - "node_modules/vary": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz", - "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==", - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/vite": { - "version": "6.3.5", - "resolved": "https://registry.npmjs.org/vite/-/vite-6.3.5.tgz", - "integrity": "sha512-cZn6NDFE7wdTpINgs++ZJ4N49W2vRp8LCKrn3Ob1kYNtOo21vfDoaV5GzBfLU4MovSAB8uNRm4jgzVQZ+mBzPQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "esbuild": "^0.25.0", - "fdir": "^6.4.4", - "picomatch": "^4.0.2", - "postcss": "^8.5.3", - "rollup": "^4.34.9", - "tinyglobby": "^0.2.13" - }, - "bin": { - "vite": "bin/vite.js" - }, - "engines": { - "node": "^18.0.0 || ^20.0.0 || >=22.0.0" - }, - "funding": { - "url": "https://github.com/vitejs/vite?sponsor=1" - }, - "optionalDependencies": { - "fsevents": "~2.3.3" - }, - "peerDependencies": { - "@types/node": "^18.0.0 || ^20.0.0 || >=22.0.0", - "jiti": ">=1.21.0", - "less": "*", - "lightningcss": "^1.21.0", - "sass": "*", - "sass-embedded": "*", - "stylus": "*", - "sugarss": "*", - "terser": "^5.16.0", - "tsx": "^4.8.1", - "yaml": "^2.4.2" - }, - "peerDependenciesMeta": { - "@types/node": { - "optional": true - }, - "jiti": { - 
"optional": true - }, - "less": { - "optional": true - }, - "lightningcss": { - "optional": true - }, - "sass": { - "optional": true - }, - "sass-embedded": { - "optional": true - }, - "stylus": { - "optional": true - }, - "sugarss": { - "optional": true - }, - "terser": { - "optional": true - }, - "tsx": { - "optional": true - }, - "yaml": { - "optional": true - } - } - }, - "node_modules/vite-node": { - "version": "3.2.3", - "resolved": "https://registry.npmjs.org/vite-node/-/vite-node-3.2.3.tgz", - "integrity": "sha512-gc8aAifGuDIpZHrPjuHyP4dpQmYXqWw7D1GmDnWeNWP654UEXzVfQ5IHPSK5HaHkwB/+p1atpYpSdw/2kOv8iQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "cac": "^6.7.14", - "debug": "^4.4.1", - "es-module-lexer": "^1.7.0", - "pathe": "^2.0.3", - "vite": "^5.0.0 || ^6.0.0 || ^7.0.0-0" - }, - "bin": { - "vite-node": "vite-node.mjs" - }, - "engines": { - "node": "^18.0.0 || ^20.0.0 || >=22.0.0" - }, - "funding": { - "url": "https://opencollective.com/vitest" - } - }, - "node_modules/vitest": { - "version": "3.2.3", - "resolved": "https://registry.npmjs.org/vitest/-/vitest-3.2.3.tgz", - "integrity": "sha512-E6U2ZFXe3N/t4f5BwUaVCKRLHqUpk1CBWeMh78UT4VaTPH/2dyvH6ALl29JTovEPu9dVKr/K/J4PkXgrMbw4Ww==", - "dev": true, - "license": "MIT", - "dependencies": { - "@types/chai": "^5.2.2", - "@vitest/expect": "3.2.3", - "@vitest/mocker": "3.2.3", - "@vitest/pretty-format": "^3.2.3", - "@vitest/runner": "3.2.3", - "@vitest/snapshot": "3.2.3", - "@vitest/spy": "3.2.3", - "@vitest/utils": "3.2.3", - "chai": "^5.2.0", - "debug": "^4.4.1", - "expect-type": "^1.2.1", - "magic-string": "^0.30.17", - "pathe": "^2.0.3", - "picomatch": "^4.0.2", - "std-env": "^3.9.0", - "tinybench": "^2.9.0", - "tinyexec": "^0.3.2", - "tinyglobby": "^0.2.14", - "tinypool": "^1.1.0", - "tinyrainbow": "^2.0.0", - "vite": "^5.0.0 || ^6.0.0 || ^7.0.0-0", - "vite-node": "3.2.3", - "why-is-node-running": "^2.3.0" - }, - "bin": { - "vitest": "vitest.mjs" - }, - "engines": { - "node": "^18.0.0 || 
^20.0.0 || >=22.0.0" - }, - "funding": { - "url": "https://opencollective.com/vitest" - }, - "peerDependencies": { - "@edge-runtime/vm": "*", - "@types/debug": "^4.1.12", - "@types/node": "^18.0.0 || ^20.0.0 || >=22.0.0", - "@vitest/browser": "3.2.3", - "@vitest/ui": "3.2.3", - "happy-dom": "*", - "jsdom": "*" - }, - "peerDependenciesMeta": { - "@edge-runtime/vm": { - "optional": true - }, - "@types/debug": { - "optional": true - }, - "@types/node": { - "optional": true - }, - "@vitest/browser": { - "optional": true - }, - "@vitest/ui": { - "optional": true - }, - "happy-dom": { - "optional": true - }, - "jsdom": { - "optional": true - } - } - }, - "node_modules/webidl-conversions": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz", - "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==", - "license": "BSD-2-Clause" - }, - "node_modules/whatwg-url": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz", - "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", - "license": "MIT", - "dependencies": { - "tr46": "~0.0.3", - "webidl-conversions": "^3.0.0" - } - }, - "node_modules/which": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", - "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", - "license": "ISC", - "dependencies": { - "isexe": "^2.0.0" - }, - "bin": { - "node-which": "bin/node-which" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/why-is-node-running": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.3.0.tgz", - "integrity": "sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==", - "dev": true, - "license": "MIT", - 
"dependencies": { - "siginfo": "^2.0.0", - "stackback": "0.0.2" - }, - "bin": { - "why-is-node-running": "cli.js" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/wrap-ansi": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", - "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", - "license": "MIT", - "dependencies": { - "ansi-styles": "^6.1.0", - "string-width": "^5.0.1", - "strip-ansi": "^7.0.1" - }, - "engines": { - "node": ">=12" - }, - "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" - } - }, - "node_modules/wrap-ansi-cjs": { - "name": "wrap-ansi", - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", - "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", - "license": "MIT", - "dependencies": { - "ansi-styles": "^4.0.0", - "string-width": "^4.1.0", - "strip-ansi": "^6.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/wrap-ansi?sponsor=1" - } - }, - "node_modules/wrap-ansi-cjs/node_modules/ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/wrap-ansi-cjs/node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "license": "MIT", - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - 
"node_modules/wrap-ansi-cjs/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "license": "MIT" - }, - "node_modules/wrap-ansi-cjs/node_modules/string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "license": "MIT", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/wrap-ansi-cjs/node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/wrappy": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", - "license": "ISC" - }, - "node_modules/ws": { - "version": "8.18.2", - "license": "MIT", - "engines": { - "node": ">=10.0.0" - }, - "peerDependencies": { - "bufferutil": "^4.0.1", - "utf-8-validate": ">=5.0.2" - }, - "peerDependenciesMeta": { - "bufferutil": { - "optional": true - }, - "utf-8-validate": { - "optional": true - } - } - }, - "node_modules/y18n": { - "version": "5.0.8", - "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", - "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", - "license": "ISC", - "engines": { - "node": ">=10" - } - }, - 
"node_modules/yargs": { - "version": "17.7.2", - "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", - "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", - "license": "MIT", - "dependencies": { - "cliui": "^8.0.1", - "escalade": "^3.1.1", - "get-caller-file": "^2.0.5", - "require-directory": "^2.1.1", - "string-width": "^4.2.3", - "y18n": "^5.0.5", - "yargs-parser": "^21.1.1" - }, - "engines": { - "node": ">=12" - } - }, - "node_modules/yargs-parser": { - "version": "21.1.1", - "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", - "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", - "license": "ISC", - "engines": { - "node": ">=12" - } - }, - "node_modules/yargs/node_modules/ansi-regex": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", - "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", - "license": "MIT", - "engines": { - "node": ">=8" - } - }, - "node_modules/yargs/node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "license": "MIT" - }, - "node_modules/yargs/node_modules/string-width": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", - "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", - "license": "MIT", - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/yargs/node_modules/strip-ansi": { - "version": "6.0.1", - "resolved": 
"https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", - "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", - "license": "MIT", - "dependencies": { - "ansi-regex": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/zod": { - "version": "3.25.64", - "resolved": "https://registry.npmjs.org/zod/-/zod-3.25.64.tgz", - "integrity": "sha512-hbP9FpSZf7pkS7hRVUrOjhwKJNyampPgtXKc3AN6DsWtoHsg2Sb4SQaS4Tcay380zSwd2VPo9G9180emBACp5g==", - "license": "MIT", - "funding": { - "url": "https://github.com/sponsors/colinhacks" - } - }, - "node_modules/zod-to-json-schema": { - "version": "3.24.5", - "resolved": "https://registry.npmjs.org/zod-to-json-schema/-/zod-to-json-schema-3.24.5.tgz", - "integrity": "sha512-/AuWwMP+YqiPbsJx5D6TfgRTc4kTLjsh5SOcd4bLsfUg2RcEXrFMJl1DGgdHy2aCfsIA/cr/1JM0xcB2GZji8g==", - "license": "ISC", - "peerDependencies": { - "zod": "^3.24.1" - } - } - } -} diff --git a/packages/core/package.json b/packages/core/package.json index 3cb2f112b..abda46546 100644 --- a/packages/core/package.json +++ b/packages/core/package.json @@ -20,7 +20,7 @@ "dist" ], "dependencies": { - "@google/genai": "1.8.0", + "@google/genai": "1.9.0", "@modelcontextprotocol/sdk": "^1.11.0", "@opentelemetry/api": "^1.9.0", "@opentelemetry/exporter-logs-otlp-grpc": "^0.52.0", @@ -28,15 +28,15 @@ "@opentelemetry/exporter-trace-otlp-grpc": "^0.52.0", "@opentelemetry/instrumentation-http": "^0.52.0", "@opentelemetry/sdk-node": "^0.52.0", - "@types/glob": "^8.1.0", "@types/html-to-text": "^9.0.4", "ajv": "^8.17.1", + "chardet": "^2.1.0", "diff": "^7.0.0", "dotenv": "^17.1.0", - "gaxios": "^7.1.1", "glob": "^10.4.5", "google-auth-library": "^9.11.0", "html-to-text": "^9.0.5", + "https-proxy-agent": "^7.0.6", "ignore": "^7.0.0", "micromatch": "^4.0.8", "open": "^10.1.2", @@ -44,7 +44,6 @@ "shell-quote": "^1.8.3", "simple-git": "^3.28.0", "strip-ansi": "^7.1.0", - "tiktoken": "^1.0.21", "undici": "^7.10.0", "ws": 
"^8.18.0" }, diff --git a/packages/core/src/code_assist/codeAssist.ts b/packages/core/src/code_assist/codeAssist.ts index 23dfe4034..f9c6a7a34 100644 --- a/packages/core/src/code_assist/codeAssist.ts +++ b/packages/core/src/code_assist/codeAssist.ts @@ -21,8 +21,14 @@ export async function createCodeAssistContentGenerator( authType === AuthType.CLOUD_SHELL ) { const authClient = await getOauthClient(authType, config); - const projectId = await setupUser(authClient); - return new CodeAssistServer(authClient, projectId, httpOptions, sessionId); + const userData = await setupUser(authClient); + return new CodeAssistServer( + authClient, + userData.projectId, + httpOptions, + sessionId, + userData.userTier, + ); } throw new Error(`Unsupported authType: ${authType}`); diff --git a/packages/core/src/code_assist/oauth2.test.ts b/packages/core/src/code_assist/oauth2.test.ts index cea04c9c0..a67ec0496 100644 --- a/packages/core/src/code_assist/oauth2.test.ts +++ b/packages/core/src/code_assist/oauth2.test.ts @@ -6,7 +6,11 @@ import { describe, it, expect, vi, beforeEach, afterEach, Mock } from 'vitest'; import { getOauthClient } from './oauth2.js'; -import { getCachedGoogleAccount } from '../utils/user_account.js'; +import { + getCachedGoogleAccount, + cacheGoogleAccount, + clearCachedGoogleAccount, +} from '../utils/user_account.js'; import { OAuth2Client, Compute } from 'google-auth-library'; import * as fs from 'fs'; import * as path from 'path'; @@ -31,9 +35,20 @@ vi.mock('http'); vi.mock('open'); vi.mock('crypto'); vi.mock('node:readline'); +vi.mock('../utils/browser.js', () => ({ + shouldAttemptBrowserLaunch: () => true, +})); + +vi.mock('../utils/user_account.js', () => ({ + getCachedGoogleAccount: vi.fn(), + cacheGoogleAccount: vi.fn(), + clearCachedGoogleAccount: vi.fn(), +})); const mockConfig = { getNoBrowser: () => false, + getProxy: () => 'http://test.proxy.com:8080', + isBrowserLaunchSuppressed: () => false, } as unknown as Config; // Mock fetch globally @@ 
-47,11 +62,18 @@ describe('oauth2', () => { path.join(os.tmpdir(), 'gemini-cli-test-home-'), ); (os.homedir as Mock).mockReturnValue(tempHomeDir); + + // Reset user account mocks + (getCachedGoogleAccount as Mock).mockReset(); + (cacheGoogleAccount as Mock).mockReset(); + (clearCachedGoogleAccount as Mock).mockReset(); }); afterEach(() => { fs.rmSync(tempHomeDir, { recursive: true, force: true }); vi.clearAllMocks(); delete process.env.CLOUD_SHELL; + delete process.env.GOOGLE_GENAI_USE_GCA; + delete process.env.GOOGLE_CLOUD_ACCESS_TOKEN; }); it('should perform a web login', async () => { @@ -82,7 +104,7 @@ describe('oauth2', () => { ); vi.spyOn(crypto, 'randomBytes').mockReturnValue(mockState as never); - (open as Mock).mockImplementation(async () => ({}) as never); + (open as Mock).mockImplementation(async () => ({ on: vi.fn() }) as never); // Mock the UserInfo API response (global.fetch as Mock).mockResolvedValue({ @@ -92,6 +114,28 @@ describe('oauth2', () => { .mockResolvedValue({ email: 'test-google-account@gmail.com' }), } as unknown as Response); + // Mock getCachedGoogleAccount to return null initially + (getCachedGoogleAccount as Mock).mockReturnValue(null); + + // Mock cacheGoogleAccount to simulate storing the email + (cacheGoogleAccount as Mock).mockImplementation(async (email: string) => { + // Create the google_accounts.json file in the test directory + const googleAccountPath = path.join( + tempHomeDir, + '.gemini', + 'google_accounts.json', + ); + await fs.promises.mkdir(path.dirname(googleAccountPath), { + recursive: true, + }); + await fs.promises.writeFile( + googleAccountPath, + JSON.stringify({ active: email, old: [] }, null, 2), + ); + // Update the mock to return the email + (getCachedGoogleAccount as Mock).mockReturnValue(email); + }); + let requestCallback!: http.RequestListener< typeof http.IncomingMessage, typeof http.ServerResponse @@ -104,7 +148,7 @@ describe('oauth2', () => { let capturedPort = 0; const mockHttpServer = { - listen: 
vi.fn((port: number, callback?: () => void) => { + listen: vi.fn((port: number, _host: string, callback?: () => void) => { capturedPort = port; if (callback) { callback(); @@ -158,7 +202,7 @@ describe('oauth2', () => { // Verify Google Account was cached const googleAccountPath = path.join( tempHomeDir, - '.qwen', + '.gemini', 'google_accounts.json', ); expect(fs.existsSync(googleAccountPath)).toBe(true); @@ -175,6 +219,8 @@ describe('oauth2', () => { it('should perform login with user code', async () => { const mockConfigWithNoBrowser = { getNoBrowser: () => true, + getProxy: () => 'http://test.proxy.com:8080', + isBrowserLaunchSuppressed: () => true, } as unknown as Config; const mockCodeVerifier = { @@ -234,7 +280,7 @@ describe('oauth2', () => { expect(mockGetToken).toHaveBeenCalledWith({ code: mockCode, codeVerifier: mockCodeVerifier.codeVerifier, - redirect_uri: 'https://sdk.cloud.google.com/authcode_cloudcode.html', + redirect_uri: 'https://codeassist.google.com/authcode', }); expect(mockSetCredentials).toHaveBeenCalledWith(mockTokens); @@ -246,13 +292,6 @@ describe('oauth2', () => { let mockComputeClient: Compute; beforeEach(() => { - vi.spyOn(os, 'homedir').mockReturnValue('/user/home'); - vi.spyOn(fs.promises, 'mkdir').mockResolvedValue(undefined); - vi.spyOn(fs.promises, 'writeFile').mockResolvedValue(undefined); - vi.spyOn(fs.promises, 'readFile').mockRejectedValue( - new Error('File not found'), - ); // Default to no cached creds - mockGetAccessToken.mockResolvedValue({ token: 'test-access-token' }); mockComputeClient = { credentials: { refresh_token: 'test-refresh-token' }, @@ -264,9 +303,9 @@ describe('oauth2', () => { it('should attempt to load cached credentials first', async () => { const cachedCreds = { refresh_token: 'cached-token' }; - vi.spyOn(fs.promises, 'readFile').mockResolvedValue( - JSON.stringify(cachedCreds), - ); + const credsPath = path.join(tempHomeDir, '.gemini', 'oauth_creds.json'); + await 
fs.promises.mkdir(path.dirname(credsPath), { recursive: true }); + await fs.promises.writeFile(credsPath, JSON.stringify(cachedCreds)); const mockClient = { setCredentials: vi.fn(), @@ -282,10 +321,6 @@ describe('oauth2', () => { await getOauthClient(AuthType.LOGIN_WITH_GOOGLE, mockConfig); - expect(fs.promises.readFile).toHaveBeenCalledWith( - '/user/home/.qwen/oauth_creds.json', - 'utf-8', - ); expect(mockClient.setCredentials).toHaveBeenCalledWith(cachedCreds); expect(mockClient.getAccessToken).toHaveBeenCalled(); expect(mockClient.getTokenInfo).toHaveBeenCalled(); @@ -306,7 +341,8 @@ describe('oauth2', () => { await getOauthClient(AuthType.CLOUD_SHELL, mockConfig); - expect(fs.promises.writeFile).not.toHaveBeenCalled(); + const credsPath = path.join(tempHomeDir, '.gemini', 'oauth_creds.json'); + expect(fs.existsSync(credsPath)).toBe(false); }); it('should return the Compute client on successful ADC authentication', async () => { @@ -325,4 +361,144 @@ describe('oauth2', () => { ); }); }); + + describe('with GCP environment variables', () => { + it('should use GOOGLE_CLOUD_ACCESS_TOKEN when GOOGLE_GENAI_USE_GCA is true', async () => { + process.env.GOOGLE_GENAI_USE_GCA = 'true'; + process.env.GOOGLE_CLOUD_ACCESS_TOKEN = 'gcp-access-token'; + + const mockSetCredentials = vi.fn(); + const mockGetAccessToken = vi + .fn() + .mockResolvedValue({ token: 'gcp-access-token' }); + const mockOAuth2Client = { + setCredentials: mockSetCredentials, + getAccessToken: mockGetAccessToken, + on: vi.fn(), + } as unknown as OAuth2Client; + (OAuth2Client as unknown as Mock).mockImplementation( + () => mockOAuth2Client, + ); + + // Mock the UserInfo API response for fetchAndCacheUserInfo + (global.fetch as Mock).mockResolvedValue({ + ok: true, + json: vi + .fn() + .mockResolvedValue({ email: 'test-gcp-account@gmail.com' }), + } as unknown as Response); + + // Mock cacheGoogleAccount to simulate storing the email + (cacheGoogleAccount as Mock).mockImplementation(async (email: string) 
=> { + // Create the google_accounts.json file in the test directory + const googleAccountPath = path.join( + tempHomeDir, + '.gemini', + 'google_accounts.json', + ); + await fs.promises.mkdir(path.dirname(googleAccountPath), { + recursive: true, + }); + await fs.promises.writeFile( + googleAccountPath, + JSON.stringify({ active: email, old: [] }, null, 2), + ); + }); + + const client = await getOauthClient( + AuthType.LOGIN_WITH_GOOGLE, + mockConfig, + ); + + expect(client).toBe(mockOAuth2Client); + expect(mockSetCredentials).toHaveBeenCalledWith({ + access_token: 'gcp-access-token', + }); + + // Verify fetchAndCacheUserInfo was effectively called + expect(mockGetAccessToken).toHaveBeenCalled(); + expect(global.fetch).toHaveBeenCalledWith( + 'https://www.googleapis.com/oauth2/v2/userinfo', + { + headers: { + Authorization: 'Bearer gcp-access-token', + }, + }, + ); + + // Verify Google Account was cached + const googleAccountPath = path.join( + tempHomeDir, + '.gemini', + 'google_accounts.json', + ); + const cachedContent = fs.readFileSync(googleAccountPath, 'utf-8'); + expect(JSON.parse(cachedContent)).toEqual({ + active: 'test-gcp-account@gmail.com', + old: [], + }); + }); + + it('should not use GCP token if GOOGLE_CLOUD_ACCESS_TOKEN is not set', async () => { + process.env.GOOGLE_GENAI_USE_GCA = 'true'; + + const mockSetCredentials = vi.fn(); + const mockGetAccessToken = vi + .fn() + .mockResolvedValue({ token: 'cached-access-token' }); + const mockGetTokenInfo = vi.fn().mockResolvedValue({}); + const mockOAuth2Client = { + setCredentials: mockSetCredentials, + getAccessToken: mockGetAccessToken, + getTokenInfo: mockGetTokenInfo, + on: vi.fn(), + } as unknown as OAuth2Client; + (OAuth2Client as unknown as Mock).mockImplementation( + () => mockOAuth2Client, + ); + + // Make it fall through to cached credentials path + const cachedCreds = { refresh_token: 'cached-token' }; + const credsPath = path.join(tempHomeDir, '.gemini', 'oauth_creds.json'); + await 
fs.promises.mkdir(path.dirname(credsPath), { recursive: true }); + await fs.promises.writeFile(credsPath, JSON.stringify(cachedCreds)); + + await getOauthClient(AuthType.LOGIN_WITH_GOOGLE, mockConfig); + + // It should be called with the cached credentials, not the GCP access token. + expect(mockSetCredentials).toHaveBeenCalledTimes(1); + expect(mockSetCredentials).toHaveBeenCalledWith(cachedCreds); + }); + + it('should not use GCP token if GOOGLE_GENAI_USE_GCA is not set', async () => { + process.env.GOOGLE_CLOUD_ACCESS_TOKEN = 'gcp-access-token'; + + const mockSetCredentials = vi.fn(); + const mockGetAccessToken = vi + .fn() + .mockResolvedValue({ token: 'cached-access-token' }); + const mockGetTokenInfo = vi.fn().mockResolvedValue({}); + const mockOAuth2Client = { + setCredentials: mockSetCredentials, + getAccessToken: mockGetAccessToken, + getTokenInfo: mockGetTokenInfo, + on: vi.fn(), + } as unknown as OAuth2Client; + (OAuth2Client as unknown as Mock).mockImplementation( + () => mockOAuth2Client, + ); + + // Make it fall through to cached credentials path + const cachedCreds = { refresh_token: 'cached-token' }; + const credsPath = path.join(tempHomeDir, '.gemini', 'oauth_creds.json'); + await fs.promises.mkdir(path.dirname(credsPath), { recursive: true }); + await fs.promises.writeFile(credsPath, JSON.stringify(cachedCreds)); + + await getOauthClient(AuthType.LOGIN_WITH_GOOGLE, mockConfig); + + // It should be called with the cached credentials, not the GCP access token. 
+ expect(mockSetCredentials).toHaveBeenCalledTimes(1); + expect(mockSetCredentials).toHaveBeenCalledWith(cachedCreds); + }); + }); }); diff --git a/packages/core/src/code_assist/oauth2.ts b/packages/core/src/code_assist/oauth2.ts index 160f4ff88..f10464160 100644 --- a/packages/core/src/code_assist/oauth2.ts +++ b/packages/core/src/code_assist/oauth2.ts @@ -53,7 +53,7 @@ const SIGN_IN_SUCCESS_URL = const SIGN_IN_FAILURE_URL = 'https://developers.google.com/gemini-code-assist/auth_failure_gemini'; -const GEMINI_DIR = '.qwen'; +const GEMINI_DIR = '.gemini'; const CREDENTIAL_FILENAME = 'oauth_creds.json'; /** @@ -73,8 +73,22 @@ export async function getOauthClient( const client = new OAuth2Client({ clientId: OAUTH_CLIENT_ID, clientSecret: OAUTH_CLIENT_SECRET, + transporterOptions: { + proxy: config.getProxy(), + }, }); + if ( + process.env.GOOGLE_GENAI_USE_GCA && + process.env.GOOGLE_CLOUD_ACCESS_TOKEN + ) { + client.setCredentials({ + access_token: process.env.GOOGLE_CLOUD_ACCESS_TOKEN, + }); + await fetchAndCacheUserInfo(client); + return client; + } + client.on('tokens', async (tokens: Credentials) => { await cacheCredentials(tokens); }); @@ -118,7 +132,7 @@ export async function getOauthClient( } } - if (config.getNoBrowser()) { + if (config.isBrowserLaunchSuppressed()) { let success = false; const maxRetries = 2; for (let i = 0; !success && i < maxRetries; i++) { @@ -136,13 +150,35 @@ export async function getOauthClient( } else { const webLogin = await authWithWeb(client); - // This does basically nothing, as it isn't show to the user. console.log( `\n\nCode Assist login required.\n` + `Attempting to open authentication page in your browser.\n` + `Otherwise navigate to:\n\n${webLogin.authUrl}\n\n`, ); - await open(webLogin.authUrl); + try { + // Attempt to open the authentication URL in the default browser. 
+ // We do not use the `wait` option here because the main script's execution + // is already paused by `loginCompletePromise`, which awaits the server callback. + const childProcess = await open(webLogin.authUrl); + + // IMPORTANT: Attach an error handler to the returned child process. + // Without this, if `open` fails to spawn a process (e.g., `xdg-open` is not found + // in a minimal Docker container), it will emit an unhandled 'error' event, + // causing the entire Node.js process to crash. + childProcess.on('error', (_) => { + console.error( + 'Failed to open browser automatically. Please try running again with NO_BROWSER=true set.', + ); + process.exit(1); + }); + } catch (err) { + console.error( + 'An unexpected error occurred while trying to open the browser:', + err, + '\nPlease try running again with NO_BROWSER=true set.', + ); + process.exit(1); + } console.log('Waiting for authentication...'); await webLogin.loginCompletePromise; @@ -152,7 +188,7 @@ export async function getOauthClient( } async function authWithUserCode(client: OAuth2Client): Promise { - const redirectUri = 'https://sdk.cloud.google.com/authcode_cloudcode.html'; + const redirectUri = 'https://codeassist.google.com/authcode'; const codeVerifier = await client.generateCodeVerifierAsync(); const state = crypto.randomBytes(32).toString('hex'); const authUrl: string = client.generateAuthUrl({ @@ -199,6 +235,12 @@ async function authWithUserCode(client: OAuth2Client): Promise { async function authWithWeb(client: OAuth2Client): Promise { const port = await getAvailablePort(); + // The hostname used for the HTTP server binding (e.g., '0.0.0.0' in Docker). + const host = process.env.OAUTH_CALLBACK_HOST || 'localhost'; + // The `redirectUri` sent to Google's authorization server MUST use a loopback IP literal + // (i.e., 'localhost' or '127.0.0.1'). 
This is a strict security policy for credentials of + // type 'Desktop app' or 'Web application' (when using loopback flow) to mitigate + // authorization code interception attacks. const redirectUri = `http://localhost:${port}/oauth2callback`; const state = crypto.randomBytes(32).toString('hex'); const authUrl = client.generateAuthUrl({ @@ -256,7 +298,7 @@ async function authWithWeb(client: OAuth2Client): Promise { server.close(); } }); - server.listen(port); + server.listen(port, host); }); return { @@ -269,6 +311,16 @@ export function getAvailablePort(): Promise { return new Promise((resolve, reject) => { let port = 0; try { + const portStr = process.env.OAUTH_CALLBACK_PORT; + if (portStr) { + port = parseInt(portStr, 10); + if (isNaN(port) || port <= 0 || port > 65535) { + return reject( + new Error(`Invalid value for OAUTH_CALLBACK_PORT: "${portStr}"`), + ); + } + return resolve(port); + } const server = net.createServer(); server.listen(0, () => { const address = server.address()! 
as net.AddressInfo; diff --git a/packages/core/src/code_assist/server.test.ts b/packages/core/src/code_assist/server.test.ts index 6944af47d..6246fd4e1 100644 --- a/packages/core/src/code_assist/server.test.ts +++ b/packages/core/src/code_assist/server.test.ts @@ -7,6 +7,7 @@ import { describe, it, expect, vi } from 'vitest'; import { CodeAssistServer } from './server.js'; import { OAuth2Client } from 'google-auth-library'; +import { UserTierId } from './types.js'; vi.mock('google-auth-library'); @@ -115,7 +116,14 @@ describe('CodeAssistServer', () => { const client = new OAuth2Client(); const server = new CodeAssistServer(client, 'test-project'); const mockResponse = { - // TODO: Add mock response + currentTier: { + id: UserTierId.FREE, + name: 'Free', + description: 'free tier', + }, + allowedTiers: [], + ineligibleTiers: [], + cloudaicompanionProject: 'projects/test', }; vi.spyOn(server, 'requestPost').mockResolvedValue(mockResponse); @@ -127,7 +135,7 @@ describe('CodeAssistServer', () => { 'loadCodeAssist', expect.any(Object), ); - expect(response).toBe(mockResponse); + expect(response).toEqual(mockResponse); }); it('should return 0 for countTokens', async () => { diff --git a/packages/core/src/code_assist/server.ts b/packages/core/src/code_assist/server.ts index 3923990ae..7af643f7c 100644 --- a/packages/core/src/code_assist/server.ts +++ b/packages/core/src/code_assist/server.ts @@ -9,7 +9,7 @@ import { CodeAssistGlobalUserSettingResponse, LoadCodeAssistRequest, LoadCodeAssistResponse, - LongrunningOperationResponse, + LongRunningOperationResponse, OnboardUserRequest, SetCodeAssistGlobalUserSettingRequest, } from './types.js'; @@ -32,23 +32,6 @@ import { toCountTokenRequest, toGenerateContentRequest, } from './converter.js'; -import { Readable } from 'node:stream'; - -interface ErrorData { - error?: { - message?: string; - }; -} - -interface GaxiosResponse { - status: number; - data: unknown; -} - -interface StreamError extends Error { - status?: number; - 
response?: GaxiosResponse; -} /** HTTP options to be used in each of the requests. */ export interface HttpOptions { @@ -56,17 +39,16 @@ export interface HttpOptions { headers?: Record; } -export const CODE_ASSIST_ENDPOINT = 'https://localhost:0'; // Disable Google Code Assist API Request +export const CODE_ASSIST_ENDPOINT = 'https://cloudcode-pa.googleapis.com'; export const CODE_ASSIST_API_VERSION = 'v1internal'; export class CodeAssistServer implements ContentGenerator { - private userTier: UserTierId | undefined = undefined; - constructor( readonly client: OAuth2Client, readonly projectId?: string, readonly httpOptions: HttpOptions = {}, readonly sessionId?: string, + readonly userTier?: UserTierId, ) {} async generateContentStream( @@ -97,8 +79,8 @@ export class CodeAssistServer implements ContentGenerator { async onboardUser( req: OnboardUserRequest, - ): Promise { - return await this.requestPost( + ): Promise { + return await this.requestPost( 'onboardUser', req, ); @@ -196,45 +178,8 @@ export class CodeAssistServer implements ContentGenerator { }); return (async function* (): AsyncGenerator { - // Convert ReadableStream to Node.js stream if needed - let nodeStream: NodeJS.ReadableStream; - - if (res.data instanceof ReadableStream) { - // Convert Web ReadableStream to Node.js Readable stream - // eslint-disable-next-line @typescript-eslint/no-explicit-any - nodeStream = Readable.fromWeb(res.data as any); - } else if ( - res.data && - typeof (res.data as NodeJS.ReadableStream).on === 'function' - ) { - // Already a Node.js stream - nodeStream = res.data as NodeJS.ReadableStream; - } else { - // If res.data is not a stream, it might be an error response - // Try to extract error information from the response - let errorMessage = - 'Response data is not a readable stream. 
This may indicate a server error or quota issue.'; - - if (res.data && typeof res.data === 'object') { - // Check if this is an error response with error details - const errorData = res.data as ErrorData; - if (errorData.error?.message) { - errorMessage = errorData.error.message; - } else if (typeof errorData === 'string') { - errorMessage = errorData; - } - } - - // Create an error that looks like a quota error if it contains quota information - const error: StreamError = new Error(errorMessage); - // Add status and response properties so it can be properly handled by retry logic - error.status = res.status; - error.response = res; - throw error; - } - const rl = readline.createInterface({ - input: nodeStream, + input: res.data as NodeJS.ReadableStream, crlfDelay: Infinity, // Recognizes '\r\n' and '\n' as line breaks }); @@ -256,40 +201,6 @@ export class CodeAssistServer implements ContentGenerator { })(); } - async getTier(): Promise { - if (this.userTier === undefined) { - await this.detectUserTier(); - } - return this.userTier; - } - - private async detectUserTier(): Promise { - try { - // Reset user tier when detection runs - this.userTier = undefined; - - // Only attempt tier detection if we have a project ID - if (this.projectId) { - const loadRes = await this.loadCodeAssist({ - cloudaicompanionProject: this.projectId, - metadata: { - ideType: 'IDE_UNSPECIFIED', - platform: 'PLATFORM_UNSPECIFIED', - pluginType: 'GEMINI', - duetProject: this.projectId, - }, - }); - if (loadRes.currentTier) { - this.userTier = loadRes.currentTier.id; - } - } - } catch (error) { - // Silently fail - this is not critical functionality - // We'll default to FREE tier behavior if tier detection fails - console.debug('User tier detection failed:', error); - } - } - getMethodUrl(method: string): string { const endpoint = process.env.CODE_ASSIST_ENDPOINT ?? 
CODE_ASSIST_ENDPOINT; return `${endpoint}/${CODE_ASSIST_API_VERSION}:${method}`; diff --git a/packages/core/src/code_assist/setup.test.ts b/packages/core/src/code_assist/setup.test.ts index 479abae05..6db5fd88e 100644 --- a/packages/core/src/code_assist/setup.test.ts +++ b/packages/core/src/code_assist/setup.test.ts @@ -65,7 +65,10 @@ describe('setupUser', () => { expect.any(Object), undefined, ); - expect(projectId).toBe('server-project'); + expect(projectId).toEqual({ + projectId: 'server-project', + userTier: 'standard-tier', + }); }); it('should throw ProjectIdRequiredError when no project ID is available', async () => { diff --git a/packages/core/src/code_assist/setup.ts b/packages/core/src/code_assist/setup.ts index 3c7b81b08..8831d24b9 100644 --- a/packages/core/src/code_assist/setup.ts +++ b/packages/core/src/code_assist/setup.ts @@ -22,12 +22,17 @@ export class ProjectIdRequiredError extends Error { } } +export interface UserData { + projectId: string; + userTier: UserTierId; +} + /** * * @param projectId the user's project id, if any * @returns the user's actual project id */ -export async function setupUser(client: OAuth2Client): Promise { +export async function setupUser(client: OAuth2Client): Promise { let projectId = process.env.GOOGLE_CLOUD_PROJECT || undefined; const caServer = new CodeAssistServer(client, projectId); @@ -64,7 +69,10 @@ export async function setupUser(client: OAuth2Client): Promise { await new Promise((f) => setTimeout(f, 5000)); lroRes = await caServer.onboardUser(onboardReq); } - return lroRes.response?.cloudaicompanionProject?.id || ''; + return { + projectId: lroRes.response?.cloudaicompanionProject?.id || '', + userTier: tier.id, + }; } function getOnboardTier(res: LoadCodeAssistResponse): GeminiUserTier { diff --git a/packages/core/src/code_assist/types.ts b/packages/core/src/code_assist/types.ts index 4c395e577..367186857 100644 --- a/packages/core/src/code_assist/types.ts +++ b/packages/core/src/code_assist/types.ts @@ 
-127,10 +127,10 @@ export interface OnboardUserRequest { } /** - * Represents LongrunningOperation proto + * Represents LongRunningOperation proto * http://google3/google/longrunning/operations.proto;rcl=698857719;l=107 */ -export interface LongrunningOperationResponse { +export interface LongRunningOperationResponse { name: string; done?: boolean; response?: OnboardUserResponse; diff --git a/packages/core/src/config/config.test.ts b/packages/core/src/config/config.test.ts index d903c531c..3f0b3db5b 100644 --- a/packages/core/src/config/config.test.ts +++ b/packages/core/src/config/config.test.ts @@ -18,13 +18,12 @@ import { } from '../core/contentGenerator.js'; import { GeminiClient } from '../core/client.js'; import { GitService } from '../services/gitService.js'; -import { loadServerHierarchicalMemory } from '../utils/memoryDiscovery.js'; // Mock dependencies that might be called during Config construction or createServerConfig vi.mock('../tools/tool-registry', () => { const ToolRegistryMock = vi.fn(); ToolRegistryMock.prototype.registerTool = vi.fn(); - ToolRegistryMock.prototype.discoverTools = vi.fn(); + ToolRegistryMock.prototype.discoverAllTools = vi.fn(); ToolRegistryMock.prototype.getAllTools = vi.fn(() => []); // Mock methods if needed ToolRegistryMock.prototype.getTool = vi.fn(); ToolRegistryMock.prototype.getFunctionDeclarations = vi.fn(() => []); @@ -48,9 +47,9 @@ vi.mock('../tools/read-many-files'); vi.mock('../tools/memoryTool', () => ({ MemoryTool: vi.fn(), setGeminiMdFilename: vi.fn(), - getCurrentGeminiMdFilename: vi.fn(() => 'QWEN.md'), // Mock the original filename - DEFAULT_CONTEXT_FILENAME: 'QWEN.md', - GEMINI_CONFIG_DIR: '.qwen', + getCurrentGeminiMdFilename: vi.fn(() => 'GEMINI.md'), // Mock the original filename + DEFAULT_CONTEXT_FILENAME: 'GEMINI.md', + GEMINI_CONFIG_DIR: '.gemini', })); vi.mock('../core/contentGenerator.js', async (importOriginal) => { @@ -93,7 +92,7 @@ describe('Server Config (config.ts)', () => { const QUESTION = 'test 
question'; const FULL_CONTEXT = false; const USER_MEMORY = 'Test User Memory'; - const TELEMETRY_SETTINGS = { enabled: true }; + const TELEMETRY_SETTINGS = { enabled: false }; const EMBEDDING_MODEL = 'gemini-embedding'; const SESSION_ID = 'test-session-id'; const baseParams: ConfigParameters = { @@ -151,14 +150,12 @@ describe('Server Config (config.ts)', () => { apiKey: 'test-key', }; - (createContentGeneratorConfig as Mock).mockResolvedValue( - mockContentConfig, - ); + (createContentGeneratorConfig as Mock).mockReturnValue(mockContentConfig); await config.refreshAuth(authType); expect(createContentGeneratorConfig).toHaveBeenCalledWith( - MODEL, // Should be called with the original model 'gemini-pro' + config, authType, ); // Verify that contentGeneratorConfig is updated with the new model @@ -234,11 +231,11 @@ describe('Server Config (config.ts)', () => { expect(config.getTelemetryEnabled()).toBe(false); }); - it('Config constructor should default telemetry to false if not provided', () => { + it('Config constructor should default telemetry to default value if not provided', () => { const paramsWithoutTelemetry: ConfigParameters = { ...baseParams }; delete paramsWithoutTelemetry.telemetry; const config = new Config(paramsWithoutTelemetry); - expect(config.getTelemetryEnabled()).toBe(false); + expect(config.getTelemetryEnabled()).toBe(TELEMETRY_SETTINGS.enabled); }); it('should have a getFileService method that returns FileDiscoveryService', () => { @@ -285,20 +282,20 @@ describe('Server Config (config.ts)', () => { expect(config.getTelemetryLogPromptsEnabled()).toBe(false); }); - it('should return default logPrompts setting (false) if not provided', () => { + it('should return default logPrompts setting (true) if not provided', () => { const params: ConfigParameters = { ...baseParams, telemetry: { enabled: true }, }; const config = new Config(params); - expect(config.getTelemetryLogPromptsEnabled()).toBe(false); + 
expect(config.getTelemetryLogPromptsEnabled()).toBe(true); }); - it('should return default logPrompts setting (false) if telemetry object is not provided', () => { + it('should return default logPrompts setting (true) if telemetry object is not provided', () => { const paramsWithoutTelemetry: ConfigParameters = { ...baseParams }; delete paramsWithoutTelemetry.telemetry; const config = new Config(paramsWithoutTelemetry); - expect(config.getTelemetryLogPromptsEnabled()).toBe(false); + expect(config.getTelemetryLogPromptsEnabled()).toBe(true); }); it('should return default telemetry target if telemetry object is not provided', () => { @@ -315,38 +312,4 @@ describe('Server Config (config.ts)', () => { expect(config.getTelemetryOtlpEndpoint()).toBe(DEFAULT_OTLP_ENDPOINT); }); }); - - describe('refreshMemory', () => { - it('should update memory and file count on successful refresh', async () => { - const config = new Config(baseParams); - const mockMemoryData = { - memoryContent: 'new memory content', - fileCount: 5, - }; - - (loadServerHierarchicalMemory as Mock).mockResolvedValue(mockMemoryData); - - const result = await config.refreshMemory(); - - expect(loadServerHierarchicalMemory).toHaveBeenCalledWith( - config.getWorkingDir(), - config.getDebugMode(), - config.getFileService(), - config.getExtensionContextFilePaths(), - ); - - expect(config.getUserMemory()).toBe(mockMemoryData.memoryContent); - expect(config.getGeminiMdFileCount()).toBe(mockMemoryData.fileCount); - expect(result).toEqual(mockMemoryData); - }); - - it('should propagate errors from loadServerHierarchicalMemory', async () => { - const config = new Config(baseParams); - const testError = new Error('Failed to load memory'); - - (loadServerHierarchicalMemory as Mock).mockRejectedValue(testError); - - await expect(config.refreshMemory()).rejects.toThrow(testError); - }); - }); }); diff --git a/packages/core/src/config/config.ts b/packages/core/src/config/config.ts index 9ccbfa2ec..e7555477a 100644 --- 
a/packages/core/src/config/config.ts +++ b/packages/core/src/config/config.ts @@ -11,7 +11,7 @@ import { ContentGeneratorConfig, createContentGeneratorConfig, } from '../core/contentGenerator.js'; -import { UserTierId } from '../code_assist/types.js'; +import { PromptRegistry } from '../prompts/prompt-registry.js'; import { ToolRegistry } from '../tools/tool-registry.js'; import { LSTool } from '../tools/ls.js'; import { ReadFileTool } from '../tools/read-file.js'; @@ -27,21 +27,29 @@ import { setGeminiMdFilename, GEMINI_CONFIG_DIR as GEMINI_DIR, } from '../tools/memoryTool.js'; +import { WebSearchTool } from '../tools/web-search.js'; import { GeminiClient } from '../core/client.js'; import { FileDiscoveryService } from '../services/fileDiscoveryService.js'; import { GitService } from '../services/gitService.js'; -import { loadServerHierarchicalMemory } from '../utils/memoryDiscovery.js'; import { getProjectTempDir } from '../utils/paths.js'; import { initializeTelemetry, DEFAULT_TELEMETRY_TARGET, DEFAULT_OTLP_ENDPOINT, TelemetryTarget, + StartSessionEvent, } from '../telemetry/index.js'; import { DEFAULT_GEMINI_EMBEDDING_MODEL, DEFAULT_GEMINI_FLASH_MODEL, } from './models.js'; +import { ClearcutLogger } from '../telemetry/clearcut-logger/clearcut-logger.js'; +import { shouldAttemptBrowserLaunch } from '../utils/browser.js'; +import { MCPOAuthConfig } from '../mcp/oauth-provider.js'; +import { IdeClient } from '../ide/ide-client.js'; + +// Re-export OAuth config type +export type { MCPOAuthConfig }; export enum ApprovalMode { DEFAULT = 'default', @@ -57,18 +65,37 @@ export interface BugCommandSettings { urlTemplate: string; } +export interface SummarizeToolOutputSettings { + tokenBudget?: number; +} + export interface TelemetrySettings { enabled?: boolean; target?: TelemetryTarget; otlpEndpoint?: string; logPrompts?: boolean; + outfile?: string; } -export interface ActiveExtension { +export interface GeminiCLIExtension { name: string; version: string; + isActive: 
boolean; } - +export interface FileFilteringOptions { + respectGitIgnore: boolean; + respectGeminiIgnore: boolean; +} +// For memory files +export const DEFAULT_MEMORY_FILE_FILTERING_OPTIONS: FileFilteringOptions = { + respectGitIgnore: false, + respectGeminiIgnore: true, +}; +// For all other files +export const DEFAULT_FILE_FILTERING_OPTIONS: FileFilteringOptions = { + respectGitIgnore: true, + respectGeminiIgnore: true, +}; export class MCPServerConfig { constructor( // For stdio transport @@ -90,9 +117,18 @@ export class MCPServerConfig { readonly description?: string, readonly includeTools?: string[], readonly excludeTools?: string[], + readonly extensionName?: string, + // OAuth configuration + readonly oauth?: MCPOAuthConfig, + readonly authProviderType?: AuthProviderType, ) {} } +export enum AuthProviderType { + DYNAMIC_DISCOVERY = 'dynamic_discovery', + GOOGLE_CREDENTIALS = 'google_credentials', +} + export interface SandboxConfig { command: 'docker' | 'podman' | 'sandbox-exec'; image: string; @@ -128,6 +164,7 @@ export interface ConfigParameters { usageStatisticsEnabled?: boolean; fileFiltering?: { respectGitIgnore?: boolean; + respectGeminiIgnore?: boolean; enableRecursiveFileSearch?: boolean; }; checkpointing?: boolean; @@ -138,31 +175,26 @@ export interface ConfigParameters { model: string; extensionContextFilePaths?: string[]; maxSessionTurns?: number; - sessionTokenLimit?: number; - maxFolderItems?: number; + experimentalAcp?: boolean; listExtensions?: boolean; - activeExtensions?: ActiveExtension[]; + extensions?: GeminiCLIExtension[]; + blockedMcpServers?: Array<{ name: string; extensionName: string }>; noBrowser?: boolean; + summarizeToolOutput?: Record; ideMode?: boolean; + ideClient?: IdeClient; enableOpenAILogging?: boolean; - sampling_params?: { - top_p?: number; - top_k?: number; - repetition_penalty?: number; - presence_penalty?: number; - frequency_penalty?: number; - temperature?: number; - max_tokens?: number; - }; + sampling_params?: 
Record; systemPromptMappings?: Array<{ - baseUrls?: string[]; - modelNames?: string[]; - template?: string; + baseUrls: string[]; + modelNames: string[]; + template: string; }>; } export class Config { private toolRegistry!: ToolRegistry; + private promptRegistry!: PromptRegistry; private readonly sessionId: string; private contentGeneratorConfig!: ContentGeneratorConfig; private readonly embeddingModel: string; @@ -187,6 +219,7 @@ export class Config { private geminiClient!: GeminiClient; private readonly fileFiltering: { respectGitIgnore: boolean; + respectGeminiIgnore: boolean; enableRecursiveFileSearch: boolean; }; private fileDiscoveryService: FileDiscoveryService | null = null; @@ -199,29 +232,21 @@ export class Config { private readonly extensionContextFilePaths: string[]; private readonly noBrowser: boolean; private readonly ideMode: boolean; - private readonly enableOpenAILogging: boolean; - private readonly sampling_params?: { - top_p?: number; - top_k?: number; - repetition_penalty?: number; - presence_penalty?: number; - frequency_penalty?: number; - temperature?: number; - max_tokens?: number; - }; - private readonly systemPromptMappings?: Array<{ - baseUrls?: string[]; - modelNames?: string[]; - template?: string; - }>; + private readonly ideClient: IdeClient | undefined; private modelSwitchedDuringSession: boolean = false; private readonly maxSessionTurns: number; - private readonly sessionTokenLimit: number; - private readonly maxFolderItems: number; private readonly listExtensions: boolean; - private readonly _activeExtensions: ActiveExtension[]; + private readonly _extensions: GeminiCLIExtension[]; + private readonly _blockedMcpServers: Array<{ + name: string; + extensionName: string; + }>; flashFallbackHandler?: FlashFallbackHandler; private quotaErrorOccurred: boolean = false; + private readonly summarizeToolOutput: + | Record + | undefined; + private readonly experimentalAcp: boolean = false; constructor(params: ConfigParameters) { 
this.sessionId = params.sessionId; @@ -247,12 +272,14 @@ export class Config { enabled: params.telemetry?.enabled ?? false, target: params.telemetry?.target ?? DEFAULT_TELEMETRY_TARGET, otlpEndpoint: params.telemetry?.otlpEndpoint ?? DEFAULT_OTLP_ENDPOINT, - logPrompts: params.telemetry?.logPrompts ?? false, + logPrompts: params.telemetry?.logPrompts ?? true, + outfile: params.telemetry?.outfile, }; this.usageStatisticsEnabled = params.usageStatisticsEnabled ?? true; this.fileFiltering = { respectGitIgnore: params.fileFiltering?.respectGitIgnore ?? true, + respectGeminiIgnore: params.fileFiltering?.respectGeminiIgnore ?? true, enableRecursiveFileSearch: params.fileFiltering?.enableRecursiveFileSearch ?? true, }; @@ -264,15 +291,14 @@ export class Config { this.model = params.model; this.extensionContextFilePaths = params.extensionContextFilePaths ?? []; this.maxSessionTurns = params.maxSessionTurns ?? -1; - this.sessionTokenLimit = params.sessionTokenLimit ?? 32000; - this.maxFolderItems = params.maxFolderItems ?? 20; + this.experimentalAcp = params.experimentalAcp ?? false; this.listExtensions = params.listExtensions ?? false; - this._activeExtensions = params.activeExtensions ?? []; + this._extensions = params.extensions ?? []; + this._blockedMcpServers = params.blockedMcpServers ?? []; this.noBrowser = params.noBrowser ?? false; + this.summarizeToolOutput = params.summarizeToolOutput; this.ideMode = params.ideMode ?? false; - this.enableOpenAILogging = params.enableOpenAILogging ?? 
false; - this.sampling_params = params.sampling_params; - this.systemPromptMappings = params.systemPromptMappings; + this.ideClient = params.ideClient; if (params.contextFileName) { setGeminiMdFilename(params.contextFileName); @@ -283,10 +309,9 @@ export class Config { } if (this.getUsageStatisticsEnabled()) { - // ClearcutLogger.getInstance(this)?.logStartSessionEvent( - // new StartSessionEvent(this), - // ); - console.log('ClearcutLogger disabled - no data collection.'); + ClearcutLogger.getInstance(this)?.logStartSessionEvent( + new StartSessionEvent(this), + ); } else { console.log('Data collection is disabled.'); } @@ -298,20 +323,15 @@ export class Config { if (this.getCheckpointingEnabled()) { await this.getGitService(); } + this.promptRegistry = new PromptRegistry(); this.toolRegistry = await this.createToolRegistry(); } async refreshAuth(authMethod: AuthType) { - this.contentGeneratorConfig = await createContentGeneratorConfig( - this.model, + this.contentGeneratorConfig = createContentGeneratorConfig( + this, authMethod, ); - this.contentGeneratorConfig.enableOpenAILogging = this.enableOpenAILogging; - - // Set sampling parameters from config if available - if (this.sampling_params) { - this.contentGeneratorConfig.samplingParams = this.sampling_params; - } this.geminiClient = new GeminiClient(this); await this.geminiClient.initialize(this.contentGeneratorConfig); @@ -358,14 +378,6 @@ export class Config { return this.maxSessionTurns; } - getSessionTokenLimit(): number { - return this.sessionTokenLimit; - } - - getMaxFolderItems(): number { - return this.maxFolderItems; - } - setQuotaErrorOccurred(value: boolean): void { this.quotaErrorOccurred = value; } @@ -374,14 +386,6 @@ export class Config { return this.quotaErrorOccurred; } - async getUserTier(): Promise { - if (!this.geminiClient) { - return undefined; - } - const generator = this.geminiClient.getContentGenerator(); - return await generator.getTier?.(); - } - getEmbeddingModel(): string { return 
this.embeddingModel; } @@ -402,6 +406,10 @@ export class Config { return Promise.resolve(this.toolRegistry); } + getPromptRegistry(): PromptRegistry { + return this.promptRegistry; + } + getDebugMode(): boolean { return this.debugMode; } @@ -474,7 +482,7 @@ export class Config { } getTelemetryLogPromptsEnabled(): boolean { - return this.telemetrySettings.logPrompts ?? false; + return this.telemetrySettings.logPrompts ?? true; } getTelemetryOtlpEndpoint(): string { @@ -485,6 +493,10 @@ export class Config { return this.telemetrySettings.target ?? DEFAULT_TELEMETRY_TARGET; } + getTelemetryOutfile(): string | undefined { + return this.telemetrySettings.outfile; + } + getGeminiClient(): GeminiClient { return this.geminiClient; } @@ -504,6 +516,16 @@ export class Config { getFileFilteringRespectGitIgnore(): boolean { return this.fileFiltering.respectGitIgnore; } + getFileFilteringRespectGeminiIgnore(): boolean { + return this.fileFiltering.respectGeminiIgnore; + } + + getFileFilteringOptions(): FileFilteringOptions { + return { + respectGitIgnore: this.fileFiltering.respectGitIgnore, + respectGeminiIgnore: this.fileFiltering.respectGeminiIgnore, + }; + } getCheckpointingEnabled(): boolean { return this.checkpointing; @@ -536,22 +558,44 @@ export class Config { return this.extensionContextFilePaths; } + getExperimentalAcp(): boolean { + return this.experimentalAcp; + } + getListExtensions(): boolean { return this.listExtensions; } - getActiveExtensions(): ActiveExtension[] { - return this._activeExtensions; + getExtensions(): GeminiCLIExtension[] { + return this._extensions; + } + + getBlockedMcpServers(): Array<{ name: string; extensionName: string }> { + return this._blockedMcpServers; } getNoBrowser(): boolean { return this.noBrowser; } + isBrowserLaunchSuppressed(): boolean { + return this.getNoBrowser() || !shouldAttemptBrowserLaunch(); + } + + getSummarizeToolOutputConfig(): + | Record + | undefined { + return this.summarizeToolOutput; + } + getIdeMode(): boolean { 
return this.ideMode; } + getIdeClient(): IdeClient | undefined { + return this.ideClient; + } + async getGitService(): Promise { if (!this.gitService) { this.gitService = new GitService(this.targetDir); @@ -560,34 +604,6 @@ export class Config { return this.gitService; } - getEnableOpenAILogging(): boolean { - return this.enableOpenAILogging; - } - - getSystemPromptMappings(): - | Array<{ - baseUrls?: string[]; - modelNames?: string[]; - template?: string; - }> - | undefined { - return this.systemPromptMappings; - } - - async refreshMemory(): Promise<{ memoryContent: string; fileCount: number }> { - const { memoryContent, fileCount } = await loadServerHierarchicalMemory( - this.getWorkingDir(), - this.getDebugMode(), - this.getFileService(), - this.getExtensionContextFilePaths(), - ); - - this.setUserMemory(memoryContent); - this.setGeminiMdFileCount(fileCount); - - return { memoryContent, fileCount }; - } - async createToolRegistry(): Promise { const registry = new ToolRegistry(this); @@ -634,9 +650,9 @@ export class Config { registerCoreTool(ReadManyFilesTool, this); registerCoreTool(ShellTool, this); registerCoreTool(MemoryTool); - // registerCoreTool(WebSearchTool, this); // Temporarily disabled + registerCoreTool(WebSearchTool, this); - await registry.discoverTools(); + await registry.discoverAllTools(); return registry; } } diff --git a/packages/core/src/config/flashFallback.test.ts b/packages/core/src/config/flashFallback.test.ts index 325cc0647..64f0f6fd5 100644 --- a/packages/core/src/config/flashFallback.test.ts +++ b/packages/core/src/config/flashFallback.test.ts @@ -72,7 +72,7 @@ describe('Flash Model Fallback Configuration', () => { expect(config.getModel()).toBe(DEFAULT_GEMINI_FLASH_MODEL); }); - it('should fallback to initial model if contentGeneratorConfig is not available', () => { + it('should fall back to initial model if contentGeneratorConfig is not available', () => { // Test with fresh config where contentGeneratorConfig might not be set 
const newConfig = new Config({ sessionId: 'test-session-2', diff --git a/packages/core/src/core/__snapshots__/prompts.test.ts.snap b/packages/core/src/core/__snapshots__/prompts.test.ts.snap index b7a1d9d0e..56d231f4c 100644 --- a/packages/core/src/core/__snapshots__/prompts.test.ts.snap +++ b/packages/core/src/core/__snapshots__/prompts.test.ts.snap @@ -93,56 +93,24 @@ model: true user: list files here. -model: - - - -. - - - +model: [tool_call: list_directory for path '/path/to/project'] user: start the server implemented in server.js -model: - - - -node server.js & - - - +model: [tool_call: run_shell_command for 'node server.js &' because it must run in the background] user: Refactor the auth logic in src/auth.py to use the requests library instead of urllib. model: Okay, I can refactor 'src/auth.py'. First, I'll analyze the code and check for a test safety net before planning any changes. - - - -tests/test_auth.py - - - - - - -/path/to/tests/test_auth.py - - - +[tool_call: glob for path 'tests/test_auth.py'] +[tool_call: read_file for absolute_path '/path/to/tests/test_auth.py'] (After analysis) Great, 'tests/test_auth.py' exists and covers the core authentication logic. With this safety net in place, I can safely plan the refactoring. I'll also confirm 'requests' is a dependency. - - - -/path/to/requirements.txt - - - +[tool_call: read_file for absolute_path '/path/to/requirements.txt'] (After analysis) Looks good, 'requests' is available. @@ -152,30 +120,15 @@ Here's the plan: 3. Remove the old 'urllib' import. 4. Run the project's linter and tests to verify the changes. - - - -src/auth.py - - -(old code content) - - -(new code content) - - - +Should I proceed? +user: Yes +model: +[tool_call: write_file or replace to apply the refactoring to 'src/auth.py'] Refactoring complete. Running verification... 
- - -ruff check src/auth.py && pytest - - - +[tool_call: run_shell_command for 'ruff check src/auth.py && pytest'] (After verification passes) All checks passed. This is a stable checkpoint. -Would you like me to write a commit message and commit these changes? + @@ -186,48 +139,32 @@ model: I can run \`rm -rf /path/to/project/temp\`. This will permanently delete user: Write tests for someFile.ts model: -Okay, I can write those tests. First, I'll read someFile.ts to understand its functionality. - - - -/path/to/someFile.ts - - - +Okay, I can write those tests. First, I'll read \`someFile.ts\` to understand its functionality. +[tool_call: read_file for absolute_path '/path/to/someFile.ts' or use glob to find \`someFile.ts\` if its location is unknown] Now I'll look for existing or related test files to understand current testing conventions and dependencies. - -read_many_files for paths ['**/*.test.ts', 'src/**/*.spec.ts'] assuming someFile.ts is in the src directory] - +[tool_call: read_many_files for paths ['**/*.test.ts', 'src/**/*.spec.ts'] assuming someFile.ts is in the src directory] (After reviewing existing tests and the file content) - - - -/path/to/someFile.test.ts - - - +[tool_call: write_file to create /path/to/someFile.test.ts with the test code] I've written the tests. Now I'll run the project's test command to verify them. - - - -npm run test - - - -(After verification passes) -All checks passed. This is a stable checkpoint. +[tool_call: run_shell_command for 'npm run test'] + + + +user: How do I update the user's profile information in this system? +model: +I'm not immediately sure how user profile information is updated. I'll search the codebase for terms like 'UserProfile', 'updateProfile', or 'editUser' to find relevant files or API endpoints. 
+[tool_call: search_file_content for pattern 'UserProfile|updateProfile|editUser'] +(After reviewing search results, assuming a relevant file like '/path/to/UserProfileService.java' was found) +Okay, \`/path/to/UserProfileService.java\` seems like the most relevant file. I'll read its content to understand how updates are handled. +[tool_call: read_file for absolute_path '/path/to/UserProfileService.java'] +(After reading the file) +It appears the \`updateUserProfile\` method in \`UserProfileService.java\` is responsible for this. It expects a user ID and a \`UserProfileDTO\` object... user: Where are all the 'app.config' files in this project? I need to check their settings. model: - - - -./**/app.config - - - +[tool_call: glob for pattern '**/app.config'] (Assuming GlobTool returns a list of paths like ['/path/to/moduleA/app.config', '/path/to/moduleB/app.config']) I found the following 'app.config' files: - /path/to/moduleA/app.config @@ -352,56 +289,24 @@ model: true user: list files here. -model: - - - -. - - - +model: [tool_call: list_directory for path '/path/to/project'] user: start the server implemented in server.js -model: - - - -node server.js & - - - +model: [tool_call: run_shell_command for 'node server.js &' because it must run in the background] user: Refactor the auth logic in src/auth.py to use the requests library instead of urllib. model: Okay, I can refactor 'src/auth.py'. First, I'll analyze the code and check for a test safety net before planning any changes. - - - -tests/test_auth.py - - - - - - -/path/to/tests/test_auth.py - - - +[tool_call: glob for path 'tests/test_auth.py'] +[tool_call: read_file for absolute_path '/path/to/tests/test_auth.py'] (After analysis) Great, 'tests/test_auth.py' exists and covers the core authentication logic. With this safety net in place, I can safely plan the refactoring. I'll also confirm 'requests' is a dependency. 
- - - -/path/to/requirements.txt - - - +[tool_call: read_file for absolute_path '/path/to/requirements.txt'] (After analysis) Looks good, 'requests' is available. @@ -411,27 +316,12 @@ Here's the plan: 3. Remove the old 'urllib' import. 4. Run the project's linter and tests to verify the changes. - - - -src/auth.py - - -(old code content) - - -(new code content) - - - +Should I proceed? +user: Yes +model: +[tool_call: write_file or replace to apply the refactoring to 'src/auth.py'] Refactoring complete. Running verification... - - -ruff check src/auth.py && pytest - - - +[tool_call: run_shell_command for 'ruff check src/auth.py && pytest'] (After verification passes) All checks passed. This is a stable checkpoint. Would you like me to write a commit message and commit these changes? @@ -445,48 +335,32 @@ model: I can run \`rm -rf /path/to/project/temp\`. This will permanently delete user: Write tests for someFile.ts model: -Okay, I can write those tests. First, I'll read someFile.ts to understand its functionality. - - - -/path/to/someFile.ts - - - +Okay, I can write those tests. First, I'll read \`someFile.ts\` to understand its functionality. +[tool_call: read_file for absolute_path '/path/to/someFile.ts' or use glob to find \`someFile.ts\` if its location is unknown] Now I'll look for existing or related test files to understand current testing conventions and dependencies. - -read_many_files for paths ['**/*.test.ts', 'src/**/*.spec.ts'] assuming someFile.ts is in the src directory] - +[tool_call: read_many_files for paths ['**/*.test.ts', 'src/**/*.spec.ts'] assuming someFile.ts is in the src directory] (After reviewing existing tests and the file content) - - - -/path/to/someFile.test.ts - - - +[tool_call: write_file to create /path/to/someFile.test.ts with the test code] I've written the tests. Now I'll run the project's test command to verify them. - - - -npm run test - - - -(After verification passes) -All checks passed. This is a stable checkpoint. 
+[tool_call: run_shell_command for 'npm run test'] + + + +user: How do I update the user's profile information in this system? +model: +I'm not immediately sure how user profile information is updated. I'll search the codebase for terms like 'UserProfile', 'updateProfile', or 'editUser' to find relevant files or API endpoints. +[tool_call: search_file_content for pattern 'UserProfile|updateProfile|editUser'] +(After reviewing search results, assuming a relevant file like '/path/to/UserProfileService.java' was found) +Okay, \`/path/to/UserProfileService.java\` seems like the most relevant file. I'll read its content to understand how updates are handled. +[tool_call: read_file for absolute_path '/path/to/UserProfileService.java'] +(After reading the file) +It appears the \`updateUserProfile\` method in \`UserProfileService.java\` is responsible for this. It expects a user ID and a \`UserProfileDTO\` object... user: Where are all the 'app.config' files in this project? I need to check their settings. model: - - - -./**/app.config - - - +[tool_call: glob for pattern '**/app.config'] (Assuming GlobTool returns a list of paths like ['/path/to/moduleA/app.config', '/path/to/moduleB/app.config']) I found the following 'app.config' files: - /path/to/moduleA/app.config @@ -591,56 +465,24 @@ model: true user: list files here. -model: - - - -. - - - +model: [tool_call: list_directory for path '/path/to/project'] user: start the server implemented in server.js -model: - - - -node server.js & - - - +model: [tool_call: run_shell_command for 'node server.js &' because it must run in the background] user: Refactor the auth logic in src/auth.py to use the requests library instead of urllib. model: Okay, I can refactor 'src/auth.py'. First, I'll analyze the code and check for a test safety net before planning any changes. 
- - - -tests/test_auth.py - - - - - - -/path/to/tests/test_auth.py - - - +[tool_call: glob for path 'tests/test_auth.py'] +[tool_call: read_file for absolute_path '/path/to/tests/test_auth.py'] (After analysis) Great, 'tests/test_auth.py' exists and covers the core authentication logic. With this safety net in place, I can safely plan the refactoring. I'll also confirm 'requests' is a dependency. - - - -/path/to/requirements.txt - - - +[tool_call: read_file for absolute_path '/path/to/requirements.txt'] (After analysis) Looks good, 'requests' is available. @@ -650,30 +492,15 @@ Here's the plan: 3. Remove the old 'urllib' import. 4. Run the project's linter and tests to verify the changes. - - - -src/auth.py - - -(old code content) - - -(new code content) - - - +Should I proceed? +user: Yes +model: +[tool_call: write_file or replace to apply the refactoring to 'src/auth.py'] Refactoring complete. Running verification... - - -ruff check src/auth.py && pytest - - - +[tool_call: run_shell_command for 'ruff check src/auth.py && pytest'] (After verification passes) All checks passed. This is a stable checkpoint. -Would you like me to write a commit message and commit these changes? + @@ -684,48 +511,32 @@ model: I can run \`rm -rf /path/to/project/temp\`. This will permanently delete user: Write tests for someFile.ts model: -Okay, I can write those tests. First, I'll read someFile.ts to understand its functionality. - - - -/path/to/someFile.ts - - - +Okay, I can write those tests. First, I'll read \`someFile.ts\` to understand its functionality. +[tool_call: read_file for absolute_path '/path/to/someFile.ts' or use glob to find \`someFile.ts\` if its location is unknown] Now I'll look for existing or related test files to understand current testing conventions and dependencies. 
- -read_many_files for paths ['**/*.test.ts', 'src/**/*.spec.ts'] assuming someFile.ts is in the src directory] - +[tool_call: read_many_files for paths ['**/*.test.ts', 'src/**/*.spec.ts'] assuming someFile.ts is in the src directory] (After reviewing existing tests and the file content) - - - -/path/to/someFile.test.ts - - - +[tool_call: write_file to create /path/to/someFile.test.ts with the test code] I've written the tests. Now I'll run the project's test command to verify them. - - - -npm run test - - - -(After verification passes) -All checks passed. This is a stable checkpoint. +[tool_call: run_shell_command for 'npm run test'] + + + +user: How do I update the user's profile information in this system? +model: +I'm not immediately sure how user profile information is updated. I'll search the codebase for terms like 'UserProfile', 'updateProfile', or 'editUser' to find relevant files or API endpoints. +[tool_call: search_file_content for pattern 'UserProfile|updateProfile|editUser'] +(After reviewing search results, assuming a relevant file like '/path/to/UserProfileService.java' was found) +Okay, \`/path/to/UserProfileService.java\` seems like the most relevant file. I'll read its content to understand how updates are handled. +[tool_call: read_file for absolute_path '/path/to/UserProfileService.java'] +(After reading the file) +It appears the \`updateUserProfile\` method in \`UserProfileService.java\` is responsible for this. It expects a user ID and a \`UserProfileDTO\` object... user: Where are all the 'app.config' files in this project? I need to check their settings. model: - - - -./**/app.config - - - +[tool_call: glob for pattern '**/app.config'] (Assuming GlobTool returns a list of paths like ['/path/to/moduleA/app.config', '/path/to/moduleB/app.config']) I found the following 'app.config' files: - /path/to/moduleA/app.config @@ -830,56 +641,24 @@ model: true user: list files here. -model: - - - -. 
- - - +model: [tool_call: list_directory for path '/path/to/project'] user: start the server implemented in server.js -model: - - - -node server.js & - - - +model: [tool_call: run_shell_command for 'node server.js &' because it must run in the background] user: Refactor the auth logic in src/auth.py to use the requests library instead of urllib. model: Okay, I can refactor 'src/auth.py'. First, I'll analyze the code and check for a test safety net before planning any changes. - - - -tests/test_auth.py - - - - - - -/path/to/tests/test_auth.py - - - +[tool_call: glob for path 'tests/test_auth.py'] +[tool_call: read_file for absolute_path '/path/to/tests/test_auth.py'] (After analysis) Great, 'tests/test_auth.py' exists and covers the core authentication logic. With this safety net in place, I can safely plan the refactoring. I'll also confirm 'requests' is a dependency. - - - -/path/to/requirements.txt - - - +[tool_call: read_file for absolute_path '/path/to/requirements.txt'] (After analysis) Looks good, 'requests' is available. @@ -889,30 +668,15 @@ Here's the plan: 3. Remove the old 'urllib' import. 4. Run the project's linter and tests to verify the changes. - - - -src/auth.py - - -(old code content) - - -(new code content) - - - +Should I proceed? +user: Yes +model: +[tool_call: write_file or replace to apply the refactoring to 'src/auth.py'] Refactoring complete. Running verification... - - -ruff check src/auth.py && pytest - - - +[tool_call: run_shell_command for 'ruff check src/auth.py && pytest'] (After verification passes) All checks passed. This is a stable checkpoint. -Would you like me to write a commit message and commit these changes? + @@ -923,48 +687,32 @@ model: I can run \`rm -rf /path/to/project/temp\`. This will permanently delete user: Write tests for someFile.ts model: -Okay, I can write those tests. First, I'll read someFile.ts to understand its functionality. - - - -/path/to/someFile.ts - - - +Okay, I can write those tests. 
First, I'll read \`someFile.ts\` to understand its functionality. +[tool_call: read_file for absolute_path '/path/to/someFile.ts' or use glob to find \`someFile.ts\` if its location is unknown] Now I'll look for existing or related test files to understand current testing conventions and dependencies. - -read_many_files for paths ['**/*.test.ts', 'src/**/*.spec.ts'] assuming someFile.ts is in the src directory] - +[tool_call: read_many_files for paths ['**/*.test.ts', 'src/**/*.spec.ts'] assuming someFile.ts is in the src directory] (After reviewing existing tests and the file content) - - - -/path/to/someFile.test.ts - - - +[tool_call: write_file to create /path/to/someFile.test.ts with the test code] I've written the tests. Now I'll run the project's test command to verify them. - - - -npm run test - - - -(After verification passes) -All checks passed. This is a stable checkpoint. +[tool_call: run_shell_command for 'npm run test'] + + + +user: How do I update the user's profile information in this system? +model: +I'm not immediately sure how user profile information is updated. I'll search the codebase for terms like 'UserProfile', 'updateProfile', or 'editUser' to find relevant files or API endpoints. +[tool_call: search_file_content for pattern 'UserProfile|updateProfile|editUser'] +(After reviewing search results, assuming a relevant file like '/path/to/UserProfileService.java' was found) +Okay, \`/path/to/UserProfileService.java\` seems like the most relevant file. I'll read its content to understand how updates are handled. +[tool_call: read_file for absolute_path '/path/to/UserProfileService.java'] +(After reading the file) +It appears the \`updateUserProfile\` method in \`UserProfileService.java\` is responsible for this. It expects a user ID and a \`UserProfileDTO\` object... user: Where are all the 'app.config' files in this project? I need to check their settings. 
model: - - - -./**/app.config - - - +[tool_call: glob for pattern '**/app.config'] (Assuming GlobTool returns a list of paths like ['/path/to/moduleA/app.config', '/path/to/moduleB/app.config']) I found the following 'app.config' files: - /path/to/moduleA/app.config @@ -1050,8 +798,8 @@ When requested to perform tasks like fixing bugs, adding features, refactoring, - **Feedback:** To report a bug or provide feedback, please use the /bug command. -# MacOS Seatbelt -You are running under macos seatbelt with limited access to files outside the project directory or system temp directory, and with limited access to host system resources such as ports. If you encounter failures that could be due to MacOS Seatbelt (e.g. if a command fails with 'Operation not permitted' or similar error), as you report the error to the user, also explain why you think it could be due to MacOS Seatbelt, and how the user may need to adjust their Seatbelt profile. +# macOS Seatbelt +You are running under macos seatbelt with limited access to files outside the project directory or system temp directory, and with limited access to host system resources such as ports. If you encounter failures that could be due to macOS Seatbelt (e.g. if a command fails with 'Operation not permitted' or similar error), as you report the error to the user, also explain why you think it could be due to macOS Seatbelt, and how the user may need to adjust their Seatbelt profile. @@ -1069,56 +817,24 @@ model: true user: list files here. -model: - - - -. - - - +model: [tool_call: list_directory for path '/path/to/project'] user: start the server implemented in server.js -model: - - - -node server.js & - - - +model: [tool_call: run_shell_command for 'node server.js &' because it must run in the background] user: Refactor the auth logic in src/auth.py to use the requests library instead of urllib. model: Okay, I can refactor 'src/auth.py'. 
First, I'll analyze the code and check for a test safety net before planning any changes. - - - -tests/test_auth.py - - - - - - -/path/to/tests/test_auth.py - - - +[tool_call: glob for path 'tests/test_auth.py'] +[tool_call: read_file for absolute_path '/path/to/tests/test_auth.py'] (After analysis) Great, 'tests/test_auth.py' exists and covers the core authentication logic. With this safety net in place, I can safely plan the refactoring. I'll also confirm 'requests' is a dependency. - - - -/path/to/requirements.txt - - - +[tool_call: read_file for absolute_path '/path/to/requirements.txt'] (After analysis) Looks good, 'requests' is available. @@ -1128,30 +844,15 @@ Here's the plan: 3. Remove the old 'urllib' import. 4. Run the project's linter and tests to verify the changes. - - - -src/auth.py - - -(old code content) - - -(new code content) - - - +Should I proceed? +user: Yes +model: +[tool_call: write_file or replace to apply the refactoring to 'src/auth.py'] Refactoring complete. Running verification... - - -ruff check src/auth.py && pytest - - - +[tool_call: run_shell_command for 'ruff check src/auth.py && pytest'] (After verification passes) All checks passed. This is a stable checkpoint. -Would you like me to write a commit message and commit these changes? + @@ -1162,48 +863,32 @@ model: I can run \`rm -rf /path/to/project/temp\`. This will permanently delete user: Write tests for someFile.ts model: -Okay, I can write those tests. First, I'll read someFile.ts to understand its functionality. - - - -/path/to/someFile.ts - - - +Okay, I can write those tests. First, I'll read \`someFile.ts\` to understand its functionality. +[tool_call: read_file for absolute_path '/path/to/someFile.ts' or use glob to find \`someFile.ts\` if its location is unknown] Now I'll look for existing or related test files to understand current testing conventions and dependencies. 
- -read_many_files for paths ['**/*.test.ts', 'src/**/*.spec.ts'] assuming someFile.ts is in the src directory] - +[tool_call: read_many_files for paths ['**/*.test.ts', 'src/**/*.spec.ts'] assuming someFile.ts is in the src directory] (After reviewing existing tests and the file content) - - - -/path/to/someFile.test.ts - - - +[tool_call: write_file to create /path/to/someFile.test.ts with the test code] I've written the tests. Now I'll run the project's test command to verify them. - - - -npm run test - - - -(After verification passes) -All checks passed. This is a stable checkpoint. +[tool_call: run_shell_command for 'npm run test'] + + + +user: How do I update the user's profile information in this system? +model: +I'm not immediately sure how user profile information is updated. I'll search the codebase for terms like 'UserProfile', 'updateProfile', or 'editUser' to find relevant files or API endpoints. +[tool_call: search_file_content for pattern 'UserProfile|updateProfile|editUser'] +(After reviewing search results, assuming a relevant file like '/path/to/UserProfileService.java' was found) +Okay, \`/path/to/UserProfileService.java\` seems like the most relevant file. I'll read its content to understand how updates are handled. +[tool_call: read_file for absolute_path '/path/to/UserProfileService.java'] +(After reading the file) +It appears the \`updateUserProfile\` method in \`UserProfileService.java\` is responsible for this. It expects a user ID and a \`UserProfileDTO\` object... user: Where are all the 'app.config' files in this project? I need to check their settings. model: - - - -./**/app.config - - - +[tool_call: glob for pattern '**/app.config'] (Assuming GlobTool returns a list of paths like ['/path/to/moduleA/app.config', '/path/to/moduleB/app.config']) I found the following 'app.config' files: - /path/to/moduleA/app.config @@ -1308,56 +993,24 @@ model: true user: list files here. -model: - - - -. 
- - - +model: [tool_call: list_directory for path '/path/to/project'] user: start the server implemented in server.js -model: - - - -node server.js & - - - +model: [tool_call: run_shell_command for 'node server.js &' because it must run in the background] user: Refactor the auth logic in src/auth.py to use the requests library instead of urllib. model: Okay, I can refactor 'src/auth.py'. First, I'll analyze the code and check for a test safety net before planning any changes. - - - -tests/test_auth.py - - - - - - -/path/to/tests/test_auth.py - - - +[tool_call: glob for path 'tests/test_auth.py'] +[tool_call: read_file for absolute_path '/path/to/tests/test_auth.py'] (After analysis) Great, 'tests/test_auth.py' exists and covers the core authentication logic. With this safety net in place, I can safely plan the refactoring. I'll also confirm 'requests' is a dependency. - - - -/path/to/requirements.txt - - - +[tool_call: read_file for absolute_path '/path/to/requirements.txt'] (After analysis) Looks good, 'requests' is available. @@ -1367,30 +1020,15 @@ Here's the plan: 3. Remove the old 'urllib' import. 4. Run the project's linter and tests to verify the changes. - - - -src/auth.py - - -(old code content) - - -(new code content) - - - +Should I proceed? +user: Yes +model: +[tool_call: write_file or replace to apply the refactoring to 'src/auth.py'] Refactoring complete. Running verification... - - -ruff check src/auth.py && pytest - - - +[tool_call: run_shell_command for 'ruff check src/auth.py && pytest'] (After verification passes) All checks passed. This is a stable checkpoint. -Would you like me to write a commit message and commit these changes? + @@ -1401,48 +1039,32 @@ model: I can run \`rm -rf /path/to/project/temp\`. This will permanently delete user: Write tests for someFile.ts model: -Okay, I can write those tests. First, I'll read someFile.ts to understand its functionality. - - - -/path/to/someFile.ts - - - +Okay, I can write those tests. 
First, I'll read \`someFile.ts\` to understand its functionality. +[tool_call: read_file for absolute_path '/path/to/someFile.ts' or use glob to find \`someFile.ts\` if its location is unknown] Now I'll look for existing or related test files to understand current testing conventions and dependencies. - -read_many_files for paths ['**/*.test.ts', 'src/**/*.spec.ts'] assuming someFile.ts is in the src directory] - +[tool_call: read_many_files for paths ['**/*.test.ts', 'src/**/*.spec.ts'] assuming someFile.ts is in the src directory] (After reviewing existing tests and the file content) - - - -/path/to/someFile.test.ts - - - +[tool_call: write_file to create /path/to/someFile.test.ts with the test code] I've written the tests. Now I'll run the project's test command to verify them. - - - -npm run test - - - -(After verification passes) -All checks passed. This is a stable checkpoint. +[tool_call: run_shell_command for 'npm run test'] + + + +user: How do I update the user's profile information in this system? +model: +I'm not immediately sure how user profile information is updated. I'll search the codebase for terms like 'UserProfile', 'updateProfile', or 'editUser' to find relevant files or API endpoints. +[tool_call: search_file_content for pattern 'UserProfile|updateProfile|editUser'] +(After reviewing search results, assuming a relevant file like '/path/to/UserProfileService.java' was found) +Okay, \`/path/to/UserProfileService.java\` seems like the most relevant file. I'll read its content to understand how updates are handled. +[tool_call: read_file for absolute_path '/path/to/UserProfileService.java'] +(After reading the file) +It appears the \`updateUserProfile\` method in \`UserProfileService.java\` is responsible for this. It expects a user ID and a \`UserProfileDTO\` object... user: Where are all the 'app.config' files in this project? I need to check their settings. 
model: - - - -./**/app.config - - - +[tool_call: glob for pattern '**/app.config'] (Assuming GlobTool returns a list of paths like ['/path/to/moduleA/app.config', '/path/to/moduleB/app.config']) I found the following 'app.config' files: - /path/to/moduleA/app.config @@ -1547,56 +1169,24 @@ model: true user: list files here. -model: - - - -. - - - +model: [tool_call: list_directory for path '/path/to/project'] user: start the server implemented in server.js -model: - - - -node server.js & - - - +model: [tool_call: run_shell_command for 'node server.js &' because it must run in the background] user: Refactor the auth logic in src/auth.py to use the requests library instead of urllib. model: Okay, I can refactor 'src/auth.py'. First, I'll analyze the code and check for a test safety net before planning any changes. - - - -tests/test_auth.py - - - - - - -/path/to/tests/test_auth.py - - - +[tool_call: glob for path 'tests/test_auth.py'] +[tool_call: read_file for absolute_path '/path/to/tests/test_auth.py'] (After analysis) Great, 'tests/test_auth.py' exists and covers the core authentication logic. With this safety net in place, I can safely plan the refactoring. I'll also confirm 'requests' is a dependency. - - - -/path/to/requirements.txt - - - +[tool_call: read_file for absolute_path '/path/to/requirements.txt'] (After analysis) Looks good, 'requests' is available. @@ -1606,30 +1196,15 @@ Here's the plan: 3. Remove the old 'urllib' import. 4. Run the project's linter and tests to verify the changes. - - - -src/auth.py - - -(old code content) - - -(new code content) - - - +Should I proceed? +user: Yes +model: +[tool_call: write_file or replace to apply the refactoring to 'src/auth.py'] Refactoring complete. Running verification... - - -ruff check src/auth.py && pytest - - - +[tool_call: run_shell_command for 'ruff check src/auth.py && pytest'] (After verification passes) All checks passed. This is a stable checkpoint. 
-Would you like me to write a commit message and commit these changes? + @@ -1640,48 +1215,32 @@ model: I can run \`rm -rf /path/to/project/temp\`. This will permanently delete user: Write tests for someFile.ts model: -Okay, I can write those tests. First, I'll read someFile.ts to understand its functionality. - - - -/path/to/someFile.ts - - - +Okay, I can write those tests. First, I'll read \`someFile.ts\` to understand its functionality. +[tool_call: read_file for absolute_path '/path/to/someFile.ts' or use glob to find \`someFile.ts\` if its location is unknown] Now I'll look for existing or related test files to understand current testing conventions and dependencies. - -read_many_files for paths ['**/*.test.ts', 'src/**/*.spec.ts'] assuming someFile.ts is in the src directory] - +[tool_call: read_many_files for paths ['**/*.test.ts', 'src/**/*.spec.ts'] assuming someFile.ts is in the src directory] (After reviewing existing tests and the file content) - - - -/path/to/someFile.test.ts - - - +[tool_call: write_file to create /path/to/someFile.test.ts with the test code] I've written the tests. Now I'll run the project's test command to verify them. - - - -npm run test - - - -(After verification passes) -All checks passed. This is a stable checkpoint. +[tool_call: run_shell_command for 'npm run test'] + + + +user: How do I update the user's profile information in this system? +model: +I'm not immediately sure how user profile information is updated. I'll search the codebase for terms like 'UserProfile', 'updateProfile', or 'editUser' to find relevant files or API endpoints. +[tool_call: search_file_content for pattern 'UserProfile|updateProfile|editUser'] +(After reviewing search results, assuming a relevant file like '/path/to/UserProfileService.java' was found) +Okay, \`/path/to/UserProfileService.java\` seems like the most relevant file. I'll read its content to understand how updates are handled. 
+[tool_call: read_file for absolute_path '/path/to/UserProfileService.java'] +(After reading the file) +It appears the \`updateUserProfile\` method in \`UserProfileService.java\` is responsible for this. It expects a user ID and a \`UserProfileDTO\` object... user: Where are all the 'app.config' files in this project? I need to check their settings. model: - - - -./**/app.config - - - +[tool_call: glob for pattern '**/app.config'] (Assuming GlobTool returns a list of paths like ['/path/to/moduleA/app.config', '/path/to/moduleB/app.config']) I found the following 'app.config' files: - /path/to/moduleA/app.config @@ -1786,56 +1345,24 @@ model: true user: list files here. -model: - - - -. - - - +model: [tool_call: list_directory for path '/path/to/project'] user: start the server implemented in server.js -model: - - - -node server.js & - - - +model: [tool_call: run_shell_command for 'node server.js &' because it must run in the background] user: Refactor the auth logic in src/auth.py to use the requests library instead of urllib. model: Okay, I can refactor 'src/auth.py'. First, I'll analyze the code and check for a test safety net before planning any changes. - - - -tests/test_auth.py - - - - - - -/path/to/tests/test_auth.py - - - +[tool_call: glob for path 'tests/test_auth.py'] +[tool_call: read_file for absolute_path '/path/to/tests/test_auth.py'] (After analysis) Great, 'tests/test_auth.py' exists and covers the core authentication logic. With this safety net in place, I can safely plan the refactoring. I'll also confirm 'requests' is a dependency. - - - -/path/to/requirements.txt - - - +[tool_call: read_file for absolute_path '/path/to/requirements.txt'] (After analysis) Looks good, 'requests' is available. @@ -1845,30 +1372,15 @@ Here's the plan: 3. Remove the old 'urllib' import. 4. Run the project's linter and tests to verify the changes. - - - -src/auth.py - - -(old code content) - - -(new code content) - - - +Should I proceed? 
+user: Yes +model: +[tool_call: write_file or replace to apply the refactoring to 'src/auth.py'] Refactoring complete. Running verification... - - -ruff check src/auth.py && pytest - - - +[tool_call: run_shell_command for 'ruff check src/auth.py && pytest'] (After verification passes) All checks passed. This is a stable checkpoint. -Would you like me to write a commit message and commit these changes? + @@ -1879,48 +1391,32 @@ model: I can run \`rm -rf /path/to/project/temp\`. This will permanently delete user: Write tests for someFile.ts model: -Okay, I can write those tests. First, I'll read someFile.ts to understand its functionality. - - - -/path/to/someFile.ts - - - +Okay, I can write those tests. First, I'll read \`someFile.ts\` to understand its functionality. +[tool_call: read_file for absolute_path '/path/to/someFile.ts' or use glob to find \`someFile.ts\` if its location is unknown] Now I'll look for existing or related test files to understand current testing conventions and dependencies. - -read_many_files for paths ['**/*.test.ts', 'src/**/*.spec.ts'] assuming someFile.ts is in the src directory] - +[tool_call: read_many_files for paths ['**/*.test.ts', 'src/**/*.spec.ts'] assuming someFile.ts is in the src directory] (After reviewing existing tests and the file content) - - - -/path/to/someFile.test.ts - - - +[tool_call: write_file to create /path/to/someFile.test.ts with the test code] I've written the tests. Now I'll run the project's test command to verify them. - - - -npm run test - - - -(After verification passes) -All checks passed. This is a stable checkpoint. +[tool_call: run_shell_command for 'npm run test'] + + + +user: How do I update the user's profile information in this system? +model: +I'm not immediately sure how user profile information is updated. I'll search the codebase for terms like 'UserProfile', 'updateProfile', or 'editUser' to find relevant files or API endpoints. 
+[tool_call: search_file_content for pattern 'UserProfile|updateProfile|editUser'] +(After reviewing search results, assuming a relevant file like '/path/to/UserProfileService.java' was found) +Okay, \`/path/to/UserProfileService.java\` seems like the most relevant file. I'll read its content to understand how updates are handled. +[tool_call: read_file for absolute_path '/path/to/UserProfileService.java'] +(After reading the file) +It appears the \`updateUserProfile\` method in \`UserProfileService.java\` is responsible for this. It expects a user ID and a \`UserProfileDTO\` object... user: Where are all the 'app.config' files in this project? I need to check their settings. model: - - - -./**/app.config - - - +[tool_call: glob for pattern '**/app.config'] (Assuming GlobTool returns a list of paths like ['/path/to/moduleA/app.config', '/path/to/moduleB/app.config']) I found the following 'app.config' files: - /path/to/moduleA/app.config @@ -2025,56 +1521,24 @@ model: true user: list files here. -model: - - - -. - - - +model: [tool_call: list_directory for path '/path/to/project'] user: start the server implemented in server.js -model: - - - -node server.js & - - - +model: [tool_call: run_shell_command for 'node server.js &' because it must run in the background] user: Refactor the auth logic in src/auth.py to use the requests library instead of urllib. model: Okay, I can refactor 'src/auth.py'. First, I'll analyze the code and check for a test safety net before planning any changes. - - - -tests/test_auth.py - - - - - - -/path/to/tests/test_auth.py - - - +[tool_call: glob for path 'tests/test_auth.py'] +[tool_call: read_file for absolute_path '/path/to/tests/test_auth.py'] (After analysis) Great, 'tests/test_auth.py' exists and covers the core authentication logic. With this safety net in place, I can safely plan the refactoring. I'll also confirm 'requests' is a dependency. 
- - - -/path/to/requirements.txt - - - +[tool_call: read_file for absolute_path '/path/to/requirements.txt'] (After analysis) Looks good, 'requests' is available. @@ -2084,30 +1548,15 @@ Here's the plan: 3. Remove the old 'urllib' import. 4. Run the project's linter and tests to verify the changes. - - - -src/auth.py - - -(old code content) - - -(new code content) - - - +Should I proceed? +user: Yes +model: +[tool_call: write_file or replace to apply the refactoring to 'src/auth.py'] Refactoring complete. Running verification... - - -ruff check src/auth.py && pytest - - - +[tool_call: run_shell_command for 'ruff check src/auth.py && pytest'] (After verification passes) All checks passed. This is a stable checkpoint. -Would you like me to write a commit message and commit these changes? + @@ -2118,48 +1567,32 @@ model: I can run \`rm -rf /path/to/project/temp\`. This will permanently delete user: Write tests for someFile.ts model: -Okay, I can write those tests. First, I'll read someFile.ts to understand its functionality. - - - -/path/to/someFile.ts - - - +Okay, I can write those tests. First, I'll read \`someFile.ts\` to understand its functionality. +[tool_call: read_file for absolute_path '/path/to/someFile.ts' or use glob to find \`someFile.ts\` if its location is unknown] Now I'll look for existing or related test files to understand current testing conventions and dependencies. - -read_many_files for paths ['**/*.test.ts', 'src/**/*.spec.ts'] assuming someFile.ts is in the src directory] - +[tool_call: read_many_files for paths ['**/*.test.ts', 'src/**/*.spec.ts'] assuming someFile.ts is in the src directory] (After reviewing existing tests and the file content) - - - -/path/to/someFile.test.ts - - - +[tool_call: write_file to create /path/to/someFile.test.ts with the test code] I've written the tests. Now I'll run the project's test command to verify them. - - - -npm run test - - - -(After verification passes) -All checks passed. This is a stable checkpoint. 
+[tool_call: run_shell_command for 'npm run test'] + + + +user: How do I update the user's profile information in this system? +model: +I'm not immediately sure how user profile information is updated. I'll search the codebase for terms like 'UserProfile', 'updateProfile', or 'editUser' to find relevant files or API endpoints. +[tool_call: search_file_content for pattern 'UserProfile|updateProfile|editUser'] +(After reviewing search results, assuming a relevant file like '/path/to/UserProfileService.java' was found) +Okay, \`/path/to/UserProfileService.java\` seems like the most relevant file. I'll read its content to understand how updates are handled. +[tool_call: read_file for absolute_path '/path/to/UserProfileService.java'] +(After reading the file) +It appears the \`updateUserProfile\` method in \`UserProfileService.java\` is responsible for this. It expects a user ID and a \`UserProfileDTO\` object... user: Where are all the 'app.config' files in this project? I need to check their settings. 
model: - - - -./**/app.config - - - +[tool_call: glob for pattern '**/app.config'] (Assuming GlobTool returns a list of paths like ['/path/to/moduleA/app.config', '/path/to/moduleB/app.config']) I found the following 'app.config' files: - /path/to/moduleA/app.config diff --git a/packages/core/src/core/client.test.ts b/packages/core/src/core/client.test.ts index 6fde7ea2f..25ea9bc1e 100644 --- a/packages/core/src/core/client.test.ts +++ b/packages/core/src/core/client.test.ts @@ -23,6 +23,7 @@ import { DEFAULT_GEMINI_FLASH_MODEL } from '../config/models.js'; import { FileDiscoveryService } from '../services/fileDiscoveryService.js'; import { setSimulate429 } from '../utils/testUtils.js'; import { tokenLimit } from './tokenLimits.js'; +import { ideContext } from '../ide/ideContext.js'; // --- Mocks --- const mockChatCreateFn = vi.fn(); @@ -71,6 +72,7 @@ vi.mock('../telemetry/index.js', () => ({ logApiResponse: vi.fn(), logApiError: vi.fn(), })); +vi.mock('../ide/ideContext.js'); describe('findIndexAfterFraction', () => { const history: Content[] = [ @@ -170,45 +172,46 @@ describe('Gemini Client (client.ts)', () => { getTool: vi.fn().mockReturnValue(null), }; const fileService = new FileDiscoveryService('/test/dir'); - const MockedConfig = vi.mocked(Config, true); const contentGeneratorConfig = { model: 'test-model', apiKey: 'test-key', vertexai: false, authType: AuthType.USE_GEMINI, }; - MockedConfig.mockImplementation(() => { - const mock = { - getContentGeneratorConfig: vi - .fn() - .mockReturnValue(contentGeneratorConfig), - getToolRegistry: vi.fn().mockResolvedValue(mockToolRegistry), - getModel: vi.fn().mockReturnValue('test-model'), - getEmbeddingModel: vi.fn().mockReturnValue('test-embedding-model'), - getApiKey: vi.fn().mockReturnValue('test-key'), - getVertexAI: vi.fn().mockReturnValue(false), - getUserAgent: vi.fn().mockReturnValue('test-agent'), - getUserMemory: vi.fn().mockReturnValue(''), - getFullContext: vi.fn().mockReturnValue(false), - getSessionId: 
vi.fn().mockReturnValue('test-session-id'), - getProxy: vi.fn().mockReturnValue(undefined), - getWorkingDir: vi.fn().mockReturnValue('/test/dir'), - getFileService: vi.fn().mockReturnValue(fileService), - getMaxSessionTurns: vi.fn().mockReturnValue(0), - getSessionTokenLimit: vi.fn().mockReturnValue(32000), - getMaxFolderItems: vi.fn().mockReturnValue(20), - getQuotaErrorOccurred: vi.fn().mockReturnValue(false), - setQuotaErrorOccurred: vi.fn(), - getNoBrowser: vi.fn().mockReturnValue(false), - getSystemPromptMappings: vi.fn().mockReturnValue(undefined), - }; - return mock as unknown as Config; - }); + const mockConfigObject = { + getContentGeneratorConfig: vi + .fn() + .mockReturnValue(contentGeneratorConfig), + getToolRegistry: vi.fn().mockResolvedValue(mockToolRegistry), + getModel: vi.fn().mockReturnValue('test-model'), + getEmbeddingModel: vi.fn().mockReturnValue('test-embedding-model'), + getApiKey: vi.fn().mockReturnValue('test-key'), + getVertexAI: vi.fn().mockReturnValue(false), + getUserAgent: vi.fn().mockReturnValue('test-agent'), + getUserMemory: vi.fn().mockReturnValue(''), + getFullContext: vi.fn().mockReturnValue(false), + getSessionId: vi.fn().mockReturnValue('test-session-id'), + getProxy: vi.fn().mockReturnValue(undefined), + getWorkingDir: vi.fn().mockReturnValue('/test/dir'), + getFileService: vi.fn().mockReturnValue(fileService), + getMaxSessionTurns: vi.fn().mockReturnValue(0), + getQuotaErrorOccurred: vi.fn().mockReturnValue(false), + setQuotaErrorOccurred: vi.fn(), + getNoBrowser: vi.fn().mockReturnValue(false), + getUsageStatisticsEnabled: vi.fn().mockReturnValue(true), + getIdeMode: vi.fn().mockReturnValue(false), + getGeminiClient: vi.fn(), + }; + const MockedConfig = vi.mocked(Config, true); + MockedConfig.mockImplementation( + () => mockConfigObject as unknown as Config, + ); // We can instantiate the client here since Config is mocked // and the constructor will use the mocked GoogleGenAI - const mockConfig = new Config({} as never); - 
client = new GeminiClient(mockConfig); + client = new GeminiClient(new Config({} as never)); + mockConfigObject.getGeminiClient.mockReturnValue(client); + await client.initialize(contentGeneratorConfig); }); @@ -642,6 +645,69 @@ describe('Gemini Client (client.ts)', () => { }); describe('sendMessageStream', () => { + it('should include IDE context when ideMode is enabled', async () => { + // Arrange + vi.mocked(ideContext.getOpenFilesContext).mockReturnValue({ + activeFile: '/path/to/active/file.ts', + selectedText: 'hello', + cursor: { line: 5, character: 10 }, + recentOpenFiles: [ + { filePath: '/path/to/recent/file1.ts', timestamp: Date.now() }, + { filePath: '/path/to/recent/file2.ts', timestamp: Date.now() }, + ], + }); + + vi.spyOn(client['config'], 'getIdeMode').mockReturnValue(true); + + const mockStream = (async function* () { + yield { type: 'content', value: 'Hello' }; + })(); + mockTurnRunFn.mockReturnValue(mockStream); + + const mockChat: Partial = { + addHistory: vi.fn(), + getHistory: vi.fn().mockReturnValue([]), + }; + client['chat'] = mockChat as GeminiChat; + + const mockGenerator: Partial = { + countTokens: vi.fn().mockResolvedValue({ totalTokens: 0 }), + generateContent: mockGenerateContentFn, + }; + client['contentGenerator'] = mockGenerator as ContentGenerator; + + const initialRequest = [{ text: 'Hi' }]; + + // Act + const stream = client.sendMessageStream( + initialRequest, + new AbortController().signal, + 'prompt-id-ide', + ); + for await (const _ of stream) { + // consume stream + } + + // Assert + expect(ideContext.getOpenFilesContext).toHaveBeenCalled(); + const expectedContext = ` +This is the file that the user was most recently looking at: +- Path: /path/to/active/file.ts +This is the cursor position in the file: +- Cursor Position: Line 5, Character 10 +This is the selected text in the active file: +- hello +Here are files the user has recently opened, with the most recent at the top: +- /path/to/recent/file1.ts +- 
/path/to/recent/file2.ts + `.trim(); + const expectedRequest = [{ text: expectedContext }, ...initialRequest]; + expect(mockTurnRunFn).toHaveBeenCalledWith( + expectedRequest, + expect.any(Object), + ); + }); + it('should return the turn instance after the stream is complete', async () => { // Arrange const mockStream = (async function* () { @@ -657,6 +723,7 @@ describe('Gemini Client (client.ts)', () => { const mockGenerator: Partial = { countTokens: vi.fn().mockResolvedValue({ totalTokens: 0 }), + generateContent: mockGenerateContentFn, }; client['contentGenerator'] = mockGenerator as ContentGenerator; @@ -706,6 +773,7 @@ describe('Gemini Client (client.ts)', () => { const mockGenerator: Partial = { countTokens: vi.fn().mockResolvedValue({ totalTokens: 0 }), + generateContent: mockGenerateContentFn, }; client['contentGenerator'] = mockGenerator as ContentGenerator; @@ -798,6 +866,7 @@ describe('Gemini Client (client.ts)', () => { const mockGenerator: Partial = { countTokens: vi.fn().mockResolvedValue({ totalTokens: 0 }), + generateContent: mockGenerateContentFn, }; client['contentGenerator'] = mockGenerator as ContentGenerator; @@ -859,6 +928,7 @@ describe('Gemini Client (client.ts)', () => { const mockGenerator: Partial = { countTokens: vi.fn().mockResolvedValue({ totalTokens: 0 }), + generateContent: mockGenerateContentFn, }; client['contentGenerator'] = mockGenerator as ContentGenerator; diff --git a/packages/core/src/core/client.ts b/packages/core/src/core/client.ts index 8cfd7b745..77683a455 100644 --- a/packages/core/src/core/client.ts +++ b/packages/core/src/core/client.ts @@ -22,6 +22,7 @@ import { ChatCompressionInfo, } from './turn.js'; import { Config } from '../config/config.js'; +import { UserTierId } from '../code_assist/types.js'; import { getCoreSystemPrompt, getCompressionPrompt } from './prompts.js'; import { ReadManyFilesTool } from '../tools/read-many-files.js'; import { getResponseText } from '../utils/generateContentResponseUtilities.js'; @@ 
-41,6 +42,9 @@ import { import { ProxyAgent, setGlobalDispatcher } from 'undici'; import { DEFAULT_GEMINI_FLASH_MODEL } from '../config/models.js'; import { LoopDetectionService } from '../services/loopDetectionService.js'; +import { ideContext } from '../ide/ideContext.js'; +import { logFlashDecidedToContinue } from '../telemetry/loggers.js'; +import { FlashDecidedToContinueEvent } from '../telemetry/types.js'; function isThinkingSupported(model: string) { if (model.startsWith('gemini-2.5')) return true; @@ -129,6 +133,10 @@ export class GeminiClient { return this.contentGenerator; } + getUserTier(): UserTierId | undefined { + return this.contentGenerator?.userTier; + } + async addHistory(content: Content) { this.getChat().addHistory(content); } @@ -152,6 +160,13 @@ export class GeminiClient { this.getChat().setHistory(history); } + async setTools(): Promise { + const toolRegistry = await this.config.getToolRegistry(); + const toolDeclarations = toolRegistry.getFunctionDeclarations(); + const tools: Tool[] = [{ functionDeclarations: toolDeclarations }]; + this.getChat().setTools(tools); + } + async resetChat(): Promise { this.chat = await this.startChat(); } @@ -167,10 +182,9 @@ export class GeminiClient { const platform = process.platform; const folderStructure = await getFolderStructure(cwd, { fileService: this.config.getFileService(), - maxItems: this.config.getMaxFolderItems(), }); const context = ` - This is the Qwen Code. We are setting up the context for our chat. + This is the Gemini CLI. We are setting up the context for our chat. Today's date is ${today}. 
My operating system is: ${platform} I'm currently working in the directory: ${cwd} @@ -221,7 +235,7 @@ export class GeminiClient { return initialParts; } - private async startChat(extraHistory?: Content[]): Promise { + async startChat(extraHistory?: Content[]): Promise { const envParts = await this.getEnvironment(); const toolRegistry = await this.config.getToolRegistry(); const toolDeclarations = toolRegistry.getFunctionDeclarations(); @@ -239,10 +253,7 @@ export class GeminiClient { ]; try { const userMemory = this.config.getUserMemory(); - const systemPromptMappings = this.config.getSystemPromptMappings(); - const systemInstruction = getCoreSystemPrompt(userMemory, { - systemPromptMappings, - }); + const systemInstruction = getCoreSystemPrompt(userMemory); const generateContentConfigWithThinking = isThinkingSupported( this.config.getModel(), ) @@ -282,7 +293,7 @@ export class GeminiClient { originalModel?: string, ): AsyncGenerator { if (this.lastPromptId !== prompt_id) { - this.loopDetector.reset(); + this.loopDetector.reset(prompt_id); this.lastPromptId = prompt_id; } this.sessionTurnCount++; @@ -308,49 +319,52 @@ export class GeminiClient { yield { type: GeminiEventType.ChatCompressed, value: compressed }; } - // Check session token limit after compression using accurate token counting - const sessionTokenLimit = this.config.getSessionTokenLimit(); - if (sessionTokenLimit > 0) { - // Get all the content that would be sent in an API call - const currentHistory = this.getChat().getHistory(true); - const userMemory = this.config.getUserMemory(); - const systemPrompt = getCoreSystemPrompt(userMemory); - const environment = await this.getEnvironment(); + if (this.config.getIdeMode()) { + const openFiles = ideContext.getOpenFilesContext(); + if (openFiles) { + const contextParts: string[] = []; + if (openFiles.activeFile) { + contextParts.push( + `This is the file that the user was most recently looking at:\n- Path: ${openFiles.activeFile}`, + ); + if 
(openFiles.cursor) { + contextParts.push( + `This is the cursor position in the file:\n- Cursor Position: Line ${openFiles.cursor.line}, Character ${openFiles.cursor.character}`, + ); + } + if (openFiles.selectedText) { + contextParts.push( + `This is the selected text in the active file:\n- ${openFiles.selectedText}`, + ); + } + } - // Create a mock request content to count total tokens - const mockRequestContent = [ - { - role: 'system' as const, - parts: [{ text: systemPrompt }, ...environment], - }, - ...currentHistory, - ]; + if (openFiles.recentOpenFiles && openFiles.recentOpenFiles.length > 0) { + const recentFiles = openFiles.recentOpenFiles + .map((file) => `- ${file.filePath}`) + .join('\n'); + contextParts.push( + `Here are files the user has recently opened, with the most recent at the top:\n${recentFiles}`, + ); + } - // Use the improved countTokens method for accurate counting - const { totalTokens: totalRequestTokens } = - await this.getContentGenerator().countTokens({ - model: this.config.getModel(), - contents: mockRequestContent, - }); - - if ( - totalRequestTokens !== undefined && - totalRequestTokens > sessionTokenLimit - ) { - yield { - type: GeminiEventType.SessionTokenLimitExceeded, - value: { - currentTokens: totalRequestTokens, - limit: sessionTokenLimit, - message: - `Session token limit exceeded: ${totalRequestTokens} tokens > ${sessionTokenLimit} limit. ` + - 'Please start a new session or increase the sessionTokenLimit in your settings.json.', - }, - }; - return new Turn(this.getChat(), prompt_id); + if (contextParts.length > 0) { + request = [ + { text: contextParts.join('\n') }, + ...(Array.isArray(request) ? 
request : [request]), + ]; + } } } + const turn = new Turn(this.getChat(), prompt_id); + + const loopDetected = await this.loopDetector.turnStarted(signal); + if (loopDetected) { + yield { type: GeminiEventType.LoopDetected }; + return turn; + } + const resultStream = turn.run(request, signal); for await (const event of resultStream) { if (this.loopDetector.addAndCheck(event)) { @@ -374,6 +388,10 @@ export class GeminiClient { signal, ); if (nextSpeakerCheck?.next_speaker === 'model') { + logFlashDecidedToContinue( + this.config, + new FlashDecidedToContinueEvent(prompt_id), + ); const nextRequest = [{ text: 'Please continue.' }]; // This recursive call's events will be yielded out, but the final // turn object will be from the top-level call. @@ -401,10 +419,7 @@ export class GeminiClient { model || this.config.getModel() || DEFAULT_GEMINI_FLASH_MODEL; try { const userMemory = this.config.getUserMemory(); - const systemPromptMappings = this.config.getSystemPromptMappings(); - const systemInstruction = getCoreSystemPrompt(userMemory, { - systemPromptMappings, - }); + const systemInstruction = getCoreSystemPrompt(userMemory); const requestConfig = { abortSignal, ...this.generateContentConfig, @@ -443,30 +458,7 @@ export class GeminiClient { throw error; } try { - // Try to extract JSON from various formats - const extractors = [ - // Match ```json ... ``` or ``` ... 
``` blocks - /```(?:json)?\s*\n?([\s\S]*?)\n?```/, - // Match inline code blocks `{...}` - /`(\{[\s\S]*?\})`/, - // Match raw JSON objects or arrays - /(\{[\s\S]*\}|\[[\s\S]*\])/, - ]; - - for (const regex of extractors) { - const match = text.match(regex); - if (match && match[1]) { - try { - return JSON.parse(match[1].trim()); - } catch { - // Continue to next pattern if parsing fails - continue; - } - } - } - - // If no patterns matched, try parsing the entire text - return JSON.parse(text.trim()); + return JSON.parse(text); } catch (parseError) { await reportError( parseError, @@ -520,10 +512,7 @@ export class GeminiClient { try { const userMemory = this.config.getUserMemory(); - const systemPromptMappings = this.config.getSystemPromptMappings(); - const systemInstruction = getCoreSystemPrompt(userMemory, { - systemPromptMappings, - }); + const systemInstruction = getCoreSystemPrompt(userMemory); const requestConfig = { abortSignal, @@ -689,8 +678,8 @@ export class GeminiClient { } /** - * Handles fallback to Flash model when persistent 429 errors occur for OAuth users. - * Uses a fallback handler if provided by the config, otherwise returns null. + * Handles falling back to Flash model when persistent 429 errors occur for OAuth users. + * Uses a fallback handler if provided by the config; otherwise, returns null. 
*/ private async handleFlashFallback( authType?: string, diff --git a/packages/core/src/core/contentGenerator.test.ts b/packages/core/src/core/contentGenerator.test.ts index 92144aa45..78eee3860 100644 --- a/packages/core/src/core/contentGenerator.test.ts +++ b/packages/core/src/core/contentGenerator.test.ts @@ -64,12 +64,19 @@ describe('createContentGenerator', () => { describe('createContentGeneratorConfig', () => { const originalEnv = process.env; + const mockConfig = { + getModel: vi.fn().mockReturnValue('gemini-pro'), + setModel: vi.fn(), + flashFallbackHandler: vi.fn(), + getProxy: vi.fn(), + } as unknown as Config; beforeEach(() => { // Reset modules to re-evaluate imports and environment variables vi.resetModules(); // Restore process.env before each test process.env = { ...originalEnv }; + vi.clearAllMocks(); }); afterAll(() => { @@ -80,7 +87,7 @@ describe('createContentGeneratorConfig', () => { it('should configure for Gemini using GEMINI_API_KEY when set', async () => { process.env.GEMINI_API_KEY = 'env-gemini-key'; const config = await createContentGeneratorConfig( - undefined, + mockConfig, AuthType.USE_GEMINI, ); expect(config.apiKey).toBe('env-gemini-key'); @@ -90,7 +97,7 @@ describe('createContentGeneratorConfig', () => { it('should not configure for Gemini if GEMINI_API_KEY is empty', async () => { process.env.GEMINI_API_KEY = ''; const config = await createContentGeneratorConfig( - undefined, + mockConfig, AuthType.USE_GEMINI, ); expect(config.apiKey).toBeUndefined(); @@ -100,7 +107,7 @@ describe('createContentGeneratorConfig', () => { it('should configure for Vertex AI using GOOGLE_API_KEY when set', async () => { process.env.GOOGLE_API_KEY = 'env-google-key'; const config = await createContentGeneratorConfig( - undefined, + mockConfig, AuthType.USE_VERTEX_AI, ); expect(config.apiKey).toBe('env-google-key'); @@ -111,7 +118,7 @@ describe('createContentGeneratorConfig', () => { process.env.GOOGLE_CLOUD_PROJECT = 'env-gcp-project'; 
process.env.GOOGLE_CLOUD_LOCATION = 'env-gcp-location'; const config = await createContentGeneratorConfig( - undefined, + mockConfig, AuthType.USE_VERTEX_AI, ); expect(config.vertexai).toBe(true); @@ -123,7 +130,7 @@ describe('createContentGeneratorConfig', () => { process.env.GOOGLE_CLOUD_PROJECT = ''; process.env.GOOGLE_CLOUD_LOCATION = ''; const config = await createContentGeneratorConfig( - undefined, + mockConfig, AuthType.USE_VERTEX_AI, ); expect(config.apiKey).toBeUndefined(); diff --git a/packages/core/src/core/contentGenerator.ts b/packages/core/src/core/contentGenerator.ts index be1fe54ad..548eb1627 100644 --- a/packages/core/src/core/contentGenerator.ts +++ b/packages/core/src/core/contentGenerator.ts @@ -35,7 +35,7 @@ export interface ContentGenerator { embedContent(request: EmbedContentParameters): Promise; - getTier?(): Promise; + userTier?: UserTierId; } export enum AuthType { @@ -65,24 +65,26 @@ export type ContentGeneratorConfig = { temperature?: number; max_tokens?: number; }; + proxy?: string | undefined; }; -export async function createContentGeneratorConfig( - model: string | undefined, +export function createContentGeneratorConfig( + config: Config, authType: AuthType | undefined, -): Promise { +): ContentGeneratorConfig { const geminiApiKey = process.env.GEMINI_API_KEY || undefined; const googleApiKey = process.env.GOOGLE_API_KEY || undefined; const googleCloudProject = process.env.GOOGLE_CLOUD_PROJECT || undefined; const googleCloudLocation = process.env.GOOGLE_CLOUD_LOCATION || undefined; const openaiApiKey = process.env.OPENAI_API_KEY; - // Use runtime model from config if available, otherwise fallback to parameter or default - const effectiveModel = model || DEFAULT_GEMINI_MODEL; + // Use runtime model from config if available; otherwise, fall back to parameter or default + const effectiveModel = config.getModel() || DEFAULT_GEMINI_MODEL; const contentGeneratorConfig: ContentGeneratorConfig = { model: effectiveModel, authType, + proxy: 
config?.getProxy(), }; // If we are using Google auth or we are in Cloud Shell, there is nothing else to validate for now @@ -96,9 +98,10 @@ export async function createContentGeneratorConfig( if (authType === AuthType.USE_GEMINI && geminiApiKey) { contentGeneratorConfig.apiKey = geminiApiKey; contentGeneratorConfig.vertexai = false; - contentGeneratorConfig.model = await getEffectiveModel( + getEffectiveModel( contentGeneratorConfig.apiKey, contentGeneratorConfig.model, + contentGeneratorConfig.proxy, ); return contentGeneratorConfig; diff --git a/packages/core/src/core/coreToolScheduler.test.ts b/packages/core/src/core/coreToolScheduler.test.ts index 0b2c51245..7b6a130c1 100644 --- a/packages/core/src/core/coreToolScheduler.test.ts +++ b/packages/core/src/core/coreToolScheduler.test.ts @@ -19,6 +19,7 @@ import { ToolConfirmationPayload, ToolResult, Config, + Icon, } from '../index.js'; import { Part, PartListUnion } from '@google/genai'; @@ -29,7 +30,7 @@ class MockTool extends BaseTool, ToolResult> { executeFn = vi.fn(); constructor(name = 'mockTool') { - super(name, name, 'A mock tool', {}); + super(name, name, 'A mock tool', Icon.Hammer, {}); } async shouldConfirmExecute( @@ -91,6 +92,8 @@ class MockModifiableTool title: 'Confirm Mock Tool', fileName: 'test.txt', fileDiff: 'diff', + originalContent: 'originalContent', + newContent: 'newContent', onConfirm: async () => {}, }; } @@ -404,3 +407,123 @@ describe('convertToFunctionResponse', () => { }); }); }); + +describe('CoreToolScheduler edit cancellation', () => { + it('should preserve diff when an edit is cancelled', async () => { + class MockEditTool extends BaseTool, ToolResult> { + constructor() { + super( + 'mockEditTool', + 'mockEditTool', + 'A mock edit tool', + Icon.Pencil, + {}, + ); + } + + async shouldConfirmExecute( + _params: Record, + _abortSignal: AbortSignal, + ): Promise { + return { + type: 'edit', + title: 'Confirm Edit', + fileName: 'test.txt', + fileDiff: + '--- test.txt\n+++ test.txt\n@@ 
-1,1 +1,1 @@\n-old content\n+new content', + originalContent: 'old content', + newContent: 'new content', + onConfirm: async () => {}, + }; + } + + async execute( + _params: Record, + _abortSignal: AbortSignal, + ): Promise { + return { + llmContent: 'Edited successfully', + returnDisplay: 'Edited successfully', + }; + } + } + + const mockEditTool = new MockEditTool(); + const toolRegistry = { + getTool: () => mockEditTool, + getFunctionDeclarations: () => [], + tools: new Map(), + discovery: {} as any, + registerTool: () => {}, + getToolByName: () => mockEditTool, + getToolByDisplayName: () => mockEditTool, + getTools: () => [], + discoverTools: async () => {}, + getAllTools: () => [], + getToolsByServer: () => [], + }; + + const onAllToolCallsComplete = vi.fn(); + const onToolCallsUpdate = vi.fn(); + + const mockConfig = { + getSessionId: () => 'test-session-id', + getUsageStatisticsEnabled: () => true, + getDebugMode: () => false, + } as unknown as Config; + + const scheduler = new CoreToolScheduler({ + config: mockConfig, + toolRegistry: Promise.resolve(toolRegistry as any), + onAllToolCallsComplete, + onToolCallsUpdate, + getPreferredEditor: () => 'vscode', + }); + + const abortController = new AbortController(); + const request = { + callId: '1', + name: 'mockEditTool', + args: {}, + isClientInitiated: false, + prompt_id: 'prompt-id-1', + }; + + await scheduler.schedule([request], abortController.signal); + + // Wait for the tool to reach awaiting_approval state + const awaitingCall = onToolCallsUpdate.mock.calls.find( + (call) => call[0][0].status === 'awaiting_approval', + )?.[0][0]; + + expect(awaitingCall).toBeDefined(); + + // Cancel the edit + const confirmationDetails = await mockEditTool.shouldConfirmExecute( + {}, + abortController.signal, + ); + if (confirmationDetails) { + await scheduler.handleConfirmationResponse( + '1', + confirmationDetails.onConfirm, + ToolConfirmationOutcome.Cancel, + abortController.signal, + ); + } + + 
expect(onAllToolCallsComplete).toHaveBeenCalled(); + const completedCalls = onAllToolCallsComplete.mock + .calls[0][0] as ToolCall[]; + + expect(completedCalls[0].status).toBe('cancelled'); + + // Check that the diff is preserved + const cancelledCall = completedCalls[0] as any; + expect(cancelledCall.response.resultDisplay).toBeDefined(); + expect(cancelledCall.response.resultDisplay.fileDiff).toBe( + '--- test.txt\n+++ test.txt\n@@ -1,1 +1,1 @@\n-old content\n+new content', + ); + expect(cancelledCall.response.resultDisplay.fileName).toBe('test.txt'); + }); +}); diff --git a/packages/core/src/core/coreToolScheduler.ts b/packages/core/src/core/coreToolScheduler.ts index 8f9ec1e20..0d7d59237 100644 --- a/packages/core/src/core/coreToolScheduler.ts +++ b/packages/core/src/core/coreToolScheduler.ts @@ -11,6 +11,7 @@ import { Tool, ToolCallConfirmationDetails, ToolResult, + ToolResultDisplay, ToolRegistry, ApprovalMode, EditorType, @@ -335,6 +336,22 @@ export class CoreToolScheduler { const durationMs = existingStartTime ? 
Date.now() - existingStartTime : undefined; + + // Preserve diff for cancelled edit operations + let resultDisplay: ToolResultDisplay | undefined = undefined; + if (currentCall.status === 'awaiting_approval') { + const waitingCall = currentCall as WaitingToolCall; + if (waitingCall.confirmationDetails.type === 'edit') { + resultDisplay = { + fileDiff: waitingCall.confirmationDetails.fileDiff, + fileName: waitingCall.confirmationDetails.fileName, + originalContent: + waitingCall.confirmationDetails.originalContent, + newContent: waitingCall.confirmationDetails.newContent, + }; + } + } + return { request: currentCall.request, tool: toolInstance, @@ -350,7 +367,7 @@ export class CoreToolScheduler { }, }, }, - resultDisplay: undefined, + resultDisplay, error: undefined, }, durationMs, diff --git a/packages/core/src/core/geminiChat.ts b/packages/core/src/core/geminiChat.ts index f93470ddc..4c3cd4c88 100644 --- a/packages/core/src/core/geminiChat.ts +++ b/packages/core/src/core/geminiChat.ts @@ -15,6 +15,7 @@ import { createUserContent, Part, GenerateContentResponseUsageMetadata, + Tool, } from '@google/genai'; import { retryWithBackoff } from '../utils/retry.js'; import { isFunctionResponse } from '../utils/messageInspectors.js'; @@ -25,10 +26,6 @@ import { logApiResponse, logApiError, } from '../telemetry/loggers.js'; -import { - getStructuredResponse, - getStructuredResponseFromParts, -} from '../utils/generateContentResponseUtilities.js'; import { ApiErrorEvent, ApiRequestEvent, @@ -141,11 +138,7 @@ export class GeminiChat { } private _getRequestTextFromContents(contents: Content[]): string { - return contents - .flatMap((content) => content.parts ?? []) - .map((part) => part.text) - .filter(Boolean) - .join(''); + return JSON.stringify(contents); } private async _logApiRequest( @@ -201,8 +194,8 @@ export class GeminiChat { } /** - * Handles fallback to Flash model when persistent 429 errors occur for OAuth users. 
- * Uses a fallback handler if provided by the config, otherwise returns null. + * Handles falling back to Flash model when persistent 429 errors occur for OAuth users. + * Uses a fallback handler if provided by the config; otherwise, returns null. */ private async handleFlashFallback( authType?: string, @@ -317,7 +310,7 @@ export class GeminiChat { durationMs, prompt_id, response.usageMetadata, - getStructuredResponse(response), + JSON.stringify(response), ); this.sendPromise = (async () => { @@ -498,6 +491,10 @@ export class GeminiChat { this.history = history; } + setTools(tools: Tool[]): void { + this.generationConfig.tools = tools; + } + getFinalUsageMetadata( chunks: GenerateContentResponse[], ): GenerateContentResponseUsageMetadata | undefined { @@ -549,12 +546,11 @@ export class GeminiChat { allParts.push(...content.parts); } } - const fullText = getStructuredResponseFromParts(allParts); await this._logApiResponse( durationMs, prompt_id, this.getFinalUsageMetadata(chunks), - fullText, + JSON.stringify(chunks), ); } this.recordHistory(inputContent, outputContent); diff --git a/packages/core/src/core/geminiRequest.ts b/packages/core/src/core/geminiRequest.ts index e85bd51ea..f3c52fbb6 100644 --- a/packages/core/src/core/geminiRequest.ts +++ b/packages/core/src/core/geminiRequest.ts @@ -4,7 +4,8 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { type PartListUnion, type Part } from '@google/genai'; +import { type PartListUnion } from '@google/genai'; +import { partToString } from '../utils/partUtils.js'; /** * Represents a request to be sent to the Gemini API. 
@@ -14,58 +15,5 @@ import { type PartListUnion, type Part } from '@google/genai'; export type GeminiCodeRequest = PartListUnion; export function partListUnionToString(value: PartListUnion): string { - if (typeof value === 'string') { - return value; - } - - if (Array.isArray(value)) { - return value.map(partListUnionToString).join(''); - } - - // Cast to Part, assuming it might contain project-specific fields - const part = value as Part & { - videoMetadata?: unknown; - thought?: string; - codeExecutionResult?: unknown; - executableCode?: unknown; - }; - - if (part.videoMetadata !== undefined) { - return `[Video Metadata]`; - } - - if (part.thought !== undefined) { - return `[Thought: ${part.thought}]`; - } - - if (part.codeExecutionResult !== undefined) { - return `[Code Execution Result]`; - } - - if (part.executableCode !== undefined) { - return `[Executable Code]`; - } - - // Standard Part fields - if (part.fileData !== undefined) { - return `[File Data]`; - } - - if (part.functionCall !== undefined) { - return `[Function Call: ${part.functionCall.name}]`; - } - - if (part.functionResponse !== undefined) { - return `[Function Response: ${part.functionResponse.name}]`; - } - - if (part.inlineData !== undefined) { - return `<${part.inlineData.mimeType}>`; - } - - if (part.text !== undefined) { - return part.text; - } - - return ''; + return partToString(value, { verbose: true }); } diff --git a/packages/core/src/core/logger.test.ts b/packages/core/src/core/logger.test.ts index 862f31780..c64f4b6d2 100644 --- a/packages/core/src/core/logger.test.ts +++ b/packages/core/src/core/logger.test.ts @@ -453,7 +453,7 @@ describe('Logger', () => { }); it('should return an empty array if a tagged checkpoint file does not exist', async () => { - const loaded = await logger.loadCheckpoint('non-existent-tag'); + const loaded = await logger.loadCheckpoint('nonexistent-tag'); expect(loaded).toEqual([]); }); diff --git a/packages/core/src/core/logger.ts 
b/packages/core/src/core/logger.ts index 2aecbc317..c9124ac11 100644 --- a/packages/core/src/core/logger.ts +++ b/packages/core/src/core/logger.ts @@ -238,6 +238,12 @@ export class Logger { if (!this.qwenDir) { throw new Error('Checkpoint file path not set.'); } + // Sanitize tag to prevent directory traversal attacks + tag = tag.replace(/[^a-zA-Z0-9-_]/g, ''); + if (!tag) { + console.error('Sanitized tag is empty setting to "default".'); + tag = 'default'; + } return path.join(this.qwenDir, `checkpoint-${tag}.json`); } diff --git a/packages/core/src/core/modelCheck.ts b/packages/core/src/core/modelCheck.ts index cc63d7d50..25d86993a 100644 --- a/packages/core/src/core/modelCheck.ts +++ b/packages/core/src/core/modelCheck.ts @@ -4,7 +4,11 @@ * SPDX-License-Identifier: Apache-2.0 */ -// 移除未使用的导入 +import { setGlobalDispatcher, ProxyAgent } from 'undici'; +import { + DEFAULT_GEMINI_MODEL, + DEFAULT_GEMINI_FLASH_MODEL, +} from '../config/models.js'; /** * Checks if the default "pro" model is rate-limited and returns a fallback "flash" @@ -15,9 +19,58 @@ * and the original model if a switch happened. */ export async function getEffectiveModel( - _apiKey: string, + apiKey: string, currentConfiguredModel: string, + proxy?: string, ): Promise { - // Disable Google API Model Check - return currentConfiguredModel; + if (currentConfiguredModel !== DEFAULT_GEMINI_MODEL) { + // Only check if the user is trying to use the specific pro model we want to fallback from. 
+ return currentConfiguredModel; + } + + const modelToTest = DEFAULT_GEMINI_MODEL; + const fallbackModel = DEFAULT_GEMINI_FLASH_MODEL; + const endpoint = `https://generativelanguage.googleapis.com/v1beta/models/${modelToTest}:generateContent`; + const body = JSON.stringify({ + contents: [{ parts: [{ text: 'test' }] }], + generationConfig: { + maxOutputTokens: 1, + temperature: 0, + topK: 1, + thinkingConfig: { thinkingBudget: 128, includeThoughts: false }, + }, + }); + + const controller = new AbortController(); + const timeoutId = setTimeout(() => controller.abort(), 2000); // 500ms timeout for the request + + try { + if (proxy) { + setGlobalDispatcher(new ProxyAgent(proxy)); + } + const response = await fetch(endpoint, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + 'x-goog-api-key': apiKey, + }, + body, + signal: controller.signal, + }); + + clearTimeout(timeoutId); + + if (response.status === 429) { + console.log( + `[INFO] Your configured model (${modelToTest}) was temporarily unavailable. Switched to ${fallbackModel} for this session.`, + ); + return fallbackModel; + } + // For any other case (success, other error codes), we stick to the original model. + return currentConfiguredModel; + } catch (_error) { + clearTimeout(timeoutId); + // On timeout or any other fetch error, stick to the original model. 
+ return currentConfiguredModel; + } } diff --git a/packages/core/src/core/nonInteractiveToolExecutor.test.ts b/packages/core/src/core/nonInteractiveToolExecutor.test.ts index 14b048b42..1bbb9209c 100644 --- a/packages/core/src/core/nonInteractiveToolExecutor.test.ts +++ b/packages/core/src/core/nonInteractiveToolExecutor.test.ts @@ -13,6 +13,7 @@ import { Tool, ToolCallConfirmationDetails, Config, + Icon, } from '../index.js'; import { Part, Type } from '@google/genai'; @@ -32,6 +33,7 @@ describe('executeToolCall', () => { name: 'testTool', displayName: 'Test Tool', description: 'A tool for testing', + icon: Icon.Hammer, schema: { name: 'testTool', description: 'A tool for testing', @@ -51,6 +53,7 @@ describe('executeToolCall', () => { isOutputMarkdown: false, canUpdateOutput: false, getDescription: vi.fn(), + toolLocations: vi.fn(() => []), }; mockToolRegistry = { @@ -103,7 +106,7 @@ describe('executeToolCall', () => { it('should return an error if tool is not found', async () => { const request: ToolCallRequestInfo = { callId: 'call2', - name: 'nonExistentTool', + name: 'nonexistentTool', args: {}, isClientInitiated: false, prompt_id: 'prompt-id-2', @@ -120,17 +123,17 @@ describe('executeToolCall', () => { expect(response.callId).toBe('call2'); expect(response.error).toBeInstanceOf(Error); expect(response.error?.message).toBe( - 'Tool "nonExistentTool" not found in registry.', + 'Tool "nonexistentTool" not found in registry.', ); expect(response.resultDisplay).toBe( - 'Tool "nonExistentTool" not found in registry.', + 'Tool "nonexistentTool" not found in registry.', ); expect(response.responseParts).toEqual([ { functionResponse: { - name: 'nonExistentTool', + name: 'nonexistentTool', id: 'call2', - response: { error: 'Tool "nonExistentTool" not found in registry.' }, + response: { error: 'Tool "nonexistentTool" not found in registry.' 
}, }, }, ]); diff --git a/packages/core/src/core/openaiContentGenerator.ts b/packages/core/src/core/openaiContentGenerator.ts index 2df0c2415..91d798c17 100644 --- a/packages/core/src/core/openaiContentGenerator.ts +++ b/packages/core/src/core/openaiContentGenerator.ts @@ -22,10 +22,6 @@ import { } from '@google/genai'; import { ContentGenerator } from './contentGenerator.js'; import OpenAI from 'openai'; -import type { - ChatCompletion, - ChatCompletionChunk, -} from 'openai/resources/chat/index.js'; import { logApiResponse } from '../telemetry/loggers.js'; import { ApiResponseEvent } from '../telemetry/types.js'; import { Config } from '../config/config.js'; @@ -198,7 +194,7 @@ export class OpenAIContentGenerator implements ContentGenerator { // console.log('createParams', createParams); const completion = (await this.client.chat.completions.create( createParams, - )) as ChatCompletion; + )) as OpenAI.Chat.ChatCompletion; const response = this.convertToGeminiFormat(completion); const durationMs = Date.now() - startTime; @@ -326,7 +322,7 @@ export class OpenAIContentGenerator implements ContentGenerator { const stream = (await this.client.chat.completions.create( createParams, - )) as AsyncIterable; + )) as AsyncIterable; const originalStream = this.streamGenerator(stream); @@ -508,7 +504,7 @@ export class OpenAIContentGenerator implements ContentGenerator { } private async *streamGenerator( - stream: AsyncIterable, + stream: AsyncIterable, ): AsyncGenerator { // Reset the accumulator for each new stream this.streamingToolCalls.clear(); @@ -1120,7 +1116,7 @@ export class OpenAIContentGenerator implements ContentGenerator { } private convertToGeminiFormat( - openaiResponse: ChatCompletion, + openaiResponse: OpenAI.Chat.ChatCompletion, ): GenerateContentResponse { const choice = openaiResponse.choices[0]; const response = new GenerateContentResponse(); @@ -1207,7 +1203,7 @@ export class OpenAIContentGenerator implements ContentGenerator { } private 
convertStreamChunkToGeminiFormat( - chunk: ChatCompletionChunk, + chunk: OpenAI.Chat.ChatCompletionChunk, ): GenerateContentResponse { const choice = chunk.choices?.[0]; const response = new GenerateContentResponse(); diff --git a/packages/core/src/core/prompts.test.ts b/packages/core/src/core/prompts.test.ts index 66c206aa6..062601861 100644 --- a/packages/core/src/core/prompts.test.ts +++ b/packages/core/src/core/prompts.test.ts @@ -4,9 +4,13 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { describe, it, expect, vi } from 'vitest'; +import { describe, it, expect, vi, beforeEach } from 'vitest'; import { getCoreSystemPrompt } from './prompts.js'; import { isGitRepository } from '../utils/gitUtils.js'; +import fs from 'node:fs'; +import os from 'node:os'; +import path from 'node:path'; +import { GEMINI_CONFIG_DIR } from '../tools/memoryTool.js'; // Mock tool names if they are dynamically generated or complex vi.mock('../tools/ls', () => ({ LSTool: { Name: 'list_directory' } })); @@ -26,8 +30,15 @@ vi.mock('../tools/write-file', () => ({ vi.mock('../utils/gitUtils', () => ({ isGitRepository: vi.fn(), })); +vi.mock('node:fs'); describe('Core System Prompt (prompts.ts)', () => { + beforeEach(() => { + vi.resetAllMocks(); + vi.stubEnv('GEMINI_SYSTEM_MD', undefined); + vi.stubEnv('GEMINI_WRITE_SYSTEM_MD', undefined); + }); + it('should return the base prompt when no userMemory is provided', () => { vi.stubEnv('SANDBOX', undefined); const prompt = getCoreSystemPrompt(); @@ -67,7 +78,7 @@ describe('Core System Prompt (prompts.ts)', () => { vi.stubEnv('SANDBOX', 'true'); // Generic sandbox value const prompt = getCoreSystemPrompt(); expect(prompt).toContain('# Sandbox'); - expect(prompt).not.toContain('# MacOS Seatbelt'); + expect(prompt).not.toContain('# macOS Seatbelt'); expect(prompt).not.toContain('# Outside of Sandbox'); expect(prompt).toMatchSnapshot(); }); @@ -75,7 +86,7 @@ describe('Core System Prompt (prompts.ts)', () => { it('should include seatbelt-specific 
instructions when SANDBOX env var is "sandbox-exec"', () => { vi.stubEnv('SANDBOX', 'sandbox-exec'); const prompt = getCoreSystemPrompt(); - expect(prompt).toContain('# MacOS Seatbelt'); + expect(prompt).toContain('# macOS Seatbelt'); expect(prompt).not.toContain('# Sandbox'); expect(prompt).not.toContain('# Outside of Sandbox'); expect(prompt).toMatchSnapshot(); @@ -86,7 +97,7 @@ describe('Core System Prompt (prompts.ts)', () => { const prompt = getCoreSystemPrompt(); expect(prompt).toContain('# Outside of Sandbox'); expect(prompt).not.toContain('# Sandbox'); - expect(prompt).not.toContain('# MacOS Seatbelt'); + expect(prompt).not.toContain('# macOS Seatbelt'); expect(prompt).toMatchSnapshot(); }); @@ -105,97 +116,157 @@ describe('Core System Prompt (prompts.ts)', () => { expect(prompt).not.toContain('# Git Repository'); expect(prompt).toMatchSnapshot(); }); -}); -describe('URL matching with trailing slash compatibility', () => { - it('should match URLs with and without trailing slash', () => { - const config = { - systemPromptMappings: [ - { - baseUrls: ['https://api.example.com'], - modelNames: ['gpt-4'], - template: 'Custom template for example.com', - }, - { - baseUrls: ['https://api.openai.com/'], - modelNames: ['gpt-3.5-turbo'], - template: 'Custom template for openai.com', - }, - ], - }; + describe('GEMINI_SYSTEM_MD environment variable', () => { + it('should use default prompt when GEMINI_SYSTEM_MD is "false"', () => { + vi.stubEnv('GEMINI_SYSTEM_MD', 'false'); + const prompt = getCoreSystemPrompt(); + expect(fs.readFileSync).not.toHaveBeenCalled(); + expect(prompt).not.toContain('custom system prompt'); + }); - // Simulate environment variables - const originalEnv = process.env; + it('should use default prompt when GEMINI_SYSTEM_MD is "0"', () => { + vi.stubEnv('GEMINI_SYSTEM_MD', '0'); + const prompt = getCoreSystemPrompt(); + expect(fs.readFileSync).not.toHaveBeenCalled(); + expect(prompt).not.toContain('custom system prompt'); + }); - // Test case 1: 
No trailing slash in config, actual URL has trailing slash - process.env = { - ...originalEnv, - OPENAI_BASE_URL: 'https://api.example.com/', - OPENAI_MODEL: 'gpt-4', - }; + it('should throw error if GEMINI_SYSTEM_MD points to a non-existent file', () => { + const customPath = '/non/existent/path/system.md'; + vi.stubEnv('GEMINI_SYSTEM_MD', customPath); + vi.mocked(fs.existsSync).mockReturnValue(false); + expect(() => getCoreSystemPrompt()).toThrow( + `missing system prompt file '${path.resolve(customPath)}'`, + ); + }); - const result1 = getCoreSystemPrompt(undefined, config); - expect(result1).toContain('Custom template for example.com'); + it('should read from default path when GEMINI_SYSTEM_MD is "true"', () => { + const defaultPath = path.resolve( + path.join(GEMINI_CONFIG_DIR, 'system.md'), + ); + vi.stubEnv('GEMINI_SYSTEM_MD', 'true'); + vi.mocked(fs.existsSync).mockReturnValue(true); + vi.mocked(fs.readFileSync).mockReturnValue('custom system prompt'); - // Test case 2: Config has trailing slash, actual URL has no trailing slash - process.env = { - ...originalEnv, - OPENAI_BASE_URL: 'https://api.openai.com', - OPENAI_MODEL: 'gpt-3.5-turbo', - }; + const prompt = getCoreSystemPrompt(); + expect(fs.readFileSync).toHaveBeenCalledWith(defaultPath, 'utf8'); + expect(prompt).toBe('custom system prompt'); + }); - const result2 = getCoreSystemPrompt(undefined, config); - expect(result2).toContain('Custom template for openai.com'); + it('should read from default path when GEMINI_SYSTEM_MD is "1"', () => { + const defaultPath = path.resolve( + path.join(GEMINI_CONFIG_DIR, 'system.md'), + ); + vi.stubEnv('GEMINI_SYSTEM_MD', '1'); + vi.mocked(fs.existsSync).mockReturnValue(true); + vi.mocked(fs.readFileSync).mockReturnValue('custom system prompt'); - // Test case 3: No trailing slash in config, actual URL has no trailing slash - process.env = { - ...originalEnv, - OPENAI_BASE_URL: 'https://api.example.com', - OPENAI_MODEL: 'gpt-4', - }; + const prompt = 
getCoreSystemPrompt(); + expect(fs.readFileSync).toHaveBeenCalledWith(defaultPath, 'utf8'); + expect(prompt).toBe('custom system prompt'); + }); - const result3 = getCoreSystemPrompt(undefined, config); - expect(result3).toContain('Custom template for example.com'); + it('should read from custom path when GEMINI_SYSTEM_MD provides one, preserving case', () => { + const customPath = path.resolve('/custom/path/SyStEm.Md'); + vi.stubEnv('GEMINI_SYSTEM_MD', customPath); + vi.mocked(fs.existsSync).mockReturnValue(true); + vi.mocked(fs.readFileSync).mockReturnValue('custom system prompt'); - // Test case 4: Config has trailing slash, actual URL has trailing slash - process.env = { - ...originalEnv, - OPENAI_BASE_URL: 'https://api.openai.com/', - OPENAI_MODEL: 'gpt-3.5-turbo', - }; + const prompt = getCoreSystemPrompt(); + expect(fs.readFileSync).toHaveBeenCalledWith(customPath, 'utf8'); + expect(prompt).toBe('custom system prompt'); + }); - const result4 = getCoreSystemPrompt(undefined, config); - expect(result4).toContain('Custom template for openai.com'); + it('should expand tilde in custom path when GEMINI_SYSTEM_MD is set', () => { + const homeDir = '/Users/test'; + vi.spyOn(os, 'homedir').mockReturnValue(homeDir); + const customPath = '~/custom/system.md'; + const expectedPath = path.join(homeDir, 'custom/system.md'); + vi.stubEnv('GEMINI_SYSTEM_MD', customPath); + vi.mocked(fs.existsSync).mockReturnValue(true); + vi.mocked(fs.readFileSync).mockReturnValue('custom system prompt'); - // Restore original environment variables - process.env = originalEnv; + const prompt = getCoreSystemPrompt(); + expect(fs.readFileSync).toHaveBeenCalledWith( + path.resolve(expectedPath), + 'utf8', + ); + expect(prompt).toBe('custom system prompt'); + }); }); - it('should not match when URLs are different', () => { - const config = { - systemPromptMappings: [ - { - baseUrls: ['https://api.example.com'], - modelNames: ['gpt-4'], - template: 'Custom template for example.com', - }, - ], - 
}; + describe('GEMINI_WRITE_SYSTEM_MD environment variable', () => { + it('should not write to file when GEMINI_WRITE_SYSTEM_MD is "false"', () => { + vi.stubEnv('GEMINI_WRITE_SYSTEM_MD', 'false'); + getCoreSystemPrompt(); + expect(fs.writeFileSync).not.toHaveBeenCalled(); + }); - const originalEnv = process.env; + it('should not write to file when GEMINI_WRITE_SYSTEM_MD is "0"', () => { + vi.stubEnv('GEMINI_WRITE_SYSTEM_MD', '0'); + getCoreSystemPrompt(); + expect(fs.writeFileSync).not.toHaveBeenCalled(); + }); - // Test case: URLs do not match - process.env = { - ...originalEnv, - OPENAI_BASE_URL: 'https://api.different.com', - OPENAI_MODEL: 'gpt-4', - }; + it('should write to default path when GEMINI_WRITE_SYSTEM_MD is "true"', () => { + const defaultPath = path.resolve( + path.join(GEMINI_CONFIG_DIR, 'system.md'), + ); + vi.stubEnv('GEMINI_WRITE_SYSTEM_MD', 'true'); + getCoreSystemPrompt(); + expect(fs.writeFileSync).toHaveBeenCalledWith( + defaultPath, + expect.any(String), + ); + }); - const result = getCoreSystemPrompt(undefined, config); - // Should return default template, not contain custom template - expect(result).not.toContain('Custom template for example.com'); + it('should write to default path when GEMINI_WRITE_SYSTEM_MD is "1"', () => { + const defaultPath = path.resolve( + path.join(GEMINI_CONFIG_DIR, 'system.md'), + ); + vi.stubEnv('GEMINI_WRITE_SYSTEM_MD', '1'); + getCoreSystemPrompt(); + expect(fs.writeFileSync).toHaveBeenCalledWith( + defaultPath, + expect.any(String), + ); + }); - // Restore original environment variables - process.env = originalEnv; + it('should write to custom path when GEMINI_WRITE_SYSTEM_MD provides one', () => { + const customPath = path.resolve('/custom/path/system.md'); + vi.stubEnv('GEMINI_WRITE_SYSTEM_MD', customPath); + getCoreSystemPrompt(); + expect(fs.writeFileSync).toHaveBeenCalledWith( + customPath, + expect.any(String), + ); + }); + + it('should expand tilde in custom path when GEMINI_WRITE_SYSTEM_MD is set', 
() => { + const homeDir = '/Users/test'; + vi.spyOn(os, 'homedir').mockReturnValue(homeDir); + const customPath = '~/custom/system.md'; + const expectedPath = path.join(homeDir, 'custom/system.md'); + vi.stubEnv('GEMINI_WRITE_SYSTEM_MD', customPath); + getCoreSystemPrompt(); + expect(fs.writeFileSync).toHaveBeenCalledWith( + path.resolve(expectedPath), + expect.any(String), + ); + }); + + it('should expand tilde in custom path when GEMINI_WRITE_SYSTEM_MD is just ~', () => { + const homeDir = '/Users/test'; + vi.spyOn(os, 'homedir').mockReturnValue(homeDir); + const customPath = '~'; + const expectedPath = homeDir; + vi.stubEnv('GEMINI_WRITE_SYSTEM_MD', customPath); + getCoreSystemPrompt(); + expect(fs.writeFileSync).toHaveBeenCalledWith( + path.resolve(expectedPath), + expect.any(String), + ); + }); }); }); diff --git a/packages/core/src/core/prompts.ts b/packages/core/src/core/prompts.ts index 7a397455b..b130dbb12 100644 --- a/packages/core/src/core/prompts.ts +++ b/packages/core/src/core/prompts.ts @@ -6,6 +6,8 @@ import path from 'node:path'; import fs from 'node:fs'; +import os from 'node:os'; +import { LSTool } from '../tools/ls.js'; import { EditTool } from '../tools/edit.js'; import { GlobTool } from '../tools/glob.js'; import { GrepTool } from '../tools/grep.js'; @@ -16,98 +18,32 @@ import { WriteFileTool } from '../tools/write-file.js'; import process from 'node:process'; import { isGitRepository } from '../utils/gitUtils.js'; import { MemoryTool, GEMINI_CONFIG_DIR } from '../tools/memoryTool.js'; -import { DEFAULT_GEMINI_MODEL } from '../config/models.js'; -export interface ModelTemplateMapping { - baseUrls?: string[]; - modelNames?: string[]; - template?: string; -} - -export interface SystemPromptConfig { - systemPromptMappings?: ModelTemplateMapping[]; -} - -/** - * Normalizes a URL by removing trailing slash for consistent comparison - */ -function normalizeUrl(url: string): string { - return url.endsWith('/') ? 
url.slice(0, -1) : url; -} - -/** - * Checks if a URL matches any URL in the array, ignoring trailing slashes - */ -function urlMatches(urlArray: string[], targetUrl: string): boolean { - const normalizedTarget = normalizeUrl(targetUrl); - return urlArray.some((url) => normalizeUrl(url) === normalizedTarget); -} - -export function getCoreSystemPrompt( - userMemory?: string, - config?: SystemPromptConfig, -): string { +export function getCoreSystemPrompt(userMemory?: string): string { // if GEMINI_SYSTEM_MD is set (and not 0|false), override system prompt from file - // default path is .qwen/system.md but can be modified via custom path in GEMINI_SYSTEM_MD + // default path is .gemini/system.md but can be modified via custom path in GEMINI_SYSTEM_MD let systemMdEnabled = false; let systemMdPath = path.resolve(path.join(GEMINI_CONFIG_DIR, 'system.md')); - const systemMdVar = process.env.GEMINI_SYSTEM_MD?.toLowerCase(); - if (systemMdVar && !['0', 'false'].includes(systemMdVar)) { - systemMdEnabled = true; // enable system prompt override - if (!['1', 'true'].includes(systemMdVar)) { - systemMdPath = path.resolve(systemMdVar); // use custom path from GEMINI_SYSTEM_MD - } - // require file to exist when override is enabled - if (!fs.existsSync(systemMdPath)) { - throw new Error(`missing system prompt file '${systemMdPath}'`); + const systemMdVar = process.env.GEMINI_SYSTEM_MD; + if (systemMdVar) { + const systemMdVarLower = systemMdVar.toLowerCase(); + if (!['0', 'false'].includes(systemMdVarLower)) { + systemMdEnabled = true; // enable system prompt override + if (!['1', 'true'].includes(systemMdVarLower)) { + let customPath = systemMdVar; + if (customPath.startsWith('~/')) { + customPath = path.join(os.homedir(), customPath.slice(2)); + } else if (customPath === '~') { + customPath = os.homedir(); + } + systemMdPath = path.resolve(customPath); // use custom path from GEMINI_SYSTEM_MD + } + // require file to exist when override is enabled + if 
(!fs.existsSync(systemMdPath)) { + throw new Error(`missing system prompt file '${systemMdPath}'`); + } } } - - // Check for system prompt mappings from global config - if (config?.systemPromptMappings) { - const currentModel = process.env.OPENAI_MODEL || DEFAULT_GEMINI_MODEL; - const currentBaseUrl = process.env.OPENAI_BASE_URL || ''; - - const matchedMapping = config.systemPromptMappings.find((mapping) => { - const { baseUrls, modelNames } = mapping; - // Check if baseUrl matches (when specified) - if ( - baseUrls && - modelNames && - urlMatches(baseUrls, currentBaseUrl) && - modelNames.includes(currentModel) - ) { - return true; - } - - if (baseUrls && urlMatches(baseUrls, currentBaseUrl) && !modelNames) { - return true; - } - if (modelNames && modelNames.includes(currentModel) && !baseUrls) { - return true; - } - - return false; - }); - - if (matchedMapping?.template) { - const isGitRepo = isGitRepository(process.cwd()); - - // Replace placeholders in template - let template = matchedMapping.template; - template = template.replace( - '{RUNTIME_VARS_IS_GIT_REPO}', - String(isGitRepo), - ); - template = template.replace( - '{RUNTIME_VARS_SANDBOX}', - process.env.SANDBOX || '', - ); - - return template; - } - } - const basePrompt = systemMdEnabled ? fs.readFileSync(systemMdPath, 'utf8') : ` @@ -190,8 +126,8 @@ ${(function () { if (isSandboxExec) { return ` -# MacOS Seatbelt -You are running under macos seatbelt with limited access to files outside the project directory or system temp directory, and with limited access to host system resources such as ports. If you encounter failures that could be due to MacOS Seatbelt (e.g. if a command fails with 'Operation not permitted' or similar error), as you report the error to the user, also explain why you think it could be due to MacOS Seatbelt, and how the user may need to adjust their Seatbelt profile. 
+# macOS Seatbelt +You are running under macos seatbelt with limited access to files outside the project directory or system temp directory, and with limited access to host system resources such as ports. If you encounter failures that could be due to macOS Seatbelt (e.g. if a command fails with 'Operation not permitted' or similar error), as you report the error to the user, also explain why you think it could be due to macOS Seatbelt, and how the user may need to adjust their Seatbelt profile. `; } else if (isGenericSandbox) { return ` @@ -241,56 +177,24 @@ model: true user: list files here. -model: - - - -. - - - +model: [tool_call: ${LSTool.Name} for path '/path/to/project'] user: start the server implemented in server.js -model: - - - -node server.js & - - - +model: [tool_call: ${ShellTool.Name} for 'node server.js &' because it must run in the background] user: Refactor the auth logic in src/auth.py to use the requests library instead of urllib. model: Okay, I can refactor 'src/auth.py'. First, I'll analyze the code and check for a test safety net before planning any changes. - - - -tests/test_auth.py - - - - - - -/path/to/tests/test_auth.py - - - +[tool_call: ${GlobTool.Name} for path 'tests/test_auth.py'] +[tool_call: ${ReadFileTool.Name} for absolute_path '/path/to/tests/test_auth.py'] (After analysis) Great, 'tests/test_auth.py' exists and covers the core authentication logic. With this safety net in place, I can safely plan the refactoring. I'll also confirm 'requests' is a dependency. - - - -/path/to/requirements.txt - - - +[tool_call: ${ReadFileTool.Name} for absolute_path '/path/to/requirements.txt'] (After analysis) Looks good, 'requests' is available. @@ -300,30 +204,20 @@ Here's the plan: 3. Remove the old 'urllib' import. 4. Run the project's linter and tests to verify the changes. - - - -src/auth.py - - -(old code content) - - -(new code content) - - - +Should I proceed? 
+user: Yes +model: +[tool_call: ${WriteFileTool.Name} or ${EditTool.Name} to apply the refactoring to 'src/auth.py'] Refactoring complete. Running verification... - - -ruff check src/auth.py && pytest - - - +[tool_call: ${ShellTool.Name} for 'ruff check src/auth.py && pytest'] (After verification passes) All checks passed. This is a stable checkpoint. -Would you like me to write a commit message and commit these changes? +${(function () { + if (isGitRepository(process.cwd())) { + return `Would you like me to write a commit message and commit these changes?`; + } + return ''; +})()} @@ -334,48 +228,32 @@ model: I can run \`rm -rf /path/to/project/temp\`. This will permanently delete user: Write tests for someFile.ts model: -Okay, I can write those tests. First, I'll read someFile.ts to understand its functionality. - - - -/path/to/someFile.ts - - - +Okay, I can write those tests. First, I'll read \`someFile.ts\` to understand its functionality. +[tool_call: ${ReadFileTool.Name} for absolute_path '/path/to/someFile.ts' or use ${GlobTool.Name} to find \`someFile.ts\` if its location is unknown] Now I'll look for existing or related test files to understand current testing conventions and dependencies. - -read_many_files for paths ['**/*.test.ts', 'src/**/*.spec.ts'] assuming someFile.ts is in the src directory] - +[tool_call: ${ReadManyFilesTool.Name} for paths ['**/*.test.ts', 'src/**/*.spec.ts'] assuming someFile.ts is in the src directory] (After reviewing existing tests and the file content) - - - -/path/to/someFile.test.ts - - - +[tool_call: ${WriteFileTool.Name} to create /path/to/someFile.test.ts with the test code] I've written the tests. Now I'll run the project's test command to verify them. - - - -npm run test - - - -(After verification passes) -All checks passed. This is a stable checkpoint. +[tool_call: ${ShellTool.Name} for 'npm run test'] + + + +user: How do I update the user's profile information in this system? 
+model: +I'm not immediately sure how user profile information is updated. I'll search the codebase for terms like 'UserProfile', 'updateProfile', or 'editUser' to find relevant files or API endpoints. +[tool_call: ${GrepTool.Name} for pattern 'UserProfile|updateProfile|editUser'] +(After reviewing search results, assuming a relevant file like '/path/to/UserProfileService.java' was found) +Okay, \`/path/to/UserProfileService.java\` seems like the most relevant file. I'll read its content to understand how updates are handled. +[tool_call: ${ReadFileTool.Name} for absolute_path '/path/to/UserProfileService.java'] +(After reading the file) +It appears the \`updateUserProfile\` method in \`UserProfileService.java\` is responsible for this. It expects a user ID and a \`UserProfileDTO\` object... user: Where are all the 'app.config' files in this project? I need to check their settings. model: - - - -./**/app.config - - - +[tool_call: ${GlobTool.Name} for pattern '**/app.config'] (Assuming GlobTool returns a list of paths like ['/path/to/moduleA/app.config', '/path/to/moduleB/app.config']) I found the following 'app.config' files: - /path/to/moduleA/app.config @@ -388,13 +266,24 @@ Your core function is efficient and safe assistance. 
Balance extreme conciseness `.trim(); // if GEMINI_WRITE_SYSTEM_MD is set (and not 0|false), write base system prompt to file - - const writeSystemMdVar = process.env.GEMINI_WRITE_SYSTEM_MD?.toLowerCase(); - if (writeSystemMdVar && !['0', 'false'].includes(writeSystemMdVar)) { - if (['1', 'true'].includes(writeSystemMdVar)) { - fs.writeFileSync(systemMdPath, basePrompt); // write to default path, can be modified via GEMINI_SYSTEM_MD - } else { - fs.writeFileSync(path.resolve(writeSystemMdVar), basePrompt); // write to custom path from GEMINI_WRITE_SYSTEM_MD + const writeSystemMdVar = process.env.GEMINI_WRITE_SYSTEM_MD; + if (writeSystemMdVar) { + const writeSystemMdVarLower = writeSystemMdVar.toLowerCase(); + if (!['0', 'false'].includes(writeSystemMdVarLower)) { + if (['1', 'true'].includes(writeSystemMdVarLower)) { + fs.mkdirSync(path.dirname(systemMdPath), { recursive: true }); + fs.writeFileSync(systemMdPath, basePrompt); // write to default path, can be modified via GEMINI_SYSTEM_MD + } else { + let customPath = writeSystemMdVar; + if (customPath.startsWith('~/')) { + customPath = path.join(os.homedir(), customPath.slice(2)); + } else if (customPath === '~') { + customPath = os.homedir(); + } + const resolvedPath = path.resolve(customPath); + fs.mkdirSync(path.dirname(resolvedPath), { recursive: true }); + fs.writeFileSync(resolvedPath, basePrompt); // write to custom path from GEMINI_WRITE_SYSTEM_MD + } } } diff --git a/packages/core/src/core/turn.test.ts b/packages/core/src/core/turn.test.ts index b0c27f7ee..2a557927c 100644 --- a/packages/core/src/core/turn.test.ts +++ b/packages/core/src/core/turn.test.ts @@ -282,6 +282,165 @@ describe('Turn', () => { expect(turn.pendingToolCalls[2]).toEqual(event3.value); expect(turn.getDebugResponses().length).toBe(1); }); + + it('should yield finished event when response has finish reason', async () => { + const mockResponseStream = (async function* () { + yield { + candidates: [ + { + content: { parts: [{ text: 
'Partial response' }] }, + finishReason: 'STOP', + }, + ], + } as unknown as GenerateContentResponse; + })(); + mockSendMessageStream.mockResolvedValue(mockResponseStream); + + const events = []; + const reqParts: Part[] = [{ text: 'Test finish reason' }]; + for await (const event of turn.run( + reqParts, + new AbortController().signal, + )) { + events.push(event); + } + + expect(events).toEqual([ + { type: GeminiEventType.Content, value: 'Partial response' }, + { type: GeminiEventType.Finished, value: 'STOP' }, + ]); + }); + + it('should yield finished event for MAX_TOKENS finish reason', async () => { + const mockResponseStream = (async function* () { + yield { + candidates: [ + { + content: { + parts: [ + { text: 'This is a long response that was cut off...' }, + ], + }, + finishReason: 'MAX_TOKENS', + }, + ], + } as unknown as GenerateContentResponse; + })(); + mockSendMessageStream.mockResolvedValue(mockResponseStream); + + const events = []; + const reqParts: Part[] = [{ text: 'Generate long text' }]; + for await (const event of turn.run( + reqParts, + new AbortController().signal, + )) { + events.push(event); + } + + expect(events).toEqual([ + { + type: GeminiEventType.Content, + value: 'This is a long response that was cut off...', + }, + { type: GeminiEventType.Finished, value: 'MAX_TOKENS' }, + ]); + }); + + it('should yield finished event for SAFETY finish reason', async () => { + const mockResponseStream = (async function* () { + yield { + candidates: [ + { + content: { parts: [{ text: 'Content blocked' }] }, + finishReason: 'SAFETY', + }, + ], + } as unknown as GenerateContentResponse; + })(); + mockSendMessageStream.mockResolvedValue(mockResponseStream); + + const events = []; + const reqParts: Part[] = [{ text: 'Test safety' }]; + for await (const event of turn.run( + reqParts, + new AbortController().signal, + )) { + events.push(event); + } + + expect(events).toEqual([ + { type: GeminiEventType.Content, value: 'Content blocked' }, + { type: 
GeminiEventType.Finished, value: 'SAFETY' }, + ]); + }); + + it('should not yield finished event when there is no finish reason', async () => { + const mockResponseStream = (async function* () { + yield { + candidates: [ + { + content: { parts: [{ text: 'Response without finish reason' }] }, + // No finishReason property + }, + ], + } as unknown as GenerateContentResponse; + })(); + mockSendMessageStream.mockResolvedValue(mockResponseStream); + + const events = []; + const reqParts: Part[] = [{ text: 'Test no finish reason' }]; + for await (const event of turn.run( + reqParts, + new AbortController().signal, + )) { + events.push(event); + } + + expect(events).toEqual([ + { + type: GeminiEventType.Content, + value: 'Response without finish reason', + }, + ]); + // No Finished event should be emitted + }); + + it('should handle multiple responses with different finish reasons', async () => { + const mockResponseStream = (async function* () { + yield { + candidates: [ + { + content: { parts: [{ text: 'First part' }] }, + // No finish reason on first response + }, + ], + } as unknown as GenerateContentResponse; + yield { + candidates: [ + { + content: { parts: [{ text: 'Second part' }] }, + finishReason: 'OTHER', + }, + ], + } as unknown as GenerateContentResponse; + })(); + mockSendMessageStream.mockResolvedValue(mockResponseStream); + + const events = []; + const reqParts: Part[] = [{ text: 'Test multiple responses' }]; + for await (const event of turn.run( + reqParts, + new AbortController().signal, + )) { + events.push(event); + } + + expect(events).toEqual([ + { type: GeminiEventType.Content, value: 'First part' }, + { type: GeminiEventType.Content, value: 'Second part' }, + { type: GeminiEventType.Finished, value: 'OTHER' }, + ]); + }); }); describe('getDebugResponses', () => { diff --git a/packages/core/src/core/turn.ts b/packages/core/src/core/turn.ts index 520ad3bfb..bea29b66e 100644 --- a/packages/core/src/core/turn.ts +++ b/packages/core/src/core/turn.ts @@ 
-9,6 +9,7 @@ import { GenerateContentResponse, FunctionCall, FunctionDeclaration, + FinishReason, } from '@google/genai'; import { ToolCallConfirmationDetails, @@ -49,7 +50,7 @@ export enum GeminiEventType { ChatCompressed = 'chat_compressed', Thought = 'thought', MaxSessionTurns = 'max_session_turns', - SessionTokenLimitExceeded = 'session_token_limit_exceeded', + Finished = 'finished', LoopDetected = 'loop_detected', } @@ -62,12 +63,6 @@ export interface GeminiErrorEventValue { error: StructuredError; } -export interface SessionTokenLimitExceededValue { - currentTokens: number; - limit: number; - message: string; -} - export interface ToolCallRequestInfo { callId: string; name: string; @@ -141,9 +136,9 @@ export type ServerGeminiMaxSessionTurnsEvent = { type: GeminiEventType.MaxSessionTurns; }; -export type ServerGeminiSessionTokenLimitExceededEvent = { - type: GeminiEventType.SessionTokenLimitExceeded; - value: SessionTokenLimitExceededValue; +export type ServerGeminiFinishedEvent = { + type: GeminiEventType.Finished; + value: FinishReason; }; export type ServerGeminiLoopDetectedEvent = { @@ -161,7 +156,7 @@ export type ServerGeminiStreamEvent = | ServerGeminiChatCompressedEvent | ServerGeminiThoughtEvent | ServerGeminiMaxSessionTurnsEvent - | ServerGeminiSessionTokenLimitExceededEvent + | ServerGeminiFinishedEvent | ServerGeminiLoopDetectedEvent; // A turn manages the agentic loop turn within the server context. 
@@ -235,6 +230,16 @@ export class Turn { yield event; } } + + // Check if response was truncated or stopped for various reasons + const finishReason = resp.candidates?.[0]?.finishReason; + + if (finishReason) { + yield { + type: GeminiEventType.Finished, + value: finishReason as FinishReason, + }; + } } } catch (e) { const error = toFriendlyError(e); diff --git a/packages/core/src/ide/ide-client.ts b/packages/core/src/ide/ide-client.ts new file mode 100644 index 000000000..3f91f3863 --- /dev/null +++ b/packages/core/src/ide/ide-client.ts @@ -0,0 +1,109 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { ideContext, OpenFilesNotificationSchema } from '../ide/ideContext.js'; +import { Client } from '@modelcontextprotocol/sdk/client/index.js'; +import { StreamableHTTPClientTransport } from '@modelcontextprotocol/sdk/client/streamableHttp.js'; + +const logger = { + // eslint-disable-next-line @typescript-eslint/no-explicit-any + debug: (...args: any[]) => console.debug('[DEBUG] [IDEClient]', ...args), +}; + +export type IDEConnectionState = { + status: IDEConnectionStatus; + details?: string; +}; + +export enum IDEConnectionStatus { + Connected = 'connected', + Disconnected = 'disconnected', + Connecting = 'connecting', +} + +/** + * Manages the connection to and interaction with the IDE server. 
+ */ +export class IdeClient { + client: Client | undefined = undefined; + connectionStatus: IDEConnectionStatus = IDEConnectionStatus.Disconnected; + + constructor() { + this.connectToMcpServer().catch((err) => { + logger.debug('Failed to initialize IdeClient:', err); + }); + } + + getConnectionStatus(): { + status: IDEConnectionStatus; + details?: string; + } { + let details: string | undefined; + if (this.connectionStatus === IDEConnectionStatus.Disconnected) { + if (!process.env['GEMINI_CLI_IDE_SERVER_PORT']) { + details = 'GEMINI_CLI_IDE_SERVER_PORT environment variable is not set.'; + } + } + return { + status: this.connectionStatus, + details, + }; + } + + async connectToMcpServer(): Promise { + this.connectionStatus = IDEConnectionStatus.Connecting; + const idePort = process.env['GEMINI_CLI_IDE_SERVER_PORT']; + if (!idePort) { + logger.debug( + 'Unable to connect to IDE mode MCP server. GEMINI_CLI_IDE_SERVER_PORT environment variable is not set.', + ); + this.connectionStatus = IDEConnectionStatus.Disconnected; + return; + } + + let transport: StreamableHTTPClientTransport | undefined; + try { + this.client = new Client({ + name: 'streamable-http-client', + // TODO(#3487): use the CLI version here. 
+ version: '1.0.0', + }); + transport = new StreamableHTTPClientTransport( + new URL(`http://localhost:${idePort}/mcp`), + ); + await this.client.connect(transport); + + this.client.setNotificationHandler( + OpenFilesNotificationSchema, + (notification) => { + ideContext.setOpenFilesContext(notification.params); + }, + ); + this.client.onerror = (error) => { + logger.debug('IDE MCP client error:', error); + this.connectionStatus = IDEConnectionStatus.Disconnected; + ideContext.clearOpenFilesContext(); + }; + this.client.onclose = () => { + logger.debug('IDE MCP client connection closed.'); + this.connectionStatus = IDEConnectionStatus.Disconnected; + ideContext.clearOpenFilesContext(); + }; + + this.connectionStatus = IDEConnectionStatus.Connected; + } catch (error) { + this.connectionStatus = IDEConnectionStatus.Disconnected; + logger.debug('Failed to connect to MCP server:', error); + if (transport) { + try { + await transport.close(); + } catch (closeError) { + logger.debug('Failed to close transport:', closeError); + } + } + } + } +} diff --git a/packages/core/src/ide/ideContext.test.ts b/packages/core/src/ide/ideContext.test.ts new file mode 100644 index 000000000..1cb09c53e --- /dev/null +++ b/packages/core/src/ide/ideContext.test.ts @@ -0,0 +1,140 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect, beforeEach, vi } from 'vitest'; +import { createIdeContextStore } from './ideContext.js'; + +describe('ideContext - Active File', () => { + let ideContext: ReturnType; + + beforeEach(() => { + // Create a fresh, isolated instance for each test + ideContext = createIdeContextStore(); + }); + + it('should return undefined initially for active file context', () => { + expect(ideContext.getOpenFilesContext()).toBeUndefined(); + }); + + it('should set and retrieve the active file context', () => { + const testFile = { + activeFile: '/path/to/test/file.ts', + selectedText: '1234', + }; + + 
ideContext.setOpenFilesContext(testFile); + + const activeFile = ideContext.getOpenFilesContext(); + expect(activeFile).toEqual(testFile); + }); + + it('should update the active file context when called multiple times', () => { + const firstFile = { + activeFile: '/path/to/first.js', + selectedText: '1234', + }; + ideContext.setOpenFilesContext(firstFile); + + const secondFile = { + activeFile: '/path/to/second.py', + cursor: { line: 20, character: 30 }, + }; + ideContext.setOpenFilesContext(secondFile); + + const activeFile = ideContext.getOpenFilesContext(); + expect(activeFile).toEqual(secondFile); + }); + + it('should handle empty string for file path', () => { + const testFile = { + activeFile: '', + selectedText: '1234', + }; + ideContext.setOpenFilesContext(testFile); + expect(ideContext.getOpenFilesContext()).toEqual(testFile); + }); + + it('should notify subscribers when active file context changes', () => { + const subscriber1 = vi.fn(); + const subscriber2 = vi.fn(); + + ideContext.subscribeToOpenFiles(subscriber1); + ideContext.subscribeToOpenFiles(subscriber2); + + const testFile = { + activeFile: '/path/to/subscribed.ts', + cursor: { line: 15, character: 25 }, + }; + ideContext.setOpenFilesContext(testFile); + + expect(subscriber1).toHaveBeenCalledTimes(1); + expect(subscriber1).toHaveBeenCalledWith(testFile); + expect(subscriber2).toHaveBeenCalledTimes(1); + expect(subscriber2).toHaveBeenCalledWith(testFile); + + // Test with another update + const newFile = { + activeFile: '/path/to/new.js', + selectedText: '1234', + }; + ideContext.setOpenFilesContext(newFile); + + expect(subscriber1).toHaveBeenCalledTimes(2); + expect(subscriber1).toHaveBeenCalledWith(newFile); + expect(subscriber2).toHaveBeenCalledTimes(2); + expect(subscriber2).toHaveBeenCalledWith(newFile); + }); + + it('should stop notifying a subscriber after unsubscribe', () => { + const subscriber1 = vi.fn(); + const subscriber2 = vi.fn(); + + const unsubscribe1 = 
ideContext.subscribeToOpenFiles(subscriber1); + ideContext.subscribeToOpenFiles(subscriber2); + + ideContext.setOpenFilesContext({ + activeFile: '/path/to/file1.txt', + selectedText: '1234', + }); + expect(subscriber1).toHaveBeenCalledTimes(1); + expect(subscriber2).toHaveBeenCalledTimes(1); + + unsubscribe1(); + + ideContext.setOpenFilesContext({ + activeFile: '/path/to/file2.txt', + selectedText: '1234', + }); + expect(subscriber1).toHaveBeenCalledTimes(1); // Should not be called again + expect(subscriber2).toHaveBeenCalledTimes(2); + }); + + it('should allow the cursor to be optional', () => { + const testFile = { + activeFile: '/path/to/test/file.ts', + }; + + ideContext.setOpenFilesContext(testFile); + + const activeFile = ideContext.getOpenFilesContext(); + expect(activeFile).toEqual(testFile); + }); + + it('should clear the active file context', () => { + const testFile = { + activeFile: '/path/to/test/file.ts', + selectedText: '1234', + }; + + ideContext.setOpenFilesContext(testFile); + + expect(ideContext.getOpenFilesContext()).toEqual(testFile); + + ideContext.clearOpenFilesContext(); + + expect(ideContext.getOpenFilesContext()).toBeUndefined(); + }); +}); diff --git a/packages/core/src/ide/ideContext.ts b/packages/core/src/ide/ideContext.ts new file mode 100644 index 000000000..bc7383a12 --- /dev/null +++ b/packages/core/src/ide/ideContext.ts @@ -0,0 +1,118 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { z } from 'zod'; + +/** + * Zod schema for validating a cursor position. + */ +export const CursorSchema = z.object({ + line: z.number(), + character: z.number(), +}); +export type Cursor = z.infer; + +/** + * Zod schema for validating an active file context from the IDE. 
+ */ +export const OpenFilesSchema = z.object({ + activeFile: z.string(), + selectedText: z.string().optional(), + cursor: CursorSchema.optional(), + recentOpenFiles: z + .array( + z.object({ + filePath: z.string(), + timestamp: z.number(), + }), + ) + .optional(), +}); +export type OpenFiles = z.infer; + +/** + * Zod schema for validating the 'ide/openFilesChanged' notification from the IDE. + */ +export const OpenFilesNotificationSchema = z.object({ + method: z.literal('ide/openFilesChanged'), + params: OpenFilesSchema, +}); + +type OpenFilesSubscriber = (openFiles: OpenFiles | undefined) => void; + +/** + * Creates a new store for managing the IDE's active file context. + * This factory function encapsulates the state and logic, allowing for the creation + * of isolated instances, which is particularly useful for testing. + * + * @returns An object with methods to interact with the active file context. + */ +export function createIdeContextStore() { + let openFilesContext: OpenFiles | undefined = undefined; + const subscribers = new Set(); + + /** + * Notifies all registered subscribers about the current active file context. + */ + function notifySubscribers(): void { + for (const subscriber of subscribers) { + subscriber(openFilesContext); + } + } + + /** + * Sets the active file context and notifies all registered subscribers of the change. + * @param newOpenFiles The new active file context from the IDE. + */ + function setOpenFilesContext(newOpenFiles: OpenFiles): void { + openFilesContext = newOpenFiles; + notifySubscribers(); + } + + /** + * Clears the active file context and notifies all registered subscribers of the change. + */ + function clearOpenFilesContext(): void { + openFilesContext = undefined; + notifySubscribers(); + } + + /** + * Retrieves the current active file context. + * @returns The `OpenFiles` object if a file is active; otherwise, `undefined`. 
+ */ + function getOpenFilesContext(): OpenFiles | undefined { + return openFilesContext; + } + + /** + * Subscribes to changes in the active file context. + * + * When the active file context changes, the provided `subscriber` function will be called. + * Note: The subscriber is not called with the current value upon subscription. + * + * @param subscriber The function to be called when the active file context changes. + * @returns A function that, when called, will unsubscribe the provided subscriber. + */ + function subscribeToOpenFiles(subscriber: OpenFilesSubscriber): () => void { + subscribers.add(subscriber); + return () => { + subscribers.delete(subscriber); + }; + } + + return { + setOpenFilesContext, + getOpenFilesContext, + subscribeToOpenFiles, + clearOpenFilesContext, + }; +} + +/** + * The default, shared instance of the IDE context store for the application. + */ +export const ideContext = createIdeContextStore(); diff --git a/packages/core/src/index.ts b/packages/core/src/index.ts index a91edfb90..34f25d21b 100644 --- a/packages/core/src/index.ts +++ b/packages/core/src/index.ts @@ -33,15 +33,31 @@ export * from './utils/memoryDiscovery.js'; export * from './utils/gitIgnoreParser.js'; export * from './utils/editor.js'; export * from './utils/quotaErrorDetection.js'; +export * from './utils/fileUtils.js'; +export * from './utils/retry.js'; +export * from './utils/shell-utils.js'; +export * from './utils/systemEncoding.js'; +export * from './utils/textUtils.js'; +export * from './utils/formatters.js'; // Export services export * from './services/fileDiscoveryService.js'; export * from './services/gitService.js'; +// Export IDE specific logic +export * from './ide/ide-client.js'; +export * from './ide/ideContext.js'; + +// Export Shell Execution Service +export * from './services/shellExecutionService.js'; + // Export base tool definitions export * from './tools/tools.js'; export * from './tools/tool-registry.js'; +// Export prompt logic +export * from 
'./prompts/mcp-prompts.js'; + // Export specific tool logic export * from './tools/read-file.js'; export * from './tools/ls.js'; @@ -57,10 +73,24 @@ export * from './tools/read-many-files.js'; export * from './tools/mcp-client.js'; export * from './tools/mcp-tool.js'; +// MCP OAuth +export { MCPOAuthProvider } from './mcp/oauth-provider.js'; +export { + MCPOAuthToken, + MCPOAuthCredentials, + MCPOAuthTokenStorage, +} from './mcp/oauth-token-storage.js'; +export type { MCPOAuthConfig } from './mcp/oauth-provider.js'; +export type { + OAuthAuthorizationServerMetadata, + OAuthProtectedResourceMetadata, +} from './mcp/oauth-utils.js'; +export { OAuthUtils } from './mcp/oauth-utils.js'; + // Export telemetry functions export * from './telemetry/index.js'; export { sessionId } from './utils/session.js'; - +export * from './utils/browser.js'; // OpenAI Logging Utilities export { OpenAILogger, openaiLogger } from './utils/openaiLogger.js'; export { default as OpenAILogViewer } from './utils/openaiLogViewer.js'; diff --git a/packages/core/src/mcp/google-auth-provider.test.ts b/packages/core/src/mcp/google-auth-provider.test.ts new file mode 100644 index 000000000..f481b9e25 --- /dev/null +++ b/packages/core/src/mcp/google-auth-provider.test.ts @@ -0,0 +1,67 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { GoogleAuth } from 'google-auth-library'; +import { GoogleCredentialProvider } from './google-auth-provider.js'; +import { vi, describe, beforeEach, it, expect, Mock } from 'vitest'; +import { MCPServerConfig } from '../config/config.js'; + +vi.mock('google-auth-library'); + +describe('GoogleCredentialProvider', () => { + it('should throw an error if no scopes are provided', () => { + expect(() => new GoogleCredentialProvider()).toThrow( + 'Scopes must be provided in the oauth config for Google Credentials provider', + ); + }); + + it('should use scopes from the config if provided', () => { + const config = { + 
oauth: { + scopes: ['scope1', 'scope2'], + }, + } as MCPServerConfig; + new GoogleCredentialProvider(config); + expect(GoogleAuth).toHaveBeenCalledWith({ + scopes: ['scope1', 'scope2'], + }); + }); + + describe('with provider instance', () => { + let provider: GoogleCredentialProvider; + + beforeEach(() => { + const config = { + oauth: { + scopes: ['scope1', 'scope2'], + }, + } as MCPServerConfig; + provider = new GoogleCredentialProvider(config); + vi.clearAllMocks(); + }); + + it('should return credentials', async () => { + const mockClient = { + getAccessToken: vi.fn().mockResolvedValue({ token: 'test-token' }), + }; + (GoogleAuth.prototype.getClient as Mock).mockResolvedValue(mockClient); + + const credentials = await provider.tokens(); + + expect(credentials?.access_token).toBe('test-token'); + }); + + it('should return undefined if access token is not available', async () => { + const mockClient = { + getAccessToken: vi.fn().mockResolvedValue({ token: null }), + }; + (GoogleAuth.prototype.getClient as Mock).mockResolvedValue(mockClient); + + const credentials = await provider.tokens(); + expect(credentials).toBeUndefined(); + }); + }); +}); diff --git a/packages/core/src/mcp/google-auth-provider.ts b/packages/core/src/mcp/google-auth-provider.ts new file mode 100644 index 000000000..88cd086b2 --- /dev/null +++ b/packages/core/src/mcp/google-auth-provider.ts @@ -0,0 +1,83 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { OAuthClientProvider } from '@modelcontextprotocol/sdk/client/auth.js'; +import { + OAuthClientInformation, + OAuthClientInformationFull, + OAuthClientMetadata, + OAuthTokens, +} from '@modelcontextprotocol/sdk/shared/auth.js'; +import { GoogleAuth } from 'google-auth-library'; +import { MCPServerConfig } from '../config/config.js'; + +export class GoogleCredentialProvider implements OAuthClientProvider { + private readonly auth: GoogleAuth; + + // Properties required by 
OAuthClientProvider, with no-op values + readonly redirectUrl = ''; + readonly clientMetadata: OAuthClientMetadata = { + client_name: 'Gemini CLI (Google ADC)', + redirect_uris: [], + grant_types: [], + response_types: [], + token_endpoint_auth_method: 'none', + }; + private _clientInformation?: OAuthClientInformationFull; + + constructor(private readonly config?: MCPServerConfig) { + const scopes = this.config?.oauth?.scopes; + if (!scopes || scopes.length === 0) { + throw new Error( + 'Scopes must be provided in the oauth config for Google Credentials provider', + ); + } + this.auth = new GoogleAuth({ + scopes, + }); + } + + clientInformation(): OAuthClientInformation | undefined { + return this._clientInformation; + } + + saveClientInformation(clientInformation: OAuthClientInformationFull): void { + this._clientInformation = clientInformation; + } + + async tokens(): Promise { + const client = await this.auth.getClient(); + const accessTokenResponse = await client.getAccessToken(); + + if (!accessTokenResponse.token) { + console.error('Failed to get access token from Google ADC'); + return undefined; + } + + const tokens: OAuthTokens = { + access_token: accessTokenResponse.token, + token_type: 'Bearer', + }; + return tokens; + } + + saveTokens(_tokens: OAuthTokens): void { + // No-op, ADC manages tokens. 
+ } + + redirectToAuthorization(_authorizationUrl: URL): void { + // No-op + } + + saveCodeVerifier(_codeVerifier: string): void { + // No-op + } + + codeVerifier(): string { + // No-op + return ''; + } +} diff --git a/packages/core/src/mcp/oauth-provider.test.ts b/packages/core/src/mcp/oauth-provider.test.ts new file mode 100644 index 000000000..20dc9fab0 --- /dev/null +++ b/packages/core/src/mcp/oauth-provider.test.ts @@ -0,0 +1,722 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; +import * as http from 'node:http'; +import * as crypto from 'node:crypto'; +import open from 'open'; +import { + MCPOAuthProvider, + MCPOAuthConfig, + OAuthTokenResponse, + OAuthClientRegistrationResponse, +} from './oauth-provider.js'; +import { MCPOAuthTokenStorage, MCPOAuthToken } from './oauth-token-storage.js'; + +// Mock dependencies +vi.mock('open'); +vi.mock('node:crypto'); +vi.mock('./oauth-token-storage.js'); + +// Mock fetch globally +const mockFetch = vi.fn(); +global.fetch = mockFetch; + +// Define a reusable mock server with .listen, .close, and .on methods +const mockHttpServer = { + listen: vi.fn(), + close: vi.fn(), + on: vi.fn(), +}; +vi.mock('node:http', () => ({ + createServer: vi.fn(() => mockHttpServer), +})); + +describe('MCPOAuthProvider', () => { + const mockConfig: MCPOAuthConfig = { + enabled: true, + clientId: 'test-client-id', + clientSecret: 'test-client-secret', + authorizationUrl: 'https://auth.example.com/authorize', + tokenUrl: 'https://auth.example.com/token', + scopes: ['read', 'write'], + redirectUri: 'http://localhost:7777/oauth/callback', + }; + + const mockToken: MCPOAuthToken = { + accessToken: 'access_token_123', + refreshToken: 'refresh_token_456', + tokenType: 'Bearer', + scope: 'read write', + expiresAt: Date.now() + 3600000, + }; + + const mockTokenResponse: OAuthTokenResponse = { + access_token: 'access_token_123', + 
token_type: 'Bearer', + expires_in: 3600, + refresh_token: 'refresh_token_456', + scope: 'read write', + }; + + beforeEach(() => { + vi.clearAllMocks(); + vi.spyOn(console, 'log').mockImplementation(() => {}); + vi.spyOn(console, 'warn').mockImplementation(() => {}); + vi.spyOn(console, 'error').mockImplementation(() => {}); + + // Mock crypto functions + vi.mocked(crypto.randomBytes).mockImplementation((size: number) => { + if (size === 32) return Buffer.from('code_verifier_mock_32_bytes_long'); + if (size === 16) return Buffer.from('state_mock_16_by'); + return Buffer.alloc(size); + }); + + vi.mocked(crypto.createHash).mockReturnValue({ + update: vi.fn().mockReturnThis(), + digest: vi.fn().mockReturnValue('code_challenge_mock'), + } as unknown as crypto.Hash); + + // Mock randomBytes to return predictable values for state + vi.mocked(crypto.randomBytes).mockImplementation((size) => { + if (size === 32) { + return Buffer.from('mock_code_verifier_32_bytes_long_string'); + } else if (size === 16) { + return Buffer.from('mock_state_16_bytes'); + } + return Buffer.alloc(size); + }); + + // Mock token storage + vi.mocked(MCPOAuthTokenStorage.saveToken).mockResolvedValue(undefined); + vi.mocked(MCPOAuthTokenStorage.getToken).mockResolvedValue(null); + }); + + afterEach(() => { + vi.restoreAllMocks(); + }); + + describe('authenticate', () => { + it('should perform complete OAuth flow with PKCE', async () => { + // Mock HTTP server callback + let callbackHandler: unknown; + vi.mocked(http.createServer).mockImplementation((handler) => { + callbackHandler = handler; + return mockHttpServer as unknown as http.Server; + }); + + mockHttpServer.listen.mockImplementation((port, callback) => { + callback?.(); + // Simulate OAuth callback + setTimeout(() => { + const mockReq = { + url: '/oauth/callback?code=auth_code_123&state=bW9ja19zdGF0ZV8xNl9ieXRlcw', + }; + const mockRes = { + writeHead: vi.fn(), + end: vi.fn(), + }; + (callbackHandler as (req: unknown, res: unknown) => 
void)( + mockReq, + mockRes, + ); + }, 10); + }); + + // Mock token exchange + mockFetch.mockResolvedValueOnce({ + ok: true, + json: () => Promise.resolve(mockTokenResponse), + }); + + const result = await MCPOAuthProvider.authenticate( + 'test-server', + mockConfig, + ); + + expect(result).toEqual({ + accessToken: 'access_token_123', + refreshToken: 'refresh_token_456', + tokenType: 'Bearer', + scope: 'read write', + expiresAt: expect.any(Number), + }); + + expect(open).toHaveBeenCalledWith(expect.stringContaining('authorize')); + expect(MCPOAuthTokenStorage.saveToken).toHaveBeenCalledWith( + 'test-server', + expect.objectContaining({ accessToken: 'access_token_123' }), + 'test-client-id', + 'https://auth.example.com/token', + undefined, + ); + }); + + it('should handle OAuth discovery when no authorization URL provided', async () => { + // Use a mutable config object + const configWithoutAuth: MCPOAuthConfig = { ...mockConfig }; + delete configWithoutAuth.authorizationUrl; + delete configWithoutAuth.tokenUrl; + + const mockResourceMetadata = { + authorization_servers: ['https://discovered.auth.com'], + }; + + const mockAuthServerMetadata = { + authorization_endpoint: 'https://discovered.auth.com/authorize', + token_endpoint: 'https://discovered.auth.com/token', + scopes_supported: ['read', 'write'], + }; + + mockFetch + .mockResolvedValueOnce({ + ok: true, + json: () => Promise.resolve(mockResourceMetadata), + }) + .mockResolvedValueOnce({ + ok: true, + json: () => Promise.resolve(mockAuthServerMetadata), + }); + + // Patch config after discovery + configWithoutAuth.authorizationUrl = + mockAuthServerMetadata.authorization_endpoint; + configWithoutAuth.tokenUrl = mockAuthServerMetadata.token_endpoint; + configWithoutAuth.scopes = mockAuthServerMetadata.scopes_supported; + + // Setup callback handler + let callbackHandler: unknown; + vi.mocked(http.createServer).mockImplementation((handler) => { + callbackHandler = handler; + return mockHttpServer as unknown as 
http.Server; + }); + + mockHttpServer.listen.mockImplementation((port, callback) => { + callback?.(); + setTimeout(() => { + const mockReq = { + url: '/oauth/callback?code=auth_code_123&state=bW9ja19zdGF0ZV8xNl9ieXRlcw', + }; + const mockRes = { + writeHead: vi.fn(), + end: vi.fn(), + }; + (callbackHandler as (req: unknown, res: unknown) => void)( + mockReq, + mockRes, + ); + }, 10); + }); + + // Mock token exchange with discovered endpoint + mockFetch.mockResolvedValueOnce({ + ok: true, + json: () => Promise.resolve(mockTokenResponse), + }); + + const result = await MCPOAuthProvider.authenticate( + 'test-server', + configWithoutAuth, + 'https://api.example.com', + ); + + expect(result).toBeDefined(); + expect(mockFetch).toHaveBeenCalledWith( + 'https://discovered.auth.com/token', + expect.objectContaining({ + method: 'POST', + headers: { 'Content-Type': 'application/x-www-form-urlencoded' }, + }), + ); + }); + + it('should perform dynamic client registration when no client ID provided', async () => { + const configWithoutClient = { ...mockConfig }; + delete configWithoutClient.clientId; + + const mockRegistrationResponse: OAuthClientRegistrationResponse = { + client_id: 'dynamic_client_id', + client_secret: 'dynamic_client_secret', + redirect_uris: ['http://localhost:7777/oauth/callback'], + grant_types: ['authorization_code', 'refresh_token'], + response_types: ['code'], + token_endpoint_auth_method: 'none', + }; + + const mockAuthServerMetadata = { + authorization_endpoint: 'https://auth.example.com/authorize', + token_endpoint: 'https://auth.example.com/token', + registration_endpoint: 'https://auth.example.com/register', + }; + + mockFetch + .mockResolvedValueOnce({ + ok: true, + json: () => Promise.resolve(mockAuthServerMetadata), + }) + .mockResolvedValueOnce({ + ok: true, + json: () => Promise.resolve(mockRegistrationResponse), + }); + + // Setup callback handler + let callbackHandler: unknown; + vi.mocked(http.createServer).mockImplementation((handler) => 
{ + callbackHandler = handler; + return mockHttpServer as unknown as http.Server; + }); + + mockHttpServer.listen.mockImplementation((port, callback) => { + callback?.(); + setTimeout(() => { + const mockReq = { + url: '/oauth/callback?code=auth_code_123&state=bW9ja19zdGF0ZV8xNl9ieXRlcw', + }; + const mockRes = { + writeHead: vi.fn(), + end: vi.fn(), + }; + (callbackHandler as (req: unknown, res: unknown) => void)( + mockReq, + mockRes, + ); + }, 10); + }); + + // Mock token exchange + mockFetch.mockResolvedValueOnce({ + ok: true, + json: () => Promise.resolve(mockTokenResponse), + }); + + const result = await MCPOAuthProvider.authenticate( + 'test-server', + configWithoutClient, + ); + + expect(result).toBeDefined(); + expect(mockFetch).toHaveBeenCalledWith( + 'https://auth.example.com/register', + expect.objectContaining({ + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + }), + ); + }); + + it('should handle OAuth callback errors', async () => { + let callbackHandler: unknown; + vi.mocked(http.createServer).mockImplementation((handler) => { + callbackHandler = handler; + return mockHttpServer as unknown as http.Server; + }); + + mockHttpServer.listen.mockImplementation((port, callback) => { + callback?.(); + setTimeout(() => { + const mockReq = { + url: '/oauth/callback?error=access_denied&error_description=User%20denied%20access', + }; + const mockRes = { + writeHead: vi.fn(), + end: vi.fn(), + }; + (callbackHandler as (req: unknown, res: unknown) => void)( + mockReq, + mockRes, + ); + }, 10); + }); + + await expect( + MCPOAuthProvider.authenticate('test-server', mockConfig), + ).rejects.toThrow('OAuth error: access_denied'); + }); + + it('should handle state mismatch in callback', async () => { + let callbackHandler: unknown; + vi.mocked(http.createServer).mockImplementation((handler) => { + callbackHandler = handler; + return mockHttpServer as unknown as http.Server; + }); + + mockHttpServer.listen.mockImplementation((port, callback) => { 
+ callback?.(); + setTimeout(() => { + const mockReq = { + url: '/oauth/callback?code=auth_code_123&state=wrong_state', + }; + const mockRes = { + writeHead: vi.fn(), + end: vi.fn(), + }; + (callbackHandler as (req: unknown, res: unknown) => void)( + mockReq, + mockRes, + ); + }, 10); + }); + + await expect( + MCPOAuthProvider.authenticate('test-server', mockConfig), + ).rejects.toThrow('State mismatch - possible CSRF attack'); + }); + + it('should handle token exchange failure', async () => { + let callbackHandler: unknown; + vi.mocked(http.createServer).mockImplementation((handler) => { + callbackHandler = handler; + return mockHttpServer as unknown as http.Server; + }); + + mockHttpServer.listen.mockImplementation((port, callback) => { + callback?.(); + setTimeout(() => { + const mockReq = { + url: '/oauth/callback?code=auth_code_123&state=bW9ja19zdGF0ZV8xNl9ieXRlcw', + }; + const mockRes = { + writeHead: vi.fn(), + end: vi.fn(), + }; + (callbackHandler as (req: unknown, res: unknown) => void)( + mockReq, + mockRes, + ); + }, 10); + }); + + mockFetch.mockResolvedValueOnce({ + ok: false, + status: 400, + text: () => Promise.resolve('Invalid grant'), + }); + + await expect( + MCPOAuthProvider.authenticate('test-server', mockConfig), + ).rejects.toThrow('Token exchange failed: 400 - Invalid grant'); + }); + + it('should handle callback timeout', async () => { + vi.mocked(http.createServer).mockImplementation( + () => mockHttpServer as unknown as http.Server, + ); + + mockHttpServer.listen.mockImplementation((port, callback) => { + callback?.(); + // Don't trigger callback - simulate timeout + }); + + // Mock setTimeout to trigger timeout immediately + const originalSetTimeout = global.setTimeout; + global.setTimeout = vi.fn((callback, delay) => { + if (delay === 5 * 60 * 1000) { + // 5 minute timeout + callback(); + } + return originalSetTimeout(callback, 0); + }) as unknown as typeof setTimeout; + + await expect( + MCPOAuthProvider.authenticate('test-server', 
mockConfig), + ).rejects.toThrow('OAuth callback timeout'); + + global.setTimeout = originalSetTimeout; + }); + }); + + describe('refreshAccessToken', () => { + it('should refresh token successfully', async () => { + const refreshResponse = { + access_token: 'new_access_token', + token_type: 'Bearer', + expires_in: 3600, + refresh_token: 'new_refresh_token', + }; + + mockFetch.mockResolvedValueOnce({ + ok: true, + json: () => Promise.resolve(refreshResponse), + }); + + const result = await MCPOAuthProvider.refreshAccessToken( + mockConfig, + 'old_refresh_token', + 'https://auth.example.com/token', + ); + + expect(result).toEqual(refreshResponse); + expect(mockFetch).toHaveBeenCalledWith( + 'https://auth.example.com/token', + expect.objectContaining({ + method: 'POST', + headers: { 'Content-Type': 'application/x-www-form-urlencoded' }, + body: expect.stringContaining('grant_type=refresh_token'), + }), + ); + }); + + it('should include client secret in refresh request when available', async () => { + mockFetch.mockResolvedValueOnce({ + ok: true, + json: () => Promise.resolve(mockTokenResponse), + }); + + await MCPOAuthProvider.refreshAccessToken( + mockConfig, + 'refresh_token', + 'https://auth.example.com/token', + ); + + const fetchCall = mockFetch.mock.calls[0]; + expect(fetchCall[1].body).toContain('client_secret=test-client-secret'); + }); + + it('should handle refresh token failure', async () => { + mockFetch.mockResolvedValueOnce({ + ok: false, + status: 400, + text: () => Promise.resolve('Invalid refresh token'), + }); + + await expect( + MCPOAuthProvider.refreshAccessToken( + mockConfig, + 'invalid_refresh_token', + 'https://auth.example.com/token', + ), + ).rejects.toThrow('Token refresh failed: 400 - Invalid refresh token'); + }); + }); + + describe('getValidToken', () => { + it('should return valid token when not expired', async () => { + const validCredentials = { + serverName: 'test-server', + token: mockToken, + clientId: 'test-client-id', + tokenUrl: 
'https://auth.example.com/token', + updatedAt: Date.now(), + }; + + vi.mocked(MCPOAuthTokenStorage.getToken).mockResolvedValue( + validCredentials, + ); + vi.mocked(MCPOAuthTokenStorage.isTokenExpired).mockReturnValue(false); + + const result = await MCPOAuthProvider.getValidToken( + 'test-server', + mockConfig, + ); + + expect(result).toBe('access_token_123'); + }); + + it('should refresh expired token and return new token', async () => { + const expiredCredentials = { + serverName: 'test-server', + token: { ...mockToken, expiresAt: Date.now() - 3600000 }, + clientId: 'test-client-id', + tokenUrl: 'https://auth.example.com/token', + updatedAt: Date.now(), + }; + + vi.mocked(MCPOAuthTokenStorage.getToken).mockResolvedValue( + expiredCredentials, + ); + vi.mocked(MCPOAuthTokenStorage.isTokenExpired).mockReturnValue(true); + + const refreshResponse = { + access_token: 'new_access_token', + token_type: 'Bearer', + expires_in: 3600, + refresh_token: 'new_refresh_token', + }; + + mockFetch.mockResolvedValueOnce({ + ok: true, + json: () => Promise.resolve(refreshResponse), + }); + + const result = await MCPOAuthProvider.getValidToken( + 'test-server', + mockConfig, + ); + + expect(result).toBe('new_access_token'); + expect(MCPOAuthTokenStorage.saveToken).toHaveBeenCalledWith( + 'test-server', + expect.objectContaining({ accessToken: 'new_access_token' }), + 'test-client-id', + 'https://auth.example.com/token', + undefined, + ); + }); + + it('should return null when no credentials exist', async () => { + vi.mocked(MCPOAuthTokenStorage.getToken).mockResolvedValue(null); + + const result = await MCPOAuthProvider.getValidToken( + 'test-server', + mockConfig, + ); + + expect(result).toBeNull(); + }); + + it('should handle refresh failure and remove invalid token', async () => { + const expiredCredentials = { + serverName: 'test-server', + token: { ...mockToken, expiresAt: Date.now() - 3600000 }, + clientId: 'test-client-id', + tokenUrl: 'https://auth.example.com/token', + 
updatedAt: Date.now(), + }; + + vi.mocked(MCPOAuthTokenStorage.getToken).mockResolvedValue( + expiredCredentials, + ); + vi.mocked(MCPOAuthTokenStorage.isTokenExpired).mockReturnValue(true); + vi.mocked(MCPOAuthTokenStorage.removeToken).mockResolvedValue(undefined); + + mockFetch.mockResolvedValueOnce({ + ok: false, + status: 400, + text: () => Promise.resolve('Invalid refresh token'), + }); + + const result = await MCPOAuthProvider.getValidToken( + 'test-server', + mockConfig, + ); + + expect(result).toBeNull(); + expect(MCPOAuthTokenStorage.removeToken).toHaveBeenCalledWith( + 'test-server', + ); + expect(console.error).toHaveBeenCalledWith( + expect.stringContaining('Failed to refresh token'), + ); + }); + + it('should return null for token without refresh capability', async () => { + const tokenWithoutRefresh = { + serverName: 'test-server', + token: { + ...mockToken, + refreshToken: undefined, + expiresAt: Date.now() - 3600000, + }, + clientId: 'test-client-id', + tokenUrl: 'https://auth.example.com/token', + updatedAt: Date.now(), + }; + + vi.mocked(MCPOAuthTokenStorage.getToken).mockResolvedValue( + tokenWithoutRefresh, + ); + vi.mocked(MCPOAuthTokenStorage.isTokenExpired).mockReturnValue(true); + + const result = await MCPOAuthProvider.getValidToken( + 'test-server', + mockConfig, + ); + + expect(result).toBeNull(); + }); + }); + + describe('PKCE parameter generation', () => { + it('should generate valid PKCE parameters', async () => { + // Test is implicit in the authenticate flow tests, but we can verify + // the crypto mocks are called correctly + let callbackHandler: unknown; + vi.mocked(http.createServer).mockImplementation((handler) => { + callbackHandler = handler; + return mockHttpServer as unknown as http.Server; + }); + + mockHttpServer.listen.mockImplementation((port, callback) => { + callback?.(); + setTimeout(() => { + const mockReq = { + url: '/oauth/callback?code=auth_code_123&state=bW9ja19zdGF0ZV8xNl9ieXRlcw', + }; + const mockRes = { + 
writeHead: vi.fn(), + end: vi.fn(), + }; + (callbackHandler as (req: unknown, res: unknown) => void)( + mockReq, + mockRes, + ); + }, 10); + }); + + mockFetch.mockResolvedValueOnce({ + ok: true, + json: () => Promise.resolve(mockTokenResponse), + }); + + await MCPOAuthProvider.authenticate('test-server', mockConfig); + + expect(crypto.randomBytes).toHaveBeenCalledWith(32); // code verifier + expect(crypto.randomBytes).toHaveBeenCalledWith(16); // state + expect(crypto.createHash).toHaveBeenCalledWith('sha256'); + }); + }); + + describe('Authorization URL building', () => { + it('should build correct authorization URL with all parameters', async () => { + // Mock to capture the URL that would be opened + let capturedUrl: string; + vi.mocked(open).mockImplementation((url) => { + capturedUrl = url; + // Return a minimal mock ChildProcess + return Promise.resolve({ + pid: 1234, + } as unknown as import('child_process').ChildProcess); + }); + + let callbackHandler: unknown; + vi.mocked(http.createServer).mockImplementation((handler) => { + callbackHandler = handler; + return mockHttpServer as unknown as http.Server; + }); + + mockHttpServer.listen.mockImplementation((port, callback) => { + callback?.(); + setTimeout(() => { + const mockReq = { + url: '/oauth/callback?code=auth_code_123&state=bW9ja19zdGF0ZV8xNl9ieXRlcw', + }; + const mockRes = { + writeHead: vi.fn(), + end: vi.fn(), + }; + (callbackHandler as (req: unknown, res: unknown) => void)( + mockReq, + mockRes, + ); + }, 10); + }); + + mockFetch.mockResolvedValueOnce({ + ok: true, + json: () => Promise.resolve(mockTokenResponse), + }); + + await MCPOAuthProvider.authenticate('test-server', mockConfig); + + expect(capturedUrl!).toContain('response_type=code'); + expect(capturedUrl!).toContain('client_id=test-client-id'); + expect(capturedUrl!).toContain('code_challenge=code_challenge_mock'); + expect(capturedUrl!).toContain('code_challenge_method=S256'); + expect(capturedUrl!).toContain('scope=read+write'); + 
expect(capturedUrl!).toContain('resource=https%3A%2F%2Fauth.example.com'); + }); + }); +}); diff --git a/packages/core/src/mcp/oauth-provider.ts b/packages/core/src/mcp/oauth-provider.ts new file mode 100644 index 000000000..2f65f0516 --- /dev/null +++ b/packages/core/src/mcp/oauth-provider.ts @@ -0,0 +1,731 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import * as http from 'node:http'; +import * as crypto from 'node:crypto'; +import { URL } from 'node:url'; +import open from 'open'; +import { MCPOAuthToken, MCPOAuthTokenStorage } from './oauth-token-storage.js'; +import { getErrorMessage } from '../utils/errors.js'; +import { OAuthUtils } from './oauth-utils.js'; + +/** + * OAuth configuration for an MCP server. + */ +export interface MCPOAuthConfig { + enabled?: boolean; // Whether OAuth is enabled for this server + clientId?: string; + clientSecret?: string; + authorizationUrl?: string; + tokenUrl?: string; + scopes?: string[]; + redirectUri?: string; + tokenParamName?: string; // For SSE connections, specifies the query parameter name for the token +} + +/** + * OAuth authorization response. + */ +export interface OAuthAuthorizationResponse { + code: string; + state: string; +} + +/** + * OAuth token response from the authorization server. + */ +export interface OAuthTokenResponse { + access_token: string; + token_type: string; + expires_in?: number; + refresh_token?: string; + scope?: string; +} + +/** + * Dynamic client registration request. + */ +export interface OAuthClientRegistrationRequest { + client_name: string; + redirect_uris: string[]; + grant_types: string[]; + response_types: string[]; + token_endpoint_auth_method: string; + code_challenge_method?: string[]; + scope?: string; +} + +/** + * Dynamic client registration response. 
+ */ +export interface OAuthClientRegistrationResponse { + client_id: string; + client_secret?: string; + client_id_issued_at?: number; + client_secret_expires_at?: number; + redirect_uris: string[]; + grant_types: string[]; + response_types: string[]; + token_endpoint_auth_method: string; + code_challenge_method?: string[]; + scope?: string; +} + +/** + * PKCE (Proof Key for Code Exchange) parameters. + */ +interface PKCEParams { + codeVerifier: string; + codeChallenge: string; + state: string; +} + +/** + * Provider for handling OAuth authentication for MCP servers. + */ +export class MCPOAuthProvider { + private static readonly REDIRECT_PORT = 7777; + private static readonly REDIRECT_PATH = '/oauth/callback'; + private static readonly HTTP_OK = 200; + private static readonly HTTP_REDIRECT = 302; + + /** + * Register a client dynamically with the OAuth server. + * + * @param registrationUrl The client registration endpoint URL + * @param config OAuth configuration + * @returns The registered client information + */ + private static async registerClient( + registrationUrl: string, + config: MCPOAuthConfig, + ): Promise { + const redirectUri = + config.redirectUri || + `http://localhost:${this.REDIRECT_PORT}${this.REDIRECT_PATH}`; + + const registrationRequest: OAuthClientRegistrationRequest = { + client_name: 'Gemini CLI MCP Client', + redirect_uris: [redirectUri], + grant_types: ['authorization_code', 'refresh_token'], + response_types: ['code'], + token_endpoint_auth_method: 'none', // Public client + code_challenge_method: ['S256'], + scope: config.scopes?.join(' ') || '', + }; + + const response = await fetch(registrationUrl, { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify(registrationRequest), + }); + + if (!response.ok) { + const errorText = await response.text(); + throw new Error( + `Client registration failed: ${response.status} ${response.statusText} - ${errorText}`, + ); + } + + return (await 
response.json()) as OAuthClientRegistrationResponse; + } + + /** + * Discover OAuth configuration from an MCP server URL. + * + * @param mcpServerUrl The MCP server URL + * @returns OAuth configuration if discovered, null otherwise + */ + private static async discoverOAuthFromMCPServer( + mcpServerUrl: string, + ): Promise { + const baseUrl = OAuthUtils.extractBaseUrl(mcpServerUrl); + return OAuthUtils.discoverOAuthConfig(baseUrl); + } + + /** + * Generate PKCE parameters for OAuth flow. + * + * @returns PKCE parameters including code verifier, challenge, and state + */ + private static generatePKCEParams(): PKCEParams { + // Generate code verifier (43-128 characters) + const codeVerifier = crypto.randomBytes(32).toString('base64url'); + + // Generate code challenge using SHA256 + const codeChallenge = crypto + .createHash('sha256') + .update(codeVerifier) + .digest('base64url'); + + // Generate state for CSRF protection + const state = crypto.randomBytes(16).toString('base64url'); + + return { codeVerifier, codeChallenge, state }; + } + + /** + * Start a local HTTP server to handle OAuth callback. + * + * @param expectedState The state parameter to validate + * @returns Promise that resolves with the authorization code + */ + private static async startCallbackServer( + expectedState: string, + ): Promise { + return new Promise((resolve, reject) => { + const server = http.createServer( + async (req: http.IncomingMessage, res: http.ServerResponse) => { + try { + const url = new URL( + req.url!, + `http://localhost:${this.REDIRECT_PORT}`, + ); + + if (url.pathname !== this.REDIRECT_PATH) { + res.writeHead(404); + res.end('Not found'); + return; + } + + const code = url.searchParams.get('code'); + const state = url.searchParams.get('state'); + const error = url.searchParams.get('error'); + + if (error) { + res.writeHead(this.HTTP_OK, { 'Content-Type': 'text/html' }); + res.end(` + + +

Authentication Failed

+

Error: ${(error as string).replace(//g, '>')}

+

${((url.searchParams.get('error_description') || '') as string).replace(//g, '>')}

+

You can close this window.

+ + + `); + server.close(); + reject(new Error(`OAuth error: ${error}`)); + return; + } + + if (!code || !state) { + res.writeHead(400); + res.end('Missing code or state parameter'); + return; + } + + if (state !== expectedState) { + res.writeHead(400); + res.end('Invalid state parameter'); + server.close(); + reject(new Error('State mismatch - possible CSRF attack')); + return; + } + + // Send success response to browser + res.writeHead(this.HTTP_OK, { 'Content-Type': 'text/html' }); + res.end(` + + +

Authentication Successful!

+

You can close this window and return to Gemini CLI.

+ + + + `); + + server.close(); + resolve({ code, state }); + } catch (error) { + server.close(); + reject(error); + } + }, + ); + + server.on('error', reject); + server.listen(this.REDIRECT_PORT, () => { + console.log( + `OAuth callback server listening on port ${this.REDIRECT_PORT}`, + ); + }); + + // Timeout after 5 minutes + setTimeout( + () => { + server.close(); + reject(new Error('OAuth callback timeout')); + }, + 5 * 60 * 1000, + ); + }); + } + + /** + * Build the authorization URL with PKCE parameters. + * + * @param config OAuth configuration + * @param pkceParams PKCE parameters + * @param mcpServerUrl The MCP server URL to use as the resource parameter + * @returns The authorization URL + */ + private static buildAuthorizationUrl( + config: MCPOAuthConfig, + pkceParams: PKCEParams, + mcpServerUrl?: string, + ): string { + const redirectUri = + config.redirectUri || + `http://localhost:${this.REDIRECT_PORT}${this.REDIRECT_PATH}`; + + const params = new URLSearchParams({ + client_id: config.clientId!, + response_type: 'code', + redirect_uri: redirectUri, + state: pkceParams.state, + code_challenge: pkceParams.codeChallenge, + code_challenge_method: 'S256', + }); + + if (config.scopes && config.scopes.length > 0) { + params.append('scope', config.scopes.join(' ')); + } + + // Add resource parameter for MCP OAuth spec compliance + // Use the MCP server URL if provided, otherwise fall back to authorization URL + const resourceUrl = mcpServerUrl || config.authorizationUrl!; + try { + params.append('resource', OAuthUtils.buildResourceParameter(resourceUrl)); + } catch (error) { + throw new Error( + `Invalid resource URL: "${resourceUrl}". ${getErrorMessage(error)}`, + ); + } + + return `${config.authorizationUrl}?${params.toString()}`; + } + + /** + * Exchange authorization code for tokens. 
+ * + * @param config OAuth configuration + * @param code Authorization code + * @param codeVerifier PKCE code verifier + * @param mcpServerUrl The MCP server URL to use as the resource parameter + * @returns The token response + */ + private static async exchangeCodeForToken( + config: MCPOAuthConfig, + code: string, + codeVerifier: string, + mcpServerUrl?: string, + ): Promise { + const redirectUri = + config.redirectUri || + `http://localhost:${this.REDIRECT_PORT}${this.REDIRECT_PATH}`; + + const params = new URLSearchParams({ + grant_type: 'authorization_code', + code, + redirect_uri: redirectUri, + code_verifier: codeVerifier, + client_id: config.clientId!, + }); + + if (config.clientSecret) { + params.append('client_secret', config.clientSecret); + } + + // Add resource parameter for MCP OAuth spec compliance + // Use the MCP server URL if provided, otherwise fall back to token URL + const resourceUrl = mcpServerUrl || config.tokenUrl!; + try { + params.append('resource', OAuthUtils.buildResourceParameter(resourceUrl)); + } catch (error) { + throw new Error( + `Invalid resource URL: "${resourceUrl}". ${getErrorMessage(error)}`, + ); + } + + const response = await fetch(config.tokenUrl!, { + method: 'POST', + headers: { + 'Content-Type': 'application/x-www-form-urlencoded', + }, + body: params.toString(), + }); + + if (!response.ok) { + const errorText = await response.text(); + throw new Error( + `Token exchange failed: ${response.status} - ${errorText}`, + ); + } + + return (await response.json()) as OAuthTokenResponse; + } + + /** + * Refresh an access token using a refresh token. 
+ * + * @param config OAuth configuration + * @param refreshToken The refresh token + * @param tokenUrl The token endpoint URL + * @param mcpServerUrl The MCP server URL to use as the resource parameter + * @returns The new token response + */ + static async refreshAccessToken( + config: MCPOAuthConfig, + refreshToken: string, + tokenUrl: string, + mcpServerUrl?: string, + ): Promise { + const params = new URLSearchParams({ + grant_type: 'refresh_token', + refresh_token: refreshToken, + client_id: config.clientId!, + }); + + if (config.clientSecret) { + params.append('client_secret', config.clientSecret); + } + + if (config.scopes && config.scopes.length > 0) { + params.append('scope', config.scopes.join(' ')); + } + + // Add resource parameter for MCP OAuth spec compliance + // Use the MCP server URL if provided, otherwise fall back to token URL + const resourceUrl = mcpServerUrl || tokenUrl; + try { + params.append('resource', OAuthUtils.buildResourceParameter(resourceUrl)); + } catch (error) { + throw new Error( + `Invalid resource URL: "${resourceUrl}". ${getErrorMessage(error)}`, + ); + } + + const response = await fetch(tokenUrl, { + method: 'POST', + headers: { + 'Content-Type': 'application/x-www-form-urlencoded', + }, + body: params.toString(), + }); + + if (!response.ok) { + const errorText = await response.text(); + throw new Error( + `Token refresh failed: ${response.status} - ${errorText}`, + ); + } + + return (await response.json()) as OAuthTokenResponse; + } + + /** + * Perform the full OAuth authorization code flow with PKCE. 
+ * + * @param serverName The name of the MCP server + * @param config OAuth configuration + * @param mcpServerUrl Optional MCP server URL for OAuth discovery + * @returns The obtained OAuth token + */ + static async authenticate( + serverName: string, + config: MCPOAuthConfig, + mcpServerUrl?: string, + ): Promise { + // If no authorization URL is provided, try to discover OAuth configuration + if (!config.authorizationUrl && mcpServerUrl) { + console.log( + 'No authorization URL provided, attempting OAuth discovery...', + ); + + // For SSE URLs, first check if authentication is required + if (OAuthUtils.isSSEEndpoint(mcpServerUrl)) { + try { + const response = await fetch(mcpServerUrl, { + method: 'HEAD', + headers: { + Accept: 'text/event-stream', + }, + }); + + if (response.status === 401 || response.status === 307) { + const wwwAuthenticate = response.headers.get('www-authenticate'); + if (wwwAuthenticate) { + const discoveredConfig = + await OAuthUtils.discoverOAuthFromWWWAuthenticate( + wwwAuthenticate, + ); + if (discoveredConfig) { + config = { + ...config, + ...discoveredConfig, + scopes: discoveredConfig.scopes || config.scopes || [], + }; + } + } + } + } catch (error) { + console.debug( + `Failed to check SSE endpoint for authentication requirements: ${getErrorMessage(error)}`, + ); + } + } + + // If we still don't have OAuth config, try the standard discovery + if (!config.authorizationUrl) { + const discoveredConfig = + await this.discoverOAuthFromMCPServer(mcpServerUrl); + if (discoveredConfig) { + config = { ...config, ...discoveredConfig }; + console.log('OAuth configuration discovered successfully'); + } else { + throw new Error( + 'Failed to discover OAuth configuration from MCP server', + ); + } + } + } + + // If no client ID is provided, try dynamic client registration + if (!config.clientId) { + // Extract server URL from authorization URL + if (!config.authorizationUrl) { + throw new Error( + 'Cannot perform dynamic registration without 
authorization URL', + ); + } + + const authUrl = new URL(config.authorizationUrl); + const serverUrl = `${authUrl.protocol}//${authUrl.host}`; + + console.log( + 'No client ID provided, attempting dynamic client registration...', + ); + + // Get the authorization server metadata for registration + const authServerMetadataUrl = new URL( + '/.well-known/oauth-authorization-server', + serverUrl, + ).toString(); + + const authServerMetadata = + await OAuthUtils.fetchAuthorizationServerMetadata( + authServerMetadataUrl, + ); + if (!authServerMetadata) { + throw new Error( + 'Failed to fetch authorization server metadata for client registration', + ); + } + + // Register client if registration endpoint is available + if (authServerMetadata.registration_endpoint) { + const clientRegistration = await this.registerClient( + authServerMetadata.registration_endpoint, + config, + ); + + config.clientId = clientRegistration.client_id; + if (clientRegistration.client_secret) { + config.clientSecret = clientRegistration.client_secret; + } + + console.log('Dynamic client registration successful'); + } else { + throw new Error( + 'No client ID provided and dynamic registration not supported', + ); + } + } + + // Validate configuration + if (!config.clientId || !config.authorizationUrl || !config.tokenUrl) { + throw new Error( + 'Missing required OAuth configuration after discovery and registration', + ); + } + + // Generate PKCE parameters + const pkceParams = this.generatePKCEParams(); + + // Build authorization URL + const authUrl = this.buildAuthorizationUrl( + config, + pkceParams, + mcpServerUrl, + ); + + console.log('\nOpening browser for OAuth authentication...'); + console.log('If the browser does not open, please visit:'); + console.log(''); + + // Get terminal width or default to 80 + const terminalWidth = process.stdout.columns || 80; + const separatorLength = Math.min(terminalWidth - 2, 80); + const separator = '━'.repeat(separatorLength); + + console.log(separator); + 
console.log( + 'COPY THE ENTIRE URL BELOW (select all text between the lines):', + ); + console.log(separator); + console.log(authUrl); + console.log(separator); + console.log(''); + console.log( + '💡 TIP: Triple-click to select the entire URL, then copy and paste it into your browser.', + ); + console.log( + '⚠️ Make sure to copy the COMPLETE URL - it may wrap across multiple lines.', + ); + console.log(''); + + // Start callback server + const callbackPromise = this.startCallbackServer(pkceParams.state); + + // Open browser + try { + await open(authUrl); + } catch (error) { + console.warn( + 'Failed to open browser automatically:', + getErrorMessage(error), + ); + } + + // Wait for callback + const { code } = await callbackPromise; + + console.log('\nAuthorization code received, exchanging for tokens...'); + + // Exchange code for tokens + const tokenResponse = await this.exchangeCodeForToken( + config, + code, + pkceParams.codeVerifier, + mcpServerUrl, + ); + + // Convert to our token format + const token: MCPOAuthToken = { + accessToken: tokenResponse.access_token, + tokenType: tokenResponse.token_type, + refreshToken: tokenResponse.refresh_token, + scope: tokenResponse.scope, + }; + + if (tokenResponse.expires_in) { + token.expiresAt = Date.now() + tokenResponse.expires_in * 1000; + } + + // Save token + try { + await MCPOAuthTokenStorage.saveToken( + serverName, + token, + config.clientId, + config.tokenUrl, + mcpServerUrl, + ); + console.log('Authentication successful! 
Token saved.'); + + // Verify token was saved + const savedToken = await MCPOAuthTokenStorage.getToken(serverName); + if (savedToken) { + console.log( + `Token verification successful: ${savedToken.token.accessToken.substring(0, 20)}...`, + ); + } else { + console.error('Token verification failed: token not found after save'); + } + } catch (saveError) { + console.error(`Failed to save token: ${getErrorMessage(saveError)}`); + throw saveError; + } + + return token; + } + + /** + * Get a valid access token for an MCP server, refreshing if necessary. + * + * @param serverName The name of the MCP server + * @param config OAuth configuration + * @returns A valid access token or null if not authenticated + */ + static async getValidToken( + serverName: string, + config: MCPOAuthConfig, + ): Promise { + console.debug(`Getting valid token for server: ${serverName}`); + const credentials = await MCPOAuthTokenStorage.getToken(serverName); + + if (!credentials) { + console.debug(`No credentials found for server: ${serverName}`); + return null; + } + + const { token } = credentials; + console.debug( + `Found token for server: ${serverName}, expired: ${MCPOAuthTokenStorage.isTokenExpired(token)}`, + ); + + // Check if token is expired + if (!MCPOAuthTokenStorage.isTokenExpired(token)) { + console.debug(`Returning valid token for server: ${serverName}`); + return token.accessToken; + } + + // Try to refresh if we have a refresh token + if (token.refreshToken && config.clientId && credentials.tokenUrl) { + try { + console.log(`Refreshing expired token for MCP server: ${serverName}`); + + const newTokenResponse = await this.refreshAccessToken( + config, + token.refreshToken, + credentials.tokenUrl, + credentials.mcpServerUrl, + ); + + // Update stored token + const newToken: MCPOAuthToken = { + accessToken: newTokenResponse.access_token, + tokenType: newTokenResponse.token_type, + refreshToken: newTokenResponse.refresh_token || token.refreshToken, + scope: newTokenResponse.scope 
|| token.scope, + }; + + if (newTokenResponse.expires_in) { + newToken.expiresAt = Date.now() + newTokenResponse.expires_in * 1000; + } + + await MCPOAuthTokenStorage.saveToken( + serverName, + newToken, + config.clientId, + credentials.tokenUrl, + credentials.mcpServerUrl, + ); + + return newToken.accessToken; + } catch (error) { + console.error(`Failed to refresh token: ${getErrorMessage(error)}`); + // Remove invalid token + await MCPOAuthTokenStorage.removeToken(serverName); + } + } + + return null; + } +} diff --git a/packages/core/src/mcp/oauth-token-storage.test.ts b/packages/core/src/mcp/oauth-token-storage.test.ts new file mode 100644 index 000000000..5fe2f3f59 --- /dev/null +++ b/packages/core/src/mcp/oauth-token-storage.test.ts @@ -0,0 +1,325 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; +import { promises as fs } from 'node:fs'; +import * as path from 'node:path'; +import { + MCPOAuthTokenStorage, + MCPOAuthToken, + MCPOAuthCredentials, +} from './oauth-token-storage.js'; + +// Mock file system operations +vi.mock('node:fs', () => ({ + promises: { + readFile: vi.fn(), + writeFile: vi.fn(), + mkdir: vi.fn(), + unlink: vi.fn(), + }, +})); + +vi.mock('node:os', () => ({ + homedir: vi.fn(() => '/mock/home'), +})); + +describe('MCPOAuthTokenStorage', () => { + const mockToken: MCPOAuthToken = { + accessToken: 'access_token_123', + refreshToken: 'refresh_token_456', + tokenType: 'Bearer', + scope: 'read write', + expiresAt: Date.now() + 3600000, // 1 hour from now + }; + + const mockCredentials: MCPOAuthCredentials = { + serverName: 'test-server', + token: mockToken, + clientId: 'test-client-id', + tokenUrl: 'https://auth.example.com/token', + updatedAt: Date.now(), + }; + + beforeEach(() => { + vi.clearAllMocks(); + vi.spyOn(console, 'error').mockImplementation(() => {}); + }); + + afterEach(() => { + vi.restoreAllMocks(); + }); + + 
describe('loadTokens', () => { + it('should return empty map when token file does not exist', async () => { + vi.mocked(fs.readFile).mockRejectedValue({ code: 'ENOENT' }); + + const tokens = await MCPOAuthTokenStorage.loadTokens(); + + expect(tokens.size).toBe(0); + expect(console.error).not.toHaveBeenCalled(); + }); + + it('should load tokens from file successfully', async () => { + const tokensArray = [mockCredentials]; + vi.mocked(fs.readFile).mockResolvedValue(JSON.stringify(tokensArray)); + + const tokens = await MCPOAuthTokenStorage.loadTokens(); + + expect(tokens.size).toBe(1); + expect(tokens.get('test-server')).toEqual(mockCredentials); + expect(fs.readFile).toHaveBeenCalledWith( + path.join('/mock/home', '.gemini', 'mcp-oauth-tokens.json'), + 'utf-8', + ); + }); + + it('should handle corrupted token file gracefully', async () => { + vi.mocked(fs.readFile).mockResolvedValue('invalid json'); + + const tokens = await MCPOAuthTokenStorage.loadTokens(); + + expect(tokens.size).toBe(0); + expect(console.error).toHaveBeenCalledWith( + expect.stringContaining('Failed to load MCP OAuth tokens'), + ); + }); + + it('should handle file read errors other than ENOENT', async () => { + const error = new Error('Permission denied'); + vi.mocked(fs.readFile).mockRejectedValue(error); + + const tokens = await MCPOAuthTokenStorage.loadTokens(); + + expect(tokens.size).toBe(0); + expect(console.error).toHaveBeenCalledWith( + expect.stringContaining('Failed to load MCP OAuth tokens'), + ); + }); + }); + + describe('saveToken', () => { + it('should save token with restricted permissions', async () => { + vi.mocked(fs.readFile).mockRejectedValue({ code: 'ENOENT' }); + vi.mocked(fs.mkdir).mockResolvedValue(undefined); + vi.mocked(fs.writeFile).mockResolvedValue(undefined); + + await MCPOAuthTokenStorage.saveToken( + 'test-server', + mockToken, + 'client-id', + 'https://token.url', + ); + + expect(fs.mkdir).toHaveBeenCalledWith( + path.join('/mock/home', '.gemini'), + { recursive: 
true }, + ); + expect(fs.writeFile).toHaveBeenCalledWith( + path.join('/mock/home', '.gemini', 'mcp-oauth-tokens.json'), + expect.stringContaining('test-server'), + { mode: 0o600 }, + ); + }); + + it('should update existing token for same server', async () => { + const existingCredentials = { + ...mockCredentials, + serverName: 'existing-server', + }; + vi.mocked(fs.readFile).mockResolvedValue( + JSON.stringify([existingCredentials]), + ); + vi.mocked(fs.writeFile).mockResolvedValue(undefined); + + const newToken = { ...mockToken, accessToken: 'new_access_token' }; + await MCPOAuthTokenStorage.saveToken('existing-server', newToken); + + const writeCall = vi.mocked(fs.writeFile).mock.calls[0]; + const savedData = JSON.parse(writeCall[1] as string); + + expect(savedData).toHaveLength(1); + expect(savedData[0].token.accessToken).toBe('new_access_token'); + expect(savedData[0].serverName).toBe('existing-server'); + }); + + it('should handle write errors gracefully', async () => { + vi.mocked(fs.readFile).mockRejectedValue({ code: 'ENOENT' }); + vi.mocked(fs.mkdir).mockResolvedValue(undefined); + const writeError = new Error('Disk full'); + vi.mocked(fs.writeFile).mockRejectedValue(writeError); + + await expect( + MCPOAuthTokenStorage.saveToken('test-server', mockToken), + ).rejects.toThrow('Disk full'); + + expect(console.error).toHaveBeenCalledWith( + expect.stringContaining('Failed to save MCP OAuth token'), + ); + }); + }); + + describe('getToken', () => { + it('should return token for existing server', async () => { + vi.mocked(fs.readFile).mockResolvedValue( + JSON.stringify([mockCredentials]), + ); + + const result = await MCPOAuthTokenStorage.getToken('test-server'); + + expect(result).toEqual(mockCredentials); + }); + + it('should return null for non-existent server', async () => { + vi.mocked(fs.readFile).mockResolvedValue( + JSON.stringify([mockCredentials]), + ); + + const result = await MCPOAuthTokenStorage.getToken('non-existent'); + + 
expect(result).toBeNull(); + }); + + it('should return null when no tokens file exists', async () => { + vi.mocked(fs.readFile).mockRejectedValue({ code: 'ENOENT' }); + + const result = await MCPOAuthTokenStorage.getToken('test-server'); + + expect(result).toBeNull(); + }); + }); + + describe('removeToken', () => { + it('should remove token for specific server', async () => { + const credentials1 = { ...mockCredentials, serverName: 'server1' }; + const credentials2 = { ...mockCredentials, serverName: 'server2' }; + vi.mocked(fs.readFile).mockResolvedValue( + JSON.stringify([credentials1, credentials2]), + ); + vi.mocked(fs.writeFile).mockResolvedValue(undefined); + + await MCPOAuthTokenStorage.removeToken('server1'); + + const writeCall = vi.mocked(fs.writeFile).mock.calls[0]; + const savedData = JSON.parse(writeCall[1] as string); + + expect(savedData).toHaveLength(1); + expect(savedData[0].serverName).toBe('server2'); + }); + + it('should remove token file when no tokens remain', async () => { + vi.mocked(fs.readFile).mockResolvedValue( + JSON.stringify([mockCredentials]), + ); + vi.mocked(fs.unlink).mockResolvedValue(undefined); + + await MCPOAuthTokenStorage.removeToken('test-server'); + + expect(fs.unlink).toHaveBeenCalledWith( + path.join('/mock/home', '.gemini', 'mcp-oauth-tokens.json'), + ); + expect(fs.writeFile).not.toHaveBeenCalled(); + }); + + it('should handle removal of non-existent token gracefully', async () => { + vi.mocked(fs.readFile).mockResolvedValue( + JSON.stringify([mockCredentials]), + ); + + await MCPOAuthTokenStorage.removeToken('non-existent'); + + expect(fs.writeFile).not.toHaveBeenCalled(); + expect(fs.unlink).not.toHaveBeenCalled(); + }); + + it('should handle file operation errors gracefully', async () => { + vi.mocked(fs.readFile).mockResolvedValue( + JSON.stringify([mockCredentials]), + ); + vi.mocked(fs.unlink).mockRejectedValue(new Error('Permission denied')); + + await MCPOAuthTokenStorage.removeToken('test-server'); + + 
expect(console.error).toHaveBeenCalledWith( + expect.stringContaining('Failed to remove MCP OAuth token'), + ); + }); + }); + + describe('isTokenExpired', () => { + it('should return false for token without expiry', () => { + const tokenWithoutExpiry = { ...mockToken }; + delete tokenWithoutExpiry.expiresAt; + + const result = MCPOAuthTokenStorage.isTokenExpired(tokenWithoutExpiry); + + expect(result).toBe(false); + }); + + it('should return false for valid token', () => { + const futureToken = { + ...mockToken, + expiresAt: Date.now() + 3600000, // 1 hour from now + }; + + const result = MCPOAuthTokenStorage.isTokenExpired(futureToken); + + expect(result).toBe(false); + }); + + it('should return true for expired token', () => { + const expiredToken = { + ...mockToken, + expiresAt: Date.now() - 3600000, // 1 hour ago + }; + + const result = MCPOAuthTokenStorage.isTokenExpired(expiredToken); + + expect(result).toBe(true); + }); + + it('should return true for token expiring within buffer time', () => { + const soonToExpireToken = { + ...mockToken, + expiresAt: Date.now() + 60000, // 1 minute from now (within 5-minute buffer) + }; + + const result = MCPOAuthTokenStorage.isTokenExpired(soonToExpireToken); + + expect(result).toBe(true); + }); + }); + + describe('clearAllTokens', () => { + it('should remove token file successfully', async () => { + vi.mocked(fs.unlink).mockResolvedValue(undefined); + + await MCPOAuthTokenStorage.clearAllTokens(); + + expect(fs.unlink).toHaveBeenCalledWith( + path.join('/mock/home', '.gemini', 'mcp-oauth-tokens.json'), + ); + }); + + it('should handle non-existent file gracefully', async () => { + vi.mocked(fs.unlink).mockRejectedValue({ code: 'ENOENT' }); + + await MCPOAuthTokenStorage.clearAllTokens(); + + expect(console.error).not.toHaveBeenCalled(); + }); + + it('should handle other file errors gracefully', async () => { + vi.mocked(fs.unlink).mockRejectedValue(new Error('Permission denied')); + + await 
MCPOAuthTokenStorage.clearAllTokens(); + + expect(console.error).toHaveBeenCalledWith( + expect.stringContaining('Failed to clear MCP OAuth tokens'), + ); + }); + }); +}); diff --git a/packages/core/src/mcp/oauth-token-storage.ts b/packages/core/src/mcp/oauth-token-storage.ts new file mode 100644 index 000000000..0500b43e0 --- /dev/null +++ b/packages/core/src/mcp/oauth-token-storage.ts @@ -0,0 +1,209 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { promises as fs } from 'node:fs'; +import * as path from 'node:path'; +import * as os from 'node:os'; +import { getErrorMessage } from '../utils/errors.js'; + +/** + * Interface for MCP OAuth tokens. + */ +export interface MCPOAuthToken { + accessToken: string; + refreshToken?: string; + expiresAt?: number; + tokenType: string; + scope?: string; +} + +/** + * Interface for stored MCP OAuth credentials. + */ +export interface MCPOAuthCredentials { + serverName: string; + token: MCPOAuthToken; + clientId?: string; + tokenUrl?: string; + mcpServerUrl?: string; + updatedAt: number; +} + +/** + * Class for managing MCP OAuth token storage and retrieval. + */ +export class MCPOAuthTokenStorage { + private static readonly TOKEN_FILE = 'mcp-oauth-tokens.json'; + private static readonly CONFIG_DIR = '.gemini'; + + /** + * Get the path to the token storage file. + * + * @returns The full path to the token storage file + */ + private static getTokenFilePath(): string { + const homeDir = os.homedir(); + return path.join(homeDir, this.CONFIG_DIR, this.TOKEN_FILE); + } + + /** + * Ensure the config directory exists. + */ + private static async ensureConfigDir(): Promise { + const configDir = path.dirname(this.getTokenFilePath()); + await fs.mkdir(configDir, { recursive: true }); + } + + /** + * Load all stored MCP OAuth tokens. 
+ * + * @returns A map of server names to credentials + */ + static async loadTokens(): Promise> { + const tokenMap = new Map(); + + try { + const tokenFile = this.getTokenFilePath(); + const data = await fs.readFile(tokenFile, 'utf-8'); + const tokens = JSON.parse(data) as MCPOAuthCredentials[]; + + for (const credential of tokens) { + tokenMap.set(credential.serverName, credential); + } + } catch (error) { + // File doesn't exist or is invalid, return empty map + if ((error as NodeJS.ErrnoException).code !== 'ENOENT') { + console.error( + `Failed to load MCP OAuth tokens: ${getErrorMessage(error)}`, + ); + } + } + + return tokenMap; + } + + /** + * Save a token for a specific MCP server. + * + * @param serverName The name of the MCP server + * @param token The OAuth token to save + * @param clientId Optional client ID used for this token + * @param tokenUrl Optional token URL used for this token + * @param mcpServerUrl Optional MCP server URL + */ + static async saveToken( + serverName: string, + token: MCPOAuthToken, + clientId?: string, + tokenUrl?: string, + mcpServerUrl?: string, + ): Promise { + await this.ensureConfigDir(); + + const tokens = await this.loadTokens(); + + const credential: MCPOAuthCredentials = { + serverName, + token, + clientId, + tokenUrl, + mcpServerUrl, + updatedAt: Date.now(), + }; + + tokens.set(serverName, credential); + + const tokenArray = Array.from(tokens.values()); + const tokenFile = this.getTokenFilePath(); + + try { + await fs.writeFile( + tokenFile, + JSON.stringify(tokenArray, null, 2), + { mode: 0o600 }, // Restrict file permissions + ); + } catch (error) { + console.error( + `Failed to save MCP OAuth token: ${getErrorMessage(error)}`, + ); + throw error; + } + } + + /** + * Get a token for a specific MCP server. 
+ * + * @param serverName The name of the MCP server + * @returns The stored credentials or null if not found + */ + static async getToken( + serverName: string, + ): Promise { + const tokens = await this.loadTokens(); + return tokens.get(serverName) || null; + } + + /** + * Remove a token for a specific MCP server. + * + * @param serverName The name of the MCP server + */ + static async removeToken(serverName: string): Promise { + const tokens = await this.loadTokens(); + + if (tokens.delete(serverName)) { + const tokenArray = Array.from(tokens.values()); + const tokenFile = this.getTokenFilePath(); + + try { + if (tokenArray.length === 0) { + // Remove file if no tokens left + await fs.unlink(tokenFile); + } else { + await fs.writeFile(tokenFile, JSON.stringify(tokenArray, null, 2), { + mode: 0o600, + }); + } + } catch (error) { + console.error( + `Failed to remove MCP OAuth token: ${getErrorMessage(error)}`, + ); + } + } + } + + /** + * Check if a token is expired. + * + * @param token The token to check + * @returns True if the token is expired + */ + static isTokenExpired(token: MCPOAuthToken): boolean { + if (!token.expiresAt) { + return false; // No expiry, assume valid + } + + // Add a 5-minute buffer to account for clock skew + const bufferMs = 5 * 60 * 1000; + return Date.now() + bufferMs >= token.expiresAt; + } + + /** + * Clear all stored MCP OAuth tokens. 
+ */ + static async clearAllTokens(): Promise { + try { + const tokenFile = this.getTokenFilePath(); + await fs.unlink(tokenFile); + } catch (error) { + if ((error as NodeJS.ErrnoException).code !== 'ENOENT') { + console.error( + `Failed to clear MCP OAuth tokens: ${getErrorMessage(error)}`, + ); + } + } + } +} diff --git a/packages/core/src/mcp/oauth-utils.test.ts b/packages/core/src/mcp/oauth-utils.test.ts new file mode 100644 index 000000000..b27d97b34 --- /dev/null +++ b/packages/core/src/mcp/oauth-utils.test.ts @@ -0,0 +1,206 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; +import { + OAuthUtils, + OAuthAuthorizationServerMetadata, + OAuthProtectedResourceMetadata, +} from './oauth-utils.js'; + +// Mock fetch globally +const mockFetch = vi.fn(); +global.fetch = mockFetch; + +describe('OAuthUtils', () => { + beforeEach(() => { + vi.clearAllMocks(); + vi.spyOn(console, 'debug').mockImplementation(() => {}); + vi.spyOn(console, 'error').mockImplementation(() => {}); + vi.spyOn(console, 'log').mockImplementation(() => {}); + }); + + afterEach(() => { + vi.restoreAllMocks(); + }); + + describe('buildWellKnownUrls', () => { + it('should build correct well-known URLs', () => { + const urls = OAuthUtils.buildWellKnownUrls('https://example.com/path'); + expect(urls.protectedResource).toBe( + 'https://example.com/.well-known/oauth-protected-resource', + ); + expect(urls.authorizationServer).toBe( + 'https://example.com/.well-known/oauth-authorization-server', + ); + }); + }); + + describe('fetchProtectedResourceMetadata', () => { + const mockResourceMetadata: OAuthProtectedResourceMetadata = { + resource: 'https://api.example.com', + authorization_servers: ['https://auth.example.com'], + bearer_methods_supported: ['header'], + }; + + it('should fetch protected resource metadata successfully', async () => { + mockFetch.mockResolvedValueOnce({ + ok: 
true, + json: () => Promise.resolve(mockResourceMetadata), + }); + + const result = await OAuthUtils.fetchProtectedResourceMetadata( + 'https://example.com/.well-known/oauth-protected-resource', + ); + + expect(result).toEqual(mockResourceMetadata); + }); + + it('should return null when fetch fails', async () => { + mockFetch.mockResolvedValueOnce({ + ok: false, + }); + + const result = await OAuthUtils.fetchProtectedResourceMetadata( + 'https://example.com/.well-known/oauth-protected-resource', + ); + + expect(result).toBeNull(); + }); + }); + + describe('fetchAuthorizationServerMetadata', () => { + const mockAuthServerMetadata: OAuthAuthorizationServerMetadata = { + issuer: 'https://auth.example.com', + authorization_endpoint: 'https://auth.example.com/authorize', + token_endpoint: 'https://auth.example.com/token', + scopes_supported: ['read', 'write'], + }; + + it('should fetch authorization server metadata successfully', async () => { + mockFetch.mockResolvedValueOnce({ + ok: true, + json: () => Promise.resolve(mockAuthServerMetadata), + }); + + const result = await OAuthUtils.fetchAuthorizationServerMetadata( + 'https://auth.example.com/.well-known/oauth-authorization-server', + ); + + expect(result).toEqual(mockAuthServerMetadata); + }); + + it('should return null when fetch fails', async () => { + mockFetch.mockResolvedValueOnce({ + ok: false, + }); + + const result = await OAuthUtils.fetchAuthorizationServerMetadata( + 'https://auth.example.com/.well-known/oauth-authorization-server', + ); + + expect(result).toBeNull(); + }); + }); + + describe('metadataToOAuthConfig', () => { + it('should convert metadata to OAuth config', () => { + const metadata: OAuthAuthorizationServerMetadata = { + issuer: 'https://auth.example.com', + authorization_endpoint: 'https://auth.example.com/authorize', + token_endpoint: 'https://auth.example.com/token', + scopes_supported: ['read', 'write'], + }; + + const config = OAuthUtils.metadataToOAuthConfig(metadata); + + 
expect(config).toEqual({ + authorizationUrl: 'https://auth.example.com/authorize', + tokenUrl: 'https://auth.example.com/token', + scopes: ['read', 'write'], + }); + }); + + it('should handle empty scopes', () => { + const metadata: OAuthAuthorizationServerMetadata = { + issuer: 'https://auth.example.com', + authorization_endpoint: 'https://auth.example.com/authorize', + token_endpoint: 'https://auth.example.com/token', + }; + + const config = OAuthUtils.metadataToOAuthConfig(metadata); + + expect(config.scopes).toEqual([]); + }); + }); + + describe('parseWWWAuthenticateHeader', () => { + it('should parse resource metadata URI from WWW-Authenticate header', () => { + const header = + 'Bearer realm="example", resource_metadata_uri="https://example.com/.well-known/oauth-protected-resource"'; + const result = OAuthUtils.parseWWWAuthenticateHeader(header); + expect(result).toBe( + 'https://example.com/.well-known/oauth-protected-resource', + ); + }); + + it('should return null when no resource metadata URI is found', () => { + const header = 'Bearer realm="example"'; + const result = OAuthUtils.parseWWWAuthenticateHeader(header); + expect(result).toBeNull(); + }); + }); + + describe('extractBaseUrl', () => { + it('should extract base URL from MCP server URL', () => { + const result = OAuthUtils.extractBaseUrl('https://example.com/mcp/v1'); + expect(result).toBe('https://example.com'); + }); + + it('should handle URLs with ports', () => { + const result = OAuthUtils.extractBaseUrl( + 'https://example.com:8080/mcp/v1', + ); + expect(result).toBe('https://example.com:8080'); + }); + }); + + describe('isSSEEndpoint', () => { + it('should return true for SSE endpoints', () => { + expect(OAuthUtils.isSSEEndpoint('https://example.com/sse')).toBe(true); + expect(OAuthUtils.isSSEEndpoint('https://example.com/api/v1/sse')).toBe( + true, + ); + }); + + it('should return true for non-MCP endpoints', () => { + expect(OAuthUtils.isSSEEndpoint('https://example.com/api')).toBe(true); 
+ }); + + it('should return false for MCP endpoints', () => { + expect(OAuthUtils.isSSEEndpoint('https://example.com/mcp')).toBe(false); + expect(OAuthUtils.isSSEEndpoint('https://example.com/api/mcp/v1')).toBe( + false, + ); + }); + }); + + describe('buildResourceParameter', () => { + it('should build resource parameter from endpoint URL', () => { + const result = OAuthUtils.buildResourceParameter( + 'https://example.com/oauth/token', + ); + expect(result).toBe('https://example.com'); + }); + + it('should handle URLs with ports', () => { + const result = OAuthUtils.buildResourceParameter( + 'https://example.com:8080/oauth/token', + ); + expect(result).toBe('https://example.com:8080'); + }); + }); +}); diff --git a/packages/core/src/mcp/oauth-utils.ts b/packages/core/src/mcp/oauth-utils.ts new file mode 100644 index 000000000..6dad17c86 --- /dev/null +++ b/packages/core/src/mcp/oauth-utils.ts @@ -0,0 +1,285 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { MCPOAuthConfig } from './oauth-provider.js'; +import { getErrorMessage } from '../utils/errors.js'; + +/** + * OAuth authorization server metadata as per RFC 8414. + */ +export interface OAuthAuthorizationServerMetadata { + issuer: string; + authorization_endpoint: string; + token_endpoint: string; + token_endpoint_auth_methods_supported?: string[]; + revocation_endpoint?: string; + revocation_endpoint_auth_methods_supported?: string[]; + registration_endpoint?: string; + response_types_supported?: string[]; + grant_types_supported?: string[]; + code_challenge_methods_supported?: string[]; + scopes_supported?: string[]; +} + +/** + * OAuth protected resource metadata as per RFC 9728. 
+ */ +export interface OAuthProtectedResourceMetadata { + resource: string; + authorization_servers?: string[]; + bearer_methods_supported?: string[]; + resource_documentation?: string; + resource_signing_alg_values_supported?: string[]; + resource_encryption_alg_values_supported?: string[]; + resource_encryption_enc_values_supported?: string[]; +} + +/** + * Utility class for common OAuth operations. + */ +export class OAuthUtils { + /** + * Construct well-known OAuth endpoint URLs. + */ + static buildWellKnownUrls(baseUrl: string) { + const serverUrl = new URL(baseUrl); + const base = `${serverUrl.protocol}//${serverUrl.host}`; + + return { + protectedResource: new URL( + '/.well-known/oauth-protected-resource', + base, + ).toString(), + authorizationServer: new URL( + '/.well-known/oauth-authorization-server', + base, + ).toString(), + }; + } + + /** + * Fetch OAuth protected resource metadata. + * + * @param resourceMetadataUrl The protected resource metadata URL + * @returns The protected resource metadata or null if not available + */ + static async fetchProtectedResourceMetadata( + resourceMetadataUrl: string, + ): Promise { + try { + const response = await fetch(resourceMetadataUrl); + if (!response.ok) { + return null; + } + return (await response.json()) as OAuthProtectedResourceMetadata; + } catch (error) { + console.debug( + `Failed to fetch protected resource metadata from ${resourceMetadataUrl}: ${getErrorMessage(error)}`, + ); + return null; + } + } + + /** + * Fetch OAuth authorization server metadata. 
+ * + * @param authServerMetadataUrl The authorization server metadata URL + * @returns The authorization server metadata or null if not available + */ + static async fetchAuthorizationServerMetadata( + authServerMetadataUrl: string, + ): Promise { + try { + const response = await fetch(authServerMetadataUrl); + if (!response.ok) { + return null; + } + return (await response.json()) as OAuthAuthorizationServerMetadata; + } catch (error) { + console.debug( + `Failed to fetch authorization server metadata from ${authServerMetadataUrl}: ${getErrorMessage(error)}`, + ); + return null; + } + } + + /** + * Convert authorization server metadata to OAuth configuration. + * + * @param metadata The authorization server metadata + * @returns The OAuth configuration + */ + static metadataToOAuthConfig( + metadata: OAuthAuthorizationServerMetadata, + ): MCPOAuthConfig { + return { + authorizationUrl: metadata.authorization_endpoint, + tokenUrl: metadata.token_endpoint, + scopes: metadata.scopes_supported || [], + }; + } + + /** + * Discover OAuth configuration using the standard well-known endpoints. 
+ * + * @param serverUrl The base URL of the server + * @returns The discovered OAuth configuration or null if not available + */ + static async discoverOAuthConfig( + serverUrl: string, + ): Promise { + try { + const wellKnownUrls = this.buildWellKnownUrls(serverUrl); + + // First, try to get the protected resource metadata + const resourceMetadata = await this.fetchProtectedResourceMetadata( + wellKnownUrls.protectedResource, + ); + + if (resourceMetadata?.authorization_servers?.length) { + // Use the first authorization server + const authServerUrl = resourceMetadata.authorization_servers[0]; + const authServerMetadataUrl = new URL( + '/.well-known/oauth-authorization-server', + authServerUrl, + ).toString(); + + const authServerMetadata = await this.fetchAuthorizationServerMetadata( + authServerMetadataUrl, + ); + + if (authServerMetadata) { + const config = this.metadataToOAuthConfig(authServerMetadata); + if (authServerMetadata.registration_endpoint) { + console.log( + 'Dynamic client registration is supported at:', + authServerMetadata.registration_endpoint, + ); + } + return config; + } + } + + // Fallback: try /.well-known/oauth-authorization-server at the base URL + console.debug( + `Trying OAuth discovery fallback at ${wellKnownUrls.authorizationServer}`, + ); + const authServerMetadata = await this.fetchAuthorizationServerMetadata( + wellKnownUrls.authorizationServer, + ); + + if (authServerMetadata) { + const config = this.metadataToOAuthConfig(authServerMetadata); + if (authServerMetadata.registration_endpoint) { + console.log( + 'Dynamic client registration is supported at:', + authServerMetadata.registration_endpoint, + ); + } + return config; + } + + return null; + } catch (error) { + console.debug( + `Failed to discover OAuth configuration: ${getErrorMessage(error)}`, + ); + return null; + } + } + + /** + * Parse WWW-Authenticate header to extract OAuth information. 
+ * + * @param header The WWW-Authenticate header value + * @returns The resource metadata URI if found + */ + static parseWWWAuthenticateHeader(header: string): string | null { + // Parse Bearer realm and resource_metadata_uri + const match = header.match(/resource_metadata_uri="([^"]+)"/); + if (match) { + return match[1]; + } + return null; + } + + /** + * Discover OAuth configuration from WWW-Authenticate header. + * + * @param wwwAuthenticate The WWW-Authenticate header value + * @returns The discovered OAuth configuration or null if not available + */ + static async discoverOAuthFromWWWAuthenticate( + wwwAuthenticate: string, + ): Promise { + const resourceMetadataUri = + this.parseWWWAuthenticateHeader(wwwAuthenticate); + if (!resourceMetadataUri) { + return null; + } + + console.log( + `Found resource metadata URI from www-authenticate header: ${resourceMetadataUri}`, + ); + + const resourceMetadata = + await this.fetchProtectedResourceMetadata(resourceMetadataUri); + if (!resourceMetadata?.authorization_servers?.length) { + return null; + } + + const authServerUrl = resourceMetadata.authorization_servers[0]; + const authServerMetadataUrl = new URL( + '/.well-known/oauth-authorization-server', + authServerUrl, + ).toString(); + + const authServerMetadata = await this.fetchAuthorizationServerMetadata( + authServerMetadataUrl, + ); + + if (authServerMetadata) { + console.log( + 'OAuth configuration discovered successfully from www-authenticate header', + ); + return this.metadataToOAuthConfig(authServerMetadata); + } + + return null; + } + + /** + * Extract base URL from an MCP server URL. + * + * @param mcpServerUrl The MCP server URL + * @returns The base URL + */ + static extractBaseUrl(mcpServerUrl: string): string { + const serverUrl = new URL(mcpServerUrl); + return `${serverUrl.protocol}//${serverUrl.host}`; + } + + /** + * Check if a URL is an SSE endpoint. 
+ * + * @param url The URL to check + * @returns True if the URL appears to be an SSE endpoint + */ + static isSSEEndpoint(url: string): boolean { + return url.includes('/sse') || !url.includes('/mcp'); + } + + /** + * Build a resource parameter for OAuth requests. + * + * @param endpointUrl The endpoint URL + * @returns The resource parameter value + */ + static buildResourceParameter(endpointUrl: string): string { + const url = new URL(endpointUrl); + return `${url.protocol}//${url.host}`; + } +} diff --git a/packages/core/src/prompts/mcp-prompts.ts b/packages/core/src/prompts/mcp-prompts.ts new file mode 100644 index 000000000..7265a023e --- /dev/null +++ b/packages/core/src/prompts/mcp-prompts.ts @@ -0,0 +1,19 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { Config } from '../config/config.js'; +import { DiscoveredMCPPrompt } from '../tools/mcp-client.js'; + +export function getMCPServerPrompts( + config: Config, + serverName: string, +): DiscoveredMCPPrompt[] { + const promptRegistry = config.getPromptRegistry(); + if (!promptRegistry) { + return []; + } + return promptRegistry.getPromptsByServer(serverName); +} diff --git a/packages/core/src/prompts/prompt-registry.ts b/packages/core/src/prompts/prompt-registry.ts new file mode 100644 index 000000000..566991307 --- /dev/null +++ b/packages/core/src/prompts/prompt-registry.ts @@ -0,0 +1,56 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { DiscoveredMCPPrompt } from '../tools/mcp-client.js'; + +export class PromptRegistry { + private prompts: Map = new Map(); + + /** + * Registers a prompt definition. + * @param prompt - The prompt object containing schema and execution logic. 
+ */ + registerPrompt(prompt: DiscoveredMCPPrompt): void { + if (this.prompts.has(prompt.name)) { + const newName = `${prompt.serverName}_${prompt.name}`; + console.warn( + `Prompt with name "${prompt.name}" is already registered. Renaming to "${newName}".`, + ); + this.prompts.set(newName, { ...prompt, name: newName }); + } else { + this.prompts.set(prompt.name, prompt); + } + } + + /** + * Returns an array of all registered and discovered prompt instances. + */ + getAllPrompts(): DiscoveredMCPPrompt[] { + return Array.from(this.prompts.values()).sort((a, b) => + a.name.localeCompare(b.name), + ); + } + + /** + * Get the definition of a specific prompt. + */ + getPrompt(name: string): DiscoveredMCPPrompt | undefined { + return this.prompts.get(name); + } + + /** + * Returns an array of prompts registered from a specific MCP server. + */ + getPromptsByServer(serverName: string): DiscoveredMCPPrompt[] { + const serverPrompts: DiscoveredMCPPrompt[] = []; + for (const prompt of this.prompts.values()) { + if (prompt.serverName === serverName) { + serverPrompts.push(prompt); + } + } + return serverPrompts.sort((a, b) => a.name.localeCompare(b.name)); + } +} diff --git a/packages/core/src/services/fileDiscoveryService.test.ts b/packages/core/src/services/fileDiscoveryService.test.ts index d7530cd69..f8a03f629 100644 --- a/packages/core/src/services/fileDiscoveryService.test.ts +++ b/packages/core/src/services/fileDiscoveryService.test.ts @@ -4,80 +4,86 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { describe, it, expect, beforeEach, vi, afterEach } from 'vitest'; -import type { Mocked } from 'vitest'; +import { describe, it, expect, beforeEach, afterEach } from 'vitest'; +import * as fs from 'fs/promises'; +import * as os from 'os'; +import * as path from 'path'; import { FileDiscoveryService } from './fileDiscoveryService.js'; -import { GitIgnoreParser } from '../utils/gitIgnoreParser.js'; -import * as gitUtils from '../utils/gitUtils.js'; - -// Mock the 
GitIgnoreParser -vi.mock('../utils/gitIgnoreParser.js'); - -// Mock gitUtils module -vi.mock('../utils/gitUtils.js'); describe('FileDiscoveryService', () => { - let service: FileDiscoveryService; - let mockGitIgnoreParser: Mocked; - const mockProjectRoot = '/test/project'; + let testRootDir: string; + let projectRoot: string; - beforeEach(() => { - mockGitIgnoreParser = { - initialize: vi.fn(), - isIgnored: vi.fn(), - loadPatterns: vi.fn(), - loadGitRepoPatterns: vi.fn(), - } as unknown as Mocked; + async function createTestFile(filePath: string, content = '') { + const fullPath = path.join(projectRoot, filePath); + await fs.mkdir(path.dirname(fullPath), { recursive: true }); + await fs.writeFile(fullPath, content); + return fullPath; + } - vi.mocked(GitIgnoreParser).mockImplementation(() => mockGitIgnoreParser); - vi.mocked(gitUtils.isGitRepository).mockReturnValue(true); - vi.mocked(gitUtils.findGitRoot).mockReturnValue('/test/project'); - vi.clearAllMocks(); + beforeEach(async () => { + testRootDir = await fs.mkdtemp( + path.join(os.tmpdir(), 'file-discovery-test-'), + ); + projectRoot = path.join(testRootDir, 'project'); + await fs.mkdir(projectRoot, { recursive: true }); }); - afterEach(() => { - vi.restoreAllMocks(); + afterEach(async () => { + await fs.rm(testRootDir, { recursive: true, force: true }); }); describe('initialization', () => { - it('should initialize git ignore parser by default', () => { - service = new FileDiscoveryService(mockProjectRoot); - expect(GitIgnoreParser).toHaveBeenCalledWith(mockProjectRoot); - expect(GitIgnoreParser).toHaveBeenCalledTimes(2); - expect(mockGitIgnoreParser.loadGitRepoPatterns).toHaveBeenCalled(); - expect(mockGitIgnoreParser.loadPatterns).toHaveBeenCalled(); + it('should initialize git ignore parser by default in a git repo', async () => { + await fs.mkdir(path.join(projectRoot, '.git')); + await createTestFile('.gitignore', 'node_modules/'); + + const service = new FileDiscoveryService(projectRoot); + // Let's 
check the effect of the parser instead of mocking it. + expect(service.shouldGitIgnoreFile('node_modules/foo.js')).toBe(true); + expect(service.shouldGitIgnoreFile('src/foo.js')).toBe(false); }); - it('should not initialize git ignore parser when not a git repo', () => { - vi.mocked(gitUtils.isGitRepository).mockReturnValue(false); - service = new FileDiscoveryService(mockProjectRoot); + it('should not load git repo patterns when not in a git repo', async () => { + // No .git directory + await createTestFile('.gitignore', 'node_modules/'); + const service = new FileDiscoveryService(projectRoot); - expect(GitIgnoreParser).toHaveBeenCalledOnce(); - expect(mockGitIgnoreParser.loadGitRepoPatterns).not.toHaveBeenCalled(); + // .gitignore is not loaded in non-git repos + expect(service.shouldGitIgnoreFile('node_modules/foo.js')).toBe(false); + }); + + it('should load .geminiignore patterns even when not in a git repo', async () => { + await createTestFile('.geminiignore', 'secrets.txt'); + const service = new FileDiscoveryService(projectRoot); + + expect(service.shouldGeminiIgnoreFile('secrets.txt')).toBe(true); + expect(service.shouldGeminiIgnoreFile('src/index.js')).toBe(false); }); }); describe('filterFiles', () => { - beforeEach(() => { - mockGitIgnoreParser.isIgnored.mockImplementation( - (path: string) => - path.includes('node_modules') || path.includes('.git'), - ); - service = new FileDiscoveryService(mockProjectRoot); + beforeEach(async () => { + await fs.mkdir(path.join(projectRoot, '.git')); + await createTestFile('.gitignore', 'node_modules/\n.git/\ndist'); + await createTestFile('.geminiignore', 'logs/'); }); - it('should filter out git-ignored files by default', () => { + it('should filter out git-ignored and gemini-ignored files by default', () => { const files = [ 'src/index.ts', 'node_modules/package/index.js', 'README.md', '.git/config', 'dist/bundle.js', - ]; + 'logs/latest.log', + ].map((f) => path.join(projectRoot, f)); - const filtered = 
service.filterFiles(files); + const service = new FileDiscoveryService(projectRoot); - expect(filtered).toEqual(['src/index.ts', 'README.md', 'dist/bundle.js']); + expect(service.filterFiles(files)).toEqual( + ['src/index.ts', 'README.md'].map((f) => path.join(projectRoot, f)), + ); }); it('should not filter files when respectGitIgnore is false', () => { @@ -85,48 +91,121 @@ describe('FileDiscoveryService', () => { 'src/index.ts', 'node_modules/package/index.js', '.git/config', - ]; + 'logs/latest.log', + ].map((f) => path.join(projectRoot, f)); - const filtered = service.filterFiles(files, { respectGitIgnore: false }); + const service = new FileDiscoveryService(projectRoot); - expect(filtered).toEqual(files); + const filtered = service.filterFiles(files, { + respectGitIgnore: false, + respectGeminiIgnore: true, // still respect this one + }); + + expect(filtered).toEqual( + ['src/index.ts', 'node_modules/package/index.js', '.git/config'].map( + (f) => path.join(projectRoot, f), + ), + ); + }); + + it('should not filter files when respectGeminiIgnore is false', () => { + const files = [ + 'src/index.ts', + 'node_modules/package/index.js', + 'logs/latest.log', + ].map((f) => path.join(projectRoot, f)); + + const service = new FileDiscoveryService(projectRoot); + + const filtered = service.filterFiles(files, { + respectGitIgnore: true, + respectGeminiIgnore: false, + }); + + expect(filtered).toEqual( + ['src/index.ts', 'logs/latest.log'].map((f) => + path.join(projectRoot, f), + ), + ); }); it('should handle empty file list', () => { - const filtered = service.filterFiles([]); - expect(filtered).toEqual([]); + const service = new FileDiscoveryService(projectRoot); + + expect(service.filterFiles([])).toEqual([]); }); }); - describe('shouldGitIgnoreFile', () => { - beforeEach(() => { - mockGitIgnoreParser.isIgnored.mockImplementation((path: string) => - path.includes('node_modules'), - ); - service = new FileDiscoveryService(mockProjectRoot); + 
describe('shouldGitIgnoreFile & shouldGeminiIgnoreFile', () => { + beforeEach(async () => { + await fs.mkdir(path.join(projectRoot, '.git')); + await createTestFile('.gitignore', 'node_modules/'); + await createTestFile('.geminiignore', '*.log'); }); it('should return true for git-ignored files', () => { - expect(service.shouldGitIgnoreFile('node_modules/package/index.js')).toBe( - true, - ); + const service = new FileDiscoveryService(projectRoot); + + expect( + service.shouldGitIgnoreFile( + path.join(projectRoot, 'node_modules/package/index.js'), + ), + ).toBe(true); }); - it('should return false for non-ignored files', () => { - expect(service.shouldGitIgnoreFile('src/index.ts')).toBe(false); + it('should return false for non-git-ignored files', () => { + const service = new FileDiscoveryService(projectRoot); + + expect( + service.shouldGitIgnoreFile(path.join(projectRoot, 'src/index.ts')), + ).toBe(false); + }); + + it('should return true for gemini-ignored files', () => { + const service = new FileDiscoveryService(projectRoot); + + expect( + service.shouldGeminiIgnoreFile(path.join(projectRoot, 'debug.log')), + ).toBe(true); + }); + + it('should return false for non-gemini-ignored files', () => { + const service = new FileDiscoveryService(projectRoot); + + expect( + service.shouldGeminiIgnoreFile(path.join(projectRoot, 'src/index.ts')), + ).toBe(false); }); }); describe('edge cases', () => { - it('should handle relative project root paths', () => { - const relativeService = new FileDiscoveryService('./relative/path'); - expect(relativeService).toBeInstanceOf(FileDiscoveryService); + it('should handle relative project root paths', async () => { + await fs.mkdir(path.join(projectRoot, '.git')); + await createTestFile('.gitignore', 'ignored.txt'); + const service = new FileDiscoveryService( + path.relative(process.cwd(), projectRoot), + ); + + expect( + service.shouldGitIgnoreFile(path.join(projectRoot, 'ignored.txt')), + ).toBe(true); + expect( + 
service.shouldGitIgnoreFile(path.join(projectRoot, 'not-ignored.txt')), + ).toBe(false); }); - it('should handle filterFiles with undefined options', () => { - const files = ['src/index.ts']; - const filtered = service.filterFiles(files, undefined); - expect(filtered).toEqual(files); + it('should handle filterFiles with undefined options', async () => { + await fs.mkdir(path.join(projectRoot, '.git')); + await createTestFile('.gitignore', 'ignored.txt'); + const service = new FileDiscoveryService(projectRoot); + + const files = ['src/index.ts', 'ignored.txt'].map((f) => + path.join(projectRoot, f), + ); + + expect(service.filterFiles(files, undefined)).toEqual([ + path.join(projectRoot, 'src/index.ts'), + ]); }); }); }); diff --git a/packages/core/src/services/gitService.test.ts b/packages/core/src/services/gitService.test.ts index 5a3795644..9820ba5fc 100644 --- a/packages/core/src/services/gitService.test.ts +++ b/packages/core/src/services/gitService.test.ts @@ -7,28 +7,16 @@ import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; import { GitService } from './gitService.js'; import * as path from 'path'; -import type * as FsPromisesModule from 'fs/promises'; +import * as fs from 'fs/promises'; +import * as os from 'os'; import type { ChildProcess } from 'node:child_process'; +import { getProjectHash, GEMINI_DIR } from '../utils/paths.js'; const hoistedMockExec = vi.hoisted(() => vi.fn()); vi.mock('node:child_process', () => ({ exec: hoistedMockExec, })); -const hoistedMockMkdir = vi.hoisted(() => vi.fn()); -const hoistedMockReadFile = vi.hoisted(() => vi.fn()); -const hoistedMockWriteFile = vi.hoisted(() => vi.fn()); - -vi.mock('fs/promises', async (importOriginal) => { - const actual = (await importOriginal()) as typeof FsPromisesModule; - return { - ...actual, - mkdir: hoistedMockMkdir, - readFile: hoistedMockReadFile, - writeFile: hoistedMockWriteFile, - }; -}); - const hoistedMockEnv = vi.hoisted(() => vi.fn()); const hoistedMockSimpleGit 
= vi.hoisted(() => vi.fn()); const hoistedMockCheckIsRepo = vi.hoisted(() => vi.fn()); @@ -53,38 +41,30 @@ vi.mock('../utils/gitUtils.js', () => ({ isGitRepository: hoistedIsGitRepositoryMock, })); -const hoistedMockIsNodeError = vi.hoisted(() => vi.fn()); -vi.mock('../utils/errors.js', () => ({ - isNodeError: hoistedMockIsNodeError, -})); - const hoistedMockHomedir = vi.hoisted(() => vi.fn()); -vi.mock('os', () => ({ - homedir: hoistedMockHomedir, -})); - -const hoistedMockCreateHash = vi.hoisted(() => { - const mockUpdate = vi.fn().mockReturnThis(); - const mockDigest = vi.fn(); +vi.mock('os', async (importOriginal) => { + const actual = await importOriginal(); return { - createHash: vi.fn(() => ({ - update: mockUpdate, - digest: mockDigest, - })), - mockUpdate, - mockDigest, + ...actual, + homedir: hoistedMockHomedir, }; }); -vi.mock('crypto', () => ({ - createHash: hoistedMockCreateHash.createHash, -})); describe('GitService', () => { - const mockProjectRoot = '/test/project'; - const mockHomedir = '/mock/home'; - const mockHash = 'mock-hash'; + let testRootDir: string; + let projectRoot: string; + let homedir: string; + let hash: string; + + beforeEach(async () => { + testRootDir = await fs.mkdtemp(path.join(os.tmpdir(), 'git-service-test-')); + projectRoot = path.join(testRootDir, 'project'); + homedir = path.join(testRootDir, 'home'); + await fs.mkdir(projectRoot, { recursive: true }); + await fs.mkdir(homedir, { recursive: true }); + + hash = getProjectHash(projectRoot); - beforeEach(() => { vi.clearAllMocks(); hoistedIsGitRepositoryMock.mockReturnValue(true); hoistedMockExec.mockImplementation((command, callback) => { @@ -95,13 +75,8 @@ describe('GitService', () => { } return {}; }); - hoistedMockMkdir.mockResolvedValue(undefined); - hoistedMockReadFile.mockResolvedValue(''); - hoistedMockWriteFile.mockResolvedValue(undefined); - hoistedMockIsNodeError.mockImplementation((e) => e instanceof Error); - hoistedMockHomedir.mockReturnValue(mockHomedir); - 
hoistedMockCreateHash.mockUpdate.mockReturnThis(); - hoistedMockCreateHash.mockDigest.mockReturnValue(mockHash); + + hoistedMockHomedir.mockReturnValue(homedir); hoistedMockEnv.mockImplementation(() => ({ checkIsRepo: hoistedMockCheckIsRepo, @@ -127,19 +102,20 @@ describe('GitService', () => { }); }); - afterEach(() => { + afterEach(async () => { vi.restoreAllMocks(); + await fs.rm(testRootDir, { recursive: true, force: true }); }); describe('constructor', () => { - it('should successfully create an instance if projectRoot is a Git repository', () => { - expect(() => new GitService(mockProjectRoot)).not.toThrow(); + it('should successfully create an instance', () => { + expect(() => new GitService(projectRoot)).not.toThrow(); }); }); describe('verifyGitAvailability', () => { it('should resolve true if git --version command succeeds', async () => { - const service = new GitService(mockProjectRoot); + const service = new GitService(projectRoot); await expect(service.verifyGitAvailability()).resolves.toBe(true); }); @@ -148,7 +124,7 @@ describe('GitService', () => { callback(new Error('git not found')); return {} as ChildProcess; }); - const service = new GitService(mockProjectRoot); + const service = new GitService(projectRoot); await expect(service.verifyGitAvailability()).resolves.toBe(false); }); }); @@ -159,14 +135,14 @@ describe('GitService', () => { callback(new Error('git not found')); return {} as ChildProcess; }); - const service = new GitService(mockProjectRoot); + const service = new GitService(projectRoot); await expect(service.initialize()).rejects.toThrow( 'Checkpointing is enabled, but Git is not installed. 
Please install Git or disable checkpointing to continue.', ); }); it('should call setupShadowGitRepository if Git is available', async () => { - const service = new GitService(mockProjectRoot); + const service = new GitService(projectRoot); const setupSpy = vi .spyOn(service, 'setupShadowGitRepository') .mockResolvedValue(undefined); @@ -177,33 +153,34 @@ describe('GitService', () => { }); describe('setupShadowGitRepository', () => { - const repoDir = path.join(mockHomedir, '.qwen', 'history', mockHash); - const hiddenGitIgnorePath = path.join(repoDir, '.gitignore'); - const visibleGitIgnorePath = path.join(mockProjectRoot, '.gitignore'); - const gitConfigPath = path.join(repoDir, '.gitconfig'); + let repoDir: string; + let gitConfigPath: string; - it('should create a .gitconfig file with the correct content', async () => { - const service = new GitService(mockProjectRoot); - await service.setupShadowGitRepository(); - const expectedConfigContent = - '[user]\n name = Gemini CLI\n email = gemini-cli@google.com\n[commit]\n gpgsign = false\n'; - expect(hoistedMockWriteFile).toHaveBeenCalledWith( - gitConfigPath, - expectedConfigContent, - ); + beforeEach(() => { + repoDir = path.join(homedir, GEMINI_DIR, 'history', hash); + gitConfigPath = path.join(repoDir, '.gitconfig'); }); it('should create history and repository directories', async () => { - const service = new GitService(mockProjectRoot); + const service = new GitService(projectRoot); await service.setupShadowGitRepository(); - expect(hoistedMockMkdir).toHaveBeenCalledWith(repoDir, { - recursive: true, - }); + const stats = await fs.stat(repoDir); + expect(stats.isDirectory()).toBe(true); + }); + + it('should create a .gitconfig file with the correct content', async () => { + const service = new GitService(projectRoot); + await service.setupShadowGitRepository(); + + const expectedConfigContent = + '[user]\n name = Gemini CLI\n email = gemini-cli@google.com\n[commit]\n gpgsign = false\n'; + const 
actualConfigContent = await fs.readFile(gitConfigPath, 'utf-8'); + expect(actualConfigContent).toBe(expectedConfigContent); }); it('should initialize git repo in historyDir if not already initialized', async () => { hoistedMockCheckIsRepo.mockResolvedValue(false); - const service = new GitService(mockProjectRoot); + const service = new GitService(projectRoot); await service.setupShadowGitRepository(); expect(hoistedMockSimpleGit).toHaveBeenCalledWith(repoDir); expect(hoistedMockInit).toHaveBeenCalled(); @@ -211,52 +188,49 @@ describe('GitService', () => { it('should not initialize git repo if already initialized', async () => { hoistedMockCheckIsRepo.mockResolvedValue(true); - const service = new GitService(mockProjectRoot); + const service = new GitService(projectRoot); await service.setupShadowGitRepository(); expect(hoistedMockInit).not.toHaveBeenCalled(); }); it('should copy .gitignore from projectRoot if it exists', async () => { - const gitignoreContent = `node_modules/\n.env`; - hoistedMockReadFile.mockImplementation(async (filePath) => { - if (filePath === visibleGitIgnorePath) { - return gitignoreContent; - } - return ''; - }); - const service = new GitService(mockProjectRoot); + const gitignoreContent = 'node_modules/\n.env'; + const visibleGitIgnorePath = path.join(projectRoot, '.gitignore'); + await fs.writeFile(visibleGitIgnorePath, gitignoreContent); + + const service = new GitService(projectRoot); await service.setupShadowGitRepository(); - expect(hoistedMockReadFile).toHaveBeenCalledWith( - visibleGitIgnorePath, - 'utf-8', - ); - expect(hoistedMockWriteFile).toHaveBeenCalledWith( - hiddenGitIgnorePath, - gitignoreContent, - ); + + const hiddenGitIgnorePath = path.join(repoDir, '.gitignore'); + const copiedContent = await fs.readFile(hiddenGitIgnorePath, 'utf-8'); + expect(copiedContent).toBe(gitignoreContent); + }); + + it('should not create a .gitignore in shadow repo if project .gitignore does not exist', async () => { + const service = new 
GitService(projectRoot); + await service.setupShadowGitRepository(); + + const hiddenGitIgnorePath = path.join(repoDir, '.gitignore'); + // An empty string is written if the file doesn't exist. + const content = await fs.readFile(hiddenGitIgnorePath, 'utf-8'); + expect(content).toBe(''); }); it('should throw an error if reading projectRoot .gitignore fails with other errors', async () => { - const readError = new Error('Read permission denied'); - hoistedMockReadFile.mockImplementation(async (filePath) => { - if (filePath === visibleGitIgnorePath) { - throw readError; - } - return ''; - }); - hoistedMockIsNodeError.mockImplementation( - (e: unknown): e is NodeJS.ErrnoException => e instanceof Error, - ); + const visibleGitIgnorePath = path.join(projectRoot, '.gitignore'); + // Create a directory instead of a file to cause a read error + await fs.mkdir(visibleGitIgnorePath); - const service = new GitService(mockProjectRoot); + const service = new GitService(projectRoot); + // EISDIR is the expected error code on Unix-like systems await expect(service.setupShadowGitRepository()).rejects.toThrow( - 'Read permission denied', + /EISDIR: illegal operation on a directory, read|EBUSY: resource busy or locked, read/, ); }); it('should make an initial commit if no commits exist in history repo', async () => { hoistedMockCheckIsRepo.mockResolvedValue(false); - const service = new GitService(mockProjectRoot); + const service = new GitService(projectRoot); await service.setupShadowGitRepository(); expect(hoistedMockCommit).toHaveBeenCalledWith('Initial commit', { '--allow-empty': null, @@ -265,7 +239,7 @@ describe('GitService', () => { it('should not make an initial commit if commits already exist', async () => { hoistedMockCheckIsRepo.mockResolvedValue(true); - const service = new GitService(mockProjectRoot); + const service = new GitService(projectRoot); await service.setupShadowGitRepository(); expect(hoistedMockCommit).not.toHaveBeenCalled(); }); diff --git 
a/packages/core/src/services/loopDetectionService.test.ts b/packages/core/src/services/loopDetectionService.test.ts index f0d76166b..9f5d63a7f 100644 --- a/packages/core/src/services/loopDetectionService.test.ts +++ b/packages/core/src/services/loopDetectionService.test.ts @@ -4,16 +4,18 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { describe, it, expect, beforeEach, vi } from 'vitest'; -import { LoopDetectionService } from './loopDetectionService.js'; +import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'; +import { Config } from '../config/config.js'; +import { GeminiClient } from '../core/client.js'; import { GeminiEventType, ServerGeminiContentEvent, + ServerGeminiStreamEvent, ServerGeminiToolCallRequestEvent, } from '../core/turn.js'; -import { ServerGeminiStreamEvent } from '../core/turn.js'; -import { Config } from '../config/config.js'; import * as loggers from '../telemetry/loggers.js'; +import { LoopType } from '../telemetry/types.js'; +import { LoopDetectionService } from './loopDetectionService.js'; vi.mock('../telemetry/loggers.js', () => ({ logLoopDetected: vi.fn(), @@ -21,6 +23,7 @@ vi.mock('../telemetry/loggers.js', () => ({ const TOOL_CALL_LOOP_THRESHOLD = 5; const CONTENT_LOOP_THRESHOLD = 10; +const CONTENT_CHUNK_SIZE = 50; describe('LoopDetectionService', () => { let service: LoopDetectionService; @@ -77,7 +80,7 @@ describe('LoopDetectionService', () => { service.addAndCheck(event); } expect(service.addAndCheck(event)).toBe(true); - expect(loggers.logLoopDetected).toHaveBeenCalledTimes(2); + expect(loggers.logLoopDetected).toHaveBeenCalledTimes(1); }); it('should not detect a loop for different tool calls', () => { @@ -97,182 +100,86 @@ describe('LoopDetectionService', () => { expect(service.addAndCheck(event3)).toBe(false); } }); + + it('should not reset tool call counter for other event types', () => { + const toolCallEvent = createToolCallRequestEvent('testTool', { + param: 'value', + }); + const otherEvent = { + 
type: 'thought', + } as unknown as ServerGeminiStreamEvent; + + // Send events just below the threshold + for (let i = 0; i < TOOL_CALL_LOOP_THRESHOLD - 1; i++) { + expect(service.addAndCheck(toolCallEvent)).toBe(false); + } + + // Send a different event type + expect(service.addAndCheck(otherEvent)).toBe(false); + + // Send the tool call event again, which should now trigger the loop + expect(service.addAndCheck(toolCallEvent)).toBe(true); + expect(loggers.logLoopDetected).toHaveBeenCalledTimes(1); + }); }); describe('Content Loop Detection', () => { - it(`should not detect a loop for fewer than CONTENT_LOOP_THRESHOLD identical content strings`, () => { - const event = createContentEvent('This is a test sentence.'); - for (let i = 0; i < CONTENT_LOOP_THRESHOLD - 1; i++) { - expect(service.addAndCheck(event)).toBe(false); + const generateRandomString = (length: number) => { + let result = ''; + const characters = + 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'; + const charactersLength = characters.length; + for (let i = 0; i < length; i++) { + result += characters.charAt( + Math.floor(Math.random() * charactersLength), + ); + } + return result; + }; + + it('should not detect a loop for random content', () => { + service.reset(''); + for (let i = 0; i < 1000; i++) { + const content = generateRandomString(10); + const isLoop = service.addAndCheck(createContentEvent(content)); + expect(isLoop).toBe(false); } expect(loggers.logLoopDetected).not.toHaveBeenCalled(); }); - it(`should detect a loop on the CONTENT_LOOP_THRESHOLD-th identical content string`, () => { - const event = createContentEvent('This is a test sentence.'); - for (let i = 0; i < CONTENT_LOOP_THRESHOLD - 1; i++) { - service.addAndCheck(event); + it('should detect a loop when a chunk of content repeats consecutively', () => { + service.reset(''); + const repeatedContent = 'a'.repeat(CONTENT_CHUNK_SIZE); + + let isLoop = false; + for (let i = 0; i < CONTENT_LOOP_THRESHOLD; i++) { + for 
(const char of repeatedContent) { + isLoop = service.addAndCheck(createContentEvent(char)); + } } - expect(service.addAndCheck(event)).toBe(true); + expect(isLoop).toBe(true); expect(loggers.logLoopDetected).toHaveBeenCalledTimes(1); }); - it('should not detect a loop for different content strings', () => { - const event1 = createContentEvent('Sentence A'); - const event2 = createContentEvent('Sentence B'); - for (let i = 0; i < CONTENT_LOOP_THRESHOLD - 2; i++) { - expect(service.addAndCheck(event1)).toBe(false); - expect(service.addAndCheck(event2)).toBe(false); + it('should not detect a loop if repetitions are very far apart', () => { + service.reset(''); + const repeatedContent = 'b'.repeat(CONTENT_CHUNK_SIZE); + const fillerContent = generateRandomString(500); + + let isLoop = false; + for (let i = 0; i < CONTENT_LOOP_THRESHOLD; i++) { + for (const char of repeatedContent) { + isLoop = service.addAndCheck(createContentEvent(char)); + } + for (const char of fillerContent) { + isLoop = service.addAndCheck(createContentEvent(char)); + } } + expect(isLoop).toBe(false); expect(loggers.logLoopDetected).not.toHaveBeenCalled(); }); }); - describe('Sentence Extraction and Punctuation', () => { - it('should not check for loops when content has no sentence-ending punctuation', () => { - const eventNoPunct = createContentEvent('This has no punctuation'); - expect(service.addAndCheck(eventNoPunct)).toBe(false); - - const eventWithPunct = createContentEvent('This has punctuation!'); - expect(service.addAndCheck(eventWithPunct)).toBe(false); - }); - - it('should not treat function calls or method calls as sentence endings', () => { - // These should not trigger sentence detection, so repeating them many times should never cause a loop - for (let i = 0; i < CONTENT_LOOP_THRESHOLD + 2; i++) { - expect(service.addAndCheck(createContentEvent('console.log()'))).toBe( - false, - ); - } - - service.reset(); - for (let i = 0; i < CONTENT_LOOP_THRESHOLD + 2; i++) { - 
expect(service.addAndCheck(createContentEvent('obj.method()'))).toBe( - false, - ); - } - - service.reset(); - for (let i = 0; i < CONTENT_LOOP_THRESHOLD + 2; i++) { - expect( - service.addAndCheck(createContentEvent('arr.filter().map()')), - ).toBe(false); - } - - service.reset(); - for (let i = 0; i < CONTENT_LOOP_THRESHOLD + 2; i++) { - expect( - service.addAndCheck( - createContentEvent('if (condition) { return true; }'), - ), - ).toBe(false); - } - }); - - it('should correctly identify actual sentence endings and trigger loop detection', () => { - // These should trigger sentence detection, so repeating them should eventually cause a loop - for (let i = 0; i < CONTENT_LOOP_THRESHOLD - 1; i++) { - expect( - service.addAndCheck(createContentEvent('This is a sentence.')), - ).toBe(false); - } - expect( - service.addAndCheck(createContentEvent('This is a sentence.')), - ).toBe(true); - - service.reset(); - for (let i = 0; i < CONTENT_LOOP_THRESHOLD - 1; i++) { - expect( - service.addAndCheck(createContentEvent('Is this a question? ')), - ).toBe(false); - } - expect( - service.addAndCheck(createContentEvent('Is this a question? 
')), - ).toBe(true); - - service.reset(); - for (let i = 0; i < CONTENT_LOOP_THRESHOLD - 1; i++) { - expect( - service.addAndCheck(createContentEvent('What excitement!\n')), - ).toBe(false); - } - expect( - service.addAndCheck(createContentEvent('What excitement!\n')), - ).toBe(true); - }); - - it('should handle content with mixed punctuation', () => { - service.addAndCheck(createContentEvent('Question?')); - service.addAndCheck(createContentEvent('Exclamation!')); - service.addAndCheck(createContentEvent('Period.')); - - // Repeat one of them multiple times - for (let i = 0; i < CONTENT_LOOP_THRESHOLD - 1; i++) { - service.addAndCheck(createContentEvent('Period.')); - } - expect(service.addAndCheck(createContentEvent('Period.'))).toBe(true); - }); - - it('should handle empty sentences after trimming', () => { - service.addAndCheck(createContentEvent(' .')); - expect(service.addAndCheck(createContentEvent('Normal sentence.'))).toBe( - false, - ); - }); - - it('should require at least two sentences for loop detection', () => { - const event = createContentEvent('Only one sentence.'); - expect(service.addAndCheck(event)).toBe(false); - - // Even repeating the same single sentence shouldn't trigger detection - for (let i = 0; i < 5; i++) { - expect(service.addAndCheck(event)).toBe(false); - } - }); - }); - - describe('Performance Optimizations', () => { - it('should cache sentence extraction and only re-extract when content grows significantly', () => { - // Add initial content - service.addAndCheck(createContentEvent('First sentence.')); - service.addAndCheck(createContentEvent('Second sentence.')); - - // Add small amounts of content (shouldn't trigger re-extraction) - for (let i = 0; i < 10; i++) { - service.addAndCheck(createContentEvent('X')); - } - service.addAndCheck(createContentEvent('.')); - - // Should still work correctly - expect(service.addAndCheck(createContentEvent('Test.'))).toBe(false); - }); - - it('should re-extract sentences when content grows by 
more than 100 characters', () => { - service.addAndCheck(createContentEvent('Initial sentence.')); - - // Add enough content to trigger re-extraction - const longContent = 'X'.repeat(101); - service.addAndCheck(createContentEvent(longContent + '.')); - - // Should work correctly after re-extraction - expect(service.addAndCheck(createContentEvent('Test.'))).toBe(false); - }); - - it('should use indexOf for efficient counting instead of regex', () => { - const repeatedSentence = 'This is a repeated sentence.'; - - // Build up content with the sentence repeated - for (let i = 0; i < CONTENT_LOOP_THRESHOLD - 1; i++) { - service.addAndCheck(createContentEvent(repeatedSentence)); - } - - // The threshold should be reached - expect(service.addAndCheck(createContentEvent(repeatedSentence))).toBe( - true, - ); - }); - }); - describe('Edge Cases', () => { it('should handle empty content', () => { const event = createContentEvent(''); @@ -309,3 +216,112 @@ describe('LoopDetectionService', () => { }); }); }); + +describe('LoopDetectionService LLM Checks', () => { + let service: LoopDetectionService; + let mockConfig: Config; + let mockGeminiClient: GeminiClient; + let abortController: AbortController; + + beforeEach(() => { + mockGeminiClient = { + getHistory: vi.fn().mockReturnValue([]), + generateJson: vi.fn(), + } as unknown as GeminiClient; + + mockConfig = { + getGeminiClient: () => mockGeminiClient, + getDebugMode: () => false, + getTelemetryEnabled: () => true, + } as unknown as Config; + + service = new LoopDetectionService(mockConfig); + abortController = new AbortController(); + vi.clearAllMocks(); + }); + + afterEach(() => { + vi.restoreAllMocks(); + }); + + const advanceTurns = async (count: number) => { + for (let i = 0; i < count; i++) { + await service.turnStarted(abortController.signal); + } + }; + + it('should not trigger LLM check before LLM_CHECK_AFTER_TURNS', async () => { + await advanceTurns(29); + 
expect(mockGeminiClient.generateJson).not.toHaveBeenCalled(); + }); + + it('should trigger LLM check on the 30th turn', async () => { + mockGeminiClient.generateJson = vi + .fn() + .mockResolvedValue({ confidence: 0.1 }); + await advanceTurns(30); + expect(mockGeminiClient.generateJson).toHaveBeenCalledTimes(1); + }); + + it('should detect a cognitive loop when confidence is high', async () => { + // First check at turn 30 + mockGeminiClient.generateJson = vi + .fn() + .mockResolvedValue({ confidence: 0.85, reasoning: 'Repetitive actions' }); + await advanceTurns(30); + expect(mockGeminiClient.generateJson).toHaveBeenCalledTimes(1); + + // The confidence of 0.85 will result in a low interval. + // The interval will be: 5 + (15 - 5) * (1 - 0.85) = 5 + 10 * 0.15 = 6.5 -> rounded to 7 + await advanceTurns(6); // advance to turn 36 + + mockGeminiClient.generateJson = vi + .fn() + .mockResolvedValue({ confidence: 0.95, reasoning: 'Repetitive actions' }); + const finalResult = await service.turnStarted(abortController.signal); // This is turn 37 + + expect(finalResult).toBe(true); + expect(loggers.logLoopDetected).toHaveBeenCalledWith( + mockConfig, + expect.objectContaining({ + 'event.name': 'loop_detected', + loop_type: LoopType.LLM_DETECTED_LOOP, + }), + ); + }); + + it('should not detect a loop when confidence is low', async () => { + mockGeminiClient.generateJson = vi + .fn() + .mockResolvedValue({ confidence: 0.5, reasoning: 'Looks okay' }); + await advanceTurns(30); + const result = await service.turnStarted(abortController.signal); + expect(result).toBe(false); + expect(loggers.logLoopDetected).not.toHaveBeenCalled(); + }); + + it('should adjust the check interval based on confidence', async () => { + // Confidence is 0.0, so interval should be MAX_LLM_CHECK_INTERVAL (15) + mockGeminiClient.generateJson = vi + .fn() + .mockResolvedValue({ confidence: 0.0 }); + await advanceTurns(30); // First check at turn 30 + 
expect(mockGeminiClient.generateJson).toHaveBeenCalledTimes(1); + + await advanceTurns(14); // Advance to turn 44 + expect(mockGeminiClient.generateJson).toHaveBeenCalledTimes(1); + + await service.turnStarted(abortController.signal); // Turn 45 + expect(mockGeminiClient.generateJson).toHaveBeenCalledTimes(2); + }); + + it('should handle errors from generateJson gracefully', async () => { + mockGeminiClient.generateJson = vi + .fn() + .mockRejectedValue(new Error('API error')); + await advanceTurns(30); + const result = await service.turnStarted(abortController.signal); + expect(result).toBe(false); + expect(loggers.logLoopDetected).not.toHaveBeenCalled(); + }); +}); diff --git a/packages/core/src/services/loopDetectionService.ts b/packages/core/src/services/loopDetectionService.ts index c1078f69f..7b3da20bc 100644 --- a/packages/core/src/services/loopDetectionService.ts +++ b/packages/core/src/services/loopDetectionService.ts @@ -8,26 +8,64 @@ import { createHash } from 'crypto'; import { GeminiEventType, ServerGeminiStreamEvent } from '../core/turn.js'; import { logLoopDetected } from '../telemetry/loggers.js'; import { LoopDetectedEvent, LoopType } from '../telemetry/types.js'; -import { Config } from '../config/config.js'; +import { Config, DEFAULT_GEMINI_FLASH_MODEL } from '../config/config.js'; +import { SchemaUnion, Type } from '@google/genai'; const TOOL_CALL_LOOP_THRESHOLD = 5; const CONTENT_LOOP_THRESHOLD = 10; -const SENTENCE_ENDING_PUNCTUATION_REGEX = /[.!?]+(?=\s|$)/; +const CONTENT_CHUNK_SIZE = 50; +const MAX_HISTORY_LENGTH = 1000; + +/** + * The number of recent conversation turns to include in the history when asking the LLM to check for a loop. + */ +const LLM_LOOP_CHECK_HISTORY_COUNT = 20; + +/** + * The number of turns that must pass in a single prompt before the LLM-based loop check is activated. + */ +const LLM_CHECK_AFTER_TURNS = 30; + +/** + * The default interval, in number of turns, at which the LLM-based loop check is performed. 
+ * This value is adjusted dynamically based on the LLM's confidence. + */ +const DEFAULT_LLM_CHECK_INTERVAL = 3; + +/** + * The minimum interval for LLM-based loop checks. + * This is used when the confidence of a loop is high, to check more frequently. + */ +const MIN_LLM_CHECK_INTERVAL = 5; + +/** + * The maximum interval for LLM-based loop checks. + * This is used when the confidence of a loop is low, to check less frequently. + */ +const MAX_LLM_CHECK_INTERVAL = 15; /** * Service for detecting and preventing infinite loops in AI responses. * Monitors tool call repetitions and content sentence repetitions. */ export class LoopDetectionService { + private readonly config: Config; + private promptId = ''; + // Tool call tracking private lastToolCallKey: string | null = null; private toolCallRepetitionCount: number = 0; // Content streaming tracking - private lastRepeatedSentence: string = ''; - private sentenceRepetitionCount: number = 0; - private partialContent: string = ''; - private config: Config; + private streamContentHistory = ''; + private contentStats = new Map(); + private lastContentIndex = 0; + private loopDetected = false; + + // LLM loop track tracking + private turnsInCurrentPrompt = 0; + private llmCheckInterval = DEFAULT_LLM_CHECK_INTERVAL; + private lastCheckTurn = 0; constructor(config: Config) { this.config = config; @@ -45,18 +83,48 @@ export class LoopDetectionService { * @returns true if a loop is detected, false otherwise */ addAndCheck(event: ServerGeminiStreamEvent): boolean { + if (this.loopDetected) { + return true; + } + switch (event.type) { case GeminiEventType.ToolCallRequest: // content chanting only happens in one single stream, reset if there // is a tool call in between - this.resetSentenceCount(); - return this.checkToolCallLoop(event.value); + this.resetContentTracking(); + this.loopDetected = this.checkToolCallLoop(event.value); + break; case GeminiEventType.Content: - return this.checkContentLoop(event.value); + 
this.loopDetected = this.checkContentLoop(event.value); + break; default: - this.reset(); - return false; + break; } + return this.loopDetected; + } + + /** + * Signals the start of a new turn in the conversation. + * + * This method increments the turn counter and, if specific conditions are met, + * triggers an LLM-based check to detect potential conversation loops. The check + * is performed periodically based on the `llmCheckInterval`. + * + * @param signal - An AbortSignal to allow for cancellation of the asynchronous LLM check. + * @returns A promise that resolves to `true` if a loop is detected, and `false` otherwise. + */ + async turnStarted(signal: AbortSignal) { + this.turnsInCurrentPrompt++; + + if ( + this.turnsInCurrentPrompt >= LLM_CHECK_AFTER_TURNS && + this.turnsInCurrentPrompt - this.lastCheckTurn >= this.llmCheckInterval + ) { + this.lastCheckTurn = this.turnsInCurrentPrompt; + return await this.checkForLoopWithLLM(signal); + } + + return false; } private checkToolCallLoop(toolCall: { name: string; args: object }): boolean { @@ -70,51 +138,227 @@ export class LoopDetectionService { if (this.toolCallRepetitionCount >= TOOL_CALL_LOOP_THRESHOLD) { logLoopDetected( this.config, - new LoopDetectedEvent(LoopType.CONSECUTIVE_IDENTICAL_TOOL_CALLS), + new LoopDetectedEvent( + LoopType.CONSECUTIVE_IDENTICAL_TOOL_CALLS, + this.promptId, + ), ); return true; } return false; } + /** + * Detects content loops by analyzing streaming text for repetitive patterns. + * + * The algorithm works by: + * 1. Appending new content to the streaming history + * 2. Truncating history if it exceeds the maximum length + * 3. Analyzing content chunks for repetitive patterns using hashing + * 4. 
Detecting loops when identical chunks appear frequently within a short distance + */ private checkContentLoop(content: string): boolean { - this.partialContent += content; + this.streamContentHistory += content; - if (!SENTENCE_ENDING_PUNCTUATION_REGEX.test(this.partialContent)) { - return false; + this.truncateAndUpdate(); + return this.analyzeContentChunksForLoop(); + } + + /** + * Truncates the content history to prevent unbounded memory growth. + * When truncating, adjusts all stored indices to maintain their relative positions. + */ + private truncateAndUpdate(): void { + if (this.streamContentHistory.length <= MAX_HISTORY_LENGTH) { + return; } - const completeSentences = - this.partialContent.match(/[^.!?]+[.!?]+(?=\s|$)/g) || []; - if (completeSentences.length === 0) { - return false; - } + // Calculate how much content to remove from the beginning + const truncationAmount = + this.streamContentHistory.length - MAX_HISTORY_LENGTH; + this.streamContentHistory = + this.streamContentHistory.slice(truncationAmount); + this.lastContentIndex = Math.max( + 0, + this.lastContentIndex - truncationAmount, + ); - const lastSentence = completeSentences[completeSentences.length - 1]; - const lastCompleteIndex = this.partialContent.lastIndexOf(lastSentence); - const endOfLastSentence = lastCompleteIndex + lastSentence.length; - this.partialContent = this.partialContent.slice(endOfLastSentence); + // Update all stored chunk indices to account for the truncation + for (const [hash, oldIndices] of this.contentStats.entries()) { + const adjustedIndices = oldIndices + .map((index) => index - truncationAmount) + .filter((index) => index >= 0); - for (const sentence of completeSentences) { - const trimmedSentence = sentence.trim(); - if (trimmedSentence === '') { - continue; - } - - if (this.lastRepeatedSentence === trimmedSentence) { - this.sentenceRepetitionCount++; + if (adjustedIndices.length > 0) { + this.contentStats.set(hash, adjustedIndices); } else { - 
this.lastRepeatedSentence = trimmedSentence; - this.sentenceRepetitionCount = 1; + this.contentStats.delete(hash); } + } + } - if (this.sentenceRepetitionCount >= CONTENT_LOOP_THRESHOLD) { + /** + * Analyzes content in fixed-size chunks to detect repetitive patterns. + * + * Uses a sliding window approach: + * 1. Extract chunks of fixed size (CONTENT_CHUNK_SIZE) + * 2. Hash each chunk for efficient comparison + * 3. Track positions where identical chunks appear + * 4. Detect loops when chunks repeat frequently within a short distance + */ + private analyzeContentChunksForLoop(): boolean { + while (this.hasMoreChunksToProcess()) { + // Extract current chunk of text + const currentChunk = this.streamContentHistory.substring( + this.lastContentIndex, + this.lastContentIndex + CONTENT_CHUNK_SIZE, + ); + const chunkHash = createHash('sha256').update(currentChunk).digest('hex'); + + if (this.isLoopDetectedForChunk(currentChunk, chunkHash)) { logLoopDetected( this.config, - new LoopDetectedEvent(LoopType.CHANTING_IDENTICAL_SENTENCES), + new LoopDetectedEvent( + LoopType.CHANTING_IDENTICAL_SENTENCES, + this.promptId, + ), ); return true; } + + // Move to next position in the sliding window + this.lastContentIndex++; + } + + return false; + } + + private hasMoreChunksToProcess(): boolean { + return ( + this.lastContentIndex + CONTENT_CHUNK_SIZE <= + this.streamContentHistory.length + ); + } + + /** + * Determines if a content chunk indicates a loop pattern. + * + * Loop detection logic: + * 1. Check if we've seen this hash before (new chunks are stored for future comparison) + * 2. Verify actual content matches to prevent hash collisions + * 3. Track all positions where this chunk appears + * 4. 
A loop is detected when the same chunk appears CONTENT_LOOP_THRESHOLD times + * within a small average distance (≤ 1.5 * chunk size) + */ + private isLoopDetectedForChunk(chunk: string, hash: string): boolean { + const existingIndices = this.contentStats.get(hash); + + if (!existingIndices) { + this.contentStats.set(hash, [this.lastContentIndex]); + return false; + } + + if (!this.isActualContentMatch(chunk, existingIndices[0])) { + return false; + } + + existingIndices.push(this.lastContentIndex); + + if (existingIndices.length < CONTENT_LOOP_THRESHOLD) { + return false; + } + + // Analyze the most recent occurrences to see if they're clustered closely together + const recentIndices = existingIndices.slice(-CONTENT_LOOP_THRESHOLD); + const totalDistance = + recentIndices[recentIndices.length - 1] - recentIndices[0]; + const averageDistance = totalDistance / (CONTENT_LOOP_THRESHOLD - 1); + const maxAllowedDistance = CONTENT_CHUNK_SIZE * 1.5; + + return averageDistance <= maxAllowedDistance; + } + + /** + * Verifies that two chunks with the same hash actually contain identical content. + * This prevents false positives from hash collisions. + */ + private isActualContentMatch( + currentChunk: string, + originalIndex: number, + ): boolean { + const originalChunk = this.streamContentHistory.substring( + originalIndex, + originalIndex + CONTENT_CHUNK_SIZE, + ); + return originalChunk === currentChunk; + } + + private async checkForLoopWithLLM(signal: AbortSignal) { + const recentHistory = this.config + .getGeminiClient() + .getHistory() + .slice(-LLM_LOOP_CHECK_HISTORY_COUNT); + + const prompt = `You are a sophisticated AI diagnostic agent specializing in identifying when a conversational AI is stuck in an unproductive state. Your task is to analyze the provided conversation history and determine if the assistant has ceased to make meaningful progress. 
+ +An unproductive state is characterized by one or more of the following patterns over the last 5 or more assistant turns: + +Repetitive Actions: The assistant repeats the same tool calls or conversational responses a decent number of times. This includes simple loops (e.g., tool_A, tool_A, tool_A) and alternating patterns (e.g., tool_A, tool_B, tool_A, tool_B, ...). + +Cognitive Loop: The assistant seems unable to determine the next logical step. It might express confusion, repeatedly ask the same questions, or generate responses that don't logically follow from the previous turns, indicating it's stuck and not advancing the task. + +Crucially, differentiate between a true unproductive state and legitimate, incremental progress. +For example, a series of 'tool_A' or 'tool_B' tool calls that make small, distinct changes to the same file (like adding docstrings to functions one by one) is considered forward progress and is NOT a loop. A loop would be repeatedly replacing the same text with the same content, or cycling between a small set of files with no net change. + +Please analyze the conversation history to determine the possibility that the conversation is stuck in a repetitive, non-productive state.`; + const contents = [ + ...recentHistory, + { role: 'user', parts: [{ text: prompt }] }, + ]; + const schema: SchemaUnion = { + type: Type.OBJECT, + properties: { + reasoning: { + type: Type.STRING, + description: + 'Your reasoning on if the conversation is looping without forward progress.', + }, + confidence: { + type: Type.NUMBER, + description: + 'A number between 0.0 and 1.0 representing your confidence that the conversation is in an unproductive state.', + }, + }, + required: ['reasoning', 'confidence'], + }; + let result; + try { + result = await this.config + .getGeminiClient() + .generateJson(contents, schema, signal, DEFAULT_GEMINI_FLASH_MODEL); + } catch (e) { + // Do nothing, treat it as a non-loop. + this.config.getDebugMode() ? 
console.error(e) : console.debug(e); + return false; + } + + if (typeof result.confidence === 'number') { + if (result.confidence > 0.9) { + if (typeof result.reasoning === 'string' && result.reasoning) { + console.warn(result.reasoning); + } + logLoopDetected( + this.config, + new LoopDetectedEvent(LoopType.LLM_DETECTED_LOOP, this.promptId), + ); + return true; + } else { + this.llmCheckInterval = Math.round( + MIN_LLM_CHECK_INTERVAL + + (MAX_LLM_CHECK_INTERVAL - MIN_LLM_CHECK_INTERVAL) * + (1 - result.confidence), + ); + } } return false; } @@ -122,9 +366,12 @@ export class LoopDetectionService { /** * Resets all loop detection state. */ - reset(): void { + reset(promptId: string): void { + this.promptId = promptId; this.resetToolCallCount(); - this.resetSentenceCount(); + this.resetContentTracking(); + this.resetLlmCheckTracking(); + this.loopDetected = false; } private resetToolCallCount(): void { @@ -132,9 +379,17 @@ export class LoopDetectionService { this.toolCallRepetitionCount = 0; } - private resetSentenceCount(): void { - this.lastRepeatedSentence = ''; - this.sentenceRepetitionCount = 0; - this.partialContent = ''; + private resetContentTracking(resetHistory = true): void { + if (resetHistory) { + this.streamContentHistory = ''; + } + this.contentStats.clear(); + this.lastContentIndex = 0; + } + + private resetLlmCheckTracking(): void { + this.turnsInCurrentPrompt = 0; + this.llmCheckInterval = DEFAULT_LLM_CHECK_INTERVAL; + this.lastCheckTurn = 0; } } diff --git a/packages/core/src/services/shellExecutionService.test.ts b/packages/core/src/services/shellExecutionService.test.ts new file mode 100644 index 000000000..4d1655a27 --- /dev/null +++ b/packages/core/src/services/shellExecutionService.test.ts @@ -0,0 +1,357 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { vi, describe, it, expect, beforeEach, type Mock } from 'vitest'; +const mockSpawn = vi.hoisted(() => vi.fn()); 
+vi.mock('child_process', () => ({ + spawn: mockSpawn, +})); + +import EventEmitter from 'events'; +import { Readable } from 'stream'; +import { type ChildProcess } from 'child_process'; +import { + ShellExecutionService, + ShellOutputEvent, +} from './shellExecutionService.js'; + +const mockIsBinary = vi.hoisted(() => vi.fn()); +vi.mock('../utils/textUtils.js', () => ({ + isBinary: mockIsBinary, +})); + +const mockPlatform = vi.hoisted(() => vi.fn()); +vi.mock('os', () => ({ + default: { + platform: mockPlatform, + }, + platform: mockPlatform, +})); + +const mockProcessKill = vi + .spyOn(process, 'kill') + .mockImplementation(() => true); + +describe('ShellExecutionService', () => { + let mockChildProcess: EventEmitter & Partial; + let onOutputEventMock: Mock<(event: ShellOutputEvent) => void>; + + beforeEach(() => { + vi.clearAllMocks(); + + mockIsBinary.mockReturnValue(false); + mockPlatform.mockReturnValue('linux'); + + onOutputEventMock = vi.fn(); + + mockChildProcess = new EventEmitter() as EventEmitter & + Partial; + // FIX: Cast simple EventEmitters to the expected stream type. + mockChildProcess.stdout = new EventEmitter() as Readable; + mockChildProcess.stderr = new EventEmitter() as Readable; + mockChildProcess.kill = vi.fn(); + + // FIX: Use Object.defineProperty to set the readonly 'pid' property. 
+ Object.defineProperty(mockChildProcess, 'pid', { + value: 12345, + configurable: true, + }); + + mockSpawn.mockReturnValue(mockChildProcess); + }); + + // Helper function to run a standard execution simulation + const simulateExecution = async ( + command: string, + simulation: (cp: typeof mockChildProcess, ac: AbortController) => void, + ) => { + const abortController = new AbortController(); + const handle = ShellExecutionService.execute( + command, + '/test/dir', + onOutputEventMock, + abortController.signal, + ); + + await new Promise((resolve) => setImmediate(resolve)); + simulation(mockChildProcess, abortController); + const result = await handle.result; + return { result, handle, abortController }; + }; + + describe('Successful Execution', () => { + it('should execute a command and capture stdout and stderr', async () => { + const { result, handle } = await simulateExecution('ls -l', (cp) => { + cp.stdout?.emit('data', Buffer.from('file1.txt\n')); + cp.stderr?.emit('data', Buffer.from('a warning')); + cp.emit('exit', 0, null); + }); + + expect(mockSpawn).toHaveBeenCalledWith( + 'bash', + ['-c', 'ls -l'], + expect.any(Object), + ); + expect(result.exitCode).toBe(0); + expect(result.signal).toBeNull(); + expect(result.error).toBeNull(); + expect(result.aborted).toBe(false); + expect(result.stdout).toBe('file1.txt\n'); + expect(result.stderr).toBe('a warning'); + expect(result.output).toBe('file1.txt\n\na warning'); + expect(handle.pid).toBe(12345); + + expect(onOutputEventMock).toHaveBeenCalledWith({ + type: 'data', + stream: 'stdout', + chunk: 'file1.txt\n', + }); + expect(onOutputEventMock).toHaveBeenCalledWith({ + type: 'data', + stream: 'stderr', + chunk: 'a warning', + }); + }); + + it('should strip ANSI codes from output', async () => { + const { result } = await simulateExecution('ls --color=auto', (cp) => { + cp.stdout?.emit('data', Buffer.from('a\u001b[31mred\u001b[0mword')); + cp.emit('exit', 0, null); + }); + + 
expect(result.stdout).toBe('aredword'); + expect(onOutputEventMock).toHaveBeenCalledWith({ + type: 'data', + stream: 'stdout', + chunk: 'aredword', + }); + }); + + it('should correctly decode multi-byte characters split across chunks', async () => { + const { result } = await simulateExecution('echo "你好"', (cp) => { + const multiByteChar = Buffer.from('你好', 'utf-8'); + cp.stdout?.emit('data', multiByteChar.slice(0, 2)); + cp.stdout?.emit('data', multiByteChar.slice(2)); + cp.emit('exit', 0, null); + }); + expect(result.stdout).toBe('你好'); + }); + + it('should handle commands with no output', async () => { + const { result } = await simulateExecution('touch file', (cp) => { + cp.emit('exit', 0, null); + }); + + expect(result.stdout).toBe(''); + expect(result.stderr).toBe(''); + expect(result.output).toBe(''); + expect(onOutputEventMock).not.toHaveBeenCalled(); + }); + }); + + describe('Failed Execution', () => { + it('should capture a non-zero exit code and format output correctly', async () => { + const { result } = await simulateExecution('a-bad-command', (cp) => { + cp.stderr?.emit('data', Buffer.from('command not found')); + cp.emit('exit', 127, null); + }); + + expect(result.exitCode).toBe(127); + expect(result.stderr).toBe('command not found'); + expect(result.stdout).toBe(''); + expect(result.output).toBe('\ncommand not found'); + expect(result.error).toBeNull(); + }); + + it('should capture a termination signal', async () => { + const { result } = await simulateExecution('long-process', (cp) => { + cp.emit('exit', null, 'SIGTERM'); + }); + + expect(result.exitCode).toBeNull(); + expect(result.signal).toBe('SIGTERM'); + }); + + it('should handle a spawn error', async () => { + const spawnError = new Error('spawn EACCES'); + const { result } = await simulateExecution('protected-cmd', (cp) => { + cp.emit('error', spawnError); + cp.emit('exit', 1, null); + }); + + expect(result.error).toBe(spawnError); + expect(result.exitCode).toBe(1); + }); + }); + + 
describe('Aborting Commands', () => { + describe.each([ + { + platform: 'linux', + expectedSignal: 'SIGTERM', + expectedExit: { signal: 'SIGKILL' as const }, + }, + { + platform: 'win32', + expectedCommand: 'taskkill', + expectedExit: { code: 1 }, + }, + ])( + 'on $platform', + ({ platform, expectedSignal, expectedCommand, expectedExit }) => { + it('should abort a running process and set the aborted flag', async () => { + mockPlatform.mockReturnValue(platform); + + const { result } = await simulateExecution( + 'sleep 10', + (cp, abortController) => { + abortController.abort(); + if (expectedExit.signal) + cp.emit('exit', null, expectedExit.signal); + if (typeof expectedExit.code === 'number') + cp.emit('exit', expectedExit.code, null); + }, + ); + + expect(result.aborted).toBe(true); + + if (platform === 'linux') { + expect(mockProcessKill).toHaveBeenCalledWith( + -mockChildProcess.pid!, + expectedSignal, + ); + } else { + expect(mockSpawn).toHaveBeenCalledWith(expectedCommand, [ + '/pid', + String(mockChildProcess.pid), + '/f', + '/t', + ]); + } + }); + }, + ); + + it('should gracefully attempt SIGKILL on linux if SIGTERM fails', async () => { + mockPlatform.mockReturnValue('linux'); + vi.useFakeTimers(); + + // Don't await the result inside the simulation block for this specific test. + // We need to control the timeline manually. 
+ const abortController = new AbortController(); + const handle = ShellExecutionService.execute( + 'unresponsive_process', + '/test/dir', + onOutputEventMock, + abortController.signal, + ); + + abortController.abort(); + + // Check the first kill signal + expect(mockProcessKill).toHaveBeenCalledWith( + -mockChildProcess.pid!, + 'SIGTERM', + ); + + // Now, advance time past the timeout + await vi.advanceTimersByTimeAsync(250); + + // Check the second kill signal + expect(mockProcessKill).toHaveBeenCalledWith( + -mockChildProcess.pid!, + 'SIGKILL', + ); + + // Finally, simulate the process exiting and await the result + mockChildProcess.emit('exit', null, 'SIGKILL'); + const result = await handle.result; + + vi.useRealTimers(); + + expect(result.aborted).toBe(true); + expect(result.signal).toBe('SIGKILL'); + // The individual kill calls were already asserted above. + expect(mockProcessKill).toHaveBeenCalledTimes(2); + }); + }); + + describe('Binary Output', () => { + it('should detect binary output and switch to progress events', async () => { + mockIsBinary.mockReturnValueOnce(true); + const binaryChunk1 = Buffer.from([0x89, 0x50, 0x4e, 0x47]); + const binaryChunk2 = Buffer.from([0x0d, 0x0a, 0x1a, 0x0a]); + + const { result } = await simulateExecution('cat image.png', (cp) => { + cp.stdout?.emit('data', binaryChunk1); + cp.stdout?.emit('data', binaryChunk2); + cp.emit('exit', 0, null); + }); + + expect(result.rawOutput).toEqual( + Buffer.concat([binaryChunk1, binaryChunk2]), + ); + expect(onOutputEventMock).toHaveBeenCalledTimes(3); + expect(onOutputEventMock.mock.calls[0][0]).toEqual({ + type: 'binary_detected', + }); + expect(onOutputEventMock.mock.calls[1][0]).toEqual({ + type: 'binary_progress', + bytesReceived: 4, + }); + expect(onOutputEventMock.mock.calls[2][0]).toEqual({ + type: 'binary_progress', + bytesReceived: 8, + }); + }); + + it('should not emit data events after binary is detected', async () => { + mockIsBinary.mockImplementation((buffer) => 
buffer.includes(0x00)); + + await simulateExecution('cat mixed_file', (cp) => { + cp.stdout?.emit('data', Buffer.from('some text')); + cp.stdout?.emit('data', Buffer.from([0x00, 0x01, 0x02])); + cp.stdout?.emit('data', Buffer.from('more text')); + cp.emit('exit', 0, null); + }); + + // FIX: Provide explicit type for the 'call' parameter in the map function. + const eventTypes = onOutputEventMock.mock.calls.map( + (call: [ShellOutputEvent]) => call[0].type, + ); + expect(eventTypes).toEqual([ + 'data', + 'binary_detected', + 'binary_progress', + 'binary_progress', + ]); + }); + }); + + describe('Platform-Specific Behavior', () => { + it('should use cmd.exe on Windows', async () => { + mockPlatform.mockReturnValue('win32'); + await simulateExecution('dir', (cp) => cp.emit('exit', 0, null)); + + expect(mockSpawn).toHaveBeenCalledWith( + 'cmd.exe', + ['/c', 'dir'], + expect.objectContaining({ detached: false }), + ); + }); + + it('should use bash and detached process group on Linux', async () => { + mockPlatform.mockReturnValue('linux'); + await simulateExecution('ls', (cp) => cp.emit('exit', 0, null)); + + expect(mockSpawn).toHaveBeenCalledWith( + 'bash', + ['-c', 'ls'], + expect.objectContaining({ detached: true }), + ); + }); + }); +}); diff --git a/packages/core/src/services/shellExecutionService.ts b/packages/core/src/services/shellExecutionService.ts new file mode 100644 index 000000000..0f0002cde --- /dev/null +++ b/packages/core/src/services/shellExecutionService.ts @@ -0,0 +1,229 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { spawn } from 'child_process'; +import { TextDecoder } from 'util'; +import os from 'os'; +import stripAnsi from 'strip-ansi'; +import { getCachedEncodingForBuffer } from '../utils/systemEncoding.js'; +import { isBinary } from '../utils/textUtils.js'; + +const SIGKILL_TIMEOUT_MS = 200; + +/** A structured result from a shell command execution. 
*/ +export interface ShellExecutionResult { + /** The raw, unprocessed output buffer. */ + rawOutput: Buffer; + /** The combined, decoded stdout and stderr as a string. */ + output: string; + /** The decoded stdout as a string. */ + stdout: string; + /** The decoded stderr as a string. */ + stderr: string; + /** The process exit code, or null if terminated by a signal. */ + exitCode: number | null; + /** The signal that terminated the process, if any. */ + signal: NodeJS.Signals | null; + /** An error object if the process failed to spawn. */ + error: Error | null; + /** A boolean indicating if the command was aborted by the user. */ + aborted: boolean; + /** The process ID of the spawned shell. */ + pid: number | undefined; +} + +/** A handle for an ongoing shell execution. */ +export interface ShellExecutionHandle { + /** The process ID of the spawned shell. */ + pid: number | undefined; + /** A promise that resolves with the complete execution result. */ + result: Promise; +} + +/** + * Describes a structured event emitted during shell command execution. + */ +export type ShellOutputEvent = + | { + /** The event contains a chunk of output data. */ + type: 'data'; + /** The stream from which the data originated. */ + stream: 'stdout' | 'stderr'; + /** The decoded string chunk. */ + chunk: string; + } + | { + /** Signals that the output stream has been identified as binary. */ + type: 'binary_detected'; + } + | { + /** Provides progress updates for a binary stream. */ + type: 'binary_progress'; + /** The total number of bytes received so far. */ + bytesReceived: number; + }; + +/** + * A centralized service for executing shell commands with robust process + * management, cross-platform compatibility, and streaming output capabilities. + * + */ +export class ShellExecutionService { + /** + * Executes a shell command using `spawn`, capturing all output and lifecycle events. + * + * @param commandToExecute The exact command string to run. 
+ * @param cwd The working directory to execute the command in. + * @param onOutputEvent A callback for streaming structured events about the execution, including data chunks and status updates. + * @param abortSignal An AbortSignal to terminate the process and its children. + * @returns An object containing the process ID (pid) and a promise that + * resolves with the complete execution result. + */ + static execute( + commandToExecute: string, + cwd: string, + onOutputEvent: (event: ShellOutputEvent) => void, + abortSignal: AbortSignal, + ): ShellExecutionHandle { + const isWindows = os.platform() === 'win32'; + const shell = isWindows ? 'cmd.exe' : 'bash'; + const shellArgs = [isWindows ? '/c' : '-c', commandToExecute]; + + const child = spawn(shell, shellArgs, { + cwd, + stdio: ['ignore', 'pipe', 'pipe'], + detached: !isWindows, // Use process groups on non-Windows for robust killing + env: { + ...process.env, + GEMINI_CLI: '1', + }, + }); + + const result = new Promise((resolve) => { + // Use decoders to handle multi-byte characters safely (for streaming output). + let stdoutDecoder: TextDecoder | null = null; + let stderrDecoder: TextDecoder | null = null; + + let stdout = ''; + let stderr = ''; + const outputChunks: Buffer[] = []; + let error: Error | null = null; + let exited = false; + + let isStreamingRawContent = true; + const MAX_SNIFF_SIZE = 4096; + let sniffedBytes = 0; + + const handleOutput = (data: Buffer, stream: 'stdout' | 'stderr') => { + if (!stdoutDecoder || !stderrDecoder) { + const encoding = getCachedEncodingForBuffer(data); + try { + stdoutDecoder = new TextDecoder(encoding); + stderrDecoder = new TextDecoder(encoding); + } catch { + // If the encoding is not supported, fall back to utf-8. + // This can happen on some platforms for certain encodings like 'utf-32le'. + stdoutDecoder = new TextDecoder('utf-8'); + stderrDecoder = new TextDecoder('utf-8'); + } + } + + outputChunks.push(data); + + // Binary detection logic. 
This only runs until we've made a determination. + if (isStreamingRawContent && sniffedBytes < MAX_SNIFF_SIZE) { + const sniffBuffer = Buffer.concat(outputChunks.slice(0, 20)); + sniffedBytes = sniffBuffer.length; + + if (isBinary(sniffBuffer)) { + // Change state to stop streaming raw content. + isStreamingRawContent = false; + onOutputEvent({ type: 'binary_detected' }); + } + } + + const decodedChunk = + stream === 'stdout' + ? stdoutDecoder.decode(data, { stream: true }) + : stderrDecoder.decode(data, { stream: true }); + const strippedChunk = stripAnsi(decodedChunk); + + if (stream === 'stdout') { + stdout += strippedChunk; + } else { + stderr += strippedChunk; + } + + if (isStreamingRawContent) { + onOutputEvent({ type: 'data', stream, chunk: strippedChunk }); + } else { + const totalBytes = outputChunks.reduce( + (sum, chunk) => sum + chunk.length, + 0, + ); + onOutputEvent({ type: 'binary_progress', bytesReceived: totalBytes }); + } + }; + + child.stdout.on('data', (data) => handleOutput(data, 'stdout')); + child.stderr.on('data', (data) => handleOutput(data, 'stderr')); + child.on('error', (err) => { + error = err; + }); + + const abortHandler = async () => { + if (child.pid && !exited) { + if (isWindows) { + spawn('taskkill', ['/pid', child.pid.toString(), '/f', '/t']); + } else { + try { + // Kill the entire process group (negative PID). + // SIGTERM first, then SIGKILL if it doesn't die. + process.kill(-child.pid, 'SIGTERM'); + await new Promise((res) => setTimeout(res, SIGKILL_TIMEOUT_MS)); + if (!exited) { + process.kill(-child.pid, 'SIGKILL'); + } + } catch (_e) { + // Fall back to killing just the main process if group kill fails. 
+ if (!exited) child.kill('SIGKILL'); + } + } + } + }; + + abortSignal.addEventListener('abort', abortHandler, { once: true }); + + child.on('exit', (code, signal) => { + exited = true; + abortSignal.removeEventListener('abort', abortHandler); + + if (stdoutDecoder) { + stdout += stripAnsi(stdoutDecoder.decode()); + } + if (stderrDecoder) { + stderr += stripAnsi(stderrDecoder.decode()); + } + + const finalBuffer = Buffer.concat(outputChunks); + + resolve({ + rawOutput: finalBuffer, + output: stdout + (stderr ? `\n${stderr}` : ''), + stdout, + stderr, + exitCode: code, + signal, + error, + aborted: abortSignal.aborted, + pid: child.pid, + }); + }); + }); + + return { pid: child.pid, result }; + } +} diff --git a/packages/core/src/telemetry/clearcut-logger/clearcut-logger.ts b/packages/core/src/telemetry/clearcut-logger/clearcut-logger.ts index 7beacb9b6..d36a16b51 100644 --- a/packages/core/src/telemetry/clearcut-logger/clearcut-logger.ts +++ b/packages/core/src/telemetry/clearcut-logger/clearcut-logger.ts @@ -6,6 +6,8 @@ import { Buffer } from 'buffer'; import * as https from 'https'; +import { HttpsProxyAgent } from 'https-proxy-agent'; + import { StartSessionEvent, EndSessionEvent, @@ -16,6 +18,7 @@ import { ApiErrorEvent, FlashFallbackEvent, LoopDetectedEvent, + FlashDecidedToContinueEvent, } from '../types.js'; import { EventMetadataKey } from './event-metadata-key.js'; import { Config } from '../../config/config.js'; @@ -35,6 +38,7 @@ const api_error_event_name = 'api_error'; const end_session_event_name = 'end_session'; const flash_fallback_event_name = 'flash_fallback'; const loop_detected_event_name = 'loop_detected'; +const flash_decided_to_continue_event_name = 'flash_decided_to_continue'; export interface LogResponse { nextRequestWaitMs?: number; @@ -132,12 +136,18 @@ export class ClearcutLogger { headers: { 'Content-Length': Buffer.byteLength(body) }, }; const bufs: Buffer[] = []; - const req = https.request(options, (res) => { - res.on('data', (buf) => 
bufs.push(buf)); - res.on('end', () => { - resolve(Buffer.concat(bufs)); - }); - }); + const req = https.request( + { + ...options, + agent: this.getProxyAgent(), + }, + (res) => { + res.on('data', (buf) => bufs.push(buf)); + res.on('end', () => { + resolve(Buffer.concat(bufs)); + }); + }, + ); req.on('error', (e) => { if (this.config?.getDebugMode()) { console.log('Clearcut POST request error: ', e); @@ -205,11 +215,16 @@ export class ClearcutLogger { } logStartSessionEvent(event: StartSessionEvent): void { + const surface = process.env.SURFACE || 'SURFACE_NOT_SET'; const data = [ { gemini_cli_key: EventMetadataKey.GEMINI_CLI_START_SESSION_MODEL, value: event.model, }, + { + gemini_cli_key: EventMetadataKey.GEMINI_CLI_SESSION_ID, + value: this.config?.getSessionId() ?? '', + }, { gemini_cli_key: EventMetadataKey.GEMINI_CLI_START_SESSION_EMBEDDING_MODEL, @@ -266,7 +281,12 @@ export class ClearcutLogger { EventMetadataKey.GEMINI_CLI_START_SESSION_TELEMETRY_LOG_USER_PROMPTS_ENABLED, value: event.telemetry_log_user_prompts_enabled.toString(), }, + { + gemini_cli_key: EventMetadataKey.GEMINI_CLI_SURFACE, + value: surface, + }, ]; + // Flush start event immediately this.enqueueLogEvent(this.createLogEvent(start_session_event_name, data)); this.flushToClearcut().catch((error) => { @@ -280,6 +300,10 @@ export class ClearcutLogger { gemini_cli_key: EventMetadataKey.GEMINI_CLI_USER_PROMPT_LENGTH, value: JSON.stringify(event.prompt_length), }, + { + gemini_cli_key: EventMetadataKey.GEMINI_CLI_SESSION_ID, + value: this.config?.getSessionId() ?? '', + }, { gemini_cli_key: EventMetadataKey.GEMINI_CLI_PROMPT_ID, value: JSON.stringify(event.prompt_id), @@ -442,6 +466,10 @@ export class ClearcutLogger { gemini_cli_key: EventMetadataKey.GEMINI_CLI_AUTH_TYPE, value: JSON.stringify(event.auth_type), }, + { + gemini_cli_key: EventMetadataKey.GEMINI_CLI_SESSION_ID, + value: this.config?.getSessionId() ?? 
'', + }, ]; this.enqueueLogEvent(this.createLogEvent(flash_fallback_event_name, data)); @@ -452,6 +480,10 @@ export class ClearcutLogger { logLoopDetectedEvent(event: LoopDetectedEvent): void { const data = [ + { + gemini_cli_key: EventMetadataKey.GEMINI_CLI_PROMPT_ID, + value: JSON.stringify(event.prompt_id), + }, { gemini_cli_key: EventMetadataKey.GEMINI_CLI_LOOP_DETECTED_TYPE, value: JSON.stringify(event.loop_type), @@ -462,10 +494,28 @@ export class ClearcutLogger { this.flushIfNeeded(); } + logFlashDecidedToContinueEvent(event: FlashDecidedToContinueEvent): void { + const data = [ + { + gemini_cli_key: EventMetadataKey.GEMINI_CLI_PROMPT_ID, + value: JSON.stringify(event.prompt_id), + }, + { + gemini_cli_key: EventMetadataKey.GEMINI_CLI_SESSION_ID, + value: this.config?.getSessionId() ?? '', + }, + ]; + + this.enqueueLogEvent( + this.createLogEvent(flash_decided_to_continue_event_name, data), + ); + this.flushIfNeeded(); + } + logEndSessionEvent(event: EndSessionEvent): void { const data = [ { - gemini_cli_key: EventMetadataKey.GEMINI_CLI_END_SESSION_ID, + gemini_cli_key: EventMetadataKey.GEMINI_CLI_SESSION_ID, value: event?.session_id?.toString() ?? 
'', }, ]; @@ -477,6 +527,18 @@ export class ClearcutLogger { }); } + getProxyAgent() { + const proxyUrl = this.config?.getProxy(); + if (!proxyUrl) return undefined; + // undici which is widely used in the repo can only support http & https proxy protocol, + // https://github.com/nodejs/undici/issues/2224 + if (proxyUrl.startsWith('http')) { + return new HttpsProxyAgent(proxyUrl); + } else { + throw new Error('Unsupported proxy type'); + } + } + shutdown() { const event = new EndSessionEvent(this.config); this.logEndSessionEvent(event); diff --git a/packages/core/src/telemetry/clearcut-logger/event-metadata-key.ts b/packages/core/src/telemetry/clearcut-logger/event-metadata-key.ts index 4a47488a0..b34cc6eab 100644 --- a/packages/core/src/telemetry/clearcut-logger/event-metadata-key.ts +++ b/packages/core/src/telemetry/clearcut-logger/event-metadata-key.ts @@ -151,6 +151,12 @@ export enum EventMetadataKey { // Logs the total number of Google accounts ever used. GEMINI_CLI_GOOGLE_ACCOUNTS_COUNT = 37, + // Logs the Surface from where the Gemini CLI was invoked, eg: VSCode. 
+ GEMINI_CLI_SURFACE = 39, + + // Logs the session id + GEMINI_CLI_SESSION_ID = 40, + // ========================================================================== // Loop Detected Event Keys // =========================================================================== diff --git a/packages/core/src/telemetry/constants.ts b/packages/core/src/telemetry/constants.ts index ccaf51e15..d6c8959d5 100644 --- a/packages/core/src/telemetry/constants.ts +++ b/packages/core/src/telemetry/constants.ts @@ -13,7 +13,8 @@ export const EVENT_API_ERROR = 'qwen-code.api_error'; export const EVENT_API_RESPONSE = 'qwen-code.api_response'; export const EVENT_CLI_CONFIG = 'qwen-code.config'; export const EVENT_FLASH_FALLBACK = 'qwen-code.flash_fallback'; - +export const EVENT_FLASH_DECIDED_TO_CONTINUE = + 'qwen-code.flash_decided_to_continue'; export const METRIC_TOOL_CALL_COUNT = 'qwen-code.tool.call.count'; export const METRIC_TOOL_CALL_LATENCY = 'qwen-code.tool.call.latency'; export const METRIC_API_REQUEST_COUNT = 'qwen-code.api.request.count'; diff --git a/packages/core/src/telemetry/file-exporters.ts b/packages/core/src/telemetry/file-exporters.ts new file mode 100644 index 000000000..aee3dfd65 --- /dev/null +++ b/packages/core/src/telemetry/file-exporters.ts @@ -0,0 +1,89 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import * as fs from 'node:fs'; +import { ExportResult, ExportResultCode } from '@opentelemetry/core'; +import { ReadableSpan, SpanExporter } from '@opentelemetry/sdk-trace-base'; +import { ReadableLogRecord, LogRecordExporter } from '@opentelemetry/sdk-logs'; +import { + ResourceMetrics, + PushMetricExporter, + AggregationTemporality, +} from '@opentelemetry/sdk-metrics'; + +class FileExporter { + protected writeStream: fs.WriteStream; + + constructor(filePath: string) { + this.writeStream = fs.createWriteStream(filePath, { flags: 'a' }); + } + + protected serialize(data: unknown): string { + return 
JSON.stringify(data, null, 2) + '\n'; + } + + shutdown(): Promise { + return new Promise((resolve) => { + this.writeStream.end(resolve); + }); + } +} + +export class FileSpanExporter extends FileExporter implements SpanExporter { + export( + spans: ReadableSpan[], + resultCallback: (result: ExportResult) => void, + ): void { + const data = spans.map((span) => this.serialize(span)).join(''); + this.writeStream.write(data, (err) => { + resultCallback({ + code: err ? ExportResultCode.FAILED : ExportResultCode.SUCCESS, + error: err || undefined, + }); + }); + } +} + +export class FileLogExporter extends FileExporter implements LogRecordExporter { + export( + logs: ReadableLogRecord[], + resultCallback: (result: ExportResult) => void, + ): void { + const data = logs.map((log) => this.serialize(log)).join(''); + this.writeStream.write(data, (err) => { + resultCallback({ + code: err ? ExportResultCode.FAILED : ExportResultCode.SUCCESS, + error: err || undefined, + }); + }); + } +} + +export class FileMetricExporter + extends FileExporter + implements PushMetricExporter +{ + export( + metrics: ResourceMetrics, + resultCallback: (result: ExportResult) => void, + ): void { + const data = this.serialize(metrics); + this.writeStream.write(data, (err) => { + resultCallback({ + code: err ? 
ExportResultCode.FAILED : ExportResultCode.SUCCESS, + error: err || undefined, + }); + }); + } + + getPreferredAggregationTemporality(): AggregationTemporality { + return AggregationTemporality.CUMULATIVE; + } + + async forceFlush(): Promise { + return Promise.resolve(); + } +} diff --git a/packages/core/src/telemetry/index.ts b/packages/core/src/telemetry/index.ts index eeb699e87..5163084a5 100644 --- a/packages/core/src/telemetry/index.ts +++ b/packages/core/src/telemetry/index.ts @@ -6,11 +6,12 @@ export enum TelemetryTarget { GCP = 'gcp', - QW = 'qw', + LOCAL = 'local', + QWEN = 'qwen', } -const DEFAULT_TELEMETRY_TARGET = TelemetryTarget.QW; -const DEFAULT_OTLP_ENDPOINT = 'http://tracing-analysis-dc-hz.aliyuncs.com:8090'; +const DEFAULT_TELEMETRY_TARGET = TelemetryTarget.LOCAL; +const DEFAULT_OTLP_ENDPOINT = 'http://localhost:4317'; export { DEFAULT_TELEMETRY_TARGET, DEFAULT_OTLP_ENDPOINT }; export { diff --git a/packages/core/src/telemetry/loggers.ts b/packages/core/src/telemetry/loggers.ts index 1d6da51a8..073124f48 100644 --- a/packages/core/src/telemetry/loggers.ts +++ b/packages/core/src/telemetry/loggers.ts @@ -5,7 +5,6 @@ */ import { logs, LogRecord, LogAttributes } from '@opentelemetry/api-logs'; -import { trace, context } from '@opentelemetry/api'; import { SemanticAttributes } from '@opentelemetry/semantic-conventions'; import { Config } from '../config/config.js'; import { @@ -16,6 +15,7 @@ import { EVENT_TOOL_CALL, EVENT_USER_PROMPT, EVENT_FLASH_FALLBACK, + EVENT_FLASH_DECIDED_TO_CONTINUE, SERVICE_NAME, } from './constants.js'; import { @@ -26,6 +26,7 @@ import { ToolCallEvent, UserPromptEvent, FlashFallbackEvent, + FlashDecidedToContinueEvent, LoopDetectedEvent, } from './types.js'; import { @@ -36,7 +37,7 @@ import { } from './metrics.js'; import { isTelemetrySdkInitialized } from './sdk.js'; import { uiTelemetryService, UiEvent } from './uiTelemetry.js'; -// import { ClearcutLogger } from './clearcut-logger/clearcut-logger.js'; +import { 
ClearcutLogger } from './clearcut-logger/clearcut-logger.js'; import { safeJsonStringify } from '../utils/safeJsonStringify.js'; const shouldLogUserPrompts = (config: Config): boolean => @@ -48,32 +49,11 @@ function getCommonAttributes(config: Config): LogAttributes { }; } -// Helper function to create spans and emit logs within span context -function logWithSpan( - spanName: string, - logBody: string, - attributes: LogAttributes, -): void { - const tracer = trace.getTracer(SERVICE_NAME); - const span = tracer.startSpan(spanName); - - context.with(trace.setSpan(context.active(), span), () => { - const logger = logs.getLogger(SERVICE_NAME); - const logRecord: LogRecord = { - body: logBody, - attributes, - }; - logger.emit(logRecord); - }); - - span.end(); -} - export function logCliConfiguration( config: Config, event: StartSessionEvent, ): void { - // ClearcutLogger.getInstance(config)?.logStartSessionEvent(event); + ClearcutLogger.getInstance(config)?.logStartSessionEvent(event); if (!isTelemetrySdkInitialized()) return; const attributes: LogAttributes = { @@ -93,11 +73,16 @@ export function logCliConfiguration( mcp_servers: event.mcp_servers, }; - logWithSpan('cli.configuration', 'CLI configuration loaded.', attributes); + const logger = logs.getLogger(SERVICE_NAME); + const logRecord: LogRecord = { + body: 'CLI configuration loaded.', + attributes, + }; + logger.emit(logRecord); } export function logUserPrompt(config: Config, event: UserPromptEvent): void { - // ClearcutLogger.getInstance(config)?.logNewPromptEvent(event); + ClearcutLogger.getInstance(config)?.logNewPromptEvent(event); if (!isTelemetrySdkInitialized()) return; const attributes: LogAttributes = { @@ -111,11 +96,12 @@ export function logUserPrompt(config: Config, event: UserPromptEvent): void { attributes.prompt = event.prompt; } - logWithSpan( - 'user.prompt', - `User prompt. 
Length: ${event.prompt_length}.`, + const logger = logs.getLogger(SERVICE_NAME); + const logRecord: LogRecord = { + body: `User prompt. Length: ${event.prompt_length}.`, attributes, - ); + }; + logger.emit(logRecord); } export function logToolCall(config: Config, event: ToolCallEvent): void { @@ -125,7 +111,7 @@ export function logToolCall(config: Config, event: ToolCallEvent): void { 'event.timestamp': new Date().toISOString(), } as UiEvent; uiTelemetryService.addEvent(uiEvent); - // ClearcutLogger.getInstance(config)?.logToolCallEvent(event); + ClearcutLogger.getInstance(config)?.logToolCallEvent(event); if (!isTelemetrySdkInitialized()) return; const attributes: LogAttributes = { @@ -142,11 +128,12 @@ export function logToolCall(config: Config, event: ToolCallEvent): void { } } - logWithSpan( - `tool.${event.function_name}`, - `Tool call: ${event.function_name}${event.decision ? `. Decision: ${event.decision}` : ''}. Success: ${event.success}. Duration: ${event.duration_ms}ms.`, + const logger = logs.getLogger(SERVICE_NAME); + const logRecord: LogRecord = { + body: `Tool call: ${event.function_name}${event.decision ? `. Decision: ${event.decision}` : ''}. Success: ${event.success}. 
Duration: ${event.duration_ms}ms.`, attributes, - ); + }; + logger.emit(logRecord); recordToolCallMetrics( config, event.function_name, @@ -157,7 +144,7 @@ export function logToolCall(config: Config, event: ToolCallEvent): void { } export function logApiRequest(config: Config, event: ApiRequestEvent): void { - // ClearcutLogger.getInstance(config)?.logApiRequestEvent(event); + ClearcutLogger.getInstance(config)?.logApiRequestEvent(event); if (!isTelemetrySdkInitialized()) return; const attributes: LogAttributes = { @@ -167,18 +154,19 @@ export function logApiRequest(config: Config, event: ApiRequestEvent): void { 'event.timestamp': new Date().toISOString(), }; - logWithSpan( - `api.request.${event.model}`, - `API request to ${event.model}.`, + const logger = logs.getLogger(SERVICE_NAME); + const logRecord: LogRecord = { + body: `API request to ${event.model}.`, attributes, - ); + }; + logger.emit(logRecord); } export function logFlashFallback( config: Config, event: FlashFallbackEvent, ): void { - // ClearcutLogger.getInstance(config)?.logFlashFallbackEvent(event); + ClearcutLogger.getInstance(config)?.logFlashFallbackEvent(event); if (!isTelemetrySdkInitialized()) return; const attributes: LogAttributes = { @@ -188,11 +176,12 @@ export function logFlashFallback( 'event.timestamp': new Date().toISOString(), }; - logWithSpan( - 'api.flash_fallback', - 'Switching to flash as Fallback.', + const logger = logs.getLogger(SERVICE_NAME); + const logRecord: LogRecord = { + body: `Switching to flash as Fallback.`, attributes, - ); + }; + logger.emit(logRecord); } export function logApiError(config: Config, event: ApiErrorEvent): void { @@ -202,7 +191,7 @@ export function logApiError(config: Config, event: ApiErrorEvent): void { 'event.timestamp': new Date().toISOString(), } as UiEvent; uiTelemetryService.addEvent(uiEvent); - // ClearcutLogger.getInstance(config)?.logApiErrorEvent(event); + ClearcutLogger.getInstance(config)?.logApiErrorEvent(event); if 
(!isTelemetrySdkInitialized()) return; const attributes: LogAttributes = { @@ -222,11 +211,12 @@ export function logApiError(config: Config, event: ApiErrorEvent): void { attributes[SemanticAttributes.HTTP_STATUS_CODE] = event.status_code; } - logWithSpan( - `api.error.${event.model}`, - `API error for ${event.model}. Error: ${event.error}. Duration: ${event.duration_ms}ms.`, + const logger = logs.getLogger(SERVICE_NAME); + const logRecord: LogRecord = { + body: `API error for ${event.model}. Error: ${event.error}. Duration: ${event.duration_ms}ms.`, attributes, - ); + }; + logger.emit(logRecord); recordApiErrorMetrics( config, event.model, @@ -243,7 +233,7 @@ export function logApiResponse(config: Config, event: ApiResponseEvent): void { 'event.timestamp': new Date().toISOString(), } as UiEvent; uiTelemetryService.addEvent(uiEvent); - // ClearcutLogger.getInstance(config)?.logApiResponseEvent(event); + ClearcutLogger.getInstance(config)?.logApiResponseEvent(event); if (!isTelemetrySdkInitialized()) return; const attributes: LogAttributes = { ...getCommonAttributes(config), @@ -262,11 +252,12 @@ export function logApiResponse(config: Config, event: ApiResponseEvent): void { } } - logWithSpan( - `api.response.${event.model}`, - `API response from ${event.model}. Status: ${event.status_code || 'N/A'}. Duration: ${event.duration_ms}ms.`, + const logger = logs.getLogger(SERVICE_NAME); + const logRecord: LogRecord = { + body: `API response from ${event.model}. Status: ${event.status_code || 'N/A'}. 
Duration: ${event.duration_ms}ms.`, attributes, - ); + }; + logger.emit(logRecord); recordApiResponseMetrics( config, event.model, @@ -305,7 +296,7 @@ export function logLoopDetected( config: Config, event: LoopDetectedEvent, ): void { - // ClearcutLogger.getInstance(config)?.logLoopDetectedEvent(event); + ClearcutLogger.getInstance(config)?.logLoopDetectedEvent(event); if (!isTelemetrySdkInitialized()) return; const attributes: LogAttributes = { @@ -313,9 +304,31 @@ export function logLoopDetected( ...event, }; - logWithSpan( - 'loop.detected', - `Loop detected. Type: ${event.loop_type}.`, + const logger = logs.getLogger(SERVICE_NAME); + const logRecord: LogRecord = { + body: `Loop detected. Type: ${event.loop_type}.`, attributes, - ); + }; + logger.emit(logRecord); +} + +export function logFlashDecidedToContinue( + config: Config, + event: FlashDecidedToContinueEvent, +): void { + ClearcutLogger.getInstance(config)?.logFlashDecidedToContinueEvent(event); + if (!isTelemetrySdkInitialized()) return; + + const attributes: LogAttributes = { + ...getCommonAttributes(config), + ...event, + 'event.name': EVENT_FLASH_DECIDED_TO_CONTINUE, + }; + + const logger = logs.getLogger(SERVICE_NAME); + const logRecord: LogRecord = { + body: `Flash decided to continue.`, + attributes, + }; + logger.emit(logRecord); } diff --git a/packages/core/src/telemetry/sdk.ts b/packages/core/src/telemetry/sdk.ts index 447a5fc64..1167750a4 100644 --- a/packages/core/src/telemetry/sdk.ts +++ b/packages/core/src/telemetry/sdk.ts @@ -6,23 +6,34 @@ import { DiagConsoleLogger, DiagLogLevel, diag } from '@opentelemetry/api'; import { OTLPTraceExporter } from '@opentelemetry/exporter-trace-otlp-grpc'; +import { OTLPLogExporter } from '@opentelemetry/exporter-logs-otlp-grpc'; import { OTLPMetricExporter } from '@opentelemetry/exporter-metrics-otlp-grpc'; import { CompressionAlgorithm } from '@opentelemetry/otlp-exporter-base'; -import { Metadata } from '@grpc/grpc-js'; import { NodeSDK } from 
'@opentelemetry/sdk-node'; import { SemanticResourceAttributes } from '@opentelemetry/semantic-conventions'; import { Resource } from '@opentelemetry/resources'; -import { BatchSpanProcessor } from '@opentelemetry/sdk-trace-node'; -import { BatchLogRecordProcessor } from '@opentelemetry/sdk-logs'; -import { PeriodicExportingMetricReader } from '@opentelemetry/sdk-metrics'; -import type { ReadableSpan } from '@opentelemetry/sdk-trace-base'; -import type { LogRecord } from '@opentelemetry/sdk-logs'; -import type { ResourceMetrics } from '@opentelemetry/sdk-metrics'; -import type { ExportResult } from '@opentelemetry/core'; +import { + BatchSpanProcessor, + ConsoleSpanExporter, +} from '@opentelemetry/sdk-trace-node'; +import { + BatchLogRecordProcessor, + ConsoleLogRecordExporter, +} from '@opentelemetry/sdk-logs'; +import { + ConsoleMetricExporter, + PeriodicExportingMetricReader, +} from '@opentelemetry/sdk-metrics'; import { HttpInstrumentation } from '@opentelemetry/instrumentation-http'; import { Config } from '../config/config.js'; import { SERVICE_NAME } from './constants.js'; import { initializeMetrics } from './metrics.js'; +import { ClearcutLogger } from './clearcut-logger/clearcut-logger.js'; +import { + FileLogExporter, + FileMetricExporter, + FileSpanExporter, +} from './file-exporters.js'; // For troubleshooting, set the log level to DiagLogLevel.DEBUG diag.setLogger(new DiagConsoleLogger(), DiagLogLevel.INFO); @@ -68,63 +79,41 @@ export function initializeTelemetry(config: Config): void { const otlpEndpoint = config.getTelemetryOtlpEndpoint(); const grpcParsedEndpoint = parseGrpcEndpoint(otlpEndpoint); const useOtlp = !!grpcParsedEndpoint; - - const metadata = new Metadata(); - metadata.set( - 'Authentication', - 'gb7x9m2kzp@8f4e3b6c9d2a1e5_qw7x9m2kzp@19a8c5f2b4e7d93', - ); + const telemetryOutfile = config.getTelemetryOutfile(); const spanExporter = useOtlp ? 
new OTLPTraceExporter({ url: grpcParsedEndpoint, compression: CompressionAlgorithm.GZIP, - metadata, }) - : { - export: ( - spans: ReadableSpan[], - callback: (result: ExportResult) => void, - ) => callback({ code: 0 }), - forceFlush: () => Promise.resolve(), - shutdown: () => Promise.resolve(), - }; - - // FIXME: Temporarily disable OTLP log export due to gRPC endpoint not supporting LogsService - // const logExporter = useOtlp - // ? new OTLPLogExporter({ - // url: grpcParsedEndpoint, - // compression: CompressionAlgorithm.GZIP, - // metadata: _metadata, - // }) - // : new ConsoleLogRecordExporter(); - - // Create a no-op log exporter to avoid cluttering console output - const logExporter = { - export: (logs: LogRecord[], callback: (result: ExportResult) => void) => - callback({ code: 0 }), - shutdown: () => Promise.resolve(), - }; + : telemetryOutfile + ? new FileSpanExporter(telemetryOutfile) + : new ConsoleSpanExporter(); + const logExporter = useOtlp + ? new OTLPLogExporter({ + url: grpcParsedEndpoint, + compression: CompressionAlgorithm.GZIP, + }) + : telemetryOutfile + ? new FileLogExporter(telemetryOutfile) + : new ConsoleLogRecordExporter(); const metricReader = useOtlp ? new PeriodicExportingMetricReader({ exporter: new OTLPMetricExporter({ url: grpcParsedEndpoint, compression: CompressionAlgorithm.GZIP, - metadata, }), exportIntervalMillis: 10000, }) - : new PeriodicExportingMetricReader({ - exporter: { - export: ( - metrics: ResourceMetrics, - callback: (result: ExportResult) => void, - ) => callback({ code: 0 }), - forceFlush: () => Promise.resolve(), - shutdown: () => Promise.resolve(), - }, - exportIntervalMillis: 10000, - }); + : telemetryOutfile + ? 
new PeriodicExportingMetricReader({ + exporter: new FileMetricExporter(telemetryOutfile), + exportIntervalMillis: 10000, + }) + : new PeriodicExportingMetricReader({ + exporter: new ConsoleMetricExporter(), + exportIntervalMillis: 10000, + }); sdk = new NodeSDK({ resource, @@ -152,7 +141,7 @@ export async function shutdownTelemetry(): Promise { return; } try { - // ClearcutLogger.getInstance()?.shutdown(); + ClearcutLogger.getInstance()?.shutdown(); await sdk.shutdown(); console.log('OpenTelemetry SDK shut down successfully.'); } catch (error) { diff --git a/packages/core/src/telemetry/types.ts b/packages/core/src/telemetry/types.ts index 54da02143..69dffb088 100644 --- a/packages/core/src/telemetry/types.ts +++ b/packages/core/src/telemetry/types.ts @@ -249,17 +249,32 @@ export class FlashFallbackEvent { export enum LoopType { CONSECUTIVE_IDENTICAL_TOOL_CALLS = 'consecutive_identical_tool_calls', CHANTING_IDENTICAL_SENTENCES = 'chanting_identical_sentences', + LLM_DETECTED_LOOP = 'llm_detected_loop', } export class LoopDetectedEvent { 'event.name': 'loop_detected'; 'event.timestamp': string; // ISO 8601 loop_type: LoopType; + prompt_id: string; - constructor(loop_type: LoopType) { + constructor(loop_type: LoopType, prompt_id: string) { this['event.name'] = 'loop_detected'; this['event.timestamp'] = new Date().toISOString(); this.loop_type = loop_type; + this.prompt_id = prompt_id; + } +} + +export class FlashDecidedToContinueEvent { + 'event.name': 'flash_decided_to_continue'; + 'event.timestamp': string; // ISO 8601 + prompt_id: string; + + constructor(prompt_id: string) { + this['event.name'] = 'flash_decided_to_continue'; + this['event.timestamp'] = new Date().toISOString(); + this.prompt_id = prompt_id; } } @@ -272,4 +287,5 @@ export type TelemetryEvent = | ApiErrorEvent | ApiResponseEvent | FlashFallbackEvent - | LoopDetectedEvent; + | LoopDetectedEvent + | FlashDecidedToContinueEvent; diff --git a/packages/core/src/telemetry/uiTelemetry.test.ts 
b/packages/core/src/telemetry/uiTelemetry.test.ts index 34a2fe225..38ba7a91f 100644 --- a/packages/core/src/telemetry/uiTelemetry.test.ts +++ b/packages/core/src/telemetry/uiTelemetry.test.ts @@ -508,4 +508,116 @@ describe('UiTelemetryService', () => { expect(tools.byName['tool_B'].count).toBe(1); }); }); + + describe('resetLastPromptTokenCount', () => { + it('should reset the last prompt token count to 0', () => { + // First, set up some initial token count + const event = { + 'event.name': EVENT_API_RESPONSE, + model: 'gemini-2.5-pro', + duration_ms: 500, + input_token_count: 100, + output_token_count: 200, + total_token_count: 300, + cached_content_token_count: 50, + thoughts_token_count: 20, + tool_token_count: 30, + } as ApiResponseEvent & { 'event.name': typeof EVENT_API_RESPONSE }; + + service.addEvent(event); + expect(service.getLastPromptTokenCount()).toBe(100); + + // Now reset the token count + service.resetLastPromptTokenCount(); + expect(service.getLastPromptTokenCount()).toBe(0); + }); + + it('should emit an update event when resetLastPromptTokenCount is called', () => { + const spy = vi.fn(); + service.on('update', spy); + + // Set up initial token count + const event = { + 'event.name': EVENT_API_RESPONSE, + model: 'gemini-2.5-pro', + duration_ms: 500, + input_token_count: 100, + output_token_count: 200, + total_token_count: 300, + cached_content_token_count: 50, + thoughts_token_count: 20, + tool_token_count: 30, + } as ApiResponseEvent & { 'event.name': typeof EVENT_API_RESPONSE }; + + service.addEvent(event); + spy.mockClear(); // Clear the spy to focus on the reset call + + service.resetLastPromptTokenCount(); + + expect(spy).toHaveBeenCalledOnce(); + const { metrics, lastPromptTokenCount } = spy.mock.calls[0][0]; + expect(metrics).toBeDefined(); + expect(lastPromptTokenCount).toBe(0); + }); + + it('should not affect other metrics when resetLastPromptTokenCount is called', () => { + // Set up initial state with some metrics + const event = { + 
'event.name': EVENT_API_RESPONSE, + model: 'gemini-2.5-pro', + duration_ms: 500, + input_token_count: 100, + output_token_count: 200, + total_token_count: 300, + cached_content_token_count: 50, + thoughts_token_count: 20, + tool_token_count: 30, + } as ApiResponseEvent & { 'event.name': typeof EVENT_API_RESPONSE }; + + service.addEvent(event); + + const metricsBefore = service.getMetrics(); + + service.resetLastPromptTokenCount(); + + const metricsAfter = service.getMetrics(); + + // Metrics should be unchanged + expect(metricsAfter).toEqual(metricsBefore); + + // Only the last prompt token count should be reset + expect(service.getLastPromptTokenCount()).toBe(0); + }); + + it('should work correctly when called multiple times', () => { + const spy = vi.fn(); + service.on('update', spy); + + // Set up initial token count + const event = { + 'event.name': EVENT_API_RESPONSE, + model: 'gemini-2.5-pro', + duration_ms: 500, + input_token_count: 100, + output_token_count: 200, + total_token_count: 300, + cached_content_token_count: 50, + thoughts_token_count: 20, + tool_token_count: 30, + } as ApiResponseEvent & { 'event.name': typeof EVENT_API_RESPONSE }; + + service.addEvent(event); + expect(service.getLastPromptTokenCount()).toBe(100); + + // Reset once + service.resetLastPromptTokenCount(); + expect(service.getLastPromptTokenCount()).toBe(0); + + // Reset again - should still be 0 and still emit event + spy.mockClear(); + service.resetLastPromptTokenCount(); + expect(service.getLastPromptTokenCount()).toBe(0); + expect(spy).toHaveBeenCalledOnce(); + }); + }); }); diff --git a/packages/core/src/telemetry/uiTelemetry.ts b/packages/core/src/telemetry/uiTelemetry.ts index 71409696e..2713ac658 100644 --- a/packages/core/src/telemetry/uiTelemetry.ts +++ b/packages/core/src/telemetry/uiTelemetry.ts @@ -133,6 +133,14 @@ export class UiTelemetryService extends EventEmitter { return this.#lastPromptTokenCount; } + resetLastPromptTokenCount(): void { + 
this.#lastPromptTokenCount = 0; + this.emit('update', { + metrics: this.#metrics, + lastPromptTokenCount: this.#lastPromptTokenCount, + }); + } + private getOrCreateModelMetrics(modelName: string): ModelMetrics { if (!this.#metrics.models[modelName]) { this.#metrics.models[modelName] = createInitialModelMetrics(); diff --git a/packages/core/src/tools/edit.test.ts b/packages/core/src/tools/edit.test.ts index 84ad1daf4..4ff33ff49 100644 --- a/packages/core/src/tools/edit.test.ts +++ b/packages/core/src/tools/edit.test.ts @@ -608,6 +608,19 @@ describe('EditTool', () => { /User modified the `new_string` content/, ); }); + + it('should return error if old_string and new_string are identical', async () => { + const initialContent = 'This is some identical text.'; + fs.writeFileSync(filePath, initialContent, 'utf8'); + const params: EditToolParams = { + file_path: filePath, + old_string: 'identical', + new_string: 'identical', + }; + const result = await tool.execute(params, new AbortController().signal); + expect(result.llmContent).toMatch(/No changes to apply/); + expect(result.returnDisplay).toMatch(/No changes to apply/); + }); }); describe('getDescription', () => { diff --git a/packages/core/src/tools/edit.ts b/packages/core/src/tools/edit.ts index 8d8753d42..fd936611e 100644 --- a/packages/core/src/tools/edit.ts +++ b/packages/core/src/tools/edit.ts @@ -9,9 +9,11 @@ import * as path from 'path'; import * as Diff from 'diff'; import { BaseTool, + Icon, ToolCallConfirmationDetails, ToolConfirmationOutcome, ToolEditConfirmationDetails, + ToolLocation, ToolResult, ToolResultDisplay, } from './tools.js'; @@ -89,6 +91,7 @@ Expectation for required parameters: 4. NEVER escape \`old_string\` or \`new_string\`, that would break the exact literal text requirement. **Important:** If ANY of the above are not satisfied, the tool will fail. CRITICAL for \`old_string\`: Must uniquely identify the single instance to change. 
Include at least 3 lines of context BEFORE and AFTER the target text, matching whitespace and indentation precisely. If this string matches multiple locations, or does not match exactly, the tool will fail. **Multiple replacements:** Set \`expected_replacements\` to the number of occurrences you want to replace. The tool will replace ALL occurrences that match \`old_string\` exactly. Ensure the number of replacements matches your expectation.`, + Icon.Pencil, { properties: { file_path: { @@ -141,6 +144,15 @@ Expectation for required parameters: return null; } + /** + * Determines any file locations affected by the tool execution + * @param params Parameters for the tool execution + * @returns A list of such paths + */ + toolLocations(params: EditToolParams): ToolLocation[] { + return [{ path: params.file_path }]; + } + private _applyReplacement( currentContent: string | null, oldString: string, @@ -197,7 +209,7 @@ Expectation for required parameters: // Creating a new file isNewFile = true; } else if (!fileExists) { - // Trying to edit a non-existent file (and old_string is not empty) + // Trying to edit a nonexistent file (and old_string is not empty) error = { display: `File not found. Cannot apply edit. Use an empty old_string to create a new file.`, raw: `File not found: ${params.file_path}`, @@ -227,12 +239,17 @@ Expectation for required parameters: raw: `Failed to edit, 0 occurrences found for old_string in ${params.file_path}. No edits made. The exact text in old_string was not found. Ensure you're not escaping content incorrectly and check whitespace, indentation, and context. Use ${ReadFileTool.Name} tool to verify.`, }; } else if (occurrences !== expectedReplacements) { - const occurenceTerm = + const occurrenceTerm = expectedReplacements === 1 ? 
'occurrence' : 'occurrences'; error = { - display: `Failed to edit, expected ${expectedReplacements} ${occurenceTerm} but found ${occurrences}.`, - raw: `Failed to edit, Expected ${expectedReplacements} ${occurenceTerm} but found ${occurrences} for old_string in file: ${params.file_path}`, + display: `Failed to edit, expected ${expectedReplacements} ${occurrenceTerm} but found ${occurrences}.`, + raw: `Failed to edit, Expected ${expectedReplacements} ${occurrenceTerm} but found ${occurrences} for old_string in file: ${params.file_path}`, + }; + } else if (finalOldString === finalNewString) { + error = { + display: `No changes to apply. The old_string and new_string are identical.`, + raw: `No changes to apply. The old_string and new_string are identical in file: ${params.file_path}`, }; } } else { @@ -306,6 +323,8 @@ Expectation for required parameters: title: `Confirm Edit: ${shortenPath(makeRelative(params.file_path, this.config.getTargetDir()))}`, fileName, fileDiff, + originalContent: editData.currentContent, + newContent: editData.newContent, onConfirm: async (outcome: ToolConfirmationOutcome) => { if (outcome === ToolConfirmationOutcome.ProceedAlways) { this.config.setApprovalMode(ApprovalMode.AUTO_EDIT); @@ -394,7 +413,12 @@ Expectation for required parameters: 'Proposed', DEFAULT_DIFF_OPTIONS, ); - displayResult = { fileDiff, fileName }; + displayResult = { + fileDiff, + fileName, + originalContent: editData.currentContent, + newContent: editData.newContent, + }; } const llmSuccessMessageParts = [ diff --git a/packages/core/src/tools/glob.test.ts b/packages/core/src/tools/glob.test.ts index c63b41cc9..51effe4eb 100644 --- a/packages/core/src/tools/glob.test.ts +++ b/packages/core/src/tools/glob.test.ts @@ -150,11 +150,19 @@ describe('GlobTool', () => { expect(typeof llmContent).toBe('string'); const filesListed = llmContent - .substring(llmContent.indexOf(':') + 1) .trim() - .split('\n'); - expect(filesListed[0]).toContain(path.join(tempRootDir, 
'newer.sortme')); - expect(filesListed[1]).toContain(path.join(tempRootDir, 'older.sortme')); + .split(/\r?\n/) + .slice(1) + .map((line) => line.trim()) + .filter(Boolean); + + expect(filesListed).toHaveLength(2); + expect(path.resolve(filesListed[0])).toBe( + path.resolve(tempRootDir, 'newer.sortme'), + ); + expect(path.resolve(filesListed[1])).toBe( + path.resolve(tempRootDir, 'older.sortme'), + ); }); }); diff --git a/packages/core/src/tools/glob.ts b/packages/core/src/tools/glob.ts index 9381894e5..2e829e4c4 100644 --- a/packages/core/src/tools/glob.ts +++ b/packages/core/src/tools/glob.ts @@ -8,7 +8,7 @@ import fs from 'fs'; import path from 'path'; import { glob } from 'glob'; import { SchemaValidator } from '../utils/schemaValidator.js'; -import { BaseTool, ToolResult } from './tools.js'; +import { BaseTool, Icon, ToolResult } from './tools.js'; import { Type } from '@google/genai'; import { shortenPath, makeRelative } from '../utils/paths.js'; import { isWithinRoot } from '../utils/fileUtils.js'; @@ -86,6 +86,7 @@ export class GlobTool extends BaseTool { GlobTool.Name, 'FindFiles', 'Efficiently finds files matching specific glob patterns (e.g., `src/**/*.ts`, `**/*.md`), returning absolute paths sorted by modification time (newest first). 
Ideal for quickly locating files based on their name or path structure, especially in large codebases.', + Icon.FileSearch, { properties: { pattern: { @@ -199,7 +200,7 @@ export class GlobTool extends BaseTool { this.config.getFileFilteringRespectGitIgnore(); const fileDiscovery = this.config.getFileService(); - const entries = (await glob(params.pattern, { + const entries = await glob(params.pattern, { cwd: searchDirAbsolute, withFileTypes: true, nodir: true, @@ -209,7 +210,7 @@ export class GlobTool extends BaseTool { ignore: ['**/node_modules/**', '**/.git/**'], follow: false, signal, - })) as GlobPath[]; + }); // Apply git-aware filtering if enabled and in git repository let filteredEntries = entries; diff --git a/packages/core/src/tools/grep.test.ts b/packages/core/src/tools/grep.test.ts index 2e018cce2..012950833 100644 --- a/packages/core/src/tools/grep.test.ts +++ b/packages/core/src/tools/grep.test.ts @@ -17,7 +17,7 @@ vi.mock('child_process', () => ({ on: (event: string, cb: (...args: unknown[]) => void) => { if (event === 'error' || event === 'close') { // Simulate command not found or error for git grep and system grep - // to force fallback to JS implementation. + // to force it to fall back to JS implementation. 
setTimeout(() => cb(1), 0); // cb(1) for error/close } }, @@ -125,7 +125,9 @@ describe('GrepTool', () => { expect(result.llmContent).toContain('File: fileA.txt'); expect(result.llmContent).toContain('L1: hello world'); expect(result.llmContent).toContain('L2: second line with world'); - expect(result.llmContent).toContain('File: sub/fileC.txt'); + expect(result.llmContent).toContain( + `File: ${path.join('sub', 'fileC.txt')}`, + ); expect(result.llmContent).toContain('L1: another world in sub dir'); expect(result.returnDisplay).toBe('Found 3 matches'); }); @@ -235,7 +237,7 @@ describe('GrepTool', () => { it('should generate correct description with pattern and path', () => { const params: GrepToolParams = { pattern: 'testPattern', - path: 'src/app', + path: path.join('src', 'app'), }; // The path will be relative to the tempRootDir, so we check for containment. expect(grepTool.getDescription(params)).toContain("'testPattern' within"); @@ -248,12 +250,14 @@ describe('GrepTool', () => { const params: GrepToolParams = { pattern: 'testPattern', include: '*.ts', - path: 'src/app', + path: path.join('src', 'app'), }; expect(grepTool.getDescription(params)).toContain( "'testPattern' in *.ts within", ); - expect(grepTool.getDescription(params)).toContain('src/app'); + expect(grepTool.getDescription(params)).toContain( + path.join('src', 'app'), + ); }); it('should use ./ for root path in description', () => { diff --git a/packages/core/src/tools/grep.ts b/packages/core/src/tools/grep.ts index afe830503..c1f9ecf6d 100644 --- a/packages/core/src/tools/grep.ts +++ b/packages/core/src/tools/grep.ts @@ -9,8 +9,8 @@ import fsPromises from 'fs/promises'; import path from 'path'; import { EOL } from 'os'; import { spawn } from 'child_process'; -import { globStream } from 'glob'; -import { BaseTool, ToolResult } from './tools.js'; +import { globIterate } from 'glob'; +import { BaseTool, Icon, ToolResult } from './tools.js'; import { Type } from '@google/genai'; import { 
SchemaValidator } from '../utils/schemaValidator.js'; import { makeRelative, shortenPath } from '../utils/paths.js'; @@ -62,6 +62,7 @@ export class GrepTool extends BaseTool { GrepTool.Name, 'SearchText', 'Searches for a regular expression pattern within the content of files in a specified directory (or current working directory). Can filter files by a glob pattern. Returns the lines containing matches, along with their file paths and line numbers.', + Icon.Regex, { properties: { pattern: { @@ -498,7 +499,7 @@ export class GrepTool extends BaseTool { '.hg/**', ]; // Use glob patterns for ignores here - const filesStream = globStream(globPattern, { + const filesIterator = globIterate(globPattern, { cwd: absolutePath, dot: true, ignore: ignorePatterns, @@ -510,7 +511,7 @@ export class GrepTool extends BaseTool { const regex = new RegExp(pattern, 'i'); const allMatches: GrepMatch[] = []; - for await (const filePath of filesStream) { + for await (const filePath of filesIterator) { const fileAbsolutePath = filePath as string; try { const content = await fsPromises.readFile(fileAbsolutePath, 'utf8'); diff --git a/packages/core/src/tools/ls.ts b/packages/core/src/tools/ls.ts index 9fb60072a..68a691013 100644 --- a/packages/core/src/tools/ls.ts +++ b/packages/core/src/tools/ls.ts @@ -6,11 +6,11 @@ import fs from 'fs'; import path from 'path'; -import { BaseTool, ToolResult } from './tools.js'; +import { BaseTool, Icon, ToolResult } from './tools.js'; import { Type } from '@google/genai'; import { SchemaValidator } from '../utils/schemaValidator.js'; import { makeRelative, shortenPath } from '../utils/paths.js'; -import { Config } from '../config/config.js'; +import { Config, DEFAULT_FILE_FILTERING_OPTIONS } from '../config/config.js'; import { isWithinRoot } from '../utils/fileUtils.js'; /** @@ -28,9 +28,12 @@ export interface LSToolParams { ignore?: string[]; /** - * Whether to respect .gitignore patterns (optional, defaults to true) + * Whether to respect .gitignore and 
.geminiignore patterns (optional, defaults to true) */ - respect_git_ignore?: boolean; + file_filtering_options?: { + respect_git_ignore?: boolean; + respect_gemini_ignore?: boolean; + }; } /** @@ -74,6 +77,7 @@ export class LSTool extends BaseTool { LSTool.Name, 'ReadFolder', 'Lists the names of files and subdirectories directly within a specified directory path. Can optionally ignore entries matching provided glob patterns.', + Icon.Folder, { properties: { path: { @@ -88,10 +92,22 @@ export class LSTool extends BaseTool { }, type: Type.ARRAY, }, - respect_git_ignore: { + file_filtering_options: { description: - 'Optional: Whether to respect .gitignore patterns when listing files. Only available in git repositories. Defaults to true.', - type: Type.BOOLEAN, + 'Optional: Whether to respect ignore patterns from .gitignore or .geminiignore', + type: Type.OBJECT, + properties: { + respect_git_ignore: { + description: + 'Optional: Whether to respect .gitignore patterns when listing files. Only available in git repositories. Defaults to true.', + type: Type.BOOLEAN, + }, + respect_gemini_ignore: { + description: + 'Optional: Whether to respect .geminiignore patterns when listing files. Defaults to true.', + type: Type.BOOLEAN, + }, + }, }, }, required: ['path'], @@ -198,14 +214,25 @@ export class LSTool extends BaseTool { const files = fs.readdirSync(params.path); + const defaultFileIgnores = + this.config.getFileFilteringOptions() ?? DEFAULT_FILE_FILTERING_OPTIONS; + + const fileFilteringOptions = { + respectGitIgnore: + params.file_filtering_options?.respect_git_ignore ?? + defaultFileIgnores.respectGitIgnore, + respectGeminiIgnore: + params.file_filtering_options?.respect_gemini_ignore ?? + defaultFileIgnores.respectGeminiIgnore, + }; + // Get centralized file discovery service - const respectGitIgnore = - params.respect_git_ignore ?? 
- this.config.getFileFilteringRespectGitIgnore(); + const fileDiscovery = this.config.getFileService(); const entries: FileEntry[] = []; let gitIgnoredCount = 0; + let geminiIgnoredCount = 0; if (files.length === 0) { // Changed error message to be more neutral for LLM @@ -226,14 +253,21 @@ export class LSTool extends BaseTool { fullPath, ); - // Check if this file should be git-ignored (only in git repositories) + // Check if this file should be ignored based on git or gemini ignore rules if ( - respectGitIgnore && + fileFilteringOptions.respectGitIgnore && fileDiscovery.shouldGitIgnoreFile(relativePath) ) { gitIgnoredCount++; continue; } + if ( + fileFilteringOptions.respectGeminiIgnore && + fileDiscovery.shouldGeminiIgnoreFile(relativePath) + ) { + geminiIgnoredCount++; + continue; + } try { const stats = fs.statSync(fullPath); @@ -264,13 +298,21 @@ export class LSTool extends BaseTool { .join('\n'); let resultMessage = `Directory listing for ${params.path}:\n${directoryContent}`; + const ignoredMessages = []; if (gitIgnoredCount > 0) { - resultMessage += `\n\n(${gitIgnoredCount} items were git-ignored)`; + ignoredMessages.push(`${gitIgnoredCount} git-ignored`); + } + if (geminiIgnoredCount > 0) { + ignoredMessages.push(`${geminiIgnoredCount} gemini-ignored`); + } + + if (ignoredMessages.length > 0) { + resultMessage += `\n\n(${ignoredMessages.join(', ')})`; } let displayMessage = `Listed ${entries.length} item(s).`; - if (gitIgnoredCount > 0) { - displayMessage += ` (${gitIgnoredCount} git-ignored)`; + if (ignoredMessages.length > 0) { + displayMessage += ` (${ignoredMessages.join(', ')})`; } return { diff --git a/packages/core/src/tools/mcp-client.test.ts b/packages/core/src/tools/mcp-client.test.ts index 353b4f056..4560982ce 100644 --- a/packages/core/src/tools/mcp-client.test.ts +++ b/packages/core/src/tools/mcp-client.test.ts @@ -9,18 +9,23 @@ import { StreamableHTTPClientTransport } from '@modelcontextprotocol/sdk/client/ import { populateMcpServerCommand, 
createTransport, - generateValidName, isEnabled, discoverTools, + discoverPrompts, } from './mcp-client.js'; import { SSEClientTransport } from '@modelcontextprotocol/sdk/client/sse.js'; import * as SdkClientStdioLib from '@modelcontextprotocol/sdk/client/stdio.js'; import * as ClientLib from '@modelcontextprotocol/sdk/client/index.js'; import * as GenAiLib from '@google/genai'; +import { GoogleCredentialProvider } from '../mcp/google-auth-provider.js'; +import { AuthProviderType } from '../config/config.js'; +import { PromptRegistry } from '../prompts/prompt-registry.js'; vi.mock('@modelcontextprotocol/sdk/client/stdio.js'); vi.mock('@modelcontextprotocol/sdk/client/index.js'); vi.mock('@google/genai'); +vi.mock('../mcp/oauth-provider.js'); +vi.mock('../mcp/oauth-token-storage.js'); describe('mcp-client', () => { afterEach(() => { @@ -47,6 +52,77 @@ describe('mcp-client', () => { }); }); + describe('discoverPrompts', () => { + const mockedPromptRegistry = { + registerPrompt: vi.fn(), + } as unknown as PromptRegistry; + + it('should discover and log prompts', async () => { + const mockRequest = vi.fn().mockResolvedValue({ + prompts: [ + { name: 'prompt1', description: 'desc1' }, + { name: 'prompt2' }, + ], + }); + const mockedClient = { + request: mockRequest, + } as unknown as ClientLib.Client; + + await discoverPrompts('test-server', mockedClient, mockedPromptRegistry); + + expect(mockRequest).toHaveBeenCalledWith( + { method: 'prompts/list', params: {} }, + expect.anything(), + ); + }); + + it('should do nothing if no prompts are discovered', async () => { + const mockRequest = vi.fn().mockResolvedValue({ + prompts: [], + }); + const mockedClient = { + request: mockRequest, + } as unknown as ClientLib.Client; + + const consoleLogSpy = vi + .spyOn(console, 'debug') + .mockImplementation(() => { + // no-op + }); + + await discoverPrompts('test-server', mockedClient, mockedPromptRegistry); + + expect(mockRequest).toHaveBeenCalledOnce(); + 
expect(consoleLogSpy).not.toHaveBeenCalled(); + + consoleLogSpy.mockRestore(); + }); + + it('should log an error if discovery fails', async () => { + const testError = new Error('test error'); + testError.message = 'test error'; + const mockRequest = vi.fn().mockRejectedValue(testError); + const mockedClient = { + request: mockRequest, + } as unknown as ClientLib.Client; + + const consoleErrorSpy = vi + .spyOn(console, 'error') + .mockImplementation(() => { + // no-op + }); + + await discoverPrompts('test-server', mockedClient, mockedPromptRegistry); + + expect(mockRequest).toHaveBeenCalledOnce(); + expect(consoleErrorSpy).toHaveBeenCalledWith( + `Error discovering prompts from test-server: ${testError.message}`, + ); + + consoleErrorSpy.mockRestore(); + }); + }); + describe('appendMcpServerCommand', () => { it('should do nothing if no MCP servers or command are configured', () => { const out = populateMcpServerCommand({}, undefined); @@ -83,7 +159,7 @@ describe('mcp-client', () => { describe('should connect via httpUrl', () => { it('without headers', async () => { - const transport = createTransport( + const transport = await createTransport( 'test-server', { httpUrl: 'http://test-server', @@ -97,7 +173,7 @@ describe('mcp-client', () => { }); it('with headers', async () => { - const transport = createTransport( + const transport = await createTransport( 'test-server', { httpUrl: 'http://test-server', @@ -118,7 +194,7 @@ describe('mcp-client', () => { describe('should connect via url', () => { it('without headers', async () => { - const transport = createTransport( + const transport = await createTransport( 'test-server', { url: 'http://test-server', @@ -131,7 +207,7 @@ describe('mcp-client', () => { }); it('with headers', async () => { - const transport = createTransport( + const transport = await createTransport( 'test-server', { url: 'http://test-server', @@ -150,10 +226,10 @@ describe('mcp-client', () => { }); }); - it('should connect via command', () => { + 
it('should connect via command', async () => { const mockedTransport = vi.mocked(SdkClientStdioLib.StdioClientTransport); - createTransport( + await createTransport( 'test-server', { command: 'test-command', @@ -172,91 +248,62 @@ describe('mcp-client', () => { stderr: 'pipe', }); }); - }); - describe('generateValidName', () => { - it('should return a valid name for a simple function', () => { - const funcDecl = { name: 'myFunction' }; - const serverName = 'myServer'; - const result = generateValidName(funcDecl, serverName); - expect(result).toBe('myServer__myFunction'); - }); - it('should prepend the server name', () => { - const funcDecl = { name: 'anotherFunction' }; - const serverName = 'production-server'; - const result = generateValidName(funcDecl, serverName); - expect(result).toBe('production-server__anotherFunction'); - }); + describe('useGoogleCredentialProvider', () => { + it('should use GoogleCredentialProvider when specified', async () => { + const transport = await createTransport( + 'test-server', + { + httpUrl: 'http://test-server', + authProviderType: AuthProviderType.GOOGLE_CREDENTIALS, + oauth: { + scopes: ['scope1'], + }, + }, + false, + ); - it('should replace invalid characters with underscores', () => { - const funcDecl = { name: 'invalid-name with spaces' }; - const serverName = 'test_server'; - const result = generateValidName(funcDecl, serverName); - expect(result).toBe('test_server__invalid-name_with_spaces'); - }); + expect(transport).toBeInstanceOf(StreamableHTTPClientTransport); + // eslint-disable-next-line @typescript-eslint/no-explicit-any + const authProvider = (transport as any)._authProvider; + expect(authProvider).toBeInstanceOf(GoogleCredentialProvider); + }); - it('should truncate long names', () => { - const funcDecl = { - name: 'a_very_long_function_name_that_will_definitely_exceed_the_limit', - }; - const serverName = 'a_long_server_name'; - const result = generateValidName(funcDecl, serverName); - 
expect(result.length).toBe(63); - expect(result).toBe( - 'a_long_server_name__a_very_l___will_definitely_exceed_the_limit', - ); - }); + it('should use GoogleCredentialProvider with SSE transport', async () => { + const transport = await createTransport( + 'test-server', + { + url: 'http://test-server', + authProviderType: AuthProviderType.GOOGLE_CREDENTIALS, + oauth: { + scopes: ['scope1'], + }, + }, + false, + ); - it('should handle names with only invalid characters', () => { - const funcDecl = { name: '!@#$%^&*()' }; - const serverName = 'special-chars'; - const result = generateValidName(funcDecl, serverName); - expect(result).toBe('special-chars____________'); - }); + expect(transport).toBeInstanceOf(SSEClientTransport); + // eslint-disable-next-line @typescript-eslint/no-explicit-any + const authProvider = (transport as any)._authProvider; + expect(authProvider).toBeInstanceOf(GoogleCredentialProvider); + }); - it('should handle names that are already valid', () => { - const funcDecl = { name: 'already_valid' }; - const serverName = 'validator'; - const result = generateValidName(funcDecl, serverName); - expect(result).toBe('validator__already_valid'); - }); - - it('should handle names with leading/trailing invalid characters', () => { - const funcDecl = { name: '-_invalid-_' }; - const serverName = 'trim-test'; - const result = generateValidName(funcDecl, serverName); - expect(result).toBe('trim-test__-_invalid-_'); - }); - - it('should handle names that are exactly 63 characters long', () => { - const longName = 'a'.repeat(45); - const funcDecl = { name: longName }; - const serverName = 'server'; - const result = generateValidName(funcDecl, serverName); - expect(result).toBe(`server__${longName}`); - expect(result.length).toBe(53); - }); - - it('should handle names that are exactly 64 characters long', () => { - const longName = 'a'.repeat(55); - const funcDecl = { name: longName }; - const serverName = 'server'; - const result = 
generateValidName(funcDecl, serverName); - expect(result.length).toBe(63); - expect(result).toBe( - 'server__aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', - ); - }); - - it('should handle names that are longer than 64 characters', () => { - const longName = 'a'.repeat(100); - const funcDecl = { name: longName }; - const serverName = 'long-server'; - const result = generateValidName(funcDecl, serverName); - expect(result.length).toBe(63); - expect(result).toBe( - 'long-server__aaaaaaaaaaaaaaa___aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', - ); + it('should throw an error if no URL is provided with GoogleCredentialProvider', async () => { + await expect( + createTransport( + 'test-server', + { + authProviderType: AuthProviderType.GOOGLE_CREDENTIALS, + oauth: { + scopes: ['scope1'], + }, + }, + false, + ), + ).rejects.toThrow( + 'No URL configured for Google Credentials MCP server', + ); + }); }); }); describe('isEnabled', () => { diff --git a/packages/core/src/tools/mcp-client.ts b/packages/core/src/tools/mcp-client.ts index eb82190b6..d175af1f2 100644 --- a/packages/core/src/tools/mcp-client.ts +++ b/packages/core/src/tools/mcp-client.ts @@ -15,14 +15,32 @@ import { StreamableHTTPClientTransport, StreamableHTTPClientTransportOptions, } from '@modelcontextprotocol/sdk/client/streamableHttp.js'; +import { + Prompt, + ListPromptsResultSchema, + GetPromptResult, + GetPromptResultSchema, +} from '@modelcontextprotocol/sdk/types.js'; import { parse } from 'shell-quote'; -import { MCPServerConfig } from '../config/config.js'; +import { AuthProviderType, MCPServerConfig } from '../config/config.js'; +import { GoogleCredentialProvider } from '../mcp/google-auth-provider.js'; import { DiscoveredMCPTool } from './mcp-tool.js'; -import { FunctionDeclaration, Type, mcpToTool } from '@google/genai'; -import { sanitizeParameters, ToolRegistry } from './tool-registry.js'; + +import { FunctionDeclaration, mcpToTool } from '@google/genai'; +import { ToolRegistry } from 
'./tool-registry.js'; +import { PromptRegistry } from '../prompts/prompt-registry.js'; +import { MCPOAuthProvider } from '../mcp/oauth-provider.js'; +import { OAuthUtils } from '../mcp/oauth-utils.js'; +import { MCPOAuthTokenStorage } from '../mcp/oauth-token-storage.js'; +import { getErrorMessage } from '../utils/errors.js'; export const MCP_DEFAULT_TIMEOUT_MSEC = 10 * 60 * 1000; // default to 10 minutes +export type DiscoveredMCPPrompt = Prompt & { + serverName: string; + invoke: (params: Record) => Promise; +}; + /** * Enum representing the connection status of an MCP server */ @@ -50,13 +68,18 @@ export enum MCPDiscoveryState { /** * Map to track the status of each MCP server within the core package */ -const mcpServerStatusesInternal: Map = new Map(); +const serverStatuses: Map = new Map(); /** * Track the overall MCP discovery state */ let mcpDiscoveryState: MCPDiscoveryState = MCPDiscoveryState.NOT_STARTED; +/** + * Map to track which MCP servers have been discovered to require OAuth + */ +export const mcpServerRequiresOAuth: Map = new Map(); + /** * Event listeners for MCP server status changes */ @@ -94,7 +117,7 @@ function updateMCPServerStatus( serverName: string, status: MCPServerStatus, ): void { - mcpServerStatusesInternal.set(serverName, status); + serverStatuses.set(serverName, status); // Notify all listeners for (const listener of statusChangeListeners) { listener(serverName, status); @@ -105,16 +128,14 @@ function updateMCPServerStatus( * Get the current status of an MCP server */ export function getMCPServerStatus(serverName: string): MCPServerStatus { - return ( - mcpServerStatusesInternal.get(serverName) || MCPServerStatus.DISCONNECTED - ); + return serverStatuses.get(serverName) || MCPServerStatus.DISCONNECTED; } /** * Get all MCP server statuses */ export function getAllMCPServerStatuses(): Map { - return new Map(mcpServerStatusesInternal); + return new Map(serverStatuses); } /** @@ -124,6 +145,165 @@ export function getMCPDiscoveryState(): 
MCPDiscoveryState { return mcpDiscoveryState; } +/** + * Parse www-authenticate header to extract OAuth metadata URI. + * + * @param wwwAuthenticate The www-authenticate header value + * @returns The resource metadata URI if found, null otherwise + */ +function _parseWWWAuthenticate(wwwAuthenticate: string): string | null { + // Parse header like: Bearer realm="MCP Server", resource_metadata_uri="https://..." + const resourceMetadataMatch = wwwAuthenticate.match( + /resource_metadata_uri="([^"]+)"/, + ); + return resourceMetadataMatch ? resourceMetadataMatch[1] : null; +} + +/** + * Extract WWW-Authenticate header from error message string. + * This is a more robust approach than regex matching. + * + * @param errorString The error message string + * @returns The www-authenticate header value if found, null otherwise + */ +function extractWWWAuthenticateHeader(errorString: string): string | null { + // Try multiple patterns to extract the header + const patterns = [ + /www-authenticate:\s*([^\n\r]+)/i, + /WWW-Authenticate:\s*([^\n\r]+)/i, + /"www-authenticate":\s*"([^"]+)"/i, + /'www-authenticate':\s*'([^']+)'/i, + ]; + + for (const pattern of patterns) { + const match = errorString.match(pattern); + if (match) { + return match[1].trim(); + } + } + + return null; +} + +/** + * Handle automatic OAuth discovery and authentication for a server. 
+ * + * @param mcpServerName The name of the MCP server + * @param mcpServerConfig The MCP server configuration + * @param wwwAuthenticate The www-authenticate header value + * @returns True if OAuth was successfully configured and authenticated, false otherwise + */ +async function handleAutomaticOAuth( + mcpServerName: string, + mcpServerConfig: MCPServerConfig, + wwwAuthenticate: string, +): Promise { + try { + console.log(`🔐 '${mcpServerName}' requires OAuth authentication`); + + // Always try to parse the resource metadata URI from the www-authenticate header + let oauthConfig; + const resourceMetadataUri = + OAuthUtils.parseWWWAuthenticateHeader(wwwAuthenticate); + if (resourceMetadataUri) { + oauthConfig = await OAuthUtils.discoverOAuthConfig(resourceMetadataUri); + } else if (mcpServerConfig.url) { + // Fallback: try to discover OAuth config from the base URL for SSE + const sseUrl = new URL(mcpServerConfig.url); + const baseUrl = `${sseUrl.protocol}//${sseUrl.host}`; + oauthConfig = await OAuthUtils.discoverOAuthConfig(baseUrl); + } else if (mcpServerConfig.httpUrl) { + // Fallback: try to discover OAuth config from the base URL for HTTP + const httpUrl = new URL(mcpServerConfig.httpUrl); + const baseUrl = `${httpUrl.protocol}//${httpUrl.host}`; + oauthConfig = await OAuthUtils.discoverOAuthConfig(baseUrl); + } + + if (!oauthConfig) { + console.error( + `❌ Could not configure OAuth for '${mcpServerName}' - please authenticate manually with /mcp auth ${mcpServerName}`, + ); + return false; + } + + // OAuth configuration discovered - proceed with authentication + + // Create OAuth configuration for authentication + const oauthAuthConfig = { + enabled: true, + authorizationUrl: oauthConfig.authorizationUrl, + tokenUrl: oauthConfig.tokenUrl, + scopes: oauthConfig.scopes || [], + }; + + // Perform OAuth authentication + console.log( + `Starting OAuth authentication for server '${mcpServerName}'...`, + ); + await MCPOAuthProvider.authenticate(mcpServerName, 
oauthAuthConfig); + + console.log( + `OAuth authentication successful for server '${mcpServerName}'`, + ); + return true; + } catch (error) { + console.error( + `Failed to handle automatic OAuth for server '${mcpServerName}': ${getErrorMessage(error)}`, + ); + return false; + } +} + +/** + * Create a transport with OAuth token for the given server configuration. + * + * @param mcpServerName The name of the MCP server + * @param mcpServerConfig The MCP server configuration + * @param accessToken The OAuth access token + * @returns The transport with OAuth token, or null if creation fails + */ +async function createTransportWithOAuth( + mcpServerName: string, + mcpServerConfig: MCPServerConfig, + accessToken: string, +): Promise { + try { + if (mcpServerConfig.httpUrl) { + // Create HTTP transport with OAuth token + const oauthTransportOptions: StreamableHTTPClientTransportOptions = { + requestInit: { + headers: { + ...mcpServerConfig.headers, + Authorization: `Bearer ${accessToken}`, + }, + }, + }; + + return new StreamableHTTPClientTransport( + new URL(mcpServerConfig.httpUrl), + oauthTransportOptions, + ); + } else if (mcpServerConfig.url) { + // Create SSE transport with OAuth token in Authorization header + return new SSEClientTransport(new URL(mcpServerConfig.url), { + requestInit: { + headers: { + ...mcpServerConfig.headers, + Authorization: `Bearer ${accessToken}`, + }, + }, + }); + } + + return null; + } catch (error) { + console.error( + `Failed to create OAuth transport for server '${mcpServerName}': ${getErrorMessage(error)}`, + ); + return null; + } +} + /** * Discovers tools from all configured MCP servers and registers them with the tool registry. 
* It orchestrates the connection and discovery process for each server defined in the @@ -138,6 +318,7 @@ export async function discoverMcpTools( mcpServers: Record, mcpServerCommand: string | undefined, toolRegistry: ToolRegistry, + promptRegistry: PromptRegistry, debugMode: boolean, ): Promise { mcpDiscoveryState = MCPDiscoveryState.IN_PROGRESS; @@ -150,6 +331,7 @@ export async function discoverMcpTools( mcpServerName, mcpServerConfig, toolRegistry, + promptRegistry, debugMode, ), ); @@ -193,6 +375,7 @@ export async function connectAndDiscover( mcpServerName: string, mcpServerConfig: MCPServerConfig, toolRegistry: ToolRegistry, + promptRegistry: PromptRegistry, debugMode: boolean, ): Promise { updateMCPServerStatus(mcpServerName, MCPServerStatus.CONNECTING); @@ -205,11 +388,11 @@ export async function connectAndDiscover( ); try { updateMCPServerStatus(mcpServerName, MCPServerStatus.CONNECTED); - mcpClient.onerror = (error) => { console.error(`MCP ERROR (${mcpServerName}):`, error.toString()); updateMCPServerStatus(mcpServerName, MCPServerStatus.DISCONNECTED); }; + await discoverPrompts(mcpServerName, mcpClient, promptRegistry); const tools = await discoverTools( mcpServerName, @@ -224,7 +407,11 @@ export async function connectAndDiscover( throw error; } } catch (error) { - console.error(`Error connecting to MCP server '${mcpServerName}':`, error); + console.error( + `Error connecting to MCP server '${mcpServerName}': ${getErrorMessage( + error, + )}`, + ); updateMCPServerStatus(mcpServerName, MCPServerStatus.DISCONNECTED); } } @@ -259,32 +446,109 @@ export async function discoverTools( continue; } - const toolNameForModel = generateValidName(funcDecl, mcpServerName); - - sanitizeParameters(funcDecl.parameters); - discoveredTools.push( new DiscoveredMCPTool( mcpCallableTool, mcpServerName, - toolNameForModel, - funcDecl.description ?? '', - funcDecl.parameters ?? { type: Type.OBJECT, properties: {} }, funcDecl.name!, + funcDecl.description ?? 
'', + funcDecl.parametersJsonSchema ?? { type: 'object', properties: {} }, mcpServerConfig.timeout ?? MCP_DEFAULT_TIMEOUT_MSEC, mcpServerConfig.trust, ), ); } - if (discoveredTools.length === 0) { - throw Error('No enabled tools found'); - } return discoveredTools; } catch (error) { throw new Error(`Error discovering tools: ${error}`); } } +/** + * Discovers and logs prompts from a connected MCP client. + * It retrieves prompt declarations from the client and logs their names. + * + * @param mcpServerName The name of the MCP server. + * @param mcpClient The active MCP client instance. + */ +export async function discoverPrompts( + mcpServerName: string, + mcpClient: Client, + promptRegistry: PromptRegistry, +): Promise { + try { + const response = await mcpClient.request( + { method: 'prompts/list', params: {} }, + ListPromptsResultSchema, + ); + + for (const prompt of response.prompts) { + promptRegistry.registerPrompt({ + ...prompt, + serverName: mcpServerName, + invoke: (params: Record) => + invokeMcpPrompt(mcpServerName, mcpClient, prompt.name, params), + }); + } + } catch (error) { + // It's okay if this fails, not all servers will have prompts. + // Don't log an error if the method is not found, which is a common case. + if ( + error instanceof Error && + !error.message?.includes('Method not found') + ) { + console.error( + `Error discovering prompts from ${mcpServerName}: ${getErrorMessage( + error, + )}`, + ); + } + } +} + +/** + * Invokes a prompt on a connected MCP client. + * + * @param mcpServerName The name of the MCP server. + * @param mcpClient The active MCP client instance. + * @param promptName The name of the prompt to invoke. + * @param promptParams The parameters to pass to the prompt. + * @returns A promise that resolves to the result of the prompt invocation. 
+ */ +export async function invokeMcpPrompt( + mcpServerName: string, + mcpClient: Client, + promptName: string, + promptParams: Record, +): Promise { + try { + const response = await mcpClient.request( + { + method: 'prompts/get', + params: { + name: promptName, + arguments: promptParams, + }, + }, + GetPromptResultSchema, + ); + + return response; + } catch (error) { + if ( + error instanceof Error && + !error.message?.includes('Method not found') + ) { + console.error( + `Error invoking prompt '${promptName}' from ${mcpServerName} ${promptParams}: ${getErrorMessage( + error, + )}`, + ); + } + throw error; + } +} + /** * Creates and connects an MCP client to a server based on the provided configuration. * It determines the appropriate transport (Stdio, SSE, or Streamable HTTP) and @@ -318,7 +582,7 @@ export async function connectToMcpServer( } try { - const transport = createTransport( + const transport = await createTransport( mcpServerName, mcpServerConfig, debugMode, @@ -333,40 +597,419 @@ export async function connectToMcpServer( throw error; } } catch (error) { - // Create a safe config object that excludes sensitive information - const safeConfig = { - command: mcpServerConfig.command, - url: mcpServerConfig.url, - httpUrl: mcpServerConfig.httpUrl, - cwd: mcpServerConfig.cwd, - timeout: mcpServerConfig.timeout, - trust: mcpServerConfig.trust, - // Exclude args, env, and headers which may contain sensitive data - }; + // Check if this is a 401 error that might indicate OAuth is required + const errorString = String(error); + if ( + errorString.includes('401') && + (mcpServerConfig.httpUrl || mcpServerConfig.url) + ) { + mcpServerRequiresOAuth.set(mcpServerName, true); + // Only trigger automatic OAuth discovery for HTTP servers or when OAuth is explicitly configured + // For SSE servers, we should not trigger new OAuth flows automatically + const shouldTriggerOAuth = + mcpServerConfig.httpUrl || mcpServerConfig.oauth?.enabled; - let errorString = - `failed 
to start or connect to MCP server '${mcpServerName}' ` + - `${JSON.stringify(safeConfig)}; \n${error}`; - if (process.env.SANDBOX) { - errorString += `\nMake sure it is available in the sandbox`; + if (!shouldTriggerOAuth) { + // For SSE servers without explicit OAuth config, if a token was found but rejected, report it accurately. + const credentials = await MCPOAuthTokenStorage.getToken(mcpServerName); + if (credentials) { + const hasStoredTokens = await MCPOAuthProvider.getValidToken( + mcpServerName, + { + // Pass client ID if available + clientId: credentials.clientId, + }, + ); + if (hasStoredTokens) { + console.log( + `Stored OAuth token for SSE server '${mcpServerName}' was rejected. ` + + `Please re-authenticate using: /mcp auth ${mcpServerName}`, + ); + } else { + console.log( + `401 error received for SSE server '${mcpServerName}' without OAuth configuration. ` + + `Please authenticate using: /mcp auth ${mcpServerName}`, + ); + } + } + throw new Error( + `401 error received for SSE server '${mcpServerName}' without OAuth configuration. 
` + + `Please authenticate using: /mcp auth ${mcpServerName}`, + ); + } + + // Try to extract www-authenticate header from the error + let wwwAuthenticate = extractWWWAuthenticateHeader(errorString); + + // If we didn't get the header from the error string, try to get it from the server + if (!wwwAuthenticate && mcpServerConfig.url) { + console.log( + `No www-authenticate header in error, trying to fetch it from server...`, + ); + try { + const response = await fetch(mcpServerConfig.url, { + method: 'HEAD', + headers: { + Accept: 'text/event-stream', + }, + signal: AbortSignal.timeout(5000), + }); + + if (response.status === 401) { + wwwAuthenticate = response.headers.get('www-authenticate'); + if (wwwAuthenticate) { + console.log( + `Found www-authenticate header from server: ${wwwAuthenticate}`, + ); + } + } + } catch (fetchError) { + console.debug( + `Failed to fetch www-authenticate header: ${getErrorMessage(fetchError)}`, + ); + } + } + + if (wwwAuthenticate) { + console.log( + `Received 401 with www-authenticate header: ${wwwAuthenticate}`, + ); + + // Try automatic OAuth discovery and authentication + const oauthSuccess = await handleAutomaticOAuth( + mcpServerName, + mcpServerConfig, + wwwAuthenticate, + ); + if (oauthSuccess) { + // Retry connection with OAuth token + console.log( + `Retrying connection to '${mcpServerName}' with OAuth token...`, + ); + + // Get the valid token - we need to create a proper OAuth config + // The token should already be available from the authentication process + const credentials = + await MCPOAuthTokenStorage.getToken(mcpServerName); + if (credentials) { + const accessToken = await MCPOAuthProvider.getValidToken( + mcpServerName, + { + // Pass client ID if available + clientId: credentials.clientId, + }, + ); + + if (accessToken) { + // Create transport with OAuth token + const oauthTransport = await createTransportWithOAuth( + mcpServerName, + mcpServerConfig, + accessToken, + ); + if (oauthTransport) { + try { + await 
mcpClient.connect(oauthTransport, { + timeout: + mcpServerConfig.timeout ?? MCP_DEFAULT_TIMEOUT_MSEC, + }); + // Connection successful with OAuth + return mcpClient; + } catch (retryError) { + console.error( + `Failed to connect with OAuth token: ${getErrorMessage( + retryError, + )}`, + ); + throw retryError; + } + } else { + console.error( + `Failed to create OAuth transport for server '${mcpServerName}'`, + ); + throw new Error( + `Failed to create OAuth transport for server '${mcpServerName}'`, + ); + } + } else { + console.error( + `Failed to get OAuth token for server '${mcpServerName}'`, + ); + throw new Error( + `Failed to get OAuth token for server '${mcpServerName}'`, + ); + } + } else { + console.error( + `Failed to get credentials for server '${mcpServerName}' after successful OAuth authentication`, + ); + throw new Error( + `Failed to get credentials for server '${mcpServerName}' after successful OAuth authentication`, + ); + } + } else { + console.error( + `Failed to handle automatic OAuth for server '${mcpServerName}'`, + ); + throw new Error( + `Failed to handle automatic OAuth for server '${mcpServerName}'`, + ); + } + } else { + // No www-authenticate header found, but we got a 401 + // Only try OAuth discovery for HTTP servers or when OAuth is explicitly configured + // For SSE servers, we should not trigger new OAuth flows automatically + const shouldTryDiscovery = + mcpServerConfig.httpUrl || mcpServerConfig.oauth?.enabled; + + if (!shouldTryDiscovery) { + const credentials = + await MCPOAuthTokenStorage.getToken(mcpServerName); + if (credentials) { + const hasStoredTokens = await MCPOAuthProvider.getValidToken( + mcpServerName, + { + // Pass client ID if available + clientId: credentials.clientId, + }, + ); + if (hasStoredTokens) { + console.log( + `Stored OAuth token for SSE server '${mcpServerName}' was rejected. 
` + + `Please re-authenticate using: /mcp auth ${mcpServerName}`, + ); + } else { + console.log( + `401 error received for SSE server '${mcpServerName}' without OAuth configuration. ` + + `Please authenticate using: /mcp auth ${mcpServerName}`, + ); + } + } + throw new Error( + `401 error received for SSE server '${mcpServerName}' without OAuth configuration. ` + + `Please authenticate using: /mcp auth ${mcpServerName}`, + ); + } + + // For SSE servers, try to discover OAuth configuration from the base URL + console.log(`🔍 Attempting OAuth discovery for '${mcpServerName}'...`); + + if (mcpServerConfig.url) { + const sseUrl = new URL(mcpServerConfig.url); + const baseUrl = `${sseUrl.protocol}//${sseUrl.host}`; + + try { + // Try to discover OAuth configuration from the base URL + const oauthConfig = await OAuthUtils.discoverOAuthConfig(baseUrl); + if (oauthConfig) { + console.log( + `Discovered OAuth configuration from base URL for server '${mcpServerName}'`, + ); + + // Create OAuth configuration for authentication + const oauthAuthConfig = { + enabled: true, + authorizationUrl: oauthConfig.authorizationUrl, + tokenUrl: oauthConfig.tokenUrl, + scopes: oauthConfig.scopes || [], + }; + + // Perform OAuth authentication + console.log( + `Starting OAuth authentication for server '${mcpServerName}'...`, + ); + await MCPOAuthProvider.authenticate( + mcpServerName, + oauthAuthConfig, + ); + + // Retry connection with OAuth token + const credentials = + await MCPOAuthTokenStorage.getToken(mcpServerName); + if (credentials) { + const accessToken = await MCPOAuthProvider.getValidToken( + mcpServerName, + { + // Pass client ID if available + clientId: credentials.clientId, + }, + ); + if (accessToken) { + // Create transport with OAuth token + const oauthTransport = await createTransportWithOAuth( + mcpServerName, + mcpServerConfig, + accessToken, + ); + if (oauthTransport) { + try { + await mcpClient.connect(oauthTransport, { + timeout: + mcpServerConfig.timeout ?? 
MCP_DEFAULT_TIMEOUT_MSEC, + }); + // Connection successful with OAuth + return mcpClient; + } catch (retryError) { + console.error( + `Failed to connect with OAuth token: ${getErrorMessage( + retryError, + )}`, + ); + throw retryError; + } + } else { + console.error( + `Failed to create OAuth transport for server '${mcpServerName}'`, + ); + throw new Error( + `Failed to create OAuth transport for server '${mcpServerName}'`, + ); + } + } else { + console.error( + `Failed to get OAuth token for server '${mcpServerName}'`, + ); + throw new Error( + `Failed to get OAuth token for server '${mcpServerName}'`, + ); + } + } else { + console.error( + `Failed to get stored credentials for server '${mcpServerName}'`, + ); + throw new Error( + `Failed to get stored credentials for server '${mcpServerName}'`, + ); + } + } else { + console.error( + `❌ Could not configure OAuth for '${mcpServerName}' - please authenticate manually with /mcp auth ${mcpServerName}`, + ); + throw new Error( + `OAuth configuration failed for '${mcpServerName}'. Please authenticate manually with /mcp auth ${mcpServerName}`, + ); + } + } catch (discoveryError) { + console.error( + `❌ OAuth discovery failed for '${mcpServerName}' - please authenticate manually with /mcp auth ${mcpServerName}`, + ); + throw discoveryError; + } + } else { + console.error( + `❌ '${mcpServerName}' requires authentication but no OAuth configuration found`, + ); + throw new Error( + `MCP server '${mcpServerName}' requires authentication. 
Please configure OAuth or check server settings.`, + ); + } + } + } else { + // Handle other connection errors + // Create a concise error message + const errorMessage = (error as Error).message || String(error); + const isNetworkError = + errorMessage.includes('ENOTFOUND') || + errorMessage.includes('ECONNREFUSED'); + + let conciseError: string; + if (isNetworkError) { + conciseError = `Cannot connect to '${mcpServerName}' - server may be down or URL incorrect`; + } else { + conciseError = `Connection failed for '${mcpServerName}': ${errorMessage}`; + } + + if (process.env.SANDBOX) { + conciseError += ` (check sandbox availability)`; + } + + throw new Error(conciseError); } - throw new Error(errorString); } } /** Visible for Testing */ -export function createTransport( +export async function createTransport( mcpServerName: string, mcpServerConfig: MCPServerConfig, debugMode: boolean, -): Transport { +): Promise { + if ( + mcpServerConfig.authProviderType === AuthProviderType.GOOGLE_CREDENTIALS + ) { + const provider = new GoogleCredentialProvider(mcpServerConfig); + const transportOptions: + | StreamableHTTPClientTransportOptions + | SSEClientTransportOptions = { + authProvider: provider, + }; + if (mcpServerConfig.httpUrl) { + return new StreamableHTTPClientTransport( + new URL(mcpServerConfig.httpUrl), + transportOptions, + ); + } else if (mcpServerConfig.url) { + return new SSEClientTransport( + new URL(mcpServerConfig.url), + transportOptions, + ); + } + throw new Error('No URL configured for Google Credentials MCP server'); + } + + // Check if we have OAuth configuration or stored tokens + let accessToken: string | null = null; + let hasOAuthConfig = mcpServerConfig.oauth?.enabled; + + if (hasOAuthConfig && mcpServerConfig.oauth) { + accessToken = await MCPOAuthProvider.getValidToken( + mcpServerName, + mcpServerConfig.oauth, + ); + + if (!accessToken) { + console.error( + `MCP server '${mcpServerName}' requires OAuth authentication. 
` + + `Please authenticate using the /mcp auth command.`, + ); + throw new Error( + `MCP server '${mcpServerName}' requires OAuth authentication. ` + + `Please authenticate using the /mcp auth command.`, + ); + } + } else { + // Check if we have stored OAuth tokens for this server (from previous authentication) + const credentials = await MCPOAuthTokenStorage.getToken(mcpServerName); + if (credentials) { + accessToken = await MCPOAuthProvider.getValidToken(mcpServerName, { + // Pass client ID if available + clientId: credentials.clientId, + }); + + if (accessToken) { + hasOAuthConfig = true; + console.log(`Found stored OAuth token for server '${mcpServerName}'`); + } + } + } + if (mcpServerConfig.httpUrl) { const transportOptions: StreamableHTTPClientTransportOptions = {}; - if (mcpServerConfig.headers) { + + // Set up headers with OAuth token if available + if (hasOAuthConfig && accessToken) { + transportOptions.requestInit = { + headers: { + ...mcpServerConfig.headers, + Authorization: `Bearer ${accessToken}`, + }, + }; + } else if (mcpServerConfig.headers) { transportOptions.requestInit = { headers: mcpServerConfig.headers, }; } + return new StreamableHTTPClientTransport( new URL(mcpServerConfig.httpUrl), transportOptions, @@ -375,11 +1018,21 @@ export function createTransport( if (mcpServerConfig.url) { const transportOptions: SSEClientTransportOptions = {}; - if (mcpServerConfig.headers) { + + // Set up headers with OAuth token if available + if (hasOAuthConfig && accessToken) { + transportOptions.requestInit = { + headers: { + ...mcpServerConfig.headers, + Authorization: `Bearer ${accessToken}`, + }, + }; + } else if (mcpServerConfig.headers) { transportOptions.requestInit = { headers: mcpServerConfig.headers, }; } + return new SSEClientTransport( new URL(mcpServerConfig.url), transportOptions, @@ -411,26 +1064,6 @@ export function createTransport( ); } -/** Visible for testing */ -export function generateValidName( - funcDecl: FunctionDeclaration, - 
mcpServerName: string, -) { - // Replace invalid characters (based on 400 error message from Gemini API) with underscores - let validToolname = funcDecl.name!.replace(/[^a-zA-Z0-9_.-]/g, '_'); - - // Prepend MCP server name to avoid conflicts with other tools - validToolname = mcpServerName + '__' + validToolname; - - // If longer than 63 characters, replace middle with '___' - // (Gemini API says max length 64, but actual limit seems to be 63) - if (validToolname.length > 63) { - validToolname = - validToolname.slice(0, 28) + '___' + validToolname.slice(-32); - } - return validToolname; -} - /** Visible for testing */ export function isEnabled( funcDecl: FunctionDeclaration, diff --git a/packages/core/src/tools/mcp-tool.test.ts b/packages/core/src/tools/mcp-tool.test.ts index d972efdb4..b5843b95f 100644 --- a/packages/core/src/tools/mcp-tool.test.ts +++ b/packages/core/src/tools/mcp-tool.test.ts @@ -14,7 +14,7 @@ import { afterEach, Mocked, } from 'vitest'; -import { DiscoveredMCPTool } from './mcp-tool.js'; // Added getStringifiedResultForDisplay +import { DiscoveredMCPTool, generateValidName } from './mcp-tool.js'; // Added getStringifiedResultForDisplay import { ToolResult, ToolConfirmationOutcome } from './tools.js'; // Added ToolConfirmationOutcome import { CallableTool, Part } from '@google/genai'; @@ -29,9 +29,42 @@ const mockCallableToolInstance: Mocked = { // Add other methods if DiscoveredMCPTool starts using them }; +describe('generateValidName', () => { + it('should return a valid name for a simple function', () => { + expect(generateValidName('myFunction')).toBe('myFunction'); + }); + + it('should replace invalid characters with underscores', () => { + expect(generateValidName('invalid-name with spaces')).toBe( + 'invalid-name_with_spaces', + ); + }); + + it('should truncate long names', () => { + expect(generateValidName('x'.repeat(80))).toBe( + 'xxxxxxxxxxxxxxxxxxxxxxxxxxxx___xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx', + ); + }); + + it('should handle names 
with only invalid characters', () => { + expect(generateValidName('!@#$%^&*()')).toBe('__________'); + }); + + it('should handle names that are exactly 63 characters long', () => { + expect(generateValidName('a'.repeat(63)).length).toBe(63); + }); + + it('should handle names that are exactly 64 characters long', () => { + expect(generateValidName('a'.repeat(64)).length).toBe(63); + }); + + it('should handle names that are longer than 64 characters', () => { + expect(generateValidName('a'.repeat(80)).length).toBe(63); + }); +}); + describe('DiscoveredMCPTool', () => { const serverName = 'mock-mcp-server'; - const toolNameForModel = 'test-mcp-tool-for-model'; const serverToolName = 'actual-server-tool-name'; const baseDescription = 'A test MCP tool.'; const inputSchema: Record = { @@ -52,46 +85,32 @@ describe('DiscoveredMCPTool', () => { }); describe('constructor', () => { - it('should set properties correctly (non-generic server)', () => { + it('should set properties correctly', () => { const tool = new DiscoveredMCPTool( mockCallableToolInstance, - serverName, // serverName is 'mock-mcp-server', not 'mcp' - toolNameForModel, + serverName, + serverToolName, baseDescription, inputSchema, - serverToolName, ); - expect(tool.name).toBe(toolNameForModel); - expect(tool.schema.name).toBe(toolNameForModel); + expect(tool.name).toBe(serverToolName); + expect(tool.schema.name).toBe(serverToolName); expect(tool.schema.description).toBe(baseDescription); - expect(tool.schema.parameters).toEqual(inputSchema); + expect(tool.schema.parameters).toBeUndefined(); + expect(tool.schema.parametersJsonSchema).toEqual(inputSchema); expect(tool.serverToolName).toBe(serverToolName); expect(tool.timeout).toBeUndefined(); }); - it('should set properties correctly (generic "mcp" server)', () => { - const genericServerName = 'mcp'; - const tool = new DiscoveredMCPTool( - mockCallableToolInstance, - genericServerName, // serverName is 'mcp' - toolNameForModel, - baseDescription, - inputSchema, 
- serverToolName, - ); - expect(tool.schema.description).toBe(baseDescription); - }); - it('should accept and store a custom timeout', () => { const customTimeout = 5000; const tool = new DiscoveredMCPTool( mockCallableToolInstance, serverName, - toolNameForModel, + serverToolName, baseDescription, inputSchema, - serverToolName, customTimeout, ); expect(tool.timeout).toBe(customTimeout); @@ -103,10 +122,9 @@ describe('DiscoveredMCPTool', () => { const tool = new DiscoveredMCPTool( mockCallableToolInstance, serverName, - toolNameForModel, + serverToolName, baseDescription, inputSchema, - serverToolName, ); const params = { param: 'testValue' }; const mockToolSuccessResultObject = { @@ -143,10 +161,9 @@ describe('DiscoveredMCPTool', () => { const tool = new DiscoveredMCPTool( mockCallableToolInstance, serverName, - toolNameForModel, + serverToolName, baseDescription, inputSchema, - serverToolName, ); const params = { param: 'testValue' }; const mockMcpToolResponsePartsEmpty: Part[] = []; @@ -159,10 +176,9 @@ describe('DiscoveredMCPTool', () => { const tool = new DiscoveredMCPTool( mockCallableToolInstance, serverName, - toolNameForModel, + serverToolName, baseDescription, inputSchema, - serverToolName, ); const params = { param: 'failCase' }; const expectedError = new Error('MCP call failed'); @@ -179,10 +195,9 @@ describe('DiscoveredMCPTool', () => { const tool = new DiscoveredMCPTool( mockCallableToolInstance, serverName, - toolNameForModel, + serverToolName, baseDescription, inputSchema, - serverToolName, undefined, true, ); @@ -196,10 +211,9 @@ describe('DiscoveredMCPTool', () => { const tool = new DiscoveredMCPTool( mockCallableToolInstance, serverName, - toolNameForModel, + serverToolName, baseDescription, inputSchema, - serverToolName, ); expect( await tool.shouldConfirmExecute({}, new AbortController().signal), @@ -212,10 +226,9 @@ describe('DiscoveredMCPTool', () => { const tool = new DiscoveredMCPTool( mockCallableToolInstance, serverName, - 
toolNameForModel, + serverToolName, baseDescription, inputSchema, - serverToolName, ); expect( await tool.shouldConfirmExecute({}, new AbortController().signal), @@ -226,10 +239,9 @@ describe('DiscoveredMCPTool', () => { const tool = new DiscoveredMCPTool( mockCallableToolInstance, serverName, - toolNameForModel, + serverToolName, baseDescription, inputSchema, - serverToolName, ); const confirmation = await tool.shouldConfirmExecute( {}, @@ -257,10 +269,9 @@ describe('DiscoveredMCPTool', () => { const tool = new DiscoveredMCPTool( mockCallableToolInstance, serverName, - toolNameForModel, + serverToolName, baseDescription, inputSchema, - serverToolName, ); const confirmation = await tool.shouldConfirmExecute( {}, @@ -288,10 +299,9 @@ describe('DiscoveredMCPTool', () => { const tool = new DiscoveredMCPTool( mockCallableToolInstance, serverName, - toolNameForModel, + serverToolName, baseDescription, inputSchema, - serverToolName, ); const toolAllowlistKey = `${serverName}.${serverToolName}`; const confirmation = await tool.shouldConfirmExecute( @@ -315,5 +325,77 @@ describe('DiscoveredMCPTool', () => { ); } }); + + it('should handle Cancel confirmation outcome', async () => { + const tool = new DiscoveredMCPTool( + mockCallableToolInstance, + serverName, + serverToolName, + baseDescription, + inputSchema, + ); + const confirmation = await tool.shouldConfirmExecute( + {}, + new AbortController().signal, + ); + expect(confirmation).not.toBe(false); + if ( + confirmation && + typeof confirmation === 'object' && + 'onConfirm' in confirmation && + typeof confirmation.onConfirm === 'function' + ) { + // Cancel should not add anything to allowlist + await confirmation.onConfirm(ToolConfirmationOutcome.Cancel); + expect((DiscoveredMCPTool as any).allowlist.has(serverName)).toBe( + false, + ); + expect( + (DiscoveredMCPTool as any).allowlist.has( + `${serverName}.${serverToolName}`, + ), + ).toBe(false); + } else { + throw new Error( + 'Confirmation details or onConfirm not in 
expected format', + ); + } + }); + + it('should handle ProceedOnce confirmation outcome', async () => { + const tool = new DiscoveredMCPTool( + mockCallableToolInstance, + serverName, + serverToolName, + baseDescription, + inputSchema, + ); + const confirmation = await tool.shouldConfirmExecute( + {}, + new AbortController().signal, + ); + expect(confirmation).not.toBe(false); + if ( + confirmation && + typeof confirmation === 'object' && + 'onConfirm' in confirmation && + typeof confirmation.onConfirm === 'function' + ) { + // ProceedOnce should not add anything to allowlist + await confirmation.onConfirm(ToolConfirmationOutcome.ProceedOnce); + expect((DiscoveredMCPTool as any).allowlist.has(serverName)).toBe( + false, + ); + expect( + (DiscoveredMCPTool as any).allowlist.has( + `${serverName}.${serverToolName}`, + ), + ).toBe(false); + } else { + throw new Error( + 'Confirmation details or onConfirm not in expected format', + ); + } + }); }); }); diff --git a/packages/core/src/tools/mcp-tool.ts b/packages/core/src/tools/mcp-tool.ts index cc4739a45..9e814bba3 100644 --- a/packages/core/src/tools/mcp-tool.ts +++ b/packages/core/src/tools/mcp-tool.ts @@ -10,8 +10,15 @@ import { ToolCallConfirmationDetails, ToolConfirmationOutcome, ToolMcpConfirmationDetails, + Icon, } from './tools.js'; -import { CallableTool, Part, FunctionCall, Schema } from '@google/genai'; +import { + CallableTool, + Part, + FunctionCall, + FunctionDeclaration, + Type, +} from '@google/genai'; type ToolParams = Record; @@ -21,23 +28,49 @@ export class DiscoveredMCPTool extends BaseTool { constructor( private readonly mcpTool: CallableTool, readonly serverName: string, - readonly name: string, - readonly description: string, - readonly parameterSchema: Schema, readonly serverToolName: string, + description: string, + readonly parameterSchemaJson: unknown, readonly timeout?: number, readonly trust?: boolean, + nameOverride?: string, ) { super( - name, + nameOverride ?? 
generateValidName(serverToolName), `${serverToolName} (${serverName} MCP Server)`, description, - parameterSchema, + Icon.Hammer, + { type: Type.OBJECT }, // this is a dummy Schema for MCP, will be not be used to construct the FunctionDeclaration true, // isOutputMarkdown false, // canUpdateOutput ); } + asFullyQualifiedTool(): DiscoveredMCPTool { + return new DiscoveredMCPTool( + this.mcpTool, + this.serverName, + this.serverToolName, + this.description, + this.parameterSchemaJson, + this.timeout, + this.trust, + `${this.serverName}__${this.serverToolName}`, + ); + } + + /** + * Overrides the base schema to use parametersJsonSchema when building + * FunctionDeclaration + */ + override get schema(): FunctionDeclaration { + return { + name: this.name, + description: this.description, + parametersJsonSchema: this.parameterSchemaJson, + }; + } + async shouldConfirmExecute( _params: ToolParams, _abortSignal: AbortSignal, @@ -53,7 +86,7 @@ export class DiscoveredMCPTool extends BaseTool { DiscoveredMCPTool.allowlist.has(serverAllowListKey) || DiscoveredMCPTool.allowlist.has(toolAllowListKey) ) { - return false; // server and/or tool already allow listed + return false; // server and/or tool already allowlisted } const confirmationDetails: ToolMcpConfirmationDetails = { @@ -146,3 +179,17 @@ function getStringifiedResultForDisplay(result: Part[]) { return '```json\n' + JSON.stringify(processedResults, null, 2) + '\n```'; } + +/** Visible for testing */ +export function generateValidName(name: string) { + // Replace invalid characters (based on 400 error message from Gemini API) with underscores + let validToolname = name.replace(/[^a-zA-Z0-9_.-]/g, '_'); + + // If longer than 63 characters, replace middle with '___' + // (Gemini API says max length 64, but actual limit seems to be 63) + if (validToolname.length > 63) { + validToolname = + validToolname.slice(0, 28) + '___' + validToolname.slice(-32); + } + return validToolname; +} diff --git 
a/packages/core/src/tools/memoryTool.test.ts b/packages/core/src/tools/memoryTool.test.ts index 2caa27840..aff0cc2ef 100644 --- a/packages/core/src/tools/memoryTool.test.ts +++ b/packages/core/src/tools/memoryTool.test.ts @@ -87,7 +87,7 @@ describe('MemoryTool', () => { describe('performAddMemoryEntry (static method)', () => { const testFilePath = path.join( '/mock/home', - '.qwen', + '.gemini', DEFAULT_CONTEXT_FILENAME, // Use the default for basic tests ); @@ -207,7 +207,7 @@ describe('MemoryTool', () => { // Use getCurrentGeminiMdFilename for the default expectation before any setGeminiMdFilename calls in a test const expectedFilePath = path.join( '/mock/home', - '.qwen', + '.gemini', getCurrentGeminiMdFilename(), // This will be DEFAULT_CONTEXT_FILENAME unless changed by a test ); diff --git a/packages/core/src/tools/memoryTool.ts b/packages/core/src/tools/memoryTool.ts index 65b180a90..f0f1e16b1 100644 --- a/packages/core/src/tools/memoryTool.ts +++ b/packages/core/src/tools/memoryTool.ts @@ -4,7 +4,7 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { BaseTool, ToolResult } from './tools.js'; +import { BaseTool, Icon, ToolResult } from './tools.js'; import { FunctionDeclaration, Type } from '@google/genai'; import * as fs from 'fs/promises'; import * as path from 'path'; @@ -46,8 +46,8 @@ Do NOT use this tool: - \`fact\` (string, required): The specific fact or piece of information to remember. This should be a clear, self-contained statement. For example, if the user says "My favorite color is blue", the fact would be "My favorite color is blue". `; -export const GEMINI_CONFIG_DIR = '.qwen'; -export const DEFAULT_CONTEXT_FILENAME = 'QWEN.md'; +export const GEMINI_CONFIG_DIR = '.gemini'; +export const DEFAULT_CONTEXT_FILENAME = 'GEMINI.md'; export const MEMORY_SECTION_HEADER = '## Gemini Added Memories'; // This variable will hold the currently configured filename for GEMINI.md context files. 
@@ -105,6 +105,7 @@ export class MemoryTool extends BaseTool { MemoryTool.Name, 'Save Memory', memoryToolDescription, + Icon.LightBulb, memoryToolSchemaData.parameters as Record, ); } diff --git a/packages/core/src/tools/modifiable-tool.test.ts b/packages/core/src/tools/modifiable-tool.test.ts index d26729202..47cf41fe6 100644 --- a/packages/core/src/tools/modifiable-tool.test.ts +++ b/packages/core/src/tools/modifiable-tool.test.ts @@ -4,15 +4,7 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { - vi, - describe, - it, - expect, - beforeEach, - afterEach, - type Mock, -} from 'vitest'; +import { vi, describe, it, expect, beforeEach, afterEach } from 'vitest'; import { modifyWithEditor, ModifyContext, @@ -21,6 +13,7 @@ import { } from './modifiable-tool.js'; import { EditorType } from '../utils/editor.js'; import fs from 'fs'; +import fsp from 'fs/promises'; import os from 'os'; import * as path from 'path'; @@ -36,9 +29,6 @@ vi.mock('diff', () => ({ createPatch: mockCreatePatch, })); -vi.mock('fs'); -vi.mock('os'); - interface TestParams { filePath: string; someOtherParam: string; @@ -46,7 +36,7 @@ interface TestParams { } describe('modifyWithEditor', () => { - let tempDir: string; + let testProjectDir: string; let mockModifyContext: ModifyContext; let mockParams: TestParams; let currentContent: string; @@ -54,17 +44,19 @@ describe('modifyWithEditor', () => { let modifiedContent: string; let abortSignal: AbortSignal; - beforeEach(() => { + beforeEach(async () => { vi.resetAllMocks(); - tempDir = '/tmp/test-dir'; + testProjectDir = await fsp.mkdtemp( + path.join(os.tmpdir(), 'modifiable-tool-test-'), + ); abortSignal = new AbortController().signal; currentContent = 'original content\nline 2\nline 3'; proposedContent = 'modified content\nline 2\nline 3'; modifiedContent = 'user modified content\nline 2\nline 3\nnew line'; mockParams = { - filePath: path.join(tempDir, 'test.txt'), + filePath: path.join(testProjectDir, 'test.txt'), someOtherParam: 'value', }; @@ 
-81,26 +73,18 @@ describe('modifyWithEditor', () => { })), }; - (os.tmpdir as Mock).mockReturnValue(tempDir); - - (fs.existsSync as Mock).mockReturnValue(true); - (fs.mkdirSync as Mock).mockImplementation(() => undefined); - (fs.writeFileSync as Mock).mockImplementation(() => {}); - (fs.unlinkSync as Mock).mockImplementation(() => {}); - - (fs.readFileSync as Mock).mockImplementation((filePath: string) => { - if (filePath.includes('-new-')) { - return modifiedContent; - } - return currentContent; + mockOpenDiff.mockImplementation(async (_oldPath, newPath) => { + await fsp.writeFile(newPath, modifiedContent, 'utf8'); }); mockCreatePatch.mockReturnValue('mock diff content'); - mockOpenDiff.mockResolvedValue(undefined); }); - afterEach(() => { + afterEach(async () => { vi.restoreAllMocks(); + await fsp.rm(testProjectDir, { recursive: true, force: true }); + const diffDir = path.join(os.tmpdir(), 'gemini-cli-tool-modify-diffs'); + await fsp.rm(diffDir, { recursive: true, force: true }); }); describe('successful modification', () => { @@ -120,38 +104,8 @@ describe('modifyWithEditor', () => { ); expect(mockModifyContext.getFilePath).toHaveBeenCalledWith(mockParams); - expect(fs.writeFileSync).toHaveBeenCalledTimes(2); - expect(fs.writeFileSync).toHaveBeenNthCalledWith( - 1, - expect.stringContaining( - path.join(tempDir, 'gemini-cli-tool-modify-diffs'), - ), - currentContent, - 'utf8', - ); - expect(fs.writeFileSync).toHaveBeenNthCalledWith( - 2, - expect.stringContaining( - path.join(tempDir, 'gemini-cli-tool-modify-diffs'), - ), - proposedContent, - 'utf8', - ); - - expect(mockOpenDiff).toHaveBeenCalledWith( - expect.stringContaining('-old-'), - expect.stringContaining('-new-'), - 'vscode', - ); - - expect(fs.readFileSync).toHaveBeenCalledWith( - expect.stringContaining('-old-'), - 'utf8', - ); - expect(fs.readFileSync).toHaveBeenCalledWith( - expect.stringContaining('-new-'), - 'utf8', - ); + expect(mockOpenDiff).toHaveBeenCalledOnce(); + const [oldFilePath, 
newFilePath] = mockOpenDiff.mock.calls[0]; expect(mockModifyContext.createUpdatedParams).toHaveBeenCalledWith( currentContent, @@ -171,15 +125,9 @@ describe('modifyWithEditor', () => { }), ); - expect(fs.unlinkSync).toHaveBeenCalledTimes(2); - expect(fs.unlinkSync).toHaveBeenNthCalledWith( - 1, - expect.stringContaining('-old-'), - ); - expect(fs.unlinkSync).toHaveBeenNthCalledWith( - 2, - expect.stringContaining('-new-'), - ); + // Check that temp files are deleted. + await expect(fsp.access(oldFilePath)).rejects.toThrow(); + await expect(fsp.access(newFilePath)).rejects.toThrow(); expect(result).toEqual({ updatedParams: { @@ -192,7 +140,8 @@ describe('modifyWithEditor', () => { }); it('should create temp directory if it does not exist', async () => { - (fs.existsSync as Mock).mockReturnValue(false); + const diffDir = path.join(os.tmpdir(), 'gemini-cli-tool-modify-diffs'); + await fsp.rm(diffDir, { recursive: true, force: true }).catch(() => {}); await modifyWithEditor( mockParams, @@ -201,14 +150,15 @@ describe('modifyWithEditor', () => { abortSignal, ); - expect(fs.mkdirSync).toHaveBeenCalledWith( - path.join(tempDir, 'gemini-cli-tool-modify-diffs'), - { recursive: true }, - ); + const stats = await fsp.stat(diffDir); + expect(stats.isDirectory()).toBe(true); }); it('should not create temp directory if it already exists', async () => { - (fs.existsSync as Mock).mockReturnValue(true); + const diffDir = path.join(os.tmpdir(), 'gemini-cli-tool-modify-diffs'); + await fsp.mkdir(diffDir, { recursive: true }); + + const mkdirSpy = vi.spyOn(fs, 'mkdirSync'); await modifyWithEditor( mockParams, @@ -217,18 +167,15 @@ describe('modifyWithEditor', () => { abortSignal, ); - expect(fs.mkdirSync).not.toHaveBeenCalled(); + expect(mkdirSpy).not.toHaveBeenCalled(); + mkdirSpy.mockRestore(); }); }); it('should handle missing old temp file gracefully', async () => { - (fs.readFileSync as Mock).mockImplementation((filePath: string) => { - if (filePath.includes('-old-')) { - const 
error = new Error('ENOENT: no such file or directory'); - (error as NodeJS.ErrnoException).code = 'ENOENT'; - throw error; - } - return modifiedContent; + mockOpenDiff.mockImplementation(async (oldPath, newPath) => { + await fsp.writeFile(newPath, modifiedContent, 'utf8'); + await fsp.unlink(oldPath); }); const result = await modifyWithEditor( @@ -255,13 +202,8 @@ describe('modifyWithEditor', () => { }); it('should handle missing new temp file gracefully', async () => { - (fs.readFileSync as Mock).mockImplementation((filePath: string) => { - if (filePath.includes('-new-')) { - const error = new Error('ENOENT: no such file or directory'); - (error as NodeJS.ErrnoException).code = 'ENOENT'; - throw error; - } - return currentContent; + mockOpenDiff.mockImplementation(async (_oldPath, newPath) => { + await fsp.unlink(newPath); }); const result = await modifyWithEditor( @@ -291,6 +233,8 @@ describe('modifyWithEditor', () => { const editorError = new Error('Editor failed to open'); mockOpenDiff.mockRejectedValue(editorError); + const writeSpy = vi.spyOn(fs, 'writeFileSync'); + await expect( modifyWithEditor( mockParams, @@ -300,14 +244,21 @@ describe('modifyWithEditor', () => { ), ).rejects.toThrow('Editor failed to open'); - expect(fs.unlinkSync).toHaveBeenCalledTimes(2); + expect(writeSpy).toHaveBeenCalledTimes(2); + const oldFilePath = writeSpy.mock.calls[0][0] as string; + const newFilePath = writeSpy.mock.calls[1][0] as string; + + await expect(fsp.access(oldFilePath)).rejects.toThrow(); + await expect(fsp.access(newFilePath)).rejects.toThrow(); + + writeSpy.mockRestore(); }); it('should handle temp file cleanup errors gracefully', async () => { const consoleErrorSpy = vi .spyOn(console, 'error') .mockImplementation(() => {}); - (fs.unlinkSync as Mock).mockImplementation((_filePath: string) => { + vi.spyOn(fs, 'unlinkSync').mockImplementation(() => { throw new Error('Failed to delete file'); }); @@ -327,7 +278,11 @@ describe('modifyWithEditor', () => { }); 
it('should create temp files with correct naming with extension', async () => { - const testFilePath = path.join(tempDir, 'subfolder', 'test-file.txt'); + const testFilePath = path.join( + testProjectDir, + 'subfolder', + 'test-file.txt', + ); mockModifyContext.getFilePath = vi.fn().mockReturnValue(testFilePath); await modifyWithEditor( @@ -337,20 +292,18 @@ describe('modifyWithEditor', () => { abortSignal, ); - const writeFileCalls = (fs.writeFileSync as Mock).mock.calls; - expect(writeFileCalls).toHaveLength(2); - - const oldFilePath = writeFileCalls[0][0]; - const newFilePath = writeFileCalls[1][0]; - + expect(mockOpenDiff).toHaveBeenCalledOnce(); + const [oldFilePath, newFilePath] = mockOpenDiff.mock.calls[0]; expect(oldFilePath).toMatch(/gemini-cli-modify-test-file-old-\d+\.txt$/); expect(newFilePath).toMatch(/gemini-cli-modify-test-file-new-\d+\.txt$/); - expect(oldFilePath).toContain(`${tempDir}/gemini-cli-tool-modify-diffs/`); - expect(newFilePath).toContain(`${tempDir}/gemini-cli-tool-modify-diffs/`); + + const diffDir = path.join(os.tmpdir(), 'gemini-cli-tool-modify-diffs'); + expect(path.dirname(oldFilePath)).toBe(diffDir); + expect(path.dirname(newFilePath)).toBe(diffDir); }); it('should create temp files with correct naming without extension', async () => { - const testFilePath = path.join(tempDir, 'subfolder', 'test-file'); + const testFilePath = path.join(testProjectDir, 'subfolder', 'test-file'); mockModifyContext.getFilePath = vi.fn().mockReturnValue(testFilePath); await modifyWithEditor( @@ -360,16 +313,14 @@ describe('modifyWithEditor', () => { abortSignal, ); - const writeFileCalls = (fs.writeFileSync as Mock).mock.calls; - expect(writeFileCalls).toHaveLength(2); - - const oldFilePath = writeFileCalls[0][0]; - const newFilePath = writeFileCalls[1][0]; - + expect(mockOpenDiff).toHaveBeenCalledOnce(); + const [oldFilePath, newFilePath] = mockOpenDiff.mock.calls[0]; expect(oldFilePath).toMatch(/gemini-cli-modify-test-file-old-\d+$/); 
expect(newFilePath).toMatch(/gemini-cli-modify-test-file-new-\d+$/); - expect(oldFilePath).toContain(`${tempDir}/gemini-cli-tool-modify-diffs/`); - expect(newFilePath).toContain(`${tempDir}/gemini-cli-tool-modify-diffs/`); + + const diffDir = path.join(os.tmpdir(), 'gemini-cli-tool-modify-diffs'); + expect(path.dirname(oldFilePath)).toBe(diffDir); + expect(path.dirname(newFilePath)).toBe(diffDir); }); }); diff --git a/packages/core/src/tools/read-file.test.ts b/packages/core/src/tools/read-file.test.ts index 3c67b9dd7..e06c353a0 100644 --- a/packages/core/src/tools/read-file.test.ts +++ b/packages/core/src/tools/read-file.test.ts @@ -4,54 +4,37 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { vi, describe, it, expect, beforeEach, afterEach, Mock } from 'vitest'; +import { describe, it, expect, beforeEach, afterEach } from 'vitest'; import { ReadFileTool, ReadFileToolParams } from './read-file.js'; -import * as fileUtils from '../utils/fileUtils.js'; import path from 'path'; import os from 'os'; -import fs from 'fs'; // For actual fs operations in setup +import fs from 'fs'; +import fsp from 'fs/promises'; import { Config } from '../config/config.js'; import { FileDiscoveryService } from '../services/fileDiscoveryService.js'; -// Mock fileUtils.processSingleFileContent -vi.mock('../utils/fileUtils', async () => { - const actualFileUtils = - await vi.importActual('../utils/fileUtils'); - return { - ...actualFileUtils, // Spread actual implementations - processSingleFileContent: vi.fn(), // Mock specific function - }; -}); - -const mockProcessSingleFileContent = fileUtils.processSingleFileContent as Mock; - describe('ReadFileTool', () => { let tempRootDir: string; let tool: ReadFileTool; const abortSignal = new AbortController().signal; - beforeEach(() => { + beforeEach(async () => { // Create a unique temporary root directory for each test run - tempRootDir = fs.mkdtempSync( + tempRootDir = await fsp.mkdtemp( path.join(os.tmpdir(), 'read-file-tool-root-'), ); - 
fs.writeFileSync( - path.join(tempRootDir, '.geminiignore'), - ['foo.*'].join('\n'), - ); - const fileService = new FileDiscoveryService(tempRootDir); + const mockConfigInstance = { - getFileService: () => fileService, + getFileService: () => new FileDiscoveryService(tempRootDir), getTargetDir: () => tempRootDir, } as unknown as Config; tool = new ReadFileTool(mockConfigInstance); - mockProcessSingleFileContent.mockReset(); }); - afterEach(() => { + afterEach(async () => { // Clean up the temporary root directory if (fs.existsSync(tempRootDir)) { - fs.rmSync(tempRootDir, { recursive: true, force: true }); + await fsp.rm(tempRootDir, { recursive: true, force: true }); } }); @@ -129,9 +112,9 @@ describe('ReadFileTool', () => { it('should return a shortened, relative path', () => { const filePath = path.join(tempRootDir, 'sub', 'dir', 'file.txt'); const params: ReadFileToolParams = { absolute_path: filePath }; - // Assuming tempRootDir is something like /tmp/read-file-tool-root-XXXXXX - // The relative path would be sub/dir/file.txt - expect(tool.getDescription(params)).toBe('sub/dir/file.txt'); + expect(tool.getDescription(params)).toBe( + path.join('sub', 'dir', 'file.txt'), + ); }); it('should return . if path is the root directory', () => { @@ -142,111 +125,140 @@ describe('ReadFileTool', () => { describe('execute', () => { it('should return validation error if params are invalid', async () => { - const params: ReadFileToolParams = { absolute_path: 'relative/path.txt' }; - const result = await tool.execute(params, abortSignal); - expect(result.llmContent).toBe( - 'Error: Invalid parameters provided. Reason: File path must be absolute, but was relative: relative/path.txt. You must provide an absolute path.', - ); - expect(result.returnDisplay).toBe( - 'File path must be absolute, but was relative: relative/path.txt. 
You must provide an absolute path.', - ); + const params: ReadFileToolParams = { + absolute_path: 'relative/path.txt', + }; + expect(await tool.execute(params, abortSignal)).toEqual({ + llmContent: + 'Error: Invalid parameters provided. Reason: File path must be absolute, but was relative: relative/path.txt. You must provide an absolute path.', + returnDisplay: + 'File path must be absolute, but was relative: relative/path.txt. You must provide an absolute path.', + }); }); - it('should return error from processSingleFileContent if it fails', async () => { - const filePath = path.join(tempRootDir, 'error.txt'); + it('should return error if file does not exist', async () => { + const filePath = path.join(tempRootDir, 'nonexistent.txt'); const params: ReadFileToolParams = { absolute_path: filePath }; - const errorMessage = 'Simulated read error'; - mockProcessSingleFileContent.mockResolvedValue({ - llmContent: `Error reading file ${filePath}: ${errorMessage}`, - returnDisplay: `Error reading file ${filePath}: ${errorMessage}`, - error: errorMessage, - }); - const result = await tool.execute(params, abortSignal); - expect(mockProcessSingleFileContent).toHaveBeenCalledWith( - filePath, - tempRootDir, - undefined, - undefined, - ); - expect(result.llmContent).toContain(errorMessage); - expect(result.returnDisplay).toContain(errorMessage); + expect(await tool.execute(params, abortSignal)).toEqual({ + llmContent: `File not found: ${filePath}`, + returnDisplay: 'File not found.', + }); }); it('should return success result for a text file', async () => { const filePath = path.join(tempRootDir, 'textfile.txt'); const fileContent = 'This is a test file.'; + await fsp.writeFile(filePath, fileContent, 'utf-8'); const params: ReadFileToolParams = { absolute_path: filePath }; - mockProcessSingleFileContent.mockResolvedValue({ - llmContent: fileContent, - returnDisplay: `Read text file: ${path.basename(filePath)}`, - }); - const result = await tool.execute(params, abortSignal); - 
expect(mockProcessSingleFileContent).toHaveBeenCalledWith( - filePath, - tempRootDir, - undefined, - undefined, - ); - expect(result.llmContent).toBe(fileContent); - expect(result.returnDisplay).toBe( - `Read text file: ${path.basename(filePath)}`, - ); + expect(await tool.execute(params, abortSignal)).toEqual({ + llmContent: fileContent, + returnDisplay: '', + }); }); it('should return success result for an image file', async () => { + // A minimal 1x1 transparent PNG file. + const pngContent = Buffer.from([ + 137, 80, 78, 71, 13, 10, 26, 10, 0, 0, 0, 13, 73, 72, 68, 82, 0, 0, 0, + 1, 0, 0, 0, 1, 8, 6, 0, 0, 0, 31, 21, 196, 137, 0, 0, 0, 10, 73, 68, 65, + 84, 120, 156, 99, 0, 1, 0, 0, 5, 0, 1, 13, 10, 45, 180, 0, 0, 0, 0, 73, + 69, 78, 68, 174, 66, 96, 130, + ]); const filePath = path.join(tempRootDir, 'image.png'); - const imageData = { - inlineData: { mimeType: 'image/png', data: 'base64...' }, - }; + await fsp.writeFile(filePath, pngContent); const params: ReadFileToolParams = { absolute_path: filePath }; - mockProcessSingleFileContent.mockResolvedValue({ - llmContent: imageData, - returnDisplay: `Read image file: ${path.basename(filePath)}`, - }); - const result = await tool.execute(params, abortSignal); - expect(mockProcessSingleFileContent).toHaveBeenCalledWith( - filePath, - tempRootDir, - undefined, - undefined, - ); - expect(result.llmContent).toEqual(imageData); - expect(result.returnDisplay).toBe( - `Read image file: ${path.basename(filePath)}`, - ); + expect(await tool.execute(params, abortSignal)).toEqual({ + llmContent: { + inlineData: { + mimeType: 'image/png', + data: pngContent.toString('base64'), + }, + }, + returnDisplay: `Read image file: image.png`, + }); }); - it('should pass offset and limit to processSingleFileContent', async () => { + it('should treat a non-image file with image extension as an image', async () => { + const filePath = path.join(tempRootDir, 'fake-image.png'); + const fileContent = 'This is not a real png.'; + await 
fsp.writeFile(filePath, fileContent, 'utf-8'); + const params: ReadFileToolParams = { absolute_path: filePath }; + + expect(await tool.execute(params, abortSignal)).toEqual({ + llmContent: { + inlineData: { + mimeType: 'image/png', + data: Buffer.from(fileContent).toString('base64'), + }, + }, + returnDisplay: `Read image file: fake-image.png`, + }); + }); + + it('should pass offset and limit to read a slice of a text file', async () => { const filePath = path.join(tempRootDir, 'paginated.txt'); + const fileContent = Array.from( + { length: 20 }, + (_, i) => `Line ${i + 1}`, + ).join('\n'); + await fsp.writeFile(filePath, fileContent, 'utf-8'); + const params: ReadFileToolParams = { absolute_path: filePath, - offset: 10, - limit: 5, + offset: 5, // Start from line 6 + limit: 3, }; - mockProcessSingleFileContent.mockResolvedValue({ - llmContent: 'some lines', - returnDisplay: 'Read text file (paginated)', - }); - await tool.execute(params, abortSignal); - expect(mockProcessSingleFileContent).toHaveBeenCalledWith( - filePath, - tempRootDir, - 10, - 5, - ); + expect(await tool.execute(params, abortSignal)).toEqual({ + llmContent: [ + '[File content truncated: showing lines 6-8 of 20 total lines. 
Use offset/limit parameters to view more.]', + 'Line 6', + 'Line 7', + 'Line 8', + ].join('\n'), + returnDisplay: '(truncated)', + }); }); - it('should return error if path is ignored by a .geminiignore pattern', async () => { - const params: ReadFileToolParams = { - absolute_path: path.join(tempRootDir, 'foo.bar'), - }; - const result = await tool.execute(params, abortSignal); - expect(result.returnDisplay).toContain('foo.bar'); - expect(result.returnDisplay).not.toContain('foo.baz'); + describe('with .geminiignore', () => { + beforeEach(async () => { + await fsp.writeFile( + path.join(tempRootDir, '.geminiignore'), + ['foo.*', 'ignored/'].join('\n'), + ); + }); + + it('should return error if path is ignored by a .geminiignore pattern', async () => { + const ignoredFilePath = path.join(tempRootDir, 'foo.bar'); + await fsp.writeFile(ignoredFilePath, 'content', 'utf-8'); + const params: ReadFileToolParams = { + absolute_path: ignoredFilePath, + }; + const expectedError = `File path '${ignoredFilePath}' is ignored by .geminiignore pattern(s).`; + expect(await tool.execute(params, abortSignal)).toEqual({ + llmContent: `Error: Invalid parameters provided. Reason: ${expectedError}`, + returnDisplay: expectedError, + }); + }); + + it('should return error if path is in an ignored directory', async () => { + const ignoredDirPath = path.join(tempRootDir, 'ignored'); + await fsp.mkdir(ignoredDirPath); + const filePath = path.join(ignoredDirPath, 'somefile.txt'); + await fsp.writeFile(filePath, 'content', 'utf-8'); + + const params: ReadFileToolParams = { + absolute_path: filePath, + }; + const expectedError = `File path '${filePath}' is ignored by .geminiignore pattern(s).`; + expect(await tool.execute(params, abortSignal)).toEqual({ + llmContent: `Error: Invalid parameters provided. 
Reason: ${expectedError}`, + returnDisplay: expectedError, + }); + }); }); }); }); diff --git a/packages/core/src/tools/read-file.ts b/packages/core/src/tools/read-file.ts index a2ff89c12..9ba806725 100644 --- a/packages/core/src/tools/read-file.ts +++ b/packages/core/src/tools/read-file.ts @@ -7,7 +7,7 @@ import path from 'path'; import { SchemaValidator } from '../utils/schemaValidator.js'; import { makeRelative, shortenPath } from '../utils/paths.js'; -import { BaseTool, ToolResult } from './tools.js'; +import { BaseTool, Icon, ToolLocation, ToolResult } from './tools.js'; import { Type } from '@google/genai'; import { isWithinRoot, @@ -51,6 +51,7 @@ export class ReadFileTool extends BaseTool { ReadFileTool.Name, 'ReadFile', 'Reads and returns the content of a specified file from the local filesystem. Handles text, images (PNG, JPG, GIF, WEBP, SVG, BMP), and PDF files. For text files, it can read specific line ranges.', + Icon.FileSearch, { properties: { absolute_path: { @@ -118,6 +119,10 @@ export class ReadFileTool extends BaseTool { return shortenPath(relativePath); } + toolLocations(params: ReadFileToolParams): ToolLocation[] { + return [{ path: params.absolute_path, line: params.offset }]; + } + async execute( params: ReadFileToolParams, _signal: AbortSignal, diff --git a/packages/core/src/tools/read-many-files.test.ts b/packages/core/src/tools/read-many-files.test.ts index 3bb824cdc..641aa7052 100644 --- a/packages/core/src/tools/read-many-files.test.ts +++ b/packages/core/src/tools/read-many-files.test.ts @@ -58,10 +58,13 @@ describe('ReadManyFilesTool', () => { const fileService = new FileDiscoveryService(tempRootDir); const mockConfig = { getFileService: () => fileService, - getFileFilteringRespectGitIgnore: () => true, + + getFileFilteringOptions: () => ({ + respectGitIgnore: true, + respectGeminiIgnore: true, + }), getTargetDir: () => tempRootDir, } as Partial as Config; - tool = new ReadManyFilesTool(mockConfig); mockReadFileFn = 
mockControl.mockReadFile; @@ -269,7 +272,7 @@ describe('ReadManyFilesTool', () => { ); }); - it('should handle non-existent specific files gracefully', async () => { + it('should handle nonexistent specific files gracefully', async () => { const params = { paths: ['nonexistent-file.txt'] }; const result = await tool.execute(params, new AbortController().signal); expect(result.llmContent).toEqual([ diff --git a/packages/core/src/tools/read-many-files.ts b/packages/core/src/tools/read-many-files.ts index c43841b59..94ec1a68a 100644 --- a/packages/core/src/tools/read-many-files.ts +++ b/packages/core/src/tools/read-many-files.ts @@ -4,7 +4,7 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { BaseTool, ToolResult } from './tools.js'; +import { BaseTool, Icon, ToolResult } from './tools.js'; import { SchemaValidator } from '../utils/schemaValidator.js'; import { getErrorMessage } from '../utils/errors.js'; import * as path from 'path'; @@ -17,7 +17,7 @@ import { getSpecificMimeType, } from '../utils/fileUtils.js'; import { PartListUnion, Schema, Type } from '@google/genai'; -import { Config } from '../config/config.js'; +import { Config, DEFAULT_FILE_FILTERING_OPTIONS } from '../config/config.js'; import { recordFileOperationMetric, FileOperation, @@ -62,9 +62,12 @@ export interface ReadManyFilesParams { useDefaultExcludes?: boolean; /** - * Optional. Whether to respect .gitignore patterns. Defaults to true. 
+ * Whether to respect .gitignore and .geminiignore patterns (optional, defaults to true) */ - respect_git_ignore?: boolean; + file_filtering_options?: { + respect_git_ignore?: boolean; + respect_gemini_ignore?: boolean; + }; } /** @@ -125,8 +128,6 @@ export class ReadManyFilesTool extends BaseTool< > { static readonly Name: string = 'read_many_files'; - private readonly geminiIgnorePatterns: string[] = []; - constructor(private config: Config) { const parameterSchema: Schema = { type: Type.OBJECT, @@ -173,11 +174,22 @@ export class ReadManyFilesTool extends BaseTool< 'Optional. Whether to apply a list of default exclusion patterns (e.g., node_modules, .git, binary files). Defaults to true.', default: true, }, - respect_git_ignore: { - type: Type.BOOLEAN, + file_filtering_options: { description: - 'Optional. Whether to respect .gitignore patterns when discovering files. Only available in git repositories. Defaults to true.', - default: true, + 'Whether to respect ignore patterns from .gitignore or .geminiignore', + type: Type.OBJECT, + properties: { + respect_git_ignore: { + description: + 'Optional: Whether to respect .gitignore patterns when listing files. Only available in git repositories. Defaults to true.', + type: Type.BOOLEAN, + }, + respect_gemini_ignore: { + description: + 'Optional: Whether to respect .geminiignore patterns when listing files. Defaults to true.', + type: Type.BOOLEAN, + }, + }, }, }, required: ['paths'], @@ -196,11 +208,9 @@ This tool is useful when you need to understand or analyze a collection of files - When the user asks to "read all files in X directory" or "show me the content of all Y files". Use this tool when the user's query implies needing the content of several files simultaneously for context, analysis, or summarization. For text files, it uses default UTF-8 encoding and a '--- {filePath} ---' separator between file contents. Ensure paths are relative to the target directory. Glob patterns like 'src/**/*.js' are supported. 
Avoid using for single files if a more specific single-file reading tool is available, unless the user specifically requests to process a list containing just one file via this tool. Other binary files (not explicitly requested as image/PDF) are generally skipped. Default excludes apply to common non-text files (except for explicitly requested images/PDFs) and large dependency directories unless 'useDefaultExcludes' is false.`, + Icon.FileSearch, parameterSchema, ); - this.geminiIgnorePatterns = config - .getFileService() - .getGeminiIgnorePatterns(); } validateParams(params: ReadManyFilesParams): string | null { @@ -218,17 +228,19 @@ Use this tool when the user's query implies needing the content of several files // Determine the final list of exclusion patterns exactly as in execute method const paramExcludes = params.exclude || []; const paramUseDefaultExcludes = params.useDefaultExcludes !== false; - + const geminiIgnorePatterns = this.config + .getFileService() + .getGeminiIgnorePatterns(); const finalExclusionPatternsForDescription: string[] = paramUseDefaultExcludes - ? [...DEFAULT_EXCLUDES, ...paramExcludes, ...this.geminiIgnorePatterns] - : [...paramExcludes, ...this.geminiIgnorePatterns]; + ? [...DEFAULT_EXCLUDES, ...paramExcludes, ...geminiIgnorePatterns] + : [...paramExcludes, ...geminiIgnorePatterns]; let excludeDesc = `Excluding: ${finalExclusionPatternsForDescription.length > 0 ? `patterns like \`${finalExclusionPatternsForDescription.slice(0, 2).join('`, `')}${finalExclusionPatternsForDescription.length > 2 ? 
'...`' : '`'}` : 'none specified'}`; // Add a note if .geminiignore patterns contributed to the final list of exclusions - if (this.geminiIgnorePatterns.length > 0) { - const geminiPatternsInEffect = this.geminiIgnorePatterns.filter((p) => + if (geminiIgnorePatterns.length > 0) { + const geminiPatternsInEffect = geminiIgnorePatterns.filter((p) => finalExclusionPatternsForDescription.includes(p), ).length; if (geminiPatternsInEffect > 0) { @@ -256,12 +268,19 @@ Use this tool when the user's query implies needing the content of several files include = [], exclude = [], useDefaultExcludes = true, - respect_git_ignore = true, } = params; - const respectGitIgnore = - respect_git_ignore ?? this.config.getFileFilteringRespectGitIgnore(); + const defaultFileIgnores = + this.config.getFileFilteringOptions() ?? DEFAULT_FILE_FILTERING_OPTIONS; + const fileFilteringOptions = { + respectGitIgnore: + params.file_filtering_options?.respect_git_ignore ?? + defaultFileIgnores.respectGitIgnore, // Use the property from the returned object + respectGeminiIgnore: + params.file_filtering_options?.respect_gemini_ignore ?? + defaultFileIgnores.respectGeminiIgnore, // Use the property from the returned object + }; // Get centralized file discovery service const fileDiscovery = this.config.getFileService(); @@ -271,8 +290,8 @@ Use this tool when the user's query implies needing the content of several files const contentParts: PartListUnion = []; const effectiveExcludes = useDefaultExcludes - ? [...DEFAULT_EXCLUDES, ...exclude, ...this.geminiIgnorePatterns] - : [...exclude, ...this.geminiIgnorePatterns]; + ? 
[...DEFAULT_EXCLUDES, ...exclude] + : [...exclude]; const searchPatterns = [...inputPatterns, ...include]; if (searchPatterns.length === 0) { @@ -283,7 +302,8 @@ Use this tool when the user's query implies needing the content of several files } try { - const entries = await glob(searchPatterns, { + const patterns = searchPatterns.map((p) => p.replace(/\\/g, '/')); + const entries: string[] = await glob(patterns, { cwd: this.config.getTargetDir(), ignore: effectiveExcludes, nodir: true, @@ -291,20 +311,39 @@ Use this tool when the user's query implies needing the content of several files absolute: true, nocase: true, signal, + withFileTypes: false, }); - const filteredEntries = respectGitIgnore + const gitFilteredEntries = fileFilteringOptions.respectGitIgnore ? fileDiscovery .filterFiles( entries.map((p) => path.relative(this.config.getTargetDir(), p)), { - respectGitIgnore, + respectGitIgnore: true, + respectGeminiIgnore: false, }, ) .map((p) => path.resolve(this.config.getTargetDir(), p)) : entries; + // Apply gemini ignore filtering if enabled + const finalFilteredEntries = fileFilteringOptions.respectGeminiIgnore + ? fileDiscovery + .filterFiles( + gitFilteredEntries.map((p) => + path.relative(this.config.getTargetDir(), p), + ), + { + respectGitIgnore: false, + respectGeminiIgnore: true, + }, + ) + .map((p) => path.resolve(this.config.getTargetDir(), p)) + : gitFilteredEntries; + let gitIgnoredCount = 0; + let geminiIgnoredCount = 0; + for (const absoluteFilePath of entries) { // Security check: ensure the glob library didn't return something outside targetDir. 
if (!absoluteFilePath.startsWith(this.config.getTargetDir())) { @@ -316,11 +355,23 @@ Use this tool when the user's query implies needing the content of several files } // Check if this file was filtered out by git ignore - if (respectGitIgnore && !filteredEntries.includes(absoluteFilePath)) { + if ( + fileFilteringOptions.respectGitIgnore && + !gitFilteredEntries.includes(absoluteFilePath) + ) { gitIgnoredCount++; continue; } + // Check if this file was filtered out by gemini ignore + if ( + fileFilteringOptions.respectGeminiIgnore && + !finalFilteredEntries.includes(absoluteFilePath) + ) { + geminiIgnoredCount++; + continue; + } + filesToConsider.add(absoluteFilePath); } @@ -328,7 +379,15 @@ Use this tool when the user's query implies needing the content of several files if (gitIgnoredCount > 0) { skippedFiles.push({ path: `${gitIgnoredCount} file(s)`, - reason: 'ignored', + reason: 'git ignored', + }); + } + + // Add info about gemini-ignored files if any were filtered + if (geminiIgnoredCount > 0) { + skippedFiles.push({ + path: `${geminiIgnoredCount} file(s)`, + reason: 'gemini ignored', }); } } catch (error) { @@ -345,7 +404,7 @@ Use this tool when the user's query implies needing the content of several files .relative(this.config.getTargetDir(), filePath) .replace(/\\/g, '/'); - const fileType = detectFileType(filePath); + const fileType = await detectFileType(filePath); if (fileType === 'image' || fileType === 'pdf') { const fileExtension = path.extname(filePath).toLowerCase(); diff --git a/packages/core/src/tools/shell.test.ts b/packages/core/src/tools/shell.test.ts index a4d56e22c..553641974 100644 --- a/packages/core/src/tools/shell.test.ts +++ b/packages/core/src/tools/shell.test.ts @@ -4,429 +4,384 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { expect, describe, it, vi, beforeEach } from 'vitest'; +import { + vi, + describe, + it, + expect, + beforeEach, + afterEach, + type Mock, +} from 'vitest'; + +const mockShellExecutionService = 
vi.hoisted(() => vi.fn()); +vi.mock('../services/shellExecutionService.js', () => ({ + ShellExecutionService: { execute: mockShellExecutionService }, +})); +vi.mock('fs'); +vi.mock('os'); +vi.mock('crypto'); +vi.mock('../utils/summarizer.js'); + +import { isCommandAllowed } from '../utils/shell-utils.js'; import { ShellTool } from './shell.js'; -import { Config } from '../config/config.js'; +import { type Config } from '../config/config.js'; +import { + type ShellExecutionResult, + type ShellOutputEvent, +} from '../services/shellExecutionService.js'; +import * as fs from 'fs'; +import * as os from 'os'; +import * as path from 'path'; +import * as crypto from 'crypto'; import * as summarizer from '../utils/summarizer.js'; -import { GeminiClient } from '../core/client.js'; +import { ToolConfirmationOutcome } from './tools.js'; +import { OUTPUT_UPDATE_INTERVAL_MS } from './shell.js'; describe('ShellTool', () => { - it('should allow a command if no restrictions are provided', async () => { - const config = { - getCoreTools: () => undefined, - getExcludeTools: () => undefined, - } as Config; - const shellTool = new ShellTool(config); - const result = shellTool.isCommandAllowed('ls -l'); - expect(result.allowed).toBe(true); - }); - - it('should allow a command if it is in the allowed list', async () => { - const config = { - getCoreTools: () => ['ShellTool(ls -l)'], - getExcludeTools: () => undefined, - } as unknown as Config; - const shellTool = new ShellTool(config); - const result = shellTool.isCommandAllowed('ls -l'); - expect(result.allowed).toBe(true); - }); - - it('should block a command if it is not in the allowed list', async () => { - const config = { - getCoreTools: () => ['ShellTool(ls -l)'], - getExcludeTools: () => undefined, - } as unknown as Config; - const shellTool = new ShellTool(config); - const result = shellTool.isCommandAllowed('rm -rf /'); - expect(result.allowed).toBe(false); - expect(result.reason).toBe( - "Command 'rm -rf /' is not in the 
allowed commands list", - ); - }); - - it('should block a command if it is in the blocked list', async () => { - const config = { - getCoreTools: () => undefined, - getExcludeTools: () => ['ShellTool(rm -rf /)'], - } as unknown as Config; - const shellTool = new ShellTool(config); - const result = shellTool.isCommandAllowed('rm -rf /'); - expect(result.allowed).toBe(false); - expect(result.reason).toBe( - "Command 'rm -rf /' is blocked by configuration", - ); - }); - - it('should allow a command if it is not in the blocked list', async () => { - const config = { - getCoreTools: () => undefined, - getExcludeTools: () => ['ShellTool(rm -rf /)'], - } as unknown as Config; - const shellTool = new ShellTool(config); - const result = shellTool.isCommandAllowed('ls -l'); - expect(result.allowed).toBe(true); - }); - - it('should block a command if it is in both the allowed and blocked lists', async () => { - const config = { - getCoreTools: () => ['ShellTool(rm -rf /)'], - getExcludeTools: () => ['ShellTool(rm -rf /)'], - } as unknown as Config; - const shellTool = new ShellTool(config); - const result = shellTool.isCommandAllowed('rm -rf /'); - expect(result.allowed).toBe(false); - expect(result.reason).toBe( - "Command 'rm -rf /' is blocked by configuration", - ); - }); - - it('should allow any command when ShellTool is in coreTools without specific commands', async () => { - const config = { - getCoreTools: () => ['ShellTool'], - getExcludeTools: () => [], - } as unknown as Config; - const shellTool = new ShellTool(config); - const result = shellTool.isCommandAllowed('any command'); - expect(result.allowed).toBe(true); - }); - - it('should block any command when ShellTool is in excludeTools without specific commands', async () => { - const config = { - getCoreTools: () => [], - getExcludeTools: () => ['ShellTool'], - } as unknown as Config; - const shellTool = new ShellTool(config); - const result = shellTool.isCommandAllowed('any command'); - 
expect(result.allowed).toBe(false); - expect(result.reason).toBe( - 'Shell tool is globally disabled in configuration', - ); - }); - - it('should allow a command if it is in the allowed list using the public-facing name', async () => { - const config = { - getCoreTools: () => ['run_shell_command(ls -l)'], - getExcludeTools: () => undefined, - } as unknown as Config; - const shellTool = new ShellTool(config); - const result = shellTool.isCommandAllowed('ls -l'); - expect(result.allowed).toBe(true); - }); - - it('should block a command if it is in the blocked list using the public-facing name', async () => { - const config = { - getCoreTools: () => undefined, - getExcludeTools: () => ['run_shell_command(rm -rf /)'], - } as unknown as Config; - const shellTool = new ShellTool(config); - const result = shellTool.isCommandAllowed('rm -rf /'); - expect(result.allowed).toBe(false); - expect(result.reason).toBe( - "Command 'rm -rf /' is blocked by configuration", - ); - }); - - it('should block any command when ShellTool is in excludeTools using the public-facing name', async () => { - const config = { - getCoreTools: () => [], - getExcludeTools: () => ['run_shell_command'], - } as unknown as Config; - const shellTool = new ShellTool(config); - const result = shellTool.isCommandAllowed('any command'); - expect(result.allowed).toBe(false); - expect(result.reason).toBe( - 'Shell tool is globally disabled in configuration', - ); - }); - - it('should block any command if coreTools contains an empty ShellTool command list using the public-facing name', async () => { - const config = { - getCoreTools: () => ['run_shell_command()'], - getExcludeTools: () => [], - } as unknown as Config; - const shellTool = new ShellTool(config); - const result = shellTool.isCommandAllowed('any command'); - expect(result.allowed).toBe(false); - expect(result.reason).toBe( - "Command 'any command' is not in the allowed commands list", - ); - }); - - it('should block any command if coreTools 
contains an empty ShellTool command list', async () => { - const config = { - getCoreTools: () => ['ShellTool()'], - getExcludeTools: () => [], - } as unknown as Config; - const shellTool = new ShellTool(config); - const result = shellTool.isCommandAllowed('any command'); - expect(result.allowed).toBe(false); - expect(result.reason).toBe( - "Command 'any command' is not in the allowed commands list", - ); - }); - - it('should block a command with extra whitespace if it is in the blocked list', async () => { - const config = { - getCoreTools: () => undefined, - getExcludeTools: () => ['ShellTool(rm -rf /)'], - } as unknown as Config; - const shellTool = new ShellTool(config); - const result = shellTool.isCommandAllowed(' rm -rf / '); - expect(result.allowed).toBe(false); - expect(result.reason).toBe( - "Command 'rm -rf /' is blocked by configuration", - ); - }); - - it('should allow any command when ShellTool is present with specific commands', async () => { - const config = { - getCoreTools: () => ['ShellTool', 'ShellTool(ls)'], - getExcludeTools: () => [], - } as unknown as Config; - const shellTool = new ShellTool(config); - const result = shellTool.isCommandAllowed('any command'); - expect(result.allowed).toBe(true); - }); - - it('should block a command on the blocklist even with a wildcard allow', async () => { - const config = { - getCoreTools: () => ['ShellTool'], - getExcludeTools: () => ['ShellTool(rm -rf /)'], - } as unknown as Config; - const shellTool = new ShellTool(config); - const result = shellTool.isCommandAllowed('rm -rf /'); - expect(result.allowed).toBe(false); - expect(result.reason).toBe( - "Command 'rm -rf /' is blocked by configuration", - ); - }); - - it('should allow a command that starts with an allowed command prefix', async () => { - const config = { - getCoreTools: () => ['ShellTool(gh issue edit)'], - getExcludeTools: () => [], - } as unknown as Config; - const shellTool = new ShellTool(config); - const result = 
shellTool.isCommandAllowed( - 'gh issue edit 1 --add-label "kind/feature"', - ); - expect(result.allowed).toBe(true); - }); - - it('should allow a command that starts with an allowed command prefix using the public-facing name', async () => { - const config = { - getCoreTools: () => ['run_shell_command(gh issue edit)'], - getExcludeTools: () => [], - } as unknown as Config; - const shellTool = new ShellTool(config); - const result = shellTool.isCommandAllowed( - 'gh issue edit 1 --add-label "kind/feature"', - ); - expect(result.allowed).toBe(true); - }); - - it('should not allow a command that starts with an allowed command prefix but is chained with another command', async () => { - const config = { - getCoreTools: () => ['run_shell_command(gh issue edit)'], - getExcludeTools: () => [], - } as unknown as Config; - const shellTool = new ShellTool(config); - const result = shellTool.isCommandAllowed('gh issue edit&&rm -rf /'); - expect(result.allowed).toBe(false); - expect(result.reason).toBe( - "Command 'rm -rf /' is not in the allowed commands list", - ); - }); - - it('should not allow a command that is a prefix of an allowed command', async () => { - const config = { - getCoreTools: () => ['run_shell_command(gh issue edit)'], - getExcludeTools: () => [], - } as unknown as Config; - const shellTool = new ShellTool(config); - const result = shellTool.isCommandAllowed('gh issue'); - expect(result.allowed).toBe(false); - expect(result.reason).toBe( - "Command 'gh issue' is not in the allowed commands list", - ); - }); - - it('should not allow a command that is a prefix of a blocked command', async () => { - const config = { - getCoreTools: () => [], - getExcludeTools: () => ['run_shell_command(gh issue edit)'], - } as unknown as Config; - const shellTool = new ShellTool(config); - const result = shellTool.isCommandAllowed('gh issue'); - expect(result.allowed).toBe(true); - }); - - it('should not allow a command that is chained with a pipe', async () => { - const 
config = { - getCoreTools: () => ['run_shell_command(gh issue list)'], - getExcludeTools: () => [], - } as unknown as Config; - const shellTool = new ShellTool(config); - const result = shellTool.isCommandAllowed('gh issue list | rm -rf /'); - expect(result.allowed).toBe(false); - expect(result.reason).toBe( - "Command 'rm -rf /' is not in the allowed commands list", - ); - }); - - it('should not allow a command that is chained with a semicolon', async () => { - const config = { - getCoreTools: () => ['run_shell_command(gh issue list)'], - getExcludeTools: () => [], - } as unknown as Config; - const shellTool = new ShellTool(config); - const result = shellTool.isCommandAllowed('gh issue list; rm -rf /'); - expect(result.allowed).toBe(false); - expect(result.reason).toBe( - "Command 'rm -rf /' is not in the allowed commands list", - ); - }); - - it('should block a chained command if any part is blocked', async () => { - const config = { - getCoreTools: () => ['run_shell_command(echo "hello")'], - getExcludeTools: () => ['run_shell_command(rm)'], - } as unknown as Config; - const shellTool = new ShellTool(config); - const result = shellTool.isCommandAllowed('echo "hello" && rm -rf /'); - expect(result.allowed).toBe(false); - expect(result.reason).toBe( - "Command 'rm -rf /' is blocked by configuration", - ); - }); - - it('should block a command if its prefix is on the blocklist, even if the command itself is on the allowlist', async () => { - const config = { - getCoreTools: () => ['run_shell_command(git push)'], - getExcludeTools: () => ['run_shell_command(git)'], - } as unknown as Config; - const shellTool = new ShellTool(config); - const result = shellTool.isCommandAllowed('git push'); - expect(result.allowed).toBe(false); - expect(result.reason).toBe( - "Command 'git push' is blocked by configuration", - ); - }); - - it('should be case-sensitive in its matching', async () => { - const config = { - getCoreTools: () => ['run_shell_command(echo)'], - 
getExcludeTools: () => [], - } as unknown as Config; - const shellTool = new ShellTool(config); - const result = shellTool.isCommandAllowed('ECHO "hello"'); - expect(result.allowed).toBe(false); - expect(result.reason).toBe( - 'Command \'ECHO "hello"\' is not in the allowed commands list', - ); - }); - - it('should correctly handle commands with extra whitespace around chaining operators', async () => { - const config = { - getCoreTools: () => ['run_shell_command(ls -l)'], - getExcludeTools: () => ['run_shell_command(rm)'], - } as unknown as Config; - const shellTool = new ShellTool(config); - const result = shellTool.isCommandAllowed('ls -l ; rm -rf /'); - expect(result.allowed).toBe(false); - expect(result.reason).toBe( - "Command 'rm -rf /' is blocked by configuration", - ); - }); - - it('should allow a chained command if all parts are allowed', async () => { - const config = { - getCoreTools: () => [ - 'run_shell_command(echo)', - 'run_shell_command(ls -l)', - ], - getExcludeTools: () => [], - } as unknown as Config; - const shellTool = new ShellTool(config); - const result = shellTool.isCommandAllowed('echo "hello" && ls -l'); - expect(result.allowed).toBe(true); - }); - - it('should allow a command with command substitution using backticks', async () => { - const config = { - getCoreTools: () => ['run_shell_command(echo)'], - getExcludeTools: () => [], - } as unknown as Config; - const shellTool = new ShellTool(config); - const result = shellTool.isCommandAllowed('echo `rm -rf /`'); - expect(result.allowed).toBe(true); - }); - - it('should block a command with command substitution using $()', async () => { - const config = { - getCoreTools: () => ['run_shell_command(echo)'], - getExcludeTools: () => [], - } as unknown as Config; - const shellTool = new ShellTool(config); - const result = shellTool.isCommandAllowed('echo $(rm -rf /)'); - expect(result.allowed).toBe(false); - expect(result.reason).toBe( - 'Command substitution using $() is not allowed for 
security reasons', - ); - }); - - it('should allow a command with I/O redirection', async () => { - const config = { - getCoreTools: () => ['run_shell_command(echo)'], - getExcludeTools: () => [], - } as unknown as Config; - const shellTool = new ShellTool(config); - const result = shellTool.isCommandAllowed('echo "hello" > file.txt'); - expect(result.allowed).toBe(true); - }); - - it('should not allow a command that is chained with a double pipe', async () => { - const config = { - getCoreTools: () => ['run_shell_command(gh issue list)'], - getExcludeTools: () => [], - } as unknown as Config; - const shellTool = new ShellTool(config); - const result = shellTool.isCommandAllowed('gh issue list || rm -rf /'); - expect(result.allowed).toBe(false); - expect(result.reason).toBe( - "Command 'rm -rf /' is not in the allowed commands list", - ); - }); -}); - -describe('ShellTool Bug Reproduction', () => { let shellTool: ShellTool; - let config: Config; + let mockConfig: Config; + let mockShellOutputCallback: (event: ShellOutputEvent) => void; + let resolveExecutionPromise: (result: ShellExecutionResult) => void; beforeEach(() => { - config = { - getCoreTools: () => undefined, - getExcludeTools: () => undefined, - getDebugMode: () => false, - getGeminiClient: () => ({}) as GeminiClient, - getTargetDir: () => '.', + vi.clearAllMocks(); + + mockConfig = { + getCoreTools: vi.fn().mockReturnValue([]), + getExcludeTools: vi.fn().mockReturnValue([]), + getDebugMode: vi.fn().mockReturnValue(false), + getTargetDir: vi.fn().mockReturnValue('/test/dir'), + getSummarizeToolOutputConfig: vi.fn().mockReturnValue(undefined), + getGeminiClient: vi.fn(), } as unknown as Config; - shellTool = new ShellTool(config); - }); - it('should not let the summarizer override the return display', async () => { - const summarizeSpy = vi - .spyOn(summarizer, 'summarizeToolOutput') - .mockResolvedValue('summarized output'); + shellTool = new ShellTool(mockConfig); - const abortSignal = new 
AbortController().signal; - const result = await shellTool.execute( - { command: 'echo "hello"' }, - abortSignal, + vi.mocked(os.platform).mockReturnValue('linux'); + vi.mocked(os.tmpdir).mockReturnValue('/tmp'); + (vi.mocked(crypto.randomBytes) as Mock).mockReturnValue( + Buffer.from('abcdef', 'hex'), ); - expect(result.returnDisplay).toBe('hello\n'); - expect(result.llmContent).toBe('summarized output'); - expect(summarizeSpy).toHaveBeenCalled(); + // Capture the output callback to simulate streaming events from the service + mockShellExecutionService.mockImplementation((_cmd, _cwd, callback) => { + mockShellOutputCallback = callback; + return { + pid: 12345, + result: new Promise((resolve) => { + resolveExecutionPromise = resolve; + }), + }; + }); + }); + + describe('isCommandAllowed', () => { + it('should allow a command if no restrictions are provided', () => { + (mockConfig.getCoreTools as Mock).mockReturnValue(undefined); + (mockConfig.getExcludeTools as Mock).mockReturnValue(undefined); + expect(isCommandAllowed('ls -l', mockConfig).allowed).toBe(true); + }); + + it('should block a command with command substitution using $()', () => { + expect(isCommandAllowed('echo $(rm -rf /)', mockConfig).allowed).toBe( + false, + ); + }); + }); + + describe('validateToolParams', () => { + it('should return null for a valid command', () => { + expect(shellTool.validateToolParams({ command: 'ls -l' })).toBeNull(); + }); + + it('should return an error for an empty command', () => { + expect(shellTool.validateToolParams({ command: ' ' })).toBe( + 'Command cannot be empty.', + ); + }); + + it('should return an error for a non-existent directory', () => { + vi.mocked(fs.existsSync).mockReturnValue(false); + expect( + shellTool.validateToolParams({ command: 'ls', directory: 'rel/path' }), + ).toBe('Directory must exist.'); + }); + }); + + describe('execute', () => { + const mockAbortSignal = new AbortController().signal; + + const resolveShellExecution = ( + result: Partial = 
{}, + ) => { + const fullResult: ShellExecutionResult = { + rawOutput: Buffer.from(result.output || ''), + output: 'Success', + stdout: 'Success', + stderr: '', + exitCode: 0, + signal: null, + error: null, + aborted: false, + pid: 12345, + ...result, + }; + resolveExecutionPromise(fullResult); + }; + + it('should wrap command on linux and parse pgrep output', async () => { + const promise = shellTool.execute( + { command: 'my-command &' }, + mockAbortSignal, + ); + resolveShellExecution({ pid: 54321 }); + + vi.mocked(fs.existsSync).mockReturnValue(true); + vi.mocked(fs.readFileSync).mockReturnValue('54321\n54322\n'); // Service PID and background PID + + const result = await promise; + + const tmpFile = path.join(os.tmpdir(), 'shell_pgrep_abcdef.tmp'); + const wrappedCommand = `{ my-command & }; __code=$?; pgrep -g 0 >${tmpFile} 2>&1; exit $__code;`; + expect(mockShellExecutionService).toHaveBeenCalledWith( + wrappedCommand, + expect.any(String), + expect.any(Function), + mockAbortSignal, + ); + expect(result.llmContent).toContain('Background PIDs: 54322'); + expect(vi.mocked(fs.unlinkSync)).toHaveBeenCalledWith(tmpFile); + }); + + it('should not wrap command on windows', async () => { + vi.mocked(os.platform).mockReturnValue('win32'); + const promise = shellTool.execute({ command: 'dir' }, mockAbortSignal); + resolveExecutionPromise({ + rawOutput: Buffer.from(''), + output: '', + stdout: '', + stderr: '', + exitCode: 0, + signal: null, + error: null, + aborted: false, + pid: 12345, + }); + await promise; + expect(mockShellExecutionService).toHaveBeenCalledWith( + 'dir', + expect.any(String), + expect.any(Function), + mockAbortSignal, + ); + }); + + it('should format error messages correctly', async () => { + const error = new Error('wrapped command failed'); + const promise = shellTool.execute( + { command: 'user-command' }, + mockAbortSignal, + ); + resolveShellExecution({ + error, + exitCode: 1, + output: 'err', + stderr: 'err', + rawOutput: Buffer.from('err'), 
+ stdout: '', + signal: null, + aborted: false, + pid: 12345, + }); + + const result = await promise; + // The final llmContent should contain the user's command, not the wrapper + expect(result.llmContent).toContain('Error: wrapped command failed'); + expect(result.llmContent).not.toContain('pgrep'); + }); + + it('should summarize output when configured', async () => { + (mockConfig.getSummarizeToolOutputConfig as Mock).mockReturnValue({ + [shellTool.name]: { tokenBudget: 1000 }, + }); + vi.mocked(summarizer.summarizeToolOutput).mockResolvedValue( + 'summarized output', + ); + + const promise = shellTool.execute({ command: 'ls' }, mockAbortSignal); + resolveExecutionPromise({ + output: 'long output', + rawOutput: Buffer.from('long output'), + stdout: 'long output', + stderr: '', + exitCode: 0, + signal: null, + error: null, + aborted: false, + pid: 12345, + }); + + const result = await promise; + + expect(summarizer.summarizeToolOutput).toHaveBeenCalledWith( + expect.any(String), + mockConfig.getGeminiClient(), + mockAbortSignal, + 1000, + ); + expect(result.llmContent).toBe('summarized output'); + expect(result.returnDisplay).toBe('long output'); + }); + + it('should clean up the temp file on synchronous execution error', async () => { + const error = new Error('sync spawn error'); + mockShellExecutionService.mockImplementation(() => { + throw error; + }); + vi.mocked(fs.existsSync).mockReturnValue(true); // Pretend the file exists + + await expect( + shellTool.execute({ command: 'a-command' }, mockAbortSignal), + ).rejects.toThrow(error); + + const tmpFile = path.join(os.tmpdir(), 'shell_pgrep_abcdef.tmp'); + expect(vi.mocked(fs.unlinkSync)).toHaveBeenCalledWith(tmpFile); + }); + + describe('Streaming to `updateOutput`', () => { + let updateOutputMock: Mock; + beforeEach(() => { + vi.useFakeTimers({ toFake: ['Date'] }); + updateOutputMock = vi.fn(); + }); + afterEach(() => { + vi.useRealTimers(); + }); + + it('should throttle text output updates', async () => { 
+ const promise = shellTool.execute( + { command: 'stream' }, + mockAbortSignal, + updateOutputMock, + ); + + // First chunk, should be throttled. + mockShellOutputCallback({ + type: 'data', + stream: 'stdout', + chunk: 'hello ', + }); + expect(updateOutputMock).not.toHaveBeenCalled(); + + // Advance time past the throttle interval. + await vi.advanceTimersByTimeAsync(OUTPUT_UPDATE_INTERVAL_MS + 1); + + // Send a second chunk. THIS event triggers the update with the CUMULATIVE content. + mockShellOutputCallback({ + type: 'data', + stream: 'stderr', + chunk: 'world', + }); + + // It should have been called once now with the combined output. + expect(updateOutputMock).toHaveBeenCalledOnce(); + expect(updateOutputMock).toHaveBeenCalledWith('hello \nworld'); + + resolveExecutionPromise({ + rawOutput: Buffer.from(''), + output: '', + stdout: '', + stderr: '', + exitCode: 0, + signal: null, + error: null, + aborted: false, + pid: 12345, + }); + await promise; + }); + + it('should immediately show binary detection message and throttle progress', async () => { + const promise = shellTool.execute( + { command: 'cat img' }, + mockAbortSignal, + updateOutputMock, + ); + + mockShellOutputCallback({ type: 'binary_detected' }); + expect(updateOutputMock).toHaveBeenCalledOnce(); + expect(updateOutputMock).toHaveBeenCalledWith( + '[Binary output detected. Halting stream...]', + ); + + mockShellOutputCallback({ + type: 'binary_progress', + bytesReceived: 1024, + }); + expect(updateOutputMock).toHaveBeenCalledOnce(); + + // Advance time past the throttle interval. + await vi.advanceTimersByTimeAsync(OUTPUT_UPDATE_INTERVAL_MS + 1); + + // Send a SECOND progress event. This one will trigger the flush. + mockShellOutputCallback({ + type: 'binary_progress', + bytesReceived: 2048, + }); + + // Now it should be called a second time with the latest progress. 
+ expect(updateOutputMock).toHaveBeenCalledTimes(2); + expect(updateOutputMock).toHaveBeenLastCalledWith( + '[Receiving binary output... 2.0 KB received]', + ); + + resolveExecutionPromise({ + rawOutput: Buffer.from(''), + output: '', + stdout: '', + stderr: '', + exitCode: 0, + signal: null, + error: null, + aborted: false, + pid: 12345, + }); + await promise; + }); + }); + }); + + describe('shouldConfirmExecute', () => { + it('should request confirmation for a new command and whitelist it on "Always"', async () => { + const params = { command: 'npm install' }; + const confirmation = await shellTool.shouldConfirmExecute( + params, + new AbortController().signal, + ); + + expect(confirmation).not.toBe(false); + expect(confirmation && confirmation.type).toBe('exec'); + + // eslint-disable-next-line @typescript-eslint/no-explicit-any + await (confirmation as any).onConfirm( + ToolConfirmationOutcome.ProceedAlways, + ); + + // Should now be whitelisted + const secondConfirmation = await shellTool.shouldConfirmExecute( + { command: 'npm test' }, + new AbortController().signal, + ); + expect(secondConfirmation).toBe(false); + }); + + it('should skip confirmation if validation fails', async () => { + const confirmation = await shellTool.shouldConfirmExecute( + { command: '' }, + new AbortController().signal, + ); + expect(confirmation).toBe(false); + }); }); }); diff --git a/packages/core/src/tools/shell.ts b/packages/core/src/tools/shell.ts index 7e79c7177..02fcbb7f1 100644 --- a/packages/core/src/tools/shell.ts +++ b/packages/core/src/tools/shell.ts @@ -15,25 +15,34 @@ import { ToolCallConfirmationDetails, ToolExecuteConfirmationDetails, ToolConfirmationOutcome, + Icon, } from './tools.js'; import { Type } from '@google/genai'; import { SchemaValidator } from '../utils/schemaValidator.js'; import { getErrorMessage } from '../utils/errors.js'; -import stripAnsi from 'strip-ansi'; +import { summarizeToolOutput } from '../utils/summarizer.js'; +import { + 
ShellExecutionService, + ShellOutputEvent, +} from '../services/shellExecutionService.js'; +import { formatMemoryUsage } from '../utils/formatters.js'; +import { + getCommandRoots, + isCommandAllowed, + stripShellWrapper, +} from '../utils/shell-utils.js'; + +export const OUTPUT_UPDATE_INTERVAL_MS = 1000; export interface ShellToolParams { command: string; description?: string; directory?: string; } -import { spawn } from 'child_process'; -import { summarizeToolOutput } from '../utils/summarizer.js'; - -const OUTPUT_UPDATE_INTERVAL_MS = 1000; export class ShellTool extends BaseTool { static Name: string = 'run_shell_command'; - private whitelist: Set = new Set(); + private allowlist: Set = new Set(); constructor(private readonly config: Config) { super( @@ -41,17 +50,18 @@ export class ShellTool extends BaseTool { 'Shell', `This tool executes a given shell command as \`bash -c \`. Command can start background processes using \`&\`. Command is executed as a subprocess that leads its own process group. Command process group can be terminated as \`kill -- -PGID\` or signaled as \`kill -s SIGNAL -- -PGID\`. -The following information is returned: + The following information is returned: -Command: Executed command. -Directory: Directory (relative to project root) where command was executed, or \`(root)\`. -Stdout: Output on stdout stream. Can be \`(empty)\` or partial on error and for any unwaited background processes. -Stderr: Output on stderr stream. Can be \`(empty)\` or partial on error and for any unwaited background processes. -Error: Error or \`(none)\` if no error was reported for the subprocess. -Exit Code: Exit code or \`(none)\` if terminated by signal. -Signal: Signal number or \`(none)\` if no signal was received. -Background PIDs: List of background processes started or \`(none)\`. -Process Group PGID: Process group started or \`(none)\``, + Command: Executed command. 
+ Directory: Directory (relative to project root) where command was executed, or \`(root)\`. + Stdout: Output on stdout stream. Can be \`(empty)\` or partial on error and for any unwaited background processes. + Stderr: Output on stderr stream. Can be \`(empty)\` or partial on error and for any unwaited background processes. + Error: Error or \`(none)\` if no error was reported for the subprocess. + Exit Code: Exit code or \`(none)\` if terminated by signal. + Signal: Signal number or \`(none)\` if no signal was received. + Background PIDs: List of background processes started or \`(none)\`. + Process Group PGID: Process group started or \`(none)\``, + Icon.Terminal, { type: Type.OBJECT, properties: { @@ -91,131 +101,8 @@ Process Group PGID: Process group started or \`(none)\``, return description; } - /** - * Extracts the root command from a given shell command string. - * This is used to identify the base command for permission checks. - * - * @param command The shell command string to parse - * @returns The root command name, or undefined if it cannot be determined - * @example getCommandRoot("ls -la /tmp") returns "ls" - * @example getCommandRoot("git status && npm test") returns "git" - */ - getCommandRoot(command: string): string | undefined { - return command - .trim() // remove leading and trailing whitespace - .replace(/[{}()]/g, '') // remove all grouping operators - .split(/[\s;&|]+/)[0] // split on any whitespace or separator or chaining operators and take first part - ?.split(/[/\\]/) // split on any path separators (or return undefined if previous line was undefined) - .pop(); // take last part and return command root (or undefined if previous line was empty) - } - - /** - * Determines whether a given shell command is allowed to execute based on - * the tool's configuration including allowlists and blocklists. 
- * - * @param command The shell command string to validate - * @returns An object with 'allowed' boolean and optional 'reason' string if not allowed - */ - isCommandAllowed(command: string): { allowed: boolean; reason?: string } { - // 0. Disallow command substitution - if (command.includes('$(')) { - return { - allowed: false, - reason: - 'Command substitution using $() is not allowed for security reasons', - }; - } - - const SHELL_TOOL_NAMES = [ShellTool.name, ShellTool.Name]; - - const normalize = (cmd: string): string => cmd.trim().replace(/\s+/g, ' '); - - /** - * Checks if a command string starts with a given prefix, ensuring it's a - * whole word match (i.e., followed by a space or it's an exact match). - * e.g., `isPrefixedBy('npm install', 'npm')` -> true - * e.g., `isPrefixedBy('npm', 'npm')` -> true - * e.g., `isPrefixedBy('npminstall', 'npm')` -> false - */ - const isPrefixedBy = (cmd: string, prefix: string): boolean => { - if (!cmd.startsWith(prefix)) { - return false; - } - return cmd.length === prefix.length || cmd[prefix.length] === ' '; - }; - - /** - * Extracts and normalizes shell commands from a list of tool strings. - * e.g., 'ShellTool("ls -l")' becomes 'ls -l' - */ - const extractCommands = (tools: string[]): string[] => - tools.flatMap((tool) => { - for (const toolName of SHELL_TOOL_NAMES) { - if (tool.startsWith(`${toolName}(`) && tool.endsWith(')')) { - return [normalize(tool.slice(toolName.length + 1, -1))]; - } - } - return []; - }); - - const coreTools = this.config.getCoreTools() || []; - const excludeTools = this.config.getExcludeTools() || []; - - // 1. Check if the shell tool is globally disabled. 
- if (SHELL_TOOL_NAMES.some((name) => excludeTools.includes(name))) { - return { - allowed: false, - reason: 'Shell tool is globally disabled in configuration', - }; - } - - const blockedCommands = new Set(extractCommands(excludeTools)); - const allowedCommands = new Set(extractCommands(coreTools)); - - const hasSpecificAllowedCommands = allowedCommands.size > 0; - const isWildcardAllowed = SHELL_TOOL_NAMES.some((name) => - coreTools.includes(name), - ); - - const commandsToValidate = command.split(/&&|\|\||\||;/).map(normalize); - - const blockedCommandsArr = [...blockedCommands]; - - for (const cmd of commandsToValidate) { - // 2. Check if the command is on the blocklist. - const isBlocked = blockedCommandsArr.some((blocked) => - isPrefixedBy(cmd, blocked), - ); - if (isBlocked) { - return { - allowed: false, - reason: `Command '${cmd}' is blocked by configuration`, - }; - } - - // 3. If in strict allow-list mode, check if the command is permitted. - const isStrictAllowlist = - hasSpecificAllowedCommands && !isWildcardAllowed; - const allowedCommandsArr = [...allowedCommands]; - if (isStrictAllowlist) { - const isAllowed = allowedCommandsArr.some((allowed) => - isPrefixedBy(cmd, allowed), - ); - if (!isAllowed) { - return { - allowed: false, - reason: `Command '${cmd}' is not in the allowed commands list`, - }; - } - } - } - - // 4. If all checks pass, the command is allowed. 
- return { allowed: true }; - } - validateToolParams(params: ShellToolParams): string | null { - const commandCheck = this.isCommandAllowed(params.command); + const commandCheck = isCommandAllowed(params.command, this.config); if (!commandCheck.allowed) { if (!commandCheck.reason) { console.error( @@ -232,7 +119,7 @@ Process Group PGID: Process group started or \`(none)\``, if (!params.command.trim()) { return 'Command cannot be empty.'; } - if (!this.getCommandRoot(params.command)) { + if (getCommandRoots(params.command).length === 0) { return 'Could not identify command root to obtain permission from user.'; } if (params.directory) { @@ -257,18 +144,25 @@ Process Group PGID: Process group started or \`(none)\``, if (this.validateToolParams(params)) { return false; // skip confirmation, execute call will fail immediately } - const rootCommand = this.getCommandRoot(params.command)!; // must be non-empty string post-validation - if (this.whitelist.has(rootCommand)) { + + const command = stripShellWrapper(params.command); + const rootCommands = [...new Set(getCommandRoots(command))]; + const commandsToConfirm = rootCommands.filter( + (command) => !this.allowlist.has(command), + ); + + if (commandsToConfirm.length === 0) { return false; // already approved and whitelisted } + const confirmationDetails: ToolExecuteConfirmationDetails = { type: 'exec', title: 'Confirm Shell Command', command: params.command, - rootCommand, + rootCommand: commandsToConfirm.join(', '), onConfirm: async (outcome: ToolConfirmationOutcome) => { if (outcome === ToolConfirmationOutcome.ProceedAlways) { - this.whitelist.add(rootCommand); + commandsToConfirm.forEach((command) => this.allowlist.add(command)); } }, }; @@ -277,21 +171,22 @@ Process Group PGID: Process group started or \`(none)\``, async execute( params: ShellToolParams, - abortSignal: AbortSignal, - updateOutput?: (chunk: string) => void, + signal: AbortSignal, + updateOutput?: (output: string) => void, ): Promise { - const 
validationError = this.validateToolParams(params); + const strippedCommand = stripShellWrapper(params.command); + const validationError = this.validateToolParams({ + ...params, + command: strippedCommand, + }); if (validationError) { return { - llmContent: [ - `Command rejected: ${params.command}`, - `Reason: ${validationError}`, - ].join('\n'), - returnDisplay: `Error: ${validationError}`, + llmContent: validationError, + returnDisplay: validationError, }; } - if (abortSignal.aborted) { + if (signal.aborted) { return { llmContent: 'Command was cancelled by user before it could start.', returnDisplay: 'Command cancelled by user.', @@ -304,200 +199,182 @@ Process Group PGID: Process group started or \`(none)\``, .toString('hex')}.tmp`; const tempFilePath = path.join(os.tmpdir(), tempFileName); - // pgrep is not available on Windows, so we can't get background PIDs - const command = isWindows - ? params.command - : (() => { - // wrap command to append subprocess pids (via pgrep) to temporary file - let command = params.command.trim(); - if (!command.endsWith('&')) command += ';'; - return `{ ${command} }; __code=$?; pgrep -g 0 >${tempFilePath} 2>&1; exit $__code;`; - })(); - - // spawn command in specified directory (or project root if not specified) - const shell = isWindows - ? spawn('cmd.exe', ['/c', command], { - stdio: ['ignore', 'pipe', 'pipe'], - // detached: true, // ensure subprocess starts its own process group (esp. in Linux) - cwd: path.resolve(this.config.getTargetDir(), params.directory || ''), - }) - : spawn('bash', ['-c', command], { - stdio: ['ignore', 'pipe', 'pipe'], - detached: true, // ensure subprocess starts its own process group (esp. 
in Linux) - cwd: path.resolve(this.config.getTargetDir(), params.directory || ''), - }); - - let exited = false; - let stdout = ''; - let output = ''; - let lastUpdateTime = Date.now(); - - const appendOutput = (str: string) => { - output += str; - if ( - updateOutput && - Date.now() - lastUpdateTime > OUTPUT_UPDATE_INTERVAL_MS - ) { - updateOutput(output); - lastUpdateTime = Date.now(); - } - }; - - shell.stdout.on('data', (data: Buffer) => { - // continue to consume post-exit for background processes - // removing listeners can overflow OS buffer and block subprocesses - // destroying (e.g. shell.stdout.destroy()) can terminate subprocesses via SIGPIPE - if (!exited) { - const str = stripAnsi(data.toString()); - stdout += str; - appendOutput(str); - } - }); - - let stderr = ''; - shell.stderr.on('data', (data: Buffer) => { - if (!exited) { - const str = stripAnsi(data.toString()); - stderr += str; - appendOutput(str); - } - }); - - let error: Error | null = null; - shell.on('error', (err: Error) => { - error = err; - // remove wrapper from user's command in error message - error.message = error.message.replace(command, params.command); - }); - - let code: number | null = null; - let processSignal: NodeJS.Signals | null = null; - const exitHandler = ( - _code: number | null, - _signal: NodeJS.Signals | null, - ) => { - exited = true; - code = _code; - processSignal = _signal; - }; - shell.on('exit', exitHandler); - - const abortHandler = async () => { - if (shell.pid && !exited) { - if (os.platform() === 'win32') { - // For Windows, use taskkill to kill the process tree - spawn('taskkill', ['/pid', shell.pid.toString(), '/f', '/t']); - } else { - try { - // attempt to SIGTERM process group (negative PID) - // fall back to SIGKILL (to group) after 200ms - process.kill(-shell.pid, 'SIGTERM'); - await new Promise((resolve) => setTimeout(resolve, 200)); - if (shell.pid && !exited) { - process.kill(-shell.pid, 'SIGKILL'); - } - } catch (_e) { - // if group kill fails, 
fall back to killing just the main process - try { - if (shell.pid) { - shell.kill('SIGKILL'); - } - } catch (_e) { - console.error(`failed to kill shell process ${shell.pid}: ${_e}`); - } - } - } - } - }; - abortSignal.addEventListener('abort', abortHandler); - - // wait for the shell to exit try { - await new Promise((resolve) => shell.on('exit', resolve)); + // pgrep is not available on Windows, so we can't get background PIDs + const commandToExecute = isWindows + ? strippedCommand + : (() => { + // wrap command to append subprocess pids (via pgrep) to temporary file + let command = strippedCommand.trim(); + if (!command.endsWith('&')) command += ';'; + return `{ ${command} }; __code=$?; pgrep -g 0 >${tempFilePath} 2>&1; exit $__code;`; + })(); + + const cwd = path.resolve( + this.config.getTargetDir(), + params.directory || '', + ); + + let cumulativeStdout = ''; + let cumulativeStderr = ''; + + let lastUpdateTime = Date.now(); + let isBinaryStream = false; + + const { result: resultPromise } = ShellExecutionService.execute( + commandToExecute, + cwd, + (event: ShellOutputEvent) => { + if (!updateOutput) { + return; + } + + let currentDisplayOutput = ''; + let shouldUpdate = false; + + switch (event.type) { + case 'data': + if (isBinaryStream) break; // Don't process text if we are in binary mode + if (event.stream === 'stdout') { + cumulativeStdout += event.chunk; + } else { + cumulativeStderr += event.chunk; + } + currentDisplayOutput = + cumulativeStdout + + (cumulativeStderr ? `\n${cumulativeStderr}` : ''); + if (Date.now() - lastUpdateTime > OUTPUT_UPDATE_INTERVAL_MS) { + shouldUpdate = true; + } + break; + case 'binary_detected': + isBinaryStream = true; + currentDisplayOutput = + '[Binary output detected. Halting stream...]'; + shouldUpdate = true; + break; + case 'binary_progress': + isBinaryStream = true; + currentDisplayOutput = `[Receiving binary output... 
${formatMemoryUsage( + event.bytesReceived, + )} received]`; + if (Date.now() - lastUpdateTime > OUTPUT_UPDATE_INTERVAL_MS) { + shouldUpdate = true; + } + break; + default: { + throw new Error('An unhandled ShellOutputEvent was found.'); + } + } + + if (shouldUpdate) { + updateOutput(currentDisplayOutput); + lastUpdateTime = Date.now(); + } + }, + signal, + ); + + const result = await resultPromise; + + const backgroundPIDs: number[] = []; + if (os.platform() !== 'win32') { + if (fs.existsSync(tempFilePath)) { + const pgrepLines = fs + .readFileSync(tempFilePath, 'utf8') + .split('\n') + .filter(Boolean); + for (const line of pgrepLines) { + if (!/^\d+$/.test(line)) { + console.error(`pgrep: ${line}`); + } + const pid = Number(line); + if (pid !== result.pid) { + backgroundPIDs.push(pid); + } + } + } else { + if (!signal.aborted) { + console.error('missing pgrep output'); + } + } + } + + let llmContent = ''; + if (result.aborted) { + llmContent = 'Command was cancelled by user before it could complete.'; + if (result.output.trim()) { + llmContent += ` Below is the output (on stdout and stderr) before it was cancelled:\n${result.output}`; + } else { + llmContent += ' There was no output before it was cancelled.'; + } + } else { + // Create a formatted error string for display, replacing the wrapper command + // with the user-facing command. + const finalError = result.error + ? result.error.message.replace(commandToExecute, params.command) + : '(none)'; + + llmContent = [ + `Command: ${params.command}`, + `Directory: ${params.directory || '(root)'}`, + `Stdout: ${result.stdout || '(empty)'}`, + `Stderr: ${result.stderr || '(empty)'}`, + `Error: ${finalError}`, // Use the cleaned error string. + `Exit Code: ${result.exitCode ?? '(none)'}`, + `Signal: ${result.signal ?? '(none)'}`, + `Background PIDs: ${ + backgroundPIDs.length ? backgroundPIDs.join(', ') : '(none)' + }`, + `Process Group PGID: ${result.pid ?? 
'(none)'}`, + ].join('\n'); + } + + let returnDisplayMessage = ''; + if (this.config.getDebugMode()) { + returnDisplayMessage = llmContent; + } else { + if (result.output.trim()) { + returnDisplayMessage = result.output; + } else { + if (result.aborted) { + returnDisplayMessage = 'Command cancelled by user.'; + } else if (result.signal) { + returnDisplayMessage = `Command terminated by signal: ${result.signal}`; + } else if (result.error) { + returnDisplayMessage = `Command failed: ${getErrorMessage( + result.error, + )}`; + } else if (result.exitCode !== null && result.exitCode !== 0) { + returnDisplayMessage = `Command exited with code: ${result.exitCode}`; + } + // If output is empty and command succeeded (code 0, no error/signal/abort), + // returnDisplayMessage will remain empty, which is fine. + } + } + + const summarizeConfig = this.config.getSummarizeToolOutputConfig(); + if (summarizeConfig && summarizeConfig[this.name]) { + const summary = await summarizeToolOutput( + llmContent, + this.config.getGeminiClient(), + signal, + summarizeConfig[this.name].tokenBudget, + ); + return { + llmContent: summary, + returnDisplay: returnDisplayMessage, + }; + } + + return { + llmContent, + returnDisplay: returnDisplayMessage, + }; } finally { - abortSignal.removeEventListener('abort', abortHandler); - } - - // parse pids (pgrep output) from temporary file and remove it - const backgroundPIDs: number[] = []; - if (os.platform() !== 'win32') { if (fs.existsSync(tempFilePath)) { - const pgrepLines = fs - .readFileSync(tempFilePath, 'utf8') - .split('\n') - .filter(Boolean); - for (const line of pgrepLines) { - if (!/^\d+$/.test(line)) { - console.error(`pgrep: ${line}`); - } - const pid = Number(line); - // exclude the shell subprocess pid - if (pid !== shell.pid) { - backgroundPIDs.push(pid); - } - } fs.unlinkSync(tempFilePath); - } else { - if (!abortSignal.aborted) { - console.error('missing pgrep output'); - } } } - - let llmContent = ''; - if (abortSignal.aborted) { 
- llmContent = 'Command was cancelled by user before it could complete.'; - if (output.trim()) { - llmContent += ` Below is the output (on stdout and stderr) before it was cancelled:\n${output}`; - } else { - llmContent += ' There was no output before it was cancelled.'; - } - } else { - llmContent = [ - `Command: ${params.command}`, - `Directory: ${params.directory || '(root)'}`, - `Stdout: ${stdout || '(empty)'}`, - `Stderr: ${stderr || '(empty)'}`, - `Error: ${error ?? '(none)'}`, - `Exit Code: ${code ?? '(none)'}`, - `Signal: ${processSignal ?? '(none)'}`, - `Background PIDs: ${backgroundPIDs.length ? backgroundPIDs.join(', ') : '(none)'}`, - `Process Group PGID: ${shell.pid ?? '(none)'}`, - ].join('\n'); - } - - let returnDisplayMessage = ''; - if (this.config.getDebugMode()) { - returnDisplayMessage = llmContent; - } else { - if (output.trim()) { - returnDisplayMessage = output; - } else { - // Output is empty, let's provide a reason if the command failed or was cancelled - if (abortSignal.aborted) { - returnDisplayMessage = 'Command cancelled by user.'; - } else if (processSignal) { - returnDisplayMessage = `Command terminated by signal: ${processSignal}`; - } else if (error) { - // If error is not null, it's an Error object (or other truthy value) - returnDisplayMessage = `Command failed: ${getErrorMessage(error)}`; - } else if (code !== null && code !== 0) { - returnDisplayMessage = `Command exited with code: ${code}`; - } - // If output is empty and command succeeded (code 0, no error/signal/abort), - // returnDisplayMessage will remain empty, which is fine. 
- } - } - - const summary = await summarizeToolOutput( - llmContent, - this.config.getGeminiClient(), - abortSignal, - ); - - return { - llmContent: summary, - returnDisplay: returnDisplayMessage, - }; } } diff --git a/packages/core/src/tools/tool-registry.test.ts b/packages/core/src/tools/tool-registry.test.ts index 853f6458c..b3fdd7a3e 100644 --- a/packages/core/src/tools/tool-registry.test.ts +++ b/packages/core/src/tools/tool-registry.test.ts @@ -14,14 +14,14 @@ import { afterEach, Mocked, } from 'vitest'; +import { Config, ConfigParameters, ApprovalMode } from '../config/config.js'; import { ToolRegistry, DiscoveredTool, sanitizeParameters, } from './tool-registry.js'; import { DiscoveredMCPTool } from './mcp-tool.js'; -import { Config, ConfigParameters, ApprovalMode } from '../config/config.js'; -import { BaseTool, ToolResult } from './tools.js'; +import { BaseTool, Icon, ToolResult } from './tools.js'; import { FunctionDeclaration, CallableTool, @@ -104,8 +104,12 @@ const createMockCallableTool = ( }); class MockTool extends BaseTool<{ param: string }, ToolResult> { - constructor(name = 'mock-tool', description = 'A mock tool') { - super(name, name, description, { + constructor( + name = 'mock-tool', + displayName = 'A mock tool', + description = 'A mock tool description', + ) { + super(name, displayName, description, Icon.Hammer, { type: Type.OBJECT, properties: { param: { type: Type.STRING }, @@ -174,42 +178,85 @@ describe('ToolRegistry', () => { }); }); + describe('getAllTools', () => { + it('should return all registered tools sorted alphabetically by displayName', () => { + // Register tools with displayNames in non-alphabetical order + const toolC = new MockTool('c-tool', 'Tool C'); + const toolA = new MockTool('a-tool', 'Tool A'); + const toolB = new MockTool('b-tool', 'Tool B'); + + toolRegistry.registerTool(toolC); + toolRegistry.registerTool(toolA); + toolRegistry.registerTool(toolB); + + const allTools = toolRegistry.getAllTools(); + const 
displayNames = allTools.map((t) => t.displayName); + + // Assert that the returned array is sorted by displayName + expect(displayNames).toEqual(['Tool A', 'Tool B', 'Tool C']); + }); + }); + describe('getToolsByServer', () => { it('should return an empty array if no tools match the server name', () => { toolRegistry.registerTool(new MockTool()); expect(toolRegistry.getToolsByServer('any-mcp-server')).toEqual([]); }); - it('should return only tools matching the server name', async () => { + it('should return only tools matching the server name, sorted by name', async () => { const server1Name = 'mcp-server-uno'; const server2Name = 'mcp-server-dos'; const mockCallable = {} as CallableTool; - const mcpTool1 = new DiscoveredMCPTool( + const mcpTool1_c = new DiscoveredMCPTool( mockCallable, server1Name, - 'server1Name__tool-on-server1', + 'zebra-tool', 'd1', {}, - 'tool-on-server1', ); + const mcpTool1_a = new DiscoveredMCPTool( + mockCallable, + server1Name, + 'apple-tool', + 'd2', + {}, + ); + const mcpTool1_b = new DiscoveredMCPTool( + mockCallable, + server1Name, + 'banana-tool', + 'd3', + {}, + ); + const mcpTool2 = new DiscoveredMCPTool( mockCallable, server2Name, - 'server2Name__tool-on-server2', - 'd2', - {}, 'tool-on-server2', + 'd4', + {}, ); const nonMcpTool = new MockTool('regular-tool'); - toolRegistry.registerTool(mcpTool1); + toolRegistry.registerTool(mcpTool1_c); + toolRegistry.registerTool(mcpTool1_a); + toolRegistry.registerTool(mcpTool1_b); toolRegistry.registerTool(mcpTool2); toolRegistry.registerTool(nonMcpTool); const toolsFromServer1 = toolRegistry.getToolsByServer(server1Name); - expect(toolsFromServer1).toHaveLength(1); - expect(toolsFromServer1[0].name).toBe(mcpTool1.name); + const toolNames = toolsFromServer1.map((t) => t.name); + // Assert that the array has the correct tools and is sorted by name + expect(toolsFromServer1).toHaveLength(3); + expect(toolNames).toEqual(['apple-tool', 'banana-tool', 'zebra-tool']); + + // Assert that all 
returned tools are indeed from the correct server + for (const tool of toolsFromServer1) { + expect((tool as DiscoveredMCPTool).serverName).toBe(server1Name); + } + + // Assert that the other server's tools are returned correctly const toolsFromServer2 = toolRegistry.getToolsByServer(server2Name); expect(toolsFromServer2).toHaveLength(1); expect(toolsFromServer2[0].name).toBe(mcpTool2.name); @@ -265,7 +312,7 @@ describe('ToolRegistry', () => { return mockChildProcess as any; }); - await toolRegistry.discoverTools(); + await toolRegistry.discoverAllTools(); const discoveredTool = toolRegistry.getTool('tool-with-bad-format'); expect(discoveredTool).toBeDefined(); @@ -291,12 +338,13 @@ describe('ToolRegistry', () => { }; vi.spyOn(config, 'getMcpServers').mockReturnValue(mcpServerConfigVal); - await toolRegistry.discoverTools(); + await toolRegistry.discoverAllTools(); expect(mockDiscoverMcpTools).toHaveBeenCalledWith( mcpServerConfigVal, undefined, toolRegistry, + undefined, false, ); }); @@ -313,12 +361,13 @@ describe('ToolRegistry', () => { }; vi.spyOn(config, 'getMcpServers').mockReturnValue(mcpServerConfigVal); - await toolRegistry.discoverTools(); + await toolRegistry.discoverAllTools(); expect(mockDiscoverMcpTools).toHaveBeenCalledWith( mcpServerConfigVal, undefined, toolRegistry, + undefined, false, ); }); diff --git a/packages/core/src/tools/tool-registry.ts b/packages/core/src/tools/tool-registry.ts index 6f9d5ad55..57627ee08 100644 --- a/packages/core/src/tools/tool-registry.ts +++ b/packages/core/src/tools/tool-registry.ts @@ -5,7 +5,7 @@ */ import { FunctionDeclaration, Schema, Type } from '@google/genai'; -import { Tool, ToolResult, BaseTool } from './tools.js'; +import { Tool, ToolResult, BaseTool, Icon } from './tools.js'; import { Config } from '../config/config.js'; import { spawn } from 'node:child_process'; import { StringDecoder } from 'node:string_decoder'; @@ -18,7 +18,7 @@ type ToolParams = Record; export class DiscoveredTool extends BaseTool { 
constructor( private readonly config: Config, - readonly name: string, + name: string, readonly description: string, readonly parameterSchema: Record, ) { @@ -44,6 +44,7 @@ Signal: Signal number or \`(none)\` if no signal was received. name, name, description, + Icon.Hammer, parameterSchema, false, // isOutputMarkdown false, // canUpdateOutput @@ -137,10 +138,14 @@ export class ToolRegistry { */ registerTool(tool: Tool): void { if (this.tools.has(tool.name)) { - // Decide on behavior: throw error, log warning, or allow overwrite - console.warn( - `Tool with name "${tool.name}" is already registered. Overwriting.`, - ); + if (tool instanceof DiscoveredMCPTool) { + tool = tool.asFullyQualifiedTool(); + } else { + // Decide on behavior: throw error, log warning, or allow overwrite + console.warn( + `Tool with name "${tool.name}" is already registered. Overwriting.`, + ); + } } this.tools.set(tool.name, tool); } @@ -148,8 +153,9 @@ export class ToolRegistry { /** * Discovers tools from project (if available and configured). * Can be called multiple times to update discovered tools. + * This will discover tools from the command line and from MCP servers. */ - async discoverTools(): Promise { + async discoverAllTools(): Promise { // remove any previously discovered tools for (const tool of this.tools.values()) { if (tool instanceof DiscoveredTool || tool instanceof DiscoveredMCPTool) { @@ -164,10 +170,59 @@ export class ToolRegistry { this.config.getMcpServers() ?? {}, this.config.getMcpServerCommand(), this, + this.config.getPromptRegistry(), this.config.getDebugMode(), ); } + /** + * Discovers tools from project (if available and configured). + * Can be called multiple times to update discovered tools. + * This will NOT discover tools from the command line, only from MCP servers. 
+ */ + async discoverMcpTools(): Promise { + // remove any previously discovered tools + for (const tool of this.tools.values()) { + if (tool instanceof DiscoveredMCPTool) { + this.tools.delete(tool.name); + } + } + + // discover tools using MCP servers, if configured + await discoverMcpTools( + this.config.getMcpServers() ?? {}, + this.config.getMcpServerCommand(), + this, + this.config.getPromptRegistry(), + this.config.getDebugMode(), + ); + } + + /** + * Discover or re-discover tools for a single MCP server. + * @param serverName - The name of the server to discover tools from. + */ + async discoverToolsForServer(serverName: string): Promise { + // Remove any previously discovered tools from this server + for (const [name, tool] of this.tools.entries()) { + if (tool instanceof DiscoveredMCPTool && tool.serverName === serverName) { + this.tools.delete(name); + } + } + + const mcpServers = this.config.getMcpServers() ?? {}; + const serverConfig = mcpServers[serverName]; + if (serverConfig) { + await discoverMcpTools( + { [serverName]: serverConfig }, + undefined, + this, + this.config.getPromptRegistry(), + this.config.getDebugMode(), + ); + } + } + private async discoverAndRegisterToolsFromCommand(): Promise { const discoveryCmd = this.config.getToolDiscoveryCommand(); if (!discoveryCmd) { @@ -308,7 +363,9 @@ export class ToolRegistry { * Returns an array of all registered and discovered tool instances. 
*/ getAllTools(): Tool[] { - return Array.from(this.tools.values()); + return Array.from(this.tools.values()).sort((a, b) => + a.displayName.localeCompare(b.displayName), + ); } /** @@ -321,7 +378,7 @@ export class ToolRegistry { serverTools.push(tool); } } - return serverTools; + return serverTools.sort((a, b) => a.name.localeCompare(b.name)); } /** @@ -379,6 +436,19 @@ function _sanitizeParameters(schema: Schema | undefined, visited: Set) { } } } + + // Handle enum values - Gemini API only allows enum for STRING type + if (schema.enum && Array.isArray(schema.enum)) { + if (schema.type !== Type.STRING) { + // If enum is present but type is not STRING, convert type to STRING + schema.type = Type.STRING; + } + // Filter out null and undefined values, then convert remaining values to strings for Gemini API compatibility + schema.enum = schema.enum + .filter((value: unknown) => value !== null && value !== undefined) + .map((value: unknown) => String(value)); + } + // Vertex AI only supports 'enum' and 'date-time' for STRING format. 
if (schema.type === Type.STRING) { if ( diff --git a/packages/core/src/tools/tools.ts b/packages/core/src/tools/tools.ts index 68739d0e5..0d7b402a8 100644 --- a/packages/core/src/tools/tools.ts +++ b/packages/core/src/tools/tools.ts @@ -28,6 +28,11 @@ export interface Tool< */ description: string; + /** + * The icon to display when interacting via ACP + */ + icon: Icon; + /** * Function declaration schema from @google/genai */ @@ -60,6 +65,13 @@ export interface Tool< */ getDescription(params: TParams): string; + /** + * Determines what file system paths the tool will affect + * @param params Parameters for the tool execution + * @returns A list of such paths + */ + toolLocations(params: TParams): ToolLocation[]; + /** * Determines if the tool should prompt for confirmation before execution * @param params Parameters for the tool execution @@ -97,12 +109,13 @@ export abstract class BaseTool< * @param description Description of what the tool does * @param isOutputMarkdown Whether the tool's output should be rendered as markdown * @param canUpdateOutput Whether the tool supports live (streaming) output - * @param parameterSchema JSON Schema defining the parameters + * @param parameterSchema Open API 3.0 Schema defining the parameters */ constructor( readonly name: string, readonly displayName: string, readonly description: string, + readonly icon: Icon, readonly parameterSchema: Schema, readonly isOutputMarkdown: boolean = true, readonly canUpdateOutput: boolean = false, @@ -158,6 +171,18 @@ export abstract class BaseTool< return Promise.resolve(false); } + /** + * Determines what file system paths the tool will affect + * @param params Parameters for the tool execution + * @returns A list of such paths + */ + toolLocations( + // eslint-disable-next-line @typescript-eslint/no-unused-vars + params: TParams, + ): ToolLocation[] { + return []; + } + /** * Abstract method to execute the tool with the given parameters * Must be implemented by derived classes @@ -199,6 
+224,8 @@ export type ToolResultDisplay = string | FileDiff; export interface FileDiff { fileDiff: string; fileName: string; + originalContent: string | null; + newContent: string; } export interface ToolEditConfirmationDetails { @@ -210,6 +237,8 @@ export interface ToolEditConfirmationDetails { ) => Promise; fileName: string; fileDiff: string; + originalContent: string | null; + newContent: string; isModifying?: boolean; } @@ -258,3 +287,21 @@ export enum ToolConfirmationOutcome { ModifyWithEditor = 'modify_with_editor', Cancel = 'cancel', } + +export enum Icon { + FileSearch = 'fileSearch', + Folder = 'folder', + Globe = 'globe', + Hammer = 'hammer', + LightBulb = 'lightBulb', + Pencil = 'pencil', + Regex = 'regex', + Terminal = 'terminal', +} + +export interface ToolLocation { + // Absolute path to the file + path: string; + // Which line (if known) + line?: number; +} diff --git a/packages/core/src/tools/web-fetch.test.ts b/packages/core/src/tools/web-fetch.test.ts index f4e3a6526..6be9d5046 100644 --- a/packages/core/src/tools/web-fetch.test.ts +++ b/packages/core/src/tools/web-fetch.test.ts @@ -13,6 +13,7 @@ describe('WebFetchTool', () => { const mockConfig = { getApprovalMode: vi.fn(), setApprovalMode: vi.fn(), + getProxy: vi.fn(), } as unknown as Config; describe('shouldConfirmExecute', () => { diff --git a/packages/core/src/tools/web-fetch.ts b/packages/core/src/tools/web-fetch.ts index 6e02df748..c96cae6ce 100644 --- a/packages/core/src/tools/web-fetch.ts +++ b/packages/core/src/tools/web-fetch.ts @@ -10,6 +10,7 @@ import { ToolResult, ToolCallConfirmationDetails, ToolConfirmationOutcome, + Icon, } from './tools.js'; import { Type } from '@google/genai'; import { getErrorMessage } from '../utils/errors.js'; @@ -17,9 +18,10 @@ import { Config, ApprovalMode } from '../config/config.js'; import { getResponseText } from '../utils/generateContentResponseUtilities.js'; import { fetchWithTimeout, isPrivateIp } from '../utils/fetch.js'; import { convert } from 
'html-to-text'; +import { ProxyAgent, setGlobalDispatcher } from 'undici'; const URL_FETCH_TIMEOUT_MS = 10000; -const MAX_CONTENT_LENGTH = 50000; +const MAX_CONTENT_LENGTH = 100000; // Helper function to extract URLs from a string function extractUrls(text: string): string[] { @@ -69,6 +71,7 @@ export class WebFetchTool extends BaseTool { WebFetchTool.Name, 'WebFetch', "Processes content from URL(s), including local and private network addresses (e.g., localhost), embedded in a prompt. Include up to 20 URLs and instructions (e.g., summarize, extract specific data) directly in the 'prompt' parameter.", + Icon.Globe, { properties: { prompt: { @@ -81,6 +84,10 @@ export class WebFetchTool extends BaseTool { type: Type.OBJECT, }, ); + const proxy = config.getProxy(); + if (proxy) { + setGlobalDispatcher(new ProxyAgent(proxy as string)); + } } private async executeFallback( @@ -94,70 +101,40 @@ export class WebFetchTool extends BaseTool { returnDisplay: 'Error: No URL found in the prompt for fallback.', }; } + // For now, we only support one URL for fallback + let url = urls[0]; - const results: string[] = []; - const processedUrls: string[] = []; - - // Process multiple URLs (up to 20 as mentioned in description) - const urlsToProcess = urls.slice(0, 20); - - for (const originalUrl of urlsToProcess) { - let url = originalUrl; - - // Convert GitHub blob URL to raw URL - if (url.includes('github.com') && url.includes('/blob/')) { - url = url - .replace('github.com', 'raw.githubusercontent.com') - .replace('/blob/', '/'); - } - - try { - const response = await fetchWithTimeout(url, URL_FETCH_TIMEOUT_MS); - if (!response.ok) { - throw new Error( - `Request failed with status code ${response.status} ${response.statusText}`, - ); - } - const html = await response.text(); - const textContent = convert(html, { - wordwrap: false, - selectors: [ - { selector: 'a', options: { ignoreHref: true } }, - { selector: 'img', format: 'skip' }, - ], - }).substring(0, MAX_CONTENT_LENGTH); - 
- results.push(`Content from ${url}:\n${textContent}`); - processedUrls.push(url); - } catch (e) { - const error = e as Error; - results.push(`Error fetching ${url}: ${error.message}`); - processedUrls.push(url); - } + // Convert GitHub blob URL to raw URL + if (url.includes('github.com') && url.includes('/blob/')) { + url = url + .replace('github.com', 'raw.githubusercontent.com') + .replace('/blob/', '/'); } try { - const geminiClient = this.config.getGeminiClient(); - const combinedContent = results.join('\n\n---\n\n'); - - // Ensure the total prompt length doesn't exceed limits - const maxPromptLength = 200000; // Leave room for system instructions - const promptPrefix = `The user requested the following: "${params.prompt}". - -I have fetched the content from the following URL(s). Please use this content to answer the user's request. Do not attempt to access the URL(s) again. - -`; - - let finalContent = combinedContent; - if (promptPrefix.length + combinedContent.length > maxPromptLength) { - const availableLength = maxPromptLength - promptPrefix.length - 100; // Leave some buffer - finalContent = - combinedContent.substring(0, availableLength) + - '\n\n[Content truncated due to length limits]'; + const response = await fetchWithTimeout(url, URL_FETCH_TIMEOUT_MS); + if (!response.ok) { + throw new Error( + `Request failed with status code ${response.status} ${response.statusText}`, + ); } + const html = await response.text(); + const textContent = convert(html, { + wordwrap: false, + selectors: [ + { selector: 'a', options: { ignoreHref: true } }, + { selector: 'img', format: 'skip' }, + ], + }).substring(0, MAX_CONTENT_LENGTH); - const fallbackPrompt = promptPrefix + finalContent; + const geminiClient = this.config.getGeminiClient(); + const fallbackPrompt = `The user requested the following: "${params.prompt}". +I was unable to access the URL directly. Instead, I have fetched the raw content of the page. 
Please use the following content to answer the user's request. Do not attempt to access the URL again. + +--- +${textContent} +---`; const result = await geminiClient.generateContent( [{ role: 'user', parts: [{ text: fallbackPrompt }] }], {}, @@ -166,11 +143,11 @@ I have fetched the content from the following URL(s). Please use this content to const resultText = getResponseText(result) || ''; return { llmContent: resultText, - returnDisplay: `Content from ${processedUrls.length} URL(s) processed using fallback fetch.`, + returnDisplay: `Content for ${url} processed using fallback fetch.`, }; } catch (e) { const error = e as Error; - const errorMessage = `Error during fallback processing: ${error.message}`; + const errorMessage = `Error during fallback fetch for ${url}: ${error.message}`; return { llmContent: `Error: ${errorMessage}`, returnDisplay: `Error: ${errorMessage}`, @@ -262,12 +239,6 @@ I have fetched the content from the following URL(s). Please use this content to } const geminiClient = this.config.getGeminiClient(); - const contentGenerator = geminiClient.getContentGenerator(); - - // Check if using OpenAI content generator - if so, use fallback - if (contentGenerator.constructor.name === 'OpenAIContentGenerator') { - return this.executeFallback(params, signal); - } try { const response = await geminiClient.generateContent( diff --git a/packages/core/src/tools/web-search.ts b/packages/core/src/tools/web-search.ts index 98be1f309..480cc7e76 100644 --- a/packages/core/src/tools/web-search.ts +++ b/packages/core/src/tools/web-search.ts @@ -5,7 +5,7 @@ */ import { GroundingMetadata } from '@google/genai'; -import { BaseTool, ToolResult } from './tools.js'; +import { BaseTool, Icon, ToolResult } from './tools.js'; import { Type } from '@google/genai'; import { SchemaValidator } from '../utils/schemaValidator.js'; @@ -69,6 +69,7 @@ export class WebSearchTool extends BaseTool< WebSearchTool.Name, 'GoogleSearch', 'Performs a web search using Google Search (via 
the Gemini API) and returns the results. This tool is useful for finding information on the internet based on a query.', + Icon.Globe, { type: Type.OBJECT, properties: { diff --git a/packages/core/src/tools/write-file.ts b/packages/core/src/tools/write-file.ts index a3756c698..ae37ca8a3 100644 --- a/packages/core/src/tools/write-file.ts +++ b/packages/core/src/tools/write-file.ts @@ -15,6 +15,7 @@ import { ToolEditConfirmationDetails, ToolConfirmationOutcome, ToolCallConfirmationDetails, + Icon, } from './tools.js'; import { Type } from '@google/genai'; import { SchemaValidator } from '../utils/schemaValidator.js'; @@ -72,9 +73,10 @@ export class WriteFileTool super( WriteFileTool.Name, 'WriteFile', - `Writes content to a specified file in the local filesystem. - + `Writes content to a specified file in the local filesystem. + The user has the ability to modify \`content\`. If modified, this will be stated in the response.`, + Icon.Pencil, { properties: { file_path: { @@ -184,6 +186,8 @@ export class WriteFileTool title: `Confirm Write: ${shortenPath(relativePath)}`, fileName, fileDiff, + originalContent, + newContent: correctedContent, onConfirm: async (outcome: ToolConfirmationOutcome) => { if (outcome === ToolConfirmationOutcome.ProceedAlways) { this.config.setApprovalMode(ApprovalMode.AUTO_EDIT); @@ -269,7 +273,12 @@ export class WriteFileTool ); } - const displayResult: FileDiff = { fileDiff, fileName }; + const displayResult: FileDiff = { + fileDiff, + fileName, + originalContent: correctedContentResult.originalContent, + newContent: correctedContentResult.correctedContent, + }; const lines = fileContent.split('\n').length; const mimetype = getSpecificMimeType(params.file_path); diff --git a/packages/core/src/utils/bfsFileSearch.test.ts b/packages/core/src/utils/bfsFileSearch.test.ts index 83e9b0b9e..63198a8de 100644 --- a/packages/core/src/utils/bfsFileSearch.test.ts +++ b/packages/core/src/utils/bfsFileSearch.test.ts @@ -4,145 +4,189 @@ * 
SPDX-License-Identifier: Apache-2.0 */ -import * as fs from 'fs'; -import { vi, describe, it, expect, beforeEach } from 'vitest'; +import { describe, it, expect, beforeEach, afterEach } from 'vitest'; import * as fsPromises from 'fs/promises'; -import * as gitUtils from './gitUtils.js'; +import * as path from 'path'; +import * as os from 'os'; import { bfsFileSearch } from './bfsFileSearch.js'; import { FileDiscoveryService } from '../services/fileDiscoveryService.js'; -vi.mock('fs'); -vi.mock('fs/promises'); -vi.mock('./gitUtils.js'); - -const createMockDirent = (name: string, isFile: boolean): fs.Dirent => { - const dirent = new fs.Dirent(); - dirent.name = name; - dirent.isFile = () => isFile; - dirent.isDirectory = () => !isFile; - return dirent; -}; - -// Type for the specific overload we're using -type ReaddirWithFileTypes = ( - path: fs.PathLike, - options: { withFileTypes: true }, -) => Promise; - describe('bfsFileSearch', () => { - beforeEach(() => { - vi.resetAllMocks(); + let testRootDir: string; + + async function createEmptyDir(...pathSegments: string[]) { + const fullPath = path.join(testRootDir, ...pathSegments); + await fsPromises.mkdir(fullPath, { recursive: true }); + return fullPath; + } + + async function createTestFile(content: string, ...pathSegments: string[]) { + const fullPath = path.join(testRootDir, ...pathSegments); + await fsPromises.mkdir(path.dirname(fullPath), { recursive: true }); + await fsPromises.writeFile(fullPath, content); + return fullPath; + } + + beforeEach(async () => { + testRootDir = await fsPromises.mkdtemp( + path.join(os.tmpdir(), 'bfs-file-search-test-'), + ); + }); + + afterEach(async () => { + await fsPromises.rm(testRootDir, { recursive: true, force: true }); }); it('should find a file in the root directory', async () => { - const mockFs = vi.mocked(fsPromises); - const mockReaddir = mockFs.readdir as unknown as ReaddirWithFileTypes; - vi.mocked(mockReaddir).mockResolvedValue([ - createMockDirent('file1.txt', 
true), - createMockDirent('file2.txt', true), - ]); - - const result = await bfsFileSearch('/test', { fileName: 'file1.txt' }); - expect(result).toEqual(['/test/file1.txt']); + const targetFilePath = await createTestFile('content', 'target.txt'); + const result = await bfsFileSearch(testRootDir, { fileName: 'target.txt' }); + expect(result).toEqual([targetFilePath]); }); - it('should find a file in a subdirectory', async () => { - const mockFs = vi.mocked(fsPromises); - const mockReaddir = mockFs.readdir as unknown as ReaddirWithFileTypes; - vi.mocked(mockReaddir).mockImplementation(async (dir) => { - if (dir === '/test') { - return [createMockDirent('subdir', false)]; - } - if (dir === '/test/subdir') { - return [createMockDirent('file1.txt', true)]; - } - return []; - }); - - const result = await bfsFileSearch('/test', { fileName: 'file1.txt' }); - expect(result).toEqual(['/test/subdir/file1.txt']); + it('should find a file in a nested directory', async () => { + const targetFilePath = await createTestFile( + 'content', + 'a', + 'b', + 'target.txt', + ); + const result = await bfsFileSearch(testRootDir, { fileName: 'target.txt' }); + expect(result).toEqual([targetFilePath]); }); - it('should ignore specified directories', async () => { - const mockFs = vi.mocked(fsPromises); - const mockReaddir = mockFs.readdir as unknown as ReaddirWithFileTypes; - vi.mocked(mockReaddir).mockImplementation(async (dir) => { - if (dir === '/test') { - return [ - createMockDirent('subdir1', false), - createMockDirent('subdir2', false), - ]; - } - if (dir === '/test/subdir1') { - return [createMockDirent('file1.txt', true)]; - } - if (dir === '/test/subdir2') { - return [createMockDirent('file1.txt', true)]; - } - return []; - }); - - const result = await bfsFileSearch('/test', { - fileName: 'file1.txt', - ignoreDirs: ['subdir2'], - }); - expect(result).toEqual(['/test/subdir1/file1.txt']); + it('should find multiple files with the same name', async () => { + const targetFilePath1 = 
await createTestFile('content1', 'a', 'target.txt'); + const targetFilePath2 = await createTestFile('content2', 'b', 'target.txt'); + const result = await bfsFileSearch(testRootDir, { fileName: 'target.txt' }); + result.sort(); + expect(result).toEqual([targetFilePath1, targetFilePath2].sort()); }); - it('should respect maxDirs limit', async () => { - const mockFs = vi.mocked(fsPromises); - const mockReaddir = mockFs.readdir as unknown as ReaddirWithFileTypes; - vi.mocked(mockReaddir).mockImplementation(async (dir) => { - if (dir === '/test') { - return [ - createMockDirent('subdir1', false), - createMockDirent('subdir2', false), - ]; - } - if (dir === '/test/subdir1') { - return [createMockDirent('file1.txt', true)]; - } - if (dir === '/test/subdir2') { - return [createMockDirent('file1.txt', true)]; - } - return []; - }); - - const result = await bfsFileSearch('/test', { - fileName: 'file1.txt', - maxDirs: 2, - }); - expect(result).toEqual(['/test/subdir1/file1.txt']); + it('should return an empty array if no file is found', async () => { + await createTestFile('content', 'other.txt'); + const result = await bfsFileSearch(testRootDir, { fileName: 'target.txt' }); + expect(result).toEqual([]); }); - it('should respect .gitignore files', async () => { - const mockFs = vi.mocked(fsPromises); - const mockGitUtils = vi.mocked(gitUtils); - mockGitUtils.isGitRepository.mockReturnValue(true); - const mockReaddir = mockFs.readdir as unknown as ReaddirWithFileTypes; - vi.mocked(mockReaddir).mockImplementation(async (dir) => { - if (dir === '/test') { - return [ - createMockDirent('.gitignore', true), - createMockDirent('subdir1', false), - createMockDirent('subdir2', false), - ]; - } - if (dir === '/test/subdir1') { - return [createMockDirent('file1.txt', true)]; - } - if (dir === '/test/subdir2') { - return [createMockDirent('file1.txt', true)]; - } - return []; + it('should ignore directories specified in ignoreDirs', async () => { + await createTestFile('content', 
'ignored', 'target.txt'); + const targetFilePath = await createTestFile( + 'content', + 'not-ignored', + 'target.txt', + ); + const result = await bfsFileSearch(testRootDir, { + fileName: 'target.txt', + ignoreDirs: ['ignored'], }); - vi.mocked(fs).readFileSync.mockReturnValue('subdir2'); + expect(result).toEqual([targetFilePath]); + }); - const fileService = new FileDiscoveryService('/test'); - const result = await bfsFileSearch('/test', { - fileName: 'file1.txt', - fileService, + it('should respect the maxDirs limit and not find the file', async () => { + await createTestFile('content', 'a', 'b', 'c', 'target.txt'); + const result = await bfsFileSearch(testRootDir, { + fileName: 'target.txt', + maxDirs: 3, + }); + expect(result).toEqual([]); + }); + + it('should respect the maxDirs limit and find the file', async () => { + const targetFilePath = await createTestFile( + 'content', + 'a', + 'b', + 'c', + 'target.txt', + ); + const result = await bfsFileSearch(testRootDir, { + fileName: 'target.txt', + maxDirs: 4, + }); + expect(result).toEqual([targetFilePath]); + }); + + describe('with FileDiscoveryService', () => { + let projectRoot: string; + + beforeEach(async () => { + projectRoot = await createEmptyDir('project'); + }); + + it('should ignore gitignored files', async () => { + await createEmptyDir('project', '.git'); + await createTestFile('node_modules/', 'project', '.gitignore'); + await createTestFile('content', 'project', 'node_modules', 'target.txt'); + const targetFilePath = await createTestFile( + 'content', + 'project', + 'not-ignored', + 'target.txt', + ); + + const fileService = new FileDiscoveryService(projectRoot); + const result = await bfsFileSearch(projectRoot, { + fileName: 'target.txt', + fileService, + fileFilteringOptions: { + respectGitIgnore: true, + respectGeminiIgnore: true, + }, + }); + + expect(result).toEqual([targetFilePath]); + }); + + it('should ignore geminiignored files', async () => { + await createTestFile('node_modules/', 
'project', '.geminiignore'); + await createTestFile('content', 'project', 'node_modules', 'target.txt'); + const targetFilePath = await createTestFile( + 'content', + 'project', + 'not-ignored', + 'target.txt', + ); + + const fileService = new FileDiscoveryService(projectRoot); + const result = await bfsFileSearch(projectRoot, { + fileName: 'target.txt', + fileService, + fileFilteringOptions: { + respectGitIgnore: false, + respectGeminiIgnore: true, + }, + }); + + expect(result).toEqual([targetFilePath]); + }); + + it('should not ignore files if respect flags are false', async () => { + await createEmptyDir('project', '.git'); + await createTestFile('node_modules/', 'project', '.gitignore'); + const target1 = await createTestFile( + 'content', + 'project', + 'node_modules', + 'target.txt', + ); + const target2 = await createTestFile( + 'content', + 'project', + 'not-ignored', + 'target.txt', + ); + + const fileService = new FileDiscoveryService(projectRoot); + const result = await bfsFileSearch(projectRoot, { + fileName: 'target.txt', + fileService, + fileFilteringOptions: { + respectGitIgnore: false, + respectGeminiIgnore: false, + }, + }); + + expect(result.sort()).toEqual([target1, target2].sort()); }); - expect(result).toEqual(['/test/subdir1/file1.txt']); }); }); diff --git a/packages/core/src/utils/bfsFileSearch.ts b/packages/core/src/utils/bfsFileSearch.ts index e552f520d..790521e01 100644 --- a/packages/core/src/utils/bfsFileSearch.ts +++ b/packages/core/src/utils/bfsFileSearch.ts @@ -8,7 +8,7 @@ import * as fs from 'fs/promises'; import * as path from 'path'; import { Dirent } from 'fs'; import { FileDiscoveryService } from '../services/fileDiscoveryService.js'; - +import { FileFilteringOptions } from '../config/config.js'; // Simple console logger for now. // TODO: Integrate with a more robust server-side logger. 
const logger = { @@ -22,6 +22,7 @@ interface BfsFileSearchOptions { maxDirs?: number; debug?: boolean; fileService?: FileDiscoveryService; + fileFilteringOptions?: FileFilteringOptions; } /** @@ -69,7 +70,13 @@ export async function bfsFileSearch( for (const entry of entries) { const fullPath = path.join(currentDir, entry.name); - if (fileService?.shouldGitIgnoreFile(fullPath)) { + if ( + fileService?.shouldIgnoreFile(fullPath, { + respectGitIgnore: options.fileFilteringOptions?.respectGitIgnore, + respectGeminiIgnore: + options.fileFilteringOptions?.respectGeminiIgnore, + }) + ) { continue; } diff --git a/packages/core/src/utils/browser.ts b/packages/core/src/utils/browser.ts new file mode 100644 index 000000000..a9b2b0139 --- /dev/null +++ b/packages/core/src/utils/browser.ts @@ -0,0 +1,53 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +/** + * Determines if we should attempt to launch a browser for authentication + * based on the user's environment. + * + * This is an adaptation of the logic from the Google Cloud SDK. + * @returns True if the tool should attempt to launch a browser. + */ +export function shouldAttemptBrowserLaunch(): boolean { + // A list of browser names that indicate we should not attempt to open a + // web browser for the user. + const browserBlocklist = ['www-browser']; + const browserEnv = process.env.BROWSER; + if (browserEnv && browserBlocklist.includes(browserEnv)) { + return false; + } + // Common environment variables used in CI/CD or other non-interactive shells. + if (process.env.CI || process.env.DEBIAN_FRONTEND === 'noninteractive') { + return false; + } + + // The presence of SSH_CONNECTION indicates a remote session. + // We should not attempt to launch a browser unless a display is explicitly available + // (checked below for Linux). + const isSSH = !!process.env.SSH_CONNECTION; + + // On Linux, the presence of a display server is a strong indicator of a GUI. 
+ if (process.platform === 'linux') { + // These are environment variables that can indicate a running compositor on + // Linux. + const displayVariables = ['DISPLAY', 'WAYLAND_DISPLAY', 'MIR_SOCKET']; + const hasDisplay = displayVariables.some((v) => !!process.env[v]); + if (!hasDisplay) { + return false; + } + } + + // If in an SSH session on a non-Linux OS (e.g., macOS), don't launch browser. + // The Linux case is handled above (it's allowed if DISPLAY is set). + if (isSSH && process.platform !== 'linux') { + return false; + } + + // For non-Linux OSes, we generally assume a GUI is available + // unless other signals (like SSH) suggest otherwise. + // The `open` command's error handling will catch final edge cases. + return true; +} diff --git a/packages/core/src/utils/editCorrector.test.ts b/packages/core/src/utils/editCorrector.test.ts index cf9008ef8..cd5883125 100644 --- a/packages/core/src/utils/editCorrector.test.ts +++ b/packages/core/src/utils/editCorrector.test.ts @@ -81,7 +81,7 @@ describe('editCorrector', () => { it('should correctly count occurrences when substring is longer', () => { expect(countOccurrences('abc', 'abcdef')).toBe(0); }); - it('should be case sensitive', () => { + it('should be case-sensitive', () => { expect(countOccurrences('abcABC', 'a')).toBe(1); expect(countOccurrences('abcABC', 'A')).toBe(1); }); diff --git a/packages/core/src/utils/editCorrector.ts b/packages/core/src/utils/editCorrector.ts index bff1c2f20..a770c491a 100644 --- a/packages/core/src/utils/editCorrector.ts +++ b/packages/core/src/utils/editCorrector.ts @@ -77,10 +77,10 @@ function getTimestampFromFunctionId(fcnId: string): number { /** * Will look through the gemini client history and determine when the most recent - * edit to a target file occured. If no edit happened, it will return -1 + * edit to a target file occurred. 
If no edit happened, it will return -1 * @param filePath the path to the file * @param client the geminiClient, so that we can get the history - * @returns a DateTime (as a number) of when the last edit occured, or -1 if no edit was found. + * @returns a DateTime (as a number) of when the last edit occurred, or -1 if no edit was found. */ async function findLastEditTimestamp( filePath: string, @@ -132,8 +132,8 @@ async function findLastEditTimestamp( // Use the "blunt hammer" approach to find the file path in the content. // Note that the tool response data is inconsistent in their formatting - // with successes and errors - so, we just check for the existance - // as the best guess to if error/failed occured with the response. + // with successes and errors - so, we just check for the existence + // as the best guess to if error/failed occurred with the response. const stringified = JSON.stringify(content); if ( !stringified.includes('Error') && // only applicable for functionResponse diff --git a/packages/core/src/utils/editor.test.ts b/packages/core/src/utils/editor.test.ts index 382e5e187..a86d6f59d 100644 --- a/packages/core/src/utils/editor.test.ts +++ b/packages/core/src/utils/editor.test.ts @@ -52,56 +52,99 @@ describe('editor utils', () => { describe('checkHasEditorType', () => { const testCases: Array<{ editor: EditorType; - command: string; - win32Command: string; + commands: string[]; + win32Commands: string[]; }> = [ - { editor: 'vscode', command: 'code', win32Command: 'code.cmd' }, - { editor: 'vscodium', command: 'codium', win32Command: 'codium.cmd' }, - { editor: 'windsurf', command: 'windsurf', win32Command: 'windsurf' }, - { editor: 'cursor', command: 'cursor', win32Command: 'cursor' }, - { editor: 'vim', command: 'vim', win32Command: 'vim' }, - { editor: 'neovim', command: 'nvim', win32Command: 'nvim' }, - { editor: 'zed', command: 'zed', win32Command: 'zed' }, + { editor: 'vscode', commands: ['code'], win32Commands: ['code.cmd'] }, + { + editor: 
'vscodium', + commands: ['codium'], + win32Commands: ['codium.cmd'], + }, + { + editor: 'windsurf', + commands: ['windsurf'], + win32Commands: ['windsurf'], + }, + { editor: 'cursor', commands: ['cursor'], win32Commands: ['cursor'] }, + { editor: 'vim', commands: ['vim'], win32Commands: ['vim'] }, + { editor: 'neovim', commands: ['nvim'], win32Commands: ['nvim'] }, + { editor: 'zed', commands: ['zed', 'zeditor'], win32Commands: ['zed'] }, ]; - for (const { editor, command, win32Command } of testCases) { + for (const { editor, commands, win32Commands } of testCases) { describe(`${editor}`, () => { - it(`should return true if "${command}" command exists on non-windows`, () => { + // Non-windows tests + it(`should return true if first command "${commands[0]}" exists on non-windows`, () => { Object.defineProperty(process, 'platform', { value: 'linux' }); (execSync as Mock).mockReturnValue( - Buffer.from(`/usr/bin/${command}`), + Buffer.from(`/usr/bin/${commands[0]}`), ); expect(checkHasEditorType(editor)).toBe(true); - expect(execSync).toHaveBeenCalledWith(`command -v ${command}`, { + expect(execSync).toHaveBeenCalledWith(`command -v ${commands[0]}`, { stdio: 'ignore', }); }); - it(`should return false if "${command}" command does not exist on non-windows`, () => { + if (commands.length > 1) { + it(`should return true if first command doesn't exist but second command "${commands[1]}" exists on non-windows`, () => { + Object.defineProperty(process, 'platform', { value: 'linux' }); + (execSync as Mock) + .mockImplementationOnce(() => { + throw new Error(); // first command not found + }) + .mockReturnValueOnce(Buffer.from(`/usr/bin/${commands[1]}`)); // second command found + expect(checkHasEditorType(editor)).toBe(true); + expect(execSync).toHaveBeenCalledTimes(2); + }); + } + + it(`should return false if none of the commands exist on non-windows`, () => { Object.defineProperty(process, 'platform', { value: 'linux' }); (execSync as Mock).mockImplementation(() => { - 
throw new Error(); + throw new Error(); // all commands not found }); expect(checkHasEditorType(editor)).toBe(false); + expect(execSync).toHaveBeenCalledTimes(commands.length); }); - it(`should return true if "${win32Command}" command exists on windows`, () => { + // Windows tests + it(`should return true if first command "${win32Commands[0]}" exists on windows`, () => { Object.defineProperty(process, 'platform', { value: 'win32' }); (execSync as Mock).mockReturnValue( - Buffer.from(`C:\\Program Files\\...\\${win32Command}`), + Buffer.from(`C:\\Program Files\\...\\${win32Commands[0]}`), ); expect(checkHasEditorType(editor)).toBe(true); - expect(execSync).toHaveBeenCalledWith(`where.exe ${win32Command}`, { - stdio: 'ignore', - }); + expect(execSync).toHaveBeenCalledWith( + `where.exe ${win32Commands[0]}`, + { + stdio: 'ignore', + }, + ); }); - it(`should return false if "${win32Command}" command does not exist on windows`, () => { + if (win32Commands.length > 1) { + it(`should return true if first command doesn't exist but second command "${win32Commands[1]}" exists on windows`, () => { + Object.defineProperty(process, 'platform', { value: 'win32' }); + (execSync as Mock) + .mockImplementationOnce(() => { + throw new Error(); // first command not found + }) + .mockReturnValueOnce( + Buffer.from(`C:\\Program Files\\...\\${win32Commands[1]}`), + ); // second command found + expect(checkHasEditorType(editor)).toBe(true); + expect(execSync).toHaveBeenCalledTimes(2); + }); + } + + it(`should return false if none of the commands exist on windows`, () => { Object.defineProperty(process, 'platform', { value: 'win32' }); (execSync as Mock).mockImplementation(() => { - throw new Error(); + throw new Error(); // all commands not found }); expect(checkHasEditorType(editor)).toBe(false); + expect(execSync).toHaveBeenCalledTimes(win32Commands.length); }); }); } @@ -110,31 +153,109 @@ describe('editor utils', () => { describe('getDiffCommand', () => { const guiEditors: Array<{ 
editor: EditorType; - command: string; - win32Command: string; + commands: string[]; + win32Commands: string[]; }> = [ - { editor: 'vscode', command: 'code', win32Command: 'code.cmd' }, - { editor: 'vscodium', command: 'codium', win32Command: 'codium.cmd' }, - { editor: 'windsurf', command: 'windsurf', win32Command: 'windsurf' }, - { editor: 'cursor', command: 'cursor', win32Command: 'cursor' }, - { editor: 'zed', command: 'zed', win32Command: 'zed' }, + { editor: 'vscode', commands: ['code'], win32Commands: ['code.cmd'] }, + { + editor: 'vscodium', + commands: ['codium'], + win32Commands: ['codium.cmd'], + }, + { + editor: 'windsurf', + commands: ['windsurf'], + win32Commands: ['windsurf'], + }, + { editor: 'cursor', commands: ['cursor'], win32Commands: ['cursor'] }, + { editor: 'zed', commands: ['zed', 'zeditor'], win32Commands: ['zed'] }, ]; - for (const { editor, command, win32Command } of guiEditors) { - it(`should return the correct command for ${editor} on non-windows`, () => { + for (const { editor, commands, win32Commands } of guiEditors) { + // Non-windows tests + it(`should use first command "${commands[0]}" when it exists on non-windows`, () => { Object.defineProperty(process, 'platform', { value: 'linux' }); + (execSync as Mock).mockReturnValue( + Buffer.from(`/usr/bin/${commands[0]}`), + ); const diffCommand = getDiffCommand('old.txt', 'new.txt', editor); expect(diffCommand).toEqual({ - command, + command: commands[0], args: ['--wait', '--diff', 'old.txt', 'new.txt'], }); }); - it(`should return the correct command for ${editor} on windows`, () => { - Object.defineProperty(process, 'platform', { value: 'win32' }); + if (commands.length > 1) { + it(`should use second command "${commands[1]}" when first doesn't exist on non-windows`, () => { + Object.defineProperty(process, 'platform', { value: 'linux' }); + (execSync as Mock) + .mockImplementationOnce(() => { + throw new Error(); // first command not found + }) + 
.mockReturnValueOnce(Buffer.from(`/usr/bin/${commands[1]}`)); // second command found + + const diffCommand = getDiffCommand('old.txt', 'new.txt', editor); + expect(diffCommand).toEqual({ + command: commands[1], + args: ['--wait', '--diff', 'old.txt', 'new.txt'], + }); + }); + } + + it(`should fall back to last command "${commands[commands.length - 1]}" when none exist on non-windows`, () => { + Object.defineProperty(process, 'platform', { value: 'linux' }); + (execSync as Mock).mockImplementation(() => { + throw new Error(); // all commands not found + }); + const diffCommand = getDiffCommand('old.txt', 'new.txt', editor); expect(diffCommand).toEqual({ - command: win32Command, + command: commands[commands.length - 1], + args: ['--wait', '--diff', 'old.txt', 'new.txt'], + }); + }); + + // Windows tests + it(`should use first command "${win32Commands[0]}" when it exists on windows`, () => { + Object.defineProperty(process, 'platform', { value: 'win32' }); + (execSync as Mock).mockReturnValue( + Buffer.from(`C:\\Program Files\\...\\${win32Commands[0]}`), + ); + const diffCommand = getDiffCommand('old.txt', 'new.txt', editor); + expect(diffCommand).toEqual({ + command: win32Commands[0], + args: ['--wait', '--diff', 'old.txt', 'new.txt'], + }); + }); + + if (win32Commands.length > 1) { + it(`should use second command "${win32Commands[1]}" when first doesn't exist on windows`, () => { + Object.defineProperty(process, 'platform', { value: 'win32' }); + (execSync as Mock) + .mockImplementationOnce(() => { + throw new Error(); // first command not found + }) + .mockReturnValueOnce( + Buffer.from(`C:\\Program Files\\...\\${win32Commands[1]}`), + ); // second command found + + const diffCommand = getDiffCommand('old.txt', 'new.txt', editor); + expect(diffCommand).toEqual({ + command: win32Commands[1], + args: ['--wait', '--diff', 'old.txt', 'new.txt'], + }); + }); + } + + it(`should fall back to last command "${win32Commands[win32Commands.length - 1]}" when none exist on 
windows`, () => { + Object.defineProperty(process, 'platform', { value: 'win32' }); + (execSync as Mock).mockImplementation(() => { + throw new Error(); // all commands not found + }); + + const diffCommand = getDiffCommand('old.txt', 'new.txt', editor); + expect(diffCommand).toEqual({ + command: win32Commands[win32Commands.length - 1], args: ['--wait', '--diff', 'old.txt', 'new.txt'], }); }); diff --git a/packages/core/src/utils/editor.ts b/packages/core/src/utils/editor.ts index 8d95a5931..2d65d5254 100644 --- a/packages/core/src/utils/editor.ts +++ b/packages/core/src/utils/editor.ts @@ -44,21 +44,28 @@ function commandExists(cmd: string): boolean { } } -const editorCommands: Record = { - vscode: { win32: 'code.cmd', default: 'code' }, - vscodium: { win32: 'codium.cmd', default: 'codium' }, - windsurf: { win32: 'windsurf', default: 'windsurf' }, - cursor: { win32: 'cursor', default: 'cursor' }, - vim: { win32: 'vim', default: 'vim' }, - neovim: { win32: 'nvim', default: 'nvim' }, - zed: { win32: 'zed', default: 'zed' }, +/** + * Editor command configurations for different platforms. + * Each editor can have multiple possible command names, listed in order of preference. + */ +const editorCommands: Record< + EditorType, + { win32: string[]; default: string[] } +> = { + vscode: { win32: ['code.cmd'], default: ['code'] }, + vscodium: { win32: ['codium.cmd'], default: ['codium'] }, + windsurf: { win32: ['windsurf'], default: ['windsurf'] }, + cursor: { win32: ['cursor'], default: ['cursor'] }, + vim: { win32: ['vim'], default: ['vim'] }, + neovim: { win32: ['nvim'], default: ['nvim'] }, + zed: { win32: ['zed'], default: ['zed', 'zeditor'] }, }; export function checkHasEditorType(editor: EditorType): boolean { const commandConfig = editorCommands[editor]; - const command = + const commands = process.platform === 'win32' ? 
commandConfig.win32 : commandConfig.default; - return commandExists(command); + return commands.some((cmd) => commandExists(cmd)); } export function allowEditorTypeInSandbox(editor: EditorType): boolean { @@ -92,8 +99,12 @@ export function getDiffCommand( return null; } const commandConfig = editorCommands[editor]; - const command = + const commands = process.platform === 'win32' ? commandConfig.win32 : commandConfig.default; + const command = + commands.slice(0, -1).find((cmd) => commandExists(cmd)) || + commands[commands.length - 1]; + switch (editor) { case 'vscode': case 'vscodium': diff --git a/packages/core/src/utils/errorReporting.test.ts b/packages/core/src/utils/errorReporting.test.ts index 8d1a5bab0..6b92c2499 100644 --- a/packages/core/src/utils/errorReporting.test.ts +++ b/packages/core/src/utils/errorReporting.test.ts @@ -4,36 +4,36 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { describe, it, expect, vi, beforeEach, afterEach, Mock } from 'vitest'; +import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; +import fs from 'node:fs/promises'; +import os from 'node:os'; +import path from 'node:path'; +import { reportError } from './errorReporting.js'; // Use a type alias for SpyInstance as it's not directly exported type SpyInstance = ReturnType; -import { reportError } from './errorReporting.js'; -import fs from 'node:fs/promises'; -import os from 'node:os'; - -// Mock dependencies -vi.mock('node:fs/promises'); -vi.mock('node:os'); describe('reportError', () => { let consoleErrorSpy: SpyInstance; - const MOCK_TMP_DIR = '/tmp'; + let testDir: string; const MOCK_TIMESTAMP = '2025-01-01T00-00-00-000Z'; - beforeEach(() => { + beforeEach(async () => { + // Create a temporary directory for logs + testDir = await fs.mkdtemp(path.join(os.tmpdir(), 'gemini-report-test-')); vi.resetAllMocks(); consoleErrorSpy = vi.spyOn(console, 'error').mockImplementation(() => {}); - (os.tmpdir as Mock).mockReturnValue(MOCK_TMP_DIR); 
vi.spyOn(Date.prototype, 'toISOString').mockReturnValue(MOCK_TIMESTAMP); }); - afterEach(() => { + afterEach(async () => { vi.restoreAllMocks(); + // Clean up the temporary directory + await fs.rm(testDir, { recursive: true, force: true }); }); const getExpectedReportPath = (type: string) => - `${MOCK_TMP_DIR}/gemini-client-error-${type}-${MOCK_TIMESTAMP}.json`; + path.join(testDir, `gemini-client-error-${type}-${MOCK_TIMESTAMP}.json`); it('should generate a report and log the path', async () => { const error = new Error('Test error'); @@ -43,22 +43,18 @@ describe('reportError', () => { const type = 'test-type'; const expectedReportPath = getExpectedReportPath(type); - (fs.writeFile as Mock).mockResolvedValue(undefined); + await reportError(error, baseMessage, context, type, testDir); - await reportError(error, baseMessage, context, type); + // Verify the file was written + const reportContent = await fs.readFile(expectedReportPath, 'utf-8'); + const parsedReport = JSON.parse(reportContent); - expect(os.tmpdir).toHaveBeenCalledTimes(1); - expect(fs.writeFile).toHaveBeenCalledWith( - expectedReportPath, - JSON.stringify( - { - error: { message: 'Test error', stack: error.stack }, - context, - }, - null, - 2, - ), - ); + expect(parsedReport).toEqual({ + error: { message: 'Test error', stack: 'Test stack' }, + context, + }); + + // Verify the console log expect(consoleErrorSpy).toHaveBeenCalledWith( `${baseMessage} Full report available at: ${expectedReportPath}`, ); @@ -70,19 +66,15 @@ describe('reportError', () => { const type = 'general'; const expectedReportPath = getExpectedReportPath(type); - (fs.writeFile as Mock).mockResolvedValue(undefined); - await reportError(error, baseMessage); + await reportError(error, baseMessage, undefined, type, testDir); + + const reportContent = await fs.readFile(expectedReportPath, 'utf-8'); + const parsedReport = JSON.parse(reportContent); + + expect(parsedReport).toEqual({ + error: { message: 'Test plain object error' }, + }); - 
expect(fs.writeFile).toHaveBeenCalledWith( - expectedReportPath, - JSON.stringify( - { - error: { message: 'Test plain object error' }, - }, - null, - 2, - ), - ); expect(consoleErrorSpy).toHaveBeenCalledWith( `${baseMessage} Full report available at: ${expectedReportPath}`, ); @@ -94,19 +86,15 @@ describe('reportError', () => { const type = 'general'; const expectedReportPath = getExpectedReportPath(type); - (fs.writeFile as Mock).mockResolvedValue(undefined); - await reportError(error, baseMessage); + await reportError(error, baseMessage, undefined, type, testDir); + + const reportContent = await fs.readFile(expectedReportPath, 'utf-8'); + const parsedReport = JSON.parse(reportContent); + + expect(parsedReport).toEqual({ + error: { message: 'Just a string error' }, + }); - expect(fs.writeFile).toHaveBeenCalledWith( - expectedReportPath, - JSON.stringify( - { - error: { message: 'Just a string error' }, - }, - null, - 2, - ), - ); expect(consoleErrorSpy).toHaveBeenCalledWith( `${baseMessage} Full report available at: ${expectedReportPath}`, ); @@ -115,22 +103,15 @@ describe('reportError', () => { it('should log fallback message if writing report fails', async () => { const error = new Error('Main error'); const baseMessage = 'Failed operation.'; - const writeError = new Error('Failed to write file'); const context = ['some context']; const type = 'general'; - const expectedReportPath = getExpectedReportPath(type); + const nonExistentDir = path.join(testDir, 'non-existent-dir'); - (fs.writeFile as Mock).mockRejectedValue(writeError); + await reportError(error, baseMessage, context, type, nonExistentDir); - await reportError(error, baseMessage, context, type); - - expect(fs.writeFile).toHaveBeenCalledWith( - expectedReportPath, - expect.any(String), - ); // It still tries to write expect(consoleErrorSpy).toHaveBeenCalledWith( `${baseMessage} Additionally, failed to write detailed error report:`, - writeError, + expect.any(Error), // The actual write error ); 
expect(consoleErrorSpy).toHaveBeenCalledWith( 'Original error that triggered report generation:', @@ -163,9 +144,7 @@ describe('reportError', () => { return originalJsonStringify(value, replacer, space); }); - (fs.writeFile as Mock).mockResolvedValue(undefined); // Mock for the minimal report write - - await reportError(error, baseMessage, context, type); + await reportError(error, baseMessage, context, type, testDir); expect(consoleErrorSpy).toHaveBeenCalledWith( `${baseMessage} Could not stringify report content (likely due to context):`, @@ -178,15 +157,14 @@ describe('reportError', () => { expect(consoleErrorSpy).toHaveBeenCalledWith( 'Original context could not be stringified or included in report.', ); - // Check that it attempts to write a minimal report - expect(fs.writeFile).toHaveBeenCalledWith( - expectedMinimalReportPath, - originalJsonStringify( - { error: { message: error.message, stack: error.stack } }, - null, - 2, - ), - ); + + // Check that it writes a minimal report + const reportContent = await fs.readFile(expectedMinimalReportPath, 'utf-8'); + const parsedReport = JSON.parse(reportContent); + expect(parsedReport).toEqual({ + error: { message: error.message, stack: error.stack }, + }); + expect(consoleErrorSpy).toHaveBeenCalledWith( `${baseMessage} Partial report (excluding context) available at: ${expectedMinimalReportPath}`, ); @@ -199,19 +177,15 @@ describe('reportError', () => { const type = 'general'; const expectedReportPath = getExpectedReportPath(type); - (fs.writeFile as Mock).mockResolvedValue(undefined); - await reportError(error, baseMessage, undefined, type); + await reportError(error, baseMessage, undefined, type, testDir); + + const reportContent = await fs.readFile(expectedReportPath, 'utf-8'); + const parsedReport = JSON.parse(reportContent); + + expect(parsedReport).toEqual({ + error: { message: 'Error without context', stack: 'No context stack' }, + }); - expect(fs.writeFile).toHaveBeenCalledWith( - expectedReportPath, - 
JSON.stringify( - { - error: { message: 'Error without context', stack: error.stack }, - }, - null, - 2, - ), - ); expect(consoleErrorSpy).toHaveBeenCalledWith( `${baseMessage} Full report available at: ${expectedReportPath}`, ); diff --git a/packages/core/src/utils/errorReporting.ts b/packages/core/src/utils/errorReporting.ts index 41ce34685..e7aa34691 100644 --- a/packages/core/src/utils/errorReporting.ts +++ b/packages/core/src/utils/errorReporting.ts @@ -27,10 +27,11 @@ export async function reportError( baseMessage: string, context?: Content[] | Record | unknown[], type = 'general', + reportingDir = os.tmpdir(), // for testing ): Promise { const timestamp = new Date().toISOString().replace(/[:.]/g, '-'); const reportFileName = `gemini-client-error-${type}-${timestamp}.json`; - const reportPath = path.join(os.tmpdir(), reportFileName); + const reportPath = path.join(reportingDir, reportFileName); let errorToReport: { message: string; stack?: string }; if (error instanceof Error) { diff --git a/packages/core/src/utils/errors.ts b/packages/core/src/utils/errors.ts index 4787c4397..a57186b28 100644 --- a/packages/core/src/utils/errors.ts +++ b/packages/core/src/utils/errors.ts @@ -4,7 +4,11 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { GaxiosError } from 'gaxios'; +interface GaxiosError { + response?: { + data?: unknown; + }; +} export function isNodeError(error: unknown): error is NodeJS.ErrnoException { return error instanceof Error && 'code' in error; @@ -33,8 +37,9 @@ interface ResponseData { } export function toFriendlyError(error: unknown): unknown { - if (error instanceof GaxiosError) { - const data = parseResponseData(error); + if (error && typeof error === 'object' && 'response' in error) { + const gaxiosError = error as GaxiosError; + const data = parseResponseData(gaxiosError); if (data.error && data.error.message && data.error.code) { switch (data.error.code) { case 400: @@ -58,5 +63,5 @@ function parseResponseData(error: GaxiosError): 
ResponseData { if (typeof error.response?.data === 'string') { return JSON.parse(error.response?.data) as ResponseData; } - return typeof error.response?.data as ResponseData; + return error.response?.data as ResponseData; } diff --git a/packages/core/src/utils/fileUtils.test.ts b/packages/core/src/utils/fileUtils.test.ts index 78a5ab4c4..b8e75561d 100644 --- a/packages/core/src/utils/fileUtils.test.ts +++ b/packages/core/src/utils/fileUtils.test.ts @@ -42,7 +42,7 @@ describe('fileUtils', () => { let testImageFilePath: string; let testPdfFilePath: string; let testBinaryFilePath: string; - let nonExistentFilePath: string; + let nonexistentFilePath: string; let directoryPath: string; beforeEach(() => { @@ -57,7 +57,7 @@ describe('fileUtils', () => { testImageFilePath = path.join(tempRootDir, 'image.png'); testPdfFilePath = path.join(tempRootDir, 'document.pdf'); testBinaryFilePath = path.join(tempRootDir, 'app.exe'); - nonExistentFilePath = path.join(tempRootDir, 'notfound.txt'); + nonexistentFilePath = path.join(tempRootDir, 'nonexistent.txt'); directoryPath = path.join(tempRootDir, 'subdir'); actualNodeFs.mkdirSync(directoryPath, { recursive: true }); // Ensure subdir exists @@ -142,41 +142,41 @@ describe('fileUtils', () => { } }); - it('should return false for an empty file', () => { + it('should return false for an empty file', async () => { actualNodeFs.writeFileSync(filePathForBinaryTest, ''); - expect(isBinaryFile(filePathForBinaryTest)).toBe(false); + expect(await isBinaryFile(filePathForBinaryTest)).toBe(false); }); - it('should return false for a typical text file', () => { + it('should return false for a typical text file', async () => { actualNodeFs.writeFileSync( filePathForBinaryTest, 'Hello, world!\nThis is a test file with normal text content.', ); - expect(isBinaryFile(filePathForBinaryTest)).toBe(false); + expect(await isBinaryFile(filePathForBinaryTest)).toBe(false); }); - it('should return true for a file with many null bytes', () => { + 
it('should return true for a file with many null bytes', async () => { const binaryContent = Buffer.from([ 0x48, 0x65, 0x00, 0x6c, 0x6f, 0x00, 0x00, 0x00, 0x00, 0x00, ]); // "He\0llo\0\0\0\0\0" actualNodeFs.writeFileSync(filePathForBinaryTest, binaryContent); - expect(isBinaryFile(filePathForBinaryTest)).toBe(true); + expect(await isBinaryFile(filePathForBinaryTest)).toBe(true); }); - it('should return true for a file with high percentage of non-printable ASCII', () => { + it('should return true for a file with high percentage of non-printable ASCII', async () => { const binaryContent = Buffer.from([ 0x41, 0x42, 0x01, 0x02, 0x03, 0x04, 0x05, 0x43, 0x44, 0x06, ]); // AB\x01\x02\x03\x04\x05CD\x06 actualNodeFs.writeFileSync(filePathForBinaryTest, binaryContent); - expect(isBinaryFile(filePathForBinaryTest)).toBe(true); + expect(await isBinaryFile(filePathForBinaryTest)).toBe(true); }); - it('should return false if file access fails (e.g., ENOENT)', () => { + it('should return false if file access fails (e.g., ENOENT)', async () => { // Ensure the file does not exist if (actualNodeFs.existsSync(filePathForBinaryTest)) { actualNodeFs.unlinkSync(filePathForBinaryTest); } - expect(isBinaryFile(filePathForBinaryTest)).toBe(false); + expect(await isBinaryFile(filePathForBinaryTest)).toBe(false); }); }); @@ -196,64 +196,64 @@ describe('fileUtils', () => { vi.restoreAllMocks(); // Restore spies on actualNodeFs }); - it('should detect typescript type by extension (ts)', () => { - expect(detectFileType('file.ts')).toBe('text'); - expect(detectFileType('file.test.ts')).toBe('text'); + it('should detect typescript type by extension (ts)', async () => { + expect(await detectFileType('file.ts')).toBe('text'); + expect(await detectFileType('file.test.ts')).toBe('text'); }); - it('should detect image type by extension (png)', () => { + it('should detect image type by extension (png)', async () => { mockMimeLookup.mockReturnValueOnce('image/png'); - 
expect(detectFileType('file.png')).toBe('image'); + expect(await detectFileType('file.png')).toBe('image'); }); - it('should detect image type by extension (jpeg)', () => { + it('should detect image type by extension (jpeg)', async () => { mockMimeLookup.mockReturnValueOnce('image/jpeg'); - expect(detectFileType('file.jpg')).toBe('image'); + expect(await detectFileType('file.jpg')).toBe('image'); }); - it('should detect svg type by extension', () => { - expect(detectFileType('image.svg')).toBe('svg'); - expect(detectFileType('image.icon.svg')).toBe('svg'); + it('should detect svg type by extension', async () => { + expect(await detectFileType('image.svg')).toBe('svg'); + expect(await detectFileType('image.icon.svg')).toBe('svg'); }); - it('should detect pdf type by extension', () => { + it('should detect pdf type by extension', async () => { mockMimeLookup.mockReturnValueOnce('application/pdf'); - expect(detectFileType('file.pdf')).toBe('pdf'); + expect(await detectFileType('file.pdf')).toBe('pdf'); }); - it('should detect audio type by extension', () => { + it('should detect audio type by extension', async () => { mockMimeLookup.mockReturnValueOnce('audio/mpeg'); - expect(detectFileType('song.mp3')).toBe('audio'); + expect(await detectFileType('song.mp3')).toBe('audio'); }); - it('should detect video type by extension', () => { + it('should detect video type by extension', async () => { mockMimeLookup.mockReturnValueOnce('video/mp4'); - expect(detectFileType('movie.mp4')).toBe('video'); + expect(await detectFileType('movie.mp4')).toBe('video'); }); - it('should detect known binary extensions as binary (e.g. .zip)', () => { + it('should detect known binary extensions as binary (e.g. .zip)', async () => { mockMimeLookup.mockReturnValueOnce('application/zip'); - expect(detectFileType('archive.zip')).toBe('binary'); + expect(await detectFileType('archive.zip')).toBe('binary'); }); - it('should detect known binary extensions as binary (e.g. 
.exe)', () => { + it('should detect known binary extensions as binary (e.g. .exe)', async () => { mockMimeLookup.mockReturnValueOnce('application/octet-stream'); // Common for .exe - expect(detectFileType('app.exe')).toBe('binary'); + expect(await detectFileType('app.exe')).toBe('binary'); }); - it('should use isBinaryFile for unknown extensions and detect as binary', () => { + it('should use isBinaryFile for unknown extensions and detect as binary', async () => { mockMimeLookup.mockReturnValueOnce(false); // Unknown mime type // Create a file that isBinaryFile will identify as binary const binaryContent = Buffer.from([ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, ]); actualNodeFs.writeFileSync(filePathForDetectTest, binaryContent); - expect(detectFileType(filePathForDetectTest)).toBe('binary'); + expect(await detectFileType(filePathForDetectTest)).toBe('binary'); }); - it('should default to text if mime type is unknown and content is not binary', () => { + it('should default to text if mime type is unknown and content is not binary', async () => { mockMimeLookup.mockReturnValueOnce(false); // Unknown mime type // filePathForDetectTest is already a text file by default from beforeEach - expect(detectFileType(filePathForDetectTest)).toBe('text'); + expect(await detectFileType(filePathForDetectTest)).toBe('text'); }); }); @@ -284,7 +284,7 @@ describe('fileUtils', () => { it('should handle file not found', async () => { const result = await processSingleFileContent( - nonExistentFilePath, + nonexistentFilePath, tempRootDir, ); expect(result.error).toContain('File not found'); diff --git a/packages/core/src/utils/fileUtils.ts b/packages/core/src/utils/fileUtils.ts index 33eda6ab8..6b5ce42c1 100644 --- a/packages/core/src/utils/fileUtils.ts +++ b/packages/core/src/utils/fileUtils.ts @@ -4,8 +4,8 @@ * SPDX-License-Identifier: Apache-2.0 */ -import fs from 'fs'; -import path from 'path'; +import fs from 'node:fs'; +import path from 'node:path'; import { 
PartUnion } from '@google/genai'; import mime from 'mime-types'; @@ -56,22 +56,24 @@ export function isWithinRoot( /** * Determines if a file is likely binary based on content sampling. * @param filePath Path to the file. - * @returns True if the file appears to be binary. + * @returns Promise that resolves to true if the file appears to be binary. */ -export function isBinaryFile(filePath: string): boolean { +export async function isBinaryFile(filePath: string): Promise { + let fileHandle: fs.promises.FileHandle | undefined; try { - const fd = fs.openSync(filePath, 'r'); + fileHandle = await fs.promises.open(filePath, 'r'); + // Read up to 4KB or file size, whichever is smaller - const fileSize = fs.fstatSync(fd).size; + const stats = await fileHandle.stat(); + const fileSize = stats.size; if (fileSize === 0) { // Empty file is not considered binary for content checking - fs.closeSync(fd); return false; } const bufferSize = Math.min(4096, fileSize); const buffer = Buffer.alloc(bufferSize); - const bytesRead = fs.readSync(fd, buffer, 0, buffer.length, 0); - fs.closeSync(fd); + const result = await fileHandle.read(buffer, 0, buffer.length, 0); + const bytesRead = result.bytesRead; if (bytesRead === 0) return false; @@ -84,21 +86,40 @@ export function isBinaryFile(filePath: string): boolean { } // If >30% non-printable characters, consider it binary return nonPrintableCount / bytesRead > 0.3; - } catch { + } catch (error) { + // Log error for debugging while maintaining existing behavior + console.warn( + `Failed to check if file is binary: ${filePath}`, + error instanceof Error ? error.message : String(error), + ); // If any error occurs (e.g. file not found, permissions), // treat as not binary here; let higher-level functions handle existence/access errors. 
return false; + } finally { + // Safely close the file handle if it was successfully opened + if (fileHandle) { + try { + await fileHandle.close(); + } catch (closeError) { + // Log close errors for debugging while continuing with cleanup + console.warn( + `Failed to close file handle for: ${filePath}`, + closeError instanceof Error ? closeError.message : String(closeError), + ); + // The important thing is that we attempted to clean up + } + } } } /** * Detects the type of file based on extension and content. * @param filePath Path to the file. - * @returns 'text', 'image', 'pdf', 'audio', 'video', or 'binary'. + * @returns Promise that resolves to 'text', 'image', 'pdf', 'audio', 'video', 'binary' or 'svg'. */ -export function detectFileType( +export async function detectFileType( filePath: string, -): 'text' | 'image' | 'pdf' | 'audio' | 'video' | 'binary' | 'svg' { +): Promise<'text' | 'image' | 'pdf' | 'audio' | 'video' | 'binary' | 'svg'> { const ext = path.extname(filePath).toLowerCase(); // The mimetype for "ts" is MPEG transport stream (a video format) but we want @@ -164,9 +185,9 @@ export function detectFileType( return 'binary'; } - // Fallback to content-based check if mime type wasn't conclusive for image/pdf + // Fall back to content-based check if mime type wasn't conclusive for image/pdf // and it's not a known binary extension. 
- if (isBinaryFile(filePath)) { + if (await isBinaryFile(filePath)) { return 'binary'; } @@ -227,7 +248,7 @@ export async function processSingleFileContent( ); } - const fileType = detectFileType(filePath); + const fileType = await detectFileType(filePath); const relativePathForDisplay = path .relative(rootDirectory, filePath) .replace(/\\/g, '/'); diff --git a/packages/core/src/utils/formatters.ts b/packages/core/src/utils/formatters.ts new file mode 100644 index 000000000..ab02160e7 --- /dev/null +++ b/packages/core/src/utils/formatters.ts @@ -0,0 +1,16 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +export const formatMemoryUsage = (bytes: number): string => { + const gb = bytes / (1024 * 1024 * 1024); + if (bytes < 1024 * 1024) { + return `${(bytes / 1024).toFixed(1)} KB`; + } + if (bytes < 1024 * 1024 * 1024) { + return `${(bytes / (1024 * 1024)).toFixed(1)} MB`; + } + return `${gb.toFixed(2)} GB`; +}; diff --git a/packages/core/src/utils/getFolderStructure.test.ts b/packages/core/src/utils/getFolderStructure.test.ts index 6bad81196..f7b67ae4a 100644 --- a/packages/core/src/utils/getFolderStructure.test.ts +++ b/packages/core/src/utils/getFolderStructure.test.ts @@ -4,341 +4,337 @@ * SPDX-License-Identifier: Apache-2.0 */ -/* eslint-disable @typescript-eslint/no-explicit-any */ -import { describe, it, expect, vi, beforeEach, afterEach, Mock } from 'vitest'; +import { describe, it, expect, beforeEach, afterEach } from 'vitest'; import fsPromises from 'fs/promises'; -import * as fs from 'fs'; -import { Dirent as FSDirent } from 'fs'; import * as nodePath from 'path'; +import * as os from 'os'; import { getFolderStructure } from './getFolderStructure.js'; -import * as gitUtils from './gitUtils.js'; import { FileDiscoveryService } from '../services/fileDiscoveryService.js'; - -vi.mock('path', async (importOriginal) => { - const original = (await importOriginal()) as typeof nodePath; - return { - ...original, - 
resolve: vi.fn((str) => str), - // Other path functions (basename, join, normalize, etc.) will use original implementation - }; -}); - -vi.mock('fs/promises'); -vi.mock('fs'); -vi.mock('./gitUtils.js'); - -// Import 'path' again here, it will be the mocked version import * as path from 'path'; -// Helper to create Dirent-like objects for mocking fs.readdir -const createDirent = (name: string, type: 'file' | 'dir'): FSDirent => ({ - name, - isFile: () => type === 'file', - isDirectory: () => type === 'dir', - isBlockDevice: () => false, - isCharacterDevice: () => false, - isSymbolicLink: () => false, - isFIFO: () => false, - isSocket: () => false, - path: '', - parentPath: '', -}); - describe('getFolderStructure', () => { - beforeEach(() => { - vi.resetAllMocks(); + let testRootDir: string; - // path.resolve is now a vi.fn() due to the top-level vi.mock. - // We ensure its implementation is set for each test (or rely on the one from vi.mock). - // vi.resetAllMocks() clears call history but not the implementation set by vi.fn() in vi.mock. - // If we needed to change it per test, we would do it here: - (path.resolve as Mock).mockImplementation((str: string) => str); + async function createEmptyDir(...pathSegments: string[]) { + const fullPath = path.join(testRootDir, ...pathSegments); + await fsPromises.mkdir(fullPath, { recursive: true }); + } - // Re-apply/define the mock implementation for fsPromises.readdir for each test - (fsPromises.readdir as Mock).mockImplementation( - async (dirPath: string | Buffer | URL) => { - // path.normalize here will use the mocked path module. - // Since normalize is spread from original, it should be the real one. 
- const normalizedPath = path.normalize(dirPath.toString()); - if (mockFsStructure[normalizedPath]) { - return mockFsStructure[normalizedPath]; - } - throw Object.assign( - new Error( - `ENOENT: no such file or directory, scandir '${normalizedPath}'`, - ), - { code: 'ENOENT' }, - ); - }, + async function createTestFile(...pathSegments: string[]) { + const fullPath = path.join(testRootDir, ...pathSegments); + await fsPromises.mkdir(path.dirname(fullPath), { recursive: true }); + await fsPromises.writeFile(fullPath, ''); + return fullPath; + } + + beforeEach(async () => { + testRootDir = await fsPromises.mkdtemp( + path.join(os.tmpdir(), 'folder-structure-test-'), ); }); - afterEach(() => { - vi.restoreAllMocks(); // Restores spies (like fsPromises.readdir) and resets vi.fn mocks (like path.resolve) + afterEach(async () => { + await fsPromises.rm(testRootDir, { recursive: true, force: true }); }); - const mockFsStructure: Record = { - '/testroot': [ - createDirent('file1.txt', 'file'), - createDirent('subfolderA', 'dir'), - createDirent('emptyFolder', 'dir'), - createDirent('.hiddenfile', 'file'), - createDirent('node_modules', 'dir'), - ], - '/testroot/subfolderA': [ - createDirent('fileA1.ts', 'file'), - createDirent('fileA2.js', 'file'), - createDirent('subfolderB', 'dir'), - ], - '/testroot/subfolderA/subfolderB': [createDirent('fileB1.md', 'file')], - '/testroot/emptyFolder': [], - '/testroot/node_modules': [createDirent('somepackage', 'dir')], - '/testroot/manyFilesFolder': Array.from({ length: 10 }, (_, i) => - createDirent(`file-${i}.txt`, 'file'), - ), - '/testroot/manyFolders': Array.from({ length: 5 }, (_, i) => - createDirent(`folder-${i}`, 'dir'), - ), - ...Array.from({ length: 5 }, (_, i) => ({ - [`/testroot/manyFolders/folder-${i}`]: [ - createDirent('child.txt', 'file'), - ], - })).reduce((acc, val) => ({ ...acc, ...val }), {}), - '/testroot/deepFolders': [createDirent('level1', 'dir')], - '/testroot/deepFolders/level1': [createDirent('level2', 
'dir')], - '/testroot/deepFolders/level1/level2': [createDirent('level3', 'dir')], - '/testroot/deepFolders/level1/level2/level3': [ - createDirent('file.txt', 'file'), - ], - }; - it('should return basic folder structure', async () => { - const structure = await getFolderStructure('/testroot/subfolderA'); - const expected = ` -Showing up to 20 items (files + folders). + await createTestFile('fileA1.ts'); + await createTestFile('fileA2.js'); + await createTestFile('subfolderB', 'fileB1.md'); -/testroot/subfolderA/ + const structure = await getFolderStructure(testRootDir); + expect(structure.trim()).toBe( + ` +Showing up to 200 items (files + folders). + +${testRootDir}${path.sep} ├───fileA1.ts ├───fileA2.js -└───subfolderB/ +└───subfolderB${path.sep} └───fileB1.md -`.trim(); - expect(structure.trim()).toBe(expected); +`.trim(), + ); }); it('should handle an empty folder', async () => { - const structure = await getFolderStructure('/testroot/emptyFolder'); - const expected = ` -Showing up to 20 items (files + folders). + const structure = await getFolderStructure(testRootDir); + expect(structure.trim()).toBe( + ` +Showing up to 200 items (files + folders). -/testroot/emptyFolder/ -`.trim(); - expect(structure.trim()).toBe(expected.trim()); +${testRootDir}${path.sep} +` + .trim() + .trim(), + ); }); it('should ignore folders specified in ignoredFolders (default)', async () => { - const structure = await getFolderStructure('/testroot'); - const expected = ` -Showing up to 20 items (files + folders). Folders or files indicated with ... contain more items not shown, were ignored, or the display limit (20 items) was reached. 
+ await createTestFile('.hiddenfile'); + await createTestFile('file1.txt'); + await createEmptyDir('emptyFolder'); + await createTestFile('node_modules', 'somepackage', 'index.js'); + await createTestFile('subfolderA', 'fileA1.ts'); + await createTestFile('subfolderA', 'fileA2.js'); + await createTestFile('subfolderA', 'subfolderB', 'fileB1.md'); -/testroot/ + const structure = await getFolderStructure(testRootDir); + expect(structure.trim()).toBe( + ` +Showing up to 200 items (files + folders). Folders or files indicated with ... contain more items not shown, were ignored, or the display limit (200 items) was reached. + +${testRootDir}${path.sep} ├───.hiddenfile ├───file1.txt -├───emptyFolder/ -├───node_modules/... -└───subfolderA/ +├───emptyFolder${path.sep} +├───node_modules${path.sep}... +└───subfolderA${path.sep} ├───fileA1.ts ├───fileA2.js - └───subfolderB/ + └───subfolderB${path.sep} └───fileB1.md -`.trim(); - expect(structure.trim()).toBe(expected); +`.trim(), + ); }); it('should ignore folders specified in custom ignoredFolders', async () => { - const structure = await getFolderStructure('/testroot', { + await createTestFile('.hiddenfile'); + await createTestFile('file1.txt'); + await createEmptyDir('emptyFolder'); + await createTestFile('node_modules', 'somepackage', 'index.js'); + await createTestFile('subfolderA', 'fileA1.ts'); + + const structure = await getFolderStructure(testRootDir, { ignoredFolders: new Set(['subfolderA', 'node_modules']), }); const expected = ` -Showing up to 20 items (files + folders). Folders or files indicated with ... contain more items not shown, were ignored, or the display limit (20 items) was reached. +Showing up to 200 items (files + folders). Folders or files indicated with ... contain more items not shown, were ignored, or the display limit (200 items) was reached. -/testroot/ +${testRootDir}${path.sep} ├───.hiddenfile ├───file1.txt -├───emptyFolder/ -├───node_modules/... -└───subfolderA/... 
+├───emptyFolder${path.sep} +├───node_modules${path.sep}... +└───subfolderA${path.sep}... `.trim(); expect(structure.trim()).toBe(expected); }); it('should filter files by fileIncludePattern', async () => { - const structure = await getFolderStructure('/testroot/subfolderA', { + await createTestFile('fileA1.ts'); + await createTestFile('fileA2.js'); + await createTestFile('subfolderB', 'fileB1.md'); + + const structure = await getFolderStructure(testRootDir, { fileIncludePattern: /\.ts$/, }); const expected = ` -Showing up to 20 items (files + folders). +Showing up to 200 items (files + folders). -/testroot/subfolderA/ +${testRootDir}${path.sep} ├───fileA1.ts -└───subfolderB/ +└───subfolderB${path.sep} `.trim(); expect(structure.trim()).toBe(expected); }); it('should handle maxItems truncation for files within a folder', async () => { - const structure = await getFolderStructure('/testroot/subfolderA', { + await createTestFile('fileA1.ts'); + await createTestFile('fileA2.js'); + await createTestFile('subfolderB', 'fileB1.md'); + + const structure = await getFolderStructure(testRootDir, { maxItems: 3, }); const expected = ` Showing up to 3 items (files + folders). -/testroot/subfolderA/ +${testRootDir}${path.sep} ├───fileA1.ts ├───fileA2.js -└───subfolderB/ +└───subfolderB${path.sep} `.trim(); expect(structure.trim()).toBe(expected); }); it('should handle maxItems truncation for subfolders', async () => { - const structure = await getFolderStructure('/testroot/manyFolders', { + for (let i = 0; i < 5; i++) { + await createTestFile(`folder-${i}`, 'child.txt'); + } + + const structure = await getFolderStructure(testRootDir, { maxItems: 4, }); const expectedRevised = ` Showing up to 4 items (files + folders). Folders or files indicated with ... contain more items not shown, were ignored, or the display limit (4 items) was reached. 
-/testroot/manyFolders/ -├───folder-0/ -├───folder-1/ -├───folder-2/ -├───folder-3/ +${testRootDir}${path.sep} +├───folder-0${path.sep} +├───folder-1${path.sep} +├───folder-2${path.sep} +├───folder-3${path.sep} └───... `.trim(); expect(structure.trim()).toBe(expectedRevised); }); it('should handle maxItems that only allows the root folder itself', async () => { - const structure = await getFolderStructure('/testroot/subfolderA', { + await createTestFile('fileA1.ts'); + await createTestFile('fileA2.ts'); + await createTestFile('subfolderB', 'fileB1.ts'); + + const structure = await getFolderStructure(testRootDir, { maxItems: 1, }); - const expectedRevisedMax1 = ` + const expected = ` Showing up to 1 items (files + folders). Folders or files indicated with ... contain more items not shown, were ignored, or the display limit (1 items) was reached. -/testroot/subfolderA/ +${testRootDir}${path.sep} ├───fileA1.ts ├───... └───... `.trim(); - expect(structure.trim()).toBe(expectedRevisedMax1); + expect(structure.trim()).toBe(expected); }); it('should handle non-existent directory', async () => { - // Temporarily make fsPromises.readdir throw ENOENT for this specific path - const originalReaddir = fsPromises.readdir; - (fsPromises.readdir as Mock).mockImplementation( - async (p: string | Buffer | URL) => { - if (p === '/nonexistent') { - throw Object.assign(new Error('ENOENT'), { code: 'ENOENT' }); - } - return originalReaddir(p); - }, - ); - - const structure = await getFolderStructure('/nonexistent'); + const nonExistentPath = path.join(testRootDir, 'non-existent'); + const structure = await getFolderStructure(nonExistentPath); expect(structure).toContain( - 'Error: Could not read directory "/nonexistent"', + `Error: Could not read directory "${nonExistentPath}". 
Check path and permissions.`, ); }); it('should handle deep folder structure within limits', async () => { - const structure = await getFolderStructure('/testroot/deepFolders', { + await createTestFile('level1', 'level2', 'level3', 'file.txt'); + + const structure = await getFolderStructure(testRootDir, { maxItems: 10, }); const expected = ` Showing up to 10 items (files + folders). -/testroot/deepFolders/ -└───level1/ - └───level2/ - └───level3/ +${testRootDir}${path.sep} +└───level1${path.sep} + └───level2${path.sep} + └───level3${path.sep} └───file.txt `.trim(); expect(structure.trim()).toBe(expected); }); it('should truncate deep folder structure if maxItems is small', async () => { - const structure = await getFolderStructure('/testroot/deepFolders', { + await createTestFile('level1', 'level2', 'level3', 'file.txt'); + + const structure = await getFolderStructure(testRootDir, { maxItems: 3, }); const expected = ` Showing up to 3 items (files + folders). -/testroot/deepFolders/ -└───level1/ - └───level2/ - └───level3/ +${testRootDir}${path.sep} +└───level1${path.sep} + └───level2${path.sep} + └───level3${path.sep} `.trim(); expect(structure.trim()).toBe(expected); }); -}); -describe('getFolderStructure gitignore', () => { - beforeEach(() => { - vi.resetAllMocks(); - (path.resolve as Mock).mockImplementation((str: string) => str); - - (fsPromises.readdir as Mock).mockImplementation(async (p) => { - const path = p.toString(); - if (path === '/test/project') { - return [ - createDirent('file1.txt', 'file'), - createDirent('node_modules', 'dir'), - createDirent('ignored.txt', 'file'), - createDirent('.qwen', 'dir'), - ] as any; - } - if (path === '/test/project/node_modules') { - return [createDirent('some-package', 'dir')] as any; - } - if (path === '/test/project/.gemini') { - return [ - createDirent('config.yaml', 'file'), - createDirent('logs.json', 'file'), - ] as any; - } - return []; + describe('with gitignore', () => { + beforeEach(async () => { + await 
fsPromises.mkdir(path.join(testRootDir, '.git'), { + recursive: true, + }); }); - (fs.readFileSync as Mock).mockImplementation((p) => { - const path = p.toString(); - if (path === '/test/project/.gitignore') { - return 'ignored.txt\nnode_modules/\n.qwen/\n!/.qwen/config.yaml'; - } - return ''; + it('should ignore files and folders specified in .gitignore', async () => { + await fsPromises.writeFile( + nodePath.join(testRootDir, '.gitignore'), + 'ignored.txt\nnode_modules/\n.gemini/*\n!/.gemini/config.yaml', + ); + await createTestFile('file1.txt'); + await createTestFile('node_modules', 'some-package', 'index.js'); + await createTestFile('ignored.txt'); + await createTestFile('.gemini', 'config.yaml'); + await createTestFile('.gemini', 'logs.json'); + + const fileService = new FileDiscoveryService(testRootDir); + const structure = await getFolderStructure(testRootDir, { + fileService, + }); + + expect(structure).not.toContain('ignored.txt'); + expect(structure).toContain(`node_modules${path.sep}...`); + expect(structure).not.toContain('logs.json'); + expect(structure).toContain('config.yaml'); + expect(structure).toContain('file1.txt'); }); - vi.mocked(gitUtils.isGitRepository).mockReturnValue(true); + it('should not ignore files if respectGitIgnore is false', async () => { + await fsPromises.writeFile( + nodePath.join(testRootDir, '.gitignore'), + 'ignored.txt', + ); + await createTestFile('file1.txt'); + await createTestFile('ignored.txt'); + + const fileService = new FileDiscoveryService(testRootDir); + const structure = await getFolderStructure(testRootDir, { + fileService, + fileFilteringOptions: { + respectGeminiIgnore: false, + respectGitIgnore: false, + }, + }); + + expect(structure).toContain('ignored.txt'); + expect(structure).toContain('file1.txt'); + }); }); - it('should ignore files and folders specified in .gitignore', async () => { - const fileService = new FileDiscoveryService('/test/project'); - const structure = await 
getFolderStructure('/test/project', { - fileService, - }); - expect(structure).not.toContain('ignored.txt'); - expect(structure).toContain('node_modules/...'); - expect(structure).not.toContain('logs.json'); - }); + describe('with geminiignore', () => { + it('should ignore geminiignore files by default', async () => { + await fsPromises.writeFile( + nodePath.join(testRootDir, '.geminiignore'), + 'ignored.txt\nnode_modules/\n.gemini/\n!/.gemini/config.yaml', + ); + await createTestFile('file1.txt'); + await createTestFile('node_modules', 'some-package', 'index.js'); + await createTestFile('ignored.txt'); + await createTestFile('.gemini', 'config.yaml'); + await createTestFile('.gemini', 'logs.json'); - it('should not ignore files if respectGitIgnore is false', async () => { - const fileService = new FileDiscoveryService('/test/project'); - const structure = await getFolderStructure('/test/project', { - fileService, - respectGitIgnore: false, + const fileService = new FileDiscoveryService(testRootDir); + const structure = await getFolderStructure(testRootDir, { + fileService, + }); + expect(structure).not.toContain('ignored.txt'); + expect(structure).toContain(`node_modules${path.sep}...`); + expect(structure).not.toContain('logs.json'); + }); + + it('should not ignore files if respectGeminiIgnore is false', async () => { + await fsPromises.writeFile( + nodePath.join(testRootDir, '.geminiignore'), + 'ignored.txt\nnode_modules/\n.gemini/\n!/.gemini/config.yaml', + ); + await createTestFile('file1.txt'); + await createTestFile('node_modules', 'some-package', 'index.js'); + await createTestFile('ignored.txt'); + await createTestFile('.gemini', 'config.yaml'); + await createTestFile('.gemini', 'logs.json'); + + const fileService = new FileDiscoveryService(testRootDir); + const structure = await getFolderStructure(testRootDir, { + fileService, + fileFilteringOptions: { + respectGeminiIgnore: false, + respectGitIgnore: true, // Explicitly disable gemini ignore only + }, + 
}); + expect(structure).toContain('ignored.txt'); + // node_modules is still ignored by default + expect(structure).toContain(`node_modules${path.sep}...`); }); - expect(structure).toContain('ignored.txt'); - // node_modules is still ignored by default - expect(structure).toContain('node_modules/...'); }); }); diff --git a/packages/core/src/utils/getFolderStructure.ts b/packages/core/src/utils/getFolderStructure.ts index 27424bce5..60c539b54 100644 --- a/packages/core/src/utils/getFolderStructure.ts +++ b/packages/core/src/utils/getFolderStructure.ts @@ -9,8 +9,10 @@ import { Dirent } from 'fs'; import * as path from 'path'; import { getErrorMessage, isNodeError } from './errors.js'; import { FileDiscoveryService } from '../services/fileDiscoveryService.js'; +import { FileFilteringOptions } from '../config/config.js'; +import { DEFAULT_FILE_FILTERING_OPTIONS } from '../config/config.js'; -const MAX_ITEMS = 20; +const MAX_ITEMS = 200; const TRUNCATION_INDICATOR = '...'; const DEFAULT_IGNORED_FOLDERS = new Set(['node_modules', '.git', 'dist']); @@ -18,7 +20,7 @@ const DEFAULT_IGNORED_FOLDERS = new Set(['node_modules', '.git', 'dist']); /** Options for customizing folder structure retrieval. */ interface FolderStructureOptions { - /** Maximum number of files and folders combined to display. Defaults to 20. */ + /** Maximum number of files and folders combined to display. Defaults to 200. */ maxItems?: number; /** Set of folder names to ignore completely. Case-sensitive. */ ignoredFolders?: Set; @@ -26,16 +28,16 @@ interface FolderStructureOptions { fileIncludePattern?: RegExp; /** For filtering files. */ fileService?: FileDiscoveryService; - /** Whether to use .gitignore patterns. */ - respectGitIgnore?: boolean; + /** File filtering ignore options. 
*/ + fileFilteringOptions?: FileFilteringOptions; } - // Define a type for the merged options where fileIncludePattern remains optional type MergedFolderStructureOptions = Required< Omit > & { fileIncludePattern?: RegExp; fileService?: FileDiscoveryService; + fileFilteringOptions?: FileFilteringOptions; }; /** Represents the full, unfiltered information about a folder and its contents. */ @@ -126,8 +128,13 @@ async function readFullStructure( } const fileName = entry.name; const filePath = path.join(currentPath, fileName); - if (options.respectGitIgnore && options.fileService) { - if (options.fileService.shouldGitIgnoreFile(filePath)) { + if (options.fileService) { + const shouldIgnore = + (options.fileFilteringOptions.respectGitIgnore && + options.fileService.shouldGitIgnoreFile(filePath)) || + (options.fileFilteringOptions.respectGeminiIgnore && + options.fileService.shouldGeminiIgnoreFile(filePath)); + if (shouldIgnore) { continue; } } @@ -160,14 +167,16 @@ async function readFullStructure( const subFolderName = entry.name; const subFolderPath = path.join(currentPath, subFolderName); - let isIgnoredByGit = false; - if (options.respectGitIgnore && options.fileService) { - if (options.fileService.shouldGitIgnoreFile(subFolderPath)) { - isIgnoredByGit = true; - } + let isIgnored = false; + if (options.fileService) { + isIgnored = + (options.fileFilteringOptions.respectGitIgnore && + options.fileService.shouldGitIgnoreFile(subFolderPath)) || + (options.fileFilteringOptions.respectGeminiIgnore && + options.fileService.shouldGeminiIgnoreFile(subFolderPath)); } - if (options.ignoredFolders.has(subFolderName) || isIgnoredByGit) { + if (options.ignoredFolders.has(subFolderName) || isIgnored) { const ignoredSubFolder: FullFolderInfo = { name: subFolderName, path: subFolderPath, @@ -227,7 +236,7 @@ function formatStructure( // Ignored root nodes ARE printed with a connector. 
if (!isProcessingRootNode || node.isIgnored) { builder.push( - `${currentIndent}${connector}${node.name}/${node.isIgnored ? TRUNCATION_INDICATOR : ''}`, + `${currentIndent}${connector}${node.name}${path.sep}${node.isIgnored ? TRUNCATION_INDICATOR : ''}`, ); } @@ -295,7 +304,8 @@ export async function getFolderStructure( ignoredFolders: options?.ignoredFolders ?? DEFAULT_IGNORED_FOLDERS, fileIncludePattern: options?.fileIncludePattern, fileService: options?.fileService, - respectGitIgnore: options?.respectGitIgnore ?? true, + fileFilteringOptions: + options?.fileFilteringOptions ?? DEFAULT_FILE_FILTERING_OPTIONS, }; try { @@ -312,34 +322,25 @@ export async function getFolderStructure( formatStructure(structureRoot, '', true, true, structureLines); // 3. Build the final output string - const displayPath = resolvedPath.replace(/\\/g, '/'); - - let disclaimer = ''; - // Check if truncation occurred anywhere or if ignored folders are present. - // A simple check: if any node indicates more files/subfolders, or is ignored. - let truncationOccurred = false; - function checkForTruncation(node: FullFolderInfo) { + function isTruncated(node: FullFolderInfo): boolean { if (node.hasMoreFiles || node.hasMoreSubfolders || node.isIgnored) { - truncationOccurred = true; + return true; } - if (!truncationOccurred) { - for (const sub of node.subFolders) { - checkForTruncation(sub); - if (truncationOccurred) break; + for (const sub of node.subFolders) { + if (isTruncated(sub)) { + return true; } } - } - checkForTruncation(structureRoot); - - if (truncationOccurred) { - disclaimer = `Folders or files indicated with ${TRUNCATION_INDICATOR} contain more items not shown, were ignored, or the display limit (${mergedOptions.maxItems} items) was reached.`; + return false; } - const summary = - `Showing up to ${mergedOptions.maxItems} items (files + folders). 
${disclaimer}`.trim(); + let summary = `Showing up to ${mergedOptions.maxItems} items (files + folders).`; - const output = `${summary}\n\n${displayPath}/\n${structureLines.join('\n')}`; - return output; + if (isTruncated(structureRoot)) { + summary += ` Folders or files indicated with ${TRUNCATION_INDICATOR} contain more items not shown, were ignored, or the display limit (${mergedOptions.maxItems} items) was reached.`; + } + + return `${summary}\n\n${resolvedPath}${path.sep}\n${structureLines.join('\n')}`; } catch (error: unknown) { console.error(`Error getting folder structure for ${resolvedPath}:`, error); return `Error processing directory "${resolvedPath}": ${getErrorMessage(error)}`; diff --git a/packages/core/src/utils/gitIgnoreParser.test.ts b/packages/core/src/utils/gitIgnoreParser.test.ts index f58d50be3..b5fe8b2ac 100644 --- a/packages/core/src/utils/gitIgnoreParser.test.ts +++ b/packages/core/src/utils/gitIgnoreParser.test.ts @@ -4,39 +4,43 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { describe, it, expect, beforeEach, vi, afterEach } from 'vitest'; +import { describe, it, expect, beforeEach, afterEach } from 'vitest'; import { GitIgnoreParser } from './gitIgnoreParser.js'; -import * as fs from 'fs'; +import * as fs from 'fs/promises'; import * as path from 'path'; -import { isGitRepository } from './gitUtils.js'; - -// Mock fs module -vi.mock('fs'); - -// Mock gitUtils module -vi.mock('./gitUtils.js'); +import * as os from 'os'; describe('GitIgnoreParser', () => { let parser: GitIgnoreParser; - const mockProjectRoot = '/test/project'; + let projectRoot: string; - beforeEach(() => { - parser = new GitIgnoreParser(mockProjectRoot); - // Reset mocks before each test - vi.mocked(fs.readFileSync).mockClear(); - vi.mocked(isGitRepository).mockReturnValue(true); + async function createTestFile(filePath: string, content = '') { + const fullPath = path.join(projectRoot, filePath); + await fs.mkdir(path.dirname(fullPath), { recursive: true }); + await 
fs.writeFile(fullPath, content); + } + + async function setupGitRepo() { + await fs.mkdir(path.join(projectRoot, '.git'), { recursive: true }); + } + + beforeEach(async () => { + projectRoot = await fs.mkdtemp(path.join(os.tmpdir(), 'gitignore-test-')); + parser = new GitIgnoreParser(projectRoot); }); - afterEach(() => { - vi.restoreAllMocks(); + afterEach(async () => { + await fs.rm(projectRoot, { recursive: true, force: true }); }); describe('initialization', () => { - it('should initialize without errors when no .gitignore exists', () => { + it('should initialize without errors when no .gitignore exists', async () => { + await setupGitRepo(); expect(() => parser.loadGitRepoPatterns()).not.toThrow(); }); - it('should load .gitignore patterns when file exists', () => { + it('should load .gitignore patterns when file exists', async () => { + await setupGitRepo(); const gitignoreContent = ` # Comment node_modules/ @@ -44,7 +48,7 @@ node_modules/ /dist .env `; - vi.mocked(fs.readFileSync).mockReturnValueOnce(gitignoreContent); + await createTestFile('.gitignore', gitignoreContent); parser.loadGitRepoPatterns(); @@ -55,41 +59,35 @@ node_modules/ '/dist', '.env', ]); - expect(parser.isIgnored('node_modules/some-lib')).toBe(true); - expect(parser.isIgnored('src/app.log')).toBe(true); - expect(parser.isIgnored('dist/index.js')).toBe(true); + expect(parser.isIgnored(path.join('node_modules', 'some-lib'))).toBe( + true, + ); + expect(parser.isIgnored(path.join('src', 'app.log'))).toBe(true); + expect(parser.isIgnored(path.join('dist', 'index.js'))).toBe(true); expect(parser.isIgnored('.env')).toBe(true); }); - it('should handle git exclude file', () => { - vi.mocked(fs.readFileSync).mockImplementation((filePath) => { - if ( - filePath === path.join(mockProjectRoot, '.git', 'info', 'exclude') - ) { - return 'temp/\n*.tmp'; - } - throw new Error('ENOENT'); - }); + it('should handle git exclude file', async () => { + await setupGitRepo(); + await createTestFile( + 
path.join('.git', 'info', 'exclude'), + 'temp/\n*.tmp', + ); parser.loadGitRepoPatterns(); expect(parser.getPatterns()).toEqual(['.git', 'temp/', '*.tmp']); - expect(parser.isIgnored('temp/file.txt')).toBe(true); - expect(parser.isIgnored('src/file.tmp')).toBe(true); + expect(parser.isIgnored(path.join('temp', 'file.txt'))).toBe(true); + expect(parser.isIgnored(path.join('src', 'file.tmp'))).toBe(true); }); - it('should handle custom patterns file name', () => { - vi.mocked(isGitRepository).mockReturnValue(false); - vi.mocked(fs.readFileSync).mockImplementation((filePath) => { - if (filePath === path.join(mockProjectRoot, '.geminiignore')) { - return 'temp/\n*.tmp'; - } - throw new Error('ENOENT'); - }); + it('should handle custom patterns file name', async () => { + // No .git directory for this test + await createTestFile('.geminiignore', 'temp/\n*.tmp'); parser.loadPatterns('.geminiignore'); expect(parser.getPatterns()).toEqual(['temp/', '*.tmp']); - expect(parser.isIgnored('temp/file.txt')).toBe(true); - expect(parser.isIgnored('src/file.tmp')).toBe(true); + expect(parser.isIgnored(path.join('temp', 'file.txt'))).toBe(true); + expect(parser.isIgnored(path.join('src', 'file.tmp'))).toBe(true); }); it('should initialize without errors when no .geminiignore exists', () => { @@ -98,7 +96,8 @@ node_modules/ }); describe('isIgnored', () => { - beforeEach(() => { + beforeEach(async () => { + await setupGitRepo(); const gitignoreContent = ` node_modules/ *.log @@ -107,66 +106,88 @@ node_modules/ src/*.tmp !src/important.tmp `; - vi.mocked(fs.readFileSync).mockReturnValueOnce(gitignoreContent); + await createTestFile('.gitignore', gitignoreContent); parser.loadGitRepoPatterns(); }); it('should always ignore .git directory', () => { expect(parser.isIgnored('.git')).toBe(true); - expect(parser.isIgnored('.git/config')).toBe(true); - expect(parser.isIgnored(path.join(mockProjectRoot, '.git', 'HEAD'))).toBe( + expect(parser.isIgnored(path.join('.git', 
'config'))).toBe(true); + expect(parser.isIgnored(path.join(projectRoot, '.git', 'HEAD'))).toBe( true, ); }); it('should ignore files matching patterns', () => { - expect(parser.isIgnored('node_modules/package/index.js')).toBe(true); + expect( + parser.isIgnored(path.join('node_modules', 'package', 'index.js')), + ).toBe(true); expect(parser.isIgnored('app.log')).toBe(true); - expect(parser.isIgnored('logs/app.log')).toBe(true); - expect(parser.isIgnored('dist/bundle.js')).toBe(true); + expect(parser.isIgnored(path.join('logs', 'app.log'))).toBe(true); + expect(parser.isIgnored(path.join('dist', 'bundle.js'))).toBe(true); expect(parser.isIgnored('.env')).toBe(true); - expect(parser.isIgnored('config/.env')).toBe(false); // .env is anchored to root + expect(parser.isIgnored(path.join('config', '.env'))).toBe(false); // .env is anchored to root }); it('should ignore files with path-specific patterns', () => { - expect(parser.isIgnored('src/temp.tmp')).toBe(true); - expect(parser.isIgnored('other/temp.tmp')).toBe(false); + expect(parser.isIgnored(path.join('src', 'temp.tmp'))).toBe(true); + expect(parser.isIgnored(path.join('other', 'temp.tmp'))).toBe(false); }); it('should handle negation patterns', () => { - expect(parser.isIgnored('src/important.tmp')).toBe(false); + expect(parser.isIgnored(path.join('src', 'important.tmp'))).toBe(false); }); it('should not ignore files that do not match patterns', () => { - expect(parser.isIgnored('src/index.ts')).toBe(false); + expect(parser.isIgnored(path.join('src', 'index.ts'))).toBe(false); expect(parser.isIgnored('README.md')).toBe(false); }); it('should handle absolute paths correctly', () => { - const absolutePath = path.join(mockProjectRoot, 'node_modules', 'lib'); + const absolutePath = path.join(projectRoot, 'node_modules', 'lib'); expect(parser.isIgnored(absolutePath)).toBe(true); }); it('should handle paths outside project root by not ignoring them', () => { - const outsidePath = path.resolve(mockProjectRoot, 
'../other/file.txt'); + const outsidePath = path.resolve(projectRoot, '..', 'other', 'file.txt'); expect(parser.isIgnored(outsidePath)).toBe(false); }); it('should handle relative paths correctly', () => { - expect(parser.isIgnored('node_modules/some-package')).toBe(true); - expect(parser.isIgnored('../some/other/file.txt')).toBe(false); + expect(parser.isIgnored(path.join('node_modules', 'some-package'))).toBe( + true, + ); + expect( + parser.isIgnored(path.join('..', 'some', 'other', 'file.txt')), + ).toBe(false); }); it('should normalize path separators on Windows', () => { - expect(parser.isIgnored('node_modules\\package')).toBe(true); - expect(parser.isIgnored('src\\temp.tmp')).toBe(true); + expect(parser.isIgnored(path.join('node_modules', 'package'))).toBe(true); + expect(parser.isIgnored(path.join('src', 'temp.tmp'))).toBe(true); + }); + + it('should handle root path "/" without throwing error', () => { + expect(() => parser.isIgnored('/')).not.toThrow(); + expect(parser.isIgnored('/')).toBe(false); + }); + + it('should handle absolute-like paths without throwing error', () => { + expect(() => parser.isIgnored('/some/path')).not.toThrow(); + expect(parser.isIgnored('/some/path')).toBe(false); + }); + + it('should handle paths that start with forward slash', () => { + expect(() => parser.isIgnored('/node_modules')).not.toThrow(); + expect(parser.isIgnored('/node_modules')).toBe(false); }); }); describe('getIgnoredPatterns', () => { - it('should return the raw patterns added', () => { + it('should return the raw patterns added', async () => { + await setupGitRepo(); const gitignoreContent = '*.log\n!important.log'; - vi.mocked(fs.readFileSync).mockReturnValueOnce(gitignoreContent); + await createTestFile('.gitignore', gitignoreContent); parser.loadGitRepoPatterns(); expect(parser.getPatterns()).toEqual(['.git', '*.log', '!important.log']); diff --git a/packages/core/src/utils/gitIgnoreParser.ts b/packages/core/src/utils/gitIgnoreParser.ts index 
370412722..f2422a929 100644 --- a/packages/core/src/utils/gitIgnoreParser.ts +++ b/packages/core/src/utils/gitIgnoreParser.ts @@ -57,19 +57,15 @@ export class GitIgnoreParser implements GitIgnoreFilter { } isIgnored(filePath: string): boolean { - const relativePath = path.isAbsolute(filePath) - ? path.relative(this.projectRoot, filePath) - : filePath; + const resolved = path.resolve(this.projectRoot, filePath); + const relativePath = path.relative(this.projectRoot, resolved); if (relativePath === '' || relativePath.startsWith('..')) { return false; } - let normalizedPath = relativePath.replace(/\\/g, '/'); - if (normalizedPath.startsWith('./')) { - normalizedPath = normalizedPath.substring(2); - } - + // Even in windows, Ignore expects forward slashes. + const normalizedPath = relativePath.replace(/\\/g, '/'); return this.ig.ignores(normalizedPath); } diff --git a/packages/core/src/utils/memoryDiscovery.test.ts b/packages/core/src/utils/memoryDiscovery.test.ts index 1e38d9720..2fb2fcb15 100644 --- a/packages/core/src/utils/memoryDiscovery.test.ts +++ b/packages/core/src/utils/memoryDiscovery.test.ts @@ -4,604 +4,380 @@ * SPDX-License-Identifier: Apache-2.0 */ -import { vi, describe, it, expect, beforeEach, Mocked } from 'vitest'; +import { vi, describe, it, expect, beforeEach, afterEach } from 'vitest'; import * as fsPromises from 'fs/promises'; -import * as fsSync from 'fs'; -import { Stats, Dirent } from 'fs'; import * as os from 'os'; import * as path from 'path'; import { loadServerHierarchicalMemory } from './memoryDiscovery.js'; import { GEMINI_CONFIG_DIR, setGeminiMdFilename, - getCurrentGeminiMdFilename, DEFAULT_CONTEXT_FILENAME, } from '../tools/memoryTool.js'; import { FileDiscoveryService } from '../services/fileDiscoveryService.js'; -const ORIGINAL_GEMINI_MD_FILENAME_CONST_FOR_TEST = DEFAULT_CONTEXT_FILENAME; - -// Mock the entire fs/promises module -vi.mock('fs/promises'); -// Mock the parts of fsSync we might use (like constants or existsSync if 
needed) -vi.mock('fs', async (importOriginal) => { - const actual = await importOriginal(); +vi.mock('os', async (importOriginal) => { + const actualOs = await importOriginal(); return { - ...actual, // Spread actual to get all exports, including Stats and Dirent if they are classes/constructors - constants: { ...actual.constants }, // Preserve constants + ...actualOs, + homedir: vi.fn(), }; }); -vi.mock('os'); describe('loadServerHierarchicalMemory', () => { - const mockFs = fsPromises as Mocked; - const mockOs = os as Mocked; + let testRootDir: string; + let cwd: string; + let projectRoot: string; + let homedir: string; - const CWD = '/test/project/src'; - const PROJECT_ROOT = '/test/project'; - const USER_HOME = '/test/userhome'; + async function createEmptyDir(fullPath: string) { + await fsPromises.mkdir(fullPath, { recursive: true }); + return fullPath; + } - let GLOBAL_GEMINI_DIR: string; - let GLOBAL_GEMINI_FILE: string; // Defined in beforeEach + async function createTestFile(fullPath: string, fileContents: string) { + await fsPromises.mkdir(path.dirname(fullPath), { recursive: true }); + await fsPromises.writeFile(fullPath, fileContents); + return path.resolve(testRootDir, fullPath); + } + + beforeEach(async () => { + testRootDir = await fsPromises.mkdtemp( + path.join(os.tmpdir(), 'folder-structure-test-'), + ); - const fileService = new FileDiscoveryService(PROJECT_ROOT); - beforeEach(() => { vi.resetAllMocks(); // Set environment variables to indicate test environment process.env.NODE_ENV = 'test'; process.env.VITEST = 'true'; - setGeminiMdFilename(DEFAULT_CONTEXT_FILENAME); // Use defined const - mockOs.homedir.mockReturnValue(USER_HOME); + projectRoot = await createEmptyDir(path.join(testRootDir, 'project')); + cwd = await createEmptyDir(path.join(projectRoot, 'src')); + homedir = await createEmptyDir(path.join(testRootDir, 'userhome')); + vi.mocked(os.homedir).mockReturnValue(homedir); + }); - // Define these here to use potentially reset/updated 
values from imports - GLOBAL_GEMINI_DIR = path.join(USER_HOME, GEMINI_CONFIG_DIR); - GLOBAL_GEMINI_FILE = path.join( - GLOBAL_GEMINI_DIR, - getCurrentGeminiMdFilename(), // Use current filename - ); - - mockFs.stat.mockRejectedValue(new Error('File not found')); - mockFs.readdir.mockResolvedValue([]); - mockFs.readFile.mockRejectedValue(new Error('File not found')); - mockFs.access.mockRejectedValue(new Error('File not found')); + afterEach(async () => { + // Some tests set this to a different value. + setGeminiMdFilename(DEFAULT_CONTEXT_FILENAME); + // Clean up the temporary directory to prevent resource leaks. + await fsPromises.rm(testRootDir, { recursive: true, force: true }); }); it('should return empty memory and count if no context files are found', async () => { - const { memoryContent, fileCount } = await loadServerHierarchicalMemory( - CWD, + const result = await loadServerHierarchicalMemory( + cwd, false, - fileService, + new FileDiscoveryService(projectRoot), ); - expect(memoryContent).toBe(''); - expect(fileCount).toBe(0); + + expect(result).toEqual({ + memoryContent: '', + fileCount: 0, + }); }); it('should load only the global context file if present and others are not (default filename)', async () => { - const globalDefaultFile = path.join( - GLOBAL_GEMINI_DIR, - DEFAULT_CONTEXT_FILENAME, + const defaultContextFile = await createTestFile( + path.join(homedir, GEMINI_CONFIG_DIR, DEFAULT_CONTEXT_FILENAME), + 'default context content', ); - mockFs.access.mockImplementation(async (p) => { - if (p === globalDefaultFile) { - return undefined; - } - throw new Error('File not found'); - }); - mockFs.readFile.mockImplementation(async (p) => { - if (p === globalDefaultFile) { - return 'Global memory content'; - } - throw new Error('File not found'); - }); - const { memoryContent, fileCount } = await loadServerHierarchicalMemory( - CWD, + const result = await loadServerHierarchicalMemory( + cwd, false, - fileService, + new FileDiscoveryService(projectRoot), ); 
- expect(memoryContent).toBe( - `--- Context from: ${path.relative(CWD, globalDefaultFile)} ---\nGlobal memory content\n--- End of Context from: ${path.relative(CWD, globalDefaultFile)} ---`, - ); - expect(fileCount).toBe(1); - expect(mockFs.readFile).toHaveBeenCalledWith(globalDefaultFile, 'utf-8'); + expect(result).toEqual({ + memoryContent: `--- Context from: ${path.relative(cwd, defaultContextFile)} --- +default context content +--- End of Context from: ${path.relative(cwd, defaultContextFile)} ---`, + fileCount: 1, + }); }); it('should load only the global custom context file if present and filename is changed', async () => { const customFilename = 'CUSTOM_AGENTS.md'; setGeminiMdFilename(customFilename); - const globalCustomFile = path.join(GLOBAL_GEMINI_DIR, customFilename); - mockFs.access.mockImplementation(async (p) => { - if (p === globalCustomFile) { - return undefined; - } - throw new Error('File not found'); - }); - mockFs.readFile.mockImplementation(async (p) => { - if (p === globalCustomFile) { - return 'Global custom memory'; - } - throw new Error('File not found'); - }); + const customContextFile = await createTestFile( + path.join(homedir, GEMINI_CONFIG_DIR, customFilename), + 'custom context content', + ); - const { memoryContent, fileCount } = await loadServerHierarchicalMemory( - CWD, + const result = await loadServerHierarchicalMemory( + cwd, false, - fileService, + new FileDiscoveryService(projectRoot), ); - expect(memoryContent).toBe( - `--- Context from: ${path.relative(CWD, globalCustomFile)} ---\nGlobal custom memory\n--- End of Context from: ${path.relative(CWD, globalCustomFile)} ---`, - ); - expect(fileCount).toBe(1); - expect(mockFs.readFile).toHaveBeenCalledWith(globalCustomFile, 'utf-8'); + expect(result).toEqual({ + memoryContent: `--- Context from: ${path.relative(cwd, customContextFile)} --- +custom context content +--- End of Context from: ${path.relative(cwd, customContextFile)} ---`, + fileCount: 1, + }); }); it('should load 
context files by upward traversal with custom filename', async () => { const customFilename = 'PROJECT_CONTEXT.md'; setGeminiMdFilename(customFilename); - const projectRootCustomFile = path.join(PROJECT_ROOT, customFilename); - const srcCustomFile = path.join(CWD, customFilename); - mockFs.stat.mockImplementation(async (p) => { - if (p === path.join(PROJECT_ROOT, '.git')) { - return { isDirectory: () => true } as Stats; - } - throw new Error('File not found'); - }); + const projectContextFile = await createTestFile( + path.join(projectRoot, customFilename), + 'project context content', + ); + const cwdContextFile = await createTestFile( + path.join(cwd, customFilename), + 'cwd context content', + ); - mockFs.access.mockImplementation(async (p) => { - if (p === projectRootCustomFile || p === srcCustomFile) { - return undefined; - } - throw new Error('File not found'); - }); - - mockFs.readFile.mockImplementation(async (p) => { - if (p === projectRootCustomFile) { - return 'Project root custom memory'; - } - if (p === srcCustomFile) { - return 'Src directory custom memory'; - } - throw new Error('File not found'); - }); - - const { memoryContent, fileCount } = await loadServerHierarchicalMemory( - CWD, + const result = await loadServerHierarchicalMemory( + cwd, false, - fileService, + new FileDiscoveryService(projectRoot), ); - const expectedContent = - `--- Context from: ${path.relative(CWD, projectRootCustomFile)} ---\nProject root custom memory\n--- End of Context from: ${path.relative(CWD, projectRootCustomFile)} ---\n\n` + - `--- Context from: ${customFilename} ---\nSrc directory custom memory\n--- End of Context from: ${customFilename} ---`; - expect(memoryContent).toBe(expectedContent); - expect(fileCount).toBe(2); - expect(mockFs.readFile).toHaveBeenCalledWith( - projectRootCustomFile, - 'utf-8', - ); - expect(mockFs.readFile).toHaveBeenCalledWith(srcCustomFile, 'utf-8'); + expect(result).toEqual({ + memoryContent: `--- Context from: ${path.relative(cwd, 
projectContextFile)} --- +project context content +--- End of Context from: ${path.relative(cwd, projectContextFile)} --- + +--- Context from: ${path.relative(cwd, cwdContextFile)} --- +cwd context content +--- End of Context from: ${path.relative(cwd, cwdContextFile)} ---`, + fileCount: 2, + }); }); it('should load context files by downward traversal with custom filename', async () => { const customFilename = 'LOCAL_CONTEXT.md'; setGeminiMdFilename(customFilename); - const subDir = path.join(CWD, 'subdir'); - const subDirCustomFile = path.join(subDir, customFilename); - const cwdCustomFile = path.join(CWD, customFilename); - mockFs.access.mockImplementation(async (p) => { - if (p === cwdCustomFile || p === subDirCustomFile) return undefined; - throw new Error('File not found'); - }); - - mockFs.readFile.mockImplementation(async (p) => { - if (p === cwdCustomFile) return 'CWD custom memory'; - if (p === subDirCustomFile) return 'Subdir custom memory'; - throw new Error('File not found'); - }); - - mockFs.readdir.mockImplementation((async ( - p: fsSync.PathLike, - ): Promise => { - if (p === CWD) { - return [ - { - name: customFilename, - isFile: () => true, - isDirectory: () => false, - } as Dirent, - { - name: 'subdir', - isFile: () => false, - isDirectory: () => true, - } as Dirent, - ] as Dirent[]; - } - if (p === subDir) { - return [ - { - name: customFilename, - isFile: () => true, - isDirectory: () => false, - } as Dirent, - ] as Dirent[]; - } - return [] as Dirent[]; - }) as unknown as typeof fsPromises.readdir); - - const { memoryContent, fileCount } = await loadServerHierarchicalMemory( - CWD, - false, - fileService, + await createTestFile( + path.join(cwd, 'subdir', customFilename), + 'Subdir custom memory', ); - const expectedContent = - `--- Context from: ${customFilename} ---\nCWD custom memory\n--- End of Context from: ${customFilename} ---\n\n` + - `--- Context from: ${path.join('subdir', customFilename)} ---\nSubdir custom memory\n--- End of Context 
from: ${path.join('subdir', customFilename)} ---`; + await createTestFile(path.join(cwd, customFilename), 'CWD custom memory'); - expect(memoryContent).toBe(expectedContent); - expect(fileCount).toBe(2); + const result = await loadServerHierarchicalMemory( + cwd, + false, + new FileDiscoveryService(projectRoot), + ); + + expect(result).toEqual({ + memoryContent: `--- Context from: ${customFilename} --- +CWD custom memory +--- End of Context from: ${customFilename} --- + +--- Context from: ${path.join('subdir', customFilename)} --- +Subdir custom memory +--- End of Context from: ${path.join('subdir', customFilename)} ---`, + fileCount: 2, + }); }); it('should load ORIGINAL_GEMINI_MD_FILENAME files by upward traversal from CWD to project root', async () => { - const projectRootGeminiFile = path.join( - PROJECT_ROOT, - ORIGINAL_GEMINI_MD_FILENAME_CONST_FOR_TEST, + const projectRootGeminiFile = await createTestFile( + path.join(projectRoot, DEFAULT_CONTEXT_FILENAME), + 'Project root memory', ); - const srcGeminiFile = path.join( - CWD, - ORIGINAL_GEMINI_MD_FILENAME_CONST_FOR_TEST, + const srcGeminiFile = await createTestFile( + path.join(cwd, DEFAULT_CONTEXT_FILENAME), + 'Src directory memory', ); - mockFs.stat.mockImplementation(async (p) => { - if (p === path.join(PROJECT_ROOT, '.git')) { - return { isDirectory: () => true } as Stats; - } - throw new Error('File not found'); - }); - - mockFs.access.mockImplementation(async (p) => { - if (p === projectRootGeminiFile || p === srcGeminiFile) { - return undefined; - } - throw new Error('File not found'); - }); - - mockFs.readFile.mockImplementation(async (p) => { - if (p === projectRootGeminiFile) { - return 'Project root memory'; - } - if (p === srcGeminiFile) { - return 'Src directory memory'; - } - throw new Error('File not found'); - }); - - const { memoryContent, fileCount } = await loadServerHierarchicalMemory( - CWD, + const result = await loadServerHierarchicalMemory( + cwd, false, - fileService, + new 
FileDiscoveryService(projectRoot), ); - const expectedContent = - `--- Context from: ${path.relative(CWD, projectRootGeminiFile)} ---\nProject root memory\n--- End of Context from: ${path.relative(CWD, projectRootGeminiFile)} ---\n\n` + - `--- Context from: ${ORIGINAL_GEMINI_MD_FILENAME_CONST_FOR_TEST} ---\nSrc directory memory\n--- End of Context from: ${ORIGINAL_GEMINI_MD_FILENAME_CONST_FOR_TEST} ---`; - expect(memoryContent).toBe(expectedContent); - expect(fileCount).toBe(2); - expect(mockFs.readFile).toHaveBeenCalledWith( - projectRootGeminiFile, - 'utf-8', - ); - expect(mockFs.readFile).toHaveBeenCalledWith(srcGeminiFile, 'utf-8'); + expect(result).toEqual({ + memoryContent: `--- Context from: ${path.relative(cwd, projectRootGeminiFile)} --- +Project root memory +--- End of Context from: ${path.relative(cwd, projectRootGeminiFile)} --- + +--- Context from: ${path.relative(cwd, srcGeminiFile)} --- +Src directory memory +--- End of Context from: ${path.relative(cwd, srcGeminiFile)} ---`, + fileCount: 2, + }); }); it('should load ORIGINAL_GEMINI_MD_FILENAME files by downward traversal from CWD', async () => { - const subDir = path.join(CWD, 'subdir'); - const subDirGeminiFile = path.join( - subDir, - ORIGINAL_GEMINI_MD_FILENAME_CONST_FOR_TEST, + await createTestFile( + path.join(cwd, 'subdir', DEFAULT_CONTEXT_FILENAME), + 'Subdir memory', ); - const cwdGeminiFile = path.join( - CWD, - ORIGINAL_GEMINI_MD_FILENAME_CONST_FOR_TEST, + await createTestFile( + path.join(cwd, DEFAULT_CONTEXT_FILENAME), + 'CWD memory', ); - mockFs.access.mockImplementation(async (p) => { - if (p === cwdGeminiFile || p === subDirGeminiFile) return undefined; - throw new Error('File not found'); - }); - - mockFs.readFile.mockImplementation(async (p) => { - if (p === cwdGeminiFile) return 'CWD memory'; - if (p === subDirGeminiFile) return 'Subdir memory'; - throw new Error('File not found'); - }); - - mockFs.readdir.mockImplementation((async ( - p: fsSync.PathLike, - ): Promise => { - if (p 
=== CWD) { - return [ - { - name: ORIGINAL_GEMINI_MD_FILENAME_CONST_FOR_TEST, - isFile: () => true, - isDirectory: () => false, - } as Dirent, - { - name: 'subdir', - isFile: () => false, - isDirectory: () => true, - } as Dirent, - ] as Dirent[]; - } - if (p === subDir) { - return [ - { - name: ORIGINAL_GEMINI_MD_FILENAME_CONST_FOR_TEST, - isFile: () => true, - isDirectory: () => false, - } as Dirent, - ] as Dirent[]; - } - return [] as Dirent[]; - }) as unknown as typeof fsPromises.readdir); - - const { memoryContent, fileCount } = await loadServerHierarchicalMemory( - CWD, + const result = await loadServerHierarchicalMemory( + cwd, false, - fileService, + new FileDiscoveryService(projectRoot), ); - const expectedContent = - `--- Context from: ${ORIGINAL_GEMINI_MD_FILENAME_CONST_FOR_TEST} ---\nCWD memory\n--- End of Context from: ${ORIGINAL_GEMINI_MD_FILENAME_CONST_FOR_TEST} ---\n\n` + - `--- Context from: ${path.join('subdir', ORIGINAL_GEMINI_MD_FILENAME_CONST_FOR_TEST)} ---\nSubdir memory\n--- End of Context from: ${path.join('subdir', ORIGINAL_GEMINI_MD_FILENAME_CONST_FOR_TEST)} ---`; - expect(memoryContent).toBe(expectedContent); - expect(fileCount).toBe(2); + expect(result).toEqual({ + memoryContent: `--- Context from: ${DEFAULT_CONTEXT_FILENAME} --- +CWD memory +--- End of Context from: ${DEFAULT_CONTEXT_FILENAME} --- + +--- Context from: ${path.join('subdir', DEFAULT_CONTEXT_FILENAME)} --- +Subdir memory +--- End of Context from: ${path.join('subdir', DEFAULT_CONTEXT_FILENAME)} ---`, + fileCount: 2, + }); }); it('should load and correctly order global, upward, and downward ORIGINAL_GEMINI_MD_FILENAME files', async () => { - setGeminiMdFilename(ORIGINAL_GEMINI_MD_FILENAME_CONST_FOR_TEST); // Explicitly set for this test - - const globalFileToUse = path.join( - GLOBAL_GEMINI_DIR, - ORIGINAL_GEMINI_MD_FILENAME_CONST_FOR_TEST, + const defaultContextFile = await createTestFile( + path.join(homedir, GEMINI_CONFIG_DIR, DEFAULT_CONTEXT_FILENAME), + 'default context 
content', ); - const projectParentDir = path.dirname(PROJECT_ROOT); - const projectParentGeminiFile = path.join( - projectParentDir, - ORIGINAL_GEMINI_MD_FILENAME_CONST_FOR_TEST, + const rootGeminiFile = await createTestFile( + path.join(testRootDir, DEFAULT_CONTEXT_FILENAME), + 'Project parent memory', ); - const projectRootGeminiFile = path.join( - PROJECT_ROOT, - ORIGINAL_GEMINI_MD_FILENAME_CONST_FOR_TEST, + const projectRootGeminiFile = await createTestFile( + path.join(projectRoot, DEFAULT_CONTEXT_FILENAME), + 'Project root memory', ); - const cwdGeminiFile = path.join( - CWD, - ORIGINAL_GEMINI_MD_FILENAME_CONST_FOR_TEST, + const cwdGeminiFile = await createTestFile( + path.join(cwd, DEFAULT_CONTEXT_FILENAME), + 'CWD memory', ); - const subDir = path.join(CWD, 'sub'); - const subDirGeminiFile = path.join( - subDir, - ORIGINAL_GEMINI_MD_FILENAME_CONST_FOR_TEST, + const subDirGeminiFile = await createTestFile( + path.join(cwd, 'sub', DEFAULT_CONTEXT_FILENAME), + 'Subdir memory', ); - mockFs.stat.mockImplementation(async (p) => { - if (p === path.join(PROJECT_ROOT, '.git')) { - return { isDirectory: () => true } as Stats; - } else if (p === path.join(PROJECT_ROOT, '.gemini')) { - return { isDirectory: () => true } as Stats; - } - throw new Error('File not found'); - }); - - mockFs.access.mockImplementation(async (p) => { - if ( - p === globalFileToUse || // Use the dynamically set global file path - p === projectParentGeminiFile || - p === projectRootGeminiFile || - p === cwdGeminiFile || - p === subDirGeminiFile - ) { - return undefined; - } - throw new Error('File not found'); - }); - - mockFs.readFile.mockImplementation(async (p) => { - if (p === globalFileToUse) return 'Global memory'; // Use the dynamically set global file path - if (p === projectParentGeminiFile) return 'Project parent memory'; - if (p === projectRootGeminiFile) return 'Project root memory'; - if (p === cwdGeminiFile) return 'CWD memory'; - if (p === subDirGeminiFile) return 'Subdir 
memory'; - throw new Error('File not found'); - }); - - mockFs.readdir.mockImplementation((async ( - p: fsSync.PathLike, - ): Promise => { - if (p === CWD) { - return [ - { - name: 'sub', - isFile: () => false, - isDirectory: () => true, - } as Dirent, - ] as Dirent[]; - } - if (p === subDir) { - return [ - { - name: ORIGINAL_GEMINI_MD_FILENAME_CONST_FOR_TEST, - isFile: () => true, - isDirectory: () => false, - } as Dirent, - ] as Dirent[]; - } - return [] as Dirent[]; - }) as unknown as typeof fsPromises.readdir); - - const { memoryContent, fileCount } = await loadServerHierarchicalMemory( - CWD, + const result = await loadServerHierarchicalMemory( + cwd, false, - fileService, + new FileDiscoveryService(projectRoot), ); - const relPathGlobal = path.relative(CWD, GLOBAL_GEMINI_FILE); - const relPathProjectParent = path.relative(CWD, projectParentGeminiFile); - const relPathProjectRoot = path.relative(CWD, projectRootGeminiFile); - const relPathCwd = ORIGINAL_GEMINI_MD_FILENAME_CONST_FOR_TEST; - const relPathSubDir = path.join( - 'sub', - ORIGINAL_GEMINI_MD_FILENAME_CONST_FOR_TEST, - ); + expect(result).toEqual({ + memoryContent: `--- Context from: ${path.relative(cwd, defaultContextFile)} --- +default context content +--- End of Context from: ${path.relative(cwd, defaultContextFile)} --- - const expectedContent = [ - `--- Context from: ${relPathGlobal} ---\nGlobal memory\n--- End of Context from: ${relPathGlobal} ---`, - `--- Context from: ${relPathProjectParent} ---\nProject parent memory\n--- End of Context from: ${relPathProjectParent} ---`, - `--- Context from: ${relPathProjectRoot} ---\nProject root memory\n--- End of Context from: ${relPathProjectRoot} ---`, - `--- Context from: ${relPathCwd} ---\nCWD memory\n--- End of Context from: ${relPathCwd} ---`, - `--- Context from: ${relPathSubDir} ---\nSubdir memory\n--- End of Context from: ${relPathSubDir} ---`, - ].join('\n\n'); +--- Context from: ${path.relative(cwd, rootGeminiFile)} --- +Project parent memory 
+--- End of Context from: ${path.relative(cwd, rootGeminiFile)} --- - expect(memoryContent).toBe(expectedContent); - expect(fileCount).toBe(5); +--- Context from: ${path.relative(cwd, projectRootGeminiFile)} --- +Project root memory +--- End of Context from: ${path.relative(cwd, projectRootGeminiFile)} --- + +--- Context from: ${path.relative(cwd, cwdGeminiFile)} --- +CWD memory +--- End of Context from: ${path.relative(cwd, cwdGeminiFile)} --- + +--- Context from: ${path.relative(cwd, subDirGeminiFile)} --- +Subdir memory +--- End of Context from: ${path.relative(cwd, subDirGeminiFile)} ---`, + fileCount: 5, + }); }); it('should ignore specified directories during downward scan', async () => { - const ignoredDir = path.join(CWD, 'node_modules'); - const ignoredDirGeminiFile = path.join( - ignoredDir, - ORIGINAL_GEMINI_MD_FILENAME_CONST_FOR_TEST, - ); // Corrected - const regularSubDir = path.join(CWD, 'my_code'); - const regularSubDirGeminiFile = path.join( - regularSubDir, - ORIGINAL_GEMINI_MD_FILENAME_CONST_FOR_TEST, + await createEmptyDir(path.join(projectRoot, '.git')); + await createTestFile(path.join(projectRoot, '.gitignore'), 'node_modules'); + + await createTestFile( + path.join(cwd, 'node_modules', DEFAULT_CONTEXT_FILENAME), + 'Ignored memory', + ); + const regularSubDirGeminiFile = await createTestFile( + path.join(cwd, 'my_code', DEFAULT_CONTEXT_FILENAME), + 'My code memory', ); - mockFs.access.mockImplementation(async (p) => { - if (p === regularSubDirGeminiFile) return undefined; - if (p === ignoredDirGeminiFile) - throw new Error('Should not access ignored file'); - throw new Error('File not found'); - }); - - mockFs.readFile.mockImplementation(async (p) => { - if (p === regularSubDirGeminiFile) return 'My code memory'; - throw new Error('File not found'); - }); - - mockFs.readdir.mockImplementation((async ( - p: fsSync.PathLike, - ): Promise => { - if (p === CWD) { - return [ - { - name: 'node_modules', - isFile: () => false, - isDirectory: () => 
true, - } as Dirent, - { - name: 'my_code', - isFile: () => false, - isDirectory: () => true, - } as Dirent, - ] as Dirent[]; - } - if (p === regularSubDir) { - return [ - { - name: ORIGINAL_GEMINI_MD_FILENAME_CONST_FOR_TEST, - isFile: () => true, - isDirectory: () => false, - } as Dirent, - ] as Dirent[]; - } - if (p === ignoredDir) { - return [] as Dirent[]; - } - return [] as Dirent[]; - }) as unknown as typeof fsPromises.readdir); - - const { memoryContent, fileCount } = await loadServerHierarchicalMemory( - CWD, + const result = await loadServerHierarchicalMemory( + cwd, false, - fileService, + new FileDiscoveryService(projectRoot), + [], + { + respectGitIgnore: true, + respectGeminiIgnore: true, + }, ); - const expectedContent = `--- Context from: ${path.join('my_code', ORIGINAL_GEMINI_MD_FILENAME_CONST_FOR_TEST)} ---\nMy code memory\n--- End of Context from: ${path.join('my_code', ORIGINAL_GEMINI_MD_FILENAME_CONST_FOR_TEST)} ---`; - - expect(memoryContent).toBe(expectedContent); - expect(fileCount).toBe(1); - expect(mockFs.readFile).not.toHaveBeenCalledWith( - ignoredDirGeminiFile, - 'utf-8', - ); + expect(result).toEqual({ + memoryContent: `--- Context from: ${path.relative(cwd, regularSubDirGeminiFile)} --- +My code memory +--- End of Context from: ${path.relative(cwd, regularSubDirGeminiFile)} ---`, + fileCount: 1, + }); }); - it('should respect MAX_DIRECTORIES_TO_SCAN_FOR_MEMORY during downward scan', async () => { + it('should respect the maxDirs parameter during downward scan', async () => { const consoleDebugSpy = vi .spyOn(console, 'debug') .mockImplementation(() => {}); - const dirNames: Dirent[] = []; - for (let i = 0; i < 250; i++) { - dirNames.push({ - name: `deep_dir_${i}`, - isFile: () => false, - isDirectory: () => true, - } as Dirent); + for (let i = 0; i < 100; i++) { + await createEmptyDir(path.join(cwd, `deep_dir_${i}`)); } - mockFs.readdir.mockImplementation((async ( - p: fsSync.PathLike, - ): Promise => { - if (p === CWD) return 
dirNames; - if (p.toString().startsWith(path.join(CWD, 'deep_dir_'))) - return [] as Dirent[]; - return [] as Dirent[]; - }) as unknown as typeof fsPromises.readdir); - mockFs.access.mockRejectedValue(new Error('not found')); - - await loadServerHierarchicalMemory(CWD, true, fileService); + // Pass the custom limit directly to the function + await loadServerHierarchicalMemory( + cwd, + true, + new FileDiscoveryService(projectRoot), + [], + { + respectGitIgnore: true, + respectGeminiIgnore: true, + }, + 50, // maxDirs + ); expect(consoleDebugSpy).toHaveBeenCalledWith( expect.stringContaining('[DEBUG] [BfsFileSearch]'), - expect.stringContaining('Scanning [200/200]:'), + expect.stringContaining('Scanning [50/50]:'), ); - consoleDebugSpy.mockRestore(); + + vi.mocked(console.debug).mockRestore(); + + const result = await loadServerHierarchicalMemory( + cwd, + false, + new FileDiscoveryService(projectRoot), + ); + + expect(result).toEqual({ + memoryContent: '', + fileCount: 0, + }); }); it('should load extension context file paths', async () => { - const extensionFilePath = '/test/extensions/ext1/GEMINI.md'; - mockFs.access.mockImplementation(async (p) => { - if (p === extensionFilePath) { - return undefined; - } - throw new Error('File not found'); - }); - mockFs.readFile.mockImplementation(async (p) => { - if (p === extensionFilePath) { - return 'Extension memory content'; - } - throw new Error('File not found'); - }); + const extensionFilePath = await createTestFile( + path.join(testRootDir, 'extensions/ext1/GEMINI.md'), + 'Extension memory content', + ); - const { memoryContent, fileCount } = await loadServerHierarchicalMemory( - CWD, + const result = await loadServerHierarchicalMemory( + cwd, false, - fileService, + new FileDiscoveryService(projectRoot), [extensionFilePath], ); - expect(memoryContent).toBe( - `--- Context from: ${path.relative(CWD, extensionFilePath)} ---\nExtension memory content\n--- End of Context from: ${path.relative(CWD, extensionFilePath)} 
---`, - ); - expect(fileCount).toBe(1); - expect(mockFs.readFile).toHaveBeenCalledWith(extensionFilePath, 'utf-8'); + expect(result).toEqual({ + memoryContent: `--- Context from: ${path.relative(cwd, extensionFilePath)} --- +Extension memory content +--- End of Context from: ${path.relative(cwd, extensionFilePath)} ---`, + fileCount: 1, + }); }); }); diff --git a/packages/core/src/utils/memoryDiscovery.ts b/packages/core/src/utils/memoryDiscovery.ts index ab240ea87..88c82373d 100644 --- a/packages/core/src/utils/memoryDiscovery.ts +++ b/packages/core/src/utils/memoryDiscovery.ts @@ -15,6 +15,10 @@ import { } from '../tools/memoryTool.js'; import { FileDiscoveryService } from '../services/fileDiscoveryService.js'; import { processImports } from './memoryImportProcessor.js'; +import { + DEFAULT_MEMORY_FILE_FILTERING_OPTIONS, + FileFilteringOptions, +} from '../config/config.js'; // Simple console logger, similar to the one previously in CLI's config.ts // TODO: Integrate with a more robust server-side logger if available/appropriate. 
@@ -29,8 +33,6 @@ const logger = { console.error('[ERROR] [MemoryDiscovery]', ...args), }; -const MAX_DIRECTORIES_TO_SCAN_FOR_MEMORY = 200; - interface GeminiFileContent { filePath: string; content: string | null; @@ -85,6 +87,8 @@ async function getGeminiMdFilePathsInternal( debugMode: boolean, fileService: FileDiscoveryService, extensionContextFilePaths: string[] = [], + fileFilteringOptions: FileFilteringOptions, + maxDirs: number, ): Promise { const allPaths = new Set(); const geminiMdFilenames = getAllGeminiMdFilenames(); @@ -181,11 +185,18 @@ async function getGeminiMdFilePathsInternal( } upwardPaths.forEach((p) => allPaths.add(p)); + // Merge options with memory defaults, with options taking precedence + const mergedOptions = { + ...DEFAULT_MEMORY_FILE_FILTERING_OPTIONS, + ...fileFilteringOptions, + }; + const downwardPaths = await bfsFileSearch(resolvedCwd, { fileName: geminiMdFilename, - maxDirs: MAX_DIRECTORIES_TO_SCAN_FOR_MEMORY, + maxDirs, debug: debugMode, fileService, + fileFilteringOptions: mergedOptions, // Pass merged options as fileFilter }); downwardPaths.sort(); // Sort for consistent ordering, though hierarchy might be more complex if (debugMode && downwardPaths.length > 0) @@ -282,11 +293,14 @@ export async function loadServerHierarchicalMemory( debugMode: boolean, fileService: FileDiscoveryService, extensionContextFilePaths: string[] = [], + fileFilteringOptions?: FileFilteringOptions, + maxDirs: number = 200, ): Promise<{ memoryContent: string; fileCount: number }> { if (debugMode) logger.debug( `Loading server hierarchical memory for CWD: ${currentWorkingDirectory}`, ); + // For the server, homedir() refers to the server process's home. // This is consistent with how MemoryTool already finds the global path. 
const userHomePath = homedir(); @@ -296,6 +310,8 @@ export async function loadServerHierarchicalMemory( debugMode, fileService, extensionContextFilePaths, + fileFilteringOptions || DEFAULT_MEMORY_FILE_FILTERING_OPTIONS, + maxDirs, ); if (filePaths.length === 0) { if (debugMode) logger.debug('No GEMINI.md files found in hierarchy.'); diff --git a/packages/core/src/utils/partUtils.test.ts b/packages/core/src/utils/partUtils.test.ts new file mode 100644 index 000000000..eda85df27 --- /dev/null +++ b/packages/core/src/utils/partUtils.test.ts @@ -0,0 +1,166 @@ +/** + * @license + * Copyright 2025 Google LLC + * SPDX-License-Identifier: Apache-2.0 + */ + +import { describe, it, expect } from 'vitest'; +import { partToString, getResponseText } from './partUtils.js'; +import { GenerateContentResponse, Part } from '@google/genai'; + +const mockResponse = ( + parts?: Array<{ text?: string; functionCall?: unknown }>, +): GenerateContentResponse => ({ + candidates: parts + ? [{ content: { parts: parts as Part[], role: 'model' }, index: 0 }] + : [], + promptFeedback: { safetyRatings: [] }, + text: undefined, + data: undefined, + functionCalls: undefined, + executableCode: undefined, + codeExecutionResult: undefined, +}); + +describe('partUtils', () => { + describe('partToString (default behavior)', () => { + it('should return empty string for undefined or null', () => { + // @ts-expect-error Testing invalid input + expect(partToString(undefined)).toBe(''); + // @ts-expect-error Testing invalid input + expect(partToString(null)).toBe(''); + }); + + it('should return string input unchanged', () => { + expect(partToString('hello')).toBe('hello'); + }); + + it('should concatenate strings from an array', () => { + expect(partToString(['a', 'b'])).toBe('ab'); + }); + + it('should return text property when provided a text part', () => { + expect(partToString({ text: 'hi' })).toBe('hi'); + }); + + it('should return empty string for non-text parts', () => { + const part: Part = { 
inlineData: { mimeType: 'image/png', data: '' } }; + expect(partToString(part)).toBe(''); + const part2: Part = { functionCall: { name: 'test' } }; + expect(partToString(part2)).toBe(''); + }); + }); + + describe('partToString (verbose)', () => { + const verboseOptions = { verbose: true }; + + it('should return empty string for undefined or null', () => { + // @ts-expect-error Testing invalid input + expect(partToString(undefined, verboseOptions)).toBe(''); + // @ts-expect-error Testing invalid input + expect(partToString(null, verboseOptions)).toBe(''); + }); + + it('should return string input unchanged', () => { + expect(partToString('hello', verboseOptions)).toBe('hello'); + }); + + it('should join parts if the value is an array', () => { + const parts = ['hello', { text: ' world' }]; + expect(partToString(parts, verboseOptions)).toBe('hello world'); + }); + + it('should return the text property if the part is an object with text', () => { + const part: Part = { text: 'hello world' }; + expect(partToString(part, verboseOptions)).toBe('hello world'); + }); + + it('should return descriptive string for videoMetadata part', () => { + const part = { videoMetadata: {} } as Part; + expect(partToString(part, verboseOptions)).toBe('[Video Metadata]'); + }); + + it('should return descriptive string for thought part', () => { + const part = { thought: 'thinking' } as unknown as Part; + expect(partToString(part, verboseOptions)).toBe('[Thought: thinking]'); + }); + + it('should return descriptive string for codeExecutionResult part', () => { + const part = { codeExecutionResult: {} } as Part; + expect(partToString(part, verboseOptions)).toBe( + '[Code Execution Result]', + ); + }); + + it('should return descriptive string for executableCode part', () => { + const part = { executableCode: {} } as Part; + expect(partToString(part, verboseOptions)).toBe('[Executable Code]'); + }); + + it('should return descriptive string for fileData part', () => { + const part = { fileData: 
{} } as Part; + expect(partToString(part, verboseOptions)).toBe('[File Data]'); + }); + + it('should return descriptive string for functionCall part', () => { + const part = { functionCall: { name: 'myFunction' } } as Part; + expect(partToString(part, verboseOptions)).toBe( + '[Function Call: myFunction]', + ); + }); + + it('should return descriptive string for functionResponse part', () => { + const part = { functionResponse: { name: 'myFunction' } } as Part; + expect(partToString(part, verboseOptions)).toBe( + '[Function Response: myFunction]', + ); + }); + + it('should return descriptive string for inlineData part', () => { + const part = { inlineData: { mimeType: 'image/png', data: '' } } as Part; + expect(partToString(part, verboseOptions)).toBe(''); + }); + + it('should return an empty string for an unknown part type', () => { + const part: Part = {}; + expect(partToString(part, verboseOptions)).toBe(''); + }); + + it('should handle complex nested arrays with various part types', () => { + const parts = [ + 'start ', + { text: 'middle' }, + [ + { functionCall: { name: 'func1' } }, + ' end', + { inlineData: { mimeType: 'audio/mp3', data: '' } }, + ], + ]; + expect(partToString(parts as Part, verboseOptions)).toBe( + 'start middle[Function Call: func1] end