mirror of
https://github.com/openclaw/openclaw.git
synced 2026-04-28 06:31:11 +00:00
refactor(cli): remove bundled cli text providers
This commit is contained in:
parent
79d6713d81
commit
05d351c430
127 changed files with 87 additions and 8258 deletions
|
|
@ -38,6 +38,7 @@ Docs: https://docs.openclaw.ai
|
|||
- Agents/tool prompts: remove the duplicate in-band tool inventory from agent system prompts so tool-calling models rely on the structured tool definitions as the single source of truth, improving prompt stability and reducing stale tool guidance.
|
||||
- Agents/cache: diagnostics: add prompt-cache break diagnostics, trace live cache scenarios through embedded runner paths, and show cache reuse explicitly in `openclaw status --verbose`. Thanks @vincentkoc.
|
||||
- Agents/cache: stabilize cache-relevant system prompt fingerprints by normalizing equivalent structured prompt whitespace, line endings, hook-added system context, and runtime capability ordering so semantically unchanged prompts reuse KV/cache more reliably. Thanks @vincentkoc.
|
||||
- Providers/CLI: remove bundled CLI text-provider backends and the `agents.defaults.cliBackends` surface, while keeping ACP harness sessions and Gemini media understanding on the native bundled providers.
|
||||
- Providers/OpenAI Codex: add forward-compat `openai-codex/gpt-5.4-mini` synthesis across provider runtime, model catalog, and model listing so Codex mini works before bundled Pi catalog updates land.
|
||||
- Providers/OpenAI: add an opt-in GPT personality and move GPT-5 prompt tuning onto provider-owned system-prompt contributions so cache-stable guidance stays above the prompt cache boundary and embedded runner paths reuse the same provider-specific prompt behavior.
|
||||
- Docs/IRC: replace public IRC hostname examples with `irc.example.com` and recommend private servers for bot coordination while listing common public networks for intentional use.
|
||||
|
|
|
|||
|
|
@ -57,7 +57,6 @@ Notes:
|
|||
- `--reset`: reset dev config + credentials + sessions + workspace (requires `--dev`).
|
||||
- `--force`: kill any existing listener on the selected port before starting.
|
||||
- `--verbose`: verbose logs.
|
||||
- `--cli-backend-logs`: only show CLI backend logs in the console (and enable stdout/stderr).
|
||||
- `--ws-log <auto|full|compact>`: websocket log style (default `auto`).
|
||||
- `--compact`: alias for `--ws-log compact`.
|
||||
- `--raw-stream`: log raw model stream events to jsonl.
|
||||
|
|
|
|||
|
|
@ -501,7 +501,7 @@ Options:
|
|||
`openrouter-api-key`, `kilocode-api-key`, `litellm-api-key`, `ai-gateway-api-key`,
|
||||
`cloudflare-ai-gateway-api-key`, `moonshot-api-key`, `moonshot-api-key-cn`,
|
||||
`kimi-code-api-key`, `synthetic-api-key`, `venice-api-key`, `together-api-key`,
|
||||
`huggingface-api-key`, `apiKey`, `gemini-api-key`, `google-gemini-cli`, `zai-api-key`,
|
||||
`huggingface-api-key`, `apiKey`, `gemini-api-key`, `zai-api-key`,
|
||||
`zai-coding-global`, `zai-coding-cn`, `zai-global`, `zai-cn`, `xiaomi-api-key`,
|
||||
`minimax-global-oauth`, `minimax-global-api`, `minimax-cn-oauth`, `minimax-cn-api`,
|
||||
`opencode-zen`, `opencode-go`, `github-copilot`, `copilot-proxy`, `xai-api-key`,
|
||||
|
|
@ -1353,7 +1353,6 @@ Options:
|
|||
- `--reset` (reset dev config + credentials + sessions + workspace)
|
||||
- `--force` (kill existing listener on port)
|
||||
- `--verbose`
|
||||
- `--cli-backend-logs`
|
||||
- `--ws-log <auto|full|compact>`
|
||||
- `--compact` (alias for `--ws-log compact`)
|
||||
- `--raw-stream`
|
||||
|
|
|
|||
|
|
@ -163,11 +163,9 @@ Current bundled examples:
|
|||
normalization (`input` / `output` and `prompt` / `completion` families), the
|
||||
shared `openai-responses-defaults` stream family for native OpenAI/Codex
|
||||
wrappers, and provider-family metadata
|
||||
- `google` and `google-gemini-cli`: Gemini 3.1 forward-compat fallback,
|
||||
native Gemini replay validation, bootstrap replay sanitation, tagged
|
||||
reasoning-output mode, and modern-model matching; Gemini CLI OAuth also owns
|
||||
auth-profile token formatting, usage-token parsing, and quota endpoint
|
||||
fetching for usage surfaces
|
||||
- `google`: Gemini 3.1 forward-compat fallback, native Gemini replay
|
||||
validation, bootstrap replay sanitation, tagged reasoning-output mode, and
|
||||
modern-model matching
|
||||
- `moonshot`: shared transport, plugin-owned thinking payload normalization
|
||||
- `kilocode`: shared transport, plugin-owned request headers, reasoning payload
|
||||
normalization, proxy-Gemini thought-signature sanitation, and cache-TTL
|
||||
|
|
@ -331,21 +329,10 @@ OpenClaw ships with the pi‑ai catalog. These providers require **no**
|
|||
(or legacy `cached_content`) to forward a provider-native
|
||||
`cachedContents/...` handle; Gemini cache hits surface as OpenClaw `cacheRead`
|
||||
|
||||
### Google Vertex and Gemini CLI
|
||||
### Google Vertex
|
||||
|
||||
- Providers: `google-vertex`, `google-gemini-cli`
|
||||
- Auth: Vertex uses gcloud ADC; Gemini CLI uses its OAuth flow
|
||||
- Caution: Gemini CLI OAuth in OpenClaw is an unofficial integration. Some users have reported Google account restrictions after using third-party clients. Review Google's terms and use a non-critical account if you choose to proceed.
|
||||
- Gemini CLI OAuth is shipped as part of the bundled `google` plugin.
|
||||
- Install Gemini CLI first:
|
||||
- `brew install gemini-cli`
|
||||
- or `npm install -g @google/gemini-cli`
|
||||
- Enable: `openclaw plugins enable google`
|
||||
- Login: `openclaw models auth login --provider google-gemini-cli --set-default`
|
||||
- Default model: `google-gemini-cli/gemini-3.1-pro-preview`
|
||||
- Note: you do **not** paste a client id or secret into `openclaw.json`. The CLI login flow stores
|
||||
tokens in auth profiles on the gateway host.
|
||||
- If requests fail after login, set `GOOGLE_CLOUD_PROJECT` or `GOOGLE_CLOUD_PROJECT_ID` on the gateway host.
|
||||
- Provider: `google-vertex`
|
||||
- Auth: gcloud ADC
|
||||
- Gemini CLI JSON replies are parsed from `response`; usage falls back to
|
||||
`stats`, with `stats.cached` normalized into OpenClaw `cacheRead`.
|
||||
|
||||
|
|
|
|||
|
|
@ -1360,7 +1360,6 @@
|
|||
"gateway/openai-http-api",
|
||||
"gateway/openresponses-http-api",
|
||||
"gateway/tools-invoke-http-api",
|
||||
"gateway/cli-backends",
|
||||
"gateway/local-models"
|
||||
]
|
||||
},
|
||||
|
|
|
|||
|
|
@ -1,287 +0,0 @@
|
|||
---
|
||||
summary: "CLI backends: local AI CLI fallback with optional MCP tool bridge"
|
||||
read_when:
|
||||
- You want a reliable fallback when API providers fail
|
||||
- You are running Codex CLI or other local AI CLIs and want to reuse them
|
||||
- You want to understand the MCP loopback bridge for CLI backend tool access
|
||||
title: "CLI Backends"
|
||||
---
|
||||
|
||||
# CLI backends (fallback runtime)
|
||||
|
||||
OpenClaw can run **local AI CLIs** as a **text-only fallback** when API providers are down,
|
||||
rate-limited, or temporarily misbehaving. This is intentionally conservative:
|
||||
|
||||
- **OpenClaw tools are not injected directly**, but backends with `bundleMcp: true`
|
||||
can receive gateway tools via a loopback MCP bridge.
|
||||
- **JSONL streaming** for CLIs that support it.
|
||||
- **Sessions are supported** (so follow-up turns stay coherent).
|
||||
- **Images can be passed through** if the CLI accepts image paths.
|
||||
|
||||
This is designed as a **safety net** rather than a primary path. Use it when you
|
||||
want “always works” text responses without relying on external APIs.
|
||||
|
||||
If you want a full harness runtime with ACP session controls, background tasks,
|
||||
thread/conversation binding, and persistent external coding sessions, use
|
||||
[ACP Agents](/tools/acp-agents) instead. CLI backends are not ACP.
|
||||
|
||||
## Beginner-friendly quick start
|
||||
|
||||
You can use Codex CLI **without any config** (the bundled OpenAI plugin
|
||||
registers a default backend):
|
||||
|
||||
```bash
|
||||
openclaw agent --message "hi" --model codex-cli/gpt-5.4
|
||||
```
|
||||
|
||||
If your gateway runs under launchd/systemd and PATH is minimal, add just the
|
||||
command path:
|
||||
|
||||
```json5
|
||||
{
|
||||
agents: {
|
||||
defaults: {
|
||||
cliBackends: {
|
||||
"codex-cli": {
|
||||
command: "/opt/homebrew/bin/codex",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
That’s it. No keys, no extra auth config needed beyond the CLI itself.
|
||||
|
||||
If you use a bundled CLI backend as the **primary message provider** on a
|
||||
gateway host, OpenClaw now auto-loads the owning bundled plugin when your config
|
||||
explicitly references that backend in a model ref or under
|
||||
`agents.defaults.cliBackends`.
|
||||
|
||||
## Using it as a fallback
|
||||
|
||||
Add a CLI backend to your fallback list so it only runs when primary models fail:
|
||||
|
||||
```json5
|
||||
{
|
||||
agents: {
|
||||
defaults: {
|
||||
model: {
|
||||
primary: "anthropic/claude-opus-4-6",
|
||||
fallbacks: ["codex-cli/gpt-5.4"],
|
||||
},
|
||||
models: {
|
||||
"anthropic/claude-opus-4-6": { alias: "Opus" },
|
||||
"codex-cli/gpt-5.4": {},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
Notes:
|
||||
|
||||
- If you use `agents.defaults.models` (allowlist), you must include your CLI backend models there too.
|
||||
- If the primary provider fails (auth, rate limits, timeouts), OpenClaw will
|
||||
try the CLI backend next.
|
||||
|
||||
## Configuration overview
|
||||
|
||||
All CLI backends live under:
|
||||
|
||||
```
|
||||
agents.defaults.cliBackends
|
||||
```
|
||||
|
||||
Each entry is keyed by a **provider id** (e.g. `codex-cli`, `my-cli`).
|
||||
The provider id becomes the left side of your model ref:
|
||||
|
||||
```
|
||||
<provider>/<model>
|
||||
```
|
||||
|
||||
### Example configuration
|
||||
|
||||
```json5
|
||||
{
|
||||
agents: {
|
||||
defaults: {
|
||||
cliBackends: {
|
||||
"codex-cli": {
|
||||
command: "/opt/homebrew/bin/codex",
|
||||
},
|
||||
"my-cli": {
|
||||
command: "my-cli",
|
||||
args: ["--json"],
|
||||
output: "json",
|
||||
input: "arg",
|
||||
modelArg: "--model",
|
||||
modelAliases: {
|
||||
"claude-opus-4-6": "opus",
|
||||
"claude-sonnet-4-6": "sonnet",
|
||||
},
|
||||
sessionArg: "--session",
|
||||
sessionMode: "existing",
|
||||
sessionIdFields: ["session_id", "conversation_id"],
|
||||
systemPromptArg: "--system",
|
||||
systemPromptWhen: "first",
|
||||
imageArg: "--image",
|
||||
imageMode: "repeat",
|
||||
serialize: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
## How it works
|
||||
|
||||
1. **Selects a backend** based on the provider prefix (`codex-cli/...`).
|
||||
2. **Builds a system prompt** using the same OpenClaw prompt + workspace context.
|
||||
3. **Executes the CLI** with a session id (if supported) so history stays consistent.
|
||||
4. **Parses output** (JSON or plain text) and returns the final text.
|
||||
5. **Persists session ids** per backend, so follow-ups reuse the same CLI session.
|
||||
|
||||
<Warning>
|
||||
The bundled Anthropic `claude-cli` backend was removed after Anthropic's
|
||||
OpenClaw billing boundary changed. OpenClaw still supports generic CLI
|
||||
backends, but Anthropic API traffic should use the Anthropic provider directly
|
||||
instead of the removed local Claude CLI path.
|
||||
</Warning>
|
||||
|
||||
## Sessions
|
||||
|
||||
- If the CLI supports sessions, set `sessionArg` (e.g. `--session-id`) or
|
||||
`sessionArgs` (placeholder `{sessionId}`) when the ID needs to be inserted
|
||||
into multiple flags.
|
||||
- If the CLI uses a **resume subcommand** with different flags, set
|
||||
`resumeArgs` (replaces `args` when resuming) and optionally `resumeOutput`
|
||||
(for non-JSON resumes).
|
||||
- `sessionMode`:
|
||||
- `always`: always send a session id (new UUID if none stored).
|
||||
- `existing`: only send a session id if one was stored before.
|
||||
- `none`: never send a session id.
|
||||
|
||||
Serialization notes:
|
||||
|
||||
- `serialize: true` keeps same-lane runs ordered.
|
||||
- Most CLIs serialize on one provider lane.
|
||||
- OpenClaw drops stored CLI session reuse when the backend auth state changes, including relogin, token rotation, or a changed auth profile credential.
|
||||
|
||||
## Images (pass-through)
|
||||
|
||||
If your CLI accepts image paths, set `imageArg`:
|
||||
|
||||
```json5
|
||||
imageArg: "--image",
|
||||
imageMode: "repeat"
|
||||
```
|
||||
|
||||
OpenClaw will write base64 images to temp files. If `imageArg` is set, those
|
||||
paths are passed as CLI args. If `imageArg` is missing, OpenClaw appends the
|
||||
file paths to the prompt (path injection), which is enough for CLIs that auto-
|
||||
load local files from plain paths.
|
||||
|
||||
## Inputs / outputs
|
||||
|
||||
- `output: "json"` (default) tries to parse JSON and extract text + session id.
|
||||
- For Gemini CLI JSON output, OpenClaw reads reply text from `response` and
|
||||
usage from `stats` when `usage` is missing or empty.
|
||||
- `output: "jsonl"` parses JSONL streams (for example Codex CLI `--json`) and extracts the final agent message plus session
|
||||
identifiers when present.
|
||||
- `output: "text"` treats stdout as the final response.
|
||||
|
||||
Input modes:
|
||||
|
||||
- `input: "arg"` (default) passes the prompt as the last CLI arg.
|
||||
- `input: "stdin"` sends the prompt via stdin.
|
||||
- If the prompt is very long and `maxPromptArgChars` is set, stdin is used.
|
||||
|
||||
## Defaults (plugin-owned)
|
||||
|
||||
The bundled OpenAI plugin also registers a default for `codex-cli`:
|
||||
|
||||
- `command: "codex"`
|
||||
- `args: ["exec","--json","--color","never","--sandbox","workspace-write","--skip-git-repo-check"]`
|
||||
- `resumeArgs: ["exec","resume","{sessionId}","--color","never","--sandbox","workspace-write","--skip-git-repo-check"]`
|
||||
- `output: "jsonl"`
|
||||
- `resumeOutput: "text"`
|
||||
- `modelArg: "--model"`
|
||||
- `imageArg: "--image"`
|
||||
- `sessionMode: "existing"`
|
||||
|
||||
The bundled Google plugin also registers a default for `google-gemini-cli`:
|
||||
|
||||
- `command: "gemini"`
|
||||
- `args: ["--prompt", "--output-format", "json"]`
|
||||
- `resumeArgs: ["--resume", "{sessionId}", "--prompt", "--output-format", "json"]`
|
||||
- `modelArg: "--model"`
|
||||
- `sessionMode: "existing"`
|
||||
- `sessionIdFields: ["session_id", "sessionId"]`
|
||||
|
||||
Prerequisite: the local Gemini CLI must be installed and available as
|
||||
`gemini` on `PATH` (`brew install gemini-cli` or
|
||||
`npm install -g @google/gemini-cli`).
|
||||
|
||||
Gemini CLI JSON notes:
|
||||
|
||||
- Reply text is read from the JSON `response` field.
|
||||
- Usage falls back to `stats` when `usage` is absent or empty.
|
||||
- `stats.cached` is normalized into OpenClaw `cacheRead`.
|
||||
- If `stats.input` is missing, OpenClaw derives input tokens from
|
||||
`stats.input_tokens - stats.cached`.
|
||||
|
||||
Override only if needed (common: absolute `command` path).
|
||||
|
||||
## Plugin-owned defaults
|
||||
|
||||
CLI backend defaults are now part of the plugin surface:
|
||||
|
||||
- Plugins register them with `api.registerCliBackend(...)`.
|
||||
- The backend `id` becomes the provider prefix in model refs.
|
||||
- User config in `agents.defaults.cliBackends.<id>` still overrides the plugin default.
|
||||
- Backend-specific config cleanup stays plugin-owned through the optional
|
||||
`normalizeConfig` hook.
|
||||
|
||||
## Bundle MCP overlays
|
||||
|
||||
CLI backends do **not** receive OpenClaw tool calls directly, but a backend can
|
||||
opt into a generated MCP config overlay with `bundleMcp: true`.
|
||||
|
||||
Current bundled behavior:
|
||||
|
||||
- `codex-cli`: no bundle MCP overlay
|
||||
- `google-gemini-cli`: no bundle MCP overlay
|
||||
|
||||
When bundle MCP is enabled, OpenClaw:
|
||||
|
||||
- spawns a loopback HTTP MCP server that exposes gateway tools to the CLI process
|
||||
- authenticates the bridge with a per-session token (`OPENCLAW_MCP_TOKEN`)
|
||||
- scopes tool access to the current session, account, and channel context
|
||||
- loads enabled bundle-MCP servers for the current workspace
|
||||
- merges them with any existing backend `--mcp-config`
|
||||
- rewrites the CLI args to pass `--strict-mcp-config --mcp-config <generated-file>`
|
||||
|
||||
If no MCP servers are enabled, OpenClaw still injects a strict config when a
|
||||
backend opts into bundle MCP so background runs stay isolated.
|
||||
|
||||
## Limitations
|
||||
|
||||
- **No direct OpenClaw tool calls.** OpenClaw does not inject tool calls into
|
||||
the CLI backend protocol. Backends only see gateway tools when they opt into
|
||||
`bundleMcp: true`.
|
||||
- **Streaming is backend-specific.** Some backends stream JSONL; others buffer
|
||||
until exit.
|
||||
- **Structured outputs** depend on the CLI’s JSON format.
|
||||
- **Codex CLI sessions** resume via text output (no JSONL), which is less
|
||||
structured than the initial `--json` run. OpenClaw sessions still work
|
||||
normally.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
- **CLI not found**: set `command` to a full path.
|
||||
- **Wrong model name**: use `modelAliases` to map `provider/model` → CLI model.
|
||||
- **No session continuity**: ensure `sessionArg` is set and `sessionMode` is not
|
||||
`none` (Codex CLI currently cannot resume with JSON output).
|
||||
- **Images ignored**: set `imageArg` (and verify CLI supports file paths).
|
||||
|
|
@ -1064,36 +1064,6 @@ Z.AI GLM-4.x models automatically enable thinking mode unless you set `--thinkin
|
|||
Z.AI models enable `tool_stream` by default for tool call streaming. Set `agents.defaults.models["zai/<model>"].params.tool_stream` to `false` to disable it.
|
||||
Anthropic Claude 4.6 models default to `adaptive` thinking when no explicit thinking level is set.
|
||||
|
||||
### `agents.defaults.cliBackends`
|
||||
|
||||
Optional CLI backends for text-only fallback runs (no tool calls). Useful as a backup when API providers fail.
|
||||
|
||||
```json5
|
||||
{
|
||||
agents: {
|
||||
defaults: {
|
||||
cliBackends: {
|
||||
"codex-cli": {
|
||||
command: "/opt/homebrew/bin/codex",
|
||||
},
|
||||
"my-cli": {
|
||||
command: "my-cli",
|
||||
args: ["--json"],
|
||||
output: "json",
|
||||
modelArg: "--model",
|
||||
sessionArg: "--session",
|
||||
sessionMode: "existing",
|
||||
systemPromptArg: "--system",
|
||||
systemPromptWhen: "first",
|
||||
imageArg: "--image",
|
||||
imageMode: "repeat",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
- CLI backends are text-first; tools are always disabled.
|
||||
- Sessions supported when `sessionArg` is set.
|
||||
- Image pass-through supported when `imageArg` accepts file paths.
|
||||
|
|
|
|||
|
|
@ -315,7 +315,7 @@ skips refresh attempts.
|
|||
Doctor also detects stale removed Anthropic Claude CLI state. If old
|
||||
`anthropic:claude-cli` credential bytes still exist in `auth-profiles.json`,
|
||||
doctor converts them back into Anthropic token/OAuth profiles and rewrites
|
||||
stale `claude-cli/...` model refs plus `agents.defaults.cliBackends.claude-cli`.
|
||||
stale `claude-cli/...` model refs.
|
||||
If the bytes are gone, doctor removes the stale config and prints recovery
|
||||
commands instead.
|
||||
|
||||
|
|
|
|||
|
|
@ -675,17 +675,11 @@ for usage/billing and raise limits as needed.
|
|||
<Accordion title="How do I set up Gemini CLI OAuth?">
|
||||
Gemini CLI uses a **plugin auth flow**, not a client id or secret in `openclaw.json`.
|
||||
|
||||
Steps:
|
||||
Use the Gemini API provider instead:
|
||||
|
||||
1. Install Gemini CLI locally so `gemini` is on `PATH`
|
||||
- Homebrew: `brew install gemini-cli`
|
||||
- npm: `npm install -g @google/gemini-cli`
|
||||
2. Enable the plugin: `openclaw plugins enable google`
|
||||
3. Login: `openclaw models auth login --provider google-gemini-cli --set-default`
|
||||
4. Default model after login: `google-gemini-cli/gemini-3.1-pro-preview`
|
||||
5. If requests fail, set `GOOGLE_CLOUD_PROJECT` or `GOOGLE_CLOUD_PROJECT_ID` on the gateway host
|
||||
|
||||
This stores OAuth tokens in auth profiles on the gateway host. Details: [Model providers](/concepts/model-providers).
|
||||
1. Enable the plugin: `openclaw plugins enable google`
|
||||
2. Run `openclaw onboard --auth-choice gemini-api-key`
|
||||
3. Set a Google model such as `google/gemini-3.1-pro-preview`
|
||||
|
||||
</Accordion>
|
||||
|
||||
|
|
|
|||
|
|
@ -196,7 +196,7 @@ Live tests are split into two layers so we can isolate failures:
|
|||
- `OPENCLAW_LIVE_MODELS=all` is an alias for the modern allowlist
|
||||
- or `OPENCLAW_LIVE_MODELS="openai/gpt-5.4,anthropic/claude-opus-4-6,..."` (comma allowlist)
|
||||
- How to select providers:
|
||||
- `OPENCLAW_LIVE_PROVIDERS="google,google-antigravity,google-gemini-cli"` (comma allowlist)
|
||||
- `OPENCLAW_LIVE_PROVIDERS="google,google-antigravity"` (comma allowlist)
|
||||
- Where keys come from:
|
||||
- By default: profile store and env fallbacks
|
||||
- Set `OPENCLAW_LIVE_REQUIRE_PROFILE_KEYS=1` to enforce **profile store** only
|
||||
|
|
@ -227,7 +227,7 @@ Live tests are split into two layers so we can isolate failures:
|
|||
- `OPENCLAW_LIVE_GATEWAY_MODELS=all` is an alias for the modern allowlist
|
||||
- Or set `OPENCLAW_LIVE_GATEWAY_MODELS="provider/model"` (or comma list) to narrow
|
||||
- How to select providers (avoid “OpenRouter everything”):
|
||||
- `OPENCLAW_LIVE_GATEWAY_PROVIDERS="google,google-antigravity,google-gemini-cli,openai,anthropic,zai,minimax"` (comma allowlist)
|
||||
- `OPENCLAW_LIVE_GATEWAY_PROVIDERS="google,google-antigravity,openai,anthropic,zai,minimax"` (comma allowlist)
|
||||
- Tool + image probes are always on in this live test:
|
||||
- `read` probe + `exec+read` probe (tool stress)
|
||||
- image probe runs when the model advertises image input support
|
||||
|
|
@ -245,46 +245,6 @@ openclaw models list
|
|||
openclaw models list --json
|
||||
```
|
||||
|
||||
## Live: CLI backend smoke (Codex CLI or other local CLIs)
|
||||
|
||||
- Test: `src/gateway/gateway-cli-backend.live.test.ts`
|
||||
- Goal: validate the Gateway + agent pipeline using a local CLI backend, without touching your default config.
|
||||
- Enable:
|
||||
- `pnpm test:live` (or `OPENCLAW_LIVE_TEST=1` if invoking Vitest directly)
|
||||
- `OPENCLAW_LIVE_CLI_BACKEND=1`
|
||||
- Defaults:
|
||||
- Model: `codex-cli/gpt-5.4`
|
||||
- Command: `codex`
|
||||
- Args: `["exec","--json","--color","never","--sandbox","read-only","--skip-git-repo-check"]`
|
||||
- Overrides (optional):
|
||||
- `OPENCLAW_LIVE_CLI_BACKEND_MODEL="codex-cli/gpt-5.4"`
|
||||
- `OPENCLAW_LIVE_CLI_BACKEND_COMMAND="/full/path/to/codex"`
|
||||
- `OPENCLAW_LIVE_CLI_BACKEND_ARGS='["exec","--json","--color","never","--sandbox","read-only","--skip-git-repo-check"]'`
|
||||
- `OPENCLAW_LIVE_CLI_BACKEND_IMAGE_PROBE=1` to send a real image attachment (paths are injected into the prompt).
|
||||
- `OPENCLAW_LIVE_CLI_BACKEND_IMAGE_ARG="--image"` to pass image file paths as CLI args instead of prompt injection.
|
||||
- `OPENCLAW_LIVE_CLI_BACKEND_IMAGE_MODE="repeat"` (or `"list"`) to control how image args are passed when `IMAGE_ARG` is set.
|
||||
- `OPENCLAW_LIVE_CLI_BACKEND_RESUME_PROBE=1` to send a second turn and validate resume flow.
|
||||
|
||||
Example:
|
||||
|
||||
```bash
|
||||
OPENCLAW_LIVE_CLI_BACKEND=1 \
|
||||
OPENCLAW_LIVE_CLI_BACKEND_MODEL="codex-cli/gpt-5.4" \
|
||||
pnpm test:live src/gateway/gateway-cli-backend.live.test.ts
|
||||
```
|
||||
|
||||
Docker recipe:
|
||||
|
||||
```bash
|
||||
pnpm test:docker:live-cli-backend
|
||||
```
|
||||
|
||||
Notes:
|
||||
|
||||
- The Docker runner lives at `scripts/test-live-cli-backend-docker.sh`.
|
||||
- It runs the live CLI-backend smoke inside the repo Docker image as the non-root `node` user.
|
||||
- For `codex-cli`, it installs the Linux `@openai/codex` package into a cached writable prefix at `OPENCLAW_DOCKER_CLI_TOOLS_DIR` (default: `~/.cache/openclaw/docker-cli-tools`).
|
||||
|
||||
## Live: ACP bind smoke (`/acp spawn ... --bind here`)
|
||||
|
||||
- Test: `src/gateway/gateway-acp-bind.live.test.ts`
|
||||
|
|
@ -349,10 +309,6 @@ Notes:
|
|||
|
||||
- `google/...` uses the Gemini API (API key).
|
||||
- `google-antigravity/...` uses the Antigravity OAuth bridge (Cloud Code Assist-style agent endpoint).
|
||||
- `google-gemini-cli/...` uses the local Gemini CLI on your machine (separate auth + tooling quirks).
|
||||
- Gemini API vs Gemini CLI:
|
||||
- API: OpenClaw calls Google’s hosted Gemini API over HTTP (API key / profile auth); this is what most users mean by “Gemini”.
|
||||
- CLI: OpenClaw shells out to a local `gemini` binary; it has its own auth and can behave differently (streaming/tool support/version skew).
|
||||
|
||||
## Live: model matrix (what we cover)
|
||||
|
||||
|
|
@ -403,7 +359,7 @@ If you have keys enabled, we also support testing via:
|
|||
|
||||
More providers you can include in the live matrix (if you have creds/config):
|
||||
|
||||
- Built-in: `openai`, `openai-codex`, `anthropic`, `google`, `google-vertex`, `google-antigravity`, `google-gemini-cli`, `zai`, `openrouter`, `opencode`, `opencode-go`, `xai`, `groq`, `cerebras`, `mistral`, `github-copilot`
|
||||
- Built-in: `openai`, `openai-codex`, `anthropic`, `google`, `google-vertex`, `google-antigravity`, `zai`, `openrouter`, `opencode`, `opencode-go`, `xai`, `groq`, `cerebras`, `mistral`, `github-copilot`
|
||||
- Via `models.providers` (custom endpoints): `minimax` (cloud/API), plus any OpenAI/Anthropic-compatible proxy (LM Studio, vLLM, LiteLLM, etc.)
|
||||
|
||||
Tip: don’t try to hardcode “all models” in docs. The authoritative list is whatever `discoverModels(...)` returns on your machine + whatever keys are available.
|
||||
|
|
@ -476,7 +432,6 @@ The live-model Docker runners also bind-mount only the needed CLI auth homes (or
|
|||
|
||||
- Direct models: `pnpm test:docker:live-models` (script: `scripts/test-live-models-docker.sh`)
|
||||
- ACP bind smoke: `pnpm test:docker:live-acp-bind` (script: `scripts/test-live-acp-bind-docker.sh`)
|
||||
- CLI backend smoke: `pnpm test:docker:live-cli-backend` (script: `scripts/test-live-cli-backend-docker.sh`)
|
||||
- Gateway + dev agent: `pnpm test:docker:live-gateway` (script: `scripts/test-live-gateway-models-docker.sh`)
|
||||
- Open WebUI live smoke: `pnpm test:docker:openwebui` (script: `scripts/e2e/openwebui-docker.sh`)
|
||||
- Onboarding wizard (TTY, full scaffolding): `pnpm test:docker:onboard` (script: `scripts/e2e/onboard-docker.sh`)
|
||||
|
|
|
|||
|
|
@ -151,7 +151,6 @@ A single plugin can register any number of capabilities via the `api` object:
|
|||
| Capability | Registration method | Detailed guide |
|
||||
| ---------------------- | ------------------------------------------------ | ------------------------------------------------------------------------------- |
|
||||
| Text inference (LLM) | `api.registerProvider(...)` | [Provider Plugins](/plugins/sdk-provider-plugins) |
|
||||
| CLI inference backend | `api.registerCliBackend(...)` | [CLI Backends](/gateway/cli-backends) |
|
||||
| Channel / messaging | `api.registerChannel(...)` | [Channel Plugins](/plugins/sdk-channel-plugins) |
|
||||
| Speech (TTS/STT) | `api.registerSpeechProvider(...)` | [Provider Plugins](/plugins/sdk-provider-plugins#step-5-add-extra-capabilities) |
|
||||
| Realtime transcription | `api.registerRealtimeTranscriptionProvider(...)` | [Provider Plugins](/plugins/sdk-provider-plugins#step-5-add-extra-capabilities) |
|
||||
|
|
|
|||
|
|
@ -89,7 +89,6 @@ Those belong in your plugin code and `package.json`.
|
|||
"modelSupport": {
|
||||
"modelPrefixes": ["router-"]
|
||||
},
|
||||
"cliBackends": ["openrouter-cli"],
|
||||
"providerAuthEnvVars": {
|
||||
"openrouter": ["OPENROUTER_API_KEY"]
|
||||
},
|
||||
|
|
@ -140,7 +139,6 @@ Those belong in your plugin code and `package.json`.
|
|||
| `channels` | No | `string[]` | Channel ids owned by this plugin. Used for discovery and config validation. |
|
||||
| `providers` | No | `string[]` | Provider ids owned by this plugin. |
|
||||
| `modelSupport` | No | `object` | Manifest-owned shorthand model-family metadata used to auto-load the plugin before runtime. |
|
||||
| `cliBackends` | No | `string[]` | CLI inference backend ids owned by this plugin. Used for startup auto-activation from explicit config refs. |
|
||||
| `providerAuthEnvVars` | No | `Record<string, string[]>` | Cheap provider-auth env metadata that OpenClaw can inspect without loading plugin code. |
|
||||
| `providerAuthChoices` | No | `object[]` | Cheap auth-choice metadata for onboarding pickers, preferred-provider resolution, and simple CLI flag wiring. |
|
||||
| `contracts` | No | `object` | Static bundled capability snapshot for speech, realtime transcription, realtime voice, media-understanding, image-generation, video-generation, web-fetch, web search, and tool ownership. |
|
||||
|
|
@ -399,7 +397,7 @@ See [Configuration reference](/gateway/configuration) for the full `plugins.*` s
|
|||
- `kind: "memory"` is selected by `plugins.slots.memory`.
|
||||
- `kind: "context-engine"` is selected by `plugins.slots.contextEngine`
|
||||
(default: built-in `legacy`).
|
||||
- `channels`, `providers`, `cliBackends`, and `skills` can be omitted when a
|
||||
- `channels`, `providers`, and `skills` can be omitted when a
|
||||
plugin does not need them.
|
||||
- If your plugin depends on native modules, document the build steps and any
|
||||
package-manager allowlist requirements (for example, pnpm `allow-build-scripts`
|
||||
|
|
|
|||
|
|
@ -351,18 +351,6 @@ Use `commands` by itself only when you do not need lazy root CLI registration.
|
|||
That eager compatibility path remains supported, but it does not install
|
||||
descriptor-backed placeholders for parse-time lazy loading.
|
||||
|
||||
### CLI backend registration
|
||||
|
||||
`api.registerCliBackend(...)` lets a plugin own the default config for a local
|
||||
AI CLI backend such as `codex-cli`.
|
||||
|
||||
- The backend `id` becomes the provider prefix in model refs like `codex-cli/gpt-5`.
|
||||
- The backend `config` uses the same shape as `agents.defaults.cliBackends.<id>`.
|
||||
- User config still wins. OpenClaw merges `agents.defaults.cliBackends.<id>` over the
|
||||
plugin default before running the CLI.
|
||||
- Use `normalizeConfig` when a backend needs compatibility rewrites after merge
|
||||
(for example normalizing old flag shapes).
|
||||
|
||||
### Exclusive slots
|
||||
|
||||
| Method | What it registers |
|
||||
|
|
|
|||
|
|
@ -283,7 +283,7 @@ API key auth, and dynamic model resolution.
|
|||
|
||||
Real bundled examples:
|
||||
|
||||
- `google` and `google-gemini-cli`: `google-gemini`
|
||||
- `google`: `google-gemini`
|
||||
- `openrouter`, `kilocode`, `opencode`, and `opencode-go`: `passthrough-gemini`
|
||||
- `amazon-bedrock` and `anthropic-vertex`: `anthropic-by-model`
|
||||
- `minimax`: `hybrid-anthropic-openai`
|
||||
|
|
@ -303,7 +303,7 @@ API key auth, and dynamic model resolution.
|
|||
|
||||
Real bundled examples:
|
||||
|
||||
- `google` and `google-gemini-cli`: `google-thinking`
|
||||
- `google`: `google-thinking`
|
||||
- `kilocode`: `kilocode-thinking`
|
||||
- `moonshot`: `moonshot-thinking`
|
||||
- `minimax` and `minimax-portal`: `minimax-fast-mode`
|
||||
|
|
|
|||
|
|
@ -225,8 +225,6 @@ The bundled Anthropic `claude-cli` backend was removed.
|
|||
- The same OpenClaw-like system prompt does not hit that guard on the
|
||||
Anthropic SDK + `ANTHROPIC_API_KEY` path.
|
||||
- Use Anthropic API keys for Anthropic traffic in OpenClaw.
|
||||
- If you need a local CLI fallback runtime, use another supported CLI backend
|
||||
such as Codex CLI. See [/gateway/cli-backends](/gateway/cli-backends).
|
||||
|
||||
## Notes
|
||||
|
||||
|
|
|
|||
|
|
@ -1,9 +1,9 @@
|
|||
---
|
||||
title: "Google (Gemini)"
|
||||
summary: "Google Gemini setup (API key + OAuth, image generation, media understanding, web search)"
|
||||
summary: "Google Gemini setup (API key, image generation, media understanding, web search)"
|
||||
read_when:
|
||||
- You want to use Google Gemini models with OpenClaw
|
||||
- You need the API key or OAuth auth flow
|
||||
- You need the API key auth flow
|
||||
---
|
||||
|
||||
# Google (Gemini)
|
||||
|
|
@ -15,7 +15,6 @@ Gemini Grounding.
|
|||
- Provider: `google`
|
||||
- Auth: `GEMINI_API_KEY` or `GOOGLE_API_KEY`
|
||||
- API: Google Gemini API
|
||||
- Alternative provider: `google-gemini-cli` (OAuth)
|
||||
|
||||
## Quick start
|
||||
|
||||
|
|
@ -46,46 +45,6 @@ openclaw onboard --non-interactive \
|
|||
--gemini-api-key "$GEMINI_API_KEY"
|
||||
```
|
||||
|
||||
## OAuth (Gemini CLI)
|
||||
|
||||
An alternative provider `google-gemini-cli` uses PKCE OAuth instead of an API
|
||||
key. This is an unofficial integration; some users report account
|
||||
restrictions. Use at your own risk.
|
||||
|
||||
- Default model: `google-gemini-cli/gemini-3.1-pro-preview`
|
||||
- Alias: `gemini-cli`
|
||||
- Install prerequisite: local Gemini CLI available as `gemini`
|
||||
- Homebrew: `brew install gemini-cli`
|
||||
- npm: `npm install -g @google/gemini-cli`
|
||||
- Login:
|
||||
|
||||
```bash
|
||||
openclaw models auth login --provider google-gemini-cli --set-default
|
||||
```
|
||||
|
||||
Environment variables:
|
||||
|
||||
- `OPENCLAW_GEMINI_OAUTH_CLIENT_ID`
|
||||
- `OPENCLAW_GEMINI_OAUTH_CLIENT_SECRET`
|
||||
|
||||
(Or the `GEMINI_CLI_*` variants.)
|
||||
|
||||
If Gemini CLI OAuth requests fail after login, set
|
||||
`GOOGLE_CLOUD_PROJECT` or `GOOGLE_CLOUD_PROJECT_ID` on the gateway host and
|
||||
retry.
|
||||
|
||||
If login fails before the browser flow starts, make sure the local `gemini`
|
||||
command is installed and on `PATH`. OpenClaw supports both Homebrew installs
|
||||
and global npm installs, including common Windows/npm layouts.
|
||||
|
||||
Gemini CLI JSON usage notes:
|
||||
|
||||
- Reply text comes from the CLI JSON `response` field.
|
||||
- Usage falls back to `stats` when the CLI leaves `usage` empty.
|
||||
- `stats.cached` is normalized into OpenClaw `cacheRead`.
|
||||
- If `stats.input` is missing, OpenClaw derives input tokens from
|
||||
`stats.input_tokens - stats.cached`.
|
||||
|
||||
## Capabilities
|
||||
|
||||
| Capability | Supported |
|
||||
|
|
@ -138,9 +97,8 @@ The bundled `google` image-generation provider defaults to
|
|||
- Edit mode: enabled, up to 5 input images
|
||||
- Geometry controls: `size`, `aspectRatio`, and `resolution`
|
||||
|
||||
The OAuth-only `google-gemini-cli` provider is a separate text-inference
|
||||
surface. Image generation, media understanding, and Gemini Grounding stay on
|
||||
the `google` provider id.
|
||||
Image generation, media understanding, and Gemini Grounding all stay on the
|
||||
`google` provider id.
|
||||
|
||||
## Environment note
|
||||
|
||||
|
|
|
|||
|
|
@ -50,7 +50,6 @@ model as `provider/model`.
|
|||
|
||||
- `anthropic-vertex` - implicit Anthropic on Google Vertex support when Vertex credentials are available; no separate onboarding auth choice
|
||||
- `copilot-proxy` - local VS Code Copilot Proxy bridge; use `openclaw onboard --auth-choice copilot-proxy`
|
||||
- `google-gemini-cli` - unofficial Gemini CLI OAuth flow; requires a local `gemini` install (`brew install gemini-cli` or `npm install -g @google/gemini-cli`); default model `google-gemini-cli/gemini-3.1-pro-preview`; use `openclaw onboard --auth-choice google-gemini-cli` or `openclaw models auth login --provider google-gemini-cli --set-default`
|
||||
|
||||
For the full provider catalog (xAI, Groq, Mistral, etc.) and advanced configuration,
|
||||
see [Model providers](/concepts/model-providers).
|
||||
|
|
|
|||
|
|
@ -23,11 +23,10 @@ instead of ACP.
|
|||
|
||||
There are three nearby surfaces that are easy to confuse:
|
||||
|
||||
| You want to... | Use this | Notes |
|
||||
| ---------------------------------------------------------------------------------- | ------------------------------------- | ----------------------------------------------------------------------------------------------------------- |
|
||||
| Run Codex, Claude Code, Gemini CLI, or another external harness _through_ OpenClaw | This page: ACP agents | Chat-bound sessions, `/acp spawn`, `sessions_spawn({ runtime: "acp" })`, background tasks, runtime controls |
|
||||
| Expose an OpenClaw Gateway session _as_ an ACP server for an editor or client | [`openclaw acp`](/cli/acp) | Bridge mode. IDE/client talks ACP to OpenClaw over stdio/WebSocket |
|
||||
| Reuse a local AI CLI as a text-only fallback model | [CLI Backends](/gateway/cli-backends) | Not ACP. No OpenClaw tools, no ACP controls, no harness runtime |
|
||||
| You want to... | Use this | Notes |
|
||||
| ---------------------------------------------------------------------------------- | -------------------------- | ----------------------------------------------------------------------------------------------------------- |
|
||||
| Run Codex, Claude Code, Gemini CLI, or another external harness _through_ OpenClaw | This page: ACP agents | Chat-bound sessions, `/acp spawn`, `sessions_spawn({ runtime: "acp" })`, background tasks, runtime controls |
|
||||
| Expose an OpenClaw Gateway session _as_ an ACP server for an editor or client | [`openclaw acp`](/cli/acp) | Bridge mode. IDE/client talks ACP to OpenClaw over stdio/WebSocket |
|
||||
|
||||
## Does this work out of the box?
|
||||
|
||||
|
|
@ -112,9 +111,7 @@ For Claude Code through ACP, the stack is:
|
|||
Important distinction:
|
||||
|
||||
- ACP Claude is a harness session with ACP controls, session resume, background-task tracking, and optional conversation/thread binding.
|
||||
- CLI backends are separate text-only local fallback runtimes. See [CLI Backends](/gateway/cli-backends).
|
||||
|
||||
For operators, the practical rule is:
|
||||
For operators, the practical rule is:
|
||||
|
||||
- want `/acp spawn`, bindable sessions, runtime controls, or persistent harness work: use ACP
|
||||
- want simple local text fallback through the raw CLI: use CLI backends
|
||||
|
|
|
|||
|
|
@ -1,35 +0,0 @@
|
|||
import type { CliBackendPlugin } from "openclaw/plugin-sdk/cli-backend";
|
||||
import {
|
||||
CLI_FRESH_WATCHDOG_DEFAULTS,
|
||||
CLI_RESUME_WATCHDOG_DEFAULTS,
|
||||
} from "openclaw/plugin-sdk/cli-backend";
|
||||
|
||||
const GEMINI_MODEL_ALIASES: Record<string, string> = {
|
||||
pro: "gemini-3.1-pro-preview",
|
||||
flash: "gemini-3.1-flash-preview",
|
||||
"flash-lite": "gemini-3.1-flash-lite-preview",
|
||||
};
|
||||
|
||||
export function buildGoogleGeminiCliBackend(): CliBackendPlugin {
|
||||
return {
|
||||
id: "google-gemini-cli",
|
||||
config: {
|
||||
command: "gemini",
|
||||
args: ["--prompt", "--output-format", "json"],
|
||||
resumeArgs: ["--resume", "{sessionId}", "--prompt", "--output-format", "json"],
|
||||
output: "json",
|
||||
input: "arg",
|
||||
modelArg: "--model",
|
||||
modelAliases: GEMINI_MODEL_ALIASES,
|
||||
sessionMode: "existing",
|
||||
sessionIdFields: ["session_id", "sessionId"],
|
||||
reliability: {
|
||||
watchdog: {
|
||||
fresh: { ...CLI_FRESH_WATCHDOG_DEFAULTS },
|
||||
resume: { ...CLI_RESUME_WATCHDOG_DEFAULTS },
|
||||
},
|
||||
},
|
||||
serialize: true,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
|
@ -1,135 +0,0 @@
|
|||
import type {
|
||||
OpenClawPluginApi,
|
||||
ProviderAuthContext,
|
||||
ProviderFetchUsageSnapshotContext,
|
||||
} from "openclaw/plugin-sdk/plugin-entry";
|
||||
import { buildOauthProviderAuthResult } from "openclaw/plugin-sdk/provider-auth-result";
|
||||
import { buildProviderReplayFamilyHooks } from "openclaw/plugin-sdk/provider-model-shared";
|
||||
import { buildProviderStreamFamilyHooks } from "openclaw/plugin-sdk/provider-stream-family";
|
||||
import { buildProviderToolCompatFamilyHooks } from "openclaw/plugin-sdk/provider-tools";
|
||||
import { fetchGeminiUsage } from "openclaw/plugin-sdk/provider-usage";
|
||||
import { formatGoogleOauthApiKey, parseGoogleUsageToken } from "./oauth-token-shared.js";
|
||||
import { isModernGoogleModel, resolveGoogleGeminiForwardCompatModel } from "./provider-models.js";
|
||||
|
||||
const PROVIDER_ID = "google-gemini-cli";
|
||||
const PROVIDER_LABEL = "Gemini CLI OAuth";
|
||||
const DEFAULT_MODEL = "google-gemini-cli/gemini-3.1-pro-preview";
|
||||
const ENV_VARS = [
|
||||
"OPENCLAW_GEMINI_OAUTH_CLIENT_ID",
|
||||
"OPENCLAW_GEMINI_OAUTH_CLIENT_SECRET",
|
||||
"GEMINI_CLI_OAUTH_CLIENT_ID",
|
||||
"GEMINI_CLI_OAUTH_CLIENT_SECRET",
|
||||
];
|
||||
|
||||
const GOOGLE_GEMINI_CLI_PROVIDER_HOOKS = {
|
||||
...buildProviderReplayFamilyHooks({ family: "google-gemini" }),
|
||||
...buildProviderStreamFamilyHooks("google-thinking"),
|
||||
...buildProviderToolCompatFamilyHooks("gemini"),
|
||||
};
|
||||
|
||||
async function fetchGeminiCliUsage(ctx: ProviderFetchUsageSnapshotContext) {
|
||||
return await fetchGeminiUsage(ctx.token, ctx.timeoutMs, ctx.fetchFn, PROVIDER_ID);
|
||||
}
|
||||
|
||||
export function registerGoogleGeminiCliProvider(api: OpenClawPluginApi) {
|
||||
api.registerProvider({
|
||||
id: PROVIDER_ID,
|
||||
label: PROVIDER_LABEL,
|
||||
docsPath: "/providers/models",
|
||||
aliases: ["gemini-cli"],
|
||||
envVars: ENV_VARS,
|
||||
auth: [
|
||||
{
|
||||
id: "oauth",
|
||||
label: "Google OAuth",
|
||||
hint: "PKCE + localhost callback",
|
||||
kind: "oauth",
|
||||
run: async (ctx: ProviderAuthContext) => {
|
||||
await ctx.prompter.note(
|
||||
[
|
||||
"This is an unofficial integration and is not endorsed by Google.",
|
||||
"Some users have reported account restrictions or suspensions after using third-party Gemini CLI and Antigravity OAuth clients.",
|
||||
"Proceed only if you understand and accept this risk.",
|
||||
].join("\n"),
|
||||
"Google Gemini CLI caution",
|
||||
);
|
||||
|
||||
const proceed = await ctx.prompter.confirm({
|
||||
message: "Continue with Google Gemini CLI OAuth?",
|
||||
initialValue: false,
|
||||
});
|
||||
if (!proceed) {
|
||||
await ctx.prompter.note("Skipped Google Gemini CLI OAuth setup.", "Setup skipped");
|
||||
return { profiles: [] };
|
||||
}
|
||||
|
||||
const spin = ctx.prompter.progress("Starting Gemini CLI OAuth…");
|
||||
try {
|
||||
const { loginGeminiCliOAuth } = await import("./oauth.runtime.js");
|
||||
const result = await loginGeminiCliOAuth({
|
||||
isRemote: ctx.isRemote,
|
||||
openUrl: ctx.openUrl,
|
||||
log: (msg) => ctx.runtime.log(msg),
|
||||
note: ctx.prompter.note,
|
||||
prompt: async (message) => String(await ctx.prompter.text({ message })),
|
||||
progress: spin,
|
||||
});
|
||||
|
||||
spin.stop("Gemini CLI OAuth complete");
|
||||
return buildOauthProviderAuthResult({
|
||||
providerId: PROVIDER_ID,
|
||||
defaultModel: DEFAULT_MODEL,
|
||||
access: result.access,
|
||||
refresh: result.refresh,
|
||||
expires: result.expires,
|
||||
email: result.email,
|
||||
...(result.projectId ? { credentialExtra: { projectId: result.projectId } } : {}),
|
||||
...(result.projectId
|
||||
? {
|
||||
notes: [
|
||||
"If requests fail, set GOOGLE_CLOUD_PROJECT or GOOGLE_CLOUD_PROJECT_ID.",
|
||||
],
|
||||
}
|
||||
: {}),
|
||||
});
|
||||
} catch (err) {
|
||||
spin.stop("Gemini CLI OAuth failed");
|
||||
await ctx.prompter.note(
|
||||
"Trouble with OAuth? Ensure your Google account has Gemini CLI access.",
|
||||
"OAuth help",
|
||||
);
|
||||
throw err;
|
||||
}
|
||||
},
|
||||
},
|
||||
],
|
||||
wizard: {
|
||||
setup: {
|
||||
choiceId: "google-gemini-cli",
|
||||
choiceLabel: "Gemini CLI OAuth",
|
||||
choiceHint: "Google OAuth with project-aware token payload",
|
||||
methodId: "oauth",
|
||||
},
|
||||
},
|
||||
resolveDynamicModel: (ctx) =>
|
||||
resolveGoogleGeminiForwardCompatModel({
|
||||
providerId: PROVIDER_ID,
|
||||
templateProviderId: "google",
|
||||
ctx,
|
||||
}),
|
||||
...GOOGLE_GEMINI_CLI_PROVIDER_HOOKS,
|
||||
isModernModelRef: ({ modelId }) => isModernGoogleModel(modelId),
|
||||
formatApiKey: (cred) => formatGoogleOauthApiKey(cred),
|
||||
resolveUsageAuth: async (ctx) => {
|
||||
const auth = await ctx.resolveOAuthToken();
|
||||
if (!auth) {
|
||||
return null;
|
||||
}
|
||||
return {
|
||||
...auth,
|
||||
token: parseGoogleUsageToken(auth.token),
|
||||
};
|
||||
},
|
||||
fetchUsageSnapshot: async (ctx) => await fetchGeminiCliUsage(ctx),
|
||||
});
|
||||
}
|
||||
|
|
@ -1,39 +1,19 @@
|
|||
import type { ImageGenerationProvider } from "openclaw/plugin-sdk/image-generation";
|
||||
import type { MediaUnderstandingProvider } from "openclaw/plugin-sdk/media-understanding";
|
||||
import {
|
||||
definePluginEntry,
|
||||
type OpenClawPluginApi,
|
||||
type ProviderAuthContext,
|
||||
type ProviderFetchUsageSnapshotContext,
|
||||
} from "openclaw/plugin-sdk/plugin-entry";
|
||||
import { definePluginEntry } from "openclaw/plugin-sdk/plugin-entry";
|
||||
import { createProviderApiKeyAuthMethod } from "openclaw/plugin-sdk/provider-auth-api-key";
|
||||
import type { ProviderPlugin } from "openclaw/plugin-sdk/provider-model-shared";
|
||||
import { buildProviderReplayFamilyHooks } from "openclaw/plugin-sdk/provider-model-shared";
|
||||
import { buildProviderStreamFamilyHooks } from "openclaw/plugin-sdk/provider-stream-family";
|
||||
import { buildProviderToolCompatFamilyHooks } from "openclaw/plugin-sdk/provider-tools";
|
||||
import {
|
||||
GOOGLE_GEMINI_DEFAULT_MODEL,
|
||||
applyGoogleGeminiModelDefault,
|
||||
normalizeGoogleProviderConfig,
|
||||
resolveGoogleGenerativeAiTransport,
|
||||
normalizeGoogleModelId,
|
||||
resolveGoogleGenerativeAiTransport,
|
||||
} from "./api.js";
|
||||
import { buildGoogleGeminiCliBackend } from "./cli-backend.js";
|
||||
import { formatGoogleOauthApiKey } from "./oauth-token-shared.js";
|
||||
import { isModernGoogleModel, resolveGoogleGeminiForwardCompatModel } from "./provider-models.js";
|
||||
import { createGeminiWebSearchProvider } from "./src/gemini-web-search-provider.js";
|
||||
|
||||
const GOOGLE_GEMINI_CLI_PROVIDER_ID = "google-gemini-cli";
|
||||
const GOOGLE_GEMINI_CLI_PROVIDER_LABEL = "Gemini CLI OAuth";
|
||||
const GOOGLE_GEMINI_CLI_DEFAULT_MODEL = "google-gemini-cli/gemini-3.1-pro-preview";
|
||||
const GOOGLE_GEMINI_CLI_ENV_VARS = [
|
||||
"OPENCLAW_GEMINI_OAUTH_CLIENT_ID",
|
||||
"OPENCLAW_GEMINI_OAUTH_CLIENT_SECRET",
|
||||
"GEMINI_CLI_OAUTH_CLIENT_ID",
|
||||
"GEMINI_CLI_OAUTH_CLIENT_SECRET",
|
||||
] as const;
|
||||
|
||||
let googleGeminiCliProviderPromise: Promise<ProviderPlugin> | null = null;
|
||||
let googleImageGenerationProviderPromise: Promise<ImageGenerationProvider> | null = null;
|
||||
let googleMediaUnderstandingProviderPromise: Promise<MediaUnderstandingProvider> | null = null;
|
||||
|
||||
|
|
@ -44,35 +24,12 @@ type GoogleMediaUnderstandingProvider = MediaUnderstandingProvider & {
|
|||
describeVideo: NonNullable<MediaUnderstandingProvider["describeVideo"]>;
|
||||
};
|
||||
|
||||
const GOOGLE_GEMINI_REPLAY_HOOKS = buildProviderReplayFamilyHooks({
|
||||
family: "google-gemini",
|
||||
});
|
||||
const GOOGLE_GEMINI_PROVIDER_HOOKS = {
|
||||
...GOOGLE_GEMINI_REPLAY_HOOKS,
|
||||
...buildProviderReplayFamilyHooks({
|
||||
family: "google-gemini",
|
||||
}),
|
||||
...buildProviderStreamFamilyHooks("google-thinking"),
|
||||
};
|
||||
const GOOGLE_GEMINI_PROVIDER_HOOKS_WITH_TOOL_COMPAT = {
|
||||
...GOOGLE_GEMINI_PROVIDER_HOOKS,
|
||||
...buildProviderToolCompatFamilyHooks("gemini"),
|
||||
};
|
||||
|
||||
async function loadGoogleGeminiCliProvider(): Promise<ProviderPlugin> {
|
||||
if (!googleGeminiCliProviderPromise) {
|
||||
googleGeminiCliProviderPromise = import("./gemini-cli-provider.js").then((mod) => {
|
||||
let provider: ProviderPlugin | undefined;
|
||||
mod.registerGoogleGeminiCliProvider({
|
||||
registerProvider(entry) {
|
||||
provider = entry;
|
||||
},
|
||||
} as Pick<OpenClawPluginApi, "registerProvider"> as OpenClawPluginApi);
|
||||
if (!provider) {
|
||||
throw new Error("google gemini cli provider missing provider registration");
|
||||
}
|
||||
return provider;
|
||||
});
|
||||
}
|
||||
return await googleGeminiCliProviderPromise;
|
||||
}
|
||||
|
||||
async function loadGoogleImageGenerationProvider(): Promise<ImageGenerationProvider> {
|
||||
if (!googleImageGenerationProviderPromise) {
|
||||
|
|
@ -105,61 +62,6 @@ async function loadGoogleRequiredMediaUnderstandingProvider(): Promise<GoogleMed
|
|||
return provider as GoogleMediaUnderstandingProvider;
|
||||
}
|
||||
|
||||
function createLazyGoogleGeminiCliProvider(): ProviderPlugin {
|
||||
return {
|
||||
id: GOOGLE_GEMINI_CLI_PROVIDER_ID,
|
||||
label: GOOGLE_GEMINI_CLI_PROVIDER_LABEL,
|
||||
docsPath: "/providers/models",
|
||||
aliases: ["gemini-cli"],
|
||||
envVars: [...GOOGLE_GEMINI_CLI_ENV_VARS],
|
||||
auth: [
|
||||
{
|
||||
id: "oauth",
|
||||
label: "Google OAuth",
|
||||
hint: "PKCE + localhost callback",
|
||||
kind: "oauth",
|
||||
run: async (ctx: ProviderAuthContext) => {
|
||||
const provider = await loadGoogleGeminiCliProvider();
|
||||
const authMethod = provider.auth?.[0];
|
||||
if (!authMethod || authMethod.kind !== "oauth") {
|
||||
return { profiles: [] };
|
||||
}
|
||||
return await authMethod.run(ctx);
|
||||
},
|
||||
},
|
||||
],
|
||||
wizard: {
|
||||
setup: {
|
||||
choiceId: "google-gemini-cli",
|
||||
choiceLabel: "Gemini CLI OAuth",
|
||||
choiceHint: "Google OAuth with project-aware token payload",
|
||||
methodId: "oauth",
|
||||
},
|
||||
},
|
||||
normalizeModelId: ({ modelId }) => normalizeGoogleModelId(modelId),
|
||||
resolveDynamicModel: (ctx) =>
|
||||
resolveGoogleGeminiForwardCompatModel({
|
||||
providerId: GOOGLE_GEMINI_CLI_PROVIDER_ID,
|
||||
templateProviderId: "google",
|
||||
ctx,
|
||||
}),
|
||||
...GOOGLE_GEMINI_PROVIDER_HOOKS_WITH_TOOL_COMPAT,
|
||||
isModernModelRef: ({ modelId }) => isModernGoogleModel(modelId),
|
||||
formatApiKey: (cred) => formatGoogleOauthApiKey(cred),
|
||||
resolveUsageAuth: async (ctx) => {
|
||||
const provider = await loadGoogleGeminiCliProvider();
|
||||
return await provider.resolveUsageAuth?.(ctx);
|
||||
},
|
||||
fetchUsageSnapshot: async (ctx: ProviderFetchUsageSnapshotContext) => {
|
||||
const provider = await loadGoogleGeminiCliProvider();
|
||||
if (!provider.fetchUsageSnapshot) {
|
||||
throw new Error("google gemini cli provider missing usage snapshot handler");
|
||||
}
|
||||
return await provider.fetchUsageSnapshot(ctx);
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
function createLazyGoogleImageGenerationProvider(): ImageGenerationProvider {
|
||||
return {
|
||||
id: "google",
|
||||
|
|
@ -242,7 +144,7 @@ export default definePluginEntry({
|
|||
choiceLabel: "Google Gemini API key",
|
||||
groupId: "google",
|
||||
groupLabel: "Google",
|
||||
groupHint: "Gemini API key + OAuth",
|
||||
groupHint: "Gemini API key",
|
||||
},
|
||||
}),
|
||||
],
|
||||
|
|
@ -254,14 +156,11 @@ export default definePluginEntry({
|
|||
resolveDynamicModel: (ctx) =>
|
||||
resolveGoogleGeminiForwardCompatModel({
|
||||
providerId: ctx.provider,
|
||||
templateProviderId: GOOGLE_GEMINI_CLI_PROVIDER_ID,
|
||||
ctx,
|
||||
}),
|
||||
...GOOGLE_GEMINI_PROVIDER_HOOKS,
|
||||
isModernModelRef: ({ modelId }) => isModernGoogleModel(modelId),
|
||||
});
|
||||
api.registerCliBackend(buildGoogleGeminiCliBackend());
|
||||
api.registerProvider(createLazyGoogleGeminiCliProvider());
|
||||
api.registerImageGenerationProvider(createLazyGoogleImageGenerationProvider());
|
||||
api.registerMediaUnderstandingProvider(createLazyGoogleMediaUnderstandingProvider());
|
||||
api.registerWebSearchProvider(createGeminiWebSearchProvider());
|
||||
|
|
|
|||
|
|
@ -1,9 +1,7 @@
|
|||
{
|
||||
"id": "google",
|
||||
"enabledByDefault": true,
|
||||
"providers": ["google", "google-gemini-cli"],
|
||||
"autoEnableWhenConfiguredProviders": ["google-gemini-cli"],
|
||||
"cliBackends": ["google-gemini-cli"],
|
||||
"providers": ["google"],
|
||||
"providerAuthEnvVars": {
|
||||
"google": ["GEMINI_API_KEY", "GOOGLE_API_KEY"]
|
||||
},
|
||||
|
|
@ -20,16 +18,6 @@
|
|||
"cliFlag": "--gemini-api-key",
|
||||
"cliOption": "--gemini-api-key <key>",
|
||||
"cliDescription": "Gemini API key"
|
||||
},
|
||||
{
|
||||
"provider": "google-gemini-cli",
|
||||
"method": "oauth",
|
||||
"choiceId": "google-gemini-cli",
|
||||
"choiceLabel": "Gemini CLI OAuth",
|
||||
"choiceHint": "Google OAuth with project-aware token payload",
|
||||
"groupId": "google",
|
||||
"groupLabel": "Google",
|
||||
"groupHint": "Gemini API key + OAuth"
|
||||
}
|
||||
],
|
||||
"uiHints": {
|
||||
|
|
|
|||
|
|
@ -4,7 +4,6 @@ import type {
|
|||
} from "openclaw/plugin-sdk/plugin-entry";
|
||||
import { cloneFirstTemplateModel } from "openclaw/plugin-sdk/provider-model-shared";
|
||||
|
||||
const GOOGLE_GEMINI_CLI_PROVIDER_ID = "google-gemini-cli";
|
||||
const GEMINI_2_5_PRO_PREFIX = "gemini-2.5-pro";
|
||||
const GEMINI_2_5_FLASH_LITE_PREFIX = "gemini-2.5-flash-lite";
|
||||
const GEMINI_2_5_FLASH_PREFIX = "gemini-2.5-flash";
|
||||
|
|
@ -19,26 +18,18 @@ const GEMINI_3_1_FLASH_LITE_TEMPLATE_IDS = ["gemini-3.1-flash-lite-preview"] as
|
|||
const GEMINI_3_1_FLASH_TEMPLATE_IDS = ["gemini-3-flash-preview"] as const;
|
||||
|
||||
type GoogleForwardCompatFamily = {
|
||||
googleTemplateIds: readonly string[];
|
||||
cliTemplateIds: readonly string[];
|
||||
preferExternalFirstForCli?: boolean;
|
||||
};
|
||||
|
||||
type GoogleTemplateSource = {
|
||||
templateProviderId: string;
|
||||
templateIds: readonly string[];
|
||||
};
|
||||
|
||||
function cloneGoogleTemplateModel(params: {
|
||||
providerId: string;
|
||||
modelId: string;
|
||||
templateProviderId: string;
|
||||
templateIds: readonly string[];
|
||||
ctx: ProviderResolveDynamicModelContext;
|
||||
patch?: Partial<ProviderRuntimeModel>;
|
||||
}): ProviderRuntimeModel | undefined {
|
||||
return cloneFirstTemplateModel({
|
||||
providerId: params.templateProviderId,
|
||||
providerId: params.providerId,
|
||||
modelId: params.modelId,
|
||||
templateIds: params.templateIds,
|
||||
ctx: params.ctx,
|
||||
|
|
@ -49,50 +40,8 @@ function cloneGoogleTemplateModel(params: {
|
|||
});
|
||||
}
|
||||
|
||||
function isGoogleGeminiCliProvider(providerId: string): boolean {
|
||||
return providerId.trim().toLowerCase() === GOOGLE_GEMINI_CLI_PROVIDER_ID;
|
||||
}
|
||||
|
||||
function templateIdsForProvider(
|
||||
templateProviderId: string,
|
||||
family: GoogleForwardCompatFamily,
|
||||
): readonly string[] {
|
||||
return isGoogleGeminiCliProvider(templateProviderId)
|
||||
? family.cliTemplateIds
|
||||
: family.googleTemplateIds;
|
||||
}
|
||||
|
||||
function buildGoogleTemplateSources(params: {
|
||||
providerId: string;
|
||||
templateProviderId?: string;
|
||||
family: GoogleForwardCompatFamily;
|
||||
}): GoogleTemplateSource[] {
|
||||
const preferredExternalFirst =
|
||||
isGoogleGeminiCliProvider(params.providerId) &&
|
||||
params.family.preferExternalFirstForCli === true;
|
||||
const orderedTemplateProviderIds = preferredExternalFirst
|
||||
? [params.templateProviderId, params.providerId]
|
||||
: [params.providerId, params.templateProviderId];
|
||||
|
||||
const seen = new Set<string>();
|
||||
const sources: GoogleTemplateSource[] = [];
|
||||
for (const providerId of orderedTemplateProviderIds) {
|
||||
const trimmed = providerId?.trim();
|
||||
if (!trimmed || seen.has(trimmed)) {
|
||||
continue;
|
||||
}
|
||||
seen.add(trimmed);
|
||||
sources.push({
|
||||
templateProviderId: trimmed,
|
||||
templateIds: templateIdsForProvider(trimmed, params.family),
|
||||
});
|
||||
}
|
||||
return sources;
|
||||
}
|
||||
|
||||
export function resolveGoogleGeminiForwardCompatModel(params: {
|
||||
providerId: string;
|
||||
templateProviderId?: string;
|
||||
ctx: ProviderResolveDynamicModelContext;
|
||||
}): ProviderRuntimeModel | undefined {
|
||||
const trimmed = params.ctx.modelId.trim();
|
||||
|
|
@ -100,60 +49,27 @@ export function resolveGoogleGeminiForwardCompatModel(params: {
|
|||
|
||||
let family: GoogleForwardCompatFamily;
|
||||
if (lower.startsWith(GEMINI_2_5_PRO_PREFIX)) {
|
||||
family = {
|
||||
googleTemplateIds: GEMINI_2_5_PRO_TEMPLATE_IDS,
|
||||
cliTemplateIds: GEMINI_3_1_PRO_TEMPLATE_IDS,
|
||||
preferExternalFirstForCli: true,
|
||||
};
|
||||
family = { templateIds: GEMINI_2_5_PRO_TEMPLATE_IDS };
|
||||
} else if (lower.startsWith(GEMINI_2_5_FLASH_LITE_PREFIX)) {
|
||||
family = {
|
||||
googleTemplateIds: GEMINI_2_5_FLASH_LITE_TEMPLATE_IDS,
|
||||
cliTemplateIds: GEMINI_3_1_FLASH_LITE_TEMPLATE_IDS,
|
||||
preferExternalFirstForCli: true,
|
||||
};
|
||||
family = { templateIds: GEMINI_2_5_FLASH_LITE_TEMPLATE_IDS };
|
||||
} else if (lower.startsWith(GEMINI_2_5_FLASH_PREFIX)) {
|
||||
family = {
|
||||
googleTemplateIds: GEMINI_2_5_FLASH_TEMPLATE_IDS,
|
||||
cliTemplateIds: GEMINI_3_1_FLASH_TEMPLATE_IDS,
|
||||
preferExternalFirstForCli: true,
|
||||
};
|
||||
family = { templateIds: GEMINI_2_5_FLASH_TEMPLATE_IDS };
|
||||
} else if (lower.startsWith(GEMINI_3_1_PRO_PREFIX)) {
|
||||
family = {
|
||||
googleTemplateIds: GEMINI_3_1_PRO_TEMPLATE_IDS,
|
||||
cliTemplateIds: GEMINI_3_1_PRO_TEMPLATE_IDS,
|
||||
};
|
||||
family = { templateIds: GEMINI_3_1_PRO_TEMPLATE_IDS };
|
||||
} else if (lower.startsWith(GEMINI_3_1_FLASH_LITE_PREFIX)) {
|
||||
family = {
|
||||
googleTemplateIds: GEMINI_3_1_FLASH_LITE_TEMPLATE_IDS,
|
||||
cliTemplateIds: GEMINI_3_1_FLASH_LITE_TEMPLATE_IDS,
|
||||
};
|
||||
family = { templateIds: GEMINI_3_1_FLASH_LITE_TEMPLATE_IDS };
|
||||
} else if (lower.startsWith(GEMINI_3_1_FLASH_PREFIX)) {
|
||||
family = {
|
||||
googleTemplateIds: GEMINI_3_1_FLASH_TEMPLATE_IDS,
|
||||
cliTemplateIds: GEMINI_3_1_FLASH_TEMPLATE_IDS,
|
||||
};
|
||||
family = { templateIds: GEMINI_3_1_FLASH_TEMPLATE_IDS };
|
||||
} else {
|
||||
return undefined;
|
||||
}
|
||||
|
||||
for (const source of buildGoogleTemplateSources({
|
||||
return cloneGoogleTemplateModel({
|
||||
providerId: params.providerId,
|
||||
templateProviderId: params.templateProviderId,
|
||||
family,
|
||||
})) {
|
||||
const model = cloneGoogleTemplateModel({
|
||||
providerId: params.providerId,
|
||||
modelId: trimmed,
|
||||
templateProviderId: source.templateProviderId,
|
||||
templateIds: source.templateIds,
|
||||
ctx: params.ctx,
|
||||
});
|
||||
if (model) {
|
||||
return model;
|
||||
}
|
||||
}
|
||||
|
||||
return undefined;
|
||||
modelId: trimmed,
|
||||
templateIds: family.templateIds,
|
||||
ctx: params.ctx,
|
||||
});
|
||||
}
|
||||
|
||||
export function isModernGoogleModel(modelId: string): boolean {
|
||||
|
|
|
|||
|
|
@ -1,3 +1,2 @@
|
|||
export { buildGoogleGeminiCliBackend } from "./cli-backend.js";
|
||||
export { buildGoogleImageGenerationProvider } from "./image-generation-provider.js";
|
||||
export { googleMediaUnderstandingProvider } from "./media-understanding-provider.js";
|
||||
|
|
|
|||
|
|
@ -1,48 +0,0 @@
|
|||
import type { CliBackendPlugin } from "openclaw/plugin-sdk/cli-backend";
|
||||
import {
|
||||
CLI_FRESH_WATCHDOG_DEFAULTS,
|
||||
CLI_RESUME_WATCHDOG_DEFAULTS,
|
||||
} from "openclaw/plugin-sdk/cli-backend";
|
||||
|
||||
export function buildOpenAICodexCliBackend(): CliBackendPlugin {
|
||||
return {
|
||||
id: "codex-cli",
|
||||
config: {
|
||||
command: "codex",
|
||||
args: [
|
||||
"exec",
|
||||
"--json",
|
||||
"--color",
|
||||
"never",
|
||||
"--sandbox",
|
||||
"workspace-write",
|
||||
"--skip-git-repo-check",
|
||||
],
|
||||
resumeArgs: [
|
||||
"exec",
|
||||
"resume",
|
||||
"{sessionId}",
|
||||
"--color",
|
||||
"never",
|
||||
"--sandbox",
|
||||
"workspace-write",
|
||||
"--skip-git-repo-check",
|
||||
],
|
||||
output: "jsonl",
|
||||
resumeOutput: "text",
|
||||
input: "arg",
|
||||
modelArg: "--model",
|
||||
sessionIdFields: ["thread_id"],
|
||||
sessionMode: "existing",
|
||||
imageArg: "--image",
|
||||
imageMode: "repeat",
|
||||
reliability: {
|
||||
watchdog: {
|
||||
fresh: { ...CLI_FRESH_WATCHDOG_DEFAULTS },
|
||||
resume: { ...CLI_RESUME_WATCHDOG_DEFAULTS },
|
||||
},
|
||||
},
|
||||
serialize: true,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
|
@ -1,5 +1,4 @@
|
|||
import { definePluginEntry } from "openclaw/plugin-sdk/plugin-entry";
|
||||
import { buildOpenAICodexCliBackend } from "./cli-backend.js";
|
||||
import { buildOpenAIImageGenerationProvider } from "./image-generation-provider.js";
|
||||
import {
|
||||
openaiCodexMediaUnderstandingProvider,
|
||||
|
|
@ -36,7 +35,6 @@ export default definePluginEntry({
|
|||
modelId: ctx.modelId,
|
||||
}),
|
||||
});
|
||||
api.registerCliBackend(buildOpenAICodexCliBackend());
|
||||
api.registerProvider(buildProviderWithPromptContribution(buildOpenAIProvider()));
|
||||
api.registerProvider(buildProviderWithPromptContribution(buildOpenAICodexProviderPlugin()));
|
||||
api.registerImageGenerationProvider(buildOpenAIImageGenerationProvider());
|
||||
|
|
|
|||
|
|
@ -5,7 +5,6 @@
|
|||
"modelSupport": {
|
||||
"modelPrefixes": ["gpt-", "o1", "o3", "o4"]
|
||||
},
|
||||
"cliBackends": ["codex-cli"],
|
||||
"providerAuthEnvVars": {
|
||||
"openai": ["OPENAI_API_KEY"]
|
||||
},
|
||||
|
|
|
|||
|
|
@ -1,4 +1,3 @@
|
|||
export { buildOpenAICodexCliBackend } from "./cli-backend.js";
|
||||
export { buildOpenAIImageGenerationProvider } from "./image-generation-provider.js";
|
||||
export {
|
||||
openaiCodexMediaUnderstandingProvider,
|
||||
|
|
|
|||
|
|
@ -1,4 +1,3 @@
|
|||
export { buildOpenAICodexCliBackend } from "./cli-backend.js";
|
||||
export { buildOpenAIImageGenerationProvider } from "./image-generation-provider.js";
|
||||
export {
|
||||
openaiCodexMediaUnderstandingProvider,
|
||||
|
|
|
|||
|
|
@ -263,10 +263,6 @@
|
|||
"types": "./dist/plugin-sdk/cli-runtime.d.ts",
|
||||
"default": "./dist/plugin-sdk/cli-runtime.js"
|
||||
},
|
||||
"./plugin-sdk/cli-backend": {
|
||||
"types": "./dist/plugin-sdk/cli-backend.d.ts",
|
||||
"default": "./dist/plugin-sdk/cli-backend.js"
|
||||
},
|
||||
"./plugin-sdk/hook-runtime": {
|
||||
"types": "./dist/plugin-sdk/hook-runtime.d.ts",
|
||||
"default": "./dist/plugin-sdk/hook-runtime.js"
|
||||
|
|
@ -1063,7 +1059,6 @@
|
|||
"test:docker:gateway-network": "bash scripts/e2e/gateway-network-docker.sh",
|
||||
"test:docker:live-acp-bind": "bash scripts/test-live-acp-bind-docker.sh",
|
||||
"test:docker:live-build": "bash scripts/test-live-build-docker.sh",
|
||||
"test:docker:live-cli-backend": "bash scripts/test-live-cli-backend-docker.sh",
|
||||
"test:docker:live-gateway": "bash scripts/test-live-gateway-models-docker.sh",
|
||||
"test:docker:live-models": "bash scripts/test-live-models-docker.sh",
|
||||
"test:docker:mcp-channels": "bash scripts/e2e/mcp-channels-docker.sh",
|
||||
|
|
|
|||
|
|
@ -1,177 +0,0 @@
|
|||
import { randomUUID } from "node:crypto";
|
||||
import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { clearRuntimeConfigSnapshot, loadConfig } from "../src/config/config.js";
|
||||
import { GatewayClient } from "../src/gateway/client.js";
|
||||
import { startGatewayServer } from "../src/gateway/server.js";
|
||||
import { extractPayloadText } from "../src/gateway/test-helpers.agent-results.js";
|
||||
import { getFreePortBlockWithPermissionFallback } from "../src/test-utils/ports.js";
|
||||
import { GATEWAY_CLIENT_NAMES } from "../src/utils/message-channel.js";
|
||||
|
||||
const DEFAULT_CODEX_ARGS = [
|
||||
"exec",
|
||||
"--json",
|
||||
"--color",
|
||||
"never",
|
||||
"--sandbox",
|
||||
"read-only",
|
||||
"--skip-git-repo-check",
|
||||
];
|
||||
|
||||
async function connectClient(params: { url: string; token: string }) {
|
||||
return await new Promise<GatewayClient>((resolve, reject) => {
|
||||
let done = false;
|
||||
const finish = (result: { client?: GatewayClient; error?: Error }) => {
|
||||
if (done) {
|
||||
return;
|
||||
}
|
||||
done = true;
|
||||
clearTimeout(connectTimeout);
|
||||
if (result.error) {
|
||||
reject(result.error);
|
||||
return;
|
||||
}
|
||||
resolve(result.client as GatewayClient);
|
||||
};
|
||||
const client = new GatewayClient({
|
||||
url: params.url,
|
||||
token: params.token,
|
||||
clientName: GATEWAY_CLIENT_NAMES.TEST,
|
||||
clientVersion: "dev",
|
||||
mode: "test",
|
||||
onHelloOk: () => finish({ client }),
|
||||
onConnectError: (error) => finish({ error }),
|
||||
onClose: (code, reason) =>
|
||||
finish({ error: new Error(`gateway closed during connect (${code}): ${reason}`) }),
|
||||
});
|
||||
const connectTimeout = setTimeout(
|
||||
() => finish({ error: new Error("gateway connect timeout") }),
|
||||
10_000,
|
||||
);
|
||||
connectTimeout.unref();
|
||||
client.start();
|
||||
});
|
||||
}
|
||||
|
||||
async function getFreeGatewayPort(): Promise<number> {
|
||||
return await getFreePortBlockWithPermissionFallback({
|
||||
offsets: [0, 1, 2, 4],
|
||||
fallbackBase: 40_000,
|
||||
});
|
||||
}
|
||||
|
||||
async function main() {
|
||||
const preservedEnv = new Set(
|
||||
JSON.parse(process.env.OPENCLAW_LIVE_CLI_BACKEND_PRESERVE_ENV ?? "[]") as string[],
|
||||
);
|
||||
const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-inline-bootstrap-"));
|
||||
const workspaceRootDir = path.join(tempDir, "workspace");
|
||||
const workspaceDir = path.join(workspaceRootDir, "dev");
|
||||
const soulSecret = `SOUL-${randomUUID()}`;
|
||||
const identitySecret = `IDENTITY-${randomUUID()}`;
|
||||
const userSecret = `USER-${randomUUID()}`;
|
||||
await fs.mkdir(workspaceDir, { recursive: true });
|
||||
await fs.writeFile(
|
||||
path.join(workspaceDir, "AGENTS.md"),
|
||||
[
|
||||
"# AGENTS.md",
|
||||
"",
|
||||
"When the user sends a BOOTSTRAP_CHECK token, reply with exactly:",
|
||||
`BOOTSTRAP_OK ${soulSecret} ${identitySecret} ${userSecret}`,
|
||||
"Do not add any other words or punctuation.",
|
||||
].join("\n"),
|
||||
);
|
||||
await fs.writeFile(path.join(workspaceDir, "SOUL.md"), `${soulSecret}\n`);
|
||||
await fs.writeFile(path.join(workspaceDir, "IDENTITY.md"), `${identitySecret}\n`);
|
||||
await fs.writeFile(path.join(workspaceDir, "USER.md"), `${userSecret}\n`);
|
||||
|
||||
const cfg = loadConfig();
|
||||
const existingBackends = cfg.agents?.defaults?.cliBackends ?? {};
|
||||
const codexBackend = existingBackends["codex-cli"] ?? {};
|
||||
const cliCommand =
|
||||
process.env.OPENCLAW_LIVE_CLI_BACKEND_COMMAND ?? codexBackend.command ?? "codex";
|
||||
const cliArgs = codexBackend.args ?? DEFAULT_CODEX_ARGS;
|
||||
const cliClearEnv = (codexBackend.clearEnv ?? []).filter((name) => !preservedEnv.has(name));
|
||||
const preservedCliEnv = Object.fromEntries(
|
||||
[...preservedEnv]
|
||||
.map((name) => [name, process.env[name]])
|
||||
.filter((entry): entry is [string, string] => typeof entry[1] === "string"),
|
||||
);
|
||||
const nextCfg = {
|
||||
...cfg,
|
||||
agents: {
|
||||
...cfg.agents,
|
||||
defaults: {
|
||||
...cfg.agents?.defaults,
|
||||
workspace: workspaceRootDir,
|
||||
model: { primary: "codex-cli/gpt-5.4" },
|
||||
models: { "codex-cli/gpt-5.4": {} },
|
||||
cliBackends: {
|
||||
...existingBackends,
|
||||
"codex-cli": {
|
||||
...codexBackend,
|
||||
command: cliCommand,
|
||||
args: cliArgs,
|
||||
clearEnv: cliClearEnv.length > 0 ? cliClearEnv : undefined,
|
||||
env: Object.keys(preservedCliEnv).length > 0 ? preservedCliEnv : undefined,
|
||||
systemPromptWhen: "first",
|
||||
},
|
||||
},
|
||||
sandbox: { mode: "off" },
|
||||
},
|
||||
},
|
||||
};
|
||||
const tempConfigPath = path.join(tempDir, "openclaw.json");
|
||||
await fs.writeFile(tempConfigPath, `${JSON.stringify(nextCfg, null, 2)}\n`);
|
||||
process.env.OPENCLAW_CONFIG_PATH = tempConfigPath;
|
||||
process.env.OPENCLAW_SKIP_CHANNELS = "1";
|
||||
process.env.OPENCLAW_SKIP_GMAIL_WATCHER = "1";
|
||||
process.env.OPENCLAW_SKIP_CRON = "1";
|
||||
process.env.OPENCLAW_SKIP_CANVAS_HOST = "1";
|
||||
|
||||
const port = await getFreeGatewayPort();
|
||||
const token = `test-${randomUUID()}`;
|
||||
process.env.OPENCLAW_GATEWAY_TOKEN = token;
|
||||
|
||||
const server = await startGatewayServer(port, {
|
||||
bind: "loopback",
|
||||
auth: { mode: "token", token },
|
||||
controlUiEnabled: false,
|
||||
});
|
||||
const client = await connectClient({ url: `ws://127.0.0.1:${port}`, token });
|
||||
try {
|
||||
const payload = await client.request(
|
||||
"agent",
|
||||
{
|
||||
sessionKey: `agent:dev:inline-cli-bootstrap-${randomUUID()}`,
|
||||
idempotencyKey: `idem-${randomUUID()}`,
|
||||
message: `BOOTSTRAP_CHECK ${randomUUID()}`,
|
||||
deliver: false,
|
||||
},
|
||||
{ expectFinal: true, timeoutMs: 60_000 },
|
||||
);
|
||||
const text = extractPayloadText(payload?.result);
|
||||
process.stdout.write(
|
||||
`${JSON.stringify({
|
||||
ok: true,
|
||||
text,
|
||||
expectedText: `BOOTSTRAP_OK ${soulSecret} ${identitySecret} ${userSecret}`,
|
||||
systemPromptReport: payload?.result?.meta?.systemPromptReport ?? null,
|
||||
})}\n`,
|
||||
);
|
||||
} finally {
|
||||
await client.stopAndWait();
|
||||
await server.close({ reason: "bootstrap live probe done" });
|
||||
await fs.rm(tempDir, { recursive: true, force: true });
|
||||
clearRuntimeConfigSnapshot();
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
await main();
|
||||
process.exit(0);
|
||||
} catch (error) {
|
||||
process.stderr.write(`${String(error)}\n`);
|
||||
process.exit(1);
|
||||
}
|
||||
|
|
@ -27,7 +27,7 @@ openclaw_live_should_include_auth_dir_for_provider() {
|
|||
local provider
|
||||
provider="$(openclaw_live_trim "${1:-}")"
|
||||
case "$provider" in
|
||||
codex-cli | openai-codex)
|
||||
openai-codex)
|
||||
printf '%s\n' ".codex"
|
||||
;;
|
||||
minimax | minimax-portal)
|
||||
|
|
|
|||
|
|
@ -55,7 +55,6 @@
|
|||
"github-copilot-login",
|
||||
"github-copilot-token",
|
||||
"cli-runtime",
|
||||
"cli-backend",
|
||||
"hook-runtime",
|
||||
"host-runtime",
|
||||
"process-runtime",
|
||||
|
|
|
|||
|
|
@ -18,7 +18,7 @@ case "$ACP_AGENT" in
|
|||
CLI_BIN="claude"
|
||||
;;
|
||||
codex)
|
||||
AUTH_PROVIDER="codex-cli"
|
||||
AUTH_PROVIDER="openai-codex"
|
||||
CLI_PACKAGE="@openai/codex"
|
||||
CLI_BIN="codex"
|
||||
;;
|
||||
|
|
|
|||
|
|
@ -1,171 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
|
||||
source "$ROOT_DIR/scripts/lib/live-docker-auth.sh"
|
||||
IMAGE_NAME="${OPENCLAW_IMAGE:-openclaw:local}"
|
||||
LIVE_IMAGE_NAME="${OPENCLAW_LIVE_IMAGE:-${IMAGE_NAME}-live}"
|
||||
CONFIG_DIR="${OPENCLAW_CONFIG_DIR:-$HOME/.openclaw}"
|
||||
WORKSPACE_DIR="${OPENCLAW_WORKSPACE_DIR:-$HOME/.openclaw/workspace}"
|
||||
PROFILE_FILE="${OPENCLAW_PROFILE_FILE:-$HOME/.profile}"
|
||||
CLI_TOOLS_DIR="${OPENCLAW_DOCKER_CLI_TOOLS_DIR:-$HOME/.cache/openclaw/docker-cli-tools}"
|
||||
DEFAULT_MODEL="codex-cli/gpt-5.4"
|
||||
CLI_MODEL="${OPENCLAW_LIVE_CLI_BACKEND_MODEL:-$DEFAULT_MODEL}"
|
||||
CLI_PROVIDER="${CLI_MODEL%%/*}"
|
||||
|
||||
if [[ -z "$CLI_PROVIDER" || "$CLI_PROVIDER" == "$CLI_MODEL" ]]; then
|
||||
CLI_PROVIDER="codex-cli"
|
||||
fi
|
||||
|
||||
mkdir -p "$CLI_TOOLS_DIR"
|
||||
|
||||
PROFILE_MOUNT=()
|
||||
if [[ -f "$PROFILE_FILE" ]]; then
|
||||
PROFILE_MOUNT=(-v "$PROFILE_FILE":/home/node/.profile:ro)
|
||||
fi
|
||||
|
||||
AUTH_DIRS=()
|
||||
AUTH_FILES=()
|
||||
if [[ -n "${OPENCLAW_DOCKER_AUTH_DIRS:-}" ]]; then
|
||||
while IFS= read -r auth_dir; do
|
||||
[[ -n "$auth_dir" ]] || continue
|
||||
AUTH_DIRS+=("$auth_dir")
|
||||
done < <(openclaw_live_collect_auth_dirs)
|
||||
while IFS= read -r auth_file; do
|
||||
[[ -n "$auth_file" ]] || continue
|
||||
AUTH_FILES+=("$auth_file")
|
||||
done < <(openclaw_live_collect_auth_files)
|
||||
else
|
||||
while IFS= read -r auth_dir; do
|
||||
[[ -n "$auth_dir" ]] || continue
|
||||
AUTH_DIRS+=("$auth_dir")
|
||||
done < <(openclaw_live_collect_auth_dirs_from_csv "$CLI_PROVIDER")
|
||||
while IFS= read -r auth_file; do
|
||||
[[ -n "$auth_file" ]] || continue
|
||||
AUTH_FILES+=("$auth_file")
|
||||
done < <(openclaw_live_collect_auth_files_from_csv "$CLI_PROVIDER")
|
||||
fi
|
||||
AUTH_DIRS_CSV=""
|
||||
if ((${#AUTH_DIRS[@]} > 0)); then
|
||||
AUTH_DIRS_CSV="$(openclaw_live_join_csv "${AUTH_DIRS[@]}")"
|
||||
fi
|
||||
AUTH_FILES_CSV=""
|
||||
if ((${#AUTH_FILES[@]} > 0)); then
|
||||
AUTH_FILES_CSV="$(openclaw_live_join_csv "${AUTH_FILES[@]}")"
|
||||
fi
|
||||
|
||||
EXTERNAL_AUTH_MOUNTS=()
|
||||
if ((${#AUTH_DIRS[@]} > 0)); then
|
||||
for auth_dir in "${AUTH_DIRS[@]}"; do
|
||||
host_path="$HOME/$auth_dir"
|
||||
if [[ -d "$host_path" ]]; then
|
||||
EXTERNAL_AUTH_MOUNTS+=(-v "$host_path":/host-auth/"$auth_dir":ro)
|
||||
fi
|
||||
done
|
||||
fi
|
||||
if ((${#AUTH_FILES[@]} > 0)); then
|
||||
for auth_file in "${AUTH_FILES[@]}"; do
|
||||
host_path="$HOME/$auth_file"
|
||||
if [[ -f "$host_path" ]]; then
|
||||
EXTERNAL_AUTH_MOUNTS+=(-v "$host_path":/host-auth-files/"$auth_file":ro)
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
read -r -d '' LIVE_TEST_CMD <<'EOF' || true
|
||||
set -euo pipefail
|
||||
[ -f "$HOME/.profile" ] && source "$HOME/.profile" || true
|
||||
export PATH="$HOME/.npm-global/bin:$PATH"
|
||||
IFS=',' read -r -a auth_dirs <<<"${OPENCLAW_DOCKER_AUTH_DIRS_RESOLVED:-}"
|
||||
IFS=',' read -r -a auth_files <<<"${OPENCLAW_DOCKER_AUTH_FILES_RESOLVED:-}"
|
||||
if ((${#auth_dirs[@]} > 0)); then
|
||||
for auth_dir in "${auth_dirs[@]}"; do
|
||||
[ -n "$auth_dir" ] || continue
|
||||
if [ -d "/host-auth/$auth_dir" ]; then
|
||||
mkdir -p "$HOME/$auth_dir"
|
||||
cp -R "/host-auth/$auth_dir/." "$HOME/$auth_dir"
|
||||
chmod -R u+rwX "$HOME/$auth_dir" || true
|
||||
fi
|
||||
done
|
||||
fi
|
||||
if ((${#auth_files[@]} > 0)); then
|
||||
for auth_file in "${auth_files[@]}"; do
|
||||
[ -n "$auth_file" ] || continue
|
||||
if [ -f "/host-auth-files/$auth_file" ]; then
|
||||
mkdir -p "$(dirname "$HOME/$auth_file")"
|
||||
cp "/host-auth-files/$auth_file" "$HOME/$auth_file"
|
||||
chmod u+rw "$HOME/$auth_file" || true
|
||||
fi
|
||||
done
|
||||
fi
|
||||
provider="${OPENCLAW_DOCKER_CLI_BACKEND_PROVIDER:-codex-cli}"
|
||||
if [ "$provider" = "codex-cli" ]; then
|
||||
if [ -z "${OPENCLAW_LIVE_CLI_BACKEND_COMMAND:-}" ]; then
|
||||
export OPENCLAW_LIVE_CLI_BACKEND_COMMAND="$HOME/.npm-global/bin/codex"
|
||||
fi
|
||||
if [ ! -x "${OPENCLAW_LIVE_CLI_BACKEND_COMMAND}" ]; then
|
||||
npm_config_prefix="$HOME/.npm-global" npm install -g @openai/codex
|
||||
fi
|
||||
fi
|
||||
tmp_dir="$(mktemp -d)"
|
||||
cleanup() {
|
||||
rm -rf "$tmp_dir"
|
||||
}
|
||||
trap cleanup EXIT
|
||||
tar -C /src \
|
||||
--exclude=.git \
|
||||
--exclude=node_modules \
|
||||
--exclude=dist \
|
||||
--exclude=ui/dist \
|
||||
--exclude=ui/node_modules \
|
||||
-cf - . | tar -C "$tmp_dir" -xf -
|
||||
ln -s /app/node_modules "$tmp_dir/node_modules"
|
||||
ln -s /app/dist "$tmp_dir/dist"
|
||||
if [ -d /app/dist-runtime/extensions ]; then
|
||||
export OPENCLAW_BUNDLED_PLUGINS_DIR=/app/dist-runtime/extensions
|
||||
elif [ -d /app/dist/extensions ]; then
|
||||
export OPENCLAW_BUNDLED_PLUGINS_DIR=/app/dist/extensions
|
||||
fi
|
||||
cd "$tmp_dir"
|
||||
pnpm test:live src/gateway/gateway-cli-backend.live.test.ts
|
||||
EOF
|
||||
|
||||
echo "==> Build live-test image: $LIVE_IMAGE_NAME (target=build)"
|
||||
docker build --target build -t "$LIVE_IMAGE_NAME" -f "$ROOT_DIR/Dockerfile" "$ROOT_DIR"
|
||||
|
||||
echo "==> Run CLI backend live test in Docker"
|
||||
echo "==> Model: $CLI_MODEL"
|
||||
echo "==> Provider: $CLI_PROVIDER"
|
||||
echo "==> External auth dirs: ${AUTH_DIRS_CSV:-none}"
|
||||
echo "==> External auth files: ${AUTH_FILES_CSV:-none}"
|
||||
docker run --rm -t \
|
||||
-u node \
|
||||
--entrypoint bash \
|
||||
-e OPENAI_API_KEY \
|
||||
-e COREPACK_ENABLE_DOWNLOAD_PROMPT=0 \
|
||||
-e HOME=/home/node \
|
||||
-e NODE_OPTIONS=--disable-warning=ExperimentalWarning \
|
||||
-e OPENCLAW_SKIP_CHANNELS=1 \
|
||||
-e OPENCLAW_VITEST_FS_MODULE_CACHE=0 \
|
||||
-e OPENCLAW_DOCKER_AUTH_DIRS_RESOLVED="$AUTH_DIRS_CSV" \
|
||||
-e OPENCLAW_DOCKER_AUTH_FILES_RESOLVED="$AUTH_FILES_CSV" \
|
||||
-e OPENCLAW_DOCKER_CLI_BACKEND_PROVIDER="$CLI_PROVIDER" \
|
||||
-e OPENCLAW_LIVE_TEST=1 \
|
||||
-e OPENCLAW_LIVE_CLI_BACKEND=1 \
|
||||
-e OPENCLAW_LIVE_CLI_BACKEND_MODEL="$CLI_MODEL" \
|
||||
-e OPENCLAW_LIVE_CLI_BACKEND_COMMAND="${OPENCLAW_LIVE_CLI_BACKEND_COMMAND:-}" \
|
||||
-e OPENCLAW_LIVE_CLI_BACKEND_ARGS="${OPENCLAW_LIVE_CLI_BACKEND_ARGS:-}" \
|
||||
-e OPENCLAW_LIVE_CLI_BACKEND_CLEAR_ENV="${OPENCLAW_LIVE_CLI_BACKEND_CLEAR_ENV:-}" \
|
||||
-e OPENCLAW_LIVE_CLI_BACKEND_PRESERVE_ENV="${OPENCLAW_LIVE_CLI_BACKEND_PRESERVE_ENV:-}" \
|
||||
-e OPENCLAW_LIVE_CLI_BACKEND_RESUME_PROBE="${OPENCLAW_LIVE_CLI_BACKEND_RESUME_PROBE:-}" \
|
||||
-e OPENCLAW_LIVE_CLI_BACKEND_IMAGE_PROBE="${OPENCLAW_LIVE_CLI_BACKEND_IMAGE_PROBE:-}" \
|
||||
-e OPENCLAW_LIVE_CLI_BACKEND_IMAGE_ARG="${OPENCLAW_LIVE_CLI_BACKEND_IMAGE_ARG:-}" \
|
||||
-e OPENCLAW_LIVE_CLI_BACKEND_IMAGE_MODE="${OPENCLAW_LIVE_CLI_BACKEND_IMAGE_MODE:-}" \
|
||||
-v "$ROOT_DIR":/src:ro \
|
||||
-v "$CONFIG_DIR":/home/node/.openclaw \
|
||||
-v "$WORKSPACE_DIR":/home/node/.openclaw/workspace \
|
||||
-v "$CLI_TOOLS_DIR":/home/node/.npm-global \
|
||||
"${EXTERNAL_AUTH_MOUNTS[@]}" \
|
||||
"${PROFILE_MOUNT[@]}" \
|
||||
"$LIVE_IMAGE_NAME" \
|
||||
-lc "$LIVE_TEST_CMD"
|
||||
|
|
@ -17,11 +17,6 @@ const ACP_AGENT_RESUME_HINT_BY_KEY = new Map<string, SessionResumeHintResolver>(
|
|||
({ agentSessionId }) =>
|
||||
`resume in Codex CLI: \`codex resume ${agentSessionId}\` (continues this conversation).`,
|
||||
],
|
||||
[
|
||||
"codex-cli",
|
||||
({ agentSessionId }) =>
|
||||
`resume in Codex CLI: \`codex resume ${agentSessionId}\` (continues this conversation).`,
|
||||
],
|
||||
[
|
||||
"kimi",
|
||||
({ agentSessionId }) =>
|
||||
|
|
|
|||
|
|
@ -102,8 +102,7 @@ type OverrideFieldClearedByDelete =
|
|||
| "authProfileOverrideCompactionCount"
|
||||
| "fallbackNoticeSelectedModel"
|
||||
| "fallbackNoticeActiveModel"
|
||||
| "fallbackNoticeReason"
|
||||
| "claudeCliSessionId";
|
||||
| "fallbackNoticeReason";
|
||||
|
||||
const OVERRIDE_FIELDS_CLEARED_BY_DELETE: OverrideFieldClearedByDelete[] = [
|
||||
"providerOverride",
|
||||
|
|
@ -114,7 +113,6 @@ const OVERRIDE_FIELDS_CLEARED_BY_DELETE: OverrideFieldClearedByDelete[] = [
|
|||
"fallbackNoticeSelectedModel",
|
||||
"fallbackNoticeActiveModel",
|
||||
"fallbackNoticeReason",
|
||||
"claudeCliSessionId",
|
||||
];
|
||||
|
||||
const OVERRIDE_VALUE_MAX_LENGTH = 256;
|
||||
|
|
|
|||
|
|
@ -1,257 +0,0 @@
|
|||
import { beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import type { AuthProfileStore, OAuthCredential } from "./auth-profiles/types.js";
|
||||
|
||||
const mocks = vi.hoisted(() => ({
|
||||
readCodexCliCredentialsCached: vi.fn<() => OAuthCredential | null>(() => null),
|
||||
readMiniMaxCliCredentialsCached: vi.fn<() => OAuthCredential | null>(() => null),
|
||||
}));
|
||||
|
||||
let syncExternalCliCredentials: typeof import("./auth-profiles/external-cli-sync.js").syncExternalCliCredentials;
|
||||
let shouldReplaceStoredOAuthCredential: typeof import("./auth-profiles/external-cli-sync.js").shouldReplaceStoredOAuthCredential;
|
||||
let CODEX_CLI_PROFILE_ID: typeof import("./auth-profiles/constants.js").CODEX_CLI_PROFILE_ID;
|
||||
let OPENAI_CODEX_DEFAULT_PROFILE_ID: typeof import("./auth-profiles/constants.js").OPENAI_CODEX_DEFAULT_PROFILE_ID;
|
||||
let MINIMAX_CLI_PROFILE_ID: typeof import("./auth-profiles/constants.js").MINIMAX_CLI_PROFILE_ID;
|
||||
|
||||
function makeOAuthCredential(
|
||||
overrides: Partial<OAuthCredential> & Pick<OAuthCredential, "provider">,
|
||||
) {
|
||||
return {
|
||||
type: "oauth" as const,
|
||||
provider: overrides.provider,
|
||||
access: overrides.access ?? `${overrides.provider}-access`,
|
||||
refresh: overrides.refresh ?? `${overrides.provider}-refresh`,
|
||||
expires: overrides.expires ?? Date.now() + 60_000,
|
||||
accountId: overrides.accountId,
|
||||
email: overrides.email,
|
||||
enterpriseUrl: overrides.enterpriseUrl,
|
||||
projectId: overrides.projectId,
|
||||
};
|
||||
}
|
||||
|
||||
function makeStore(profileId?: string, credential?: OAuthCredential): AuthProfileStore {
|
||||
return {
|
||||
version: 1,
|
||||
profiles: profileId && credential ? { [profileId]: credential } : {},
|
||||
};
|
||||
}
|
||||
|
||||
function getProviderCases() {
|
||||
return [
|
||||
{
|
||||
label: "Codex",
|
||||
profileId: OPENAI_CODEX_DEFAULT_PROFILE_ID,
|
||||
provider: "openai-codex" as const,
|
||||
readMock: mocks.readCodexCliCredentialsCached,
|
||||
legacyProfileId: CODEX_CLI_PROFILE_ID,
|
||||
},
|
||||
{
|
||||
label: "MiniMax",
|
||||
profileId: MINIMAX_CLI_PROFILE_ID,
|
||||
provider: "minimax-portal" as const,
|
||||
readMock: mocks.readMiniMaxCliCredentialsCached,
|
||||
},
|
||||
];
|
||||
}
|
||||
|
||||
describe("syncExternalCliCredentials", () => {
|
||||
beforeEach(async () => {
|
||||
vi.resetModules();
|
||||
mocks.readCodexCliCredentialsCached.mockReset().mockReturnValue(null);
|
||||
mocks.readMiniMaxCliCredentialsCached.mockReset().mockReturnValue(null);
|
||||
vi.doMock("./cli-credentials.js", () => ({
|
||||
readCodexCliCredentialsCached: mocks.readCodexCliCredentialsCached,
|
||||
readMiniMaxCliCredentialsCached: mocks.readMiniMaxCliCredentialsCached,
|
||||
}));
|
||||
({ syncExternalCliCredentials, shouldReplaceStoredOAuthCredential } =
|
||||
await import("./auth-profiles/external-cli-sync.js"));
|
||||
({ CODEX_CLI_PROFILE_ID, OPENAI_CODEX_DEFAULT_PROFILE_ID, MINIMAX_CLI_PROFILE_ID } =
|
||||
await import("./auth-profiles/constants.js"));
|
||||
});
|
||||
|
||||
describe("shouldReplaceStoredOAuthCredential", () => {
|
||||
it("keeps equivalent stored credentials", () => {
|
||||
const stored = makeOAuthCredential({ provider: "openai-codex", access: "a", refresh: "r" });
|
||||
const incoming = makeOAuthCredential({ provider: "openai-codex", access: "a", refresh: "r" });
|
||||
|
||||
expect(shouldReplaceStoredOAuthCredential(stored, incoming)).toBe(false);
|
||||
});
|
||||
|
||||
it("keeps the newer stored credential", () => {
|
||||
const incoming = makeOAuthCredential({
|
||||
provider: "openai-codex",
|
||||
expires: Date.now() + 60_000,
|
||||
});
|
||||
const stored = makeOAuthCredential({
|
||||
provider: "openai-codex",
|
||||
access: "fresh-access",
|
||||
refresh: "fresh-refresh",
|
||||
expires: Date.now() + 5 * 24 * 60 * 60_000,
|
||||
});
|
||||
|
||||
expect(shouldReplaceStoredOAuthCredential(stored, incoming)).toBe(false);
|
||||
});
|
||||
|
||||
it("replaces when incoming credentials are fresher", () => {
|
||||
const stored = makeOAuthCredential({
|
||||
provider: "openai-codex",
|
||||
expires: Date.now() + 60_000,
|
||||
});
|
||||
const incoming = makeOAuthCredential({
|
||||
provider: "openai-codex",
|
||||
access: "new-access",
|
||||
refresh: "new-refresh",
|
||||
expires: Date.now() + 5 * 24 * 60 * 60_000,
|
||||
});
|
||||
|
||||
expect(shouldReplaceStoredOAuthCredential(stored, incoming)).toBe(true);
|
||||
expect(shouldReplaceStoredOAuthCredential(undefined, incoming)).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
it.each([{ providerLabel: "Codex" }, { providerLabel: "MiniMax" }])(
|
||||
"syncs $providerLabel CLI credentials into the target auth profile",
|
||||
({ providerLabel }) => {
|
||||
const providerCase = getProviderCases().find((entry) => entry.label === providerLabel);
|
||||
expect(providerCase).toBeDefined();
|
||||
const current = providerCase!;
|
||||
const expires = Date.now() + 60_000;
|
||||
current.readMock.mockReturnValue(
|
||||
makeOAuthCredential({
|
||||
provider: current.provider,
|
||||
access: `${current.provider}-access-token`,
|
||||
refresh: `${current.provider}-refresh-token`,
|
||||
expires,
|
||||
accountId: "acct_123",
|
||||
}),
|
||||
);
|
||||
|
||||
const store = makeStore();
|
||||
|
||||
const mutated = syncExternalCliCredentials(store);
|
||||
|
||||
expect(mutated).toBe(true);
|
||||
expect(current.readMock).toHaveBeenCalledWith(
|
||||
expect.objectContaining({ ttlMs: expect.any(Number) }),
|
||||
);
|
||||
expect(store.profiles[current.profileId]).toMatchObject({
|
||||
type: "oauth",
|
||||
provider: current.provider,
|
||||
access: `${current.provider}-access-token`,
|
||||
refresh: `${current.provider}-refresh-token`,
|
||||
expires,
|
||||
accountId: "acct_123",
|
||||
managedBy: current.provider === "openai-codex" ? "codex-cli" : ("minimax-cli" as const),
|
||||
});
|
||||
if (current.legacyProfileId) {
|
||||
expect(store.profiles[current.legacyProfileId]).toBeUndefined();
|
||||
}
|
||||
},
|
||||
);
|
||||
|
||||
it("refreshes stored Codex expiry from external CLI even when the cached profile looks fresh", () => {
|
||||
const staleExpiry = Date.now() + 30 * 60_000;
|
||||
const freshExpiry = Date.now() + 5 * 24 * 60 * 60_000;
|
||||
mocks.readCodexCliCredentialsCached.mockReturnValue(
|
||||
makeOAuthCredential({
|
||||
provider: "openai-codex",
|
||||
access: "new-access-token",
|
||||
refresh: "new-refresh-token",
|
||||
expires: freshExpiry,
|
||||
accountId: "acct_456",
|
||||
}),
|
||||
);
|
||||
|
||||
const store = makeStore(
|
||||
OPENAI_CODEX_DEFAULT_PROFILE_ID,
|
||||
makeOAuthCredential({
|
||||
provider: "openai-codex",
|
||||
access: "old-access-token",
|
||||
refresh: "old-refresh-token",
|
||||
expires: staleExpiry,
|
||||
accountId: "acct_456",
|
||||
}),
|
||||
);
|
||||
|
||||
const mutated = syncExternalCliCredentials(store);
|
||||
|
||||
expect(mutated).toBe(true);
|
||||
expect(store.profiles[OPENAI_CODEX_DEFAULT_PROFILE_ID]).toMatchObject({
|
||||
access: "new-access-token",
|
||||
refresh: "new-refresh-token",
|
||||
expires: freshExpiry,
|
||||
managedBy: "codex-cli",
|
||||
});
|
||||
});
|
||||
|
||||
it.each([{ providerLabel: "Codex" }, { providerLabel: "MiniMax" }])(
|
||||
"does not overwrite newer stored $providerLabel credentials",
|
||||
({ providerLabel }) => {
|
||||
const providerCase = getProviderCases().find((entry) => entry.label === providerLabel);
|
||||
expect(providerCase).toBeDefined();
|
||||
const current = providerCase!;
|
||||
const staleExpiry = Date.now() + 30 * 60_000;
|
||||
const freshExpiry = Date.now() + 5 * 24 * 60 * 60_000;
|
||||
current.readMock.mockReturnValue(
|
||||
makeOAuthCredential({
|
||||
provider: current.provider,
|
||||
access: `stale-${current.provider}-access-token`,
|
||||
refresh: `stale-${current.provider}-refresh-token`,
|
||||
expires: staleExpiry,
|
||||
accountId: "acct_789",
|
||||
}),
|
||||
);
|
||||
|
||||
const store = makeStore(
|
||||
current.profileId,
|
||||
makeOAuthCredential({
|
||||
provider: current.provider,
|
||||
access: `fresh-${current.provider}-access-token`,
|
||||
refresh: `fresh-${current.provider}-refresh-token`,
|
||||
expires: freshExpiry,
|
||||
accountId: "acct_789",
|
||||
}),
|
||||
);
|
||||
|
||||
const mutated = syncExternalCliCredentials(store);
|
||||
|
||||
expect(mutated).toBe(false);
|
||||
expect(store.profiles[current.profileId]).toMatchObject({
|
||||
access: `fresh-${current.provider}-access-token`,
|
||||
refresh: `fresh-${current.provider}-refresh-token`,
|
||||
expires: freshExpiry,
|
||||
});
|
||||
},
|
||||
);
|
||||
|
||||
it("upgrades matching Codex CLI credentials with external ownership metadata", () => {
|
||||
const expires = Date.now() + 60_000;
|
||||
mocks.readCodexCliCredentialsCached.mockReturnValue(
|
||||
makeOAuthCredential({
|
||||
provider: "openai-codex",
|
||||
access: "same-access-token",
|
||||
refresh: "same-refresh-token",
|
||||
expires,
|
||||
}),
|
||||
);
|
||||
|
||||
const store = makeStore(
|
||||
OPENAI_CODEX_DEFAULT_PROFILE_ID,
|
||||
makeOAuthCredential({
|
||||
provider: "openai-codex",
|
||||
access: "same-access-token",
|
||||
refresh: "same-refresh-token",
|
||||
expires,
|
||||
}),
|
||||
);
|
||||
|
||||
const mutated = syncExternalCliCredentials(store);
|
||||
|
||||
expect(mutated).toBe(true);
|
||||
expect(store.profiles[OPENAI_CODEX_DEFAULT_PROFILE_ID]).toMatchObject({
|
||||
access: "same-access-token",
|
||||
refresh: "same-refresh-token",
|
||||
expires,
|
||||
managedBy: "codex-cli",
|
||||
});
|
||||
});
|
||||
});
|
||||
|
|
@ -6,7 +6,6 @@ export const LEGACY_AUTH_FILENAME = "auth.json";
|
|||
|
||||
export const CODEX_CLI_PROFILE_ID = "openai-codex:codex-cli";
|
||||
export const OPENAI_CODEX_DEFAULT_PROFILE_ID = "openai-codex:default";
|
||||
export const MINIMAX_CLI_PROFILE_ID = "minimax-portal:minimax-cli";
|
||||
|
||||
export const AUTH_STORE_LOCK_OPTIONS = {
|
||||
retries: {
|
||||
|
|
@ -19,7 +18,4 @@ export const AUTH_STORE_LOCK_OPTIONS = {
|
|||
stale: 30_000,
|
||||
} as const;
|
||||
|
||||
export const EXTERNAL_CLI_SYNC_TTL_MS = 15 * 60 * 1000;
|
||||
export const EXTERNAL_CLI_NEAR_EXPIRY_MS = 10 * 60 * 1000;
|
||||
|
||||
export const log = createSubsystemLogger("agents/auth-profiles");
|
||||
|
|
|
|||
|
|
@ -1,196 +0,0 @@
|
|||
import {
|
||||
readCodexCliCredentialsCached,
|
||||
readMiniMaxCliCredentialsCached,
|
||||
} from "../cli-credentials.js";
|
||||
import {
|
||||
EXTERNAL_CLI_SYNC_TTL_MS,
|
||||
OPENAI_CODEX_DEFAULT_PROFILE_ID,
|
||||
MINIMAX_CLI_PROFILE_ID,
|
||||
log,
|
||||
} from "./constants.js";
|
||||
import type { AuthProfileStore, ExternalOAuthManager, OAuthCredential } from "./types.js";
|
||||
|
||||
type ExternalCliSyncOptions = {
|
||||
log?: boolean;
|
||||
};
|
||||
|
||||
type ExternalCliSyncProvider = {
|
||||
profileId: string;
|
||||
provider: string;
|
||||
managedBy: ExternalOAuthManager;
|
||||
readCredentials: () => OAuthCredential | null;
|
||||
};
|
||||
|
||||
export function areOAuthCredentialsEquivalent(
|
||||
a: OAuthCredential | undefined,
|
||||
b: OAuthCredential,
|
||||
): boolean {
|
||||
if (!a) {
|
||||
return false;
|
||||
}
|
||||
if (a.type !== "oauth") {
|
||||
return false;
|
||||
}
|
||||
return (
|
||||
a.provider === b.provider &&
|
||||
a.access === b.access &&
|
||||
a.refresh === b.refresh &&
|
||||
a.expires === b.expires &&
|
||||
a.email === b.email &&
|
||||
a.enterpriseUrl === b.enterpriseUrl &&
|
||||
a.projectId === b.projectId &&
|
||||
a.accountId === b.accountId &&
|
||||
a.managedBy === b.managedBy
|
||||
);
|
||||
}
|
||||
|
||||
function hasNewerStoredOAuthCredential(
|
||||
existing: OAuthCredential | undefined,
|
||||
incoming: OAuthCredential,
|
||||
): boolean {
|
||||
return Boolean(
|
||||
existing &&
|
||||
existing.provider === incoming.provider &&
|
||||
Number.isFinite(existing.expires) &&
|
||||
(!Number.isFinite(incoming.expires) || existing.expires > incoming.expires),
|
||||
);
|
||||
}
|
||||
|
||||
export function shouldReplaceStoredOAuthCredential(
|
||||
existing: OAuthCredential | undefined,
|
||||
incoming: OAuthCredential,
|
||||
): boolean {
|
||||
if (!existing || existing.type !== "oauth") {
|
||||
return true;
|
||||
}
|
||||
if (areOAuthCredentialsEquivalent(existing, incoming)) {
|
||||
return false;
|
||||
}
|
||||
return !hasNewerStoredOAuthCredential(existing, incoming);
|
||||
}
|
||||
|
||||
const EXTERNAL_CLI_SYNC_PROVIDERS: ExternalCliSyncProvider[] = [
|
||||
{
|
||||
profileId: MINIMAX_CLI_PROFILE_ID,
|
||||
provider: "minimax-portal",
|
||||
managedBy: "minimax-cli",
|
||||
readCredentials: () => readMiniMaxCliCredentialsCached({ ttlMs: EXTERNAL_CLI_SYNC_TTL_MS }),
|
||||
},
|
||||
{
|
||||
profileId: OPENAI_CODEX_DEFAULT_PROFILE_ID,
|
||||
provider: "openai-codex",
|
||||
managedBy: "codex-cli",
|
||||
readCredentials: () => readCodexCliCredentialsCached({ ttlMs: EXTERNAL_CLI_SYNC_TTL_MS }),
|
||||
},
|
||||
];
|
||||
|
||||
function withExternalCliManager(
|
||||
creds: OAuthCredential,
|
||||
managedBy: ExternalOAuthManager,
|
||||
): OAuthCredential {
|
||||
return {
|
||||
...creds,
|
||||
managedBy,
|
||||
};
|
||||
}
|
||||
|
||||
function resolveExternalCliSyncProvider(params: {
|
||||
profileId?: string;
|
||||
credential?: OAuthCredential;
|
||||
}): ExternalCliSyncProvider | null {
|
||||
const byProfileId =
|
||||
typeof params.profileId === "string"
|
||||
? EXTERNAL_CLI_SYNC_PROVIDERS.find((entry) => entry.profileId === params.profileId)
|
||||
: undefined;
|
||||
if (byProfileId) {
|
||||
return byProfileId;
|
||||
}
|
||||
const managedBy = params.credential?.managedBy;
|
||||
if (!managedBy) {
|
||||
return null;
|
||||
}
|
||||
return (
|
||||
EXTERNAL_CLI_SYNC_PROVIDERS.find(
|
||||
(entry) =>
|
||||
entry.managedBy === managedBy &&
|
||||
(!params.credential || entry.provider === params.credential.provider),
|
||||
) ?? null
|
||||
);
|
||||
}
|
||||
|
||||
export function readManagedExternalCliCredential(params: {
|
||||
profileId?: string;
|
||||
credential: OAuthCredential;
|
||||
}): OAuthCredential | null {
|
||||
const provider = resolveExternalCliSyncProvider(params);
|
||||
if (!provider) {
|
||||
return null;
|
||||
}
|
||||
const creds = provider.readCredentials();
|
||||
if (!creds) {
|
||||
return null;
|
||||
}
|
||||
return withExternalCliManager(creds, provider.managedBy);
|
||||
}
|
||||
|
||||
/** Sync external CLI credentials into the store for a given provider. */
|
||||
function syncExternalCliCredentialsForProvider(
|
||||
store: AuthProfileStore,
|
||||
providerConfig: ExternalCliSyncProvider,
|
||||
options: ExternalCliSyncOptions,
|
||||
): boolean {
|
||||
const { profileId, provider, managedBy, readCredentials } = providerConfig;
|
||||
const existing = store.profiles[profileId];
|
||||
const creds = readCredentials();
|
||||
if (!creds) {
|
||||
return false;
|
||||
}
|
||||
const managedCreds = withExternalCliManager(creds, managedBy);
|
||||
|
||||
const existingOAuth = existing?.type === "oauth" ? existing : undefined;
|
||||
if (!shouldReplaceStoredOAuthCredential(existingOAuth, managedCreds)) {
|
||||
if (options.log !== false) {
|
||||
if (!areOAuthCredentialsEquivalent(existingOAuth, managedCreds) && existingOAuth) {
|
||||
log.debug(`kept newer stored ${provider} credentials over external cli sync`, {
|
||||
profileId,
|
||||
storedExpires: new Date(existingOAuth.expires).toISOString(),
|
||||
externalExpires: Number.isFinite(managedCreds.expires)
|
||||
? new Date(managedCreds.expires).toISOString()
|
||||
: null,
|
||||
});
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
store.profiles[profileId] = managedCreds;
|
||||
if (options.log !== false) {
|
||||
log.info(`synced ${provider} credentials from external cli`, {
|
||||
profileId,
|
||||
expires: new Date(managedCreds.expires).toISOString(),
|
||||
managedBy,
|
||||
});
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sync OAuth credentials from external CLI tools (MiniMax CLI, Codex CLI)
|
||||
* into the store.
|
||||
*
|
||||
* Returns true if any credentials were updated.
|
||||
*/
|
||||
export function syncExternalCliCredentials(
|
||||
store: AuthProfileStore,
|
||||
options: ExternalCliSyncOptions = {},
|
||||
): boolean {
|
||||
let mutated = false;
|
||||
|
||||
for (const provider of EXTERNAL_CLI_SYNC_PROVIDERS) {
|
||||
if (syncExternalCliCredentialsForProvider(store, provider, options)) {
|
||||
mutated = true;
|
||||
}
|
||||
}
|
||||
|
||||
return mutated;
|
||||
}
|
||||
|
|
@ -13,14 +13,9 @@ import {
|
|||
} from "../../plugins/provider-runtime.runtime.js";
|
||||
import { resolveSecretRefString, type SecretRefResolveCache } from "../../secrets/resolve.js";
|
||||
import { refreshChutesTokens } from "../chutes-oauth.js";
|
||||
import { writeCodexCliCredentials } from "../cli-credentials.js";
|
||||
import { AUTH_STORE_LOCK_OPTIONS, log } from "./constants.js";
|
||||
import { resolveTokenExpiryState } from "./credential-state.js";
|
||||
import { formatAuthDoctorHint } from "./doctor.js";
|
||||
import {
|
||||
areOAuthCredentialsEquivalent,
|
||||
readManagedExternalCliCredential,
|
||||
} from "./external-cli-sync.js";
|
||||
import { ensureAuthStoreFile, resolveAuthStorePath } from "./paths.js";
|
||||
import { assertNoOAuthSecretRefPolicyViolations } from "./policy.js";
|
||||
import { suggestOAuthProfileIdForLegacyDefault } from "./repair.js";
|
||||
|
|
@ -185,56 +180,6 @@ async function refreshOAuthTokenWithLock(params: {
|
|||
};
|
||||
}
|
||||
|
||||
const externallyManaged = readManagedExternalCliCredential({
|
||||
profileId: params.profileId,
|
||||
credential: cred,
|
||||
});
|
||||
if (externallyManaged) {
|
||||
if (!areOAuthCredentialsEquivalent(cred, externallyManaged)) {
|
||||
store.profiles[params.profileId] = externallyManaged;
|
||||
saveAuthProfileStore(store, params.agentDir);
|
||||
}
|
||||
if (Date.now() < externallyManaged.expires) {
|
||||
return {
|
||||
apiKey: await buildOAuthApiKey(externallyManaged.provider, externallyManaged),
|
||||
newCredentials: externallyManaged,
|
||||
};
|
||||
}
|
||||
if (externallyManaged.managedBy === "codex-cli") {
|
||||
const pluginRefreshed = await refreshProviderOAuthCredentialWithPlugin({
|
||||
provider: externallyManaged.provider,
|
||||
context: externallyManaged,
|
||||
});
|
||||
if (pluginRefreshed) {
|
||||
const refreshedCredentials: OAuthCredential = {
|
||||
...externallyManaged,
|
||||
...pluginRefreshed,
|
||||
type: "oauth",
|
||||
managedBy: "codex-cli",
|
||||
};
|
||||
if (!writeCodexCliCredentials(refreshedCredentials)) {
|
||||
log.warn("failed to persist refreshed codex credentials back to Codex storage", {
|
||||
profileId: params.profileId,
|
||||
});
|
||||
}
|
||||
store.profiles[params.profileId] = refreshedCredentials;
|
||||
saveAuthProfileStore(store, params.agentDir);
|
||||
return {
|
||||
apiKey: await buildOAuthApiKey(refreshedCredentials.provider, refreshedCredentials),
|
||||
newCredentials: refreshedCredentials,
|
||||
};
|
||||
}
|
||||
}
|
||||
throw new Error(
|
||||
`${externallyManaged.managedBy} credential is expired; refresh it in the external CLI and retry.`,
|
||||
);
|
||||
}
|
||||
if (cred.managedBy) {
|
||||
throw new Error(
|
||||
`${cred.managedBy} credential is unavailable; re-authenticate in the external CLI and retry.`,
|
||||
);
|
||||
}
|
||||
|
||||
const pluginRefreshed = await refreshProviderOAuthCredentialWithPlugin({
|
||||
provider: cred.provider,
|
||||
context: cred,
|
||||
|
|
|
|||
|
|
@ -3,13 +3,7 @@ import { resolveOAuthPath } from "../../config/paths.js";
|
|||
import { coerceSecretRef } from "../../config/types.secrets.js";
|
||||
import { withFileLock } from "../../infra/file-lock.js";
|
||||
import { loadJsonFile, saveJsonFile } from "../../infra/json-file.js";
|
||||
import {
|
||||
AUTH_STORE_LOCK_OPTIONS,
|
||||
AUTH_STORE_VERSION,
|
||||
EXTERNAL_CLI_SYNC_TTL_MS,
|
||||
log,
|
||||
} from "./constants.js";
|
||||
import { syncExternalCliCredentials } from "./external-cli-sync.js";
|
||||
import { AUTH_STORE_LOCK_OPTIONS, AUTH_STORE_VERSION, log } from "./constants.js";
|
||||
import { ensureAuthStoreFile, resolveAuthStorePath, resolveLegacyAuthStorePath } from "./paths.js";
|
||||
import type {
|
||||
AuthProfileCredential,
|
||||
|
|
@ -33,6 +27,7 @@ const loadedAuthStoreCache = new Map<
|
|||
string,
|
||||
{ mtimeMs: number | null; syncedAtMs: number; store: AuthProfileStore }
|
||||
>();
|
||||
const AUTH_STORE_CACHE_TTL_MS = 15 * 60 * 1000;
|
||||
|
||||
function resolveRuntimeStoreKey(agentDir?: string): string {
|
||||
return resolveAuthStorePath(agentDir);
|
||||
|
|
@ -108,7 +103,7 @@ function readCachedAuthProfileStore(
|
|||
if (!cached || cached.mtimeMs !== mtimeMs) {
|
||||
return null;
|
||||
}
|
||||
if (Date.now() - cached.syncedAtMs >= EXTERNAL_CLI_SYNC_TTL_MS) {
|
||||
if (Date.now() - cached.syncedAtMs >= AUTH_STORE_CACHE_TTL_MS) {
|
||||
return null;
|
||||
}
|
||||
return cloneAuthProfileStore(cached.store);
|
||||
|
|
@ -356,9 +351,6 @@ function mergeAuthProfileStores(
|
|||
function buildPersistedAuthProfileStore(store: AuthProfileStore): AuthProfileStore {
|
||||
const profiles = Object.fromEntries(
|
||||
Object.entries(store.profiles).flatMap(([profileId, credential]) => {
|
||||
if (credential.type === "oauth" && credential.managedBy) {
|
||||
return [];
|
||||
}
|
||||
if (credential.type === "api_key" && credential.keyRef && credential.key !== undefined) {
|
||||
const sanitized = { ...credential } as Record<string, unknown>;
|
||||
delete sanitized.key;
|
||||
|
|
@ -449,31 +441,10 @@ function loadCoercedStore(authPath: string): AuthProfileStore | null {
|
|||
return coerceAuthStore(raw);
|
||||
}
|
||||
|
||||
function shouldLogAuthStoreTiming(): boolean {
|
||||
return process.env.OPENCLAW_DEBUG_INGRESS_TIMING === "1";
|
||||
}
|
||||
|
||||
function syncExternalCliCredentialsTimed(
|
||||
store: AuthProfileStore,
|
||||
options?: Parameters<typeof syncExternalCliCredentials>[1],
|
||||
): boolean {
|
||||
if (!shouldLogAuthStoreTiming()) {
|
||||
return syncExternalCliCredentials(store, options);
|
||||
}
|
||||
const startMs = Date.now();
|
||||
const mutated = syncExternalCliCredentials(store, options);
|
||||
log.info(
|
||||
`auth-store stage=external-cli-sync elapsedMs=${Date.now() - startMs} mutated=${mutated}`,
|
||||
);
|
||||
return mutated;
|
||||
}
|
||||
|
||||
export function loadAuthProfileStore(): AuthProfileStore {
|
||||
const authPath = resolveAuthStorePath();
|
||||
const asStore = loadCoercedStore(authPath);
|
||||
if (asStore) {
|
||||
// Sync from external CLI tools on every load.
|
||||
syncExternalCliCredentialsTimed(asStore);
|
||||
return asStore;
|
||||
}
|
||||
const legacyRaw = loadJsonFile(resolveLegacyAuthStorePath());
|
||||
|
|
@ -484,13 +455,10 @@ export function loadAuthProfileStore(): AuthProfileStore {
|
|||
profiles: {},
|
||||
};
|
||||
applyLegacyStore(store, legacy);
|
||||
syncExternalCliCredentialsTimed(store);
|
||||
return store;
|
||||
}
|
||||
|
||||
const store: AuthProfileStore = { version: AUTH_STORE_VERSION, profiles: {} };
|
||||
syncExternalCliCredentialsTimed(store);
|
||||
return store;
|
||||
return { version: AUTH_STORE_VERSION, profiles: {} };
|
||||
}
|
||||
|
||||
function loadAuthProfileStoreForAgent(
|
||||
|
|
@ -507,9 +475,6 @@ function loadAuthProfileStoreForAgent(
|
|||
}
|
||||
const asStore = loadCoercedStore(authPath);
|
||||
if (asStore) {
|
||||
// Runtime secret activation must remain read-only:
|
||||
// sync external CLI credentials in-memory, but never persist while readOnly.
|
||||
syncExternalCliCredentialsTimed(asStore, { log: !readOnly });
|
||||
if (!readOnly) {
|
||||
writeCachedAuthProfileStore(authPath, readAuthStoreMtimeMs(authPath), asStore);
|
||||
}
|
||||
|
|
@ -541,8 +506,6 @@ function loadAuthProfileStoreForAgent(
|
|||
}
|
||||
|
||||
const mergedOAuth = mergeOAuthFileIntoStore(store);
|
||||
// Keep external CLI credentials visible in runtime even during read-only loads.
|
||||
syncExternalCliCredentialsTimed(store, { log: !readOnly });
|
||||
const forceReadOnly = process.env.OPENCLAW_AUTH_STORE_READONLY === "1";
|
||||
const shouldWrite = !readOnly && !forceReadOnly && (legacy !== null || mergedOAuth);
|
||||
if (shouldWrite) {
|
||||
|
|
@ -619,7 +582,6 @@ export function saveAuthProfileStore(store: AuthProfileStore, agentDir?: string)
|
|||
const payload = buildPersistedAuthProfileStore(store);
|
||||
saveJsonFile(authPath, payload);
|
||||
const runtimeStore = cloneAuthProfileStore(store);
|
||||
syncExternalCliCredentialsTimed(runtimeStore, { log: false });
|
||||
writeCachedAuthProfileStore(authPath, readAuthStoreMtimeMs(authPath), runtimeStore);
|
||||
if (runtimeAuthStoreSnapshots.has(runtimeKey)) {
|
||||
runtimeAuthStoreSnapshots.set(runtimeKey, cloneAuthProfileStore(runtimeStore));
|
||||
|
|
|
|||
|
|
@ -2,7 +2,6 @@ import type { OpenClawConfig } from "../../config/config.js";
|
|||
import type { SecretRef } from "../../config/types.secrets.js";
|
||||
|
||||
export type OAuthProvider = string;
|
||||
export type ExternalOAuthManager = "codex-cli" | "minimax-cli";
|
||||
|
||||
export type OAuthCredentials = {
|
||||
access: string;
|
||||
|
|
@ -48,11 +47,9 @@ export type OAuthCredential = OAuthCredentials & {
|
|||
email?: string;
|
||||
displayName?: string;
|
||||
/**
|
||||
* When set, another CLI owns refresh-token rotation for this credential.
|
||||
* OpenClaw should prefer that external source as canonical storage and avoid
|
||||
* persisting copied secrets into auth-profiles.json.
|
||||
* Legacy metadata preserved for backwards compatibility with older stores.
|
||||
*/
|
||||
managedBy?: ExternalOAuthManager;
|
||||
managedBy?: string;
|
||||
};
|
||||
|
||||
export type AuthProfileCredential = ApiKeyCredential | TokenCredential | OAuthCredential;
|
||||
|
|
|
|||
|
|
@ -1,144 +0,0 @@
|
|||
import { afterEach, describe, expect, it } from "vitest";
|
||||
import type { AuthProfileStore } from "./auth-profiles/types.js";
|
||||
import {
|
||||
resetCliAuthEpochTestDeps,
|
||||
resolveCliAuthEpoch,
|
||||
setCliAuthEpochTestDeps,
|
||||
} from "./cli-auth-epoch.js";
|
||||
|
||||
describe("resolveCliAuthEpoch", () => {
|
||||
afterEach(() => {
|
||||
resetCliAuthEpochTestDeps();
|
||||
});
|
||||
|
||||
it("returns undefined when no local or auth-profile credentials exist", async () => {
|
||||
setCliAuthEpochTestDeps({
|
||||
readCodexCliCredentialsCached: () => null,
|
||||
loadAuthProfileStoreForRuntime: () => ({
|
||||
version: 1,
|
||||
profiles: {},
|
||||
}),
|
||||
});
|
||||
|
||||
await expect(resolveCliAuthEpoch({ provider: "codex-cli" })).resolves.toBeUndefined();
|
||||
await expect(
|
||||
resolveCliAuthEpoch({
|
||||
provider: "google-gemini-cli",
|
||||
authProfileId: "google:work",
|
||||
}),
|
||||
).resolves.toBeUndefined();
|
||||
});
|
||||
|
||||
it("changes when codex cli credentials change", async () => {
|
||||
let access = "access-a";
|
||||
setCliAuthEpochTestDeps({
|
||||
readCodexCliCredentialsCached: () => ({
|
||||
type: "oauth",
|
||||
provider: "openai-codex",
|
||||
access,
|
||||
refresh: "refresh",
|
||||
expires: 1,
|
||||
accountId: "acct-1",
|
||||
}),
|
||||
});
|
||||
|
||||
const first = await resolveCliAuthEpoch({ provider: "codex-cli" });
|
||||
access = "access-b";
|
||||
const second = await resolveCliAuthEpoch({ provider: "codex-cli" });
|
||||
|
||||
expect(first).toBeDefined();
|
||||
expect(second).toBeDefined();
|
||||
expect(second).not.toBe(first);
|
||||
});
|
||||
|
||||
it("changes when auth profile credentials change", async () => {
|
||||
let store: AuthProfileStore = {
|
||||
version: 1,
|
||||
profiles: {
|
||||
"anthropic:work": {
|
||||
type: "oauth",
|
||||
provider: "anthropic",
|
||||
access: "access-a",
|
||||
refresh: "refresh",
|
||||
expires: 1,
|
||||
},
|
||||
},
|
||||
};
|
||||
setCliAuthEpochTestDeps({
|
||||
loadAuthProfileStoreForRuntime: () => store,
|
||||
});
|
||||
|
||||
const first = await resolveCliAuthEpoch({
|
||||
provider: "google-gemini-cli",
|
||||
authProfileId: "anthropic:work",
|
||||
});
|
||||
store = {
|
||||
version: 1,
|
||||
profiles: {
|
||||
"anthropic:work": {
|
||||
type: "oauth",
|
||||
provider: "anthropic",
|
||||
access: "access-b",
|
||||
refresh: "refresh",
|
||||
expires: 1,
|
||||
},
|
||||
},
|
||||
};
|
||||
const second = await resolveCliAuthEpoch({
|
||||
provider: "google-gemini-cli",
|
||||
authProfileId: "anthropic:work",
|
||||
});
|
||||
|
||||
expect(first).toBeDefined();
|
||||
expect(second).toBeDefined();
|
||||
expect(second).not.toBe(first);
|
||||
});
|
||||
|
||||
it("mixes local codex and auth-profile state", async () => {
|
||||
let access = "local-access-a";
|
||||
let refresh = "profile-refresh-a";
|
||||
setCliAuthEpochTestDeps({
|
||||
readCodexCliCredentialsCached: () => ({
|
||||
type: "oauth",
|
||||
provider: "openai-codex",
|
||||
access,
|
||||
refresh: "local-refresh",
|
||||
expires: 1,
|
||||
accountId: "acct-1",
|
||||
}),
|
||||
loadAuthProfileStoreForRuntime: () => ({
|
||||
version: 1,
|
||||
profiles: {
|
||||
"openai:work": {
|
||||
type: "oauth",
|
||||
provider: "openai",
|
||||
access: "profile-access",
|
||||
refresh,
|
||||
expires: 1,
|
||||
},
|
||||
},
|
||||
}),
|
||||
});
|
||||
|
||||
const first = await resolveCliAuthEpoch({
|
||||
provider: "codex-cli",
|
||||
authProfileId: "openai:work",
|
||||
});
|
||||
access = "local-access-b";
|
||||
const second = await resolveCliAuthEpoch({
|
||||
provider: "codex-cli",
|
||||
authProfileId: "openai:work",
|
||||
});
|
||||
refresh = "profile-refresh-b";
|
||||
const third = await resolveCliAuthEpoch({
|
||||
provider: "codex-cli",
|
||||
authProfileId: "openai:work",
|
||||
});
|
||||
|
||||
expect(first).toBeDefined();
|
||||
expect(second).toBeDefined();
|
||||
expect(third).toBeDefined();
|
||||
expect(second).not.toBe(first);
|
||||
expect(third).not.toBe(second);
|
||||
});
|
||||
});
|
||||
|
|
@ -1,138 +0,0 @@
|
|||
import crypto from "node:crypto";
|
||||
import { loadAuthProfileStoreForRuntime } from "./auth-profiles/store.js";
|
||||
import type { AuthProfileCredential, AuthProfileStore } from "./auth-profiles/types.js";
|
||||
import { readCodexCliCredentialsCached, type CodexCliCredential } from "./cli-credentials.js";
|
||||
|
||||
type CliAuthEpochDeps = {
|
||||
readCodexCliCredentialsCached: typeof readCodexCliCredentialsCached;
|
||||
loadAuthProfileStoreForRuntime: typeof loadAuthProfileStoreForRuntime;
|
||||
};
|
||||
|
||||
const defaultCliAuthEpochDeps: CliAuthEpochDeps = {
|
||||
readCodexCliCredentialsCached,
|
||||
loadAuthProfileStoreForRuntime,
|
||||
};
|
||||
|
||||
const cliAuthEpochDeps: CliAuthEpochDeps = { ...defaultCliAuthEpochDeps };
|
||||
|
||||
export function setCliAuthEpochTestDeps(overrides: Partial<CliAuthEpochDeps>): void {
|
||||
Object.assign(cliAuthEpochDeps, overrides);
|
||||
}
|
||||
|
||||
export function resetCliAuthEpochTestDeps(): void {
|
||||
Object.assign(cliAuthEpochDeps, defaultCliAuthEpochDeps);
|
||||
}
|
||||
|
||||
function hashCliAuthEpochPart(value: string): string {
|
||||
return crypto.createHash("sha256").update(value).digest("hex");
|
||||
}
|
||||
|
||||
function encodeUnknown(value: unknown): string {
|
||||
return JSON.stringify(value ?? null);
|
||||
}
|
||||
|
||||
function encodeCodexCredential(credential: CodexCliCredential): string {
|
||||
return JSON.stringify([
|
||||
credential.type,
|
||||
credential.provider,
|
||||
credential.access,
|
||||
credential.refresh,
|
||||
credential.expires,
|
||||
credential.accountId ?? null,
|
||||
]);
|
||||
}
|
||||
|
||||
function encodeAuthProfileCredential(credential: AuthProfileCredential): string {
|
||||
switch (credential.type) {
|
||||
case "api_key":
|
||||
return JSON.stringify([
|
||||
"api_key",
|
||||
credential.provider,
|
||||
credential.key ?? null,
|
||||
encodeUnknown(credential.keyRef),
|
||||
credential.email ?? null,
|
||||
credential.displayName ?? null,
|
||||
encodeUnknown(credential.metadata),
|
||||
]);
|
||||
case "token":
|
||||
return JSON.stringify([
|
||||
"token",
|
||||
credential.provider,
|
||||
credential.token ?? null,
|
||||
encodeUnknown(credential.tokenRef),
|
||||
credential.expires ?? null,
|
||||
credential.email ?? null,
|
||||
credential.displayName ?? null,
|
||||
]);
|
||||
case "oauth":
|
||||
return JSON.stringify([
|
||||
"oauth",
|
||||
credential.provider,
|
||||
credential.access,
|
||||
credential.refresh,
|
||||
credential.expires,
|
||||
credential.clientId ?? null,
|
||||
credential.email ?? null,
|
||||
credential.displayName ?? null,
|
||||
credential.enterpriseUrl ?? null,
|
||||
credential.projectId ?? null,
|
||||
credential.accountId ?? null,
|
||||
credential.managedBy ?? null,
|
||||
]);
|
||||
}
|
||||
}
|
||||
|
||||
function getLocalCliCredentialFingerprint(provider: string): string | undefined {
|
||||
switch (provider) {
|
||||
case "codex-cli": {
|
||||
const credential = cliAuthEpochDeps.readCodexCliCredentialsCached({
|
||||
ttlMs: 5000,
|
||||
});
|
||||
return credential ? hashCliAuthEpochPart(encodeCodexCredential(credential)) : undefined;
|
||||
}
|
||||
default:
|
||||
return undefined;
|
||||
}
|
||||
}
|
||||
|
||||
function getAuthProfileCredential(
|
||||
store: AuthProfileStore,
|
||||
authProfileId: string | undefined,
|
||||
): AuthProfileCredential | undefined {
|
||||
if (!authProfileId) {
|
||||
return undefined;
|
||||
}
|
||||
return store.profiles[authProfileId];
|
||||
}
|
||||
|
||||
export async function resolveCliAuthEpoch(params: {
|
||||
provider: string;
|
||||
authProfileId?: string;
|
||||
}): Promise<string | undefined> {
|
||||
const provider = params.provider.trim();
|
||||
const authProfileId = params.authProfileId?.trim() || undefined;
|
||||
const parts: string[] = [];
|
||||
|
||||
const localFingerprint = getLocalCliCredentialFingerprint(provider);
|
||||
if (localFingerprint) {
|
||||
parts.push(`local:${provider}:${localFingerprint}`);
|
||||
}
|
||||
|
||||
if (authProfileId) {
|
||||
const store = cliAuthEpochDeps.loadAuthProfileStoreForRuntime(undefined, {
|
||||
readOnly: true,
|
||||
allowKeychainPrompt: false,
|
||||
});
|
||||
const credential = getAuthProfileCredential(store, authProfileId);
|
||||
if (credential) {
|
||||
parts.push(
|
||||
`profile:${authProfileId}:${hashCliAuthEpochPart(encodeAuthProfileCredential(credential))}`,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
if (parts.length === 0) {
|
||||
return undefined;
|
||||
}
|
||||
return hashCliAuthEpochPart(parts.join("\n"));
|
||||
}
|
||||
|
|
@ -1,205 +0,0 @@
|
|||
import { beforeEach, describe, expect, it } from "vitest";
|
||||
import type { OpenClawConfig } from "../config/config.js";
|
||||
import type { CliBackendConfig } from "../config/types.js";
|
||||
import { createEmptyPluginRegistry } from "../plugins/registry.js";
|
||||
import { setActivePluginRegistry } from "../plugins/runtime.js";
|
||||
import { resolveCliBackendConfig } from "./cli-backends.js";
|
||||
|
||||
function createBackendEntry(params: {
|
||||
pluginId: string;
|
||||
id: string;
|
||||
config: CliBackendConfig;
|
||||
bundleMcp?: boolean;
|
||||
normalizeConfig?: (config: CliBackendConfig) => CliBackendConfig;
|
||||
}) {
|
||||
return {
|
||||
pluginId: params.pluginId,
|
||||
source: "test",
|
||||
backend: {
|
||||
id: params.id,
|
||||
config: params.config,
|
||||
...(params.bundleMcp ? { bundleMcp: params.bundleMcp } : {}),
|
||||
...(params.normalizeConfig ? { normalizeConfig: params.normalizeConfig } : {}),
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
beforeEach(() => {
|
||||
const registry = createEmptyPluginRegistry();
|
||||
registry.cliBackends = [
|
||||
createBackendEntry({
|
||||
pluginId: "openai",
|
||||
id: "codex-cli",
|
||||
config: {
|
||||
command: "codex",
|
||||
args: [
|
||||
"exec",
|
||||
"--json",
|
||||
"--color",
|
||||
"never",
|
||||
"--sandbox",
|
||||
"workspace-write",
|
||||
"--skip-git-repo-check",
|
||||
],
|
||||
resumeArgs: [
|
||||
"exec",
|
||||
"resume",
|
||||
"{sessionId}",
|
||||
"--color",
|
||||
"never",
|
||||
"--sandbox",
|
||||
"workspace-write",
|
||||
"--skip-git-repo-check",
|
||||
],
|
||||
reliability: {
|
||||
watchdog: {
|
||||
fresh: {
|
||||
noOutputTimeoutRatio: 0.8,
|
||||
minMs: 60_000,
|
||||
maxMs: 180_000,
|
||||
},
|
||||
resume: {
|
||||
noOutputTimeoutRatio: 0.3,
|
||||
minMs: 60_000,
|
||||
maxMs: 180_000,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}),
|
||||
createBackendEntry({
|
||||
pluginId: "google",
|
||||
id: "google-gemini-cli",
|
||||
bundleMcp: false,
|
||||
config: {
|
||||
command: "gemini",
|
||||
args: ["--prompt", "--output-format", "json"],
|
||||
resumeArgs: ["--resume", "{sessionId}", "--prompt", "--output-format", "json"],
|
||||
modelArg: "--model",
|
||||
sessionMode: "existing",
|
||||
sessionIdFields: ["session_id", "sessionId"],
|
||||
modelAliases: { pro: "gemini-3.1-pro-preview" },
|
||||
},
|
||||
}),
|
||||
];
|
||||
setActivePluginRegistry(registry);
|
||||
});
|
||||
|
||||
describe("resolveCliBackendConfig reliability merge", () => {
|
||||
it("defaults codex-cli to workspace-write for fresh and resume runs", () => {
|
||||
const resolved = resolveCliBackendConfig("codex-cli");
|
||||
|
||||
expect(resolved).not.toBeNull();
|
||||
expect(resolved?.config.args).toEqual([
|
||||
"exec",
|
||||
"--json",
|
||||
"--color",
|
||||
"never",
|
||||
"--sandbox",
|
||||
"workspace-write",
|
||||
"--skip-git-repo-check",
|
||||
]);
|
||||
expect(resolved?.config.resumeArgs).toEqual([
|
||||
"exec",
|
||||
"resume",
|
||||
"{sessionId}",
|
||||
"--color",
|
||||
"never",
|
||||
"--sandbox",
|
||||
"workspace-write",
|
||||
"--skip-git-repo-check",
|
||||
]);
|
||||
});
|
||||
|
||||
it("deep-merges reliability watchdog overrides for codex", () => {
|
||||
const cfg = {
|
||||
agents: {
|
||||
defaults: {
|
||||
cliBackends: {
|
||||
"codex-cli": {
|
||||
command: "codex",
|
||||
reliability: {
|
||||
watchdog: {
|
||||
resume: {
|
||||
noOutputTimeoutMs: 42_000,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
} satisfies OpenClawConfig;
|
||||
|
||||
const resolved = resolveCliBackendConfig("codex-cli", cfg);
|
||||
|
||||
expect(resolved).not.toBeNull();
|
||||
expect(resolved?.config.reliability?.watchdog?.resume?.noOutputTimeoutMs).toBe(42_000);
|
||||
// Ensure defaults are retained when only one field is overridden.
|
||||
expect(resolved?.config.reliability?.watchdog?.resume?.noOutputTimeoutRatio).toBe(0.3);
|
||||
expect(resolved?.config.reliability?.watchdog?.resume?.minMs).toBe(60_000);
|
||||
expect(resolved?.config.reliability?.watchdog?.resume?.maxMs).toBe(180_000);
|
||||
expect(resolved?.config.reliability?.watchdog?.fresh?.noOutputTimeoutRatio).toBe(0.8);
|
||||
});
|
||||
});
|
||||
|
||||
describe("resolveCliBackendConfig google-gemini-cli defaults", () => {
|
||||
it("uses Gemini CLI json args and existing-session resume mode", () => {
|
||||
const resolved = resolveCliBackendConfig("google-gemini-cli");
|
||||
|
||||
expect(resolved).not.toBeNull();
|
||||
expect(resolved?.bundleMcp).toBe(false);
|
||||
expect(resolved?.config.args).toEqual(["--prompt", "--output-format", "json"]);
|
||||
expect(resolved?.config.resumeArgs).toEqual([
|
||||
"--resume",
|
||||
"{sessionId}",
|
||||
"--prompt",
|
||||
"--output-format",
|
||||
"json",
|
||||
]);
|
||||
expect(resolved?.config.modelArg).toBe("--model");
|
||||
expect(resolved?.config.sessionMode).toBe("existing");
|
||||
expect(resolved?.config.sessionIdFields).toEqual(["session_id", "sessionId"]);
|
||||
expect(resolved?.config.modelAliases?.pro).toBe("gemini-3.1-pro-preview");
|
||||
});
|
||||
});
|
||||
|
||||
describe("resolveCliBackendConfig alias precedence", () => {
|
||||
it("prefers the canonical backend key over legacy aliases when both are configured", () => {
|
||||
const registry = createEmptyPluginRegistry();
|
||||
registry.cliBackends = [
|
||||
createBackendEntry({
|
||||
pluginId: "moonshot",
|
||||
id: "kimi",
|
||||
config: {
|
||||
command: "kimi",
|
||||
args: ["--default"],
|
||||
},
|
||||
}),
|
||||
];
|
||||
setActivePluginRegistry(registry);
|
||||
|
||||
const cfg = {
|
||||
agents: {
|
||||
defaults: {
|
||||
cliBackends: {
|
||||
"kimi-coding": {
|
||||
command: "kimi-legacy",
|
||||
args: ["--legacy"],
|
||||
},
|
||||
kimi: {
|
||||
command: "kimi-canonical",
|
||||
args: ["--canonical"],
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
} satisfies OpenClawConfig;
|
||||
|
||||
const resolved = resolveCliBackendConfig("kimi", cfg);
|
||||
|
||||
expect(resolved).not.toBeNull();
|
||||
expect(resolved?.config.command).toBe("kimi-canonical");
|
||||
expect(resolved?.config.args).toEqual(["--canonical"]);
|
||||
});
|
||||
});
|
||||
|
|
@ -1,172 +0,0 @@
|
|||
import type { OpenClawConfig } from "../config/config.js";
|
||||
import type { CliBackendConfig } from "../config/types.js";
|
||||
import { resolveRuntimeCliBackends } from "../plugins/cli-backends.runtime.js";
|
||||
import { resolvePluginSetupCliBackend } from "../plugins/setup-registry.js";
|
||||
import { normalizeProviderId } from "./model-selection.js";
|
||||
|
||||
export type ResolvedCliBackend = {
|
||||
id: string;
|
||||
config: CliBackendConfig;
|
||||
bundleMcp: boolean;
|
||||
pluginId?: string;
|
||||
};
|
||||
|
||||
type FallbackCliBackendPolicy = {
|
||||
bundleMcp: boolean;
|
||||
baseConfig?: CliBackendConfig;
|
||||
normalizeConfig?: (config: CliBackendConfig) => CliBackendConfig;
|
||||
};
|
||||
|
||||
const FALLBACK_CLI_BACKEND_POLICIES: Record<string, FallbackCliBackendPolicy> = {};
|
||||
|
||||
function resolveSetupCliBackendPolicy(provider: string): FallbackCliBackendPolicy | undefined {
|
||||
const entry = resolvePluginSetupCliBackend({
|
||||
backend: provider,
|
||||
});
|
||||
if (!entry) {
|
||||
return undefined;
|
||||
}
|
||||
return {
|
||||
// Setup-registered backends keep narrow CLI paths generic even when the
|
||||
// runtime plugin registry has not booted yet.
|
||||
bundleMcp: entry.backend.bundleMcp === true,
|
||||
baseConfig: entry.backend.config,
|
||||
normalizeConfig: entry.backend.normalizeConfig,
|
||||
};
|
||||
}
|
||||
|
||||
function resolveFallbackCliBackendPolicy(provider: string): FallbackCliBackendPolicy | undefined {
|
||||
return FALLBACK_CLI_BACKEND_POLICIES[provider] ?? resolveSetupCliBackendPolicy(provider);
|
||||
}
|
||||
|
||||
function normalizeBackendKey(key: string): string {
|
||||
return normalizeProviderId(key);
|
||||
}
|
||||
|
||||
function pickBackendConfig(
|
||||
config: Record<string, CliBackendConfig>,
|
||||
normalizedId: string,
|
||||
): CliBackendConfig | undefined {
|
||||
const directKey = Object.keys(config).find((key) => key.trim().toLowerCase() === normalizedId);
|
||||
if (directKey) {
|
||||
return config[directKey];
|
||||
}
|
||||
for (const [key, entry] of Object.entries(config)) {
|
||||
if (normalizeBackendKey(key) === normalizedId) {
|
||||
return entry;
|
||||
}
|
||||
}
|
||||
return undefined;
|
||||
}
|
||||
|
||||
function resolveRegisteredBackend(provider: string) {
|
||||
const normalized = normalizeBackendKey(provider);
|
||||
return resolveRuntimeCliBackends().find((entry) => normalizeBackendKey(entry.id) === normalized);
|
||||
}
|
||||
|
||||
function mergeBackendConfig(base: CliBackendConfig, override?: CliBackendConfig): CliBackendConfig {
|
||||
if (!override) {
|
||||
return { ...base };
|
||||
}
|
||||
const baseFresh = base.reliability?.watchdog?.fresh ?? {};
|
||||
const baseResume = base.reliability?.watchdog?.resume ?? {};
|
||||
const overrideFresh = override.reliability?.watchdog?.fresh ?? {};
|
||||
const overrideResume = override.reliability?.watchdog?.resume ?? {};
|
||||
return {
|
||||
...base,
|
||||
...override,
|
||||
args: override.args ?? base.args,
|
||||
env: { ...base.env, ...override.env },
|
||||
modelAliases: { ...base.modelAliases, ...override.modelAliases },
|
||||
clearEnv: Array.from(new Set([...(base.clearEnv ?? []), ...(override.clearEnv ?? [])])),
|
||||
sessionIdFields: override.sessionIdFields ?? base.sessionIdFields,
|
||||
sessionArgs: override.sessionArgs ?? base.sessionArgs,
|
||||
resumeArgs: override.resumeArgs ?? base.resumeArgs,
|
||||
reliability: {
|
||||
...base.reliability,
|
||||
...override.reliability,
|
||||
watchdog: {
|
||||
...base.reliability?.watchdog,
|
||||
...override.reliability?.watchdog,
|
||||
fresh: {
|
||||
...baseFresh,
|
||||
...overrideFresh,
|
||||
},
|
||||
resume: {
|
||||
...baseResume,
|
||||
...overrideResume,
|
||||
},
|
||||
},
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
export function resolveCliBackendIds(cfg?: OpenClawConfig): Set<string> {
|
||||
const ids = new Set<string>();
|
||||
for (const backend of resolveRuntimeCliBackends()) {
|
||||
ids.add(normalizeBackendKey(backend.id));
|
||||
}
|
||||
const configured = cfg?.agents?.defaults?.cliBackends ?? {};
|
||||
for (const key of Object.keys(configured)) {
|
||||
ids.add(normalizeBackendKey(key));
|
||||
}
|
||||
return ids;
|
||||
}
|
||||
|
||||
export function resolveCliBackendConfig(
|
||||
provider: string,
|
||||
cfg?: OpenClawConfig,
|
||||
): ResolvedCliBackend | null {
|
||||
const normalized = normalizeBackendKey(provider);
|
||||
const fallbackPolicy = resolveFallbackCliBackendPolicy(normalized);
|
||||
const configured = cfg?.agents?.defaults?.cliBackends ?? {};
|
||||
const override = pickBackendConfig(configured, normalized);
|
||||
const registered = resolveRegisteredBackend(normalized);
|
||||
if (registered) {
|
||||
const merged = mergeBackendConfig(registered.config, override);
|
||||
const config = registered.normalizeConfig ? registered.normalizeConfig(merged) : merged;
|
||||
const command = config.command?.trim();
|
||||
if (!command) {
|
||||
return null;
|
||||
}
|
||||
return {
|
||||
id: normalized,
|
||||
config: { ...config, command },
|
||||
bundleMcp: registered.bundleMcp === true,
|
||||
pluginId: registered.pluginId,
|
||||
};
|
||||
}
|
||||
|
||||
if (!override) {
|
||||
if (!fallbackPolicy?.baseConfig) {
|
||||
return null;
|
||||
}
|
||||
const baseConfig = fallbackPolicy.normalizeConfig
|
||||
? fallbackPolicy.normalizeConfig(fallbackPolicy.baseConfig)
|
||||
: fallbackPolicy.baseConfig;
|
||||
const command = baseConfig.command?.trim();
|
||||
if (!command) {
|
||||
return null;
|
||||
}
|
||||
return {
|
||||
id: normalized,
|
||||
config: { ...baseConfig, command },
|
||||
bundleMcp: fallbackPolicy.bundleMcp,
|
||||
};
|
||||
}
|
||||
const mergedFallback = fallbackPolicy?.baseConfig
|
||||
? mergeBackendConfig(fallbackPolicy.baseConfig, override)
|
||||
: override;
|
||||
const config = fallbackPolicy?.normalizeConfig
|
||||
? fallbackPolicy.normalizeConfig(mergedFallback)
|
||||
: mergedFallback;
|
||||
const command = config.command?.trim();
|
||||
if (!command) {
|
||||
return null;
|
||||
}
|
||||
return {
|
||||
id: normalized,
|
||||
config: { ...config, command },
|
||||
bundleMcp: fallbackPolicy?.bundleMcp === true,
|
||||
};
|
||||
}
|
||||
|
|
@ -1,281 +0,0 @@
|
|||
import fs from "node:fs";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { afterEach, beforeAll, beforeEach, describe, expect, it, vi } from "vitest";
|
||||
|
||||
const execSyncMock = vi.fn();
|
||||
const execFileSyncMock = vi.fn();
|
||||
const CLI_CREDENTIALS_CACHE_TTL_MS = 15 * 60 * 1000;
|
||||
let readCodexCliCredentialsCached: typeof import("./cli-credentials.js").readCodexCliCredentialsCached;
|
||||
let resetCliCredentialCachesForTest: typeof import("./cli-credentials.js").resetCliCredentialCachesForTest;
|
||||
let readCodexCliCredentials: typeof import("./cli-credentials.js").readCodexCliCredentials;
|
||||
let writeCodexCliCredentials: typeof import("./cli-credentials.js").writeCodexCliCredentials;
|
||||
let writeCodexCliFileCredentials: typeof import("./cli-credentials.js").writeCodexCliFileCredentials;
|
||||
|
||||
function getAddGenericPasswordCall() {
|
||||
return execFileSyncMock.mock.calls.find(
|
||||
([binary, args]) =>
|
||||
String(binary) === "security" &&
|
||||
Array.isArray(args) &&
|
||||
(args as unknown[]).map(String).includes("add-generic-password"),
|
||||
);
|
||||
}
|
||||
|
||||
function createJwtWithExp(expSeconds: number): string {
|
||||
const encode = (value: Record<string, unknown>) =>
|
||||
Buffer.from(JSON.stringify(value)).toString("base64url");
|
||||
return `${encode({ alg: "RS256", typ: "JWT" })}.${encode({ exp: expSeconds })}.signature`;
|
||||
}
|
||||
|
||||
describe("cli credentials", () => {
  // Bind the module under test lazily so the mocks declared above exist first.
  beforeAll(async () => {
    ({
      readCodexCliCredentialsCached,
      resetCliCredentialCachesForTest,
      readCodexCliCredentials,
      writeCodexCliCredentials,
      writeCodexCliFileCredentials,
    } = await import("./cli-credentials.js"));
  });

  beforeEach(() => {
    vi.useFakeTimers();
  });

  // Restore timers and reset exec mocks, env, and module caches between tests.
  afterEach(() => {
    vi.useRealTimers();
    execSyncMock.mockClear().mockImplementation(() => undefined);
    execFileSyncMock.mockClear().mockImplementation(() => undefined);
    delete process.env.CODEX_HOME;
    resetCliCredentialCachesForTest();
  });

  // darwin path: the `security` keychain lookup is the preferred source.
  it("reads Codex credentials from keychain when available", async () => {
    const tempHome = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-codex-"));
    process.env.CODEX_HOME = tempHome;
    const expSeconds = Math.floor(Date.parse("2026-03-23T00:48:49Z") / 1000);

    // Keychain accounts are "cli|<hash>"; only the stable prefix is asserted.
    const accountHash = "cli|";

    execSyncMock.mockImplementation((command: unknown) => {
      const cmd = String(command);
      expect(cmd).toContain("Codex Auth");
      expect(cmd).toContain(accountHash);
      return JSON.stringify({
        tokens: {
          access_token: createJwtWithExp(expSeconds),
          refresh_token: "keychain-refresh",
        },
        last_refresh: "2026-01-01T00:00:00Z",
      });
    });

    const creds = readCodexCliCredentials({ platform: "darwin", execSync: execSyncMock });

    // Expiry must come from the JWT's exp claim (seconds -> ms).
    expect(creds).toMatchObject({
      access: createJwtWithExp(expSeconds),
      refresh: "keychain-refresh",
      provider: "openai-codex",
      expires: expSeconds * 1000,
    });
  });

  // When the keychain lookup throws, auth.json under CODEX_HOME is used.
  it("falls back to Codex auth.json when keychain is unavailable", async () => {
    const tempHome = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-codex-"));
    process.env.CODEX_HOME = tempHome;
    const expSeconds = Math.floor(Date.parse("2026-03-24T12:34:56Z") / 1000);
    execSyncMock.mockImplementation(() => {
      throw new Error("not found");
    });

    const authPath = path.join(tempHome, "auth.json");
    fs.mkdirSync(tempHome, { recursive: true, mode: 0o700 });
    fs.writeFileSync(
      authPath,
      JSON.stringify({
        tokens: {
          access_token: createJwtWithExp(expSeconds),
          refresh_token: "file-refresh",
        },
      }),
      "utf8",
    );

    const creds = readCodexCliCredentials({ execSync: execSyncMock });

    expect(creds).toMatchObject({
      access: createJwtWithExp(expSeconds),
      refresh: "file-refresh",
      provider: "openai-codex",
      expires: expSeconds * 1000,
    });
  });

  // The auth.json mtime fingerprint must bust the cache inside the TTL window.
  it("invalidates cached Codex credentials when auth.json changes within the TTL window", () => {
    const tempHome = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-codex-cache-"));
    process.env.CODEX_HOME = tempHome;
    const authPath = path.join(tempHome, "auth.json");
    const firstExpiry = Math.floor(Date.parse("2026-03-24T12:34:56Z") / 1000);
    const secondExpiry = Math.floor(Date.parse("2026-03-25T12:34:56Z") / 1000);
    try {
      fs.mkdirSync(tempHome, { recursive: true, mode: 0o700 });
      fs.writeFileSync(
        authPath,
        JSON.stringify({
          tokens: {
            access_token: createJwtWithExp(firstExpiry),
            refresh_token: "stale-refresh",
          },
        }),
        "utf8",
      );
      // Pin both the file mtime and the fake clock to a known instant.
      fs.utimesSync(authPath, new Date("2026-03-24T10:00:00Z"), new Date("2026-03-24T10:00:00Z"));
      vi.setSystemTime(new Date("2026-03-24T10:00:00Z"));

      const first = readCodexCliCredentialsCached({
        ttlMs: CLI_CREDENTIALS_CACHE_TTL_MS,
        platform: "linux",
        execSync: execSyncMock,
      });

      expect(first).toMatchObject({
        refresh: "stale-refresh",
        expires: firstExpiry * 1000,
      });

      // Rewrite auth.json with new tokens and bump its mtime; still within TTL.
      fs.writeFileSync(
        authPath,
        JSON.stringify({
          tokens: {
            access_token: createJwtWithExp(secondExpiry),
            refresh_token: "fresh-refresh",
          },
        }),
        "utf8",
      );
      fs.utimesSync(authPath, new Date("2026-03-24T10:05:00Z"), new Date("2026-03-24T10:05:00Z"));
      vi.advanceTimersByTime(60_000);

      const second = readCodexCliCredentialsCached({
        ttlMs: CLI_CREDENTIALS_CACHE_TTL_MS,
        platform: "linux",
        execSync: execSyncMock,
      });

      // The changed fingerprint must force a re-read despite the unexpired TTL.
      expect(second).toMatchObject({
        refresh: "fresh-refresh",
        expires: secondExpiry * 1000,
      });
    } finally {
      fs.rmSync(tempHome, { recursive: true, force: true });
    }
  });

  // File writes must merge into the existing record, not replace it wholesale.
  it("updates existing Codex auth.json in place", () => {
    const tempHome = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-codex-write-"));
    process.env.CODEX_HOME = tempHome;
    try {
      fs.mkdirSync(tempHome, { recursive: true, mode: 0o700 });
      const authPath = path.join(tempHome, "auth.json");
      fs.writeFileSync(
        authPath,
        JSON.stringify(
          {
            auth_mode: "chatgpt",
            OPENAI_API_KEY: "sk-existing",
            tokens: {
              id_token: "id-token",
              access_token: "old-access",
              refresh_token: "old-refresh",
              account_id: "acct-old",
            },
            last_refresh: "2026-03-01T00:00:00.000Z",
          },
          null,
          2,
        ),
        "utf8",
      );

      const ok = writeCodexCliFileCredentials({
        access: "new-access",
        refresh: "new-refresh",
        expires: Date.now() + 60_000,
        accountId: "acct-new",
      });

      expect(ok).toBe(true);
      const persisted = JSON.parse(fs.readFileSync(authPath, "utf8")) as Record<string, unknown>;
      // Unrelated top-level keys survive the update.
      expect(persisted).toMatchObject({
        auth_mode: "chatgpt",
        OPENAI_API_KEY: "sk-existing",
      });
      // Tokens are refreshed while id_token is preserved.
      expect(persisted.tokens).toMatchObject({
        id_token: "id-token",
        access_token: "new-access",
        refresh_token: "new-refresh",
        account_id: "acct-new",
      });
      expect(typeof persisted.last_refresh).toBe("string");
    } finally {
      fs.rmSync(tempHome, { recursive: true, force: true });
    }
  });

  // darwin writes land in the keychain (via `security`) when an entry exists.
  it("prefers the existing Codex keychain entry over auth.json on darwin writes", () => {
    const tempHome = fs.mkdtempSync(path.join(os.tmpdir(), "openclaw-codex-keychain-write-"));
    process.env.CODEX_HOME = tempHome;
    try {
      const expSeconds = Math.floor(Date.parse("2026-03-26T12:34:56Z") / 1000);
      execSyncMock.mockImplementation((command: unknown) => {
        const cmd = String(command);
        expect(cmd).toContain("Codex Auth");
        return JSON.stringify({
          auth_mode: "chatgpt",
          tokens: {
            id_token: "id-token",
            access_token: createJwtWithExp(expSeconds),
            refresh_token: "old-refresh",
            account_id: "acct-old",
          },
          last_refresh: "2026-03-01T00:00:00.000Z",
        });
      });

      const ok = writeCodexCliCredentials(
        {
          access: "new-access",
          refresh: "new-refresh",
          expires: Date.now() + 60_000,
          accountId: "acct-new",
        },
        {
          platform: "darwin",
          execSync: execSyncMock,
          execFileSync: execFileSyncMock,
        },
      );

      expect(ok).toBe(true);
      expect(execFileSyncMock).toHaveBeenCalledTimes(1);
      const addCall = getAddGenericPasswordCall();
      expect(addCall?.[0]).toBe("security");
      // The stored secret is the `-w` argument of add-generic-password.
      const payload = (() => {
        const args = (addCall?.[1] as string[] | undefined) ?? [];
        const valueIndex = args.indexOf("-w");
        return valueIndex >= 0 ? args[valueIndex + 1] : undefined;
      })();
      expect(payload).toBeDefined();
      const parsed = JSON.parse(String(payload)) as Record<string, unknown>;
      expect(parsed.tokens).toMatchObject({
        id_token: "id-token",
        access_token: "new-access",
        refresh_token: "new-refresh",
        account_id: "acct-new",
      });
      expect(parsed.auth_mode).toBe("chatgpt");
    } finally {
      fs.rmSync(tempHome, { recursive: true, force: true });
    }
  });
});
|
||||
|
|
@ -1,490 +0,0 @@
|
|||
import { execFileSync, execSync } from "node:child_process";
|
||||
import { createHash } from "node:crypto";
|
||||
import fs from "node:fs";
|
||||
import path from "node:path";
|
||||
import { loadJsonFile, saveJsonFile } from "../infra/json-file.js";
|
||||
import { createSubsystemLogger } from "../logging/subsystem.js";
|
||||
import { resolveUserPath } from "../utils.js";
|
||||
import type { OAuthCredentials, OAuthProvider } from "./auth-profiles/types.js";
|
||||
|
||||
// Subsystem logger shared by every CLI credential helper in this module.
const log = createSubsystemLogger("agents/auth-profiles");

// Locations where the external CLIs persist their credentials.
const CODEX_CLI_AUTH_FILENAME = "auth.json";
const MINIMAX_CLI_CREDENTIALS_RELATIVE_PATH = ".minimax/oauth_creds.json";

// One cached read result. `sourceFingerprint` (a file mtime in practice) lets
// an entry be invalidated when the backing file changes within the TTL window.
type CachedValue<T> = {
  value: T | null;
  readAt: number;
  cacheKey: string;
  sourceFingerprint?: number | string | null;
};

// Module-level single-entry caches for the two CLI credential sources.
let codexCliCache: CachedValue<CodexCliCredential> | null = null;
let minimaxCliCache: CachedValue<MiniMaxCliCredential> | null = null;

// Test hook: drop all cached credential reads.
export function resetCliCredentialCachesForTest(): void {
  codexCliCache = null;
  minimaxCliCache = null;
}

// OAuth material recovered from a local Codex CLI install.
export type CodexCliCredential = {
  type: "oauth";
  provider: OAuthProvider;
  access: string;
  refresh: string;
  expires: number; // epoch milliseconds
  accountId?: string;
};

// OAuth material recovered from a local MiniMax CLI install.
export type MiniMaxCliCredential = {
  type: "oauth";
  provider: "minimax-portal";
  access: string;
  refresh: string;
  expires: number; // epoch milliseconds
};

// Options for the file-based Codex credential write path.
type CodexCliFileOptions = {
  codexHome?: string;
};

// Options for the combined keychain-then-file write path; the injectable
// exec/write hooks exist so tests can stub out the side-effecting pieces.
type CodexCliWriteOptions = CodexCliFileOptions & {
  platform?: NodeJS.Platform;
  execSync?: ExecSyncFn;
  execFileSync?: ExecFileSyncFn;
  writeKeychain?: (
    credentials: OAuthCredentials,
    options?: {
      codexHome?: string;
      platform?: NodeJS.Platform;
      execSync?: ExecSyncFn;
      execFileSync?: ExecFileSyncFn;
    },
  ) => boolean;
  writeFile?: (credentials: OAuthCredentials, options?: CodexCliFileOptions) => boolean;
};

// Aliases for the node:child_process function types used for injection.
type ExecSyncFn = typeof execSync;
type ExecFileSyncFn = typeof execFileSync;
|
||||
|
||||
function resolveCodexHomePath(codexHome?: string) {
|
||||
const configured = codexHome ?? process.env.CODEX_HOME;
|
||||
const home = configured ? resolveUserPath(configured) : resolveUserPath("~/.codex");
|
||||
try {
|
||||
return fs.realpathSync.native(home);
|
||||
} catch {
|
||||
return home;
|
||||
}
|
||||
}
|
||||
|
||||
function resolveMiniMaxCliCredentialsPath(homeDir?: string) {
|
||||
const baseDir = homeDir ?? resolveUserPath("~");
|
||||
return path.join(baseDir, MINIMAX_CLI_CREDENTIALS_RELATIVE_PATH);
|
||||
}
|
||||
|
||||
function readFileMtimeMs(filePath: string): number | null {
|
||||
try {
|
||||
return fs.statSync(filePath).mtimeMs;
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
function readCachedCliCredential<T>(options: {
|
||||
ttlMs: number;
|
||||
cache: CachedValue<T> | null;
|
||||
cacheKey: string;
|
||||
read: () => T | null;
|
||||
setCache: (next: CachedValue<T> | null) => void;
|
||||
readSourceFingerprint?: () => number | string | null;
|
||||
}): T | null {
|
||||
const { ttlMs, cache, cacheKey, read, setCache, readSourceFingerprint } = options;
|
||||
if (ttlMs <= 0) {
|
||||
return read();
|
||||
}
|
||||
|
||||
const now = Date.now();
|
||||
const sourceFingerprint = readSourceFingerprint?.();
|
||||
if (
|
||||
cache &&
|
||||
cache.cacheKey === cacheKey &&
|
||||
cache.sourceFingerprint === sourceFingerprint &&
|
||||
now - cache.readAt < ttlMs
|
||||
) {
|
||||
return cache.value;
|
||||
}
|
||||
|
||||
const value = read();
|
||||
const cachedSourceFingerprint = readSourceFingerprint?.();
|
||||
if (!readSourceFingerprint || cachedSourceFingerprint === sourceFingerprint) {
|
||||
setCache({
|
||||
value,
|
||||
readAt: now,
|
||||
cacheKey,
|
||||
sourceFingerprint: cachedSourceFingerprint,
|
||||
});
|
||||
} else {
|
||||
setCache(null);
|
||||
}
|
||||
return value;
|
||||
}
|
||||
|
||||
function computeCodexKeychainAccount(codexHome: string) {
|
||||
const hash = createHash("sha256").update(codexHome).digest("hex");
|
||||
return `cli|${hash.slice(0, 16)}`;
|
||||
}
|
||||
|
||||
function resolveCodexKeychainParams(options?: {
|
||||
codexHome?: string;
|
||||
platform?: NodeJS.Platform;
|
||||
execSync?: ExecSyncFn;
|
||||
}) {
|
||||
return {
|
||||
platform: options?.platform ?? process.platform,
|
||||
execSyncImpl: options?.execSync ?? execSync,
|
||||
codexHome: resolveCodexHomePath(options?.codexHome),
|
||||
};
|
||||
}
|
||||
|
||||
function decodeJwtExpiryMs(token: string): number | null {
|
||||
const parts = token.split(".");
|
||||
if (parts.length < 2) {
|
||||
return null;
|
||||
}
|
||||
try {
|
||||
const payloadRaw = Buffer.from(parts[1], "base64url").toString("utf8");
|
||||
const payload = JSON.parse(payloadRaw) as { exp?: unknown };
|
||||
return typeof payload.exp === "number" && Number.isFinite(payload.exp) && payload.exp > 0
|
||||
? payload.exp * 1000
|
||||
: null;
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
function readCodexKeychainAuthRecord(options?: {
|
||||
codexHome?: string;
|
||||
platform?: NodeJS.Platform;
|
||||
execSync?: ExecSyncFn;
|
||||
}): Record<string, unknown> | null {
|
||||
const { platform, execSyncImpl, codexHome } = resolveCodexKeychainParams(options);
|
||||
if (platform !== "darwin") {
|
||||
return null;
|
||||
}
|
||||
const account = computeCodexKeychainAccount(codexHome);
|
||||
|
||||
try {
|
||||
const secret = execSyncImpl(
|
||||
`security find-generic-password -s "Codex Auth" -a "${account}" -w`,
|
||||
{
|
||||
encoding: "utf8",
|
||||
timeout: 5000,
|
||||
stdio: ["pipe", "pipe", "pipe"],
|
||||
},
|
||||
).trim();
|
||||
|
||||
const parsed = JSON.parse(secret) as Record<string, unknown>;
|
||||
return parsed;
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Converts the raw keychain auth record into a CodexCliCredential.
 * Returns null when no record exists or when the access/refresh tokens are
 * missing or empty.
 */
function readCodexKeychainCredentials(options?: {
  codexHome?: string;
  platform?: NodeJS.Platform;
  execSync?: ExecSyncFn;
}): CodexCliCredential | null {
  const parsed = readCodexKeychainAuthRecord(options);
  if (!parsed) {
    return null;
  }
  const tokens = parsed.tokens as Record<string, unknown> | undefined;
  try {
    const accessToken = tokens?.access_token;
    const refreshToken = tokens?.refresh_token;
    // Both tokens must be non-empty strings to be usable.
    if (typeof accessToken !== "string" || !accessToken) {
      return null;
    }
    if (typeof refreshToken !== "string" || !refreshToken) {
      return null;
    }

    // No explicit expiry stored; treat as fresh for an hour from last_refresh or now.
    const lastRefreshRaw = parsed.last_refresh;
    const lastRefresh =
      typeof lastRefreshRaw === "string" || typeof lastRefreshRaw === "number"
        ? new Date(lastRefreshRaw).getTime()
        : Date.now();
    const fallbackExpiry = Number.isFinite(lastRefresh)
      ? lastRefresh + 60 * 60 * 1000
      : Date.now() + 60 * 60 * 1000;
    // The access token's own exp claim, when decodable, wins over the fallback.
    const expires = decodeJwtExpiryMs(accessToken) ?? fallbackExpiry;
    const accountId = typeof tokens?.account_id === "string" ? tokens.account_id : undefined;

    log.info("read codex credentials from keychain", {
      source: "keychain",
      expires: new Date(expires).toISOString(),
    });

    return {
      type: "oauth",
      provider: "openai-codex" as OAuthProvider,
      access: accessToken,
      refresh: refreshToken,
      expires,
      accountId,
    };
  } catch {
    // Defensive: any unexpected record shape is treated as "no credentials".
    return null;
  }
}
|
||||
|
||||
function readPortalCliOauthCredentials<TProvider extends string>(
|
||||
credPath: string,
|
||||
provider: TProvider,
|
||||
): { type: "oauth"; provider: TProvider; access: string; refresh: string; expires: number } | null {
|
||||
const raw = loadJsonFile(credPath);
|
||||
if (!raw || typeof raw !== "object") {
|
||||
return null;
|
||||
}
|
||||
const data = raw as Record<string, unknown>;
|
||||
const accessToken = data.access_token;
|
||||
const refreshToken = data.refresh_token;
|
||||
const expiresAt = data.expiry_date;
|
||||
|
||||
if (typeof accessToken !== "string" || !accessToken) {
|
||||
return null;
|
||||
}
|
||||
if (typeof refreshToken !== "string" || !refreshToken) {
|
||||
return null;
|
||||
}
|
||||
if (typeof expiresAt !== "number" || !Number.isFinite(expiresAt)) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return {
|
||||
type: "oauth",
|
||||
provider,
|
||||
access: accessToken,
|
||||
refresh: refreshToken,
|
||||
expires: expiresAt,
|
||||
};
|
||||
}
|
||||
|
||||
function readMiniMaxCliCredentials(options?: { homeDir?: string }): MiniMaxCliCredential | null {
|
||||
const credPath = resolveMiniMaxCliCredentialsPath(options?.homeDir);
|
||||
return readPortalCliOauthCredentials(credPath, "minimax-portal");
|
||||
}
|
||||
|
||||
function buildUpdatedCodexAuthRecord(
|
||||
existing: Record<string, unknown> | null,
|
||||
newCredentials: OAuthCredentials,
|
||||
): Record<string, unknown> {
|
||||
const next = existing ? { ...existing } : {};
|
||||
const existingTokens =
|
||||
next.tokens && typeof next.tokens === "object" ? (next.tokens as Record<string, unknown>) : {};
|
||||
next.auth_mode = next.auth_mode ?? "chatgpt";
|
||||
next.tokens = {
|
||||
...existingTokens,
|
||||
access_token: newCredentials.access,
|
||||
refresh_token: newCredentials.refresh,
|
||||
...(typeof newCredentials.accountId === "string" && newCredentials.accountId.trim().length > 0
|
||||
? { account_id: newCredentials.accountId }
|
||||
: {}),
|
||||
};
|
||||
next.last_refresh = new Date().toISOString();
|
||||
return next;
|
||||
}
|
||||
|
||||
/**
 * Rewrites the Codex CLI keychain entry (macOS only) with refreshed OAuth
 * tokens, preserving unrelated fields of the existing record.
 * Returns false off-darwin, when no existing entry is found, or when the
 * `security` invocation fails.
 */
export function writeCodexCliKeychainCredentials(
  newCredentials: OAuthCredentials,
  options?: {
    codexHome?: string;
    platform?: NodeJS.Platform;
    execSync?: ExecSyncFn;
    execFileSync?: ExecFileSyncFn;
  },
): boolean {
  const { platform, codexHome } = resolveCodexKeychainParams(options);
  if (platform !== "darwin") {
    return false;
  }
  // Only update an entry Codex itself created; never create one from scratch.
  const existing = readCodexKeychainAuthRecord(options);
  if (!existing) {
    return false;
  }

  const execFileSyncImpl = options?.execFileSync ?? execFileSync;
  const account = computeCodexKeychainAccount(codexHome);
  const next = buildUpdatedCodexAuthRecord(existing, newCredentials);

  try {
    // `-U` updates the existing keychain item in place.
    execFileSyncImpl(
      "security",
      ["add-generic-password", "-U", "-s", "Codex Auth", "-a", account, "-w", JSON.stringify(next)],
      { encoding: "utf8", timeout: 5000, stdio: ["pipe", "pipe", "pipe"] },
    );
    // Invalidate cached reads so the refreshed tokens are picked up.
    codexCliCache = null;
    log.info("wrote refreshed credentials to codex cli keychain", {
      expires: new Date(newCredentials.expires).toISOString(),
    });
    return true;
  } catch (error) {
    log.warn("failed to write credentials to codex cli keychain", {
      error: error instanceof Error ? error.message : String(error),
    });
    return false;
  }
}
|
||||
|
||||
export function writeCodexCliFileCredentials(
|
||||
newCredentials: OAuthCredentials,
|
||||
options?: CodexCliFileOptions,
|
||||
): boolean {
|
||||
const codexHome = resolveCodexHomePath(options?.codexHome);
|
||||
const authPath = path.join(codexHome, CODEX_CLI_AUTH_FILENAME);
|
||||
if (!fs.existsSync(authPath)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
try {
|
||||
const raw = loadJsonFile(authPath);
|
||||
if (!raw || typeof raw !== "object") {
|
||||
return false;
|
||||
}
|
||||
const next = buildUpdatedCodexAuthRecord(raw as Record<string, unknown>, newCredentials);
|
||||
saveJsonFile(authPath, next);
|
||||
codexCliCache = null;
|
||||
log.info("wrote refreshed credentials to codex cli file", {
|
||||
expires: new Date(newCredentials.expires).toISOString(),
|
||||
});
|
||||
return true;
|
||||
} catch (error) {
|
||||
log.warn("failed to write credentials to codex cli file", {
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
});
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
export function writeCodexCliCredentials(
|
||||
newCredentials: OAuthCredentials,
|
||||
options?: CodexCliWriteOptions,
|
||||
): boolean {
|
||||
const platform = options?.platform ?? process.platform;
|
||||
const writeKeychain = options?.writeKeychain ?? writeCodexCliKeychainCredentials;
|
||||
const writeFile =
|
||||
options?.writeFile ??
|
||||
((credentials, fileOptions) => writeCodexCliFileCredentials(credentials, fileOptions));
|
||||
|
||||
if (
|
||||
platform === "darwin" &&
|
||||
writeKeychain(newCredentials, {
|
||||
codexHome: options?.codexHome,
|
||||
platform,
|
||||
execSync: options?.execSync,
|
||||
execFileSync: options?.execFileSync,
|
||||
})
|
||||
) {
|
||||
return true;
|
||||
}
|
||||
|
||||
return writeFile(newCredentials, { codexHome: options?.codexHome });
|
||||
}
|
||||
|
||||
/**
 * Reads Codex CLI credentials, preferring the macOS keychain and falling back
 * to auth.json under the Codex home. Returns null when neither source yields
 * a usable access/refresh token pair.
 */
export function readCodexCliCredentials(options?: {
  codexHome?: string;
  platform?: NodeJS.Platform;
  execSync?: ExecSyncFn;
}): CodexCliCredential | null {
  const keychain = readCodexKeychainCredentials({
    codexHome: options?.codexHome,
    platform: options?.platform,
    execSync: options?.execSync,
  });
  if (keychain) {
    return keychain;
  }

  const authPath = path.join(resolveCodexHomePath(options?.codexHome), CODEX_CLI_AUTH_FILENAME);
  const raw = loadJsonFile(authPath);
  if (!raw || typeof raw !== "object") {
    return null;
  }

  const data = raw as Record<string, unknown>;
  const tokens = data.tokens as Record<string, unknown> | undefined;
  if (!tokens || typeof tokens !== "object") {
    return null;
  }

  const accessToken = tokens.access_token;
  const refreshToken = tokens.refresh_token;

  // Both tokens must be non-empty strings to be usable.
  if (typeof accessToken !== "string" || !accessToken) {
    return null;
  }
  if (typeof refreshToken !== "string" || !refreshToken) {
    return null;
  }

  // auth.json stores no expiry; assume one hour of freshness from the file's
  // last modification (or from now when the file cannot be stat'ed).
  let fallbackExpiry: number;
  try {
    const stat = fs.statSync(authPath);
    fallbackExpiry = stat.mtimeMs + 60 * 60 * 1000;
  } catch {
    fallbackExpiry = Date.now() + 60 * 60 * 1000;
  }
  // The access token's own exp claim, when decodable, wins over the fallback.
  const expires = decodeJwtExpiryMs(accessToken) ?? fallbackExpiry;

  return {
    type: "oauth",
    provider: "openai-codex" as OAuthProvider,
    access: accessToken,
    refresh: refreshToken,
    expires,
    accountId: typeof tokens.account_id === "string" ? tokens.account_id : undefined,
  };
}
|
||||
|
||||
export function readCodexCliCredentialsCached(options?: {
|
||||
codexHome?: string;
|
||||
ttlMs?: number;
|
||||
platform?: NodeJS.Platform;
|
||||
execSync?: ExecSyncFn;
|
||||
}): CodexCliCredential | null {
|
||||
const authPath = path.join(resolveCodexHomePath(options?.codexHome), CODEX_CLI_AUTH_FILENAME);
|
||||
return readCachedCliCredential({
|
||||
ttlMs: options?.ttlMs ?? 0,
|
||||
cache: codexCliCache,
|
||||
cacheKey: `${options?.platform ?? process.platform}|${authPath}`,
|
||||
read: () =>
|
||||
readCodexCliCredentials({
|
||||
codexHome: options?.codexHome,
|
||||
platform: options?.platform,
|
||||
execSync: options?.execSync,
|
||||
}),
|
||||
setCache: (next) => {
|
||||
codexCliCache = next;
|
||||
},
|
||||
readSourceFingerprint: () => readFileMtimeMs(authPath),
|
||||
});
|
||||
}
|
||||
|
||||
export function readMiniMaxCliCredentialsCached(options?: {
|
||||
ttlMs?: number;
|
||||
homeDir?: string;
|
||||
}): MiniMaxCliCredential | null {
|
||||
const credPath = resolveMiniMaxCliCredentialsPath(options?.homeDir);
|
||||
return readCachedCliCredential({
|
||||
ttlMs: options?.ttlMs ?? 0,
|
||||
cache: minimaxCliCache,
|
||||
cacheKey: credPath,
|
||||
read: () => readMiniMaxCliCredentials({ homeDir: options?.homeDir }),
|
||||
setCache: (next) => {
|
||||
minimaxCliCache = next;
|
||||
},
|
||||
readSourceFingerprint: () => readFileMtimeMs(credPath),
|
||||
});
|
||||
}
|
||||
|
|
@ -1,214 +0,0 @@
|
|||
import { describe, expect, it } from "vitest";
|
||||
import { parseCliJson, parseCliJsonl } from "./cli-output.js";
|
||||
|
||||
describe("parseCliJson", () => {
  // Claude mixes banner text with JSON; the parser must recover the embedded
  // init/result objects rather than requiring a clean payload.
  it("recovers mixed-output Claude session metadata from embedded JSON objects", () => {
    const result = parseCliJson(
      [
        "Claude Code starting...",
        '{"type":"init","session_id":"session-789"}',
        '{"type":"result","result":"Claude says hi","usage":{"input_tokens":9,"output_tokens":4}}',
      ].join("\n"),
      {
        command: "claude",
        output: "json",
        sessionIdFields: ["session_id"],
      },
    );

    expect(result).toEqual({
      text: "Claude says hi",
      sessionId: "session-789",
      usage: {
        input: 9,
        output: 4,
        cacheRead: undefined,
        cacheWrite: undefined,
        total: undefined,
      },
    });
  });

  // Gemini reports usage under `stats`; an explicit `input` wins over derivation.
  it("parses Gemini CLI response text and stats payloads", () => {
    const result = parseCliJson(
      JSON.stringify({
        session_id: "gemini-session-123",
        response: "Gemini says hello",
        stats: {
          total_tokens: 21,
          input_tokens: 13,
          output_tokens: 5,
          cached: 8,
          input: 5,
        },
      }),
      {
        command: "gemini",
        output: "json",
        sessionIdFields: ["session_id"],
      },
    );

    expect(result).toEqual({
      text: "Gemini says hello",
      sessionId: "gemini-session-123",
      usage: {
        input: 5,
        output: 5,
        cacheRead: 8,
        cacheWrite: undefined,
        total: 21,
      },
    });
  });

  // Without an explicit `input`, input is derived as input_tokens minus cached.
  it("falls back to input_tokens minus cached when Gemini stats omit input", () => {
    const result = parseCliJson(
      JSON.stringify({
        session_id: "gemini-session-456",
        response: "Hello",
        stats: {
          total_tokens: 21,
          input_tokens: 13,
          output_tokens: 5,
          cached: 8,
        },
      }),
      {
        command: "gemini",
        output: "json",
        sessionIdFields: ["session_id"],
      },
    );

    expect(result?.usage?.input).toBe(5);
    expect(result?.usage?.cacheRead).toBe(8);
  });

  // An empty `usage` object must not mask a populated `stats` payload.
  it("falls back to Gemini stats when usage exists without token fields", () => {
    const result = parseCliJson(
      JSON.stringify({
        session_id: "gemini-session-789",
        response: "Gemini says hello",
        usage: {},
        stats: {
          total_tokens: 21,
          input_tokens: 13,
          output_tokens: 5,
          cached: 8,
          input: 5,
        },
      }),
      {
        command: "gemini",
        output: "json",
        sessionIdFields: ["session_id"],
      },
    );

    expect(result).toEqual({
      text: "Gemini says hello",
      sessionId: "gemini-session-789",
      usage: {
        input: 5,
        output: 5,
        cacheRead: 8,
        cacheWrite: undefined,
        total: 21,
      },
    });
  });
});
|
||||
|
||||
describe("parseCliJsonl", () => {
  // One JSON event per line; the `result` event carries the text and usage.
  it("parses generic jsonl result events", () => {
    const result = parseCliJsonl(
      [
        JSON.stringify({ type: "init", session_id: "session-123" }),
        JSON.stringify({
          type: "result",
          session_id: "session-123",
          result: "Claude says hello",
          usage: {
            input_tokens: 12,
            output_tokens: 3,
            cache_read_input_tokens: 4,
          },
        }),
      ].join("\n"),
      {
        command: "codex",
        output: "jsonl",
        sessionIdFields: ["session_id"],
      },
      "codex-cli",
    );

    expect(result).toEqual({
      text: "Claude says hello",
      sessionId: "session-123",
      usage: {
        input: 12,
        output: 3,
        cacheRead: 4,
        cacheWrite: undefined,
        total: undefined,
      },
    });
  });

  // cache_creation_input_tokens must surface as cacheWrite, not be dropped.
  it("preserves cache creation tokens instead of flattening them to zero", () => {
    const result = parseCliJsonl(
      [
        JSON.stringify({ type: "init", session_id: "session-cache-123" }),
        JSON.stringify({
          type: "result",
          session_id: "session-cache-123",
          result: "Claude says hello",
          usage: {
            input_tokens: 12,
            output_tokens: 3,
            cache_read_input_tokens: 4,
            cache_creation_input_tokens: 7,
          },
        }),
      ].join("\n"),
      {
        command: "codex",
        output: "jsonl",
        sessionIdFields: ["session_id"],
      },
      "codex-cli",
    );

    expect(result).toEqual({
      text: "Claude says hello",
      sessionId: "session-cache-123",
      usage: {
        input: 12,
        output: 3,
        cacheRead: 4,
        cacheWrite: 7,
        total: undefined,
      },
    });
  });

  // Lines are not guaranteed to hold exactly one object each.
  it("parses multiple JSON objects embedded on the same line", () => {
    const result = parseCliJsonl(
      '{"type":"init","session_id":"session-999"} {"type":"result","session_id":"session-999","result":"done"}',
      {
        command: "codex",
        output: "jsonl",
        sessionIdFields: ["session_id"],
      },
      "codex-cli",
    );

    expect(result).toEqual({
      text: "done",
      sessionId: "session-999",
      usage: undefined,
    });
  });
});
|
||||
|
|
@ -1,386 +0,0 @@
|
|||
import type { CliBackendConfig } from "../config/types.js";
|
||||
import { isRecord } from "../utils.js";
|
||||
|
||||
// Normalized token-usage counters extracted from a CLI run. All fields are
// optional; an absent field means the CLI did not report that counter.
type CliUsage = {
  input?: number;
  output?: number;
  cacheRead?: number;
  cacheWrite?: number;
  total?: number;
};

// Final parsed result of a CLI invocation: response text plus an optional
// session id and usage counters.
export type CliOutput = {
  text: string;
  sessionId?: string;
  usage?: CliUsage;
};

// Streaming update shape: `text` alongside a `delta` chunk (presumably the
// accumulated text and the newest fragment — confirm against the emitters).
export type CliStreamingDelta = {
  text: string;
  delta: string;
  sessionId?: string;
  usage?: CliUsage;
};
|
||||
|
||||
function extractJsonObjectCandidates(raw: string): string[] {
|
||||
const candidates: string[] = [];
|
||||
let depth = 0;
|
||||
let start = -1;
|
||||
let inString = false;
|
||||
let escaped = false;
|
||||
|
||||
for (let index = 0; index < raw.length; index += 1) {
|
||||
const char = raw[index] ?? "";
|
||||
if (escaped) {
|
||||
escaped = false;
|
||||
continue;
|
||||
}
|
||||
if (char === "\\") {
|
||||
if (inString) {
|
||||
escaped = true;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
if (char === '"') {
|
||||
inString = !inString;
|
||||
continue;
|
||||
}
|
||||
if (inString) {
|
||||
continue;
|
||||
}
|
||||
if (char === "{") {
|
||||
if (depth === 0) {
|
||||
start = index;
|
||||
}
|
||||
depth += 1;
|
||||
continue;
|
||||
}
|
||||
if (char === "}" && depth > 0) {
|
||||
depth -= 1;
|
||||
if (depth === 0 && start >= 0) {
|
||||
candidates.push(raw.slice(start, index + 1));
|
||||
start = -1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return candidates;
|
||||
}
|
||||
|
||||
function parseJsonRecordCandidates(raw: string): Record<string, unknown>[] {
|
||||
const parsedRecords: Record<string, unknown>[] = [];
|
||||
const trimmed = raw.trim();
|
||||
if (!trimmed) {
|
||||
return parsedRecords;
|
||||
}
|
||||
|
||||
try {
|
||||
const parsed = JSON.parse(trimmed);
|
||||
if (isRecord(parsed)) {
|
||||
parsedRecords.push(parsed);
|
||||
return parsedRecords;
|
||||
}
|
||||
} catch {
|
||||
// Fall back to scanning for top-level JSON objects embedded in mixed output.
|
||||
}
|
||||
|
||||
for (const candidate of extractJsonObjectCandidates(trimmed)) {
|
||||
try {
|
||||
const parsed = JSON.parse(candidate);
|
||||
if (isRecord(parsed)) {
|
||||
parsedRecords.push(parsed);
|
||||
}
|
||||
} catch {
|
||||
// Ignore malformed fragments and keep scanning remaining objects.
|
||||
}
|
||||
}
|
||||
|
||||
return parsedRecords;
|
||||
}
|
||||
|
||||
function toCliUsage(raw: Record<string, unknown>): CliUsage | undefined {
|
||||
const pick = (key: string) =>
|
||||
typeof raw[key] === "number" && raw[key] > 0 ? raw[key] : undefined;
|
||||
const totalInput = pick("input_tokens") ?? pick("inputTokens");
|
||||
const output = pick("output_tokens") ?? pick("outputTokens");
|
||||
const cacheRead =
|
||||
pick("cache_read_input_tokens") ??
|
||||
pick("cached_input_tokens") ??
|
||||
pick("cacheRead") ??
|
||||
pick("cached");
|
||||
const input =
|
||||
pick("input") ??
|
||||
(Object.hasOwn(raw, "cached") && typeof totalInput === "number"
|
||||
? Math.max(0, totalInput - (cacheRead ?? 0))
|
||||
: totalInput);
|
||||
const cacheWrite =
|
||||
pick("cache_creation_input_tokens") ?? pick("cache_write_input_tokens") ?? pick("cacheWrite");
|
||||
const total = pick("total_tokens") ?? pick("total");
|
||||
if (!input && !output && !cacheRead && !cacheWrite && !total) {
|
||||
return undefined;
|
||||
}
|
||||
return { input, output, cacheRead, cacheWrite, total };
|
||||
}
|
||||
|
||||
function readCliUsage(parsed: Record<string, unknown>): CliUsage | undefined {
|
||||
if (isRecord(parsed.usage)) {
|
||||
const usage = toCliUsage(parsed.usage);
|
||||
if (usage) {
|
||||
return usage;
|
||||
}
|
||||
}
|
||||
if (isRecord(parsed.stats)) {
|
||||
return toCliUsage(parsed.stats);
|
||||
}
|
||||
return undefined;
|
||||
}
|
||||
|
||||
function collectCliText(value: unknown): string {
|
||||
if (!value) {
|
||||
return "";
|
||||
}
|
||||
if (typeof value === "string") {
|
||||
return value;
|
||||
}
|
||||
if (Array.isArray(value)) {
|
||||
return value.map((entry) => collectCliText(entry)).join("");
|
||||
}
|
||||
if (!isRecord(value)) {
|
||||
return "";
|
||||
}
|
||||
if (typeof value.response === "string") {
|
||||
return value.response;
|
||||
}
|
||||
if (typeof value.text === "string") {
|
||||
return value.text;
|
||||
}
|
||||
if (typeof value.result === "string") {
|
||||
return value.result;
|
||||
}
|
||||
if (typeof value.content === "string") {
|
||||
return value.content;
|
||||
}
|
||||
if (Array.isArray(value.content)) {
|
||||
return value.content.map((entry) => collectCliText(entry)).join("");
|
||||
}
|
||||
if (isRecord(value.message)) {
|
||||
return collectCliText(value.message);
|
||||
}
|
||||
return "";
|
||||
}
|
||||
|
||||
function pickCliSessionId(
|
||||
parsed: Record<string, unknown>,
|
||||
backend: CliBackendConfig,
|
||||
): string | undefined {
|
||||
const fields = backend.sessionIdFields ?? [
|
||||
"session_id",
|
||||
"sessionId",
|
||||
"conversation_id",
|
||||
"conversationId",
|
||||
];
|
||||
for (const field of fields) {
|
||||
const value = parsed[field];
|
||||
if (typeof value === "string" && value.trim()) {
|
||||
return value.trim();
|
||||
}
|
||||
}
|
||||
return undefined;
|
||||
}
|
||||
|
||||
export function parseCliJson(raw: string, backend: CliBackendConfig): CliOutput | null {
|
||||
const parsedRecords = parseJsonRecordCandidates(raw);
|
||||
if (parsedRecords.length === 0) {
|
||||
return null;
|
||||
}
|
||||
|
||||
let sessionId: string | undefined;
|
||||
let usage: CliUsage | undefined;
|
||||
let text = "";
|
||||
let sawStructuredOutput = false;
|
||||
for (const parsed of parsedRecords) {
|
||||
sessionId = pickCliSessionId(parsed, backend) ?? sessionId;
|
||||
usage = readCliUsage(parsed) ?? usage;
|
||||
const nextText =
|
||||
collectCliText(parsed.message) ||
|
||||
collectCliText(parsed.content) ||
|
||||
collectCliText(parsed.result) ||
|
||||
collectCliText(parsed.response) ||
|
||||
collectCliText(parsed);
|
||||
const trimmedText = nextText.trim();
|
||||
if (trimmedText) {
|
||||
text = trimmedText;
|
||||
sawStructuredOutput = true;
|
||||
continue;
|
||||
}
|
||||
if (sessionId || usage) {
|
||||
sawStructuredOutput = true;
|
||||
}
|
||||
}
|
||||
|
||||
if (!text && !sawStructuredOutput) {
|
||||
return null;
|
||||
}
|
||||
return { text, sessionId, usage };
|
||||
}
|
||||
|
||||
/**
 * Create an incremental parser for JSONL (newline-delimited JSON) CLI
 * output. Callers feed raw stdout chunks via `push()` and call `finish()`
 * at end-of-stream; `onAssistantDelta` fires whenever new assistant text
 * appears, with both the accumulated text and just the appended delta.
 *
 * State is closed over: a partial-line buffer, the accumulated assistant
 * text, and the most recently seen session id / usage counters.
 */
export function createCliJsonlStreamingParser(params: {
  backend: CliBackendConfig;
  providerId: string;
  onAssistantDelta: (delta: CliStreamingDelta) => void;
}) {
  let lineBuffer = "";
  let assistantText = "";
  let sessionId: string | undefined;
  let usage: CliUsage | undefined;

  // Fold one parsed JSON record into the streaming state and emit a delta
  // callback if it contributed new assistant text.
  const handleParsedRecord = (parsed: Record<string, unknown>) => {
    sessionId = pickCliSessionId(parsed, params.backend) ?? sessionId;
    // Codex-style output carries the id in `thread_id` instead.
    if (!sessionId && typeof parsed.thread_id === "string") {
      sessionId = parsed.thread_id.trim();
    }
    if (isRecord(parsed.usage)) {
      usage = toCliUsage(parsed.usage) ?? usage;
    }

    const nextText =
      collectCliText(parsed.message) ||
      collectCliText(parsed.content) ||
      collectCliText(parsed.result) ||
      collectCliText(parsed.response);
    if (!nextText) {
      return;
    }
    // If the record repeats the full text so far, emit only the new suffix;
    // otherwise treat the whole record text as the delta.
    const deltaText = nextText.startsWith(assistantText)
      ? nextText.slice(assistantText.length)
      : nextText;
    if (!deltaText) {
      return;
    }
    assistantText = nextText;
    params.onAssistantDelta({
      text: assistantText,
      delta: deltaText,
      sessionId,
      usage,
    });
  };

  // Drain complete lines from the buffer; when `flushPartial` is set (at
  // end-of-stream), also parse whatever trailing partial line remains.
  const flushLines = (flushPartial: boolean) => {
    while (true) {
      const newlineIndex = lineBuffer.indexOf("\n");
      if (newlineIndex < 0) {
        break;
      }
      const line = lineBuffer.slice(0, newlineIndex).trim();
      lineBuffer = lineBuffer.slice(newlineIndex + 1);
      if (!line) {
        continue;
      }
      for (const parsed of parseJsonRecordCandidates(line)) {
        handleParsedRecord(parsed);
      }
    }
    if (!flushPartial) {
      return;
    }
    const tail = lineBuffer.trim();
    lineBuffer = "";
    if (!tail) {
      return;
    }
    for (const parsed of parseJsonRecordCandidates(tail)) {
      handleParsedRecord(parsed);
    }
  };

  return {
    // Feed a raw stdout chunk; complete lines are parsed immediately.
    push(chunk: string) {
      if (!chunk) {
        return;
      }
      lineBuffer += chunk;
      flushLines(false);
    },
    // Signal end-of-stream; parses any trailing partial line.
    finish() {
      flushLines(true);
    },
  };
}
|
||||
|
||||
/**
 * Parse complete JSONL CLI output in one shot (non-streaming counterpart of
 * createCliJsonlStreamingParser). Every non-empty line is scanned for JSON
 * records; text fragments are joined with newlines. Returns null when no
 * lines or no text were found so the caller can fall back to raw text.
 */
export function parseCliJsonl(
  raw: string,
  backend: CliBackendConfig,
  _providerId: string,
): CliOutput | null {
  const lines = raw
    .split(/\r?\n/g)
    .map((line) => line.trim())
    .filter(Boolean);
  if (lines.length === 0) {
    return null;
  }
  let sessionId: string | undefined;
  let usage: CliUsage | undefined;
  const texts: string[] = [];
  for (const line of lines) {
    for (const parsed of parseJsonRecordCandidates(line)) {
      // First session id wins; codex-style `thread_id` is the fallback.
      if (!sessionId) {
        sessionId = pickCliSessionId(parsed, backend);
      }
      if (!sessionId && typeof parsed.thread_id === "string") {
        sessionId = parsed.thread_id.trim();
      }
      // Usage keeps updating so the last reported counters win.
      usage = readCliUsage(parsed) ?? usage;

      // Codex-style `item` entries: keep only message-like (or untyped)
      // items' text, skipping reasoning/tool items that also carry `text`.
      const item = isRecord(parsed.item) ? parsed.item : null;
      if (item && typeof item.text === "string") {
        const type = typeof item.type === "string" ? item.type.toLowerCase() : "";
        if (!type || type.includes("message")) {
          texts.push(item.text);
          continue;
        }
      }
      const nextText =
        collectCliText(parsed.message) ||
        collectCliText(parsed.content) ||
        collectCliText(parsed.result) ||
        collectCliText(parsed.response);
      if (nextText) {
        texts.push(nextText);
      }
    }
  }
  const text = texts.join("\n").trim();
  if (!text) {
    return null;
  }
  return { text, sessionId, usage };
}
|
||||
|
||||
export function parseCliOutput(params: {
|
||||
raw: string;
|
||||
backend: CliBackendConfig;
|
||||
providerId: string;
|
||||
outputMode?: "json" | "jsonl" | "text";
|
||||
fallbackSessionId?: string;
|
||||
}): CliOutput {
|
||||
const outputMode = params.outputMode ?? "text";
|
||||
if (outputMode === "text") {
|
||||
return { text: params.raw.trim(), sessionId: params.fallbackSessionId };
|
||||
}
|
||||
if (outputMode === "jsonl") {
|
||||
return (
|
||||
parseCliJsonl(params.raw, params.backend, params.providerId) ?? {
|
||||
text: params.raw.trim(),
|
||||
sessionId: params.fallbackSessionId,
|
||||
}
|
||||
);
|
||||
}
|
||||
return (
|
||||
parseCliJson(params.raw, params.backend) ?? {
|
||||
text: params.raw.trim(),
|
||||
sessionId: params.fallbackSessionId,
|
||||
}
|
||||
);
|
||||
}
|
||||
|
|
@ -1,107 +0,0 @@
|
|||
import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { describe, expect, it, vi } from "vitest";
|
||||
import type { OpenClawConfig } from "../config/config.js";
|
||||
import { createEmptyPluginRegistry } from "../plugins/registry.js";
|
||||
import { setActivePluginRegistry } from "../plugins/runtime.js";
|
||||
import { captureEnv } from "../test-utils/env.js";
|
||||
import {
|
||||
writeBundleProbeMcpServer,
|
||||
writeClaudeBundle,
|
||||
writeFakeClaudeCli,
|
||||
} from "./bundle-mcp.test-harness.js";
|
||||
|
||||
vi.mock("./cli-runner/helpers.js", async () => {
  const original =
    await vi.importActual<typeof import("./cli-runner/helpers.js")>("./cli-runner/helpers.js");
  return {
    ...original,
    // This e2e only validates bundle MCP wiring into the spawned CLI backend.
    // Stub the large prompt-construction path so cold Vitest workers do not
    // time out before the actual MCP roundtrip runs.
    buildSystemPrompt: () => "Bundle MCP e2e test prompt.",
  };
});

// This e2e spins a real stdio MCP server plus a spawned CLI process, which is
// notably slower under Docker and cold Vitest imports.
const E2E_TIMEOUT_MS = 40_000;

describe("runCliAgent bundle MCP e2e", () => {
  it(
    "routes enabled bundle MCP config into a registered CLI backend and executes the tool",
    { timeout: E2E_TIMEOUT_MS },
    async () => {
      // Import lazily so the vi.mock above is applied before module load.
      const { runCliAgent } = await import("./cli-runner.js");
      // Redirect HOME into a temp dir so plugin/extension paths are isolated.
      const envSnapshot = captureEnv(["HOME"]);
      const tempHome = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-cli-bundle-mcp-"));
      process.env.HOME = tempHome;

      const workspaceDir = path.join(tempHome, "workspace");
      const sessionFile = path.join(tempHome, "session.jsonl");
      const binDir = path.join(tempHome, "bin");
      const serverScriptPath = path.join(tempHome, "mcp", "bundle-probe.mjs");
      const fakeClaudePath = path.join(binDir, "fake-claude.mjs");
      const pluginRoot = path.join(tempHome, ".openclaw", "extensions", "bundle-probe");
      // Register a synthetic CLI backend ("bundle-cli") that runs the fake
      // claude script via node, with bundle MCP enabled.
      const registry = createEmptyPluginRegistry();
      registry.cliBackends = [
        {
          pluginId: "bundle-cli-test",
          source: "test",
          backend: {
            id: "bundle-cli",
            bundleMcp: true,
            config: {
              command: "node",
              args: [fakeClaudePath],
              output: "jsonl",
              input: "arg",
              sessionArg: "--session-id",
              sessionIdFields: ["session_id"],
              clearEnv: [],
            },
          },
        },
      ];
      setActivePluginRegistry(registry);
      await fs.mkdir(workspaceDir, { recursive: true });
      // Materialize the probe MCP server, fake CLI, and plugin bundle on disk.
      await writeBundleProbeMcpServer(serverScriptPath);
      await writeFakeClaudeCli(fakeClaudePath);
      await writeClaudeBundle({ pluginRoot, serverScriptPath });

      const config: OpenClawConfig = {
        agents: {
          defaults: {
            workspace: workspaceDir,
          },
        },
        plugins: {
          entries: {
            "bundle-probe": { enabled: true },
          },
        },
      };

      try {
        const result = await runCliAgent({
          sessionId: "session:test",
          sessionFile,
          workspaceDir,
          config,
          prompt: "Use your configured MCP tools and report the bundle probe text.",
          provider: "bundle-cli",
          model: "test-bundle",
          timeoutMs: 10_000,
          runId: "bundle-mcp-e2e",
        });

        // The probe server's marker text must round-trip through the CLI,
        // and the run must report a non-empty session id.
        expect(result.payloads?.[0]?.text).toContain("BUNDLE MCP OK FROM-BUNDLE");
        expect(result.meta.agentMeta?.sessionId.length ?? 0).toBeGreaterThan(0);
      } finally {
        await fs.rm(tempHome, { recursive: true, force: true });
        envSnapshot.restore();
      }
    },
  );
});
|
||||
|
|
@ -1,223 +0,0 @@
|
|||
import fs from "node:fs/promises";
|
||||
import type { ImageContent } from "@mariozechner/pi-ai";
|
||||
import { beforeEach, describe, expect, it, vi } from "vitest";
|
||||
import { resolvePreferredOpenClawTmpDir } from "../infra/tmp-openclaw-dir.js";
|
||||
import { MAX_IMAGE_BYTES } from "../media/constants.js";
|
||||
import {
|
||||
buildSystemPrompt,
|
||||
buildCliArgs,
|
||||
loadPromptRefImages,
|
||||
resolveCliRunQueueKey,
|
||||
writeCliImages,
|
||||
} from "./cli-runner/helpers.js";
|
||||
import * as promptImageUtils from "./pi-embedded-runner/run/images.js";
|
||||
import type { SandboxFsBridge } from "./sandbox/fs-bridge.js";
|
||||
import { SYSTEM_PROMPT_CACHE_BOUNDARY } from "./system-prompt-cache-boundary.js";
|
||||
import * as toolImages from "./tool-images.js";
|
||||
|
||||
// Unit tests for loadPromptRefImages: extraction of image path refs from a
// prompt, loading them via loadImageFromRef, and sanitizing the results.
describe("loadPromptRefImages", () => {
  beforeEach(() => {
    vi.restoreAllMocks();
  });

  it("returns empty results when the prompt has no image refs", async () => {
    const loadImageFromRefSpy = vi.spyOn(promptImageUtils, "loadImageFromRef");
    const sanitizeImageBlocksSpy = vi.spyOn(toolImages, "sanitizeImageBlocks");

    await expect(
      loadPromptRefImages({
        prompt: "just text",
        workspaceDir: "/workspace",
      }),
    ).resolves.toEqual([]);

    // Neither the loader nor the sanitizer should run for a text-only prompt.
    expect(loadImageFromRefSpy).not.toHaveBeenCalled();
    expect(sanitizeImageBlocksSpy).not.toHaveBeenCalled();
  });

  it("passes the max-byte guardrail through load and sanitize", async () => {
    const loadedImage: ImageContent = {
      type: "image",
      data: "c29tZS1pbWFnZQ==",
      mimeType: "image/png",
    };
    const sanitizedImage: ImageContent = {
      type: "image",
      data: "c2FuaXRpemVkLWltYWdl",
      mimeType: "image/jpeg",
    };
    const sandbox = {
      root: "/sandbox",
      bridge: {} as SandboxFsBridge,
    };

    const loadImageFromRefSpy = vi
      .spyOn(promptImageUtils, "loadImageFromRef")
      .mockResolvedValueOnce(loadedImage);
    const sanitizeImageBlocksSpy = vi
      .spyOn(toolImages, "sanitizeImageBlocks")
      .mockResolvedValueOnce({ images: [sanitizedImage], dropped: 0 });

    const result = await loadPromptRefImages({
      prompt: "Look at /tmp/photo.png",
      workspaceDir: "/workspace",
      workspaceOnly: true,
      sandbox,
    });

    // MAX_IMAGE_BYTES plus workspaceOnly/sandbox must reach the loader…
    const [ref, workspaceDir, options] = loadImageFromRefSpy.mock.calls[0] ?? [];
    expect(ref).toMatchObject({ resolved: "/tmp/photo.png", type: "path" });
    expect(workspaceDir).toBe("/workspace");
    expect(options).toEqual({
      maxBytes: MAX_IMAGE_BYTES,
      workspaceOnly: true,
      sandbox,
    });
    // …and the same byte cap must reach the sanitizer.
    expect(sanitizeImageBlocksSpy).toHaveBeenCalledWith([loadedImage], "prompt:images", {
      maxBytes: MAX_IMAGE_BYTES,
    });
    expect(result).toEqual([sanitizedImage]);
  });

  it("dedupes repeated refs and skips failed loads before sanitizing", async () => {
    const loadedImage: ImageContent = {
      type: "image",
      data: "b25lLWltYWdl",
      mimeType: "image/png",
    };

    // First ref loads; second ref (b.png) fails (resolves null).
    const loadImageFromRefSpy = vi
      .spyOn(promptImageUtils, "loadImageFromRef")
      .mockResolvedValueOnce(loadedImage)
      .mockResolvedValueOnce(null);
    const sanitizeImageBlocksSpy = vi
      .spyOn(toolImages, "sanitizeImageBlocks")
      .mockResolvedValueOnce({ images: [loadedImage], dropped: 0 });

    const result = await loadPromptRefImages({
      prompt: "Compare /tmp/a.png with /tmp/a.png and /tmp/b.png",
      workspaceDir: "/workspace",
    });

    // a.png appears twice in the prompt but is loaded only once.
    expect(loadImageFromRefSpy).toHaveBeenCalledTimes(2);
    expect(
      loadImageFromRefSpy.mock.calls.map(
        (call) => (call[0] as { resolved?: string } | undefined)?.resolved,
      ),
    ).toEqual(["/tmp/a.png", "/tmp/b.png"]);
    expect(sanitizeImageBlocksSpy).toHaveBeenCalledWith([loadedImage], "prompt:images", {
      maxBytes: MAX_IMAGE_BYTES,
    });
    expect(result).toEqual([loadedImage]);
  });
});
|
||||
|
||||
// Unit tests for buildCliArgs: CLI argv construction from backend config.
describe("buildCliArgs", () => {
  it("keeps passing model overrides on resumed CLI sessions", () => {
    // Resume args come first; the model flag is still appended.
    expect(
      buildCliArgs({
        backend: {
          command: "codex",
          modelArg: "--model",
        },
        baseArgs: ["exec", "resume", "thread-123"],
        modelId: "gpt-5.4",
        useResume: true,
      }),
    ).toEqual(["exec", "resume", "thread-123", "--model", "gpt-5.4"]);
  });

  it("strips the internal cache boundary from CLI system prompt args", () => {
    // The SYSTEM_PROMPT_CACHE_BOUNDARY marker is internal-only and must be
    // replaced with a newline before the prompt reaches the CLI.
    expect(
      buildCliArgs({
        backend: {
          command: "claude",
          systemPromptArg: "--append-system-prompt",
        },
        baseArgs: ["-p"],
        modelId: "claude-sonnet-4-6",
        systemPrompt: `Stable prefix${SYSTEM_PROMPT_CACHE_BOUNDARY}Dynamic suffix`,
        useResume: false,
      }),
    ).toEqual(["-p", "--append-system-prompt", "Stable prefix\nDynamic suffix"]);
  });
});
|
||||
|
||||
// Unit test for buildSystemPrompt: the prompt content should not vary by
// CLI backend id.
describe("buildSystemPrompt", () => {
  it("keeps prompts unchanged across CLI backends", () => {
    const prompt = buildSystemPrompt({
      workspaceDir: "/tmp/openclaw",
      modelDisplay: "gpt-5.4",
      tools: [],
      backendId: "codex-cli",
    });

    // Spot-check the stable sections of the generated prompt.
    expect(prompt).toContain("You are a personal assistant running inside OpenClaw.");
    expect(prompt).toContain("## OpenClaw CLI Quick Reference");
    expect(prompt).toContain("OpenClaw docs:");
  });
});
|
||||
|
||||
// Unit tests for writeCliImages: materializing image blocks to temp files
// for CLI backends that take image paths instead of inline data.
describe("writeCliImages", () => {
  it("uses stable hashed file paths so repeated image hydration reuses the same path", async () => {
    const image: ImageContent = {
      type: "image",
      data: "c29tZS1pbWFnZQ==",
      mimeType: "image/png",
    };

    const first = await writeCliImages([image]);
    const second = await writeCliImages([image]);

    try {
      // Identical image content must map to the identical on-disk path,
      // under the preferred tmp dir, with the mime-derived extension.
      expect(first.paths).toHaveLength(1);
      expect(second.paths).toEqual(first.paths);
      expect(first.paths[0]).toContain(`${resolvePreferredOpenClawTmpDir()}/openclaw-cli-images/`);
      expect(first.paths[0]).toMatch(/\.png$/);
      await expect(fs.readFile(first.paths[0])).resolves.toEqual(Buffer.from(image.data, "base64"));
    } finally {
      await fs.rm(first.paths[0], { force: true });
    }
  });

  it("uses the shared media extension map for image formats beyond the tiny builtin list", async () => {
    const image: ImageContent = {
      type: "image",
      data: "aGVpYy1pbWFnZQ==",
      mimeType: "image/heic",
    };

    const written = await writeCliImages([image]);

    try {
      // image/heic is not in the small builtin list, so this exercises the
      // shared media extension map.
      expect(written.paths[0]).toMatch(/\.heic$/);
    } finally {
      await fs.rm(written.paths[0], { force: true });
    }
  });
});
|
||||
|
||||
// Unit tests for resolveCliRunQueueKey: serialization lane selection for
// CLI runs.
describe("resolveCliRunQueueKey", () => {
  it("keeps serialized runs on the provider lane", () => {
    // serialize=true collapses all runs onto the backend-wide queue key.
    expect(
      resolveCliRunQueueKey({
        backendId: "codex-cli",
        serialize: true,
        runId: "run-1",
        workspaceDir: "/tmp/project-a",
        cliSessionId: "thread-123",
      }),
    ).toBe("codex-cli");
  });

  it("disables serialization when serialize=false", () => {
    // serialize=false yields a per-run key so runs can proceed in parallel.
    expect(
      resolveCliRunQueueKey({
        backendId: "codex-cli",
        serialize: false,
        runId: "run-2",
        workspaceDir: "/tmp/project-a",
      }),
    ).toBe("codex-cli:run-2");
  });
});
|
||||
|
|
@ -1,177 +0,0 @@
|
|||
import { describe, expect, it } from "vitest";
|
||||
import {
|
||||
createManagedRun,
|
||||
enqueueSystemEventMock,
|
||||
requestHeartbeatNowMock,
|
||||
setupCliRunnerTestModule,
|
||||
supervisorSpawnMock,
|
||||
} from "./cli-runner.test-support.js";
|
||||
import { resolveCliNoOutputTimeoutMs } from "./cli-runner/helpers.js";
|
||||
|
||||
// Reliability tests for runCliAgent: watchdog timeouts, overall timeouts,
// and session-expired retry failure propagation. Each test stubs the
// supervisor spawn result via supervisorSpawnMock.
describe("runCliAgent reliability", () => {
  it("fails with timeout when no-output watchdog trips", async () => {
    const runCliAgent = await setupCliRunnerTestModule();
    // Simulate a run killed by the no-output watchdog.
    supervisorSpawnMock.mockResolvedValueOnce(
      createManagedRun({
        reason: "no-output-timeout",
        exitCode: null,
        exitSignal: "SIGKILL",
        durationMs: 200,
        stdout: "",
        stderr: "",
        timedOut: true,
        noOutputTimedOut: true,
      }),
    );

    await expect(
      runCliAgent({
        sessionId: "s1",
        sessionFile: "/tmp/session.jsonl",
        workspaceDir: "/tmp",
        prompt: "hi",
        provider: "codex-cli",
        model: "gpt-5.4",
        timeoutMs: 1_000,
        runId: "run-2",
        cliSessionId: "thread-123",
      }),
    ).rejects.toThrow("produced no output");
  });

  it("enqueues a system event and heartbeat wake on no-output watchdog timeout for session runs", async () => {
    const runCliAgent = await setupCliRunnerTestModule();
    supervisorSpawnMock.mockResolvedValueOnce(
      createManagedRun({
        reason: "no-output-timeout",
        exitCode: null,
        exitSignal: "SIGKILL",
        durationMs: 200,
        stdout: "",
        stderr: "",
        timedOut: true,
        noOutputTimedOut: true,
      }),
    );

    // Same watchdog failure, but with a sessionKey: the run should also
    // notify the session and request an immediate heartbeat.
    await expect(
      runCliAgent({
        sessionId: "s1",
        sessionKey: "agent:main:main",
        sessionFile: "/tmp/session.jsonl",
        workspaceDir: "/tmp",
        prompt: "hi",
        provider: "codex-cli",
        model: "gpt-5.4",
        timeoutMs: 1_000,
        runId: "run-2b",
        cliSessionId: "thread-123",
      }),
    ).rejects.toThrow("produced no output");

    expect(enqueueSystemEventMock).toHaveBeenCalledTimes(1);
    const [notice, opts] = enqueueSystemEventMock.mock.calls[0] ?? [];
    expect(String(notice)).toContain("produced no output");
    expect(String(notice)).toContain("interactive input or an approval prompt");
    expect(opts).toMatchObject({ sessionKey: "agent:main:main" });
    expect(requestHeartbeatNowMock).toHaveBeenCalledWith({
      reason: "cli:watchdog:stall",
      sessionKey: "agent:main:main",
    });
  });

  it("fails with timeout when overall timeout trips", async () => {
    const runCliAgent = await setupCliRunnerTestModule();
    // Overall-timeout kill (noOutputTimedOut=false) maps to a different
    // error message than the watchdog case.
    supervisorSpawnMock.mockResolvedValueOnce(
      createManagedRun({
        reason: "overall-timeout",
        exitCode: null,
        exitSignal: "SIGKILL",
        durationMs: 200,
        stdout: "",
        stderr: "",
        timedOut: true,
        noOutputTimedOut: false,
      }),
    );

    await expect(
      runCliAgent({
        sessionId: "s1",
        sessionFile: "/tmp/session.jsonl",
        workspaceDir: "/tmp",
        prompt: "hi",
        provider: "codex-cli",
        model: "gpt-5.4",
        timeoutMs: 1_000,
        runId: "run-3",
        cliSessionId: "thread-123",
      }),
    ).rejects.toThrow("exceeded timeout");
  });

  it("rethrows the retry failure when session-expired recovery retry also fails", async () => {
    const runCliAgent = await setupCliRunnerTestModule();
    // First attempt fails with "session expired" (triggers recovery retry)…
    supervisorSpawnMock.mockResolvedValueOnce(
      createManagedRun({
        reason: "exit",
        exitCode: 1,
        exitSignal: null,
        durationMs: 150,
        stdout: "",
        stderr: "session expired",
        timedOut: false,
        noOutputTimedOut: false,
      }),
    );
    // …and the retry fails too, with a different error that must surface.
    supervisorSpawnMock.mockResolvedValueOnce(
      createManagedRun({
        reason: "exit",
        exitCode: 1,
        exitSignal: null,
        durationMs: 150,
        stdout: "",
        stderr: "rate limit exceeded",
        timedOut: false,
        noOutputTimedOut: false,
      }),
    );

    await expect(
      runCliAgent({
        sessionId: "s1",
        sessionKey: "agent:main:subagent:retry",
        sessionFile: "/tmp/session.jsonl",
        workspaceDir: "/tmp",
        prompt: "hi",
        provider: "codex-cli",
        model: "gpt-5.4",
        timeoutMs: 1_000,
        runId: "run-retry-failure",
        cliSessionId: "thread-123",
      }),
    ).rejects.toThrow("rate limit exceeded");

    expect(supervisorSpawnMock).toHaveBeenCalledTimes(2);
  });
});
|
||||
|
||||
// Unit test for resolveCliNoOutputTimeoutMs: a backend-level resume
// watchdog override must take precedence over the derived default.
describe("resolveCliNoOutputTimeoutMs", () => {
  it("uses backend-configured resume watchdog override", () => {
    const timeoutMs = resolveCliNoOutputTimeoutMs({
      backend: {
        command: "codex",
        reliability: {
          watchdog: {
            resume: {
              noOutputTimeoutMs: 42_000,
            },
          },
        },
      },
      timeoutMs: 120_000,
      useResume: true,
    });
    expect(timeoutMs).toBe(42_000);
  });
});
|
||||
|
|
@ -1,2 +0,0 @@
|
|||
// Public surface of the CLI runner module: the agent entry point plus the
// CLI session-id accessors.
export { runCliAgent } from "./cli-runner.js";
export { getCliSessionId, setCliSessionId } from "./cli-session.js";
|
||||
|
|
@ -1,49 +0,0 @@
|
|||
import { describe, expect, it } from "vitest";
|
||||
import {
|
||||
mockSuccessfulCliRun,
|
||||
runExistingCodexCliAgent,
|
||||
setupCliRunnerTestModule,
|
||||
supervisorSpawnMock,
|
||||
} from "./cli-runner.test-support.js";
|
||||
|
||||
// Session-binding tests for runCliAgent: when to resume an existing CLI
// session versus starting a fresh one.
describe("runCliAgent session behavior", () => {
  it("keeps resuming the CLI across model changes and passes the new model flag", async () => {
    const runCliAgent = await setupCliRunnerTestModule();
    mockSuccessfulCliRun();

    // Same auth profile on both sides of the binding -> resume is allowed.
    await runExistingCodexCliAgent({
      runCliAgent,
      runId: "run-model-switch",
      cliSessionBindingAuthProfileId: "openai:default",
      authProfileId: "openai:default",
    });

    // Resume argv keeps the thread id AND carries the model override.
    const input = supervisorSpawnMock.mock.calls[0]?.[0] as { argv?: string[] };
    expect(input.argv).toEqual([
      "codex",
      "exec",
      "resume",
      "thread-123",
      "--json",
      "--model",
      "gpt-5.4",
      "hi",
    ]);
  });

  it("starts a fresh CLI session when the auth profile changes", async () => {
    const runCliAgent = await setupCliRunnerTestModule();
    mockSuccessfulCliRun();

    // Binding was created under a different auth profile -> no resume.
    await runExistingCodexCliAgent({
      runCliAgent,
      runId: "run-auth-change",
      cliSessionBindingAuthProfileId: "openai:work",
      authProfileId: "openai:personal",
    });

    // Fresh-session argv: no "resume"/thread id, and no scope key either.
    const input = supervisorSpawnMock.mock.calls[0]?.[0] as { argv?: string[]; scopeKey?: string };
    expect(input.argv).toEqual(["codex", "exec", "--json", "--model", "gpt-5.4", "hi"]);
    expect(input.scopeKey).toBeUndefined();
  });
});
|
||||
|
|
@ -1,708 +0,0 @@
|
|||
import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import { beforeEach, describe, expect, it } from "vitest";
|
||||
import type { OpenClawConfig } from "../config/config.js";
|
||||
import { onAgentEvent, resetAgentEventsForTest } from "../infra/agent-events.js";
|
||||
import { resolvePreferredOpenClawTmpDir } from "../infra/tmp-openclaw-dir.js";
|
||||
import {
|
||||
makeBootstrapWarn as realMakeBootstrapWarn,
|
||||
resolveBootstrapContextForRun as realResolveBootstrapContextForRun,
|
||||
} from "./bootstrap-files.js";
|
||||
import {
|
||||
createManagedRun,
|
||||
mockSuccessfulCliRun,
|
||||
restoreCliRunnerPrepareTestDeps,
|
||||
runCliAgentWithBackendConfig,
|
||||
setupCliRunnerTestModule,
|
||||
SMALL_PNG_BASE64,
|
||||
stubBootstrapContext,
|
||||
supervisorSpawnMock,
|
||||
} from "./cli-runner.test-support.js";
|
||||
import { setCliRunnerPrepareTestDeps } from "./cli-runner/prepare.js";
|
||||
|
||||
// Reset shared agent-event state and cli-runner prepare-stage test hooks so
// each spawn-path test starts from a clean slate.
beforeEach(() => {
  resetAgentEventsForTest();
  restoreCliRunnerPrepareTestDeps();
});
|
||||
|
||||
describe("runCliAgent spawn path", () => {
|
||||
it("does not inject hardcoded 'Tools are disabled' text into CLI arguments", async () => {
|
||||
const runCliAgent = await setupCliRunnerTestModule();
|
||||
supervisorSpawnMock.mockResolvedValueOnce(
|
||||
createManagedRun({
|
||||
reason: "exit",
|
||||
exitCode: 0,
|
||||
exitSignal: null,
|
||||
durationMs: 50,
|
||||
stdout: "ok",
|
||||
stderr: "",
|
||||
timedOut: false,
|
||||
noOutputTimedOut: false,
|
||||
}),
|
||||
);
|
||||
|
||||
await runCliAgent({
|
||||
sessionId: "s1",
|
||||
sessionFile: "/tmp/session.jsonl",
|
||||
workspaceDir: "/tmp",
|
||||
prompt: "Run: node script.mjs",
|
||||
provider: "codex-cli",
|
||||
model: "gpt-5.4",
|
||||
timeoutMs: 1_000,
|
||||
runId: "run-no-tools-disabled",
|
||||
extraSystemPrompt: "You are a helpful assistant.",
|
||||
});
|
||||
|
||||
const input = supervisorSpawnMock.mock.calls[0]?.[0] as { argv?: string[] };
|
||||
const allArgs = (input.argv ?? []).join("\n");
|
||||
expect(allArgs).not.toContain("Tools are disabled in this session");
|
||||
expect(allArgs).toContain("You are a helpful assistant.");
|
||||
});
|
||||
|
||||
it("pipes prompts over stdin when the backend requests stdin mode", async () => {
|
||||
const runCliAgent = await setupCliRunnerTestModule();
|
||||
supervisorSpawnMock.mockResolvedValueOnce(
|
||||
createManagedRun({
|
||||
reason: "exit",
|
||||
exitCode: 0,
|
||||
exitSignal: null,
|
||||
durationMs: 50,
|
||||
stdout: "ok",
|
||||
stderr: "",
|
||||
timedOut: false,
|
||||
noOutputTimedOut: false,
|
||||
}),
|
||||
);
|
||||
|
||||
await runCliAgent({
|
||||
sessionId: "s1",
|
||||
sessionFile: "/tmp/session.jsonl",
|
||||
workspaceDir: "/tmp",
|
||||
config: {
|
||||
agents: {
|
||||
defaults: {
|
||||
cliBackends: {
|
||||
"custom-cli": {
|
||||
command: "custom-cli",
|
||||
input: "stdin",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
} satisfies OpenClawConfig,
|
||||
prompt: "Explain this diff",
|
||||
provider: "custom-cli",
|
||||
model: "default",
|
||||
timeoutMs: 1_000,
|
||||
runId: "run-stdin-custom",
|
||||
});
|
||||
|
||||
const input = supervisorSpawnMock.mock.calls[0]?.[0] as {
|
||||
argv?: string[];
|
||||
input?: string;
|
||||
};
|
||||
expect(input.input).toContain("Explain this diff");
|
||||
expect(input.argv).not.toContain("Explain this diff");
|
||||
});
|
||||
|
||||
it("runs CLI through supervisor and returns payload", async () => {
|
||||
const runCliAgent = await setupCliRunnerTestModule();
|
||||
supervisorSpawnMock.mockResolvedValueOnce(
|
||||
createManagedRun({
|
||||
reason: "exit",
|
||||
exitCode: 0,
|
||||
exitSignal: null,
|
||||
durationMs: 50,
|
||||
stdout: "ok",
|
||||
stderr: "",
|
||||
timedOut: false,
|
||||
noOutputTimedOut: false,
|
||||
}),
|
||||
);
|
||||
|
||||
const result = await runCliAgent({
|
||||
sessionId: "s1",
|
||||
sessionFile: "/tmp/session.jsonl",
|
||||
workspaceDir: "/tmp",
|
||||
prompt: "hi",
|
||||
provider: "codex-cli",
|
||||
model: "gpt-5.4",
|
||||
timeoutMs: 1_000,
|
||||
runId: "run-1",
|
||||
cliSessionId: "thread-123",
|
||||
});
|
||||
|
||||
expect(result.payloads?.[0]?.text).toBe("ok");
|
||||
const input = supervisorSpawnMock.mock.calls[0]?.[0] as {
|
||||
argv?: string[];
|
||||
mode?: string;
|
||||
timeoutMs?: number;
|
||||
noOutputTimeoutMs?: number;
|
||||
replaceExistingScope?: boolean;
|
||||
scopeKey?: string;
|
||||
};
|
||||
expect(input.mode).toBe("child");
|
||||
expect(input.argv?.[0]).toBe("codex");
|
||||
expect(input.timeoutMs).toBe(1_000);
|
||||
expect(input.noOutputTimeoutMs).toBeGreaterThanOrEqual(1_000);
|
||||
expect(input.replaceExistingScope).toBe(true);
|
||||
expect(input.scopeKey).toContain("thread-123");
|
||||
});
|
||||
|
||||
it("cancels the managed CLI run when the abort signal fires", async () => {
|
||||
const runCliAgent = await setupCliRunnerTestModule();
|
||||
const abortController = new AbortController();
|
||||
let resolveWait!: (value: {
|
||||
reason:
|
||||
| "manual-cancel"
|
||||
| "overall-timeout"
|
||||
| "no-output-timeout"
|
||||
| "spawn-error"
|
||||
| "signal"
|
||||
| "exit";
|
||||
exitCode: number | null;
|
||||
exitSignal: NodeJS.Signals | number | null;
|
||||
durationMs: number;
|
||||
stdout: string;
|
||||
stderr: string;
|
||||
timedOut: boolean;
|
||||
noOutputTimedOut: boolean;
|
||||
}) => void;
|
||||
const cancel = vi.fn((reason?: string) => {
|
||||
resolveWait({
|
||||
reason: reason === "manual-cancel" ? "manual-cancel" : "signal",
|
||||
exitCode: null,
|
||||
exitSignal: null,
|
||||
durationMs: 50,
|
||||
stdout: "",
|
||||
stderr: "",
|
||||
timedOut: false,
|
||||
noOutputTimedOut: false,
|
||||
});
|
||||
});
|
||||
supervisorSpawnMock.mockResolvedValueOnce({
|
||||
runId: "run-supervisor",
|
||||
pid: 1234,
|
||||
startedAtMs: Date.now(),
|
||||
stdin: undefined,
|
||||
wait: vi.fn(
|
||||
async () =>
|
||||
await new Promise((resolve) => {
|
||||
resolveWait = resolve;
|
||||
}),
|
||||
),
|
||||
cancel,
|
||||
});
|
||||
|
||||
const runPromise = runCliAgent({
|
||||
sessionId: "s1",
|
||||
sessionFile: "/tmp/session.jsonl",
|
||||
workspaceDir: "/tmp",
|
||||
prompt: "hi",
|
||||
provider: "codex-cli",
|
||||
model: "gpt-5.4",
|
||||
timeoutMs: 1_000,
|
||||
runId: "run-abort",
|
||||
abortSignal: abortController.signal,
|
||||
});
|
||||
|
||||
await Promise.resolve();
|
||||
abortController.abort();
|
||||
|
||||
await expect(runPromise).rejects.toMatchObject({ name: "AbortError" });
|
||||
expect(cancel).toHaveBeenCalledWith("manual-cancel");
|
||||
});
|
||||
|
||||
it("streams CLI text deltas from JSONL stdout", async () => {
|
||||
const runCliAgent = await setupCliRunnerTestModule();
|
||||
const agentEvents: Array<{ stream: string; text?: string; delta?: string }> = [];
|
||||
const stop = onAgentEvent((evt) => {
|
||||
agentEvents.push({
|
||||
stream: evt.stream,
|
||||
text: typeof evt.data.text === "string" ? evt.data.text : undefined,
|
||||
delta: typeof evt.data.delta === "string" ? evt.data.delta : undefined,
|
||||
});
|
||||
});
|
||||
supervisorSpawnMock.mockImplementationOnce(async (...args: unknown[]) => {
|
||||
const input = (args[0] ?? {}) as { onStdout?: (chunk: string) => void };
|
||||
input.onStdout?.(
|
||||
[
|
||||
JSON.stringify({ type: "init", session_id: "session-123" }),
|
||||
JSON.stringify({
|
||||
type: "stream_event",
|
||||
event: { type: "content_block_delta", delta: { type: "text_delta", text: "Hello" } },
|
||||
}),
|
||||
].join("\n") + "\n",
|
||||
);
|
||||
input.onStdout?.(
|
||||
JSON.stringify({
|
||||
type: "stream_event",
|
||||
event: { type: "content_block_delta", delta: { type: "text_delta", text: " world" } },
|
||||
}) + "\n",
|
||||
);
|
||||
return createManagedRun({
|
||||
reason: "exit",
|
||||
exitCode: 0,
|
||||
exitSignal: null,
|
||||
durationMs: 50,
|
||||
stdout: [
|
||||
JSON.stringify({ type: "init", session_id: "session-123" }),
|
||||
JSON.stringify({
|
||||
type: "stream_event",
|
||||
event: { type: "content_block_delta", delta: { type: "text_delta", text: "Hello" } },
|
||||
}),
|
||||
JSON.stringify({
|
||||
type: "stream_event",
|
||||
event: { type: "content_block_delta", delta: { type: "text_delta", text: " world" } },
|
||||
}),
|
||||
JSON.stringify({
|
||||
type: "result",
|
||||
session_id: "session-123",
|
||||
result: "Hello world",
|
||||
}),
|
||||
].join("\n"),
|
||||
stderr: "",
|
||||
timedOut: false,
|
||||
noOutputTimedOut: false,
|
||||
});
|
||||
});
|
||||
|
||||
try {
|
||||
const result = await runCliAgent({
|
||||
sessionId: "s1",
|
||||
sessionFile: "/tmp/session.jsonl",
|
||||
workspaceDir: "/tmp",
|
||||
prompt: "hi",
|
||||
provider: "codex-cli",
|
||||
model: "gpt-5.4",
|
||||
timeoutMs: 1_000,
|
||||
runId: "run-cli-stream-json",
|
||||
});
|
||||
|
||||
expect(result.payloads?.[0]?.text).toBe("Hello world");
|
||||
expect(agentEvents).toEqual([
|
||||
{ stream: "assistant", text: "Hello", delta: "Hello" },
|
||||
{ stream: "assistant", text: "Hello world", delta: " world" },
|
||||
]);
|
||||
} finally {
|
||||
stop();
|
||||
}
|
||||
});
|
||||
|
||||
it("sanitizes dangerous backend env overrides before spawn", async () => {
|
||||
const runCliAgent = await setupCliRunnerTestModule();
|
||||
mockSuccessfulCliRun();
|
||||
await runCliAgentWithBackendConfig({
|
||||
runCliAgent,
|
||||
backend: {
|
||||
command: "codex",
|
||||
env: {
|
||||
NODE_OPTIONS: "--require ./malicious.js",
|
||||
LD_PRELOAD: "/tmp/pwn.so",
|
||||
PATH: "/tmp/evil",
|
||||
HOME: "/tmp/evil-home",
|
||||
SAFE_KEY: "ok",
|
||||
},
|
||||
},
|
||||
runId: "run-env-sanitized",
|
||||
});
|
||||
|
||||
const input = supervisorSpawnMock.mock.calls[0]?.[0] as {
|
||||
env?: Record<string, string | undefined>;
|
||||
};
|
||||
expect(input.env?.SAFE_KEY).toBe("ok");
|
||||
expect(input.env?.PATH).toBe(process.env.PATH);
|
||||
expect(input.env?.HOME).toBe(process.env.HOME);
|
||||
expect(input.env?.NODE_OPTIONS).toBeUndefined();
|
||||
expect(input.env?.LD_PRELOAD).toBeUndefined();
|
||||
});
|
||||
|
||||
it("applies clearEnv after sanitizing backend env overrides", async () => {
|
||||
const runCliAgent = await setupCliRunnerTestModule();
|
||||
process.env.SAFE_CLEAR = "from-base";
|
||||
mockSuccessfulCliRun();
|
||||
await runCliAgentWithBackendConfig({
|
||||
runCliAgent,
|
||||
backend: {
|
||||
command: "codex",
|
||||
env: {
|
||||
SAFE_KEEP: "keep-me",
|
||||
},
|
||||
clearEnv: ["SAFE_CLEAR"],
|
||||
},
|
||||
runId: "run-clear-env",
|
||||
});
|
||||
|
||||
const input = supervisorSpawnMock.mock.calls[0]?.[0] as {
|
||||
env?: Record<string, string | undefined>;
|
||||
};
|
||||
expect(input.env?.SAFE_KEEP).toBe("keep-me");
|
||||
expect(input.env?.SAFE_CLEAR).toBeUndefined();
|
||||
});
|
||||
|
||||
it("keeps explicit backend env overrides even when clearEnv drops inherited values", async () => {
|
||||
const runCliAgent = await setupCliRunnerTestModule();
|
||||
process.env.SAFE_OVERRIDE = "from-base";
|
||||
mockSuccessfulCliRun();
|
||||
await runCliAgentWithBackendConfig({
|
||||
runCliAgent,
|
||||
backend: {
|
||||
command: "codex",
|
||||
env: {
|
||||
SAFE_OVERRIDE: "from-override",
|
||||
},
|
||||
clearEnv: ["SAFE_OVERRIDE"],
|
||||
},
|
||||
runId: "run-clear-env-override",
|
||||
});
|
||||
|
||||
const input = supervisorSpawnMock.mock.calls[0]?.[0] as {
|
||||
env?: Record<string, string | undefined>;
|
||||
};
|
||||
expect(input.env?.SAFE_OVERRIDE).toBe("from-override");
|
||||
});
|
||||
|
||||
it("prepends bootstrap warnings to the CLI prompt body", async () => {
|
||||
const runCliAgent = await setupCliRunnerTestModule();
|
||||
supervisorSpawnMock.mockResolvedValueOnce(
|
||||
createManagedRun({
|
||||
reason: "exit",
|
||||
exitCode: 0,
|
||||
exitSignal: null,
|
||||
durationMs: 50,
|
||||
stdout: "ok",
|
||||
stderr: "",
|
||||
timedOut: false,
|
||||
noOutputTimedOut: false,
|
||||
}),
|
||||
);
|
||||
stubBootstrapContext({
|
||||
bootstrapFiles: [
|
||||
{
|
||||
name: "AGENTS.md",
|
||||
path: "/tmp/AGENTS.md",
|
||||
content: "A".repeat(200),
|
||||
missing: false,
|
||||
},
|
||||
],
|
||||
contextFiles: [{ path: "AGENTS.md", content: "A".repeat(20) }],
|
||||
});
|
||||
|
||||
await runCliAgent({
|
||||
sessionId: "s1",
|
||||
sessionFile: "/tmp/session.jsonl",
|
||||
workspaceDir: "/tmp",
|
||||
config: {
|
||||
agents: {
|
||||
defaults: {
|
||||
bootstrapMaxChars: 50,
|
||||
bootstrapTotalMaxChars: 50,
|
||||
},
|
||||
},
|
||||
} satisfies OpenClawConfig,
|
||||
prompt: "hi",
|
||||
provider: "codex-cli",
|
||||
model: "gpt-5.4",
|
||||
timeoutMs: 1_000,
|
||||
runId: "run-warning",
|
||||
cliSessionId: "thread-123",
|
||||
});
|
||||
|
||||
const input = supervisorSpawnMock.mock.calls[0]?.[0] as {
|
||||
argv?: string[];
|
||||
input?: string;
|
||||
};
|
||||
const promptCarrier = [input.input ?? "", ...(input.argv ?? [])].join("\n");
|
||||
|
||||
expect(promptCarrier).toContain("[Bootstrap truncation warning]");
|
||||
expect(promptCarrier).toContain("- AGENTS.md: 200 raw -> 20 injected");
|
||||
expect(promptCarrier).toContain("hi");
|
||||
});
|
||||
|
||||
it("loads workspace bootstrap files into the configured CLI system prompt", async () => {
|
||||
const runCliAgent = await setupCliRunnerTestModule();
|
||||
const workspaceDir = await fs.mkdtemp(
|
||||
path.join(os.tmpdir(), "openclaw-cli-bootstrap-context-"),
|
||||
);
|
||||
supervisorSpawnMock.mockResolvedValueOnce(
|
||||
createManagedRun({
|
||||
reason: "exit",
|
||||
exitCode: 0,
|
||||
exitSignal: null,
|
||||
durationMs: 50,
|
||||
stdout: "ok",
|
||||
stderr: "",
|
||||
timedOut: false,
|
||||
noOutputTimedOut: false,
|
||||
}),
|
||||
);
|
||||
|
||||
await fs.writeFile(
|
||||
path.join(workspaceDir, "AGENTS.md"),
|
||||
[
|
||||
"# AGENTS.md",
|
||||
"",
|
||||
"Read SOUL.md and IDENTITY.md before replying.",
|
||||
"Use the injected workspace bootstrap files as standing instructions.",
|
||||
].join("\n"),
|
||||
"utf-8",
|
||||
);
|
||||
await fs.writeFile(path.join(workspaceDir, "SOUL.md"), "SOUL-SECRET\n", "utf-8");
|
||||
await fs.writeFile(path.join(workspaceDir, "IDENTITY.md"), "IDENTITY-SECRET\n", "utf-8");
|
||||
await fs.writeFile(path.join(workspaceDir, "USER.md"), "USER-SECRET\n", "utf-8");
|
||||
|
||||
setCliRunnerPrepareTestDeps({
|
||||
makeBootstrapWarn: realMakeBootstrapWarn,
|
||||
resolveBootstrapContextForRun: realResolveBootstrapContextForRun,
|
||||
});
|
||||
|
||||
try {
|
||||
await runCliAgent({
|
||||
sessionId: "s1",
|
||||
sessionFile: "/tmp/session.jsonl",
|
||||
workspaceDir,
|
||||
config: {
|
||||
agents: {
|
||||
defaults: {
|
||||
cliBackends: {
|
||||
"custom-cli": {
|
||||
command: "custom-cli",
|
||||
input: "stdin",
|
||||
systemPromptArg: "--append-system-prompt",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
} satisfies OpenClawConfig,
|
||||
prompt: "BOOTSTRAP_CAPTURE_CHECK",
|
||||
provider: "custom-cli",
|
||||
model: "default",
|
||||
timeoutMs: 1_000,
|
||||
runId: "run-bootstrap-context",
|
||||
});
|
||||
|
||||
const input = supervisorSpawnMock.mock.calls[0]?.[0] as {
|
||||
argv?: string[];
|
||||
input?: string;
|
||||
};
|
||||
const allArgs = (input.argv ?? []).join("\n");
|
||||
const agentsPath = path.join(workspaceDir, "AGENTS.md");
|
||||
const soulPath = path.join(workspaceDir, "SOUL.md");
|
||||
const identityPath = path.join(workspaceDir, "IDENTITY.md");
|
||||
const userPath = path.join(workspaceDir, "USER.md");
|
||||
expect(input.input).toContain("BOOTSTRAP_CAPTURE_CHECK");
|
||||
expect(allArgs).toContain("--append-system-prompt");
|
||||
expect(allArgs).toContain("# Project Context");
|
||||
expect(allArgs).toContain(`## ${agentsPath}`);
|
||||
expect(allArgs).toContain("Read SOUL.md and IDENTITY.md before replying.");
|
||||
expect(allArgs).toContain(`## ${soulPath}`);
|
||||
expect(allArgs).toContain("SOUL-SECRET");
|
||||
expect(allArgs).toContain(
|
||||
"If SOUL.md is present, embody its persona and tone. Avoid stiff, generic replies; follow its guidance unless higher-priority instructions override it.",
|
||||
);
|
||||
expect(allArgs).toContain(`## ${identityPath}`);
|
||||
expect(allArgs).toContain("IDENTITY-SECRET");
|
||||
expect(allArgs).toContain(`## ${userPath}`);
|
||||
expect(allArgs).toContain("USER-SECRET");
|
||||
} finally {
|
||||
await fs.rm(workspaceDir, { recursive: true, force: true });
|
||||
restoreCliRunnerPrepareTestDeps();
|
||||
}
|
||||
});
|
||||
|
||||
it("hydrates prompt media refs into CLI image args", async () => {
|
||||
const runCliAgent = await setupCliRunnerTestModule();
|
||||
supervisorSpawnMock.mockResolvedValueOnce(
|
||||
createManagedRun({
|
||||
reason: "exit",
|
||||
exitCode: 0,
|
||||
exitSignal: null,
|
||||
durationMs: 50,
|
||||
stdout: "ok",
|
||||
stderr: "",
|
||||
timedOut: false,
|
||||
noOutputTimedOut: false,
|
||||
}),
|
||||
);
|
||||
|
||||
const tempDir = await fs.mkdtemp(
|
||||
path.join(resolvePreferredOpenClawTmpDir(), "openclaw-cli-prompt-image-"),
|
||||
);
|
||||
const sourceImage = path.join(tempDir, "bb-image.png");
|
||||
await fs.writeFile(sourceImage, Buffer.from(SMALL_PNG_BASE64, "base64"));
|
||||
|
||||
try {
|
||||
await runCliAgent({
|
||||
sessionId: "s1",
|
||||
sessionFile: "/tmp/session.jsonl",
|
||||
workspaceDir: tempDir,
|
||||
prompt: `[media attached: ${sourceImage} (image/png)]\n\n<media:image>`,
|
||||
provider: "codex-cli",
|
||||
model: "gpt-5.4",
|
||||
timeoutMs: 1_000,
|
||||
runId: "run-prompt-image",
|
||||
});
|
||||
} finally {
|
||||
await fs.rm(tempDir, { recursive: true, force: true });
|
||||
}
|
||||
|
||||
const input = supervisorSpawnMock.mock.calls[0]?.[0] as { argv?: string[] };
|
||||
const argv = input.argv ?? [];
|
||||
const imageArgIndex = argv.indexOf("--image");
|
||||
expect(imageArgIndex).toBeGreaterThanOrEqual(0);
|
||||
expect(argv[imageArgIndex + 1]).toContain("openclaw-cli-images");
|
||||
expect(argv[imageArgIndex + 1]).not.toBe(sourceImage);
|
||||
});
|
||||
|
||||
it("appends hydrated prompt media refs to generic backend prompts", async () => {
|
||||
const runCliAgent = await setupCliRunnerTestModule();
|
||||
supervisorSpawnMock.mockResolvedValueOnce(
|
||||
createManagedRun({
|
||||
reason: "exit",
|
||||
exitCode: 0,
|
||||
exitSignal: null,
|
||||
durationMs: 50,
|
||||
stdout: "ok",
|
||||
stderr: "",
|
||||
timedOut: false,
|
||||
noOutputTimedOut: false,
|
||||
}),
|
||||
);
|
||||
|
||||
const tempDir = await fs.mkdtemp(
|
||||
path.join(resolvePreferredOpenClawTmpDir(), "openclaw-cli-prompt-image-generic-"),
|
||||
);
|
||||
const sourceImage = path.join(tempDir, "claude-image.png");
|
||||
await fs.writeFile(sourceImage, Buffer.from(SMALL_PNG_BASE64, "base64"));
|
||||
|
||||
try {
|
||||
await runCliAgent({
|
||||
sessionId: "s1",
|
||||
sessionFile: "/tmp/session.jsonl",
|
||||
workspaceDir: tempDir,
|
||||
config: {
|
||||
agents: {
|
||||
defaults: {
|
||||
cliBackends: {
|
||||
"custom-cli": {
|
||||
command: "custom-cli",
|
||||
input: "stdin",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
} satisfies OpenClawConfig,
|
||||
prompt: `[media attached: ${sourceImage} (image/png)]\n\n<media:image>`,
|
||||
provider: "custom-cli",
|
||||
model: "default",
|
||||
timeoutMs: 1_000,
|
||||
runId: "run-prompt-image-generic",
|
||||
});
|
||||
} finally {
|
||||
await fs.rm(tempDir, { recursive: true, force: true });
|
||||
}
|
||||
|
||||
const input = supervisorSpawnMock.mock.calls[0]?.[0] as { argv?: string[]; input?: string };
|
||||
const argv = input.argv ?? [];
|
||||
expect(argv).not.toContain("--image");
|
||||
const promptCarrier = [input.input ?? "", ...argv].join("\n");
|
||||
const appendedPath = promptCarrier
|
||||
.split("\n")
|
||||
.find((value) => value.includes("openclaw-cli-images"));
|
||||
expect(appendedPath).toBeDefined();
|
||||
expect(appendedPath).not.toBe(sourceImage);
|
||||
expect(promptCarrier).toContain(appendedPath ?? "");
|
||||
});
|
||||
|
||||
it("prefers explicit images over prompt refs", async () => {
|
||||
const runCliAgent = await setupCliRunnerTestModule();
|
||||
supervisorSpawnMock.mockResolvedValueOnce(
|
||||
createManagedRun({
|
||||
reason: "exit",
|
||||
exitCode: 0,
|
||||
exitSignal: null,
|
||||
durationMs: 50,
|
||||
stdout: "ok",
|
||||
stderr: "",
|
||||
timedOut: false,
|
||||
noOutputTimedOut: false,
|
||||
}),
|
||||
);
|
||||
|
||||
const tempDir = await fs.mkdtemp(
|
||||
path.join(resolvePreferredOpenClawTmpDir(), "openclaw-cli-explicit-images-"),
|
||||
);
|
||||
const sourceImage = path.join(tempDir, "ignored-prompt-image.png");
|
||||
await fs.writeFile(sourceImage, Buffer.from(SMALL_PNG_BASE64, "base64"));
|
||||
|
||||
try {
|
||||
await runCliAgent({
|
||||
sessionId: "s1",
|
||||
sessionFile: "/tmp/session.jsonl",
|
||||
workspaceDir: tempDir,
|
||||
prompt: `[media attached: ${sourceImage} (image/png)]\n\n<media:image>`,
|
||||
images: [{ type: "image", data: SMALL_PNG_BASE64, mimeType: "image/png" }],
|
||||
provider: "codex-cli",
|
||||
model: "gpt-5.4",
|
||||
timeoutMs: 1_000,
|
||||
runId: "run-explicit-image-precedence",
|
||||
});
|
||||
} finally {
|
||||
await fs.rm(tempDir, { recursive: true, force: true });
|
||||
}
|
||||
|
||||
const input = supervisorSpawnMock.mock.calls[0]?.[0] as { argv?: string[] };
|
||||
const argv = input.argv ?? [];
|
||||
expect(argv.filter((arg) => arg === "--image")).toHaveLength(1);
|
||||
});
|
||||
|
||||
it("falls back to per-agent workspace when workspaceDir is missing", async () => {
|
||||
const runCliAgent = await setupCliRunnerTestModule();
|
||||
const tempDir = await fs.mkdtemp(
|
||||
path.join(process.env.TMPDIR ?? "/tmp", "openclaw-cli-runner-"),
|
||||
);
|
||||
const fallbackWorkspace = path.join(tempDir, "workspace-main");
|
||||
await fs.mkdir(fallbackWorkspace, { recursive: true });
|
||||
const cfg = {
|
||||
agents: {
|
||||
defaults: {
|
||||
workspace: fallbackWorkspace,
|
||||
},
|
||||
},
|
||||
} satisfies OpenClawConfig;
|
||||
|
||||
supervisorSpawnMock.mockResolvedValueOnce(
|
||||
createManagedRun({
|
||||
reason: "exit",
|
||||
exitCode: 0,
|
||||
exitSignal: null,
|
||||
durationMs: 25,
|
||||
stdout: "ok",
|
||||
stderr: "",
|
||||
timedOut: false,
|
||||
noOutputTimedOut: false,
|
||||
}),
|
||||
);
|
||||
|
||||
try {
|
||||
await runCliAgent({
|
||||
sessionId: "s1",
|
||||
sessionKey: "agent:main:subagent:missing-workspace",
|
||||
sessionFile: "/tmp/session.jsonl",
|
||||
workspaceDir: undefined as unknown as string,
|
||||
config: cfg,
|
||||
prompt: "hi",
|
||||
provider: "codex-cli",
|
||||
model: "gpt-5.4",
|
||||
timeoutMs: 1_000,
|
||||
runId: "run-4",
|
||||
});
|
||||
} finally {
|
||||
await fs.rm(tempDir, { recursive: true, force: true });
|
||||
}
|
||||
|
||||
const input = supervisorSpawnMock.mock.calls[0]?.[0] as { cwd?: string };
|
||||
expect(input.cwd).toBe(path.resolve(fallbackWorkspace));
|
||||
});
|
||||
});
|
||||
|
|
@ -1,324 +0,0 @@
|
|||
import fs from "node:fs/promises";
|
||||
import type { Mock } from "vitest";
|
||||
import { beforeEach, vi } from "vitest";
|
||||
import type { OpenClawConfig } from "../config/config.js";
|
||||
import type { requestHeartbeatNow } from "../infra/heartbeat-wake.js";
|
||||
import type { enqueueSystemEvent } from "../infra/system-events.js";
|
||||
import type { CliBackendPlugin } from "../plugin-sdk/cli-backend.js";
|
||||
import {
|
||||
CLI_FRESH_WATCHDOG_DEFAULTS,
|
||||
CLI_RESUME_WATCHDOG_DEFAULTS,
|
||||
} from "../plugin-sdk/cli-backend.js";
|
||||
import { createEmptyPluginRegistry } from "../plugins/registry.js";
|
||||
import { setActivePluginRegistry } from "../plugins/runtime.js";
|
||||
import type { getProcessSupervisor } from "../process/supervisor/index.js";
|
||||
import { setCliRunnerExecuteTestDeps } from "./cli-runner/execute.js";
|
||||
import { setCliRunnerPrepareTestDeps } from "./cli-runner/prepare.js";
|
||||
import type { EmbeddedContextFile } from "./pi-embedded-helpers.js";
|
||||
import type { WorkspaceBootstrapFile } from "./workspace.js";
|
||||
|
||||
// Aliases over the real module types so the mocks below stay in sync with the
// production signatures.
type ProcessSupervisor = ReturnType<typeof getProcessSupervisor>;
type SupervisorSpawnFn = ProcessSupervisor["spawn"];
type EnqueueSystemEventFn = typeof enqueueSystemEvent;
type RequestHeartbeatNowFn = typeof requestHeartbeatNow;
type UnknownMock = Mock<(...args: unknown[]) => unknown>;
// Result shape of resolveBootstrapContextForRun: raw workspace bootstrap files
// plus the embedded context files derived from them.
type BootstrapContext = {
  bootstrapFiles: WorkspaceBootstrapFile[];
  contextFiles: EmbeddedContextFile[];
};
type ResolveBootstrapContextForRunMock = Mock<() => Promise<BootstrapContext>>;

// Shared spies that cli-runner tests inspect after driving runCliAgent.
export const supervisorSpawnMock: UnknownMock = vi.fn();
export const enqueueSystemEventMock: UnknownMock = vi.fn();
export const requestHeartbeatNowMock: UnknownMock = vi.fn();
// 1x1 transparent PNG — a minimal valid image fixture for media tests.
export const SMALL_PNG_BASE64 =
  "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mP8/woAAn8B9FD5fHAAAAAASUVORK5CYII=";
|
||||
|
||||
// vi.hoisted so the mock exists before any vi.mock factory evaluates.
// Default behavior: an empty bootstrap context.
const hoisted = vi.hoisted(
  (): {
    resolveBootstrapContextForRunMock: ResolveBootstrapContextForRunMock;
  } => {
    return {
      resolveBootstrapContextForRunMock: vi.fn<() => Promise<BootstrapContext>>(async () => ({
        bootstrapFiles: [],
        contextFiles: [],
      })),
    };
  },
);
|
||||
|
||||
// Route the CLI runner's process-spawn and event side effects through the
// local spies so tests can assert on spawn parameters and emitted events.
setCliRunnerExecuteTestDeps({
  getProcessSupervisor: () => ({
    spawn: (params: Parameters<SupervisorSpawnFn>[0]) =>
      supervisorSpawnMock(params) as ReturnType<SupervisorSpawnFn>,
    cancel: vi.fn(),
    cancelScope: vi.fn(),
    reconcileOrphans: vi.fn(),
    getRecord: vi.fn(),
  }),
  enqueueSystemEvent: (
    text: Parameters<EnqueueSystemEventFn>[0],
    options: Parameters<EnqueueSystemEventFn>[1],
  ) => enqueueSystemEventMock(text, options) as ReturnType<EnqueueSystemEventFn>,
  requestHeartbeatNow: (options?: Parameters<RequestHeartbeatNowFn>[0]) =>
    requestHeartbeatNowMock(options) as ReturnType<RequestHeartbeatNowFn>,
});

// Default prepare-phase deps: no bootstrap warnings, empty bootstrap context.
setCliRunnerPrepareTestDeps({
  makeBootstrapWarn: () => () => {},
  resolveBootstrapContextForRun: hoisted.resolveBootstrapContextForRunMock,
});
|
||||
|
||||
// Exit payload resolved by the mocked supervisor run's wait().
type MockRunExit = {
  reason:
    | "manual-cancel"
    | "overall-timeout"
    | "no-output-timeout"
    | "spawn-error"
    | "signal"
    | "exit";
  exitCode: number | null;
  exitSignal: NodeJS.Signals | number | null;
  durationMs: number;
  stdout: string;
  stderr: string;
  timedOut: boolean;
  noOutputTimedOut: boolean;
};

// Minimal backend config shape accepted by runCliAgentWithBackendConfig.
type TestCliBackendConfig = {
  command: string;
  env?: Record<string, string>;
  clearEnv?: string[];
};

// Shape of the managed-run handle produced by createManagedRun.
type ManagedRunMock = {
  runId: string;
  pid: number;
  startedAtMs: number;
  stdin: undefined;
  wait: Mock<() => Promise<MockRunExit>>;
  cancel: Mock<() => void>;
};
|
||||
|
||||
function buildOpenAICodexCliBackendFixture(): CliBackendPlugin {
|
||||
return {
|
||||
id: "codex-cli",
|
||||
config: {
|
||||
command: "codex",
|
||||
args: [
|
||||
"exec",
|
||||
"--json",
|
||||
"--color",
|
||||
"never",
|
||||
"--sandbox",
|
||||
"workspace-write",
|
||||
"--skip-git-repo-check",
|
||||
],
|
||||
resumeArgs: [
|
||||
"exec",
|
||||
"resume",
|
||||
"{sessionId}",
|
||||
"--color",
|
||||
"never",
|
||||
"--sandbox",
|
||||
"workspace-write",
|
||||
"--skip-git-repo-check",
|
||||
],
|
||||
output: "jsonl",
|
||||
resumeOutput: "text",
|
||||
input: "arg",
|
||||
modelArg: "--model",
|
||||
sessionIdFields: ["thread_id"],
|
||||
sessionMode: "existing",
|
||||
imageArg: "--image",
|
||||
imageMode: "repeat",
|
||||
reliability: {
|
||||
watchdog: {
|
||||
fresh: { ...CLI_FRESH_WATCHDOG_DEFAULTS },
|
||||
resume: { ...CLI_RESUME_WATCHDOG_DEFAULTS },
|
||||
},
|
||||
},
|
||||
serialize: true,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
function buildGoogleGeminiCliBackendFixture(): CliBackendPlugin {
|
||||
return {
|
||||
id: "google-gemini-cli",
|
||||
config: {
|
||||
command: "gemini",
|
||||
args: ["--prompt", "--output-format", "json"],
|
||||
resumeArgs: ["--resume", "{sessionId}", "--prompt", "--output-format", "json"],
|
||||
output: "json",
|
||||
input: "arg",
|
||||
modelArg: "--model",
|
||||
modelAliases: {
|
||||
pro: "gemini-3.1-pro-preview",
|
||||
flash: "gemini-3.1-flash-preview",
|
||||
"flash-lite": "gemini-3.1-flash-lite-preview",
|
||||
},
|
||||
sessionMode: "existing",
|
||||
sessionIdFields: ["session_id", "sessionId"],
|
||||
reliability: {
|
||||
watchdog: {
|
||||
fresh: { ...CLI_FRESH_WATCHDOG_DEFAULTS },
|
||||
resume: { ...CLI_RESUME_WATCHDOG_DEFAULTS },
|
||||
},
|
||||
},
|
||||
serialize: true,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
export function createManagedRun(
|
||||
exit: MockRunExit,
|
||||
pid = 1234,
|
||||
): ManagedRunMock & Awaited<ReturnType<SupervisorSpawnFn>> {
|
||||
return {
|
||||
runId: "run-supervisor",
|
||||
pid,
|
||||
startedAtMs: Date.now(),
|
||||
stdin: undefined,
|
||||
wait: vi.fn().mockResolvedValue(exit),
|
||||
cancel: vi.fn(),
|
||||
};
|
||||
}
|
||||
|
||||
export function mockSuccessfulCliRun() {
|
||||
supervisorSpawnMock.mockResolvedValueOnce(
|
||||
createManagedRun({
|
||||
reason: "exit",
|
||||
exitCode: 0,
|
||||
exitSignal: null,
|
||||
durationMs: 50,
|
||||
stdout: "ok",
|
||||
stderr: "",
|
||||
timedOut: false,
|
||||
noOutputTimedOut: false,
|
||||
}),
|
||||
);
|
||||
}
|
||||
|
||||
// Minimal config for a codex CLI backend that resumes an existing session
// (sessionMode: "existing") — used by the auth-profile binding tests.
export const EXISTING_CODEX_CONFIG = {
  agents: {
    defaults: {
      cliBackends: {
        "codex-cli": {
          command: "codex",
          args: ["exec", "--json"],
          resumeArgs: ["exec", "resume", "{sessionId}", "--json"],
          output: "text",
          modelArg: "--model",
          sessionMode: "existing",
        },
      },
    },
  },
} satisfies OpenClawConfig;
|
||||
|
||||
export async function setupCliRunnerTestModule() {
|
||||
const registry = createEmptyPluginRegistry();
|
||||
registry.cliBackends = [
|
||||
{
|
||||
pluginId: "openai",
|
||||
backend: buildOpenAICodexCliBackendFixture(),
|
||||
source: "test",
|
||||
},
|
||||
{
|
||||
pluginId: "google",
|
||||
backend: buildGoogleGeminiCliBackendFixture(),
|
||||
source: "test",
|
||||
},
|
||||
];
|
||||
setActivePluginRegistry(registry);
|
||||
supervisorSpawnMock.mockClear();
|
||||
enqueueSystemEventMock.mockClear();
|
||||
requestHeartbeatNowMock.mockClear();
|
||||
hoisted.resolveBootstrapContextForRunMock.mockReset().mockResolvedValue({
|
||||
bootstrapFiles: [],
|
||||
contextFiles: [],
|
||||
});
|
||||
return (await import("./cli-runner.js")).runCliAgent;
|
||||
}
|
||||
|
||||
export function stubBootstrapContext(params: {
|
||||
bootstrapFiles: WorkspaceBootstrapFile[];
|
||||
contextFiles: EmbeddedContextFile[];
|
||||
}) {
|
||||
hoisted.resolveBootstrapContextForRunMock.mockResolvedValueOnce(params);
|
||||
}
|
||||
|
||||
export function restoreCliRunnerPrepareTestDeps() {
|
||||
setCliRunnerPrepareTestDeps({
|
||||
makeBootstrapWarn: () => () => {},
|
||||
resolveBootstrapContextForRun: hoisted.resolveBootstrapContextForRunMock,
|
||||
});
|
||||
}
|
||||
|
||||
export async function runCliAgentWithBackendConfig(params: {
|
||||
runCliAgent: typeof import("./cli-runner.js").runCliAgent;
|
||||
backend: TestCliBackendConfig;
|
||||
runId: string;
|
||||
}) {
|
||||
await params.runCliAgent({
|
||||
sessionId: "s1",
|
||||
sessionFile: "/tmp/session.jsonl",
|
||||
workspaceDir: "/tmp",
|
||||
config: {
|
||||
agents: {
|
||||
defaults: {
|
||||
cliBackends: {
|
||||
"codex-cli": params.backend,
|
||||
},
|
||||
},
|
||||
},
|
||||
} satisfies OpenClawConfig,
|
||||
prompt: "hi",
|
||||
provider: "codex-cli",
|
||||
model: "gpt-5.4",
|
||||
timeoutMs: 1_000,
|
||||
runId: params.runId,
|
||||
cliSessionId: "thread-123",
|
||||
});
|
||||
}
|
||||
|
||||
export async function runExistingCodexCliAgent(params: {
|
||||
runCliAgent: typeof import("./cli-runner.js").runCliAgent;
|
||||
runId: string;
|
||||
cliSessionBindingAuthProfileId: string;
|
||||
authProfileId: string;
|
||||
}) {
|
||||
await params.runCliAgent({
|
||||
sessionId: "s1",
|
||||
sessionFile: "/tmp/session.jsonl",
|
||||
workspaceDir: "/tmp",
|
||||
config: EXISTING_CODEX_CONFIG,
|
||||
prompt: "hi",
|
||||
provider: "codex-cli",
|
||||
model: "gpt-5.4",
|
||||
timeoutMs: 1_000,
|
||||
runId: params.runId,
|
||||
cliSessionBinding: {
|
||||
sessionId: "thread-123",
|
||||
authProfileId: params.cliSessionBindingAuthProfileId,
|
||||
},
|
||||
authProfileId: params.authProfileId,
|
||||
});
|
||||
}
|
||||
|
||||
export async function withTempImageFile(
|
||||
prefix: string,
|
||||
): Promise<{ tempDir: string; sourceImage: string }> {
|
||||
const os = await import("node:os");
|
||||
const path = await import("node:path");
|
||||
const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), prefix));
|
||||
const sourceImage = path.join(tempDir, "image.png");
|
||||
await fs.writeFile(sourceImage, Buffer.from(SMALL_PNG_BASE64, "base64"));
|
||||
return { tempDir, sourceImage };
|
||||
}
|
||||
|
||||
// Ensure env stubs created in one test never leak into the next.
beforeEach(() => {
  vi.unstubAllEnvs();
});
|
||||
|
|
@ -1,89 +0,0 @@
|
|||
import { executePreparedCliRun } from "./cli-runner/execute.js";
|
||||
import { prepareCliRunContext } from "./cli-runner/prepare.js";
|
||||
import type { RunCliAgentParams } from "./cli-runner/types.js";
|
||||
import { FailoverError, resolveFailoverStatus } from "./failover-error.js";
|
||||
import { classifyFailoverReason, isFailoverErrorMessage } from "./pi-embedded-helpers.js";
|
||||
import type { EmbeddedPiRunResult } from "./pi-embedded-runner.js";
|
||||
|
||||
/**
 * Runs a single prompt through an external CLI backend and adapts the output
 * to the embedded-runner result shape.
 *
 * Flow: prepare the backend/run context, execute with the reusable CLI session
 * id, and on a `session_expired` failover retry once without a session id.
 * Non-Failover errors whose message matches a known failover pattern are
 * normalized into a FailoverError for upstream provider routing.
 */
export async function runCliAgent(params: RunCliAgentParams): Promise<EmbeddedPiRunResult> {
  const context = await prepareCliRunContext(params);

  // Wraps raw CLI output (text/usage/session id) into an EmbeddedPiRunResult,
  // attaching the CLI session binding metadata callers persist for resume.
  const buildCliRunResult = (resultParams: {
    output: Awaited<ReturnType<typeof executePreparedCliRun>>;
    effectiveCliSessionId?: string;
  }): EmbeddedPiRunResult => {
    const text = resultParams.output.text?.trim();
    // Empty/whitespace-only output yields no payloads rather than [{ text: "" }].
    const payloads = text ? [{ text }] : undefined;

    return {
      payloads,
      meta: {
        durationMs: Date.now() - context.started,
        systemPromptReport: context.systemPromptReport,
        agentMeta: {
          sessionId: resultParams.effectiveCliSessionId ?? params.sessionId ?? "",
          provider: params.provider,
          model: context.modelId,
          usage: resultParams.output.usage,
          // Binding is only recorded when the CLI reported a usable session id.
          ...(resultParams.effectiveCliSessionId
            ? {
                cliSessionBinding: {
                  sessionId: resultParams.effectiveCliSessionId,
                  ...(params.authProfileId ? { authProfileId: params.authProfileId } : {}),
                  ...(context.authEpoch ? { authEpoch: context.authEpoch } : {}),
                  ...(context.extraSystemPromptHash
                    ? { extraSystemPromptHash: context.extraSystemPromptHash }
                    : {}),
                  ...(context.preparedBackend.mcpConfigHash
                    ? { mcpConfigHash: context.preparedBackend.mcpConfigHash }
                    : {}),
                },
              }
            : {}),
        },
      },
    };
  };

  // Try with the provided CLI session ID first
  try {
    try {
      const output = await executePreparedCliRun(context, context.reusableCliSession.sessionId);
      // Prefer the session id the CLI reported over the one we passed in.
      const effectiveCliSessionId = output.sessionId ?? context.reusableCliSession.sessionId;
      return buildCliRunResult({ output, effectiveCliSessionId });
    } catch (err) {
      if (err instanceof FailoverError) {
        // Check if this is a session expired error and we have a session to clear
        if (
          err.reason === "session_expired" &&
          context.reusableCliSession.sessionId &&
          params.sessionKey
        ) {
          // Clear the expired session ID from the session entry
          // This requires access to the session store, which we don't have here
          // We'll need to modify the caller to handle this case

          // For now, retry without the session ID to create a new session
          const output = await executePreparedCliRun(context, undefined);
          const effectiveCliSessionId = output.sessionId;
          return buildCliRunResult({ output, effectiveCliSessionId });
        }
        throw err;
      }
      const message = err instanceof Error ? err.message : String(err);
      // Normalize recognizable failover messages into a typed FailoverError.
      if (isFailoverErrorMessage(message, { provider: params.provider })) {
        const reason = classifyFailoverReason(message, { provider: params.provider }) ?? "unknown";
        const status = resolveFailoverStatus(reason);
        throw new FailoverError(message, {
          reason,
          provider: params.provider,
          model: context.modelId,
          status,
        });
      }
      throw err;
    }
  } finally {
    // Always release backend resources (temp configs, etc.) regardless of outcome.
    await context.preparedBackend.cleanup?.();
  }
}
|
||||
|
|
@ -1,179 +0,0 @@
|
|||
import fs from "node:fs/promises";
|
||||
import { afterEach, describe, expect, it } from "vitest";
|
||||
import type { OpenClawConfig } from "../../config/config.js";
|
||||
import {
|
||||
createBundleMcpTempHarness,
|
||||
createBundleProbePlugin,
|
||||
} from "../../plugins/bundle-mcp.test-support.js";
|
||||
import { captureEnv } from "../../test-utils/env.js";
|
||||
import { prepareCliBundleMcpConfig } from "./bundle-mcp.js";
|
||||
|
||||
// Shared temp-dir harness; each test creates its own dirs, cleaned up below.
const tempHarness = createBundleMcpTempHarness();

afterEach(async () => {
  await tempHarness.cleanup();
});

describe("prepareCliBundleMcpConfig", () => {
  it("injects a strict empty --mcp-config overlay for bundle-MCP-enabled backends without servers", async () => {
    const workspaceDir = await tempHarness.createTempDir("openclaw-cli-bundle-mcp-empty-");

    const prepared = await prepareCliBundleMcpConfig({
      enabled: true,
      backend: {
        command: "node",
        args: ["./fake-claude.mjs"],
      },
      workspaceDir,
      config: {},
    });

    // Even with no servers configured, a strict empty overlay must be passed
    // so the CLI does not inherit ambient user/global MCP servers.
    const configFlagIndex = prepared.backend.args?.indexOf("--mcp-config") ?? -1;
    expect(configFlagIndex).toBeGreaterThanOrEqual(0);
    expect(prepared.backend.args).toContain("--strict-mcp-config");
    const generatedConfigPath = prepared.backend.args?.[configFlagIndex + 1];
    expect(typeof generatedConfigPath).toBe("string");
    const raw = JSON.parse(await fs.readFile(generatedConfigPath as string, "utf-8")) as {
      mcpServers?: Record<string, unknown>;
    };
    expect(raw.mcpServers).toEqual({});

    await prepared.cleanup?.();
  });

  it("injects a merged --mcp-config overlay for bundle-MCP-enabled backends", async () => {
    // HOME is redirected so the probe plugin is discovered from a temp dir.
    const env = captureEnv(["HOME"]);
    try {
      const homeDir = await tempHarness.createTempDir("openclaw-cli-bundle-mcp-home-");
      const workspaceDir = await tempHarness.createTempDir("openclaw-cli-bundle-mcp-workspace-");
      process.env.HOME = homeDir;

      const { serverPath } = await createBundleProbePlugin(homeDir);

      const config: OpenClawConfig = {
        plugins: {
          entries: {
            "bundle-probe": { enabled: true },
          },
        },
      };

      const prepared = await prepareCliBundleMcpConfig({
        enabled: true,
        backend: {
          command: "node",
          args: ["./fake-claude.mjs"],
        },
        workspaceDir,
        config,
      });

      const configFlagIndex = prepared.backend.args?.indexOf("--mcp-config") ?? -1;
      expect(configFlagIndex).toBeGreaterThanOrEqual(0);
      expect(prepared.backend.args).toContain("--strict-mcp-config");
      const generatedConfigPath = prepared.backend.args?.[configFlagIndex + 1];
      expect(typeof generatedConfigPath).toBe("string");
      const raw = JSON.parse(await fs.readFile(generatedConfigPath as string, "utf-8")) as {
        mcpServers?: Record<string, { args?: string[] }>;
      };
      // realpath: the generated config resolves symlinked plugin paths.
      expect(raw.mcpServers?.bundleProbe?.args).toEqual([await fs.realpath(serverPath)]);
      // mcpConfigHash is a sha256 hex digest of the serialized config.
      expect(prepared.mcpConfigHash).toMatch(/^[0-9a-f]{64}$/);

      await prepared.cleanup?.();
    } finally {
      env.restore();
    }
  });

  it("merges loopback overlay config with bundle MCP servers", async () => {
    const env = captureEnv(["HOME"]);
    try {
      const homeDir = await tempHarness.createTempDir("openclaw-cli-bundle-mcp-home-");
      const workspaceDir = await tempHarness.createTempDir("openclaw-cli-bundle-mcp-workspace-");
      process.env.HOME = homeDir;

      await createBundleProbePlugin(homeDir);

      const config: OpenClawConfig = {
        plugins: {
          entries: {
            "bundle-probe": { enabled: true },
          },
        },
      };

      const prepared = await prepareCliBundleMcpConfig({
        enabled: true,
        backend: {
          command: "node",
          args: ["./fake-claude.mjs"],
        },
        workspaceDir,
        config,
        // Caller-supplied loopback server must coexist with bundle servers.
        additionalConfig: {
          mcpServers: {
            openclaw: {
              type: "http",
              url: "http://127.0.0.1:23119/mcp",
              headers: {
                Authorization: "Bearer ${OPENCLAW_MCP_TOKEN}",
              },
            },
          },
        },
      });

      const configFlagIndex = prepared.backend.args?.indexOf("--mcp-config") ?? -1;
      const generatedConfigPath = prepared.backend.args?.[configFlagIndex + 1];
      const raw = JSON.parse(await fs.readFile(generatedConfigPath as string, "utf-8")) as {
        mcpServers?: Record<string, { url?: string; headers?: Record<string, string> }>;
      };
      expect(Object.keys(raw.mcpServers ?? {}).toSorted()).toEqual(["bundleProbe", "openclaw"]);
      expect(raw.mcpServers?.openclaw?.url).toBe("http://127.0.0.1:23119/mcp");
      // The ${...} placeholder is written literally; expansion happens later.
      expect(raw.mcpServers?.openclaw?.headers?.Authorization).toBe("Bearer ${OPENCLAW_MCP_TOKEN}");

      await prepared.cleanup?.();
    } finally {
      env.restore();
    }
  });

  it("preserves extra env values alongside generated MCP config", async () => {
    const workspaceDir = await tempHarness.createTempDir("openclaw-cli-bundle-mcp-env-");

    const prepared = await prepareCliBundleMcpConfig({
      enabled: true,
      backend: {
        command: "node",
        args: ["./fake-claude.mjs"],
      },
      workspaceDir,
      config: {},
      env: {
        OPENCLAW_MCP_TOKEN: "loopback-token-123",
        OPENCLAW_MCP_SESSION_KEY: "agent:main:telegram:group:chat123",
      },
    });

    // The env map must pass through unchanged.
    expect(prepared.env).toEqual({
      OPENCLAW_MCP_TOKEN: "loopback-token-123",
      OPENCLAW_MCP_SESSION_KEY: "agent:main:telegram:group:chat123",
    });

    await prepared.cleanup?.();
  });

  it("leaves args untouched when bundle MCP is disabled", async () => {
    const prepared = await prepareCliBundleMcpConfig({
      enabled: false,
      backend: {
        command: "node",
        args: ["./fake-cli.mjs"],
      },
      workspaceDir: "/tmp/openclaw-bundle-mcp-disabled",
    });

    // Disabled path: no flags injected, no temp file, so no cleanup needed.
    expect(prepared.backend.args).toEqual(["./fake-cli.mjs"]);
    expect(prepared.cleanup).toBeUndefined();
  });
});
|
||||
|
|
@ -1,129 +0,0 @@
|
|||
import crypto from "node:crypto";
|
||||
import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import type { OpenClawConfig } from "../../config/config.js";
|
||||
import { applyMergePatch } from "../../config/merge-patch.js";
|
||||
import type { CliBackendConfig } from "../../config/types.js";
|
||||
import {
|
||||
extractMcpServerMap,
|
||||
loadEnabledBundleMcpConfig,
|
||||
type BundleMcpConfig,
|
||||
} from "../../plugins/bundle-mcp.js";
|
||||
|
||||
/** Result of preparing a CLI backend's MCP configuration. */
type PreparedCliBundleMcpConfig = {
  // Backend with --strict-mcp-config/--mcp-config args injected (or the
  // original backend untouched when bundle MCP is disabled).
  backend: CliBackendConfig;
  // Removes the generated temp config dir; absent when nothing was written.
  cleanup?: () => Promise<void>;
  // sha256 hex digest of the serialized merged config, for session binding.
  mcpConfigHash?: string;
  // Extra environment values passed through for the CLI process.
  env?: Record<string, string>;
};
|
||||
|
||||
async function readExternalMcpConfig(configPath: string): Promise<BundleMcpConfig> {
|
||||
try {
|
||||
const raw = JSON.parse(await fs.readFile(configPath, "utf-8")) as unknown;
|
||||
return { mcpServers: extractMcpServerMap(raw) };
|
||||
} catch {
|
||||
return { mcpServers: {} };
|
||||
}
|
||||
}
|
||||
|
||||
function findMcpConfigPath(args?: string[]): string | undefined {
|
||||
if (!args?.length) {
|
||||
return undefined;
|
||||
}
|
||||
for (let i = 0; i < args.length; i += 1) {
|
||||
const arg = args[i] ?? "";
|
||||
if (arg === "--mcp-config") {
|
||||
const next = args[i + 1];
|
||||
return typeof next === "string" && next.trim() ? next.trim() : undefined;
|
||||
}
|
||||
if (arg.startsWith("--mcp-config=")) {
|
||||
const inline = arg.slice("--mcp-config=".length).trim();
|
||||
return inline || undefined;
|
||||
}
|
||||
}
|
||||
return undefined;
|
||||
}
|
||||
|
||||
function injectMcpConfigArgs(args: string[] | undefined, mcpConfigPath: string): string[] {
|
||||
const next: string[] = [];
|
||||
for (let i = 0; i < (args?.length ?? 0); i += 1) {
|
||||
const arg = args?.[i] ?? "";
|
||||
if (arg === "--strict-mcp-config") {
|
||||
continue;
|
||||
}
|
||||
if (arg === "--mcp-config") {
|
||||
i += 1;
|
||||
continue;
|
||||
}
|
||||
if (arg.startsWith("--mcp-config=")) {
|
||||
continue;
|
||||
}
|
||||
next.push(arg);
|
||||
}
|
||||
next.push("--strict-mcp-config", "--mcp-config", mcpConfigPath);
|
||||
return next;
|
||||
}
|
||||
|
||||
/**
 * Build the merged, strict MCP config overlay for a CLI backend run.
 *
 * Merge order (later wins via JSON merge-patch): any `--mcp-config` file the
 * backend already referenced, then enabled bundle-plugin servers, then the
 * caller's `additionalConfig`. The merged config is written to a fresh temp
 * file and wired into both `args` and `resumeArgs` with
 * `--strict-mcp-config --mcp-config <path>`.
 *
 * @param params.enabled - When false, returns the backend/env untouched.
 * @param params.backend - CLI backend whose args are rewritten.
 * @param params.workspaceDir - Base dir for resolving relative config paths.
 * @param params.config - Optional app config used to load bundle plugins.
 * @param params.additionalConfig - Highest-precedence extra servers.
 * @param params.env - Extra env passed through unchanged.
 * @param params.warn - Optional sink for bundle-plugin diagnostics.
 * @returns Prepared backend plus config hash, env, and temp-dir cleanup.
 */
export async function prepareCliBundleMcpConfig(params: {
  enabled: boolean;
  backend: CliBackendConfig;
  workspaceDir: string;
  config?: OpenClawConfig;
  additionalConfig?: BundleMcpConfig;
  env?: Record<string, string>;
  warn?: (message: string) => void;
}): Promise<PreparedCliBundleMcpConfig> {
  if (!params.enabled) {
    return { backend: params.backend, env: params.env };
  }

  // Prefer a config referenced by resumeArgs, falling back to args.
  const existingMcpConfigPath =
    findMcpConfigPath(params.backend.resumeArgs) ?? findMcpConfigPath(params.backend.args);
  let mergedConfig: BundleMcpConfig = { mcpServers: {} };

  if (existingMcpConfigPath) {
    // Relative paths are resolved against the workspace, not the CWD.
    const resolvedExistingPath = path.isAbsolute(existingMcpConfigPath)
      ? existingMcpConfigPath
      : path.resolve(params.workspaceDir, existingMcpConfigPath);
    mergedConfig = applyMergePatch(
      mergedConfig,
      await readExternalMcpConfig(resolvedExistingPath),
    ) as BundleMcpConfig;
  }

  const bundleConfig = loadEnabledBundleMcpConfig({
    workspaceDir: params.workspaceDir,
    cfg: params.config,
  });
  // Surface skipped plugins to the caller instead of failing the run.
  for (const diagnostic of bundleConfig.diagnostics) {
    params.warn?.(`bundle MCP skipped for ${diagnostic.pluginId}: ${diagnostic.message}`);
  }
  mergedConfig = applyMergePatch(mergedConfig, bundleConfig.config) as BundleMcpConfig;
  if (params.additionalConfig) {
    mergedConfig = applyMergePatch(mergedConfig, params.additionalConfig) as BundleMcpConfig;
  }

  // Always pass an explicit strict MCP config for background CLI runs so they
  // do not inherit ambient user/global MCP servers (for example Playwright).
  const tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-cli-mcp-"));
  const mcpConfigPath = path.join(tempDir, "mcp.json");
  const serializedConfig = `${JSON.stringify(mergedConfig, null, 2)}\n`;
  await fs.writeFile(mcpConfigPath, serializedConfig, "utf-8");

  return {
    backend: {
      ...params.backend,
      args: injectMcpConfigArgs(params.backend.args, mcpConfigPath),
      // resumeArgs falls back to args so resumed runs get the same overlay.
      resumeArgs: injectMcpConfigArgs(
        params.backend.resumeArgs ?? params.backend.args ?? [],
        mcpConfigPath,
      ),
    },
    // Hash of the exact serialized bytes, used for session-binding identity.
    mcpConfigHash: crypto.createHash("sha256").update(serializedConfig).digest("hex"),
    env: params.env,
    cleanup: async () => {
      await fs.rm(tempDir, { recursive: true, force: true });
    },
  };
}
|
||||
|
|
@ -1,347 +0,0 @@
|
|||
import { shouldLogVerbose } from "../../globals.js";
|
||||
import { emitAgentEvent } from "../../infra/agent-events.js";
|
||||
import { isTruthyEnvValue } from "../../infra/env.js";
|
||||
import { requestHeartbeatNow as requestHeartbeatNowImpl } from "../../infra/heartbeat-wake.js";
|
||||
import { sanitizeHostExecEnv } from "../../infra/host-env-security.js";
|
||||
import { enqueueSystemEvent as enqueueSystemEventImpl } from "../../infra/system-events.js";
|
||||
import { getProcessSupervisor as getProcessSupervisorImpl } from "../../process/supervisor/index.js";
|
||||
import { scopedHeartbeatWakeOptions } from "../../routing/session-key.js";
|
||||
import { prependBootstrapPromptWarning } from "../bootstrap-budget.js";
|
||||
import { createCliJsonlStreamingParser, parseCliOutput, type CliOutput } from "../cli-output.js";
|
||||
import { FailoverError, resolveFailoverStatus } from "../failover-error.js";
|
||||
import { classifyFailoverReason } from "../pi-embedded-helpers.js";
|
||||
import {
|
||||
appendImagePathsToPrompt,
|
||||
buildCliSupervisorScopeKey,
|
||||
buildCliArgs,
|
||||
resolveCliRunQueueKey,
|
||||
enqueueCliRun,
|
||||
loadPromptRefImages,
|
||||
resolveCliNoOutputTimeoutMs,
|
||||
resolvePromptInput,
|
||||
resolveSessionIdToSend,
|
||||
resolveSystemPromptUsage,
|
||||
writeCliImages,
|
||||
} from "./helpers.js";
|
||||
import { cliBackendLog, CLI_BACKEND_LOG_OUTPUT_ENV } from "./log.js";
|
||||
import type { PreparedCliRunContext } from "./types.js";
|
||||
|
||||
// Mutable indirection over process-level dependencies so tests can swap in
// fakes without module mocking (see setCliRunnerExecuteTestDeps below).
const executeDeps = {
  getProcessSupervisor: getProcessSupervisorImpl,
  enqueueSystemEvent: enqueueSystemEventImpl,
  requestHeartbeatNow: requestHeartbeatNowImpl,
};

/**
 * Test hook: override any subset of the runner's process-level dependencies.
 * Overrides persist for the module lifetime; callers are responsible for
 * restoring the originals.
 */
export function setCliRunnerExecuteTestDeps(overrides: Partial<typeof executeDeps>): void {
  Object.assign(executeDeps, overrides);
}
|
||||
|
||||
function createCliAbortError(): Error {
|
||||
const error = new Error("CLI run aborted");
|
||||
error.name = "AbortError";
|
||||
return error;
|
||||
}
|
||||
|
||||
function buildCliLogArgs(params: {
|
||||
args: string[];
|
||||
systemPromptArg?: string;
|
||||
sessionArg?: string;
|
||||
modelArg?: string;
|
||||
imageArg?: string;
|
||||
argsPrompt?: string;
|
||||
}): string[] {
|
||||
const logArgs: string[] = [];
|
||||
for (let i = 0; i < params.args.length; i += 1) {
|
||||
const arg = params.args[i] ?? "";
|
||||
if (arg === params.systemPromptArg) {
|
||||
const systemPromptValue = params.args[i + 1] ?? "";
|
||||
logArgs.push(arg, `<systemPrompt:${systemPromptValue.length} chars>`);
|
||||
i += 1;
|
||||
continue;
|
||||
}
|
||||
if (arg === params.sessionArg) {
|
||||
logArgs.push(arg, params.args[i + 1] ?? "");
|
||||
i += 1;
|
||||
continue;
|
||||
}
|
||||
if (arg === params.modelArg) {
|
||||
logArgs.push(arg, params.args[i + 1] ?? "");
|
||||
i += 1;
|
||||
continue;
|
||||
}
|
||||
if (arg === params.imageArg) {
|
||||
logArgs.push(arg, "<image>");
|
||||
i += 1;
|
||||
continue;
|
||||
}
|
||||
logArgs.push(arg);
|
||||
}
|
||||
if (params.argsPrompt) {
|
||||
const promptIndex = logArgs.indexOf(params.argsPrompt);
|
||||
if (promptIndex >= 0) {
|
||||
logArgs[promptIndex] = `<prompt:${params.argsPrompt.length} chars>`;
|
||||
}
|
||||
}
|
||||
return logArgs;
|
||||
}
|
||||
|
||||
/**
 * Execute one prepared CLI run under the process supervisor.
 *
 * Resolves session/resume behavior, prepares prompt + images, builds the
 * argv, then runs the CLI serialized on a queue key. Streams assistant
 * deltas for JSONL backends, honors abort signals, applies output/overall
 * watchdog timeouts, and parses the final stdout into `CliOutput`.
 *
 * @param context - The prepared run context (backend, prompts, workspace).
 * @param cliSessionIdToUse - Existing CLI session to resume, if any.
 * @returns Parsed CLI output (text, usage, session ID).
 * @throws An "AbortError" when the run is cancelled via the abort signal;
 *         FailoverError on timeouts and non-zero/abnormal exits.
 */
export async function executePreparedCliRun(
  context: PreparedCliRunContext,
  cliSessionIdToUse?: string,
): Promise<CliOutput> {
  const params = context.params;
  if (params.abortSignal?.aborted) {
    throw createCliAbortError();
  }
  const backend = context.preparedBackend.backend;
  const { sessionId: resolvedSessionId, isNew } = resolveSessionIdToSend({
    backend,
    cliSessionId: cliSessionIdToUse,
  });
  // Resume only when we were handed a session AND the backend defines
  // dedicated resume args.
  const useResume = Boolean(
    cliSessionIdToUse && resolvedSessionId && backend.resumeArgs && backend.resumeArgs.length > 0,
  );
  const systemPromptArg = resolveSystemPromptUsage({
    backend,
    isNewSession: isNew,
    systemPrompt: context.systemPrompt,
  });

  let imagePaths: string[] | undefined;
  let cleanupImages: (() => Promise<void>) | undefined;
  let prompt = prependBootstrapPromptWarning(params.prompt, context.bootstrapPromptWarningLines, {
    preserveExactPrompt: context.heartbeatPrompt,
  });
  // Explicit images win; otherwise scan the prompt for image references.
  const resolvedImages =
    params.images && params.images.length > 0
      ? params.images
      : await loadPromptRefImages({ prompt, workspaceDir: context.workspaceDir });
  if (resolvedImages.length > 0) {
    const imagePayload = await writeCliImages(resolvedImages);
    imagePaths = imagePayload.paths;
    cleanupImages = imagePayload.cleanup;
    if (!backend.imageArg) {
      // Backend has no image flag: fall back to appending file paths inline.
      prompt = appendImagePathsToPrompt(prompt, imagePaths);
    }
  }

  const { argsPrompt, stdin } = resolvePromptInput({
    backend,
    prompt,
  });
  const stdinPayload = stdin ?? "";
  const baseArgs = useResume ? (backend.resumeArgs ?? backend.args ?? []) : (backend.args ?? []);
  // {sessionId} placeholders in resume args are substituted here.
  const resolvedArgs = useResume
    ? baseArgs.map((entry) => entry.replaceAll("{sessionId}", resolvedSessionId ?? ""))
    : baseArgs;
  const args = buildCliArgs({
    backend,
    baseArgs: resolvedArgs,
    modelId: context.normalizedModel,
    sessionId: resolvedSessionId,
    systemPrompt: systemPromptArg,
    imagePaths,
    promptArg: argsPrompt,
    useResume,
  });

  const queueKey = resolveCliRunQueueKey({
    backendId: context.backendResolved.id,
    serialize: backend.serialize,
    runId: params.runId,
    workspaceDir: context.workspaceDir,
    cliSessionId: useResume ? resolvedSessionId : undefined,
  });

  try {
    // Serialize runs that share a queue key (typically per backend).
    return await enqueueCliRun(queueKey, async () => {
      cliBackendLog.info(
        `cli exec: provider=${params.provider} model=${context.normalizedModel} promptChars=${params.prompt.length}`,
      );
      const logOutputText = isTruthyEnvValue(process.env[CLI_BACKEND_LOG_OUTPUT_ENV]);
      if (logOutputText) {
        const logArgs = buildCliLogArgs({
          args,
          systemPromptArg: backend.systemPromptArg,
          sessionArg: backend.sessionArg,
          modelArg: backend.modelArg,
          imageArg: backend.imageArg,
          argsPrompt,
        });
        cliBackendLog.info(`cli argv: ${backend.command} ${logArgs.join(" ")}`);
      }

      // Layered env: sanitized host env, minus clearEnv keys, plus sanitized
      // backend env, plus any env prepared for this run (highest precedence).
      const env = (() => {
        const next = sanitizeHostExecEnv({
          baseEnv: process.env,
          blockPathOverrides: true,
        });
        for (const key of backend.clearEnv ?? []) {
          delete next[key];
        }
        if (backend.env && Object.keys(backend.env).length > 0) {
          Object.assign(
            next,
            sanitizeHostExecEnv({
              baseEnv: {},
              overrides: backend.env,
              blockPathOverrides: true,
            }),
          );
        }
        Object.assign(next, context.preparedBackend.env);
        return next;
      })();
      const noOutputTimeoutMs = resolveCliNoOutputTimeoutMs({
        backend,
        timeoutMs: params.timeoutMs,
        useResume,
      });
      // JSONL backends get a streaming parser that forwards assistant deltas
      // as agent events while the process runs.
      const streamingParser =
        backend.output === "jsonl"
          ? createCliJsonlStreamingParser({
              backend,
              providerId: context.backendResolved.id,
              onAssistantDelta: ({ text, delta }) => {
                emitAgentEvent({
                  runId: params.runId,
                  stream: "assistant",
                  data: {
                    text,
                    delta,
                  },
                });
              },
            })
          : null;
      const supervisor = executeDeps.getProcessSupervisor();
      const scopeKey = buildCliSupervisorScopeKey({
        backend,
        backendId: context.backendResolved.id,
        cliSessionId: useResume ? resolvedSessionId : undefined,
      });

      const managedRun = await supervisor.spawn({
        sessionId: params.sessionId,
        backendId: context.backendResolved.id,
        scopeKey,
        // A resumed session replaces any prior process in the same scope.
        replaceExistingScope: Boolean(useResume && scopeKey),
        mode: "child",
        argv: [backend.command, ...args],
        timeoutMs: params.timeoutMs,
        noOutputTimeoutMs,
        cwd: context.workspaceDir,
        env,
        input: stdinPayload,
        onStdout: streamingParser ? (chunk: string) => streamingParser.push(chunk) : undefined,
      });
      // Expose a cancel handle to the reply operation while the run lives.
      const replyBackendHandle = params.replyOperation
        ? {
            kind: "cli" as const,
            cancel: () => {
              managedRun.cancel("manual-cancel");
            },
            isStreaming: () => false,
          }
        : undefined;
      if (replyBackendHandle) {
        params.replyOperation?.attachBackend(replyBackendHandle);
      }
      const abortManagedRun = () => {
        managedRun.cancel("manual-cancel");
      };
      params.abortSignal?.addEventListener("abort", abortManagedRun, { once: true });
      // Cover the race where abort fired between spawn and listener attach.
      if (params.abortSignal?.aborted) {
        abortManagedRun();
      }
      let result: Awaited<ReturnType<typeof managedRun.wait>>;
      try {
        result = await managedRun.wait();
      } finally {
        // Always detach the handle/listener, even when wait() throws.
        if (replyBackendHandle) {
          params.replyOperation?.detachBackend(replyBackendHandle);
        }
        params.abortSignal?.removeEventListener("abort", abortManagedRun);
      }
      streamingParser?.finish();
      if (params.abortSignal?.aborted && result.reason === "manual-cancel") {
        throw createCliAbortError();
      }

      const stdout = result.stdout.trim();
      const stderr = result.stderr.trim();
      if (logOutputText) {
        if (stdout) {
          cliBackendLog.info(`cli stdout:\n${stdout}`);
        }
        if (stderr) {
          cliBackendLog.info(`cli stderr:\n${stderr}`);
        }
      }
      if (shouldLogVerbose()) {
        if (stdout) {
          cliBackendLog.debug(`cli stdout:\n${stdout}`);
        }
        if (stderr) {
          cliBackendLog.debug(`cli stderr:\n${stderr}`);
        }
      }

      if (result.exitCode !== 0 || result.reason !== "exit") {
        // Watchdog kill: the process produced no output for too long.
        if (result.reason === "no-output-timeout" || result.noOutputTimedOut) {
          const timeoutReason = `CLI produced no output for ${Math.round(noOutputTimeoutMs / 1000)}s and was terminated.`;
          cliBackendLog.warn(
            `cli watchdog timeout: provider=${params.provider} model=${context.modelId} session=${resolvedSessionId ?? params.sessionId} noOutputTimeoutMs=${noOutputTimeoutMs} pid=${managedRun.pid ?? "unknown"}`,
          );
          if (params.sessionKey) {
            // Tell the session what happened and wake the heartbeat so the
            // agent can react to the stall.
            const stallNotice = [
              `CLI agent (${params.provider}) produced no output for ${Math.round(noOutputTimeoutMs / 1000)}s and was terminated.`,
              "It may have been waiting for interactive input or an approval prompt.",
              "For Claude Code, prefer --permission-mode bypassPermissions --print.",
            ].join(" ");
            executeDeps.enqueueSystemEvent(stallNotice, { sessionKey: params.sessionKey });
            executeDeps.requestHeartbeatNow(
              scopedHeartbeatWakeOptions(params.sessionKey, { reason: "cli:watchdog:stall" }),
            );
          }
          throw new FailoverError(timeoutReason, {
            reason: "timeout",
            provider: params.provider,
            model: context.modelId,
            status: resolveFailoverStatus("timeout"),
          });
        }
        if (result.reason === "overall-timeout") {
          const timeoutReason = `CLI exceeded timeout (${Math.round(params.timeoutMs / 1000)}s) and was terminated.`;
          throw new FailoverError(timeoutReason, {
            reason: "timeout",
            provider: params.provider,
            model: context.modelId,
            status: resolveFailoverStatus("timeout"),
          });
        }
        // Generic failure: classify the most informative output we have.
        const err = stderr || stdout || "CLI failed.";
        const reason = classifyFailoverReason(err, { provider: params.provider }) ?? "unknown";
        const status = resolveFailoverStatus(reason);
        throw new FailoverError(err, {
          reason,
          provider: params.provider,
          model: context.modelId,
          status,
        });
      }

      return parseCliOutput({
        raw: stdout,
        backend,
        providerId: context.backendResolved.id,
        outputMode: useResume ? (backend.resumeOutput ?? backend.output) : backend.output,
        fallbackSessionId: resolvedSessionId,
      });
    });
  } finally {
    // Temp image files are removed regardless of success or failure.
    if (cleanupImages) {
      await cleanupImages();
    }
  }
}
|
||||
|
|
@ -1,296 +0,0 @@
|
|||
import crypto from "node:crypto";
|
||||
import fs from "node:fs/promises";
|
||||
import os from "node:os";
|
||||
import path from "node:path";
|
||||
import type { AgentTool } from "@mariozechner/pi-agent-core";
|
||||
import type { ImageContent } from "@mariozechner/pi-ai";
|
||||
import { KeyedAsyncQueue } from "openclaw/plugin-sdk/keyed-async-queue";
|
||||
import type { ThinkLevel } from "../../auto-reply/thinking.js";
|
||||
import type { OpenClawConfig } from "../../config/config.js";
|
||||
import type { CliBackendConfig } from "../../config/types.js";
|
||||
import { resolvePreferredOpenClawTmpDir } from "../../infra/tmp-openclaw-dir.js";
|
||||
import { MAX_IMAGE_BYTES } from "../../media/constants.js";
|
||||
import { extensionForMime } from "../../media/mime.js";
|
||||
import { buildTtsSystemPromptHint } from "../../tts/tts.js";
|
||||
import { buildModelAliasLines } from "../model-alias-lines.js";
|
||||
import { resolveDefaultModelForAgent } from "../model-selection.js";
|
||||
import { resolveOwnerDisplaySetting } from "../owner-display.js";
|
||||
import type { EmbeddedContextFile } from "../pi-embedded-helpers.js";
|
||||
import { detectImageReferences, loadImageFromRef } from "../pi-embedded-runner/run/images.js";
|
||||
import type { SandboxFsBridge } from "../sandbox/fs-bridge.js";
|
||||
import { detectRuntimeShell } from "../shell-utils.js";
|
||||
import { stripSystemPromptCacheBoundary } from "../system-prompt-cache-boundary.js";
|
||||
import { buildSystemPromptParams } from "../system-prompt-params.js";
|
||||
import { buildAgentSystemPrompt } from "../system-prompt.js";
|
||||
import { sanitizeImageBlocks } from "../tool-images.js";
|
||||
export { buildCliSupervisorScopeKey, resolveCliNoOutputTimeoutMs } from "./reliability.js";
|
||||
|
||||
// Module-level queue serializing CLI runs that share the same key.
const CLI_RUN_QUEUE = new KeyedAsyncQueue();
/**
 * Enqueue a CLI run behind any other run with the same key.
 * Runs with different keys proceed concurrently.
 */
export function enqueueCliRun<T>(key: string, task: () => Promise<T>): Promise<T> {
  return CLI_RUN_QUEUE.enqueue(key, task);
}
|
||||
|
||||
export function resolveCliRunQueueKey(params: {
|
||||
backendId: string;
|
||||
serialize?: boolean;
|
||||
runId: string;
|
||||
workspaceDir: string;
|
||||
cliSessionId?: string;
|
||||
}): string {
|
||||
if (params.serialize === false) {
|
||||
return `${params.backendId}:${params.runId}`;
|
||||
}
|
||||
return params.backendId;
|
||||
}
|
||||
|
||||
/**
 * Compose the full agent system prompt for a CLI-backed run.
 *
 * Resolves the agent's default model, gathers runtime/user-time parameters,
 * derives the TTS hint and owner-display settings, and delegates the final
 * assembly to `buildAgentSystemPrompt`.
 *
 * @param params.workspaceDir - Agent workspace root.
 * @param params.config - Optional app config; drives most optional sections.
 * @param params.tools - Tools whose names are advertised in the prompt.
 * @param params.modelDisplay - Human-readable label of the active model.
 * @param params.agentId - Agent whose defaults are resolved.
 * @param params.backendId - Accepted but not read here (TODO: confirm use).
 * @returns The assembled system prompt (return type inferred from
 *          `buildAgentSystemPrompt`).
 */
export function buildSystemPrompt(params: {
  workspaceDir: string;
  config?: OpenClawConfig;
  defaultThinkLevel?: ThinkLevel;
  extraSystemPrompt?: string;
  ownerNumbers?: string[];
  heartbeatPrompt?: string;
  docsPath?: string;
  tools: AgentTool[];
  contextFiles?: EmbeddedContextFile[];
  modelDisplay: string;
  agentId?: string;
  backendId?: string;
}) {
  const defaultModelRef = resolveDefaultModelForAgent({
    cfg: params.config ?? {},
    agentId: params.agentId,
  });
  const defaultModelLabel = `${defaultModelRef.provider}/${defaultModelRef.model}`;
  // Runtime block: host identity, OS, node version, models, and shell.
  const { runtimeInfo, userTimezone, userTime, userTimeFormat } = buildSystemPromptParams({
    config: params.config,
    agentId: params.agentId,
    workspaceDir: params.workspaceDir,
    cwd: process.cwd(),
    runtime: {
      host: "openclaw",
      os: `${os.type()} ${os.release()}`,
      arch: os.arch(),
      node: process.version,
      model: params.modelDisplay,
      defaultModel: defaultModelLabel,
      shell: detectRuntimeShell(),
    },
  });
  const ttsHint = params.config ? buildTtsSystemPromptHint(params.config) : undefined;
  const ownerDisplay = resolveOwnerDisplaySetting(params.config);
  const prompt = buildAgentSystemPrompt({
    workspaceDir: params.workspaceDir,
    defaultThinkLevel: params.defaultThinkLevel,
    extraSystemPrompt: params.extraSystemPrompt,
    ownerNumbers: params.ownerNumbers,
    ownerDisplay: ownerDisplay.ownerDisplay,
    ownerDisplaySecret: ownerDisplay.ownerDisplaySecret,
    reasoningTagHint: false,
    heartbeatPrompt: params.heartbeatPrompt,
    docsPath: params.docsPath,
    // ACP is opt-out: enabled unless config explicitly sets it to false.
    acpEnabled: params.config?.acp?.enabled !== false,
    runtimeInfo,
    toolNames: params.tools.map((tool) => tool.name),
    modelAliasLines: buildModelAliasLines(params.config),
    userTimezone,
    userTime,
    userTimeFormat,
    contextFiles: params.contextFiles,
    ttsHint,
    memoryCitationsMode: params.config?.memory?.citations,
  });
  return prompt;
}
|
||||
|
||||
export function normalizeCliModel(modelId: string, backend: CliBackendConfig): string {
|
||||
const trimmed = modelId.trim();
|
||||
if (!trimmed) {
|
||||
return trimmed;
|
||||
}
|
||||
const direct = backend.modelAliases?.[trimmed];
|
||||
if (direct) {
|
||||
return direct;
|
||||
}
|
||||
const lower = trimmed.toLowerCase();
|
||||
const mapped = backend.modelAliases?.[lower];
|
||||
if (mapped) {
|
||||
return mapped;
|
||||
}
|
||||
return trimmed;
|
||||
}
|
||||
|
||||
export function resolveSystemPromptUsage(params: {
|
||||
backend: CliBackendConfig;
|
||||
isNewSession: boolean;
|
||||
systemPrompt?: string;
|
||||
}): string | null {
|
||||
const systemPrompt = params.systemPrompt?.trim();
|
||||
if (!systemPrompt) {
|
||||
return null;
|
||||
}
|
||||
const when = params.backend.systemPromptWhen ?? "first";
|
||||
if (when === "never") {
|
||||
return null;
|
||||
}
|
||||
if (when === "first" && !params.isNewSession) {
|
||||
return null;
|
||||
}
|
||||
if (!params.backend.systemPromptArg?.trim()) {
|
||||
return null;
|
||||
}
|
||||
return systemPrompt;
|
||||
}
|
||||
|
||||
export function resolveSessionIdToSend(params: {
|
||||
backend: CliBackendConfig;
|
||||
cliSessionId?: string;
|
||||
}): { sessionId?: string; isNew: boolean } {
|
||||
const mode = params.backend.sessionMode ?? "always";
|
||||
const existing = params.cliSessionId?.trim();
|
||||
if (mode === "none") {
|
||||
return { sessionId: undefined, isNew: !existing };
|
||||
}
|
||||
if (mode === "existing") {
|
||||
return { sessionId: existing, isNew: !existing };
|
||||
}
|
||||
if (existing) {
|
||||
return { sessionId: existing, isNew: false };
|
||||
}
|
||||
return { sessionId: crypto.randomUUID(), isNew: true };
|
||||
}
|
||||
|
||||
export function resolvePromptInput(params: { backend: CliBackendConfig; prompt: string }): {
|
||||
argsPrompt?: string;
|
||||
stdin?: string;
|
||||
} {
|
||||
const inputMode = params.backend.input ?? "arg";
|
||||
if (inputMode === "stdin") {
|
||||
return { stdin: params.prompt };
|
||||
}
|
||||
if (params.backend.maxPromptArgChars && params.prompt.length > params.backend.maxPromptArgChars) {
|
||||
return { stdin: params.prompt };
|
||||
}
|
||||
return { argsPrompt: params.prompt };
|
||||
}
|
||||
|
||||
function resolveCliImagePath(image: ImageContent): string {
|
||||
const ext = extensionForMime(image.mimeType) ?? ".bin";
|
||||
const digest = crypto
|
||||
.createHash("sha256")
|
||||
.update(image.mimeType)
|
||||
.update("\0")
|
||||
.update(image.data)
|
||||
.digest("hex");
|
||||
return path.join(resolvePreferredOpenClawTmpDir(), "openclaw-cli-images", `${digest}${ext}`);
|
||||
}
|
||||
|
||||
export function appendImagePathsToPrompt(prompt: string, paths: string[]): string {
|
||||
if (!paths.length) {
|
||||
return prompt;
|
||||
}
|
||||
const trimmed = prompt.trimEnd();
|
||||
const separator = trimmed ? "\n\n" : "";
|
||||
return `${trimmed}${separator}${paths.join("\n")}`;
|
||||
}
|
||||
|
||||
export async function loadPromptRefImages(params: {
|
||||
prompt: string;
|
||||
workspaceDir: string;
|
||||
maxBytes?: number;
|
||||
workspaceOnly?: boolean;
|
||||
sandbox?: { root: string; bridge: SandboxFsBridge };
|
||||
}): Promise<ImageContent[]> {
|
||||
const refs = detectImageReferences(params.prompt);
|
||||
if (refs.length === 0) {
|
||||
return [];
|
||||
}
|
||||
|
||||
const maxBytes = params.maxBytes ?? MAX_IMAGE_BYTES;
|
||||
const seen = new Set<string>();
|
||||
const images: ImageContent[] = [];
|
||||
for (const ref of refs) {
|
||||
const key = `${ref.type}:${ref.resolved}`;
|
||||
if (seen.has(key)) {
|
||||
continue;
|
||||
}
|
||||
seen.add(key);
|
||||
const image = await loadImageFromRef(ref, params.workspaceDir, {
|
||||
maxBytes,
|
||||
workspaceOnly: params.workspaceOnly,
|
||||
sandbox: params.sandbox,
|
||||
});
|
||||
if (image) {
|
||||
images.push(image);
|
||||
}
|
||||
}
|
||||
|
||||
const { images: sanitizedImages } = await sanitizeImageBlocks(images, "prompt:images", {
|
||||
maxBytes,
|
||||
});
|
||||
return sanitizedImages;
|
||||
}
|
||||
|
||||
export async function writeCliImages(
|
||||
images: ImageContent[],
|
||||
): Promise<{ paths: string[]; cleanup: () => Promise<void> }> {
|
||||
const imageRoot = path.join(resolvePreferredOpenClawTmpDir(), "openclaw-cli-images");
|
||||
await fs.mkdir(imageRoot, { recursive: true, mode: 0o700 });
|
||||
const paths: string[] = [];
|
||||
for (let i = 0; i < images.length; i += 1) {
|
||||
const image = images[i];
|
||||
const filePath = resolveCliImagePath(image);
|
||||
const buffer = Buffer.from(image.data, "base64");
|
||||
await fs.writeFile(filePath, buffer, { mode: 0o600 });
|
||||
paths.push(filePath);
|
||||
}
|
||||
// Keep content-addressed image paths stable across Claude CLI runs so prompt
|
||||
// text and argv don't churn on every turn with fresh temp-dir suffixes.
|
||||
const cleanup = async () => {};
|
||||
return { paths, cleanup };
|
||||
}
|
||||
|
||||
export function buildCliArgs(params: {
|
||||
backend: CliBackendConfig;
|
||||
baseArgs: string[];
|
||||
modelId: string;
|
||||
sessionId?: string;
|
||||
systemPrompt?: string | null;
|
||||
imagePaths?: string[];
|
||||
promptArg?: string;
|
||||
useResume: boolean;
|
||||
}): string[] {
|
||||
const args: string[] = [...params.baseArgs];
|
||||
if (params.backend.modelArg && params.modelId) {
|
||||
args.push(params.backend.modelArg, params.modelId);
|
||||
}
|
||||
if (!params.useResume && params.systemPrompt && params.backend.systemPromptArg) {
|
||||
args.push(params.backend.systemPromptArg, stripSystemPromptCacheBoundary(params.systemPrompt));
|
||||
}
|
||||
if (!params.useResume && params.sessionId) {
|
||||
if (params.backend.sessionArgs && params.backend.sessionArgs.length > 0) {
|
||||
for (const entry of params.backend.sessionArgs) {
|
||||
args.push(entry.replaceAll("{sessionId}", params.sessionId));
|
||||
}
|
||||
} else if (params.backend.sessionArg) {
|
||||
args.push(params.backend.sessionArg, params.sessionId);
|
||||
}
|
||||
}
|
||||
if (params.imagePaths && params.imagePaths.length > 0) {
|
||||
const mode = params.backend.imageMode ?? "repeat";
|
||||
const imageArg = params.backend.imageArg;
|
||||
if (imageArg) {
|
||||
if (mode === "list") {
|
||||
args.push(imageArg, params.imagePaths.join(","));
|
||||
} else {
|
||||
for (const imagePath of params.imagePaths) {
|
||||
args.push(imageArg, imagePath);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if (params.promptArg !== undefined) {
|
||||
args.push(params.promptArg);
|
||||
}
|
||||
return args;
|
||||
}
|
||||
|
|
@ -1,4 +0,0 @@
|
|||
import { createSubsystemLogger } from "../../logging/subsystem.js";
|
||||
|
||||
export const cliBackendLog = createSubsystemLogger("agent/cli-backend");
|
||||
export const CLI_BACKEND_LOG_OUTPUT_ENV = "OPENCLAW_CLI_BACKEND_LOG_OUTPUT";
|
||||
|
|
@ -1,190 +0,0 @@
|
|||
import { resolveHeartbeatPrompt } from "../../auto-reply/heartbeat.js";
|
||||
import { resolveSessionAgentIds } from "../agent-scope.js";
|
||||
import {
|
||||
buildBootstrapInjectionStats,
|
||||
buildBootstrapPromptWarning,
|
||||
buildBootstrapTruncationReportMeta,
|
||||
analyzeBootstrapBudget,
|
||||
} from "../bootstrap-budget.js";
|
||||
import {
|
||||
makeBootstrapWarn as makeBootstrapWarnImpl,
|
||||
resolveBootstrapContextForRun as resolveBootstrapContextForRunImpl,
|
||||
} from "../bootstrap-files.js";
|
||||
import { resolveCliAuthEpoch } from "../cli-auth-epoch.js";
|
||||
import { resolveCliBackendConfig } from "../cli-backends.js";
|
||||
import { hashCliSessionText, resolveCliSessionReuse } from "../cli-session.js";
|
||||
import { resolveOpenClawDocsPath } from "../docs-path.js";
|
||||
import {
|
||||
resolveBootstrapMaxChars,
|
||||
resolveBootstrapPromptTruncationWarningMode,
|
||||
resolveBootstrapTotalMaxChars,
|
||||
} from "../pi-embedded-helpers.js";
|
||||
import { buildSystemPromptReport } from "../system-prompt-report.js";
|
||||
import { redactRunIdentifier, resolveRunWorkspaceDir } from "../workspace-run.js";
|
||||
import { prepareCliBundleMcpConfig } from "./bundle-mcp.js";
|
||||
import { buildSystemPrompt, normalizeCliModel } from "./helpers.js";
|
||||
import { cliBackendLog } from "./log.js";
|
||||
import type { PreparedCliRunContext, RunCliAgentParams } from "./types.js";
|
||||
|
||||
const prepareDeps = {
|
||||
makeBootstrapWarn: makeBootstrapWarnImpl,
|
||||
resolveBootstrapContextForRun: resolveBootstrapContextForRunImpl,
|
||||
};
|
||||
|
||||
export function setCliRunnerPrepareTestDeps(overrides: Partial<typeof prepareDeps>): void {
|
||||
Object.assign(prepareDeps, overrides);
|
||||
}
|
||||
|
||||
export async function prepareCliRunContext(
|
||||
params: RunCliAgentParams,
|
||||
): Promise<PreparedCliRunContext> {
|
||||
const started = Date.now();
|
||||
const workspaceResolution = resolveRunWorkspaceDir({
|
||||
workspaceDir: params.workspaceDir,
|
||||
sessionKey: params.sessionKey,
|
||||
agentId: params.agentId,
|
||||
config: params.config,
|
||||
});
|
||||
const resolvedWorkspace = workspaceResolution.workspaceDir;
|
||||
const redactedSessionId = redactRunIdentifier(params.sessionId);
|
||||
const redactedSessionKey = redactRunIdentifier(params.sessionKey);
|
||||
const redactedWorkspace = redactRunIdentifier(resolvedWorkspace);
|
||||
if (workspaceResolution.usedFallback) {
|
||||
cliBackendLog.warn(
|
||||
`[workspace-fallback] caller=runCliAgent reason=${workspaceResolution.fallbackReason} run=${params.runId} session=${redactedSessionId} sessionKey=${redactedSessionKey} agent=${workspaceResolution.agentId} workspace=${redactedWorkspace}`,
|
||||
);
|
||||
}
|
||||
const workspaceDir = resolvedWorkspace;
|
||||
|
||||
const backendResolved = resolveCliBackendConfig(params.provider, params.config);
|
||||
if (!backendResolved) {
|
||||
throw new Error(`Unknown CLI backend: ${params.provider}`);
|
||||
}
|
||||
const authEpoch = await resolveCliAuthEpoch({
|
||||
provider: params.provider,
|
||||
authProfileId: params.authProfileId,
|
||||
});
|
||||
const extraSystemPrompt = params.extraSystemPrompt?.trim() ?? "";
|
||||
const extraSystemPromptHash = hashCliSessionText(extraSystemPrompt);
|
||||
const modelId = (params.model ?? "default").trim() || "default";
|
||||
const normalizedModel = normalizeCliModel(modelId, backendResolved.config);
|
||||
const modelDisplay = `${params.provider}/${modelId}`;
|
||||
|
||||
const sessionLabel = params.sessionKey ?? params.sessionId;
|
||||
const { bootstrapFiles, contextFiles } = await prepareDeps.resolveBootstrapContextForRun({
|
||||
workspaceDir,
|
||||
config: params.config,
|
||||
sessionKey: params.sessionKey,
|
||||
sessionId: params.sessionId,
|
||||
warn: prepareDeps.makeBootstrapWarn({
|
||||
sessionLabel,
|
||||
warn: (message) => cliBackendLog.warn(message),
|
||||
}),
|
||||
});
|
||||
const bootstrapMaxChars = resolveBootstrapMaxChars(params.config);
|
||||
const bootstrapTotalMaxChars = resolveBootstrapTotalMaxChars(params.config);
|
||||
const bootstrapAnalysis = analyzeBootstrapBudget({
|
||||
files: buildBootstrapInjectionStats({
|
||||
bootstrapFiles,
|
||||
injectedFiles: contextFiles,
|
||||
}),
|
||||
bootstrapMaxChars,
|
||||
bootstrapTotalMaxChars,
|
||||
});
|
||||
const bootstrapPromptWarningMode = resolveBootstrapPromptTruncationWarningMode(params.config);
|
||||
const bootstrapPromptWarning = buildBootstrapPromptWarning({
|
||||
analysis: bootstrapAnalysis,
|
||||
mode: bootstrapPromptWarningMode,
|
||||
seenSignatures: params.bootstrapPromptWarningSignaturesSeen,
|
||||
previousSignature: params.bootstrapPromptWarningSignature,
|
||||
});
|
||||
const { defaultAgentId, sessionAgentId } = resolveSessionAgentIds({
|
||||
sessionKey: params.sessionKey,
|
||||
config: params.config,
|
||||
agentId: params.agentId,
|
||||
});
|
||||
const preparedBackend = await prepareCliBundleMcpConfig({
|
||||
enabled: backendResolved.bundleMcp,
|
||||
backend: backendResolved.config,
|
||||
workspaceDir,
|
||||
config: params.config,
|
||||
warn: (message) => cliBackendLog.warn(message),
|
||||
});
|
||||
const reusableCliSession = resolveCliSessionReuse({
|
||||
binding:
|
||||
params.cliSessionBinding ??
|
||||
(params.cliSessionId ? { sessionId: params.cliSessionId } : undefined),
|
||||
authProfileId: params.authProfileId,
|
||||
authEpoch,
|
||||
extraSystemPromptHash,
|
||||
mcpConfigHash: preparedBackend.mcpConfigHash,
|
||||
});
|
||||
if (reusableCliSession.invalidatedReason) {
|
||||
cliBackendLog.info(
|
||||
`cli session reset: provider=${params.provider} reason=${reusableCliSession.invalidatedReason}`,
|
||||
);
|
||||
}
|
||||
const heartbeatPrompt =
|
||||
sessionAgentId === defaultAgentId
|
||||
? resolveHeartbeatPrompt(params.config?.agents?.defaults?.heartbeat?.prompt)
|
||||
: undefined;
|
||||
const docsPath = await resolveOpenClawDocsPath({
|
||||
workspaceDir,
|
||||
argv1: process.argv[1],
|
||||
cwd: process.cwd(),
|
||||
moduleUrl: import.meta.url,
|
||||
});
|
||||
const systemPrompt = buildSystemPrompt({
|
||||
workspaceDir,
|
||||
config: params.config,
|
||||
defaultThinkLevel: params.thinkLevel,
|
||||
extraSystemPrompt,
|
||||
ownerNumbers: params.ownerNumbers,
|
||||
heartbeatPrompt,
|
||||
docsPath: docsPath ?? undefined,
|
||||
tools: [],
|
||||
contextFiles,
|
||||
modelDisplay,
|
||||
agentId: sessionAgentId,
|
||||
backendId: backendResolved.id,
|
||||
});
|
||||
const systemPromptReport = buildSystemPromptReport({
|
||||
source: "run",
|
||||
generatedAt: Date.now(),
|
||||
sessionId: params.sessionId,
|
||||
sessionKey: params.sessionKey,
|
||||
provider: params.provider,
|
||||
model: modelId,
|
||||
workspaceDir,
|
||||
bootstrapMaxChars,
|
||||
bootstrapTotalMaxChars,
|
||||
bootstrapTruncation: buildBootstrapTruncationReportMeta({
|
||||
analysis: bootstrapAnalysis,
|
||||
warningMode: bootstrapPromptWarningMode,
|
||||
warning: bootstrapPromptWarning,
|
||||
}),
|
||||
sandbox: { mode: "off", sandboxed: false },
|
||||
systemPrompt,
|
||||
bootstrapFiles,
|
||||
injectedFiles: contextFiles,
|
||||
skillsPrompt: "",
|
||||
tools: [],
|
||||
});
|
||||
|
||||
return {
|
||||
params,
|
||||
started,
|
||||
workspaceDir,
|
||||
backendResolved,
|
||||
preparedBackend,
|
||||
reusableCliSession,
|
||||
modelId,
|
||||
normalizedModel,
|
||||
systemPrompt,
|
||||
systemPromptReport,
|
||||
bootstrapPromptWarningLines: bootstrapPromptWarning.lines,
|
||||
heartbeatPrompt,
|
||||
authEpoch,
|
||||
extraSystemPromptHash,
|
||||
};
|
||||
}
|
||||
|
|
@ -1,88 +0,0 @@
|
|||
import path from "node:path";
|
||||
import type { CliBackendConfig } from "../../config/types.js";
|
||||
import {
|
||||
CLI_FRESH_WATCHDOG_DEFAULTS,
|
||||
CLI_RESUME_WATCHDOG_DEFAULTS,
|
||||
CLI_WATCHDOG_MIN_TIMEOUT_MS,
|
||||
} from "../cli-watchdog-defaults.js";
|
||||
|
||||
function pickWatchdogProfile(
|
||||
backend: CliBackendConfig,
|
||||
useResume: boolean,
|
||||
): {
|
||||
noOutputTimeoutMs?: number;
|
||||
noOutputTimeoutRatio: number;
|
||||
minMs: number;
|
||||
maxMs: number;
|
||||
} {
|
||||
const defaults = useResume ? CLI_RESUME_WATCHDOG_DEFAULTS : CLI_FRESH_WATCHDOG_DEFAULTS;
|
||||
const configured = useResume
|
||||
? backend.reliability?.watchdog?.resume
|
||||
: backend.reliability?.watchdog?.fresh;
|
||||
|
||||
const ratio = (() => {
|
||||
const value = configured?.noOutputTimeoutRatio;
|
||||
if (typeof value !== "number" || !Number.isFinite(value)) {
|
||||
return defaults.noOutputTimeoutRatio;
|
||||
}
|
||||
return Math.max(0.05, Math.min(0.95, value));
|
||||
})();
|
||||
const minMs = (() => {
|
||||
const value = configured?.minMs;
|
||||
if (typeof value !== "number" || !Number.isFinite(value)) {
|
||||
return defaults.minMs;
|
||||
}
|
||||
return Math.max(CLI_WATCHDOG_MIN_TIMEOUT_MS, Math.floor(value));
|
||||
})();
|
||||
const maxMs = (() => {
|
||||
const value = configured?.maxMs;
|
||||
if (typeof value !== "number" || !Number.isFinite(value)) {
|
||||
return defaults.maxMs;
|
||||
}
|
||||
return Math.max(CLI_WATCHDOG_MIN_TIMEOUT_MS, Math.floor(value));
|
||||
})();
|
||||
|
||||
return {
|
||||
noOutputTimeoutMs:
|
||||
typeof configured?.noOutputTimeoutMs === "number" &&
|
||||
Number.isFinite(configured.noOutputTimeoutMs)
|
||||
? Math.max(CLI_WATCHDOG_MIN_TIMEOUT_MS, Math.floor(configured.noOutputTimeoutMs))
|
||||
: undefined,
|
||||
noOutputTimeoutRatio: ratio,
|
||||
minMs: Math.min(minMs, maxMs),
|
||||
maxMs: Math.max(minMs, maxMs),
|
||||
};
|
||||
}
|
||||
|
||||
export function resolveCliNoOutputTimeoutMs(params: {
|
||||
backend: CliBackendConfig;
|
||||
timeoutMs: number;
|
||||
useResume: boolean;
|
||||
}): number {
|
||||
const profile = pickWatchdogProfile(params.backend, params.useResume);
|
||||
// Keep watchdog below global timeout in normal cases.
|
||||
const cap = Math.max(CLI_WATCHDOG_MIN_TIMEOUT_MS, params.timeoutMs - 1_000);
|
||||
if (profile.noOutputTimeoutMs !== undefined) {
|
||||
return Math.min(profile.noOutputTimeoutMs, cap);
|
||||
}
|
||||
const computed = Math.floor(params.timeoutMs * profile.noOutputTimeoutRatio);
|
||||
const bounded = Math.min(profile.maxMs, Math.max(profile.minMs, computed));
|
||||
return Math.min(bounded, cap);
|
||||
}
|
||||
|
||||
export function buildCliSupervisorScopeKey(params: {
|
||||
backend: CliBackendConfig;
|
||||
backendId: string;
|
||||
cliSessionId?: string;
|
||||
}): string | undefined {
|
||||
const commandToken = path
|
||||
.basename(params.backend.command ?? "")
|
||||
.trim()
|
||||
.toLowerCase();
|
||||
const backendToken = params.backendId.trim().toLowerCase();
|
||||
const sessionToken = params.cliSessionId?.trim();
|
||||
if (!sessionToken) {
|
||||
return undefined;
|
||||
}
|
||||
return `cli:${backendToken}:${commandToken}:${sessionToken}`;
|
||||
}
|
||||
|
|
@ -1,67 +0,0 @@
|
|||
import type { ImageContent } from "@mariozechner/pi-ai";
|
||||
import type { ReplyOperation } from "../../auto-reply/reply/reply-run-registry.js";
|
||||
import type { ThinkLevel } from "../../auto-reply/thinking.js";
|
||||
import type { OpenClawConfig } from "../../config/config.js";
|
||||
import type { CliSessionBinding } from "../../config/sessions.js";
|
||||
import type { SessionSystemPromptReport } from "../../config/sessions/types.js";
|
||||
import type { CliBackendConfig } from "../../config/types.js";
|
||||
import type { PromptImageOrderEntry } from "../../media/prompt-image-order.js";
|
||||
import type { ResolvedCliBackend } from "../cli-backends.js";
|
||||
|
||||
export type RunCliAgentParams = {
|
||||
sessionId: string;
|
||||
sessionKey?: string;
|
||||
agentId?: string;
|
||||
sessionFile: string;
|
||||
workspaceDir: string;
|
||||
config?: OpenClawConfig;
|
||||
prompt: string;
|
||||
provider: string;
|
||||
model?: string;
|
||||
thinkLevel?: ThinkLevel;
|
||||
timeoutMs: number;
|
||||
runId: string;
|
||||
extraSystemPrompt?: string;
|
||||
streamParams?: import("../command/types.js").AgentStreamParams;
|
||||
ownerNumbers?: string[];
|
||||
cliSessionId?: string;
|
||||
cliSessionBinding?: CliSessionBinding;
|
||||
authProfileId?: string;
|
||||
bootstrapPromptWarningSignaturesSeen?: string[];
|
||||
bootstrapPromptWarningSignature?: string;
|
||||
images?: ImageContent[];
|
||||
imageOrder?: PromptImageOrderEntry[];
|
||||
messageProvider?: string;
|
||||
agentAccountId?: string;
|
||||
abortSignal?: AbortSignal;
|
||||
replyOperation?: ReplyOperation;
|
||||
};
|
||||
|
||||
export type CliPreparedBackend = {
|
||||
backend: CliBackendConfig;
|
||||
cleanup?: () => Promise<void>;
|
||||
mcpConfigHash?: string;
|
||||
env?: Record<string, string>;
|
||||
};
|
||||
|
||||
export type CliReusableSession = {
|
||||
sessionId?: string;
|
||||
invalidatedReason?: "auth-profile" | "auth-epoch" | "system-prompt" | "mcp";
|
||||
};
|
||||
|
||||
export type PreparedCliRunContext = {
|
||||
params: RunCliAgentParams;
|
||||
started: number;
|
||||
workspaceDir: string;
|
||||
backendResolved: ResolvedCliBackend;
|
||||
preparedBackend: CliPreparedBackend;
|
||||
reusableCliSession: CliReusableSession;
|
||||
modelId: string;
|
||||
normalizedModel: string;
|
||||
systemPrompt: string;
|
||||
systemPromptReport: SessionSystemPromptReport;
|
||||
bootstrapPromptWarningLines: string[];
|
||||
heartbeatPrompt?: string;
|
||||
authEpoch?: string;
|
||||
extraSystemPromptHash?: string;
|
||||
};
|
||||
|
|
@ -1,165 +0,0 @@
|
|||
import { describe, expect, it } from "vitest";
|
||||
import type { SessionEntry } from "../config/sessions.js";
|
||||
import {
|
||||
clearAllCliSessions,
|
||||
clearCliSession,
|
||||
getCliSessionBinding,
|
||||
hashCliSessionText,
|
||||
resolveCliSessionReuse,
|
||||
setCliSessionBinding,
|
||||
} from "./cli-session.js";
|
||||
|
||||
describe("cli-session helpers", () => {
|
||||
it("persists binding metadata alongside provider session ids", () => {
|
||||
const entry: SessionEntry = {
|
||||
sessionId: "openclaw-session",
|
||||
updatedAt: Date.now(),
|
||||
};
|
||||
|
||||
setCliSessionBinding(entry, "codex-cli", {
|
||||
sessionId: "cli-session-1",
|
||||
authProfileId: "openai-codex:work",
|
||||
authEpoch: "auth-epoch",
|
||||
extraSystemPromptHash: "prompt-hash",
|
||||
mcpConfigHash: "mcp-hash",
|
||||
});
|
||||
|
||||
expect(entry.cliSessionIds?.["codex-cli"]).toBe("cli-session-1");
|
||||
expect(getCliSessionBinding(entry, "codex-cli")).toEqual({
|
||||
sessionId: "cli-session-1",
|
||||
authProfileId: "openai-codex:work",
|
||||
authEpoch: "auth-epoch",
|
||||
extraSystemPromptHash: "prompt-hash",
|
||||
mcpConfigHash: "mcp-hash",
|
||||
});
|
||||
});
|
||||
|
||||
it("keeps legacy bindings reusable until richer metadata is persisted", () => {
|
||||
const entry: SessionEntry = {
|
||||
sessionId: "openclaw-session",
|
||||
updatedAt: Date.now(),
|
||||
cliSessionIds: { "codex-cli": "legacy-session" },
|
||||
};
|
||||
|
||||
expect(resolveCliSessionReuse({ binding: getCliSessionBinding(entry, "codex-cli") })).toEqual({
|
||||
sessionId: "legacy-session",
|
||||
});
|
||||
});
|
||||
|
||||
it("invalidates legacy bindings when auth, prompt, or MCP state changes", () => {
|
||||
const entry: SessionEntry = {
|
||||
sessionId: "openclaw-session",
|
||||
updatedAt: Date.now(),
|
||||
cliSessionIds: { "codex-cli": "legacy-session" },
|
||||
};
|
||||
const binding = getCliSessionBinding(entry, "codex-cli");
|
||||
|
||||
expect(
|
||||
resolveCliSessionReuse({
|
||||
binding,
|
||||
authProfileId: "openai-codex:work",
|
||||
}),
|
||||
).toEqual({ invalidatedReason: "auth-profile" });
|
||||
expect(
|
||||
resolveCliSessionReuse({
|
||||
binding,
|
||||
extraSystemPromptHash: "prompt-hash",
|
||||
}),
|
||||
).toEqual({ invalidatedReason: "system-prompt" });
|
||||
expect(
|
||||
resolveCliSessionReuse({
|
||||
binding,
|
||||
mcpConfigHash: "mcp-hash",
|
||||
}),
|
||||
).toEqual({ invalidatedReason: "mcp" });
|
||||
});
|
||||
|
||||
it("invalidates reuse when stored auth profile or prompt shape changes", () => {
|
||||
const binding = {
|
||||
sessionId: "cli-session-1",
|
||||
authProfileId: "openai-codex:work",
|
||||
authEpoch: "auth-epoch-a",
|
||||
extraSystemPromptHash: "prompt-a",
|
||||
mcpConfigHash: "mcp-a",
|
||||
};
|
||||
|
||||
expect(
|
||||
resolveCliSessionReuse({
|
||||
binding,
|
||||
authProfileId: "openai-codex:personal",
|
||||
authEpoch: "auth-epoch-a",
|
||||
extraSystemPromptHash: "prompt-a",
|
||||
mcpConfigHash: "mcp-a",
|
||||
}),
|
||||
).toEqual({ invalidatedReason: "auth-profile" });
|
||||
expect(
|
||||
resolveCliSessionReuse({
|
||||
binding,
|
||||
authProfileId: "openai-codex:work",
|
||||
authEpoch: "auth-epoch-b",
|
||||
extraSystemPromptHash: "prompt-a",
|
||||
mcpConfigHash: "mcp-a",
|
||||
}),
|
||||
).toEqual({ invalidatedReason: "auth-epoch" });
|
||||
expect(
|
||||
resolveCliSessionReuse({
|
||||
binding,
|
||||
authProfileId: "openai-codex:work",
|
||||
authEpoch: "auth-epoch-a",
|
||||
extraSystemPromptHash: "prompt-b",
|
||||
mcpConfigHash: "mcp-a",
|
||||
}),
|
||||
).toEqual({ invalidatedReason: "system-prompt" });
|
||||
expect(
|
||||
resolveCliSessionReuse({
|
||||
binding,
|
||||
authProfileId: "openai-codex:work",
|
||||
authEpoch: "auth-epoch-a",
|
||||
extraSystemPromptHash: "prompt-a",
|
||||
mcpConfigHash: "mcp-b",
|
||||
}),
|
||||
).toEqual({ invalidatedReason: "mcp" });
|
||||
});
|
||||
|
||||
it("does not treat model changes as a session mismatch", () => {
|
||||
const binding = {
|
||||
sessionId: "cli-session-1",
|
||||
authProfileId: "anthropic:work",
|
||||
authEpoch: "auth-epoch-a",
|
||||
extraSystemPromptHash: "prompt-a",
|
||||
mcpConfigHash: "mcp-a",
|
||||
};
|
||||
|
||||
expect(
|
||||
resolveCliSessionReuse({
|
||||
binding,
|
||||
authProfileId: "anthropic:work",
|
||||
authEpoch: "auth-epoch-a",
|
||||
extraSystemPromptHash: "prompt-a",
|
||||
mcpConfigHash: "mcp-a",
|
||||
}),
|
||||
).toEqual({ sessionId: "cli-session-1" });
|
||||
});
|
||||
|
||||
it("clears provider-scoped and global CLI session state", () => {
|
||||
const entry: SessionEntry = {
|
||||
sessionId: "openclaw-session",
|
||||
updatedAt: Date.now(),
|
||||
};
|
||||
setCliSessionBinding(entry, "codex-cli", { sessionId: "codex-session" });
|
||||
setCliSessionBinding(entry, "codex-cli", { sessionId: "codex-session" });
|
||||
|
||||
clearCliSession(entry, "codex-cli");
|
||||
expect(getCliSessionBinding(entry, "codex-cli")).toBeUndefined();
|
||||
expect(entry.cliSessionIds?.["codex-cli"]).toBeUndefined();
|
||||
|
||||
clearAllCliSessions(entry);
|
||||
expect(entry.cliSessionBindings).toBeUndefined();
|
||||
expect(entry.cliSessionIds).toBeUndefined();
|
||||
});
|
||||
|
||||
it("hashes trimmed extra system prompts consistently", () => {
|
||||
expect(hashCliSessionText(" keep this ")).toBe(hashCliSessionText("keep this"));
|
||||
expect(hashCliSessionText("")).toBeUndefined();
|
||||
});
|
||||
});
|
||||
|
|
@ -1,139 +0,0 @@
|
|||
import crypto from "node:crypto";
|
||||
import type { CliSessionBinding, SessionEntry } from "../config/sessions.js";
|
||||
import { normalizeProviderId } from "./model-selection.js";
|
||||
|
||||
function trimOptional(value: string | undefined): string | undefined {
|
||||
const trimmed = value?.trim();
|
||||
return trimmed ? trimmed : undefined;
|
||||
}
|
||||
|
||||
export function hashCliSessionText(value: string | undefined): string | undefined {
|
||||
const trimmed = trimOptional(value);
|
||||
if (!trimmed) {
|
||||
return undefined;
|
||||
}
|
||||
return crypto.createHash("sha256").update(trimmed).digest("hex");
|
||||
}
|
||||
|
||||
export function getCliSessionBinding(
|
||||
entry: SessionEntry | undefined,
|
||||
provider: string,
|
||||
): CliSessionBinding | undefined {
|
||||
if (!entry) {
|
||||
return undefined;
|
||||
}
|
||||
const normalized = normalizeProviderId(provider);
|
||||
const fromBindings = entry.cliSessionBindings?.[normalized];
|
||||
const bindingSessionId = trimOptional(fromBindings?.sessionId);
|
||||
if (bindingSessionId) {
|
||||
return {
|
||||
sessionId: bindingSessionId,
|
||||
authProfileId: trimOptional(fromBindings?.authProfileId),
|
||||
authEpoch: trimOptional(fromBindings?.authEpoch),
|
||||
extraSystemPromptHash: trimOptional(fromBindings?.extraSystemPromptHash),
|
||||
mcpConfigHash: trimOptional(fromBindings?.mcpConfigHash),
|
||||
};
|
||||
}
|
||||
const fromMap = entry.cliSessionIds?.[normalized];
|
||||
if (fromMap?.trim()) {
|
||||
return { sessionId: fromMap.trim() };
|
||||
}
|
||||
return undefined;
|
||||
}
|
||||
|
||||
export function getCliSessionId(
|
||||
entry: SessionEntry | undefined,
|
||||
provider: string,
|
||||
): string | undefined {
|
||||
return getCliSessionBinding(entry, provider)?.sessionId;
|
||||
}
|
||||
|
||||
export function setCliSessionId(entry: SessionEntry, provider: string, sessionId: string): void {
|
||||
setCliSessionBinding(entry, provider, { sessionId });
|
||||
}
|
||||
|
||||
export function setCliSessionBinding(
|
||||
entry: SessionEntry,
|
||||
provider: string,
|
||||
binding: CliSessionBinding,
|
||||
): void {
|
||||
const normalized = normalizeProviderId(provider);
|
||||
const trimmed = binding.sessionId.trim();
|
||||
if (!trimmed) {
|
||||
return;
|
||||
}
|
||||
entry.cliSessionBindings = {
|
||||
...entry.cliSessionBindings,
|
||||
[normalized]: {
|
||||
sessionId: trimmed,
|
||||
...(trimOptional(binding.authProfileId)
|
||||
? { authProfileId: trimOptional(binding.authProfileId) }
|
||||
: {}),
|
||||
...(trimOptional(binding.authEpoch) ? { authEpoch: trimOptional(binding.authEpoch) } : {}),
|
||||
...(trimOptional(binding.extraSystemPromptHash)
|
||||
? { extraSystemPromptHash: trimOptional(binding.extraSystemPromptHash) }
|
||||
: {}),
|
||||
...(trimOptional(binding.mcpConfigHash)
|
||||
? { mcpConfigHash: trimOptional(binding.mcpConfigHash) }
|
||||
: {}),
|
||||
},
|
||||
};
|
||||
entry.cliSessionIds = { ...entry.cliSessionIds, [normalized]: trimmed };
|
||||
}
|
||||
|
||||
export function clearCliSession(entry: SessionEntry, provider: string): void {
|
||||
const normalized = normalizeProviderId(provider);
|
||||
if (entry.cliSessionBindings?.[normalized] !== undefined) {
|
||||
const next = { ...entry.cliSessionBindings };
|
||||
delete next[normalized];
|
||||
entry.cliSessionBindings = Object.keys(next).length > 0 ? next : undefined;
|
||||
}
|
||||
if (entry.cliSessionIds?.[normalized] !== undefined) {
|
||||
const next = { ...entry.cliSessionIds };
|
||||
delete next[normalized];
|
||||
entry.cliSessionIds = Object.keys(next).length > 0 ? next : undefined;
|
||||
}
|
||||
}
|
||||
|
||||
export function clearAllCliSessions(entry: SessionEntry): void {
|
||||
delete entry.cliSessionBindings;
|
||||
delete entry.cliSessionIds;
|
||||
}
|
||||
|
||||
export function resolveCliSessionReuse(params: {
|
||||
binding?: CliSessionBinding;
|
||||
authProfileId?: string;
|
||||
authEpoch?: string;
|
||||
extraSystemPromptHash?: string;
|
||||
mcpConfigHash?: string;
|
||||
}): {
|
||||
sessionId?: string;
|
||||
invalidatedReason?: "auth-profile" | "auth-epoch" | "system-prompt" | "mcp";
|
||||
} {
|
||||
const binding = params.binding;
|
||||
const sessionId = trimOptional(binding?.sessionId);
|
||||
if (!sessionId) {
|
||||
return {};
|
||||
}
|
||||
const currentAuthProfileId = trimOptional(params.authProfileId);
|
||||
const currentAuthEpoch = trimOptional(params.authEpoch);
|
||||
const currentExtraSystemPromptHash = trimOptional(params.extraSystemPromptHash);
|
||||
const currentMcpConfigHash = trimOptional(params.mcpConfigHash);
|
||||
const storedAuthProfileId = trimOptional(binding?.authProfileId);
|
||||
if (storedAuthProfileId !== currentAuthProfileId) {
|
||||
return { invalidatedReason: "auth-profile" };
|
||||
}
|
||||
const storedAuthEpoch = trimOptional(binding?.authEpoch);
|
||||
if (storedAuthEpoch !== currentAuthEpoch) {
|
||||
return { invalidatedReason: "auth-epoch" };
|
||||
}
|
||||
const storedExtraSystemPromptHash = trimOptional(binding?.extraSystemPromptHash);
|
||||
if (storedExtraSystemPromptHash !== currentExtraSystemPromptHash) {
|
||||
return { invalidatedReason: "system-prompt" };
|
||||
}
|
||||
const storedMcpConfigHash = trimOptional(binding?.mcpConfigHash);
|
||||
if (storedMcpConfigHash !== currentMcpConfigHash) {
|
||||
return { invalidatedReason: "mcp" };
|
||||
}
|
||||
return { sessionId };
|
||||
}
|
||||
|
|
@ -12,25 +12,17 @@ import { loadConfig } from "../../config/config.js";
|
|||
import { mergeSessionEntry, type SessionEntry, updateSessionStore } from "../../config/sessions.js";
|
||||
import { resolveSessionTranscriptFile } from "../../config/sessions/transcript.js";
|
||||
import { emitAgentEvent } from "../../infra/agent-events.js";
|
||||
import { createSubsystemLogger } from "../../logging/subsystem.js";
|
||||
import { emitSessionTranscriptUpdate } from "../../sessions/transcript-events.js";
|
||||
import { sanitizeForLog } from "../../terminal/ansi.js";
|
||||
import { resolveMessageChannel } from "../../utils/message-channel.js";
|
||||
import { resolveBootstrapWarningSignaturesSeen } from "../bootstrap-budget.js";
|
||||
import { runCliAgent } from "../cli-runner.js";
|
||||
import { clearCliSession, getCliSessionBinding, setCliSessionBinding } from "../cli-session.js";
|
||||
import { FailoverError } from "../failover-error.js";
|
||||
import { formatAgentInternalEventsForPrompt } from "../internal-events.js";
|
||||
import { hasInternalRuntimeContext } from "../internal-runtime-context.js";
|
||||
import { isCliProvider } from "../model-selection.js";
|
||||
import { prepareSessionManagerForRun } from "../pi-embedded-runner/session-manager-init.js";
|
||||
import { runEmbeddedPiAgent } from "../pi-embedded.js";
|
||||
import { buildWorkspaceSkillSnapshot } from "../skills.js";
|
||||
import { resolveAgentRunContext } from "./run-context.js";
|
||||
import type { AgentCommandOpts } from "./types.js";
|
||||
|
||||
const log = createSubsystemLogger("agents/agent-command");
|
||||
|
||||
/** Maximum number of JSONL records to inspect before giving up. */
|
||||
const SESSION_FILE_MAX_RECORDS = 500;
|
||||
|
||||
|
|
@ -345,97 +337,6 @@ export function runAgentAttempt(params: {
|
|||
params.providerOverride === params.authProfileProvider
|
||||
? params.sessionEntry?.authProfileOverride
|
||||
: undefined;
|
||||
if (isCliProvider(params.providerOverride, params.cfg)) {
|
||||
const cliSessionBinding = getCliSessionBinding(params.sessionEntry, params.providerOverride);
|
||||
const runCliWithSession = (nextCliSessionId: string | undefined) =>
|
||||
runCliAgent({
|
||||
sessionId: params.sessionId,
|
||||
sessionKey: params.sessionKey,
|
||||
agentId: params.sessionAgentId,
|
||||
sessionFile: params.sessionFile,
|
||||
workspaceDir: params.workspaceDir,
|
||||
config: params.cfg,
|
||||
prompt: effectivePrompt,
|
||||
provider: params.providerOverride,
|
||||
model: params.modelOverride,
|
||||
thinkLevel: params.resolvedThinkLevel,
|
||||
timeoutMs: params.timeoutMs,
|
||||
runId: params.runId,
|
||||
extraSystemPrompt: params.opts.extraSystemPrompt,
|
||||
cliSessionId: nextCliSessionId,
|
||||
cliSessionBinding:
|
||||
nextCliSessionId === cliSessionBinding?.sessionId ? cliSessionBinding : undefined,
|
||||
authProfileId,
|
||||
bootstrapPromptWarningSignaturesSeen,
|
||||
bootstrapPromptWarningSignature,
|
||||
images: params.isFallbackRetry ? undefined : params.opts.images,
|
||||
imageOrder: params.isFallbackRetry ? undefined : params.opts.imageOrder,
|
||||
streamParams: params.opts.streamParams,
|
||||
messageProvider: params.messageChannel,
|
||||
agentAccountId: params.runContext.accountId,
|
||||
});
|
||||
return runCliWithSession(cliSessionBinding?.sessionId).catch(async (err) => {
|
||||
if (
|
||||
err instanceof FailoverError &&
|
||||
err.reason === "session_expired" &&
|
||||
cliSessionBinding?.sessionId &&
|
||||
params.sessionKey &&
|
||||
params.sessionStore &&
|
||||
params.storePath
|
||||
) {
|
||||
log.warn(
|
||||
`CLI session expired, clearing from session store: provider=${sanitizeForLog(params.providerOverride)} sessionKey=${params.sessionKey}`,
|
||||
);
|
||||
|
||||
const entry = params.sessionStore[params.sessionKey];
|
||||
if (entry) {
|
||||
const updatedEntry = { ...entry };
|
||||
clearCliSession(updatedEntry, params.providerOverride);
|
||||
updatedEntry.updatedAt = Date.now();
|
||||
|
||||
await persistSessionEntry({
|
||||
sessionStore: params.sessionStore,
|
||||
sessionKey: params.sessionKey,
|
||||
storePath: params.storePath,
|
||||
entry: updatedEntry,
|
||||
clearedFields: ["cliSessionBindings", "cliSessionIds", "claudeCliSessionId"],
|
||||
});
|
||||
|
||||
params.sessionEntry = updatedEntry;
|
||||
}
|
||||
|
||||
return runCliWithSession(undefined).then(async (result) => {
|
||||
if (
|
||||
result.meta.agentMeta?.cliSessionBinding?.sessionId &&
|
||||
params.sessionKey &&
|
||||
params.sessionStore &&
|
||||
params.storePath
|
||||
) {
|
||||
const entry = params.sessionStore[params.sessionKey];
|
||||
if (entry) {
|
||||
const updatedEntry = { ...entry };
|
||||
setCliSessionBinding(
|
||||
updatedEntry,
|
||||
params.providerOverride,
|
||||
result.meta.agentMeta.cliSessionBinding,
|
||||
);
|
||||
updatedEntry.updatedAt = Date.now();
|
||||
|
||||
await persistSessionEntry({
|
||||
sessionStore: params.sessionStore,
|
||||
sessionKey: params.sessionKey,
|
||||
storePath: params.storePath,
|
||||
entry: updatedEntry,
|
||||
});
|
||||
}
|
||||
}
|
||||
return result;
|
||||
});
|
||||
}
|
||||
throw err;
|
||||
});
|
||||
}
|
||||
|
||||
return runEmbeddedPiAgent({
|
||||
sessionId: params.sessionId,
|
||||
sessionKey: params.sessionKey,
|
||||
|
|
|
|||
|
|
@ -6,10 +6,8 @@ import {
|
|||
updateSessionStore,
|
||||
} from "../../config/sessions.js";
|
||||
import { estimateUsageCost, resolveModelCostConfig } from "../../utils/usage-format.js";
|
||||
import { setCliSessionBinding, setCliSessionId } from "../cli-session.js";
|
||||
import { resolveContextTokensForModel } from "../context.js";
|
||||
import { DEFAULT_CONTEXT_TOKENS } from "../defaults.js";
|
||||
import { isCliProvider } from "../model-selection.js";
|
||||
import { deriveSessionTotalTokens, hasNonzeroUsage } from "../usage.js";
|
||||
|
||||
type RunResult = Awaited<ReturnType<(typeof import("../pi-embedded.js"))["runEmbeddedPiAgent"]>>;
|
||||
|
|
@ -73,17 +71,6 @@ export async function updateSessionStoreAfterAgentRun(params: {
|
|||
provider: providerUsed,
|
||||
model: modelUsed,
|
||||
});
|
||||
if (isCliProvider(providerUsed, cfg)) {
|
||||
const cliSessionBinding = result.meta.agentMeta?.cliSessionBinding;
|
||||
if (cliSessionBinding?.sessionId?.trim()) {
|
||||
setCliSessionBinding(next, providerUsed, cliSessionBinding);
|
||||
} else {
|
||||
const cliSessionId = result.meta.agentMeta?.sessionId?.trim();
|
||||
if (cliSessionId) {
|
||||
setCliSessionId(next, providerUsed, cliSessionId);
|
||||
}
|
||||
}
|
||||
}
|
||||
next.abortedLastRun = result.meta.aborted ?? false;
|
||||
if (result.meta.systemPromptReport) {
|
||||
next.systemPromptReport = result.meta.systemPromptReport;
|
||||
|
|
|
|||
|
|
@ -424,8 +424,8 @@ export function resolveContextTokensForModel(params: {
|
|||
|
||||
// When provider is explicitly given and the model ID is bare (no slash),
|
||||
// try the provider-qualified cache key BEFORE the bare key. Discovery
|
||||
// entries are stored under qualified IDs (e.g. "google-gemini-cli/
|
||||
// gemini-3.1-pro-preview → 1M"), while the bare key may hold a cross-
|
||||
// entries are stored under qualified IDs (e.g. "google/
|
||||
// gemini-3.1-pro-preview" → 1M), while the bare key may hold a cross-
|
||||
// provider minimum (128k). Returning the qualified entry gives the correct
|
||||
// provider-specific window for /status and session context-token persistence.
|
||||
//
|
||||
|
|
@ -454,7 +454,7 @@ export function resolveContextTokensForModel(params: {
|
|||
}
|
||||
|
||||
// When provider is implicit, try qualified as a last resort so inferred
|
||||
// provider/model pairs (e.g. model="google-gemini-cli/gemini-3.1-pro")
|
||||
// provider/model pairs (e.g. model="google/gemini-3.1-pro")
|
||||
// still find discovery entries stored under that qualified ID.
|
||||
if (!params.provider && ref && !ref.model.includes("/")) {
|
||||
const qualifiedResult = lookupContextTokens(
|
||||
|
|
|
|||
|
|
@ -6,7 +6,6 @@ import {
|
|||
toAgentModelListLike,
|
||||
} from "../config/model-input.js";
|
||||
import { createSubsystemLogger } from "../logging/subsystem.js";
|
||||
import { resolveRuntimeCliBackends } from "../plugins/cli-backends.runtime.js";
|
||||
import { sanitizeForLog, stripAnsi } from "../terminal/ansi.js";
|
||||
import {
|
||||
resolveAgentConfig,
|
||||
|
|
@ -87,14 +86,8 @@ export {
|
|||
normalizeProviderIdForAuth,
|
||||
};
|
||||
|
||||
export function isCliProvider(provider: string, cfg?: OpenClawConfig): boolean {
|
||||
const normalized = normalizeProviderId(provider);
|
||||
const cliBackends = resolveRuntimeCliBackends();
|
||||
if (cliBackends.some((backend) => normalizeProviderId(backend.id) === normalized)) {
|
||||
return true;
|
||||
}
|
||||
const backends = cfg?.agents?.defaults?.cliBackends ?? {};
|
||||
return Object.keys(backends).some((key) => normalizeProviderId(key) === normalized);
|
||||
export function isCliProvider(_provider: string, _cfg?: OpenClawConfig): boolean {
|
||||
return false;
|
||||
}
|
||||
|
||||
function normalizeProviderModelId(provider: string, model: string): string {
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
import { sanitizeGoogleTurnOrdering } from "./bootstrap.js";
|
||||
|
||||
export function isGoogleModelApi(api?: string | null): boolean {
|
||||
return api === "google-gemini-cli" || api === "google-generative-ai";
|
||||
return api === "google-generative-ai";
|
||||
}
|
||||
|
||||
export { sanitizeGoogleTurnOrdering };
|
||||
|
|
|
|||
|
|
@ -391,7 +391,7 @@ export function sanitizeToolsForGoogle<
|
|||
// AND Claude models. This field does not support JSON Schema keywords such as
|
||||
// patternProperties, additionalProperties, $ref, etc. We must clean schemas
|
||||
// for every provider that routes through this path.
|
||||
if (provider !== "google-gemini-cli") {
|
||||
if (provider !== "google") {
|
||||
return params.tools;
|
||||
}
|
||||
return params.tools.map((tool) => {
|
||||
|
|
@ -417,7 +417,7 @@ export function logToolSchemasForGoogle(params: {
|
|||
modelApi?: string | null;
|
||||
model?: ProviderRuntimeModel;
|
||||
}) {
|
||||
if (params.provider.trim() !== "google-gemini-cli") {
|
||||
if (params.provider.trim() !== "google") {
|
||||
return;
|
||||
}
|
||||
const toolNames = params.tools.map((tool, index) => `${index}:${tool.name}`);
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@ export function buildForwardCompatTemplate(params: {
|
|||
id: string;
|
||||
name: string;
|
||||
provider: string;
|
||||
api: "anthropic-messages" | "google-gemini-cli" | "openai-completions" | "openai-responses";
|
||||
api: "anthropic-messages" | "google-generative-ai" | "openai-completions" | "openai-responses";
|
||||
baseUrl: string;
|
||||
reasoning?: boolean;
|
||||
input?: readonly ["text"] | readonly ["text", "image"];
|
||||
|
|
|
|||
|
|
@ -363,14 +363,14 @@ function buildDynamicModel(
|
|||
modelId,
|
||||
{
|
||||
provider: "google-antigravity",
|
||||
api: "google-gemini-cli",
|
||||
api: "google-generative-ai",
|
||||
baseUrl: GOOGLE_GEMINI_CLI_BASE_URL,
|
||||
reasoning: true,
|
||||
input: ["text", "image"],
|
||||
},
|
||||
{
|
||||
provider: "google-antigravity",
|
||||
api: "google-gemini-cli",
|
||||
api: "google-generative-ai",
|
||||
baseUrl: GOOGLE_GEMINI_CLI_BASE_URL,
|
||||
reasoning: true,
|
||||
input: ["text", "image"],
|
||||
|
|
|
|||
|
|
@ -83,8 +83,8 @@ export function buildOpenAICodexForwardCompatExpectation(
|
|||
export const GOOGLE_GEMINI_CLI_PRO_TEMPLATE_MODEL = {
|
||||
id: "gemini-3-pro-preview",
|
||||
name: "Gemini 3 Pro Preview (Cloud Code Assist)",
|
||||
provider: "google-gemini-cli",
|
||||
api: "google-gemini-cli",
|
||||
provider: "google",
|
||||
api: "google-generative-ai",
|
||||
baseUrl: "https://cloudcode-pa.googleapis.com",
|
||||
reasoning: true,
|
||||
input: ["text", "image"] as const,
|
||||
|
|
@ -96,8 +96,8 @@ export const GOOGLE_GEMINI_CLI_PRO_TEMPLATE_MODEL = {
|
|||
export const GOOGLE_GEMINI_CLI_FLASH_TEMPLATE_MODEL = {
|
||||
id: "gemini-3-flash-preview",
|
||||
name: "Gemini 3 Flash Preview (Cloud Code Assist)",
|
||||
provider: "google-gemini-cli",
|
||||
api: "google-gemini-cli",
|
||||
provider: "google",
|
||||
api: "google-generative-ai",
|
||||
baseUrl: "https://cloudcode-pa.googleapis.com",
|
||||
reasoning: false,
|
||||
input: ["text", "image"] as const,
|
||||
|
|
@ -109,7 +109,7 @@ export const GOOGLE_GEMINI_CLI_FLASH_TEMPLATE_MODEL = {
|
|||
export function mockGoogleGeminiCliProTemplateModel(discoverModelsMock: DiscoverModelsMock): void {
|
||||
mockTemplateModel(
|
||||
discoverModelsMock,
|
||||
"google-gemini-cli",
|
||||
"google",
|
||||
"gemini-3-pro-preview",
|
||||
GOOGLE_GEMINI_CLI_PRO_TEMPLATE_MODEL,
|
||||
);
|
||||
|
|
@ -120,7 +120,7 @@ export function mockGoogleGeminiCliFlashTemplateModel(
|
|||
): void {
|
||||
mockTemplateModel(
|
||||
discoverModelsMock,
|
||||
"google-gemini-cli",
|
||||
"google",
|
||||
"gemini-3-flash-preview",
|
||||
GOOGLE_GEMINI_CLI_FLASH_TEMPLATE_MODEL,
|
||||
);
|
||||
|
|
|
|||
|
|
@ -1,11 +1,10 @@
|
|||
import type { CliSessionBinding, SessionSystemPromptReport } from "../../config/sessions/types.js";
|
||||
import type { SessionSystemPromptReport } from "../../config/sessions/types.js";
|
||||
import type { MessagingToolSend } from "../pi-embedded-messaging.js";
|
||||
|
||||
export type EmbeddedPiAgentMeta = {
|
||||
sessionId: string;
|
||||
provider: string;
|
||||
model: string;
|
||||
cliSessionBinding?: CliSessionBinding;
|
||||
compactionCount?: number;
|
||||
promptTokens?: number;
|
||||
usage?: {
|
||||
|
|
|
|||
|
|
@ -5,11 +5,8 @@ import {
|
|||
resolveSendableOutboundReplyParts,
|
||||
} from "openclaw/plugin-sdk/reply-payload";
|
||||
import { resolveBootstrapWarningSignaturesSeen } from "../../agents/bootstrap-budget.js";
|
||||
import { runCliAgent } from "../../agents/cli-runner.js";
|
||||
import { getCliSessionBinding } from "../../agents/cli-session.js";
|
||||
import { LiveSessionModelSwitchError } from "../../agents/live-model-switch-error.js";
|
||||
import { runWithModelFallback, isFallbackSummaryError } from "../../agents/model-fallback.js";
|
||||
import { isCliProvider } from "../../agents/model-selection.js";
|
||||
import {
|
||||
BILLING_ERROR_USER_MESSAGE,
|
||||
isCompactionFailureError,
|
||||
|
|
@ -697,126 +694,6 @@ export async function runAgentTurnWithFallback(params: {
|
|||
);
|
||||
}
|
||||
|
||||
if (isCliProvider(provider, params.followupRun.run.config)) {
|
||||
const startedAt = Date.now();
|
||||
notifyAgentRunStart();
|
||||
emitAgentEvent({
|
||||
runId,
|
||||
stream: "lifecycle",
|
||||
data: {
|
||||
phase: "start",
|
||||
startedAt,
|
||||
},
|
||||
});
|
||||
const cliSessionBinding = getCliSessionBinding(
|
||||
params.getActiveSessionEntry(),
|
||||
provider,
|
||||
);
|
||||
const authProfileId =
|
||||
provider === params.followupRun.run.provider
|
||||
? params.followupRun.run.authProfileId
|
||||
: undefined;
|
||||
return (async () => {
|
||||
let lifecycleTerminalEmitted = false;
|
||||
try {
|
||||
const result = await runCliAgent({
|
||||
sessionId: params.followupRun.run.sessionId,
|
||||
sessionKey: params.sessionKey,
|
||||
agentId: params.followupRun.run.agentId,
|
||||
sessionFile: params.followupRun.run.sessionFile,
|
||||
workspaceDir: params.followupRun.run.workspaceDir,
|
||||
config: params.followupRun.run.config,
|
||||
prompt: params.commandBody,
|
||||
provider,
|
||||
model,
|
||||
thinkLevel: params.followupRun.run.thinkLevel,
|
||||
timeoutMs: params.followupRun.run.timeoutMs,
|
||||
runId,
|
||||
extraSystemPrompt: params.followupRun.run.extraSystemPrompt,
|
||||
ownerNumbers: params.followupRun.run.ownerNumbers,
|
||||
cliSessionId: cliSessionBinding?.sessionId,
|
||||
cliSessionBinding,
|
||||
authProfileId,
|
||||
bootstrapPromptWarningSignaturesSeen,
|
||||
bootstrapPromptWarningSignature:
|
||||
bootstrapPromptWarningSignaturesSeen[
|
||||
bootstrapPromptWarningSignaturesSeen.length - 1
|
||||
],
|
||||
images: params.opts?.images,
|
||||
imageOrder: params.opts?.imageOrder,
|
||||
messageProvider: params.followupRun.run.messageProvider,
|
||||
agentAccountId: params.followupRun.run.agentAccountId,
|
||||
abortSignal: params.replyOperation?.abortSignal ?? params.opts?.abortSignal,
|
||||
replyOperation: params.replyOperation,
|
||||
});
|
||||
bootstrapPromptWarningSignaturesSeen = resolveBootstrapWarningSignaturesSeen(
|
||||
result.meta?.systemPromptReport,
|
||||
);
|
||||
|
||||
// CLI backends don't emit streaming assistant events, so we need to
|
||||
// emit one with the final text so server-chat can populate its buffer
|
||||
// and send the response to TUI/WebSocket clients.
|
||||
const cliText = result.payloads?.[0]?.text?.trim();
|
||||
if (cliText) {
|
||||
emitAgentEvent({
|
||||
runId,
|
||||
stream: "assistant",
|
||||
data: { text: cliText },
|
||||
});
|
||||
}
|
||||
|
||||
emitAgentEvent({
|
||||
runId,
|
||||
stream: "lifecycle",
|
||||
data: {
|
||||
phase: "end",
|
||||
startedAt,
|
||||
endedAt: Date.now(),
|
||||
},
|
||||
});
|
||||
lifecycleTerminalEmitted = true;
|
||||
|
||||
return result;
|
||||
} catch (err) {
|
||||
if (rollbackFallbackCandidateSelection) {
|
||||
try {
|
||||
await rollbackFallbackCandidateSelection();
|
||||
} catch (rollbackError) {
|
||||
logVerbose(
|
||||
`failed to roll back fallback candidate selection (non-fatal): ${String(rollbackError)}`,
|
||||
);
|
||||
}
|
||||
}
|
||||
emitAgentEvent({
|
||||
runId,
|
||||
stream: "lifecycle",
|
||||
data: {
|
||||
phase: "error",
|
||||
startedAt,
|
||||
endedAt: Date.now(),
|
||||
error: String(err),
|
||||
},
|
||||
});
|
||||
lifecycleTerminalEmitted = true;
|
||||
throw err;
|
||||
} finally {
|
||||
// Defensive backstop: never let a CLI run complete without a terminal
|
||||
// lifecycle event, otherwise downstream consumers can hang.
|
||||
if (!lifecycleTerminalEmitted) {
|
||||
emitAgentEvent({
|
||||
runId,
|
||||
stream: "lifecycle",
|
||||
data: {
|
||||
phase: "error",
|
||||
startedAt,
|
||||
endedAt: Date.now(),
|
||||
error: "CLI run completed without lifecycle terminal event",
|
||||
},
|
||||
});
|
||||
}
|
||||
}
|
||||
})();
|
||||
}
|
||||
const { embeddedContext, senderContext, runBaseParams } = buildEmbeddedRunExecutionParams(
|
||||
{
|
||||
run: params.followupRun.run,
|
||||
|
|
|
|||
|
|
@ -2,7 +2,6 @@ import fs from "node:fs";
|
|||
import { lookupContextTokens } from "../../agents/context.js";
|
||||
import { DEFAULT_CONTEXT_TOKENS } from "../../agents/defaults.js";
|
||||
import { resolveModelAuthMode } from "../../agents/model-auth.js";
|
||||
import { isCliProvider } from "../../agents/model-selection.js";
|
||||
import { queueEmbeddedPiMessage } from "../../agents/pi-embedded.js";
|
||||
import { hasNonzeroUsage } from "../../agents/usage.js";
|
||||
import {
|
||||
|
|
@ -558,12 +557,6 @@ export async function runReplyAgent(params: {
|
|||
});
|
||||
}
|
||||
}
|
||||
const cliSessionId = isCliProvider(providerUsed, cfg)
|
||||
? runResult.meta?.agentMeta?.sessionId?.trim()
|
||||
: undefined;
|
||||
const cliSessionBinding = isCliProvider(providerUsed, cfg)
|
||||
? runResult.meta?.agentMeta?.cliSessionBinding
|
||||
: undefined;
|
||||
const contextTokensUsed =
|
||||
agentCfgContextTokens ??
|
||||
lookupContextTokens(modelUsed) ??
|
||||
|
|
@ -581,9 +574,7 @@ export async function runReplyAgent(params: {
|
|||
providerUsed,
|
||||
contextTokensUsed,
|
||||
systemPromptReport: runResult.meta?.systemPromptReport,
|
||||
cliSessionId,
|
||||
cliSessionBinding,
|
||||
usageIsContextSnapshot: isCliProvider(providerUsed, cfg),
|
||||
usageIsContextSnapshot: false,
|
||||
});
|
||||
|
||||
// Drain any late tool/block deliveries before deciding there's "nothing to send".
|
||||
|
|
|
|||
|
|
@ -42,12 +42,7 @@ import { resolveSubagentLabel } from "./subagents-utils.js";
|
|||
|
||||
// Some usage endpoints only work with CLI/session OAuth tokens, not API keys.
|
||||
// Skip those probes when the active auth mode cannot satisfy the endpoint.
|
||||
const USAGE_OAUTH_ONLY_PROVIDERS = new Set([
|
||||
"anthropic",
|
||||
"github-copilot",
|
||||
"google-gemini-cli",
|
||||
"openai-codex",
|
||||
]);
|
||||
const USAGE_OAUTH_ONLY_PROVIDERS = new Set(["anthropic", "github-copilot", "openai-codex"]);
|
||||
|
||||
function shouldLoadUsageSummary(params: {
|
||||
provider?: string;
|
||||
|
|
|
|||
|
|
@ -8,7 +8,6 @@ import { resolveBootstrapWarningSignaturesSeen } from "../../agents/bootstrap-bu
|
|||
import { lookupContextTokens } from "../../agents/context.js";
|
||||
import { DEFAULT_CONTEXT_TOKENS } from "../../agents/defaults.js";
|
||||
import { runWithModelFallback } from "../../agents/model-fallback.js";
|
||||
import { isCliProvider } from "../../agents/model-selection.js";
|
||||
import { runEmbeddedPiAgent } from "../../agents/pi-embedded.js";
|
||||
import type { SessionEntry } from "../../config/sessions.js";
|
||||
import type { TypingMode } from "../../config/types.js";
|
||||
|
|
@ -305,11 +304,7 @@ export function createFollowupRunner(params: {
|
|||
providerUsed: fallbackProvider,
|
||||
contextTokensUsed,
|
||||
systemPromptReport: runResult.meta?.systemPromptReport,
|
||||
cliSessionBinding: runResult.meta?.agentMeta?.cliSessionBinding,
|
||||
usageIsContextSnapshot: isCliProvider(
|
||||
fallbackProvider ?? queued.run.provider,
|
||||
queued.run.config,
|
||||
),
|
||||
usageIsContextSnapshot: false,
|
||||
logLabel: "followup",
|
||||
});
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,4 +1,3 @@
|
|||
import { setCliSessionBinding, setCliSessionId } from "../../agents/cli-session.js";
|
||||
import {
|
||||
deriveSessionTotalTokens,
|
||||
hasNonzeroUsage,
|
||||
|
|
@ -14,39 +13,6 @@ import {
|
|||
import { logVerbose } from "../../globals.js";
|
||||
import { estimateUsageCost, resolveModelCostConfig } from "../../utils/usage-format.js";
|
||||
|
||||
function applyCliSessionIdToSessionPatch(
|
||||
params: {
|
||||
providerUsed?: string;
|
||||
cliSessionId?: string;
|
||||
cliSessionBinding?: import("../../config/sessions.js").CliSessionBinding;
|
||||
},
|
||||
entry: SessionEntry,
|
||||
patch: Partial<SessionEntry>,
|
||||
): Partial<SessionEntry> {
|
||||
const cliProvider = params.providerUsed ?? entry.modelProvider;
|
||||
if (params.cliSessionBinding && cliProvider) {
|
||||
const nextEntry = { ...entry, ...patch };
|
||||
setCliSessionBinding(nextEntry, cliProvider, params.cliSessionBinding);
|
||||
return {
|
||||
...patch,
|
||||
cliSessionIds: nextEntry.cliSessionIds,
|
||||
cliSessionBindings: nextEntry.cliSessionBindings,
|
||||
claudeCliSessionId: nextEntry.claudeCliSessionId,
|
||||
};
|
||||
}
|
||||
if (params.cliSessionId && cliProvider) {
|
||||
const nextEntry = { ...entry, ...patch };
|
||||
setCliSessionId(nextEntry, cliProvider, params.cliSessionId);
|
||||
return {
|
||||
...patch,
|
||||
cliSessionIds: nextEntry.cliSessionIds,
|
||||
cliSessionBindings: nextEntry.cliSessionBindings,
|
||||
claudeCliSessionId: nextEntry.claudeCliSessionId,
|
||||
};
|
||||
}
|
||||
return patch;
|
||||
}
|
||||
|
||||
function resolveNonNegativeNumber(value: number | undefined): number | undefined {
|
||||
return typeof value === "number" && Number.isFinite(value) && value >= 0 ? value : undefined;
|
||||
}
|
||||
|
|
@ -86,8 +52,6 @@ export async function persistSessionUsageUpdate(params: {
|
|||
promptTokens?: number;
|
||||
usageIsContextSnapshot?: boolean;
|
||||
systemPromptReport?: SessionSystemPromptReport;
|
||||
cliSessionId?: string;
|
||||
cliSessionBinding?: import("../../config/sessions.js").CliSessionBinding;
|
||||
logLabel?: string;
|
||||
}): Promise<void> {
|
||||
const { storePath, sessionKey } = params;
|
||||
|
|
@ -158,7 +122,7 @@ export async function persistSessionUsageUpdate(params: {
|
|||
// context utilization is stale/unknown.
|
||||
patch.totalTokens = totalTokens;
|
||||
patch.totalTokensFresh = typeof totalTokens === "number";
|
||||
return applyCliSessionIdToSessionPatch(params, entry, patch);
|
||||
return patch;
|
||||
},
|
||||
});
|
||||
} catch (err) {
|
||||
|
|
@ -180,7 +144,7 @@ export async function persistSessionUsageUpdate(params: {
|
|||
systemPromptReport: params.systemPromptReport ?? entry.systemPromptReport,
|
||||
updatedAt: Date.now(),
|
||||
};
|
||||
return applyCliSessionIdToSessionPatch(params, entry, patch);
|
||||
return patch;
|
||||
},
|
||||
});
|
||||
} catch (err) {
|
||||
|
|
|
|||
|
|
@ -569,9 +569,6 @@ export async function initSessionState(params: {
|
|||
persistedAuthProfileOverrideSource ?? baseEntry?.authProfileOverrideSource,
|
||||
authProfileOverrideCompactionCount:
|
||||
persistedAuthProfileOverrideCompactionCount ?? baseEntry?.authProfileOverrideCompactionCount,
|
||||
cliSessionIds: baseEntry?.cliSessionIds,
|
||||
cliSessionBindings: baseEntry?.cliSessionBindings,
|
||||
claudeCliSessionId: baseEntry?.claudeCliSessionId,
|
||||
label: persistedLabel ?? baseEntry?.label,
|
||||
spawnedBy: persistedSpawnedBy ?? baseEntry?.spawnedBy,
|
||||
spawnedWorkspaceDir: persistedSpawnedWorkspaceDir ?? baseEntry?.spawnedWorkspaceDir,
|
||||
|
|
|
|||
|
|
@ -197,18 +197,6 @@ describe("gateway run option collisions", () => {
|
|||
);
|
||||
});
|
||||
|
||||
it.each([["--cli-backend-logs", "generic flag"]])(
|
||||
"enables CLI backend log filtering via %s (%s)",
|
||||
async (flag) => {
|
||||
delete process.env.OPENCLAW_CLI_BACKEND_LOG_OUTPUT;
|
||||
|
||||
await runGatewayCli(["gateway", "run", flag, "--allow-unconfigured"]);
|
||||
|
||||
expect(setConsoleSubsystemFilter).toHaveBeenCalledWith(["agent/cli-backend"]);
|
||||
expect(process.env.OPENCLAW_CLI_BACKEND_LOG_OUTPUT).toBe("1");
|
||||
},
|
||||
);
|
||||
|
||||
it("starts gateway when token mode has no configured token (startup bootstrap path)", async () => {
|
||||
await runGatewayCli(["gateway", "run", "--allow-unconfigured"]);
|
||||
|
||||
|
|
|
|||
|
|
@ -20,7 +20,7 @@ import { GatewayLockError } from "../../infra/gateway-lock.js";
|
|||
import { formatPortDiagnostics, inspectPortUsage } from "../../infra/ports.js";
|
||||
import { cleanStaleGatewayProcessesSync } from "../../infra/restart-stale-pids.js";
|
||||
import { detectRespawnSupervisor } from "../../infra/supervisor-markers.js";
|
||||
import { setConsoleSubsystemFilter, setConsoleTimestampPrefix } from "../../logging/console.js";
|
||||
import { setConsoleTimestampPrefix } from "../../logging/console.js";
|
||||
import { createSubsystemLogger } from "../../logging/subsystem.js";
|
||||
import { defaultRuntime } from "../../runtime.js";
|
||||
import { formatCliCommand } from "../command-format.js";
|
||||
|
|
@ -49,8 +49,6 @@ type GatewayRunOpts = {
|
|||
allowUnconfigured?: boolean;
|
||||
force?: boolean;
|
||||
verbose?: boolean;
|
||||
cliBackendLogs?: boolean;
|
||||
claudeCliLogs?: boolean;
|
||||
wsLog?: unknown;
|
||||
compact?: boolean;
|
||||
rawStream?: boolean;
|
||||
|
|
@ -80,8 +78,6 @@ const GATEWAY_RUN_BOOLEAN_KEYS = [
|
|||
"reset",
|
||||
"force",
|
||||
"verbose",
|
||||
"cliBackendLogs",
|
||||
"claudeCliLogs",
|
||||
"compact",
|
||||
"rawStream",
|
||||
] as const;
|
||||
|
|
@ -241,10 +237,6 @@ async function runGatewayCommand(opts: GatewayRunOpts) {
|
|||
}
|
||||
|
||||
setVerbose(Boolean(opts.verbose));
|
||||
if (opts.cliBackendLogs || opts.claudeCliLogs) {
|
||||
setConsoleSubsystemFilter(["agent/cli-backend"]);
|
||||
process.env.OPENCLAW_CLI_BACKEND_LOG_OUTPUT = "1";
|
||||
}
|
||||
const wsLogRaw = (opts.compact ? "compact" : opts.wsLog) as string | undefined;
|
||||
const wsLogStyle: GatewayWsLogStyle =
|
||||
wsLogRaw === "compact" ? "compact" : wsLogRaw === "full" ? "full" : "auto";
|
||||
|
|
@ -599,11 +591,6 @@ export function addGatewayRunCommand(cmd: Command): Command {
|
|||
)
|
||||
.option("--force", "Kill any existing listener on the target port before starting", false)
|
||||
.option("--verbose", "Verbose logging to stdout/stderr", false)
|
||||
.option(
|
||||
"--cli-backend-logs",
|
||||
"Only show CLI backend logs in the console (includes stdout/stderr)",
|
||||
false,
|
||||
)
|
||||
.option("--ws-log <style>", 'WebSocket log style ("auto"|"full"|"compact")', "auto")
|
||||
.option("--compact", 'Alias for "--ws-log compact"', false)
|
||||
.option("--raw-stream", "Log raw model stream events to jsonl", false)
|
||||
|
|
|
|||
|
|
@ -111,10 +111,6 @@ describe("maybeRepairRemovedAnthropicClaudeCliState", () => {
|
|||
models: {
|
||||
"claude-cli/claude-sonnet-4-6": { alias: "Claude" },
|
||||
},
|
||||
cliBackends: {
|
||||
"claude-cli": { command: "claude" },
|
||||
"codex-cli": { command: "codex" },
|
||||
},
|
||||
},
|
||||
},
|
||||
} as OpenClawConfig,
|
||||
|
|
@ -137,8 +133,6 @@ describe("maybeRepairRemovedAnthropicClaudeCliState", () => {
|
|||
expect(next.agents?.defaults?.models?.["anthropic/claude-sonnet-4-6"]).toEqual({
|
||||
alias: "Claude",
|
||||
});
|
||||
expect(next.agents?.defaults?.cliBackends?.["claude-cli"]).toBeUndefined();
|
||||
expect(next.agents?.defaults?.cliBackends?.["codex-cli"]).toEqual({ command: "codex" });
|
||||
|
||||
const raw = JSON.parse(fs.readFileSync(authPath, "utf8")) as {
|
||||
profiles?: Record<string, unknown>;
|
||||
|
|
@ -212,9 +206,6 @@ describe("maybeRepairRemovedAnthropicClaudeCliState", () => {
|
|||
models: {
|
||||
"claude-cli/claude-sonnet-4-6": {},
|
||||
},
|
||||
cliBackends: {
|
||||
"claude-cli": { command: "claude" },
|
||||
},
|
||||
},
|
||||
},
|
||||
} as OpenClawConfig,
|
||||
|
|
@ -230,7 +221,6 @@ describe("maybeRepairRemovedAnthropicClaudeCliState", () => {
|
|||
expect(next.agents?.defaults?.model).toBe("anthropic/claude-sonnet-4-6");
|
||||
expect(next.agents?.defaults?.models?.["claude-cli/claude-sonnet-4-6"]).toBeUndefined();
|
||||
expect(next.agents?.defaults?.models?.["anthropic/claude-sonnet-4-6"]).toEqual({});
|
||||
expect(next.agents?.defaults?.cliBackends?.["claude-cli"]).toBeUndefined();
|
||||
|
||||
const raw = JSON.parse(fs.readFileSync(authPath, "utf8")) as {
|
||||
profiles?: Record<string, unknown>;
|
||||
|
|
|
|||
|
|
@ -307,15 +307,6 @@ function rewriteAnthropicClaudeCliConfig(params: {
|
|||
const rewrittenModel = rewriteModelSelection(defaults?.model);
|
||||
const rewrittenModels = rewriteModelMap(defaults?.models);
|
||||
|
||||
let nextCliBackends = defaults?.cliBackends;
|
||||
let cliBackendsChanged = false;
|
||||
if (nextCliBackends?.[CLAUDE_CLI_PROVIDER_ID]) {
|
||||
const clone = { ...nextCliBackends };
|
||||
delete clone[CLAUDE_CLI_PROVIDER_ID];
|
||||
nextCliBackends = Object.keys(clone).length > 0 ? clone : undefined;
|
||||
cliBackendsChanged = true;
|
||||
}
|
||||
|
||||
if (rewrittenProfiles.changed) {
|
||||
changes.push("removed stale Anthropic Claude CLI auth-profile config");
|
||||
}
|
||||
|
|
@ -325,10 +316,6 @@ function rewriteAnthropicClaudeCliConfig(params: {
|
|||
if (rewrittenModel.changed || rewrittenModels.changed) {
|
||||
changes.push("rewrote claude-cli model refs back to anthropic/*");
|
||||
}
|
||||
if (cliBackendsChanged) {
|
||||
changes.push("removed agents.defaults.cliBackends.claude-cli");
|
||||
}
|
||||
|
||||
if (changes.length === 0) {
|
||||
return { next: params.cfg, changes };
|
||||
}
|
||||
|
|
@ -337,7 +324,6 @@ function rewriteAnthropicClaudeCliConfig(params: {
|
|||
const nextOrder = rewrittenOrder.value;
|
||||
const nextModel = rewrittenModel.value as AgentDefaultsConfig["model"];
|
||||
const nextModels = rewrittenModels.value as AgentDefaultsConfig["models"];
|
||||
const nextCliBackendsTyped: AgentDefaultsConfig["cliBackends"] = nextCliBackends;
|
||||
|
||||
const nextAuth =
|
||||
nextProfiles || nextOrder || params.cfg.auth?.cooldowns
|
||||
|
|
@ -351,12 +337,11 @@ function rewriteAnthropicClaudeCliConfig(params: {
|
|||
: undefined;
|
||||
|
||||
const nextDefaults =
|
||||
rewrittenModel.changed || rewrittenModels.changed || cliBackendsChanged
|
||||
rewrittenModel.changed || rewrittenModels.changed
|
||||
? {
|
||||
...defaults,
|
||||
...(rewrittenModel.changed ? { model: nextModel } : {}),
|
||||
...(rewrittenModels.changed ? { models: nextModels } : {}),
|
||||
...(cliBackendsChanged ? { cliBackends: nextCliBackendsTyped } : {}),
|
||||
}
|
||||
: defaults;
|
||||
|
||||
|
|
@ -484,7 +469,7 @@ function hasStaleAnthropicClaudeCliConfig(cfg: OpenClawConfig): boolean {
|
|||
) {
|
||||
return true;
|
||||
}
|
||||
return Boolean(defaults?.cliBackends?.[CLAUDE_CLI_PROVIDER_ID]);
|
||||
return false;
|
||||
}
|
||||
|
||||
export async function maybeRepairRemovedAnthropicClaudeCliState(
|
||||
|
|
|
|||
|
|
@ -26,7 +26,6 @@ export type BuiltInAuthChoice =
|
|||
| "huggingface-api-key"
|
||||
| "apiKey"
|
||||
| "gemini-api-key"
|
||||
| "google-gemini-cli"
|
||||
| "zai-api-key"
|
||||
| "zai-coding-global"
|
||||
| "zai-coding-cn"
|
||||
|
|
|
|||
|
|
@ -75,7 +75,6 @@ export function makeRegistry(
|
|||
contracts: plugin.contracts,
|
||||
channelConfigs: plugin.channelConfigs,
|
||||
providers: plugin.providers ?? [],
|
||||
cliBackends: [],
|
||||
skills: [],
|
||||
hooks: [],
|
||||
origin: "config" as const,
|
||||
|
|
|
|||
|
|
@ -3238,256 +3238,6 @@ export const GENERATED_BASE_CONFIG_SCHEMA: BaseConfigSchemaResponse = {
|
|||
exclusiveMinimum: 0,
|
||||
maximum: 9007199254740991,
|
||||
},
|
||||
cliBackends: {
|
||||
type: "object",
|
||||
propertyNames: {
|
||||
type: "string",
|
||||
},
|
||||
additionalProperties: {
|
||||
type: "object",
|
||||
properties: {
|
||||
command: {
|
||||
type: "string",
|
||||
},
|
||||
args: {
|
||||
type: "array",
|
||||
items: {
|
||||
type: "string",
|
||||
},
|
||||
},
|
||||
output: {
|
||||
anyOf: [
|
||||
{
|
||||
type: "string",
|
||||
const: "json",
|
||||
},
|
||||
{
|
||||
type: "string",
|
||||
const: "text",
|
||||
},
|
||||
{
|
||||
type: "string",
|
||||
const: "jsonl",
|
||||
},
|
||||
],
|
||||
},
|
||||
resumeOutput: {
|
||||
anyOf: [
|
||||
{
|
||||
type: "string",
|
||||
const: "json",
|
||||
},
|
||||
{
|
||||
type: "string",
|
||||
const: "text",
|
||||
},
|
||||
{
|
||||
type: "string",
|
||||
const: "jsonl",
|
||||
},
|
||||
],
|
||||
},
|
||||
input: {
|
||||
anyOf: [
|
||||
{
|
||||
type: "string",
|
||||
const: "arg",
|
||||
},
|
||||
{
|
||||
type: "string",
|
||||
const: "stdin",
|
||||
},
|
||||
],
|
||||
},
|
||||
maxPromptArgChars: {
|
||||
type: "integer",
|
||||
exclusiveMinimum: 0,
|
||||
maximum: 9007199254740991,
|
||||
},
|
||||
env: {
|
||||
type: "object",
|
||||
propertyNames: {
|
||||
type: "string",
|
||||
},
|
||||
additionalProperties: {
|
||||
type: "string",
|
||||
},
|
||||
},
|
||||
clearEnv: {
|
||||
type: "array",
|
||||
items: {
|
||||
type: "string",
|
||||
},
|
||||
},
|
||||
modelArg: {
|
||||
type: "string",
|
||||
},
|
||||
modelAliases: {
|
||||
type: "object",
|
||||
propertyNames: {
|
||||
type: "string",
|
||||
},
|
||||
additionalProperties: {
|
||||
type: "string",
|
||||
},
|
||||
},
|
||||
sessionArg: {
|
||||
type: "string",
|
||||
},
|
||||
sessionArgs: {
|
||||
type: "array",
|
||||
items: {
|
||||
type: "string",
|
||||
},
|
||||
},
|
||||
resumeArgs: {
|
||||
type: "array",
|
||||
items: {
|
||||
type: "string",
|
||||
},
|
||||
},
|
||||
sessionMode: {
|
||||
anyOf: [
|
||||
{
|
||||
type: "string",
|
||||
const: "always",
|
||||
},
|
||||
{
|
||||
type: "string",
|
||||
const: "existing",
|
||||
},
|
||||
{
|
||||
type: "string",
|
||||
const: "none",
|
||||
},
|
||||
],
|
||||
},
|
||||
sessionIdFields: {
|
||||
type: "array",
|
||||
items: {
|
||||
type: "string",
|
||||
},
|
||||
},
|
||||
systemPromptArg: {
|
||||
type: "string",
|
||||
},
|
||||
systemPromptMode: {
|
||||
anyOf: [
|
||||
{
|
||||
type: "string",
|
||||
const: "append",
|
||||
},
|
||||
{
|
||||
type: "string",
|
||||
const: "replace",
|
||||
},
|
||||
],
|
||||
},
|
||||
systemPromptWhen: {
|
||||
anyOf: [
|
||||
{
|
||||
type: "string",
|
||||
const: "first",
|
||||
},
|
||||
{
|
||||
type: "string",
|
||||
const: "always",
|
||||
},
|
||||
{
|
||||
type: "string",
|
||||
const: "never",
|
||||
},
|
||||
],
|
||||
},
|
||||
imageArg: {
|
||||
type: "string",
|
||||
},
|
||||
imageMode: {
|
||||
anyOf: [
|
||||
{
|
||||
type: "string",
|
||||
const: "repeat",
|
||||
},
|
||||
{
|
||||
type: "string",
|
||||
const: "list",
|
||||
},
|
||||
],
|
||||
},
|
||||
serialize: {
|
||||
type: "boolean",
|
||||
},
|
||||
reliability: {
|
||||
type: "object",
|
||||
properties: {
|
||||
watchdog: {
|
||||
type: "object",
|
||||
properties: {
|
||||
fresh: {
|
||||
type: "object",
|
||||
properties: {
|
||||
noOutputTimeoutMs: {
|
||||
type: "integer",
|
||||
minimum: 1000,
|
||||
maximum: 9007199254740991,
|
||||
},
|
||||
noOutputTimeoutRatio: {
|
||||
type: "number",
|
||||
minimum: 0.05,
|
||||
maximum: 0.95,
|
||||
},
|
||||
minMs: {
|
||||
type: "integer",
|
||||
minimum: 1000,
|
||||
maximum: 9007199254740991,
|
||||
},
|
||||
maxMs: {
|
||||
type: "integer",
|
||||
minimum: 1000,
|
||||
maximum: 9007199254740991,
|
||||
},
|
||||
},
|
||||
additionalProperties: false,
|
||||
},
|
||||
resume: {
|
||||
type: "object",
|
||||
properties: {
|
||||
noOutputTimeoutMs: {
|
||||
type: "integer",
|
||||
minimum: 1000,
|
||||
maximum: 9007199254740991,
|
||||
},
|
||||
noOutputTimeoutRatio: {
|
||||
type: "number",
|
||||
minimum: 0.05,
|
||||
maximum: 0.95,
|
||||
},
|
||||
minMs: {
|
||||
type: "integer",
|
||||
minimum: 1000,
|
||||
maximum: 9007199254740991,
|
||||
},
|
||||
maxMs: {
|
||||
type: "integer",
|
||||
minimum: 1000,
|
||||
maximum: 9007199254740991,
|
||||
},
|
||||
},
|
||||
additionalProperties: false,
|
||||
},
|
||||
},
|
||||
additionalProperties: false,
|
||||
},
|
||||
},
|
||||
additionalProperties: false,
|
||||
},
|
||||
},
|
||||
required: ["command"],
|
||||
additionalProperties: false,
|
||||
},
|
||||
title: "CLI Backends",
|
||||
description: "Optional CLI backends for text-only fallback.",
|
||||
},
|
||||
memorySearch: {
|
||||
type: "object",
|
||||
properties: {
|
||||
|
|
@ -24973,11 +24723,6 @@ export const GENERATED_BASE_CONFIG_SCHEMA: BaseConfigSchemaResponse = {
|
|||
help: "Maximum delay in ms for custom humanDelay (default: 2500).",
|
||||
tags: ["performance"],
|
||||
},
|
||||
"agents.defaults.cliBackends": {
|
||||
label: "CLI Backends",
|
||||
help: "Optional CLI backends for text-only fallback.",
|
||||
tags: ["advanced"],
|
||||
},
|
||||
"agents.defaults.compaction": {
|
||||
label: "Compaction",
|
||||
help: "Compaction tuning for when context nears token limits, including history share, reserve headroom, and pre-compaction memory flush behavior. Use this when long-running sessions need stable continuity under tight context windows.",
|
||||
|
|
|
|||
|
|
@ -1098,7 +1098,6 @@ export const FIELD_HELP: Record<string, string> = {
|
|||
"Maximum number of PDF pages to process for the PDF tool (default: 20).",
|
||||
"agents.defaults.imageMaxDimensionPx":
|
||||
"Max image side length in pixels when sanitizing transcript/tool-result image payloads (default: 1200).",
|
||||
"agents.defaults.cliBackends": "Optional CLI backends for text-only fallback.",
|
||||
"agents.defaults.compaction":
|
||||
"Compaction tuning for when context nears token limits, including history share, reserve headroom, and pre-compaction memory flush behavior. Use this when long-running sessions need stable continuity under tight context windows.",
|
||||
"agents.defaults.compaction.mode":
|
||||
|
|
|
|||
|
|
@ -504,7 +504,6 @@ export const FIELD_LABELS: Record<string, string> = {
|
|||
"agents.defaults.humanDelay.mode": "Human Delay Mode",
|
||||
"agents.defaults.humanDelay.minMs": "Human Delay Min (ms)",
|
||||
"agents.defaults.humanDelay.maxMs": "Human Delay Max (ms)",
|
||||
"agents.defaults.cliBackends": "CLI Backends",
|
||||
"agents.defaults.compaction": "Compaction",
|
||||
"agents.defaults.compaction.mode": "Compaction Mode",
|
||||
"agents.defaults.compaction.reserveTokens": "Compaction Reserve Tokens",
|
||||
|
|
|
|||
|
|
@ -65,14 +65,6 @@ export type AcpSessionRuntimeOptions = {
|
|||
backendExtras?: Record<string, string>;
|
||||
};
|
||||
|
||||
export type CliSessionBinding = {
|
||||
sessionId: string;
|
||||
authProfileId?: string;
|
||||
authEpoch?: string;
|
||||
extraSystemPromptHash?: string;
|
||||
mcpConfigHash?: string;
|
||||
};
|
||||
|
||||
export type SessionEntry = {
|
||||
/**
|
||||
* Last delivered heartbeat payload (used to suppress duplicate heartbeat notifications).
|
||||
|
|
@ -183,9 +175,6 @@ export type SessionEntry = {
|
|||
memoryFlushAt?: number;
|
||||
memoryFlushCompactionCount?: number;
|
||||
memoryFlushContextHash?: string;
|
||||
cliSessionIds?: Record<string, string>;
|
||||
cliSessionBindings?: Record<string, CliSessionBinding>;
|
||||
claudeCliSessionId?: string;
|
||||
label?: string;
|
||||
displayName?: string;
|
||||
channel?: string;
|
||||
|
|
|
|||
|
|
@ -44,79 +44,6 @@ export type AgentContextPruningConfig = {
|
|||
};
|
||||
};
|
||||
|
||||
export type CliBackendConfig = {
|
||||
/** CLI command to execute (absolute path or on PATH). */
|
||||
command: string;
|
||||
/** Base args applied to every invocation. */
|
||||
args?: string[];
|
||||
/** Output parsing mode (default: json). */
|
||||
output?: "json" | "text" | "jsonl";
|
||||
/** Output parsing mode when resuming a CLI session. */
|
||||
resumeOutput?: "json" | "text" | "jsonl";
|
||||
/** Prompt input mode (default: arg). */
|
||||
input?: "arg" | "stdin";
|
||||
/** Max prompt length for arg mode (if exceeded, stdin is used). */
|
||||
maxPromptArgChars?: number;
|
||||
/** Extra env vars injected for this CLI. */
|
||||
env?: Record<string, string>;
|
||||
/** Env vars to remove before launching this CLI. */
|
||||
clearEnv?: string[];
|
||||
/** Flag used to pass model id (e.g. --model). */
|
||||
modelArg?: string;
|
||||
/** Model aliases mapping (config model id → CLI model id). */
|
||||
modelAliases?: Record<string, string>;
|
||||
/** Flag used to pass session id (e.g. --session-id). */
|
||||
sessionArg?: string;
|
||||
/** Extra args used when resuming a session (use {sessionId} placeholder). */
|
||||
sessionArgs?: string[];
|
||||
/** Alternate args to use when resuming a session (use {sessionId} placeholder). */
|
||||
resumeArgs?: string[];
|
||||
/** When to pass session ids. */
|
||||
sessionMode?: "always" | "existing" | "none";
|
||||
/** JSON fields to read session id from (in order). */
|
||||
sessionIdFields?: string[];
|
||||
/** Flag used to pass system prompt. */
|
||||
systemPromptArg?: string;
|
||||
/** System prompt behavior (append vs replace). */
|
||||
systemPromptMode?: "append" | "replace";
|
||||
/** When to send system prompt. */
|
||||
systemPromptWhen?: "first" | "always" | "never";
|
||||
/** Flag used to pass image paths. */
|
||||
imageArg?: string;
|
||||
/** How to pass multiple images. */
|
||||
imageMode?: "repeat" | "list";
|
||||
/** Serialize runs for this CLI. */
|
||||
serialize?: boolean;
|
||||
/** Runtime reliability tuning for this backend's process lifecycle. */
|
||||
reliability?: {
|
||||
/** No-output watchdog tuning (fresh vs resumed runs). */
|
||||
watchdog?: {
|
||||
/** Fresh/new sessions (non-resume). */
|
||||
fresh?: {
|
||||
/** Fixed watchdog timeout in ms (overrides ratio when set). */
|
||||
noOutputTimeoutMs?: number;
|
||||
/** Fraction of overall timeout used when fixed timeout is not set. */
|
||||
noOutputTimeoutRatio?: number;
|
||||
/** Lower bound for computed watchdog timeout. */
|
||||
minMs?: number;
|
||||
/** Upper bound for computed watchdog timeout. */
|
||||
maxMs?: number;
|
||||
};
|
||||
/** Resume sessions. */
|
||||
resume?: {
|
||||
/** Fixed watchdog timeout in ms (overrides ratio when set). */
|
||||
noOutputTimeoutMs?: number;
|
||||
/** Fraction of overall timeout used when fixed timeout is not set. */
|
||||
noOutputTimeoutRatio?: number;
|
||||
/** Lower bound for computed watchdog timeout. */
|
||||
minMs?: number;
|
||||
/** Upper bound for computed watchdog timeout. */
|
||||
maxMs?: number;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
export type AgentDefaultsConfig = {
|
||||
/** Global default provider params applied to all models before per-model and per-agent overrides. */
|
||||
params?: Record<string, unknown>;
|
||||
|
|
@ -173,8 +100,6 @@ export type AgentDefaultsConfig = {
|
|||
envelopeElapsed?: "on" | "off";
|
||||
/** Optional context window cap (used for runtime estimates + status %). */
|
||||
contextTokens?: number;
|
||||
/** Optional CLI backends for text-only fallback. */
|
||||
cliBackends?: Record<string, CliBackendConfig>;
|
||||
/** Opt-in: prune old tool results from the LLM context to reduce token usage. */
|
||||
contextPruning?: AgentContextPruningConfig;
|
||||
/** LLM timeout configuration. */
|
||||
|
|
|
|||
|
|
@ -9,7 +9,6 @@ import {
|
|||
import {
|
||||
BlockStreamingChunkSchema,
|
||||
BlockStreamingCoalesceSchema,
|
||||
CliBackendSchema,
|
||||
HumanDelaySchema,
|
||||
TypingModeSchema,
|
||||
} from "./zod-schema.core.js";
|
||||
|
|
@ -54,7 +53,6 @@ export const AgentDefaultsSchema = z
|
|||
envelopeTimestamp: z.union([z.literal("on"), z.literal("off")]).optional(),
|
||||
envelopeElapsed: z.union([z.literal("on"), z.literal("off")]).optional(),
|
||||
contextTokens: z.number().int().positive().optional(),
|
||||
cliBackends: z.record(z.string(), CliBackendSchema).optional(),
|
||||
memorySearch: MemorySearchSchema,
|
||||
contextPruning: z
|
||||
.object({
|
||||
|
|
|
|||
|
|
@ -1,11 +1,9 @@
|
|||
export { resolveEffectiveModelFallbacks } from "../../agents/agent-scope.js";
|
||||
export { resolveBootstrapWarningSignaturesSeen } from "../../agents/bootstrap-budget.js";
|
||||
export { getCliSessionId, runCliAgent } from "../../agents/cli-runner.runtime.js";
|
||||
export { resolveFastModeState } from "../../agents/fast-mode.js";
|
||||
export { resolveNestedAgentLane } from "../../agents/lanes.js";
|
||||
export { LiveSessionModelSwitchError } from "../../agents/live-model-switch.js";
|
||||
export { runWithModelFallback } from "../../agents/model-fallback.js";
|
||||
export { isCliProvider } from "../../agents/model-selection.js";
|
||||
export { runEmbeddedPiAgent } from "../../agents/pi-embedded.js";
|
||||
export {
|
||||
countActiveDescendantRuns,
|
||||
|
|
|
|||
|
|
@ -8,8 +8,6 @@ import {
|
|||
countActiveDescendantRuns,
|
||||
listDescendantRunsForRequester,
|
||||
LiveSessionModelSwitchError,
|
||||
getCliSessionId,
|
||||
isCliProvider,
|
||||
logWarn,
|
||||
normalizeVerboseLevel,
|
||||
registerAgentRunContext,
|
||||
|
|
@ -17,7 +15,6 @@ import {
|
|||
resolveFastModeState,
|
||||
resolveNestedAgentLane,
|
||||
resolveSessionTranscriptPath,
|
||||
runCliAgent,
|
||||
runEmbeddedPiAgent,
|
||||
runWithModelFallback,
|
||||
} from "./run-execution.runtime.js";
|
||||
|
|
@ -31,7 +28,7 @@ import { syncCronSessionLiveSelection } from "./run-session-state.js";
|
|||
import { isLikelyInterimCronMessage } from "./subagent-followup-hints.js";
|
||||
|
||||
type AgentTurnPayload = Extract<CronJob["payload"], { kind: "agentTurn" }> | null;
|
||||
type CronPromptRunResult = Awaited<ReturnType<typeof runCliAgent>>;
|
||||
type CronPromptRunResult = Awaited<ReturnType<typeof runEmbeddedPiAgent>>;
|
||||
|
||||
export type CronExecutionResult = {
|
||||
runResult: CronPromptRunResult;
|
||||
|
|
@ -98,32 +95,6 @@ export function createCronPromptExecutor(params: {
|
|||
}
|
||||
const bootstrapPromptWarningSignature =
|
||||
bootstrapPromptWarningSignaturesSeen[bootstrapPromptWarningSignaturesSeen.length - 1];
|
||||
if (isCliProvider(providerOverride, params.cfgWithAgentDefaults)) {
|
||||
const cliSessionId = params.cronSession.isNewSession
|
||||
? undefined
|
||||
: getCliSessionId(params.cronSession.sessionEntry, providerOverride);
|
||||
const result = await runCliAgent({
|
||||
sessionId: params.cronSession.sessionEntry.sessionId,
|
||||
sessionKey: params.agentSessionKey,
|
||||
agentId: params.agentId,
|
||||
sessionFile,
|
||||
workspaceDir: params.workspaceDir,
|
||||
config: params.cfgWithAgentDefaults,
|
||||
prompt: promptText,
|
||||
provider: providerOverride,
|
||||
model: modelOverride,
|
||||
thinkLevel: params.thinkLevel,
|
||||
timeoutMs: params.timeoutMs,
|
||||
runId: params.cronSession.sessionEntry.sessionId,
|
||||
cliSessionId,
|
||||
bootstrapPromptWarningSignaturesSeen,
|
||||
bootstrapPromptWarningSignature,
|
||||
});
|
||||
bootstrapPromptWarningSignaturesSeen = resolveBootstrapWarningSignaturesSeen(
|
||||
result.meta?.systemPromptReport,
|
||||
);
|
||||
return result;
|
||||
}
|
||||
const result = await runEmbeddedPiAgent({
|
||||
sessionId: params.cronSession.sessionEntry.sessionId,
|
||||
sessionKey: params.agentSessionKey,
|
||||
|
|
|
|||
|
|
@ -7,7 +7,6 @@ export {
|
|||
resolveAgentSkillsFilter,
|
||||
} from "../../agents/agent-scope.js";
|
||||
export { resolveSessionAuthProfileOverride } from "../../agents/auth-profiles/session-override.js";
|
||||
export { setCliSessionId } from "../../agents/cli-session.js";
|
||||
export { lookupContextTokens } from "../../agents/context.js";
|
||||
export { resolveCronStyleNow } from "../../agents/current-time.js";
|
||||
export { DEFAULT_CONTEXT_TOKENS, DEFAULT_MODEL, DEFAULT_PROVIDER } from "../../agents/defaults.js";
|
||||
|
|
|
|||
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Add a link
Reference in a new issue