From 60986e5a0536a956069492d57dd859b6300f68e5 Mon Sep 17 00:00:00 2001 From: A <258483684+la14-1@users.noreply.github.com> Date: Sun, 22 Feb 2026 11:32:27 -0800 Subject: [PATCH] refactor: remove shared/common.sh and 27 subprocess-heavy test files (#1728) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit shared/common.sh (3852 lines) was dead code — the entire architecture was rewritten to TypeScript in cli/src/. No agent scripts source it anymore. The only consumer was github-auth.sh which just needed 4 log functions (now inlined). Remove 27 test files that spawned ~800+ real bash/bun subprocesses per run (the root cause of slow bun test). Every shared-common-*.test.ts file forked a real bash shell per test case to source shared/common.sh. CLI subprocess tests spawned `bun run index.ts` per assertion. These were integration tests, not unit tests. Also removes: - mock-tests CI job from test.yml (ran test/mock.sh which opens browser) - Stale plan files referencing deleted infrastructure - All CLAUDE.md/README.md references to the old lib/common.sh pattern Co-authored-by: lab <6723574+louisgv@users.noreply.github.com> Co-authored-by: Claude Opus 4.6 (1M context) --- .claude/plans/sunny-petting-plum.md | 218 - .claude/plans/zippy-seeking-squid.md | 17 - .../setup-agent-team/qa-fixtures-prompt.md | 2 +- .../setup-agent-team/qa-quality-prompt.md | 8 +- .github/workflows/test.yml | 49 - CLAUDE.md | 95 +- README.md | 10 +- cli/src/__tests__/agent-config-setup.test.ts | 699 --- .../__tests__/cli-entry-edge-cases.test.ts | 556 --- .../cli-version-and-dispatch.test.ts | 528 --- cli/src/__tests__/cmdrun-resolution.test.ts | 291 -- cli/src/__tests__/index-main-routing.test.ts | 290 -- cli/src/__tests__/install-helpers.test.ts | 518 --- .../__tests__/no-cloud-error-paths.test.ts | 288 -- cli/src/__tests__/script-syntax.test.ts | 101 - .../shared-common-api-classify.test.ts | 605 --- .../shared-common-api-helpers.test.ts | 1031 ----- 
.../shared-common-credential-mgmt.test.ts | 731 ---- .../shared-common-env-inject.test.ts | 302 -- .../shared-common-error-polling.test.ts | 297 -- .../__tests__/shared-common-helpers.test.ts | 589 --- .../shared-common-input-validation.test.ts | 637 --- .../shared-common-json-extraction.test.ts | 277 -- .../shared-common-logging-utils.test.ts | 620 --- .../shared-common-oauth-flow.test.ts | 852 ---- .../shared-common-post-session.test.ts | 385 -- .../shared-common-ssh-helpers.test.ts | 616 --- .../shared-common-ssh-key-lifecycle.test.ts | 624 --- .../shared-common-untested-helpers.test.ts | 162 - .../shared-common-validators.test.ts | 517 --- cli/src/__tests__/shared-github-auth.test.ts | 875 ---- cli/src/__tests__/shared-key-request.test.ts | 776 ---- cli/src/__tests__/show-info-or-error.test.ts | 258 -- cli/src/__tests__/unicode-detect.test.ts | 181 - shared/common.sh | 3852 ----------------- shared/github-auth.sh | 20 +- shared/key-request.sh | 2 +- 37 files changed, 26 insertions(+), 17853 deletions(-) delete mode 100644 .claude/plans/sunny-petting-plum.md delete mode 100644 .claude/plans/zippy-seeking-squid.md delete mode 100644 cli/src/__tests__/agent-config-setup.test.ts delete mode 100644 cli/src/__tests__/cli-entry-edge-cases.test.ts delete mode 100644 cli/src/__tests__/cli-version-and-dispatch.test.ts delete mode 100644 cli/src/__tests__/cmdrun-resolution.test.ts delete mode 100644 cli/src/__tests__/index-main-routing.test.ts delete mode 100644 cli/src/__tests__/install-helpers.test.ts delete mode 100644 cli/src/__tests__/no-cloud-error-paths.test.ts delete mode 100644 cli/src/__tests__/script-syntax.test.ts delete mode 100644 cli/src/__tests__/shared-common-api-classify.test.ts delete mode 100644 cli/src/__tests__/shared-common-api-helpers.test.ts delete mode 100644 cli/src/__tests__/shared-common-credential-mgmt.test.ts delete mode 100644 cli/src/__tests__/shared-common-env-inject.test.ts delete mode 100644 
cli/src/__tests__/shared-common-error-polling.test.ts delete mode 100644 cli/src/__tests__/shared-common-helpers.test.ts delete mode 100644 cli/src/__tests__/shared-common-input-validation.test.ts delete mode 100644 cli/src/__tests__/shared-common-json-extraction.test.ts delete mode 100644 cli/src/__tests__/shared-common-logging-utils.test.ts delete mode 100644 cli/src/__tests__/shared-common-oauth-flow.test.ts delete mode 100644 cli/src/__tests__/shared-common-post-session.test.ts delete mode 100644 cli/src/__tests__/shared-common-ssh-helpers.test.ts delete mode 100644 cli/src/__tests__/shared-common-ssh-key-lifecycle.test.ts delete mode 100644 cli/src/__tests__/shared-common-untested-helpers.test.ts delete mode 100644 cli/src/__tests__/shared-common-validators.test.ts delete mode 100644 cli/src/__tests__/shared-github-auth.test.ts delete mode 100644 cli/src/__tests__/shared-key-request.test.ts delete mode 100644 cli/src/__tests__/show-info-or-error.test.ts delete mode 100644 cli/src/__tests__/unicode-detect.test.ts delete mode 100644 shared/common.sh diff --git a/.claude/plans/sunny-petting-plum.md b/.claude/plans/sunny-petting-plum.md deleted file mode 100644 index e4502a18..00000000 --- a/.claude/plans/sunny-petting-plum.md +++ /dev/null @@ -1,218 +0,0 @@ -# Refactor: Cloud Adapter + Agent Runner System - -## Context - -149 agent scripts across 11 clouds share ~70% identical boilerplate (auth, SSH key, provision, wait, API key). Only the agent-specific parts differ (install, env vars, config, launch). The refactor introduces a standard `cloud_*` adapter interface and a `spawn_agent` runner that eliminates this duplication. - -## Architecture - -### 1. Cloud Adapter Interface (added to each `{cloud}/lib/common.sh`) - -Every cloud adds 7 standard functions at the bottom of its `lib/common.sh`. 
These bind cloud-specific globals (IP, sandbox ID, sprite name) so callers never need to know them: - -```bash -cloud_authenticate() # Ensure creds + SSH key (if applicable) -cloud_provision(name) # Create server, set internal globals -cloud_wait_ready() # Wait for connectivity + cloud-init -cloud_run(cmd) # Execute command on server -cloud_upload(local, remote) # Upload file to server -cloud_interactive(cmd) # Start interactive session -cloud_label() # Return display name string -``` - -**SSH-based clouds** (hetzner, digitalocean, gcp, aws-lightsail, oracle, ovh) — thin wrappers: -```bash -cloud_run() { run_server "${HETZNER_SERVER_IP}" "$1"; } -cloud_upload() { upload_file "${HETZNER_SERVER_IP}" "$1" "$2"; } -cloud_interactive() { interactive_session "${HETZNER_SERVER_IP}" "$1"; } -``` - -**CLI-based clouds** (fly, daytona, sprite) — delegate to their CLI wrappers: -```bash -cloud_run() { run_server "$1"; } # fly/daytona: no IP arg -cloud_run() { run_sprite "${SPRITE_NAME}" "$1"; } # sprite -``` - -**Local** — no-ops for provision/wait: -```bash -cloud_provision() { :; } -cloud_wait_ready() { :; } -cloud_run() { eval "$1"; } -``` - -### 2. `spawn_agent` Runner (added to `shared/common.sh`) - -~60 lines. Orchestrates the common flow, calling agent-defined hooks where needed: - -```bash -spawn_agent() { - local agent_key="$1" - - # 1. Authenticate cloud - cloud_authenticate - - # 2. Pre-provision prompts (github auth if agent wants it) - if _fn_exists agent_pre_provision; then agent_pre_provision; fi - - # 3. Provision - local server_name - server_name=$(get_server_name) - cloud_provision "${server_name}" - - # 4. Wait for readiness - cloud_wait_ready - - # 5. Install agent (hook or default) - if _fn_exists agent_install; then - agent_install - fi - - # 6. Get API key - get_or_prompt_api_key - - # 7. 
Model selection (if agent needs it) - if [[ -n "${AGENT_MODEL_PROMPT:-}" ]]; then - MODEL_ID=$(get_model_id_interactive "${AGENT_MODEL_DEFAULT:-openrouter/auto}" "${agent_key}") - fi - - # 8. Inject env vars (hook provides the vars) - _spawn_inject_env_vars - - # 9. Agent-specific config (optional hook) - if _fn_exists agent_configure; then agent_configure; fi - - # 10. Save connection info (optional hook) - if _fn_exists agent_save_connection; then agent_save_connection; fi - - # 11. Pre-launch (optional hook, e.g., start gateway daemon) - if _fn_exists agent_pre_launch; then agent_pre_launch; fi - - # 12. Launch - local launch_cmd - launch_cmd=$(agent_launch_cmd) - launch_session "$(cloud_label)" cloud_interactive "${launch_cmd}" -} -``` - -Helper for env injection — uses `cloud_run`/`cloud_upload` directly: -```bash -_spawn_inject_env_vars() { - log_step "Setting up environment variables..." - local env_temp; env_temp=$(mktemp) - chmod 600 "${env_temp}"; track_temp_file "${env_temp}" - agent_env_vars > "${env_temp}" # Hook: agent defines this - cloud_upload "${env_temp}" "/tmp/env_config" - cloud_run "cat /tmp/env_config >> ~/.bashrc && cat /tmp/env_config >> ~/.zshrc && rm /tmp/env_config" - offer_github_auth cloud_run -} -``` - -`_fn_exists` helper (bash 3.2 compatible): -```bash -_fn_exists() { type "$1" 2>/dev/null | head -1 | grep -q 'function'; } -``` - -### 3. 
Agent Script Pattern (after refactor) - -**Simple agent** — e.g., `hetzner/aider.sh` (was 37 lines → ~25 lines): -```bash -#!/bin/bash -set -eo pipefail -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" 2>/dev/null && pwd)" -if [[ -f "${SCRIPT_DIR}/lib/common.sh" ]]; then - source "${SCRIPT_DIR}/lib/common.sh" -else - eval "$(curl -fsSL https://raw.githubusercontent.com/OpenRouterTeam/spawn/main/hetzner/lib/common.sh)" -fi - -log_info "Aider on Hetzner Cloud" -echo "" - -AGENT_MODEL_PROMPT=1 -AGENT_MODEL_DEFAULT="openrouter/auto" - -agent_install() { - install_agent "Aider" "pip install aider-chat 2>/dev/null || pip3 install aider-chat" cloud_run - verify_agent "Aider" "command -v aider && aider --version" "pip install aider-chat" cloud_run -} -agent_env_vars() { generate_env_config "OPENROUTER_API_KEY=${OPENROUTER_API_KEY}"; } -agent_launch_cmd() { printf 'source ~/.zshrc && aider --model openrouter/%s' "${MODEL_ID}"; } - -spawn_agent "Aider" -``` - -**Complex agent** — e.g., `hetzner/claude.sh`: -```bash -agent_pre_provision() { prompt_github_auth; } -agent_install() { install_claude_code cloud_run; } -agent_env_vars() { - generate_env_config \ - "OPENROUTER_API_KEY=${OPENROUTER_API_KEY}" \ - "ANTHROPIC_BASE_URL=https://openrouter.ai/api" \ - "ANTHROPIC_AUTH_TOKEN=${OPENROUTER_API_KEY}" \ - "ANTHROPIC_API_KEY=" \ - "CLAUDE_CODE_SKIP_ONBOARDING=1" \ - "CLAUDE_CODE_ENABLE_TELEMETRY=0" -} -agent_configure() { setup_claude_code_config "${OPENROUTER_API_KEY}" cloud_upload cloud_run; } -agent_launch_cmd() { echo 'source ~/.bashrc 2>/dev/null; export PATH=$HOME/.claude/local/bin:$HOME/.local/bin:$HOME/.bun/bin:$PATH; claude'; } - -spawn_agent "Claude Code" -``` - -**Edge-case agent** — e.g., `hetzner/openclaw.sh` (needs gateway daemon): -```bash -agent_pre_launch() { - cloud_run "source ~/.zshrc && nohup openclaw gateway > /tmp/openclaw-gateway.log 2>&1 &" - sleep 2 -} -``` - -**Cross-cloud portability**: An agent's hooks are identical across all clouds. 
Only the source line at the top changes (e.g., `hetzner/lib/common.sh` → `fly/lib/common.sh`). - -### 4. Special Cases - -**Sprite `SPAWN_PROMPT`**: Handled in `cloud_interactive()` — Sprite's adapter checks `SPAWN_PROMPT` and uses non-tty exec if set. - -**OVH no cloud-init**: OVH's `cloud_wait_ready()` calls `install_base_deps` instead of `wait_for_cloud_init`. - -**Local (no provisioning)**: `cloud_provision()` and `cloud_wait_ready()` are no-ops. `cloud_run` uses `eval`. Local agent scripts still use `spawn_agent` — it just skips provisioning steps naturally. - -**`save_vm_connection`**: Clouds that need it (digitalocean, sprite) call it from `cloud_provision()` or a post-provision hook. - -## Files to Modify - -### Core (2 files) -- `shared/common.sh` — Add `spawn_agent()`, `_spawn_inject_env_vars()`, `_fn_exists()` - -### Cloud Adapters (11 files) -- `hetzner/lib/common.sh` — Add `cloud_*` functions wrapping `run_server $HETZNER_SERVER_IP` etc. -- `digitalocean/lib/common.sh` — Same, wrapping `$DO_SERVER_IP` -- `gcp/lib/common.sh` — Same, wrapping `$GCP_SERVER_IP` -- `aws-lightsail/lib/common.sh` — Same, wrapping `$LIGHTSAIL_SERVER_IP` -- `oracle/lib/common.sh` — Same, wrapping `$OCI_SERVER_IP` -- `ovh/lib/common.sh` — Same, wrapping `$OVH_SERVER_IP`, also `cloud_wait_ready()` calls `install_base_deps` -- `fly/lib/common.sh` — Same, no IP arg -- `daytona/lib/common.sh` — Same, no IP arg -- `sprite/lib/common.sh` — Same, wrapping `$SPRITE_NAME`, handles `SPAWN_PROMPT` in `cloud_interactive` -- `local/lib/common.sh` — No-op provision/wait, `eval` for run - -### Agent Scripts (~149 files) -All `{cloud}/{agent}.sh` files get rewritten to use the hook pattern + `spawn_agent`. Each shrinks from ~40-80 lines to ~20-35 lines. - -## Execution Strategy - -Use a team of agents working in parallel: -1. **Agent 1**: Add `spawn_agent` + `_fn_exists` + `_spawn_inject_env_vars` to `shared/common.sh` -2. 
**Agent 2**: Add `cloud_*` adapter functions to all 11 cloud `lib/common.sh` files -3. **Agents 3-5**: Convert agent scripts (split by cloud groups) -4. **Agent 6**: Run `bash -n` on all files + run test suite - -Work sequentially: core first (1+2), then scripts (3-5), then verify (6). - -## Verification - -1. `bash -n` syntax check on every modified `.sh` file -2. `bash test/run.sh` — full mock test suite -3. Spot-check: read 5-6 converted scripts to verify hook pattern is correct -4. Verify `curl|bash` compatibility — source fallback pattern preserved in all files diff --git a/.claude/plans/zippy-seeking-squid.md b/.claude/plans/zippy-seeking-squid.md deleted file mode 100644 index ded75a75..00000000 --- a/.claude/plans/zippy-seeking-squid.md +++ /dev/null @@ -1,17 +0,0 @@ -# Fix: GitHub CLI auth never works on remote servers - -## Problem -`gh auth login` (bare, no flags) tries to open a browser — always fails on headless remote servers. Also, local GitHub tokens are never passed through to the remote. - -## Fix (2 files) - -### 1. `shared/github-auth.sh` — Use device code flow -Change `gh auth login` → `gh auth login --web -p https -h github.com` (shows URL + code for user to enter in local browser) - -### 2. 
`shared/common.sh` — Token passthrough -- In `prompt_github_auth`: capture local GITHUB_TOKEN or `gh auth token` -- In `offer_github_auth`: pass captured token as env var prefix to remote command - -## Verification -- `bash -n` on modified files -- `bash test/run.sh` diff --git a/.claude/skills/setup-agent-team/qa-fixtures-prompt.md b/.claude/skills/setup-agent-team/qa-fixtures-prompt.md index 4e8a4e51..ecdb3f8c 100644 --- a/.claude/skills/setup-agent-team/qa-fixtures-prompt.md +++ b/.claude/skills/setup-agent-team/qa-fixtures-prompt.md @@ -63,7 +63,7 @@ curl -s -H "Authorization: Bearer ${DO_API_TOKEN}" "https://api.digitalocean.com curl -s -H "Authorization: Bearer ${FLY_API_TOKEN}" "https://api.machines.dev/v1/apps?org_slug=personal" ``` -For any other cloud directories found, read their `lib/common.sh` to discover the API base URL and auth pattern, then call equivalent GET-only endpoints. +For any other cloud directories found, read their TypeScript module in `cli/src/{cloud}/` to discover the API base URL and auth pattern, then call equivalent GET-only endpoints. ## Step 4 — Save Fixtures diff --git a/.claude/skills/setup-agent-team/qa-quality-prompt.md b/.claude/skills/setup-agent-team/qa-quality-prompt.md index 2c8e456b..e0f8b6e9 100644 --- a/.claude/skills/setup-agent-team/qa-quality-prompt.md +++ b/.claude/skills/setup-agent-team/qa-quality-prompt.md @@ -94,8 +94,8 @@ cd REPO_ROOT_PLACEHOLDER && git worktree remove WORKTREE_BASE_PLACEHOLDER/TASK_N 2. `cd` into worktree 3. 
Scan for these issues: - **a) Dead code**: Functions in `shared/*.sh` or `*/lib/common.sh` that are never called by any script - - Grep for the function name across all `.sh` files + **a) Dead code**: Functions in `shared/*.sh` or `cli/src/` that are never called + - Grep for the function name across all source files - If only the definition exists (no callers), remove the function **b) Stale references**: Scripts or code referencing deleted files: @@ -106,8 +106,8 @@ cd REPO_ROOT_PLACEHOLDER && git worktree remove WORKTREE_BASE_PLACEHOLDER/TASK_N **c) Python usage**: Any `python3 -c` or `python -c` calls in shell scripts - Replace with `bun eval` or `jq` as appropriate per CLAUDE.md rules - **d) Duplicate utilities**: Same helper function defined in multiple cloud `lib/common.sh` files - - If identical, move to `shared/common.sh` and have cloud libs call the shared version + **d) Duplicate utilities**: Same helper function defined in multiple TypeScript cloud modules + - If identical, move to `cli/src/shared/` and have cloud modules import it **e) Stale comments**: Comments referencing removed infrastructure, old test files, or deleted functions - Remove or update these comments diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 5d9a4336..171dcea3 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -30,52 +30,3 @@ jobs: - name: Verify cloud bundles build run: bun run cli/build-clouds.ts - mock-tests: - name: Mock Tests - runs-on: ubuntu-latest - timeout-minutes: 10 - steps: - - name: Checkout code - uses: actions/checkout@v4 - - - name: Setup Bun - uses: oven-sh/setup-bun@v2 - - - name: Install dependencies - working-directory: cli - run: bun install - - - name: Run mock tests - id: tests - env: - NO_COLOR: 1 - run: | - set +e - bash test/mock.sh 2>&1 | tee /tmp/mock-output.log - echo "exit_code=${PIPESTATUS[0]}" >> "$GITHUB_OUTPUT" - - - name: Post summary - if: always() - run: | - echo '## Mock Test Results' >> 
"$GITHUB_STEP_SUMMARY" - echo '```' >> "$GITHUB_STEP_SUMMARY" - grep -E 'Results:' /tmp/mock-output.log >> "$GITHUB_STEP_SUMMARY" || true - echo '```' >> "$GITHUB_STEP_SUMMARY" - FAILURES=$(grep '✗' /tmp/mock-output.log | head -50) - if [[ -n "$FAILURES" ]]; then - echo '' >> "$GITHUB_STEP_SUMMARY" - echo '
Failures (first 50)' >> "$GITHUB_STEP_SUMMARY" - echo '' >> "$GITHUB_STEP_SUMMARY" - echo '```' >> "$GITHUB_STEP_SUMMARY" - printf '%s\n' "$FAILURES" >> "$GITHUB_STEP_SUMMARY" - echo '```' >> "$GITHUB_STEP_SUMMARY" - echo '
' >> "$GITHUB_STEP_SUMMARY" - fi - - - name: Check results - if: always() - run: | - if [[ "${{ steps.tests.outputs.exit_code }}" != "0" ]]; then - echo "Mock tests failed (exit code ${{ steps.tests.outputs.exit_code }})" - exit 1 - fi diff --git a/CLAUDE.md b/CLAUDE.md index f15ea475..f80384ce 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -17,24 +17,10 @@ When run via `./discovery.sh`, your job is to pick ONE of these tasks and execut Look at `manifest.json` → `matrix` for any `"missing"` entry. To implement it: -- Find the **cloud's** `lib/common.sh` — it has all the provider-specific primitives (create server, run command, upload file, interactive session) - Find the **agent's** existing script on another cloud — it shows the install steps, config files, env vars, and launch command -- Combine them: use the cloud's primitives to execute the agent's setup steps +- The agent scripts are thin bash wrappers that bootstrap bun and run the TypeScript CLI - The script goes at `{cloud}/{agent}.sh` -**Pattern for every script:** -``` -1. Source {cloud}/lib/common.sh (local or remote fallback) -2. Authenticate with cloud provider -3. Provision server/VM -4. Wait for readiness -5. Install the agent -6. Get OpenRouter API key (env var or OAuth) -7. Inject env vars into shell config -8. Write agent-specific config files -9. Launch interactive session -``` - **OpenRouter injection is mandatory.** Every agent script MUST: - Set `OPENROUTER_API_KEY` in the shell environment - Set provider-specific env vars (e.g., `ANTHROPIC_BASE_URL=https://openrouter.ai/api`) @@ -62,7 +48,7 @@ We are currently shipping with **9 curated clouds** (sorted by price): - **Test coverage is mandatory** (see "Mock Test Infrastructure" section below) Steps to add one: -1. Create `{cloud}/lib/common.sh` with the provider's primitives +1. Add cloud-specific TypeScript module in `cli/src/{cloud}/` 2. Add an entry to `manifest.json` → `clouds` 3. 
Add `"missing"` entries to the matrix for every existing agent 4. Implement at least 2-3 agent scripts to prove the lib works @@ -116,10 +102,10 @@ spawn/ package.json # npm package (@openrouter/spawn) install.sh # One-liner installer (bun → npm → auto-install bun) shared/ - common.sh # Provider-agnostic shared utilities + github-auth.sh # Standalone GitHub CLI auth helper + key-request.sh # API key provisioning helpers (used by QA) {cloud}/ - lib/common.sh # Cloud-specific functions (sources shared/common.sh) - {agent}.sh # Agent deployment scripts + {agent}.sh # Agent deployment scripts (thin bash → bun wrappers) .claude/skills/setup-agent-team/ trigger-server.ts # HTTP trigger server (concurrent runs, dedup) discovery.sh # Discovery cycle script (fill gaps, scout new clouds/agents) @@ -154,61 +140,13 @@ The only documentation files allowed in the repository are: If you need to create documentation during development, write it to `.docs/` and add `.docs/` to `.gitignore`. -### Architecture: Shared Library Pattern +### Architecture -**`shared/common.sh`** - Core utilities used by all clouds: -- **Logging**: `log_info`, `log_warn`, `log_error` (colored output) -- **Input handling**: `safe_read` (works in interactive and piped contexts) -- **OAuth flow**: `try_oauth_flow`, `get_openrouter_api_key_oauth` (browser-based auth) -- **Network utilities**: `nc_listen` (cross-platform netcat wrapper), `open_browser` -- **SSH helpers**: `generate_ssh_key_if_missing`, `get_ssh_fingerprint`, `generic_ssh_wait` -- **Security**: `validate_model_id`, `json_escape` +All cloud provisioning and agent setup logic lives in TypeScript under `cli/src/`. Agent scripts (`{cloud}/{agent}.sh`) are thin bash wrappers that bootstrap bun and invoke the CLI. -**`{cloud}/lib/common.sh`** - Cloud-specific extensions: -- Sources `shared/common.sh` at the top -- Adds provider-specific functions: - - **Sprite**: `ensure_sprite_installed`, `get_sprite_name`, `run_sprite`, etc. 
- - **Hetzner**: API wrappers for server creation, SSH key management, etc. - - **DigitalOcean**: Droplet provisioning, API calls, etc. - - **Vultr**: Instance management via REST API - - **Linode**: Linode-specific provisioning functions +**`shared/github-auth.sh`** — Standalone GitHub CLI installer + OAuth login helper. Used by `cli/src/shared/agent-setup.ts` to set up `gh` on remote VMs. -**Agent scripts** (`{cloud}/{agent}.sh`): -1. Source their cloud's `lib/common.sh` (which auto-sources `shared/common.sh`) -2. Use shared functions for logging, OAuth, SSH setup -3. Use cloud functions for provisioning and connecting to servers -4. Deploy the specific agent with its configuration - -### Why This Structure? - -- **DRY principle**: OAuth, logging, SSH logic written once in `shared/common.sh` -- **Consistency**: All scripts use same authentication and error handling patterns -- **Maintainability**: Bug fixes in shared code benefit all providers automatically -- **Extensibility**: New clouds only need to implement provider-specific logic -- **Testability**: Shared functions can be tested independently - -### Source Pattern - -Every cloud's `lib/common.sh` starts with: - -```bash -#!/bin/bash -# Cloud-specific functions for {provider} - -# Source shared provider-agnostic functions -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -source "$SCRIPT_DIR/../../shared/common.sh" || { - echo "ERROR: Failed to load shared/common.sh" >&2 - exit 1 -} - -# ... cloud-specific functions below ... -``` - -This pattern ensures: -- Shared utilities are always available -- Path resolution works when sourced from any location -- Script fails fast if shared library is missing +**`shared/key-request.sh`** — API key provisioning helpers sourced by the QA harness (`qa.sh`) for loading cloud credentials from `~/.config/spawn/{cloud}.json`. 
## Shell Script Rules @@ -218,16 +156,6 @@ These rules are **non-negotiable** — violating them breaks remote execution fo Every script MUST work when executed via `bash <(curl -fsSL URL)`: - **NEVER** use relative paths for sourcing (`source ./lib/...`, `source ../shared/...`) - **NEVER** rely on `$0`, `dirname $0`, or `BASH_SOURCE` resolving to a real filesystem path -- **ALWAYS** use the local-or-remote fallback pattern: - ```bash - SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" 2>/dev/null && pwd)" - if [[ -f "$SCRIPT_DIR/lib/common.sh" ]]; then - source "$SCRIPT_DIR/lib/common.sh" - else - eval "$(curl -fsSL https://raw.githubusercontent.com/OpenRouterTeam/spawn/main/{cloud}/lib/common.sh)" - fi - ``` -- Similarly, `{cloud}/lib/common.sh` MUST use the same fallback for `shared/common.sh` ### macOS bash 3.x Compatibility macOS ships bash 3.2. All scripts MUST work on it: @@ -309,9 +237,8 @@ Without these, the cloud has **no test coverage**, body validation is missing, m When running autonomous discovery/refactoring loops (`./discovery.sh --loop`): - **Run `bash -n` on every changed .sh file** before committing — syntax errors break everything -- **NEVER revert a prior fix** — if `shared/common.sh` was changed to fix macOS compat, don't undo it -- **NEVER re-introduce deleted functions** — if `write_oauth_response_file` was removed, don't call it -- **NEVER change the source/eval fallback pattern** in lib/common.sh files — it's load-bearing for curl|bash +- **NEVER revert a prior fix** — don't undo previously applied compatibility fixes +- **NEVER re-introduce deleted functions** — if a function was removed, don't call it - **Test after EACH iteration** — don't batch multiple changes without verification - **If a change breaks tests, STOP** — revert and ask for guidance rather than compounding the regression diff --git a/README.md b/README.md index 2fae71af..720f01ab 100644 --- a/README.md +++ b/README.md @@ -191,18 +191,16 @@ git config core.hooksPath 
.githooks ### Structure ``` -{cloud}/lib/common.sh # Cloud provider primitives (provision, SSH, cleanup) -{cloud}/{agent}.sh # Agent deployment script -shared/common.sh # Shared utilities (OAuth, logging, SSH helpers) -cli/ # TypeScript CLI (bun) +{cloud}/{agent}.sh # Agent deployment script (thin bash → bun wrapper) +cli/ # TypeScript CLI — all provisioning logic (bun) manifest.json # Source of truth for the matrix ``` ### Adding a new cloud -1. Create `{cloud}/lib/common.sh` with provisioning primitives +1. Add cloud-specific TypeScript module in `cli/src/{cloud}/` 2. Add to `manifest.json` -3. Implement agent scripts using the cloud's primitives +3. Implement agent scripts 4. See [CLAUDE.md](CLAUDE.md) for full contributor guide ### Adding a new agent diff --git a/cli/src/__tests__/agent-config-setup.test.ts b/cli/src/__tests__/agent-config-setup.test.ts deleted file mode 100644 index ad159104..00000000 --- a/cli/src/__tests__/agent-config-setup.test.ts +++ /dev/null @@ -1,699 +0,0 @@ -import { describe, it, expect } from "bun:test"; -import { execSync } from "child_process"; -import { resolve } from "path"; -import { mkdirSync, readFileSync, rmSync, existsSync } from "fs"; -import { tmpdir } from "os"; -import { join } from "path"; - -/** - * Tests for agent configuration and verification functions in shared/common.sh: - * - verify_agent_installed: command existence and version check - * - upload_config_file: temp file creation and callback invocation - * - setup_claude_code_config: Claude Code settings.json + .claude.json generation - * - setup_openclaw_config: OpenClaw openclaw.json generation - * - setup_continue_config: Continue config.json generation - * - * These functions had zero test coverage despite being used by every agent - * script across all cloud providers. They are security-relevant because they - * inject API keys into JSON config files using json_escape. 
- * - * Each test sources shared/common.sh and calls the function in a real bash - * subprocess to catch actual shell behavior (quoting, escaping, JSON structure). - * - * Agent: test-engineer - */ - -const REPO_ROOT = resolve(import.meta.dir, "../../.."); -const COMMON_SH = resolve(REPO_ROOT, "shared/common.sh"); - -/** - * Run a bash snippet that sources shared/common.sh first. - * Returns { exitCode, stdout, stderr }. - * - * Note: Scripts run in a bash subprocess, so template variables must be - * properly escaped or injected via environment variables. - */ -function runBash(script: string, env?: Record): { exitCode: number; stdout: string; stderr: string } { - const fullScript = `source "${COMMON_SH}"\n${script}`; - const { spawnSync } = require("child_process"); - const result = spawnSync("bash", ["-c", fullScript], { - encoding: "utf-8", - timeout: 10000, - stdio: ["pipe", "pipe", "pipe"], - env: { ...process.env, ...env }, - }); - return { - exitCode: result.status ?? 1, - stdout: (result.stdout || "").trim(), - stderr: (result.stderr || "").trim(), - }; -} - -/** - * Create a temporary directory for test files. - */ -function createTempDir(): string { - const dir = join(tmpdir(), `spawn-config-test-${Date.now()}-${Math.random().toString(36).slice(2)}`); - mkdirSync(dir, { recursive: true }); - return dir; -} - -/** - * Create a bash script that sets up mock_upload and mock_run callbacks - * that redirect to a temp directory instead of the user's home directory. - * This allows tests to verify file creation without affecting the real filesystem. 
- */ -function createMockSetup(tempDir: string, configDir: string): string { - return ` - mock_upload() { cp "$1" "$TEMP_DIR/\$(basename "$2")"; } - mock_run() { - local cmd="$1" - # Replace $HOME with $TEMP_DIR - cmd=\$(echo "$cmd" | sed "s|\\\$HOME|$TEMP_DIR|g") - # Replace /tmp/spawn_config_* with $TEMP_DIR/spawn_config_* - cmd=\$(echo "$cmd" | sed "s|/tmp/spawn_config_|$TEMP_DIR/spawn_config_|g") - eval "$cmd" - } - HOME="$TEMP_DIR" - `; -} - - -// ── verify_agent_installed ────────────────────────────────────────────────── - -describe("verify_agent_installed", () => { - describe("command found and verifies", () => { - it("should return 0 for a known command (bash)", () => { - const result = runBash('verify_agent_installed "bash" "--version" "Bash"'); - expect(result.exitCode).toBe(0); - expect(result.stderr).toContain("verified successfully"); - }); - - it("should return 0 for ls with --help", () => { - const result = runBash('verify_agent_installed "ls" "--help" "ls"'); - expect(result.exitCode).toBe(0); - }); - - it("should use --version as default verify arg", () => { - // bash supports --version without second arg - const result = runBash('verify_agent_installed "bash"'); - expect(result.exitCode).toBe(0); - }); - - it("should use command name as default agent name", () => { - const result = runBash('verify_agent_installed "bash"'); - expect(result.exitCode).toBe(0); - expect(result.stderr).toContain("bash"); - expect(result.stderr).toContain("verified successfully"); - }); - - it("should display custom agent name in messages", () => { - const result = runBash('verify_agent_installed "bash" "--version" "My Custom Agent"'); - expect(result.exitCode).toBe(0); - expect(result.stderr).toContain("My Custom Agent"); - }); - }); - - describe("command not found", () => { - it("should return 1 for non-existent command", () => { - const result = runBash('verify_agent_installed "definitely_not_a_real_command_xyz"'); - expect(result.exitCode).toBe(1); - }); - - 
it("should show not found error message", () => { - const result = runBash('verify_agent_installed "nonexistent_cmd_abc" "--version" "TestAgent"'); - expect(result.exitCode).toBe(1); - expect(result.stderr).toContain("not found in PATH"); - expect(result.stderr).toContain("TestAgent"); - }); - - it("should show troubleshooting hints on failure", () => { - const result = runBash('verify_agent_installed "nonexistent_cmd_abc" "--version" "TestAgent"'); - expect(result.exitCode).toBe(1); - expect(result.stderr).toContain("Possible causes"); - expect(result.stderr).toContain("How to fix"); - }); - - it("should include command name in error output", () => { - const result = runBash('verify_agent_installed "fake_agent_xyz" "--version" "FakeAgent"'); - expect(result.exitCode).toBe(1); - expect(result.stderr).toContain("fake_agent_xyz"); - }); - }); - - describe("command exists but verification fails", () => { - it("should return 1 when verify command fails", () => { - // 'false' is a valid command that always returns 1 - const result = runBash('verify_agent_installed "true" "--nonexistent-flag-xyz" "TrueCmd"'); - // true command ignores flags and succeeds, so test with a script - // Use a custom script that exists but fails verification - const tempDir = createTempDir(); - try { - const scriptPath = join(tempDir, "fake-agent"); - execSync(`echo '#!/bin/bash\nif [ "$1" = "--version" ]; then exit 1; fi' > "${scriptPath}" && chmod +x "${scriptPath}"`, { - encoding: "utf-8", - }); - const result2 = runBash(`PATH="${tempDir}:$PATH" verify_agent_installed "fake-agent" "--version" "FakeAgent"`); - expect(result2.exitCode).toBe(1); - expect(result2.stderr).toContain("verification failed"); - expect(result2.stderr).toContain("returned an error"); - } finally { - rmSync(tempDir, { recursive: true, force: true }); - } - }); - - it("should show prerequisite hints on verification failure", () => { - const tempDir = createTempDir(); - try { - const scriptPath = join(tempDir, 
"bad-agent"); - execSync(`echo '#!/bin/bash\nexit 1' > "${scriptPath}" && chmod +x "${scriptPath}"`, { - encoding: "utf-8", - }); - const result = runBash(`PATH="${tempDir}:$PATH" verify_agent_installed "bad-agent" "--version" "BadAgent"`); - expect(result.exitCode).toBe(1); - expect(result.stderr).toContain("Missing runtime dependencies"); - } finally { - rmSync(tempDir, { recursive: true, force: true }); - } - }); - }); -}); - -// ── upload_config_file ────────────────────────────────────────────────────── - -describe("upload_config_file", () => { - it("should create a temp file with correct content", () => { - const tempDir = createTempDir(); - try { - // Use mock callbacks that record their arguments - const result = runBash(` - mock_upload() { cp "$1" "${tempDir}/uploaded_file"; echo "UPLOAD:$1:$2"; } - mock_run() { echo "RUN:$1"; } - upload_config_file "mock_upload" "mock_run" "hello world content" "/remote/path/config.json" - `); - expect(result.exitCode).toBe(0); - // Verify the content was uploaded - const uploadedContent = readFileSync(join(tempDir, "uploaded_file"), "utf-8"); - expect(uploadedContent.trim()).toBe("hello world content"); - } finally { - rmSync(tempDir, { recursive: true, force: true }); - } - }); - - it("should invoke upload callback with temp file and remote temp path", () => { - const result = runBash(` - mock_upload() { echo "UPLOAD:$1:$2"; } - mock_run() { echo "RUN:$1"; } - upload_config_file "mock_upload" "mock_run" "test content" "~/.config/app.json" - `); - expect(result.exitCode).toBe(0); - // Should contain UPLOAD line - expect(result.stdout).toContain("UPLOAD:"); - // Remote temp path should contain spawn_config prefix - expect(result.stdout).toContain("spawn_config"); - // Should have a basename of the remote path - expect(result.stdout).toContain("app.json"); - }); - - it("should invoke run callback with mv command", () => { - const result = runBash(` - mock_upload() { echo "UPLOAD"; } - mock_run() { echo "RUN:$1"; } - 
upload_config_file "mock_upload" "mock_run" "test" "~/.config/test.json" - `); - expect(result.exitCode).toBe(0); - // Should run mv to move temp file to final path with chmod for permissions - expect(result.stdout).toContain("mv"); - expect(result.stdout).toContain("~/.config/test.json"); - }); - - it("should preserve multiline content", () => { - const tempDir = createTempDir(); - try { - const result = runBash(` - mock_upload() { cp "$1" "${tempDir}/uploaded"; } - mock_run() { :; } - upload_config_file "mock_upload" "mock_run" '{"key": "value", - "nested": true}' "/remote/config.json" - `); - expect(result.exitCode).toBe(0); - const content = readFileSync(join(tempDir, "uploaded"), "utf-8").trim(); - expect(content).toContain('"key": "value"'); - expect(content).toContain('"nested": true'); - } finally { - rmSync(tempDir, { recursive: true, force: true }); - } - }); - - it("should handle special characters in content", () => { - const tempDir = createTempDir(); - try { - const result = runBash(` - mock_upload() { cp "$1" "${tempDir}/uploaded"; } - mock_run() { :; } - upload_config_file "mock_upload" "mock_run" 'key with $dollar and "quotes"' "/remote/config" - `); - expect(result.exitCode).toBe(0); - const content = readFileSync(join(tempDir, "uploaded"), "utf-8").trim(); - expect(content).toContain("$dollar"); - expect(content).toContain('"quotes"'); - } finally { - rmSync(tempDir, { recursive: true, force: true }); - } - }); -}); - -// ── setup_claude_code_config ──────────────────────────────────────────────── - -describe("setup_claude_code_config", () => { - describe("generates valid JSON", () => { - it("should produce valid settings.json", () => { - const result = runBash(` - mock_upload() { echo "UPLOAD $2"; } - mock_run() { echo "RUN: $1"; } - setup_claude_code_config "sk-or-v1-test-key-123" "mock_upload" "mock_run" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("settings.json"); - }); - - it("should include OpenRouter base URL 
in settings", () => { - const result = runBash(` - mock_upload() { cat "$1"; } - mock_run() { :; } - setup_claude_code_config "sk-or-v1-test" "mock_upload" "mock_run" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("https://openrouter.ai/api"); - }); - - it("should include API key in settings via json_escape", () => { - const result = runBash(` - mock_upload() { cat "$1"; } - mock_run() { :; } - setup_claude_code_config "my-test-api-key-value" "mock_upload" "mock_run" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("my-test-api-key-value"); - }); - - it("should set bypass permissions in settings", () => { - const result = runBash(` - mock_upload() { cat "$1"; } - mock_run() { :; } - setup_claude_code_config "key123" "mock_upload" "mock_run" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("dangerouslySkipPermissions"); - }); - - it("should disable telemetry in settings", () => { - const result = runBash(` - mock_upload() { cat "$1"; } - mock_run() { :; } - setup_claude_code_config "key123" "mock_upload" "mock_run" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("CLAUDE_CODE_ENABLE_TELEMETRY"); - }); - - it("should produce valid .claude.json with onboarding completed", () => { - const tempDir = createTempDir(); - try { - const result = runBash(` - mock_upload() { cp "$1" "$TEMP_DIR/\$(basename "$2")"; } - mock_run() { - local cmd="$1" - # Replace $HOME with $TEMP_DIR - cmd=\$(echo "$cmd" | sed "s|\\\$HOME|$TEMP_DIR|g") - # Replace /tmp/spawn_config_* with $TEMP_DIR/spawn_config_* - cmd=\$(echo "$cmd" | sed "s|/tmp/spawn_config_|$TEMP_DIR/spawn_config_|g") - eval "$cmd" - } - HOME="$TEMP_DIR" - setup_claude_code_config "key" "mock_upload" "mock_run" - `, { TEMP_DIR: tempDir }); - expect(result.exitCode).toBe(0); - // List all files recursively to find .claude.json - const output = execSync(`find "${tempDir}" -type f 2>/dev/null`, { encoding: "utf-8" }).trim(); - 
const files = output.split("\n").filter(f => f); - const claudeFile = files.find(f => f.includes(".claude.json")); - expect(claudeFile).toBeDefined(); - const content = readFileSync(claudeFile!, "utf-8"); - const parsed = JSON.parse(content); - expect(parsed.hasCompletedOnboarding).toBe(true); - } finally { - rmSync(tempDir, { recursive: true, force: true }); - } - }); - - it("should create both settings.json and .claude.json files", () => { - const tempDir = createTempDir(); - try { - const result = runBash(` - mock_upload() { cp "$1" "$TEMP_DIR/\$(basename "$2")"; } - mock_run() { - local cmd="$1" - # Replace $HOME with $TEMP_DIR - cmd=\$(echo "$cmd" | sed "s|\\\$HOME|$TEMP_DIR|g") - # Replace /tmp/spawn_config_* with $TEMP_DIR/spawn_config_* - cmd=\$(echo "$cmd" | sed "s|/tmp/spawn_config_|$TEMP_DIR/spawn_config_|g") - eval "$cmd" - } - HOME="$TEMP_DIR" - setup_claude_code_config "key" "mock_upload" "mock_run" - `, { TEMP_DIR: tempDir }); - expect(result.exitCode).toBe(0); - const files = execSync(`find "${tempDir}" -type f`, { encoding: "utf-8" }).trim().split("\n").filter(f => f); - expect(files.some(f => f.includes("settings.json"))).toBe(true); - expect(files.some(f => f.includes(".claude.json"))).toBe(true); - } finally { - rmSync(tempDir, { recursive: true, force: true }); - } - }); - - it("should invoke run callback to create .claude directory", () => { - const result = runBash(` - mock_upload() { :; } - mock_run() { echo "CMD:$1"; } - setup_claude_code_config "key" "mock_upload" "mock_run" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("CMD:mkdir -p ~/.claude"); - }); - - it("should invoke run callback to create CLAUDE.md", () => { - const result = runBash(` - mock_upload() { :; } - mock_run() { echo "CMD:$1"; } - setup_claude_code_config "key" "mock_upload" "mock_run" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("CMD:touch ~/.claude/CLAUDE.md"); - }); - }); - - describe("json_escape security", () 
=> { - it("should safely escape API key with double quotes", () => { - const tempDir = createTempDir(); - try { - const result = runBash(` - mock_upload() { cp "$1" "$TEMP_DIR/\$(basename "$2")"; } - mock_run() { - local cmd="$1" - # Replace $HOME with $TEMP_DIR - cmd=\$(echo "$cmd" | sed "s|\\\$HOME|$TEMP_DIR|g") - # Replace /tmp/spawn_config_* with $TEMP_DIR/spawn_config_* - cmd=\$(echo "$cmd" | sed "s|/tmp/spawn_config_|$TEMP_DIR/spawn_config_|g") - eval "$cmd" - } - HOME="$TEMP_DIR" - setup_claude_code_config 'key-with-"quotes"-inside' "mock_upload" "mock_run" - `, { TEMP_DIR: tempDir }); - expect(result.exitCode).toBe(0); - const files = execSync(`find "${tempDir}" -name "*settings.json"`, { encoding: "utf-8" }).trim().split("\n").filter(f => f); - const settingsFile = files[0]; - expect(settingsFile).toBeDefined(); - const content = readFileSync(settingsFile, "utf-8"); - // Should be valid JSON even with quotes in the key - const parsed = JSON.parse(content); - expect(parsed.env.ANTHROPIC_AUTH_TOKEN).toContain("quotes"); - } finally { - rmSync(tempDir, { recursive: true, force: true }); - } - }); - - it("should safely escape API key with backslashes", () => { - const tempDir = createTempDir(); - try { - const result = runBash(` - mock_upload() { cp "$1" "$TEMP_DIR/\$(basename "$2")"; } - mock_run() { - local cmd="$1" - # Replace $HOME with $TEMP_DIR - cmd=\$(echo "$cmd" | sed "s|\\\$HOME|$TEMP_DIR|g") - # Replace /tmp/spawn_config_* with $TEMP_DIR/spawn_config_* - cmd=\$(echo "$cmd" | sed "s|/tmp/spawn_config_|$TEMP_DIR/spawn_config_|g") - eval "$cmd" - } - HOME="$TEMP_DIR" - setup_claude_code_config 'key\\\\with\\\\backslashes' "mock_upload" "mock_run" - `, { TEMP_DIR: tempDir }); - expect(result.exitCode).toBe(0); - const files = execSync(`find "${tempDir}" -name "*settings.json"`, { encoding: "utf-8" }).trim().split("\n").filter(f => f); - const settingsFile = files[0]; - expect(settingsFile).toBeDefined(); - const content = readFileSync(settingsFile, 
"utf-8"); - const parsed = JSON.parse(content); - expect(parsed.env.ANTHROPIC_AUTH_TOKEN).toBeDefined(); - } finally { - rmSync(tempDir, { recursive: true, force: true }); - } - }); - }); -}); - -// ── setup_openclaw_config ─────────────────────────────────────────────────── - -describe("setup_openclaw_config", () => { - it("should produce valid openclaw.json", () => { - const tempDir = createTempDir(); - try { - const result = runBash(` - ${createMockSetup(tempDir, ".openclaw")} - setup_openclaw_config "sk-or-v1-test-key" "openrouter/auto" "mock_upload" "mock_run" - `, { TEMP_DIR: tempDir }); - expect(result.exitCode).toBe(0); - const files = execSync(`find "${tempDir}" -name "*openclaw.json"`, { encoding: "utf-8" }).trim().split("\n").filter(f => f); - const opClawFile = files[0]; - expect(opClawFile).toBeDefined(); - const content = readFileSync(opClawFile, "utf-8"); - const parsed = JSON.parse(content); - expect(parsed).toBeDefined(); - } finally { - rmSync(tempDir, { recursive: true, force: true }); - } - }); - - it("should include OPENROUTER_API_KEY in env section", () => { - const tempDir = createTempDir(); - try { - const result = runBash(` - ${createMockSetup(tempDir, ".openclaw")} - setup_openclaw_config "my-api-key-123" "openrouter/auto" "mock_upload" "mock_run" - `, { TEMP_DIR: tempDir }); - expect(result.exitCode).toBe(0); - const files = execSync(`find "${tempDir}" -name "*openclaw.json"`, { encoding: "utf-8" }).trim().split("\n").filter(f => f); - const opClawFile = files[0]; - expect(opClawFile).toBeDefined(); - const content = readFileSync(opClawFile, "utf-8"); - const parsed = JSON.parse(content); - expect(parsed.env.OPENROUTER_API_KEY).toBe("my-api-key-123"); - } finally { - rmSync(tempDir, { recursive: true, force: true }); - } - }); - - it("should include model ID in agents.defaults.model.primary", () => { - const tempDir = createTempDir(); - try { - const result = runBash(` - ${createMockSetup(tempDir, ".openclaw")} - setup_openclaw_config 
"key" "anthropic/claude-3.5-sonnet" "mock_upload" "mock_run" - `, { TEMP_DIR: tempDir }); - expect(result.exitCode).toBe(0); - const files = execSync(`find "${tempDir}" -name "*openclaw.json"`, { encoding: "utf-8" }).trim().split("\n").filter(f => f); - const opClawFile = files[0]; - expect(opClawFile).toBeDefined(); - const content = readFileSync(opClawFile, "utf-8"); - const parsed = JSON.parse(content); - expect(parsed.agents.defaults.model.primary).toBe("anthropic/claude-3.5-sonnet"); - } finally { - rmSync(tempDir, { recursive: true, force: true }); - } - }); - - it("should include gateway config with local mode", () => { - const tempDir = createTempDir(); - try { - const result = runBash(` - ${createMockSetup(tempDir, ".openclaw")} - setup_openclaw_config "key" "auto" "mock_upload" "mock_run" - `, { TEMP_DIR: tempDir }); - expect(result.exitCode).toBe(0); - const files = execSync(`find "${tempDir}" -name "*openclaw.json"`, { encoding: "utf-8" }).trim().split("\n").filter(f => f); - const opClawFile = files[0]; - expect(opClawFile).toBeDefined(); - const content = readFileSync(opClawFile, "utf-8"); - const parsed = JSON.parse(content); - expect(parsed.gateway.mode).toBe("local"); - } finally { - rmSync(tempDir, { recursive: true, force: true }); - } - }); - - it("should generate a gateway auth token", () => { - const tempDir = createTempDir(); - try { - const result = runBash(` - ${createMockSetup(tempDir, ".openclaw")} - setup_openclaw_config "key" "auto" "mock_upload" "mock_run" - `, { TEMP_DIR: tempDir }); - expect(result.exitCode).toBe(0); - const files = execSync(`find "${tempDir}" -name "*openclaw.json"`, { encoding: "utf-8" }).trim().split("\n").filter(f => f); - const opClawFile = files[0]; - expect(opClawFile).toBeDefined(); - const content = readFileSync(opClawFile, "utf-8"); - const parsed = JSON.parse(content); - // Gateway token should be a 32-char hex string (openssl rand -hex 16) - expect(parsed.gateway.auth.token).toBeDefined(); - expect(typeof 
parsed.gateway.auth.token).toBe("string"); - expect(parsed.gateway.auth.token.length).toBe(32); - expect(parsed.gateway.auth.token).toMatch(/^[0-9a-f]+$/); - } finally { - rmSync(tempDir, { recursive: true, force: true }); - } - }); - - it("should invoke run callback to clean and create .openclaw directory", () => { - const result = runBash(` - mock_upload() { :; } - mock_run() { echo "CMD:$1"; } - setup_openclaw_config "key" "auto" "mock_upload" "mock_run" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("CMD:mkdir -p ~/.openclaw"); - }); -}); - -// ── setup_continue_config ─────────────────────────────────────────────────── - -describe("setup_continue_config", () => { - it("should produce valid config.json", () => { - const tempDir = createTempDir(); - try { - const result = runBash(` - ${createMockSetup(tempDir, ".continue")} - setup_continue_config "sk-or-v1-test-key" "mock_upload" "mock_run" - `, { TEMP_DIR: tempDir }); - expect(result.exitCode).toBe(0); - const files = execSync(`find "${tempDir}" -name "*config.json"`, { encoding: "utf-8" }).trim().split("\n").filter(f => f); - const configFile = files[0]; - expect(configFile).toBeDefined(); - const content = readFileSync(configFile, "utf-8"); - const parsed = JSON.parse(content); - expect(parsed).toBeDefined(); - } finally { - rmSync(tempDir, { recursive: true, force: true }); - } - }); - - it("should include OpenRouter model config", () => { - const tempDir = createTempDir(); - try { - const result = runBash(` - ${createMockSetup(tempDir, ".continue")} - setup_continue_config "test-key" "mock_upload" "mock_run" - `, { TEMP_DIR: tempDir }); - expect(result.exitCode).toBe(0); - const files = execSync(`find "${tempDir}" -name "*config.json"`, { encoding: "utf-8" }).trim().split("\n").filter(f => f); - const configFile = files[0]; - expect(configFile).toBeDefined(); - const content = readFileSync(configFile, "utf-8"); - const parsed = JSON.parse(content); - 
expect(parsed.models).toBeArray(); - expect(parsed.models.length).toBeGreaterThan(0); - expect(parsed.models[0].provider).toBe("openrouter"); - expect(parsed.models[0].model).toBe("openrouter/auto"); - } finally { - rmSync(tempDir, { recursive: true, force: true }); - } - }); - - it("should include API key from json_escape", () => { - const tempDir = createTempDir(); - try { - const result = runBash(` - ${createMockSetup(tempDir, ".continue")} - setup_continue_config "my-continue-api-key" "mock_upload" "mock_run" - `, { TEMP_DIR: tempDir }); - expect(result.exitCode).toBe(0); - const files = execSync(`find "${tempDir}" -name "*config.json"`, { encoding: "utf-8" }).trim().split("\n").filter(f => f); - const configFile = files[0]; - expect(configFile).toBeDefined(); - const content = readFileSync(configFile, "utf-8"); - const parsed = JSON.parse(content); - expect(parsed.models[0].apiKey).toBe("my-continue-api-key"); - } finally { - rmSync(tempDir, { recursive: true, force: true }); - } - }); - - it("should set apiBase to OpenRouter API v1", () => { - const tempDir = createTempDir(); - try { - const result = runBash(` - ${createMockSetup(tempDir, ".continue")} - setup_continue_config "key" "mock_upload" "mock_run" - `, { TEMP_DIR: tempDir }); - expect(result.exitCode).toBe(0); - const files = execSync(`find "${tempDir}" -name "*config.json"`, { encoding: "utf-8" }).trim().split("\n").filter(f => f); - const configFile = files[0]; - expect(configFile).toBeDefined(); - const content = readFileSync(configFile, "utf-8"); - const parsed = JSON.parse(content); - expect(parsed.models[0].apiBase).toBe("https://openrouter.ai/api/v1"); - } finally { - rmSync(tempDir, { recursive: true, force: true }); - } - }); - - it("should set title to OpenRouter", () => { - const tempDir = createTempDir(); - try { - const result = runBash(` - ${createMockSetup(tempDir, ".continue")} - setup_continue_config "key" "mock_upload" "mock_run" - `, { TEMP_DIR: tempDir }); - 
expect(result.exitCode).toBe(0); - const files = execSync(`find "${tempDir}" -name "*config.json"`, { encoding: "utf-8" }).trim().split("\n").filter(f => f); - const configFile = files[0]; - expect(configFile).toBeDefined(); - const content = readFileSync(configFile, "utf-8"); - const parsed = JSON.parse(content); - expect(parsed.models[0].title).toBe("OpenRouter"); - } finally { - rmSync(tempDir, { recursive: true, force: true }); - } - }); - - it("should invoke run callback to create .continue directory", () => { - const result = runBash(` - mock_upload() { :; } - mock_run() { echo "CMD:$1"; } - setup_continue_config "key" "mock_upload" "mock_run" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("CMD:mkdir -p ~/.continue"); - }); - - it("should safely handle API key with special JSON characters", () => { - const tempDir = createTempDir(); - try { - const result = runBash(` - ${createMockSetup(tempDir, ".continue")} - setup_continue_config 'key-with-"quotes"-and\\backslash' "mock_upload" "mock_run" - `, { TEMP_DIR: tempDir }); - expect(result.exitCode).toBe(0); - const files = execSync(`find "${tempDir}" -name "*config.json"`, { encoding: "utf-8" }).trim().split("\n").filter(f => f); - const configFile = files[0]; - expect(configFile).toBeDefined(); - const content = readFileSync(configFile, "utf-8"); - // Must be valid JSON even with special characters - const parsed = JSON.parse(content); - expect(parsed.models[0].apiKey).toContain("quotes"); - } finally { - rmSync(tempDir, { recursive: true, force: true }); - } - }); -}); diff --git a/cli/src/__tests__/cli-entry-edge-cases.test.ts b/cli/src/__tests__/cli-entry-edge-cases.test.ts deleted file mode 100644 index d0c74780..00000000 --- a/cli/src/__tests__/cli-entry-edge-cases.test.ts +++ /dev/null @@ -1,556 +0,0 @@ -import { describe, it, expect } from "bun:test"; -import { spawnSync } from "child_process"; -import { resolve } from "path"; - -/** - * Edge case tests for the CLI entry point 
(index.ts). - * - * Tests paths that are not covered by other test files: - * - handleError formatting for various thrown value types (non-Error, number, etc.) - * - Flag ordering edge cases (flags before, between, and after positional args) - * - Multiple positional args beyond expected count (extra args ignored) - * - Unknown flags combined with valid subcommands - * - --prompt interaction with subcommands (list, agents, clouds) - * - --prompt-file with a real file on disk (subprocess-level verification) - * - Version flag combined with other flags - * - Empty string and whitespace positional args - * - isInteractiveTTY: non-TTY stdin shows help instead of interactive picker - * - SPAWN_NO_UPDATE_CHECK actually prevents update check in subprocess - * - * Agent: test-engineer - */ - -const CLI_DIR = resolve(import.meta.dir, "../.."); -const PROJECT_ROOT = resolve(CLI_DIR, ".."); - -function runCli( - args: string[], - env: Record = {} -): { stdout: string; stderr: string; exitCode: number } { - const result = spawnSync( - "bun", - ["run", `${CLI_DIR}/src/index.ts`, ...args], - { - cwd: PROJECT_ROOT, - env: { - PATH: `${process.env.HOME}/.bun/bin:${process.env.PATH}`, - HOME: process.env.HOME, - SHELL: process.env.SHELL, - TERM: process.env.TERM || "xterm", - // Prevent OAuth browser from opening during tests — if OPENROUTER_API_KEY - // is set, get_or_prompt_api_key() skips the entire OAuth flow. - OPENROUTER_API_KEY: "sk-or-test-fake", - ...env, - SPAWN_NO_UPDATE_CHECK: "1", - NODE_ENV: "", - BUN_ENV: "", - }, - encoding: "utf-8", - timeout: 5000, - } - ); - return { - stdout: result.stdout || "", - stderr: result.stderr || "", - exitCode: result.status ?? 
1, - }; -} - -function output(result: { stdout: string; stderr: string }): string { - return result.stdout + result.stderr; -} - -// ── handleError output formatting ───────────────────────────────────────── - -describe("error output formatting", () => { - it("should show error with valid names hint for invalid identifier", () => { - const result = runCli(["../hack", "sprite"]); - const out = output(result); - expect(out).toContain("can only contain"); - expect(out).toContain("spawn agents"); - expect(result.exitCode).not.toBe(0); - }); - - it("should format error message for semicolon injection", () => { - const result = runCli(["agent;rm", "sprite"]); - const out = output(result); - expect(out).toContain("can only contain"); - expect(result.exitCode).not.toBe(0); - }); - - it("should format error message for dollar sign injection", () => { - const result = runCli(["agent$var", "sprite"]); - const out = output(result); - expect(out).toContain("can only contain"); - expect(result.exitCode).not.toBe(0); - }); - - it("should format error message for backtick injection", () => { - const result = runCli(["agent`cmd`", "sprite"]); - const out = output(result); - expect(out).toContain("can only contain"); - expect(result.exitCode).not.toBe(0); - }); - - it("should show identifier rules in error message", () => { - const result = runCli(["Agent!", "sprite"]); - const out = output(result); - expect(out).toContain("Lowercase letters"); - expect(out).toContain("Numbers"); - expect(out).toContain("Hyphens"); - expect(result.exitCode).not.toBe(0); - }); -}); - -// ── Flag ordering edge cases ────────────────────────────────────────────── - -describe("flag ordering edge cases", () => { - it("should handle --prompt before positional args", () => { - const result = runCli(["--prompt", "Fix bugs", "claude", "sprite", "--dry-run"]); - const out = output(result); - // Should attempt to run (not error about prompt) - expect(out).not.toContain("--prompt requires both"); - }); - - 
it("should handle -p between positional args", () => { - const result = runCli(["claude", "-p", "Fix bugs", "sprite", "--dry-run"]); - const out = output(result); - // Should attempt to run - expect(out).not.toContain("--prompt requires both"); - }); - - it("should handle --prompt after positional args", () => { - const result = runCli(["claude", "sprite", "--prompt", "Fix bugs", "--dry-run"]); - const out = output(result); - expect(out).not.toContain("--prompt requires both"); - }); - - it("should reject --prompt with no cloud regardless of flag position", () => { - const result = runCli(["--prompt", "Fix bugs", "claude"]); - const out = output(result); - expect(out).toContain("--prompt requires both"); - expect(result.exitCode).not.toBe(0); - }); - - it("should reject -p with no cloud regardless of flag position", () => { - const result = runCli(["-p", "Fix bugs", "claude"]); - const out = output(result); - expect(out).toContain("--prompt requires both"); - expect(result.exitCode).not.toBe(0); - }); -}); - -// ── Unknown flags with subcommands ──────────────────────────────────────── - -describe("unknown flags with subcommands", () => { - it("should reject --json with list command", () => { - const result = runCli(["list", "--json"]); - const out = output(result); - expect(out).toContain("Unknown flag"); - expect(out).toContain("--json"); - expect(result.exitCode).not.toBe(0); - }); - - it("should reject --format with agents command", () => { - const result = runCli(["agents", "--format"]); - const out = output(result); - expect(out).toContain("Unknown flag"); - expect(result.exitCode).not.toBe(0); - }); - - it("should handle --dry-run with valid agent and cloud", () => { - const result = runCli(["claude", "sprite", "--dry-run"]); - const out = output(result); - expect(out).toContain("Dry run"); - expect(out).toContain("no resources"); - expect(result.exitCode).toBe(0); - }); - - it("should show supported flags list in unknown flag error", () => { - const result 
= runCli(["list", "--json"]); - const out = output(result); - expect(out).toContain("Supported flags"); - expect(out).toContain("--prompt"); - expect(out).toContain("--help"); - expect(out).toContain("--version"); - }); - - it("should not reject flags that look like negative numbers", () => { - // -1, -42 etc should NOT be treated as unknown flags - const result = runCli(["-1"]); - const out = output(result); - // Should be treated as a positional arg, not as a flag - expect(out).not.toContain("Unknown flag"); - }); -}); - -// ── --prompt interaction with subcommands ────────────────────────────────── - -describe("--prompt interaction with subcommands", () => { - it("should error when --prompt is used with no args at all", () => { - const result = runCli(["--prompt", "Fix bugs"]); - const out = output(result); - expect(out).toContain("--prompt requires both"); - expect(result.exitCode).not.toBe(0); - }); - - it("should error when --prompt is used with 'list' subcommand", () => { - // "spawn list --prompt 'text'" - list doesn't take a prompt - // After extracting --prompt, filtered args become ["list"] - // which dispatches to cmdList (no error about prompt, but --prompt value is ignored) - const result = runCli(["list", "--prompt", "text"]); - // cmdList will run since "list" is a subcommand and prompt is not passed to it - // This should succeed (prompt is simply ignored for subcommands) - expect(result.exitCode).toBe(0); - }); - - it("should show agent info when --prompt used with single agent arg", () => { - // "spawn claude --prompt 'text'" - only agent, no cloud - const result = runCli(["claude", "--prompt", "Fix bugs"]); - const out = output(result); - expect(out).toContain("--prompt requires both"); - expect(result.exitCode).not.toBe(0); - }); -}); - -// ── Version flag edge cases ─────────────────────────────────────────────── - -describe("version flag edge cases", () => { - it("should show version for 'version' as first arg regardless of other args", () => 
{ - const result = runCli(["version"]); - const out = output(result); - expect(out).toMatch(/spawn v\d+\.\d+\.\d+/); - expect(result.exitCode).toBe(0); - }); - - it("should show version for --version flag", () => { - const result = runCli(["--version"]); - const out = output(result); - expect(out).toMatch(/spawn v\d+\.\d+\.\d+/); - expect(result.exitCode).toBe(0); - }); - - it("should show version and exit for -V flag", () => { - const result = runCli(["-V"]); - const out = output(result); - expect(out).toMatch(/spawn v\d+\.\d+\.\d+/); - expect(result.exitCode).toBe(0); - }); - - it("should handle 'version' command and ignore extra args", () => { - // "spawn version extra" - immediateCommands[cmd] fires for "version" - const result = runCli(["version"]); - expect(result.exitCode).toBe(0); - }); -}); - -// ── Non-TTY behavior ────────────────────────────────────────────────────── - -describe("non-TTY behavior", () => { - it("should show non-TTY hint when no args in non-TTY (subprocess) mode", () => { - // Subprocesses don't have TTY stdin, so isInteractiveTTY returns false - const result = runCli([]); - const out = output(result); - expect(out).toContain("Cannot run interactive picker"); - expect(result.exitCode).toBe(1); - }); - - it("should include launch hint in non-TTY output", () => { - const result = runCli([]); - const out = output(result); - expect(out).toContain("spawn "); - }); - - it("should include help hint in non-TTY output", () => { - const result = runCli([]); - const out = output(result); - expect(out).toContain("spawn help"); - }); -}); - -// ── Alias commands ──────────────────────────────────────────────────────── - -describe("command aliases", () => { - it("should treat 'ls' as alias for 'list'", () => { - const result = runCli(["ls"]); - const out = output(result); - // 'ls' should produce spawn history output - expect(out).toMatch(/AGENT|No spawns recorded/); - expect(result.exitCode).toBe(0); - }); - - it("should treat 'm' as alias for 
'matrix'", () => { - const result = runCli(["m"]); - const out = output(result); - expect(out).toContain("combinations implemented"); - expect(result.exitCode).toBe(0); - }); - - it("should show help for 'ls --help'", () => { - const result = runCli(["ls", "--help"]); - const out = output(result); - expect(out).toContain("USAGE"); - expect(result.exitCode).toBe(0); - }); - - it("should show help for 'ls -h'", () => { - const result = runCli(["ls", "-h"]); - const out = output(result); - expect(out).toContain("USAGE"); - expect(result.exitCode).toBe(0); - }); -}); - -// ── --prompt-file with real file ────────────────────────────────────────── - -describe("--prompt-file with real files", () => { - it("should error for non-existent prompt file", () => { - const result = runCli([ - "claude", - "sprite", - "--prompt-file", - "/tmp/spawn-nonexistent-test-file-12345.txt", - ]); - const out = output(result); - expect(out).toContain("Prompt file not found"); - expect(result.exitCode).not.toBe(0); - }); - - it("should include filename in error message for missing file", () => { - const result = runCli([ - "claude", - "sprite", - "--prompt-file", - "/tmp/spawn-missing-file.txt", - ]); - const out = output(result); - expect(out).toContain("spawn-missing-file.txt"); - }); - - it("should include hint about file existence in error", () => { - const result = runCli([ - "claude", - "sprite", - "--prompt-file", - "/tmp/spawn-missing-file.txt", - ]); - const out = output(result); - expect(out).toContain("Check the path and try again"); - expect(result.exitCode).not.toBe(0); - }); -}); - -// ── Multiple agent/cloud resolution ─────────────────────────────────────── - -describe("agent and cloud display name resolution in cmdRun", () => { - it("should resolve uppercase agent key and show resolution message", () => { - const result = runCli(["CLAUDE", "sprite", "--dry-run"]); - const out = output(result); - expect(out).toContain("Resolved"); - expect(out).not.toContain("Unknown agent"); 
- }); - - it("should resolve uppercase cloud key and show resolution message", () => { - const result = runCli(["claude", "SPRITE", "--dry-run"]); - const out = output(result); - expect(out).toContain("Resolved"); - expect(out).not.toContain("Unknown cloud"); - }); - - it("should resolve both uppercase agent and cloud", () => { - const result = runCli(["CLAUDE", "SPRITE", "--dry-run"]); - const out = output(result); - // Both should be resolved - expect(out).toContain("Resolved"); - expect(out).not.toContain("Unknown"); - }); - - it("should not show resolution for exact lowercase keys", () => { - const result = runCli(["claude", "sprite", "--dry-run"]); - const out = output(result); - expect(out).not.toContain("Resolved"); - }); -}); - -// ── Subcommand list and agents output format ────────────────────────────── - -describe("subcommand output format verification", () => { - it("'agents' should list all agents in manifest", () => { - const result = runCli(["agents"]); - const out = output(result); - expect(out).toContain("Agents"); - expect(out).toContain("claude"); - expect(out).toContain("codex"); - expect(result.exitCode).toBe(0); - }); - - it("'clouds' should list all clouds in manifest", () => { - const result = runCli(["clouds"]); - const out = output(result); - expect(out).toContain("Cloud Providers"); - expect(out).toContain("sprite"); - expect(out).toContain("hetzner"); - expect(result.exitCode).toBe(0); - }); - - it("'list' should show spawn history", () => { - const result = runCli(["list"]); - const out = output(result); - // list now shows spawn history (may be empty or have entries) - expect(out).toMatch(/AGENT|No spawns recorded/); - expect(result.exitCode).toBe(0); - }); - - it("'matrix' should show the availability matrix", () => { - const result = runCli(["matrix"]); - const out = output(result); - expect(out).toContain("combinations implemented"); - expect(result.exitCode).toBe(0); - }); - - it("'matrix' should show usage hints at the bottom", () 
=> { - const result = runCli(["matrix"]); - const out = output(result); - expect(out).toContain("spawn "); - expect(out).toContain("spawn "); - }); -}); - -// ── Fuzzy match edge cases ──────────────────────────────────────────────── - -describe("fuzzy matching edge cases in showInfoOrError", () => { - it("should suggest close agent match for 2-char typo", () => { - // "cloude" is distance 1 from "claude" - const result = runCli(["cloude"]); - const out = output(result); - expect(out).toContain("Did you mean"); - expect(out).toContain("claude"); - }); - - it("should suggest close cloud match for 1-char typo", () => { - // "hetzne" is distance 1 from "hetzner" - const result = runCli(["hetzne"]); - const out = output(result); - expect(out).toContain("Did you mean"); - expect(out).toContain("hetzner"); - }); - - it("should not suggest for string with distance > 3", () => { - // "abcdefgh" is far from any agent/cloud - const result = runCli(["abcdefgh"]); - const out = output(result); - expect(out).toContain("Unknown agent or cloud"); - expect(out).not.toContain("Did you mean"); - }); - - it("should show both agent and cloud suggestions when both match", () => { - // Need a string close to both an agent and a cloud name - // "sprit" is close to "sprite" (cloud, distance 1) - const result = runCli(["sprit"]); - const out = output(result); - // Should suggest sprite as a cloud - expect(out).toContain("sprite"); - expect(out).toContain("(cloud:"); - }); -}); - -// ── SPAWN_NO_UNICODE env var ────────────────────────────────────────────── - -describe("SPAWN_NO_UNICODE environment variable", () => { - it("should work normally with SPAWN_NO_UNICODE=1", () => { - const result = runCli(["help"], { SPAWN_NO_UNICODE: "1" }); - const out = output(result); - expect(out).toContain("USAGE"); - expect(result.exitCode).toBe(0); - }); - - it("should work normally with SPAWN_ASCII=1", () => { - const result = runCli(["version"], { SPAWN_ASCII: "1" }); - const out = output(result); - 
expect(out).toMatch(/spawn v\d+\.\d+\.\d+/); - expect(result.exitCode).toBe(0); - }); -}); - -// ── SPAWN_NO_UPDATE_CHECK env var ───────────────────────────────────────── - -describe("SPAWN_NO_UPDATE_CHECK behavior", () => { - it("should skip update check and run command immediately", () => { - const start = Date.now(); - const result = runCli(["version"], { SPAWN_NO_UPDATE_CHECK: "1" }); - const elapsed = Date.now() - start; - expect(output(result)).toMatch(/spawn v\d+\.\d+\.\d+/); - expect(result.exitCode).toBe(0); - // With update check skipped, should be fast (< 10s) - expect(elapsed).toBeLessThan(10000); - }); -}); - -// ── Extra positional argument warnings ────────────────────────────────── - -describe("extra positional argument warnings", () => { - it("should warn when 3 positional args given (agent cloud extra)", () => { - const result = runCli(["claude", "sprite", "hetzner", "--dry-run"]); - const out = output(result); - expect(out).toContain("Extra argument ignored"); - expect(out).toContain("hetzner"); - expect(out).toContain("Usage:"); - }); - - it("should warn about multiple extra args", () => { - const result = runCli(["claude", "sprite", "foo", "bar", "--dry-run"]); - const out = output(result); - expect(out).toContain("Extra arguments ignored"); - expect(out).toContain("foo"); - expect(out).toContain("bar"); - }); - - it("should still work for subcommands with extra args (warning on stderr)", () => { - // "spawn matrix extra" runs successfully - the warning goes to stderr - // which isn't captured by execSync on success, but the command should still work - const result = runCli(["matrix", "extra"]); - const out = output(result); - expect(out).toContain("combinations implemented"); - expect(result.exitCode).toBe(0); - }); - - it("should still work for version with extra args (warning on stderr)", () => { - // "spawn version extra" runs successfully - the warning goes to stderr - const result = runCli(["version", "extra"]); - const out = 
output(result); - expect(out).toMatch(/spawn v\d+\.\d+\.\d+/); - expect(result.exitCode).toBe(0); - }); - - it("should NOT warn when exactly 2 positional args given", () => { - const result = runCli(["claude", "sprite", "--dry-run"]); - const out = output(result); - expect(out).not.toContain("extra argument"); - }); - - it("should NOT warn when exactly 1 positional arg given", () => { - const result = runCli(["claude"]); - const out = output(result); - expect(out).not.toContain("extra argument"); - }); -}); - -// ── Mismatched argument type errors ───────────────────────────────────── - -describe("mismatched argument type detection", () => { - it("should detect two agents passed as agent+cloud", () => { - const result = runCli(["claude", "codex"]); - const out = output(result); - expect(out).toContain("is an agent"); - expect(out).toContain("spawn "); - }); - - it("should detect two clouds passed as agent+cloud", () => { - const result = runCli(["hetzner", "sprite"]); - const out = output(result); - // The swap detection won't fire (sprite is not an agent), so validateAgent - // catches that hetzner is a cloud - expect(out).toContain("is a cloud provider"); - expect(out).toContain("spawn "); - }); -}); diff --git a/cli/src/__tests__/cli-version-and-dispatch.test.ts b/cli/src/__tests__/cli-version-and-dispatch.test.ts deleted file mode 100644 index c68064ea..00000000 --- a/cli/src/__tests__/cli-version-and-dispatch.test.ts +++ /dev/null @@ -1,528 +0,0 @@ -import { describe, it, expect, beforeEach, afterEach, spyOn } from "bun:test"; -import { resolve, join } from "path"; - -/** - * Tests for CLI version output and dispatch routing via subprocess execution. - * - * These tests exercise the ACTUAL index.ts entry point by running it as a - * subprocess, verifying the real behavior users see when they run spawn commands. 
- * This catches integration issues that unit tests with mocked modules miss: - * - * - showVersion: output format, runtime info (bun/node, platform, arch) - * - Version flags: --version, -v, -V, and "version" subcommand - * - Help flags: --help, -h, and "help" subcommand - * - handleNoCommand: --dry-run and --prompt without agent/cloud - * - Subcommand aliases: "m" for "matrix", "ls"/"history" for "list" - * - Verb alias routing: "run", "launch", "start", "deploy", "exec" - * - Unknown flag error messaging - * - Extra args warning - * - showInfoOrError: unknown command with fuzzy suggestions - * - * Agent: test-engineer - */ - -const CLI_PATH = resolve(import.meta.dir, "../../src/index.ts"); -const REPO_ROOT = resolve(import.meta.dir, "../../.."); - -/** - * Run the CLI with given args as a subprocess. - * Sets SPAWN_NO_UPDATE_CHECK to skip auto-update and BUN_ENV=test to skip - * local manifest loading. Returns { stdout, stderr, exitCode }. - */ -function runCLI( - args: string[], - env?: Record, -): { stdout: string; stderr: string; exitCode: number } { - const { spawnSync } = require("child_process"); - const result = spawnSync("bun", ["run", CLI_PATH, ...args], { - cwd: REPO_ROOT, - encoding: "utf-8", - timeout: 15000, - env: { - ...process.env, - // Ensure bun is in PATH for child processes - PATH: `${process.env.HOME}/.bun/bin:${process.env.PATH}`, - SPAWN_NO_UPDATE_CHECK: "1", - BUN_ENV: "test", - // Avoid terminal-dependent output - TERM: "dumb", - SPAWN_NO_UNICODE: "1", - // Ensure no color codes in output for easier assertion - NO_COLOR: "1", - ...env, - }, - stdio: ["pipe", "pipe", "pipe"], - }); - return { - stdout: (result.stdout || "").toString(), - stderr: (result.stderr || "").toString(), - exitCode: result.status ?? 
1, - }; -} - -// ── showVersion output ────────────────────────────────────────────────────── - -describe("showVersion via CLI subprocess", () => { - it("should show version string with 'spawn v' prefix", () => { - const { stdout, exitCode } = runCLI(["version"]); - expect(exitCode).toBe(0); - expect(stdout).toMatch(/spawn v\d+\.\d+\.\d+/); - }); - - it("should show bun runtime info", () => { - const { stdout, exitCode } = runCLI(["version"]); - expect(exitCode).toBe(0); - expect(stdout).toContain("bun"); - }); - - it("should show platform info", () => { - const { stdout, exitCode } = runCLI(["version"]); - expect(exitCode).toBe(0); - expect(stdout).toContain(process.platform); - }); - - it("should show arch info", () => { - const { stdout, exitCode } = runCLI(["version"]); - expect(exitCode).toBe(0); - expect(stdout).toContain(process.arch); - }); - - it("should suggest 'spawn update' command", () => { - const { stdout, exitCode } = runCLI(["version"]); - expect(exitCode).toBe(0); - expect(stdout).toContain("spawn update"); - }); - - it("should show binary path", () => { - const { stdout, exitCode } = runCLI(["version"]); - expect(exitCode).toBe(0); - // The binary path should contain the path to index.ts - expect(stdout).toContain("index.ts"); - }); -}); - -// ── Version flag aliases ──────────────────────────────────────────────────── - -describe("version flag aliases", () => { - it("--version should produce same version line as 'version'", () => { - const versionResult = runCLI(["version"]); - const flagResult = runCLI(["--version"]); - expect(flagResult.exitCode).toBe(0); - // Both should contain the version string - const versionMatch = versionResult.stdout.match(/spawn v[\d.]+/); - const flagMatch = flagResult.stdout.match(/spawn v[\d.]+/); - expect(versionMatch).not.toBeNull(); - expect(flagMatch).not.toBeNull(); - expect(versionMatch![0]).toBe(flagMatch![0]); - }); - - it("-v should produce same version line as 'version'", () => { - const { stdout, 
exitCode } = runCLI(["-v"]); - expect(exitCode).toBe(0); - expect(stdout).toMatch(/spawn v\d+\.\d+\.\d+/); - }); - - it("-V should produce same version line as 'version'", () => { - const { stdout, exitCode } = runCLI(["-V"]); - expect(exitCode).toBe(0); - expect(stdout).toMatch(/spawn v\d+\.\d+\.\d+/); - }); -}); - -// ── Help flags ────────────────────────────────────────────────────────────── - -describe("help command and flags", () => { - it("'help' should show USAGE section", () => { - const { stdout, exitCode } = runCLI(["help"]); - expect(exitCode).toBe(0); - expect(stdout).toContain("USAGE"); - }); - - it("--help should show USAGE section", () => { - const { stdout, exitCode } = runCLI(["--help"]); - expect(exitCode).toBe(0); - expect(stdout).toContain("USAGE"); - }); - - it("-h should show USAGE section", () => { - const { stdout, exitCode } = runCLI(["-h"]); - expect(exitCode).toBe(0); - expect(stdout).toContain("USAGE"); - }); - - it("help should include EXAMPLES section", () => { - const { stdout } = runCLI(["help"]); - expect(stdout).toContain("EXAMPLES"); - }); - - it("help should include AUTHENTICATION section", () => { - const { stdout } = runCLI(["help"]); - expect(stdout).toContain("AUTHENTICATION"); - }); - - it("help should include ENVIRONMENT VARIABLES section", () => { - const { stdout } = runCLI(["help"]); - expect(stdout).toContain("ENVIRONMENT VARIABLES"); - }); - - it("help should include TROUBLESHOOTING section", () => { - const { stdout } = runCLI(["help"]); - expect(stdout).toContain("TROUBLESHOOTING"); - }); - - it("help should mention --dry-run flag", () => { - const { stdout } = runCLI(["help"]); - expect(stdout).toContain("--dry-run"); - }); - - it("help should mention --prompt-file flag", () => { - const { stdout } = runCLI(["help"]); - expect(stdout).toContain("--prompt-file"); - }); - - it("help should mention list aliases (ls, history)", () => { - const { stdout } = runCLI(["help"]); - expect(stdout).toContain("ls"); - 
expect(stdout).toContain("history"); - }); - - it("help should mention matrix alias (m)", () => { - const { stdout } = runCLI(["help"]); - expect(stdout).toContain("matrix"); - }); -}); - -// ── Trailing help flag on subcommands ─────────────────────────────────────── - -describe("trailing help flag on subcommands", () => { - it("'agents --help' should show help, not agents list", () => { - const { stdout, exitCode } = runCLI(["agents", "--help"]); - expect(exitCode).toBe(0); - expect(stdout).toContain("USAGE"); - }); - - it("'clouds -h' should show help", () => { - const { stdout, exitCode } = runCLI(["clouds", "-h"]); - expect(exitCode).toBe(0); - expect(stdout).toContain("USAGE"); - }); - - it("'matrix --help' should show help", () => { - const { stdout, exitCode } = runCLI(["matrix", "--help"]); - expect(exitCode).toBe(0); - expect(stdout).toContain("USAGE"); - }); - - it("'list --help' should show help", () => { - const { stdout, exitCode } = runCLI(["list", "--help"]); - expect(exitCode).toBe(0); - expect(stdout).toContain("USAGE"); - }); - - it("'update --help' should show help", () => { - const { stdout, exitCode } = runCLI(["update", "--help"]); - expect(exitCode).toBe(0); - expect(stdout).toContain("USAGE"); - }); -}); - -// ── handleNoCommand: --dry-run and --prompt without agent/cloud ───────────── - -describe("handleNoCommand error paths", () => { - it("--dry-run without agent/cloud should error", () => { - const { stderr, exitCode } = runCLI(["--dry-run"]); - expect(exitCode).toBe(1); - expect(stderr).toContain("--dry-run requires both"); - }); - - it("-n without agent/cloud should error", () => { - const { stderr, exitCode } = runCLI(["-n"]); - expect(exitCode).toBe(1); - expect(stderr).toContain("--dry-run requires both"); - }); - - it("--prompt without agent/cloud should error", () => { - const { stderr, exitCode } = runCLI(["--prompt", "hello"]); - expect(exitCode).toBe(1); - expect(stderr).toContain("--prompt requires both"); - }); - - 
it("--prompt-file with nonexistent file should error with file-not-found", () => { - const { stderr, exitCode } = runCLI(["--prompt-file", "/tmp/nonexistent-spawn-test"]); - expect(exitCode).toBe(1); - // The file read error occurs before the no-agent/cloud check - expect(stderr).toContain("not found"); - }); -}); - -// ── --dry-run with only agent (no cloud) ──────────────────────────────────── - -describe("--dry-run with only agent", () => { - it("should error when --dry-run is used with agent only", () => { - const { stderr, exitCode } = runCLI(["claude", "--dry-run"]); - expect(exitCode).toBe(1); - expect(stderr).toContain("--dry-run requires both"); - }); -}); - -// ── --prompt with only agent (no cloud) ───────────────────────────────────── - -describe("--prompt with only agent (no cloud)", () => { - it("should error when --prompt is used with agent only", () => { - const { stderr, exitCode } = runCLI(["claude", "--prompt", "hello"]); - expect(exitCode).toBe(1); - expect(stderr).toContain("--prompt requires both"); - }); - - it("should suggest available clouds for the agent", () => { - const { stderr, exitCode } = runCLI(["claude", "--prompt", "hello"]); - expect(exitCode).toBe(1); - // Should suggest cloud options - expect(stderr).toContain("spawn claude"); - }); -}); - -// ── Unknown flag detection ────────────────────────────────────────────────── - -describe("unknown flag detection", () => { - it("should error on --unknown flag", () => { - const { stderr, exitCode } = runCLI(["--unknown"]); - expect(exitCode).toBe(1); - expect(stderr).toContain("Unknown flag"); - expect(stderr).toContain("--unknown"); - }); - - it("should show supported flags in error message", () => { - const { stderr, exitCode } = runCLI(["--xyz"]); - expect(exitCode).toBe(1); - expect(stderr).toContain("Supported flags"); - expect(stderr).toContain("--prompt"); - expect(stderr).toContain("--dry-run"); - expect(stderr).toContain("--help"); - expect(stderr).toContain("--version"); - }); 
- - it("should suggest 'spawn help' when unknown flag is used", () => { - const { stderr, exitCode } = runCLI(["--foo"]); - expect(exitCode).toBe(1); - expect(stderr).toContain("spawn help"); - }); - - it("should not treat -1 as a flag (numeric prefix)", () => { - // -1 starts with - but matches /^-\d/, so it should not be caught as unknown flag - // It will fail for other reasons (not a valid agent) but not as "unknown flag" - const { stderr, exitCode } = runCLI(["-1"]); - expect(stderr).not.toContain("Unknown flag"); - }); - - it("should treat --prompt-files (typo) as unknown flag", () => { - const { stderr, exitCode } = runCLI(["--prompt-files", "test.txt"]); - expect(exitCode).toBe(1); - expect(stderr).toContain("Unknown flag"); - expect(stderr).toContain("--prompt-files"); - }); -}); - -// ── Flag value requirements ───────────────────────────────────────────────── - -describe("flag value requirements", () => { - it("--prompt without value should error", () => { - const { stderr, exitCode } = runCLI(["claude", "sprite", "--prompt"]); - expect(exitCode).toBe(1); - expect(stderr).toContain("--prompt"); - expect(stderr).toContain("requires a value"); - }); - - it("-p without value should error", () => { - const { stderr, exitCode } = runCLI(["claude", "sprite", "-p"]); - expect(exitCode).toBe(1); - expect(stderr).toContain("-p"); - expect(stderr).toContain("requires a value"); - }); - - it("--prompt-file without value should error", () => { - const { stderr, exitCode } = runCLI(["claude", "sprite", "--prompt-file"]); - expect(exitCode).toBe(1); - expect(stderr).toContain("--prompt-file"); - expect(stderr).toContain("requires a value"); - }); - - it("-f without value should error", () => { - const { stderr, exitCode } = runCLI(["claude", "sprite", "-f"]); - expect(exitCode).toBe(1); - expect(stderr).toContain("-f"); - expect(stderr).toContain("requires a value"); - }); - - it("--prompt and --prompt-file together should error", () => { - const { stderr, exitCode } 
= runCLI([ - "claude", "sprite", - "--prompt", "hello", - "--prompt-file", "/tmp/test.txt", - ]); - expect(exitCode).toBe(1); - expect(stderr).toContain("cannot be used together"); - }); -}); - -// ── Verb alias routing ────────────────────────────────────────────────────── - -describe("verb alias routing", () => { - it("'run' without args should error with usage hint", () => { - const { stderr, exitCode } = runCLI(["run"]); - expect(exitCode).toBe(1); - expect(stderr).toContain("requires an agent and cloud"); - }); - - it("'launch' without args should error with usage hint", () => { - const { stderr, exitCode } = runCLI(["launch"]); - expect(exitCode).toBe(1); - expect(stderr).toContain("requires an agent and cloud"); - }); - - it("'start' without args should error with usage hint", () => { - const { stderr, exitCode } = runCLI(["start"]); - expect(exitCode).toBe(1); - expect(stderr).toContain("requires an agent and cloud"); - }); - - it("'deploy' without args should error with usage hint", () => { - const { stderr, exitCode } = runCLI(["deploy"]); - expect(exitCode).toBe(1); - expect(stderr).toContain("requires an agent and cloud"); - }); - - it("'exec' without args should error with usage hint", () => { - const { stderr, exitCode } = runCLI(["exec"]); - expect(exitCode).toBe(1); - expect(stderr).toContain("requires an agent and cloud"); - }); - - it("verb alias error should mention it's optional", () => { - const { stderr } = runCLI(["run"]); - expect(stderr).toContain("optional"); - expect(stderr).toContain("spawn "); - }); -}); - -// ── Extra args warning ────────────────────────────────────────────────────── - -describe("extra arguments warning", () => { - it("should warn about extra args after version command", () => { - const { stderr, stdout, exitCode } = runCLI(["version", "extra"]); - expect(exitCode).toBe(0); - expect(stderr.toLowerCase()).toContain("extra argument"); - expect(stderr).toContain("ignored"); - // Should still show version - 
expect(stdout).toMatch(/spawn v\d+\.\d+/); - }); - - it("should warn about multiple extra args", () => { - const { stderr, exitCode } = runCLI(["version", "a", "b", "c"]); - expect(exitCode).toBe(0); - expect(stderr.toLowerCase()).toContain("extra arguments"); - expect(stderr).toContain("ignored"); - }); - - it("should not warn when no extra args", () => { - const { stderr } = runCLI(["version"]); - expect(stderr.toLowerCase()).not.toContain("extra argument"); - }); -}); - -// ── Prompt file errors ────────────────────────────────────────────────────── - -describe("prompt file error handling", () => { - it("should show file-not-found error for nonexistent prompt file", () => { - const { stderr, exitCode } = runCLI([ - "claude", "sprite", - "--prompt-file", "/tmp/spawn-test-nonexistent-file-xyz123", - ]); - expect(exitCode).toBe(1); - expect(stderr).toContain("not found"); - }); - - it("should show directory error when prompt-file is a directory", () => { - const { stderr, exitCode } = runCLI([ - "claude", "sprite", - "--prompt-file", "/tmp", - ]); - expect(exitCode).toBe(1); - expect(stderr).toContain("directory"); - }); -}); - -// ── Non-interactive terminal without command ──────────────────────────────── - -describe("non-interactive terminal handling", () => { - it("should show usage hint when no args and no TTY", () => { - // Running as subprocess inherently lacks a TTY for stdin - const { stderr, exitCode } = runCLI([]); - expect(exitCode).toBe(1); - expect(stderr).toContain("Cannot run interactive picker: not a terminal"); - expect(stderr).toContain("spawn "); - expect(stderr).toContain("spawn agents"); - expect(stderr).toContain("spawn clouds"); - expect(stderr).toContain("spawn help"); - }); -}); - -// ── Subcommand alias routing ──────────────────────────────────────────────── - -describe("subcommand alias routing", () => { - it("'m' should work as alias for 'matrix'", () => { - const { stdout, exitCode } = runCLI(["m"]); - expect(exitCode).toBe(0); - 
expect(stdout).toContain("Availability Matrix"); - }); - - it("'agents' should list agents", () => { - const { stdout, exitCode } = runCLI(["agents"]); - expect(exitCode).toBe(0); - expect(stdout).toContain("Agents"); - }); - - it("'clouds' should list clouds", () => { - const { stdout, exitCode } = runCLI(["clouds"]); - expect(exitCode).toBe(0); - expect(stdout).toContain("Cloud Providers"); - }); -}); - -// ── List command aliases ──────────────────────────────────────────────────── - -describe("list command aliases", () => { - it("'list' should not crash with empty history", () => { - const { homedir } = require("os"); - const { exitCode } = runCLI(["list"], { SPAWN_HOME: join(homedir(), ".spawn-test-empty-home-" + Date.now()) }); - // May exit 0 (shows "no spawns") or run interactive picker in non-TTY - // The important thing is it doesn't crash - expect(exitCode).toBeDefined(); - }); - - it("'ls' should work as alias for 'list'", () => { - const { homedir } = require("os"); - const { exitCode } = runCLI(["ls"], { SPAWN_HOME: join(homedir(), ".spawn-test-empty-home-" + Date.now()) }); - expect(exitCode).toBeDefined(); - }); - - it("'history' should work as alias for 'list'", () => { - const { homedir } = require("os"); - const { exitCode } = runCLI(["history"], { SPAWN_HOME: join(homedir(), ".spawn-test-empty-home-" + Date.now()) }); - expect(exitCode).toBeDefined(); - }); - - it("'list -a' without value should error", () => { - const { stderr, exitCode } = runCLI(["list", "-a"]); - expect(exitCode).toBe(1); - expect(stderr).toContain("-a"); - expect(stderr).toContain("requires"); - }); - - it("'list -c' without value should error", () => { - const { stderr, exitCode } = runCLI(["list", "-c"]); - expect(exitCode).toBe(1); - expect(stderr).toContain("-c"); - expect(stderr).toContain("requires"); - }); -}); diff --git a/cli/src/__tests__/cmdrun-resolution.test.ts b/cli/src/__tests__/cmdrun-resolution.test.ts deleted file mode 100644 index 7be178b7..00000000 --- 
a/cli/src/__tests__/cmdrun-resolution.test.ts +++ /dev/null @@ -1,291 +0,0 @@ -import { describe, it, expect } from "bun:test"; -import { execSync } from "child_process"; -import { resolve } from "path"; - -/** - * Tests for cmdRun argument resolution paths: - * - Display name resolution ("Claude Code" -> "claude") - * - Case-insensitive key resolution ("Claude" -> "claude") - * - Argument swapping detection (cloud/agent -> agent/cloud) - * - showInfoOrError display name resolution ("Hetzner Cloud" -> cloud info) - * - * These paths in commands.ts cmdRun() (lines 252-304) and index.ts - * showInfoOrError() (lines 87-128) have zero E2E test coverage. - * - * Uses subprocess approach since cmdRun calls process.exit on errors. - * - * Agent: test-engineer - */ - -const CLI_DIR = resolve(import.meta.dir, "../.."); -const PROJECT_ROOT = resolve(CLI_DIR, ".."); - -function runCli( - args: string[], - env: Record = {} -): { stdout: string; stderr: string; exitCode: number } { - const quotedArgs = args.map(a => `'${a.replace(/'/g, "'\\''")}'`).join(" "); - const cmd = `bun run ${CLI_DIR}/src/index.ts ${quotedArgs}`; - try { - const stdout = execSync(cmd, { - cwd: PROJECT_ROOT, - env: { - PATH: `${process.env.HOME}/.bun/bin:${process.env.PATH}`, - HOME: process.env.HOME, - SHELL: process.env.SHELL, - TERM: process.env.TERM || "xterm", - // Prevent OAuth browser from opening during tests — if OPENROUTER_API_KEY - // is set, get_or_prompt_api_key() skips the entire OAuth flow. - OPENROUTER_API_KEY: "sk-or-test-fake", - ...env, - SPAWN_NO_UPDATE_CHECK: "1", - NODE_ENV: "", - BUN_ENV: "", - }, - encoding: "utf-8", - timeout: 15000, - stdio: ["pipe", "pipe", "pipe"], - }); - return { stdout, stderr: "", exitCode: 0 }; - } catch (err: any) { - return { - stdout: err.stdout || "", - stderr: err.stderr || "", - exitCode: err.status ?? 
1, - }; - } -} - -// ── cmdRun: argument swapping detection ─────────────────────────────────── - -describe("cmdRun argument swapping", () => { - it("should detect swapped cloud/agent and show swap warning", () => { - // "spawn sprite claude" should be detected as swapped -> "spawn claude sprite" - // cmdRun will swap and try to launch, which will fail at download (no network) - // but the swap message should appear in output - const result = runCli(["sprite", "claude", "--dry-run"]); - const output = result.stdout + result.stderr; - expect(output).toContain("swapped"); - }); - - it("should show corrected command after swap detection", () => { - const result = runCli(["sprite", "claude", "--dry-run"]); - const output = result.stdout + result.stderr; - expect(output).toContain("spawn claude sprite"); - }); - - it("should swap hetzner/codex to codex/hetzner", () => { - const result = runCli(["hetzner", "codex", "--dry-run"]); - const output = result.stdout + result.stderr; - expect(output).toContain("swapped"); - }); - - it("should not swap when arguments are in correct order", () => { - // "spawn claude sprite" is correct order - no swap message - const result = runCli(["claude", "sprite", "--dry-run"]); - const output = result.stdout + result.stderr; - expect(output).not.toContain("swapped"); - }); - - it("should not swap when both args are unknown", () => { - const result = runCli(["fakething", "otherfake"]); - const output = result.stdout + result.stderr; - expect(output).not.toContain("swapped"); - }); -}); - -// ── cmdRun: display name resolution ─────────────────────────────────────── - -describe("cmdRun display name resolution", () => { - it("should resolve case-insensitive agent key", () => { - // "Claude" should resolve to "claude" - const result = runCli(["Claude", "sprite", "--dry-run"]); - const output = result.stdout + result.stderr; - // Should resolve and proceed (may show "Resolved" message) - // Should NOT show "Unknown agent" error - 
expect(output).not.toContain("Unknown agent"); - }); - - it("should resolve case-insensitive cloud key", () => { - // "Sprite" should resolve to "sprite" - const result = runCli(["claude", "Sprite", "--dry-run"]); - const output = result.stdout + result.stderr; - expect(output).not.toContain("Unknown cloud"); - }); - - it("should show resolution message when name is resolved", () => { - // "CLAUDE" -> "claude" should trigger "Resolved" message - const result = runCli(["CLAUDE", "sprite", "--dry-run"]); - const output = result.stdout + result.stderr; - expect(output).toContain("Resolved"); - expect(output).toContain("claude"); - }); - - it("should resolve agent display name to key", () => { - // "Claude Code" is the display name for agent key "claude" - const result = runCli(["Claude Code", "sprite", "--dry-run"]); - const output = result.stdout + result.stderr; - expect(output).toContain("Resolved"); - expect(output).toContain("claude"); - }); - - it("should resolve cloud display name to key", () => { - // "Hetzner Cloud" is the display name for cloud key "hetzner" - const result = runCli(["claude", "Hetzner Cloud", "--dry-run"]); - const output = result.stdout + result.stderr; - expect(output).toContain("Resolved"); - expect(output).toContain("hetzner"); - }); - - it("should not show resolution message for exact key match", () => { - // "claude" is already the exact key - no resolution needed - const result = runCli(["claude", "sprite", "--dry-run"]); - const output = result.stdout + result.stderr; - expect(output).not.toContain("Resolved"); - }); - - it("should show unknown agent error for truly invalid agent", () => { - const result = runCli(["notarealagent", "sprite"]); - const output = result.stdout + result.stderr; - expect(output).toContain("Unknown agent"); - expect(result.exitCode).not.toBe(0); - }); - - it("should show unknown cloud error for truly invalid cloud", () => { - const result = runCli(["claude", "notarealcloud"]); - const output = result.stdout 
+ result.stderr; - expect(output).toContain("Unknown cloud"); - expect(result.exitCode).not.toBe(0); - }); -}); - -// ── showInfoOrError: display name resolution ────────────────────────────── - -describe("showInfoOrError display name resolution", () => { - it("should resolve agent display name to agent info", () => { - // "Claude Code" -> resolves to "claude" via resolveAgentKey -> shows agent info - const result = runCli(["Claude Code"]); - const output = result.stdout + result.stderr; - expect(output).toContain("Available clouds"); - expect(result.exitCode).toBe(0); - }); - - it("should resolve cloud display name to cloud info", () => { - // "Hetzner Cloud" -> resolves to "hetzner" via resolveCloudKey -> shows cloud info - const result = runCli(["Hetzner Cloud"]); - const output = result.stdout + result.stderr; - expect(output).toContain("Available agents"); - expect(result.exitCode).toBe(0); - }); - - it("should resolve case-insensitive agent display name", () => { - // "claude code" (lowercase) -> resolves to agent info - const result = runCli(["claude code"]); - const output = result.stdout + result.stderr; - expect(output).toContain("Available clouds"); - expect(result.exitCode).toBe(0); - }); - - it("should resolve case-insensitive cloud display name", () => { - // "hetzner cloud" (lowercase) -> resolves to cloud info - const result = runCli(["hetzner cloud"]); - const output = result.stdout + result.stderr; - expect(output).toContain("Available agents"); - expect(result.exitCode).toBe(0); - }); - - it("should resolve uppercase agent key", () => { - // "CLAUDE" -> resolves to "claude" key - const result = runCli(["CLAUDE"]); - const output = result.stdout + result.stderr; - expect(output).toContain("Available clouds"); - expect(result.exitCode).toBe(0); - }); - - it("should resolve uppercase cloud key", () => { - // "HETZNER" -> resolves to "hetzner" key - const result = runCli(["HETZNER"]); - const output = result.stdout + result.stderr; - 
expect(output).toContain("Available agents"); - expect(result.exitCode).toBe(0); - }); - - it("should resolve mixed case agent key", () => { - const result = runCli(["Codex"]); - const output = result.stdout + result.stderr; - expect(output).toContain("Available clouds"); - expect(result.exitCode).toBe(0); - }); -}); - -// ── cmdRun: "did you mean" suggestions ──────────────────────────────────── - -describe("cmdRun did-you-mean suggestions", () => { - it("should suggest closest agent match for typo", () => { - // "claud" is close to "claude" (distance 1) - const result = runCli(["claud", "sprite"]); - const output = result.stdout + result.stderr; - expect(output).toContain("Did you mean"); - expect(output).toContain("claude"); - expect(result.exitCode).not.toBe(0); - }); - - it("should suggest closest cloud match for typo", () => { - // "sprte" is close to "sprite" (distance 1) - const result = runCli(["claude", "sprte"]); - const output = result.stdout + result.stderr; - expect(output).toContain("Did you mean"); - expect(output).toContain("sprite"); - expect(result.exitCode).not.toBe(0); - }); - - it("should not suggest anything for completely different agent", () => { - const result = runCli(["kubernetes", "sprite"]); - const output = result.stdout + result.stderr; - expect(output).toContain("Unknown agent"); - expect(output).not.toContain("Did you mean"); - expect(result.exitCode).not.toBe(0); - }); - - it("should show spawn agents hint for unknown agent", () => { - const result = runCli(["notreal", "sprite"]); - const output = result.stdout + result.stderr; - expect(output).toContain("spawn agents"); - }); - - it("should show spawn clouds hint for unknown cloud", () => { - const result = runCli(["claude", "notreal"]); - const output = result.stdout + result.stderr; - expect(output).toContain("spawn clouds"); - }); -}); - -// ── validateImplementation: not-implemented error paths ─────────────────── - -describe("cmdRun not-implemented error", () => { - 
it("should show not implemented error for missing matrix entry", () => { - // Find a known missing combination from the manifest - // We check a combination that exists in the manifest as "missing" - // This tests validateImplementation's error messaging - const result = runCli(["claude", "cherry-servers"]); - const output = result.stdout + result.stderr; - // Should either succeed (if implemented) or show useful error - // The key thing is it doesn't crash - if (result.exitCode !== 0) { - // If not implemented, should show helpful alternatives - expect(output.length).toBeGreaterThan(0); - } - }); - - it("should suggest alternative clouds when agent is not on specified cloud", () => { - // We need a cloud that exists but doesn't have all agents - // Test the "available on N clouds" message path - // Using a known agent with a cloud that may not have it - const result = runCli(["claude", "cherry-servers"]); - const output = result.stdout + result.stderr; - if (output.includes("not yet implemented")) { - // Should suggest alternative clouds - expect(output).toMatch(/available on|Try one of these/); - } - }); -}); diff --git a/cli/src/__tests__/index-main-routing.test.ts b/cli/src/__tests__/index-main-routing.test.ts deleted file mode 100644 index f9d1f98e..00000000 --- a/cli/src/__tests__/index-main-routing.test.ts +++ /dev/null @@ -1,290 +0,0 @@ -import { describe, it, expect, beforeEach, afterEach, mock, spyOn } from "bun:test"; -import { execSync } from "child_process"; -import { resolve } from "path"; - -/** - * Tests for index.ts main() routing, handleError, and isInteractiveTTY. 
- * - * These functions have zero direct test coverage: - * - handleError: formats errors and exits with code 1 - * - isInteractiveTTY: checks stdin/stdout TTY status - * - main() routing: the actual switch statement that dispatches commands - * - * Since index.ts calls process.exit and has module-level side effects, - * we test it by spawning bun subprocesses with controlled environments - * (same approach as unicode-detect.test.ts). - * - * Agent: test-engineer - */ - -const CLI_DIR = resolve(import.meta.dir, "../.."); - -// Helper: run the CLI with given args and return { stdout, stderr, exitCode } -function runCli( - args: string[], - env: Record = {} -): { stdout: string; stderr: string; exitCode: number } { - const cmd = `bun run src/index.ts ${args.join(" ")}`; - try { - const stdout = execSync(cmd, { - cwd: CLI_DIR, - env: { - ...process.env, - ...env, - // Ensure bun is in PATH for child processes - PATH: `${process.env.HOME}/.bun/bin:${process.env.PATH}`, - // Prevent auto-update from running during tests - SPAWN_NO_UPDATE_CHECK: "1", - // Prevent local manifest.json from being used - NODE_ENV: "test", - BUN_ENV: "test", - // Prevent ANSI color codes in output (CI sets FORCE_COLOR/CI vars) - NO_COLOR: "1", - }, - encoding: "utf-8", - timeout: 15000, - stdio: ["pipe", "pipe", "pipe"], - }); - return { stdout, stderr: "", exitCode: 0 }; - } catch (err: any) { - return { - stdout: err.stdout || "", - stderr: err.stderr || "", - exitCode: err.status ?? 
1, - }; - } -} - -describe("index.ts main() routing", () => { - // ── help command routing ────────────────────────────────────────────── - - describe("help command", () => { - it("should show help with 'help' command", () => { - const result = runCli(["help"]); - const output = result.stdout + result.stderr; - expect(output).toContain("USAGE"); - expect(output).toContain("spawn"); - }); - - it("should show help with '--help' flag", () => { - const result = runCli(["--help"]); - const output = result.stdout + result.stderr; - expect(output).toContain("USAGE"); - }); - - it("should show help with '-h' flag", () => { - const result = runCli(["-h"]); - const output = result.stdout + result.stderr; - expect(output).toContain("USAGE"); - }); - - it("should include all sections in help output", () => { - const result = runCli(["help"]); - const output = result.stdout + result.stderr; - expect(output).toContain("USAGE"); - expect(output).toContain("EXAMPLES"); - expect(output).toContain("AUTHENTICATION"); - expect(output).toContain("TROUBLESHOOTING"); - expect(output).toContain("INSTALL"); - expect(output).toContain("MORE INFO"); - }); - - it("should include --prompt and --prompt-file in help", () => { - const result = runCli(["help"]); - const output = result.stdout + result.stderr; - expect(output).toContain("--prompt"); - expect(output).toContain("--prompt-file"); - }); - }); - - // ── version command routing ───────────────────────────────────────── - - describe("version command", () => { - it("should show version with 'version' command", () => { - const result = runCli(["version"]); - const output = result.stdout + result.stderr; - expect(output).toMatch(/spawn v\d+\.\d+\.\d+/); - }); - - it("should show version with '--version' flag", () => { - const result = runCli(["--version"]); - const output = result.stdout + result.stderr; - expect(output).toMatch(/spawn v\d+\.\d+\.\d+/); - }); - - it("should show version with '-v' flag", () => { - const result = 
runCli(["-v"]); - const output = result.stdout + result.stderr; - expect(output).toMatch(/spawn v\d+\.\d+\.\d+/); - }); - - it("should show version with '-V' flag", () => { - const result = runCli(["-V"]); - const output = result.stdout + result.stderr; - expect(output).toMatch(/spawn v\d+\.\d+\.\d+/); - }); - }); - - // ── subcommand --help routing ─────────────────────────────────────── - - describe("subcommand --help shows general help", () => { - it("should show help for 'list --help'", () => { - const result = runCli(["list", "--help"]); - const output = result.stdout + result.stderr; - expect(output).toContain("USAGE"); - }); - - it("should show help for 'agents --help'", () => { - const result = runCli(["agents", "--help"]); - const output = result.stdout + result.stderr; - expect(output).toContain("USAGE"); - }); - - it("should show help for 'clouds --help'", () => { - const result = runCli(["clouds", "--help"]); - const output = result.stdout + result.stderr; - expect(output).toContain("USAGE"); - }); - - it("should show help for 'update --help'", () => { - const result = runCli(["update", "--help"]); - const output = result.stdout + result.stderr; - expect(output).toContain("USAGE"); - }); - - it("should show help for 'list -h'", () => { - const result = runCli(["list", "-h"]); - const output = result.stdout + result.stderr; - expect(output).toContain("USAGE"); - }); - - it("should show help for 'agents help'", () => { - const result = runCli(["agents", "help"]); - const output = result.stdout + result.stderr; - expect(output).toContain("USAGE"); - }); - }); - - // ── ls alias routing ─────────────────────────────────────────────── - - describe("ls alias", () => { - it("should show help for 'ls --help'", () => { - const result = runCli(["ls", "--help"]); - const output = result.stdout + result.stderr; - expect(output).toContain("USAGE"); - }); - }); - - // ── non-TTY mode with no args ────────────────────────────────────── - - describe("non-TTY mode", () 
=> { - it("should show non-TTY hint when run without args in non-TTY mode", () => { - // When stdin is not a TTY (piped), and no args, it shows the non-TTY hint - const result = runCli([]); - const output = result.stdout + result.stderr; - expect(output).toContain("Cannot run interactive picker: not a terminal"); - }); - }); -}); - -describe("handleError formatting", () => { - // handleError is not exported, so we test it through the actual CLI - - describe("error with Error object", () => { - it("should show error message for invalid identifier", () => { - const result = runCli(["../hack", "sprite"]); - const output = result.stderr + result.stdout; - expect(output).toContain("can only contain"); - expect(result.exitCode).not.toBe(0); - }); - - it("should show help hint in error output", () => { - const result = runCli(["../hack", "sprite"]); - const output = result.stderr + result.stdout; - // handleError appends: Run 'spawn help' for usage information. - // But the error may come from validateIdentifier before handleError - // Either way, the CLI should provide helpful error messaging - expect(output.length).toBeGreaterThan(0); - expect(result.exitCode).not.toBe(0); - }); - }); - - describe("error for empty input", () => { - it("should exit with error for empty agent name in run command", () => { - // This tests the "prompt requires both agent and cloud" path - const result = runCli(["--prompt", "test text"]); - const output = result.stderr + result.stdout; - expect(output).toContain("--prompt requires both"); - expect(result.exitCode).not.toBe(0); - }); - }); -}); - -describe("extractFlagValue in actual CLI", () => { - describe("--prompt flag missing value", () => { - it("should error when --prompt is last argument", () => { - const result = runCli(["claude", "sprite", "--prompt"]); - const output = result.stderr + result.stdout; - expect(output).toContain("--prompt requires a value"); - expect(result.exitCode).not.toBe(0); - }); - - it("should error when -p is 
last argument", () => { - const result = runCli(["claude", "sprite", "-p"]); - const output = result.stderr + result.stdout; - expect(output).toContain("-p requires a value"); - expect(result.exitCode).not.toBe(0); - }); - - it("should error when --prompt-file is last argument", () => { - const result = runCli(["claude", "sprite", "--prompt-file"]); - const output = result.stderr + result.stdout; - expect(output).toContain("--prompt-file requires a value"); - expect(result.exitCode).not.toBe(0); - }); - - it("should error when --prompt value starts with -", () => { - const result = runCli(["claude", "sprite", "--prompt", "--verbose"]); - const output = result.stderr + result.stdout; - expect(output).toContain("--prompt requires a value"); - expect(result.exitCode).not.toBe(0); - }); - }); - - describe("--prompt and --prompt-file mutual exclusion", () => { - it("should error when both --prompt and --prompt-file are given", () => { - const result = runCli(["claude", "sprite", "--prompt", "text", "--prompt-file", "file.txt"]); - const output = result.stderr + result.stdout; - expect(output).toContain("cannot be used together"); - expect(result.exitCode).not.toBe(0); - }); - }); - - describe("--prompt-file with missing file", () => { - it("should error when prompt file does not exist", () => { - const result = runCli(["claude", "sprite", "--prompt-file", "/tmp/nonexistent-spawn-test-file.txt"]); - const output = result.stderr + result.stdout; - expect(output).toContain("Prompt file not found"); - expect(result.exitCode).not.toBe(0); - }); - }); -}); - -describe("prompt-only-without-cloud error", () => { - it("should error when --prompt is given without any agent/cloud", () => { - // When no positional args, prompt-without-cloud error triggers - const result = runCli(["--prompt", "Fix bugs"]); - const output = result.stderr + result.stdout; - expect(output).toContain("--prompt requires both"); - expect(result.exitCode).not.toBe(0); - }); - - it("should include usage 
hint in prompt-only error", () => { - const result = runCli(["--prompt", "Fix bugs"]); - const output = result.stderr + result.stdout; - // Should mention that both agent and cloud are required - expect(output).toContain(""); - expect(output).toContain(""); - expect(result.exitCode).not.toBe(0); - }); -}); diff --git a/cli/src/__tests__/install-helpers.test.ts b/cli/src/__tests__/install-helpers.test.ts deleted file mode 100644 index 239e5a1b..00000000 --- a/cli/src/__tests__/install-helpers.test.ts +++ /dev/null @@ -1,518 +0,0 @@ -import { describe, it, expect, beforeEach, afterEach } from "bun:test"; -import { execSync } from "child_process"; -import { resolve, join } from "path"; -import { mkdirSync, writeFileSync, rmSync, existsSync, chmodSync } from "fs"; -import { tmpdir } from "os"; - -/** - * Tests for install.sh bash helper functions. - * - * install.sh is the entry point for all new users (`curl ... | bash`). - * It has been modified in 3 of the last 5 commits and its helper functions - * had zero test coverage. These tests exercise: - * - * - version_gte: Semver comparison (determines if bun upgrade is needed) - * - find_install_dir: Install directory resolution (PATH-aware) - * - ensure_in_path: PATH detection and shell-specific instructions - * - * Each test sources the relevant functions from install.sh in an isolated - * bash subprocess with controlled PATH and HOME environment. - * - * Agent: test-engineer - */ - -const REPO_ROOT = resolve(import.meta.dir, "../../.."); -const INSTALL_SH = resolve(REPO_ROOT, "cli/install.sh"); - -/** - * Extract and run just the helper functions from install.sh. - * We source the function definitions without running the main body - * by extracting them into a separate script. 
- */ -function runBashWithHelpers( - script: string, - env?: Record -): { exitCode: number; stdout: string; stderr: string } { - // Extract the function definitions from install.sh (before the main body) - // The main body starts after the last function definition - const helperScript = ` -set -eo pipefail - -# Color codes (from install.sh) -RED='\\033[0;31m' -GREEN='\\033[0;32m' -YELLOW='\\033[1;33m' -BOLD='\\033[1m' -NC='\\033[0m' - -log_info() { echo -e "\${GREEN}[spawn]\${NC} $1"; } -log_warn() { echo -e "\${YELLOW}[spawn]\${NC} $1"; } -log_error() { echo -e "\${RED}[spawn]\${NC} $1"; } - -# version_gte from install.sh -version_gte() { - local IFS='.' - local a=($1) b=($2) - local i=0 - while [ $i -lt \${#b[@]} ]; do - local av="\${a[$i]:-0}" - local bv="\${b[$i]:-0}" - if [ "$av" -lt "$bv" ]; then - return 1 - elif [ "$av" -gt "$bv" ]; then - return 0 - fi - i=$((i + 1)) - done - return 0 -} - -# find_install_dir from install.sh (needs bun mock) -find_install_dir() { - if [ -n "\${SPAWN_INSTALL_DIR:-}" ]; then - echo "\${SPAWN_INSTALL_DIR}" - return - fi - local dirs=( - "\${HOME}/.local/bin" - "\$(bun pm bin -g 2>/dev/null)" - "\${HOME}/.bun/bin" - "\${HOME}/bin" - ) - for dir in "\${dirs[@]}"; do - [ -z "$dir" ] && continue - if echo "\${PATH}" | tr ':' '\\n' | grep -qx "$dir"; then - echo "$dir" - return - fi - done - echo "\${HOME}/.local/bin" -} - -# ensure_in_path from install.sh -ensure_in_path() { - local install_dir="$1" - if echo "\${PATH}" | tr ':' '\\n' | grep -qx "\${install_dir}"; then - echo "IN_PATH" - else - echo "NOT_IN_PATH" - case "\${SHELL:-/bin/bash}" in - */zsh) - echo "SHELL_TYPE=zsh" - ;; - */fish) - echo "SHELL_TYPE=fish" - ;; - *) - echo "SHELL_TYPE=bash" - ;; - esac - fi -} - -${script} -`; - - const defaultEnv: Record = { - PATH: process.env.PATH || "/usr/bin:/bin", - HOME: env?.HOME || "/tmp/test-home", - SHELL: env?.SHELL || "/bin/bash", - }; - - const mergedEnv = { ...defaultEnv, ...env }; - - try { - const stdout = 
execSync(`bash -c '${helperScript.replace(/'/g, "'\\''")}'`, { - encoding: "utf-8", - timeout: 10000, - stdio: ["pipe", "pipe", "pipe"], - env: mergedEnv, - }); - return { exitCode: 0, stdout: stdout.trim(), stderr: "" }; - } catch (err: any) { - return { - exitCode: err.status ?? 1, - stdout: (err.stdout || "").trim(), - stderr: (err.stderr || "").trim(), - }; - } -} - -// ── version_gte tests ────────────────────────────────────────────────────── - -describe("install.sh version_gte", () => { - describe("equal versions", () => { - it("should return true (0) for identical versions", () => { - const result = runBashWithHelpers('version_gte "1.2.3" "1.2.3" && echo "YES" || echo "NO"'); - expect(result.stdout).toBe("YES"); - }); - - it("should return true (0) for 0.0.0 == 0.0.0", () => { - const result = runBashWithHelpers('version_gte "0.0.0" "0.0.0" && echo "YES" || echo "NO"'); - expect(result.stdout).toBe("YES"); - }); - }); - - describe("greater versions", () => { - it("should return true when major is greater", () => { - const result = runBashWithHelpers('version_gte "2.0.0" "1.0.0" && echo "YES" || echo "NO"'); - expect(result.stdout).toBe("YES"); - }); - - it("should return true when minor is greater", () => { - const result = runBashWithHelpers('version_gte "1.3.0" "1.2.0" && echo "YES" || echo "NO"'); - expect(result.stdout).toBe("YES"); - }); - - it("should return true when patch is greater", () => { - const result = runBashWithHelpers('version_gte "1.2.4" "1.2.3" && echo "YES" || echo "NO"'); - expect(result.stdout).toBe("YES"); - }); - - it("should return true when major is greater despite lower minor", () => { - const result = runBashWithHelpers('version_gte "2.0.0" "1.9.9" && echo "YES" || echo "NO"'); - expect(result.stdout).toBe("YES"); - }); - - it("should return true when minor is greater despite lower patch", () => { - const result = runBashWithHelpers('version_gte "1.5.0" "1.4.9" && echo "YES" || echo "NO"'); - expect(result.stdout).toBe("YES"); - 
}); - }); - - describe("lesser versions", () => { - it("should return false when major is less", () => { - const result = runBashWithHelpers('version_gte "1.0.0" "2.0.0" && echo "YES" || echo "NO"'); - expect(result.stdout).toBe("NO"); - }); - - it("should return false when minor is less", () => { - const result = runBashWithHelpers('version_gte "1.1.0" "1.2.0" && echo "YES" || echo "NO"'); - expect(result.stdout).toBe("NO"); - }); - - it("should return false when patch is less", () => { - const result = runBashWithHelpers('version_gte "1.2.2" "1.2.3" && echo "YES" || echo "NO"'); - expect(result.stdout).toBe("NO"); - }); - }); - - describe("realistic bun version checks", () => { - it("should pass for bun 1.2.0 >= MIN_BUN_VERSION 1.2.0", () => { - const result = runBashWithHelpers('version_gte "1.2.0" "1.2.0" && echo "YES" || echo "NO"'); - expect(result.stdout).toBe("YES"); - }); - - it("should pass for bun 1.2.5 >= MIN_BUN_VERSION 1.2.0", () => { - const result = runBashWithHelpers('version_gte "1.2.5" "1.2.0" && echo "YES" || echo "NO"'); - expect(result.stdout).toBe("YES"); - }); - - it("should fail for bun 1.1.0 >= MIN_BUN_VERSION 1.2.0", () => { - const result = runBashWithHelpers('version_gte "1.1.0" "1.2.0" && echo "YES" || echo "NO"'); - expect(result.stdout).toBe("NO"); - }); - - it("should fail for bun 1.0.33 >= MIN_BUN_VERSION 1.2.0", () => { - const result = runBashWithHelpers('version_gte "1.0.33" "1.2.0" && echo "YES" || echo "NO"'); - expect(result.stdout).toBe("NO"); - }); - - it("should pass for bun 1.3.0 >= MIN_BUN_VERSION 1.2.0", () => { - const result = runBashWithHelpers('version_gte "1.3.0" "1.2.0" && echo "YES" || echo "NO"'); - expect(result.stdout).toBe("YES"); - }); - }); - - describe("segment edge cases", () => { - it("should handle two-segment version against three-segment", () => { - // "1.2" means a=(1 2), b=(1 2 0), missing a[2] defaults to 0 - const result = runBashWithHelpers('version_gte "1.2" "1.2.0" && echo "YES" || echo "NO"'); 
- expect(result.stdout).toBe("YES"); - }); - - it("should handle three-segment against two-segment", () => { - // b has only 2 parts, loop only runs twice, so 1.2.5 >= 1.2 - const result = runBashWithHelpers('version_gte "1.2.5" "1.2" && echo "YES" || echo "NO"'); - expect(result.stdout).toBe("YES"); - }); - - it("should handle single-segment versions", () => { - const result = runBashWithHelpers('version_gte "2" "1" && echo "YES" || echo "NO"'); - expect(result.stdout).toBe("YES"); - }); - - it("should handle single less than single", () => { - const result = runBashWithHelpers('version_gte "1" "2" && echo "YES" || echo "NO"'); - expect(result.stdout).toBe("NO"); - }); - - it("should handle large version numbers", () => { - const result = runBashWithHelpers('version_gte "100.200.300" "100.200.299" && echo "YES" || echo "NO"'); - expect(result.stdout).toBe("YES"); - }); - - it("should handle version with extra trailing segments (only compares up to b length)", () => { - // b=(1 2), loop runs 2 times. 
a=(1 2 9) - extra segment ignored - const result = runBashWithHelpers('version_gte "1.2.9" "1.2" && echo "YES" || echo "NO"'); - expect(result.stdout).toBe("YES"); - }); - - it("should handle missing segment in a as 0 when comparing", () => { - // a=(1 2), b=(1 2 1), loop runs 3 times, a[2]=0 < b[2]=1 - const result = runBashWithHelpers('version_gte "1.2" "1.2.1" && echo "YES" || echo "NO"'); - expect(result.stdout).toBe("NO"); - }); - }); -}); - -// ── find_install_dir tests ────────────────────────────────────────────────── - -describe("install.sh find_install_dir", () => { - let testDir: string; - - beforeEach(() => { - testDir = join(tmpdir(), `spawn-install-test-${Date.now()}-${Math.random()}`); - mkdirSync(testDir, { recursive: true }); - }); - - afterEach(() => { - if (existsSync(testDir)) { - rmSync(testDir, { recursive: true, force: true }); - } - }); - - it("should use SPAWN_INSTALL_DIR when set", () => { - const customDir = join(testDir, "custom-bin"); - mkdirSync(customDir, { recursive: true }); - const result = runBashWithHelpers("find_install_dir", { - HOME: testDir, - SPAWN_INSTALL_DIR: customDir, - }); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe(customDir); - }); - - it("should prefer ~/.local/bin when it is in PATH", () => { - const localBin = join(testDir, ".local", "bin"); - mkdirSync(localBin, { recursive: true }); - const result = runBashWithHelpers("find_install_dir", { - HOME: testDir, - PATH: `${localBin}:/usr/bin:/bin`, - }); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe(localBin); - }); - - it("should fall back to ~/.bun/bin when ~/.local/bin is not in PATH", () => { - const bunBin = join(testDir, ".bun", "bin"); - mkdirSync(bunBin, { recursive: true }); - const result = runBashWithHelpers("find_install_dir", { - HOME: testDir, - PATH: `${bunBin}:/usr/bin:/bin`, - }); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe(bunBin); - }); - - it("should fall back to ~/bin when other options not 
in PATH", () => { - const homeBin = join(testDir, "bin"); - mkdirSync(homeBin, { recursive: true }); - const result = runBashWithHelpers("find_install_dir", { - HOME: testDir, - PATH: `${homeBin}:/usr/bin:/bin`, - }); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe(homeBin); - }); - - it("should default to ~/.local/bin when nothing matches PATH", () => { - const result = runBashWithHelpers("find_install_dir", { - HOME: testDir, - PATH: "/usr/bin:/bin", - }); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe(join(testDir, ".local", "bin")); - }); - - it("should override all heuristics with SPAWN_INSTALL_DIR", () => { - const localBin = join(testDir, ".local", "bin"); - mkdirSync(localBin, { recursive: true }); - const override = join(testDir, "my-override"); - mkdirSync(override, { recursive: true }); - const result = runBashWithHelpers("find_install_dir", { - HOME: testDir, - PATH: `${localBin}:/usr/bin:/bin`, - SPAWN_INSTALL_DIR: override, - }); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe(override); - }); -}); - -// ── ensure_in_path tests ──────────────────────────────────────────────────── - -describe("install.sh ensure_in_path", () => { - let testDir: string; - - beforeEach(() => { - testDir = join(tmpdir(), `spawn-path-test-${Date.now()}-${Math.random()}`); - mkdirSync(testDir, { recursive: true }); - }); - - afterEach(() => { - if (existsSync(testDir)) { - rmSync(testDir, { recursive: true, force: true }); - } - }); - - it("should detect when install dir IS in PATH", () => { - const binDir = join(testDir, "bin"); - mkdirSync(binDir, { recursive: true }); - const result = runBashWithHelpers(`ensure_in_path "${binDir}"`, { - HOME: testDir, - PATH: `${binDir}:/usr/bin:/bin`, - }); - expect(result.stdout).toContain("IN_PATH"); - expect(result.stdout).not.toContain("NOT_IN_PATH"); - }); - - it("should detect when install dir is NOT in PATH", () => { - const binDir = join(testDir, "bin"); - mkdirSync(binDir, { 
recursive: true }); - const result = runBashWithHelpers(`ensure_in_path "${binDir}"`, { - HOME: testDir, - PATH: "/usr/bin:/bin", - }); - expect(result.stdout).toContain("NOT_IN_PATH"); - }); - - it("should suggest .bashrc for bash shell", () => { - const binDir = join(testDir, "bin"); - mkdirSync(binDir, { recursive: true }); - const result = runBashWithHelpers(`ensure_in_path "${binDir}"`, { - HOME: testDir, - PATH: "/usr/bin:/bin", - SHELL: "/bin/bash", - }); - expect(result.stdout).toContain("SHELL_TYPE=bash"); - }); - - it("should suggest .zshrc for zsh shell", () => { - const binDir = join(testDir, "bin"); - mkdirSync(binDir, { recursive: true }); - const result = runBashWithHelpers(`ensure_in_path "${binDir}"`, { - HOME: testDir, - PATH: "/usr/bin:/bin", - SHELL: "/bin/zsh", - }); - expect(result.stdout).toContain("SHELL_TYPE=zsh"); - }); - - it("should suggest fish_add_path for fish shell", () => { - const binDir = join(testDir, "bin"); - mkdirSync(binDir, { recursive: true }); - const result = runBashWithHelpers(`ensure_in_path "${binDir}"`, { - HOME: testDir, - PATH: "/usr/bin:/bin", - SHELL: "/usr/bin/fish", - }); - expect(result.stdout).toContain("SHELL_TYPE=fish"); - }); - - it("should default to bash when SHELL is unset", () => { - const binDir = join(testDir, "bin"); - mkdirSync(binDir, { recursive: true }); - // Explicitly unset SHELL - const result = runBashWithHelpers(`unset SHELL; ensure_in_path "${binDir}"`, { - HOME: testDir, - PATH: "/usr/bin:/bin", - }); - expect(result.stdout).toContain("SHELL_TYPE=bash"); - }); - - it("should handle PATH with many entries", () => { - const binDir = join(testDir, "bin"); - mkdirSync(binDir, { recursive: true }); - const longPath = Array.from({ length: 20 }, (_, i) => `/fake/path/${i}`).join(":"); - const result = runBashWithHelpers(`ensure_in_path "${binDir}"`, { - HOME: testDir, - PATH: `${longPath}:${binDir}:/usr/bin`, - }); - expect(result.stdout).toContain("IN_PATH"); - }); - - it("should not match 
partial path prefixes", () => { - const binDir = join(testDir, "bin"); - const binDirExtra = join(testDir, "bin-extra"); - mkdirSync(binDir, { recursive: true }); - mkdirSync(binDirExtra, { recursive: true }); - // PATH contains bin-extra but not bin - const result = runBashWithHelpers(`ensure_in_path "${binDir}"`, { - HOME: testDir, - PATH: `${binDirExtra}:/usr/bin:/bin`, - }); - expect(result.stdout).toContain("NOT_IN_PATH"); - }); -}); - -// ── install.sh syntax check ──────────────────────────────────────────────── - -describe("install.sh syntax", () => { - it("should pass bash -n syntax check", () => { - const result = execSync(`bash -n "${INSTALL_SH}" 2>&1`, { - encoding: "utf-8", - timeout: 5000, - }); - // bash -n produces no output on success - expect(result.trim()).toBe(""); - }); - - it("should have a valid shebang line", () => { - const { readFileSync } = require("fs"); - const content = readFileSync(INSTALL_SH, "utf-8"); - expect(content.startsWith("#!/bin/bash")).toBe(true); - }); - - it("should use set -eo pipefail", () => { - const { readFileSync } = require("fs"); - const content = readFileSync(INSTALL_SH, "utf-8"); - expect(content).toContain("set -eo pipefail"); - }); - - it("should define MIN_BUN_VERSION constant", () => { - const { readFileSync } = require("fs"); - const content = readFileSync(INSTALL_SH, "utf-8"); - expect(content).toMatch(/MIN_BUN_VERSION="[0-9]+\.[0-9]+\.[0-9]+"/); - }); - - it("should define version_gte function", () => { - const { readFileSync } = require("fs"); - const content = readFileSync(INSTALL_SH, "utf-8"); - expect(content).toContain("version_gte()"); - }); - - it("should define find_install_dir function", () => { - const { readFileSync } = require("fs"); - const content = readFileSync(INSTALL_SH, "utf-8"); - expect(content).toContain("find_install_dir()"); - }); - - it("should define ensure_in_path function", () => { - const { readFileSync } = require("fs"); - const content = readFileSync(INSTALL_SH, "utf-8"); - 
expect(content).toContain("ensure_in_path()"); - }); - - it("should define build_and_install function", () => { - const { readFileSync } = require("fs"); - const content = readFileSync(INSTALL_SH, "utf-8"); - expect(content).toContain("build_and_install()"); - }); - - it("should define clone_cli function", () => { - const { readFileSync } = require("fs"); - const content = readFileSync(INSTALL_SH, "utf-8"); - expect(content).toContain("clone_cli()"); - }); -}); diff --git a/cli/src/__tests__/no-cloud-error-paths.test.ts b/cli/src/__tests__/no-cloud-error-paths.test.ts deleted file mode 100644 index 06d135e2..00000000 --- a/cli/src/__tests__/no-cloud-error-paths.test.ts +++ /dev/null @@ -1,288 +0,0 @@ -import { describe, it, expect, beforeAll, afterAll } from "bun:test"; -import { execSync } from "child_process"; -import { resolve } from "path"; -import { writeFileSync, mkdirSync, rmSync, existsSync } from "fs"; - -/** - * Tests for error paths when agent/cloud arguments are missing. - * - * These paths in index.ts have zero test coverage: - * - suggestCloudsForPrompt (lines 154-178): shows available clouds when - * --prompt is used with agent but no cloud - * - handleNoCommand dry-run error (lines 238-242): --dry-run without agent/cloud - * - handleNoCommand prompt error (lines 243-247): --prompt without agent/cloud - * - handleDefaultCommand dry-run error (lines 141-145): --dry-run with agent but no cloud - * - * These are user-facing error messages that guide users to correct usage. 
- * - * Agent: test-engineer - */ - -const CLI_DIR = resolve(import.meta.dir, "../.."); -const PROJECT_ROOT = resolve(CLI_DIR, ".."); -const TEST_DIR = resolve("/tmp", `spawn-no-cloud-test-${Date.now()}`); - -function runCli( - args: string[], - env: Record = {} -): { stdout: string; stderr: string; exitCode: number } { - const quotedArgs = args - .map((a) => `'${a.replace(/'/g, "'\\''")}'`) - .join(" "); - const cmd = `bun run ${CLI_DIR}/src/index.ts ${quotedArgs}`; - try { - const stdout = execSync(cmd, { - cwd: PROJECT_ROOT, - env: { - PATH: `${process.env.HOME}/.bun/bin:${process.env.PATH}`, - HOME: process.env.HOME, - SHELL: process.env.SHELL, - TERM: process.env.TERM || "xterm", - ...env, - SPAWN_NO_UPDATE_CHECK: "1", - NODE_ENV: "", - BUN_ENV: "", - }, - encoding: "utf-8", - timeout: 15000, - stdio: ["pipe", "pipe", "pipe"], - }); - return { stdout, stderr: "", exitCode: 0 }; - } catch (err: any) { - return { - stdout: err.stdout || "", - stderr: err.stderr || "", - exitCode: err.status ?? 
1, - }; - } -} - -function output(result: { stdout: string; stderr: string }): string { - return result.stdout + result.stderr; -} - -beforeAll(() => { - mkdirSync(TEST_DIR, { recursive: true }); -}); - -afterAll(() => { - if (existsSync(TEST_DIR)) { - rmSync(TEST_DIR, { recursive: true, force: true }); - } -}); - -// ── suggestCloudsForPrompt: --prompt with agent but no cloud ────────────── - -describe("suggestCloudsForPrompt (--prompt with agent, no cloud)", () => { - it("should show error that --prompt requires both agent and cloud", () => { - const result = runCli(["claude", "--prompt", "Fix all bugs"]); - expect(output(result)).toContain("--prompt requires both"); - expect(result.exitCode).not.toBe(0); - }); - - it("should show usage example with the agent name", () => { - const result = runCli(["claude", "--prompt", "Fix all bugs"]); - const out = output(result); - expect(out).toContain("spawn claude "); - }); - - it("should suggest available clouds for the agent", () => { - const result = runCli(["claude", "--prompt", "Fix all bugs"]); - const out = output(result); - // suggestCloudsForPrompt fetches the manifest and lists available clouds - expect(out).toContain("Available clouds for"); - }); - - it("should show example spawn commands with specific clouds", () => { - const result = runCli(["claude", "--prompt", "Fix all bugs"]); - const out = output(result); - // Should suggest at least one concrete spawn command with a real cloud - expect(out).toMatch(/spawn claude \S+ --prompt/); - }); - - it("should work with -p short form", () => { - const result = runCli(["claude", "-p", "Fix bugs"]); - const out = output(result); - expect(out).toContain("--prompt requires both"); - expect(out).toContain("Available clouds for"); - expect(result.exitCode).not.toBe(0); - }); - - it("should work with codex agent", () => { - const result = runCli(["codex", "--prompt", "Add tests"]); - const out = output(result); - expect(out).toContain("--prompt requires both"); - 
expect(out).toContain("spawn codex "); - }); - - it("should suggest clouds for codex agent", () => { - const result = runCli(["codex", "--prompt", "Refactor"]); - const out = output(result); - // codex has multiple implemented clouds - expect(out).toContain("Available clouds for"); - }); - - it("should show at most 5 concrete cloud suggestions", () => { - const result = runCli(["claude", "--prompt", "Fix bugs"]); - const out = output(result); - // suggestCloudsForPrompt shows max 5 examples with real cloud names - // Filter for lines with "spawn claude --prompt" - // but exclude the usage hint line which has "" placeholder - const spawnLines = out - .split("\n") - .filter( - (l) => - l.includes("spawn claude") && - l.includes("--prompt") && - !l.includes("") - ); - // Should have at most 5 example lines - expect(spawnLines.length).toBeLessThanOrEqual(5); - }); - - it("should show 'see all N clouds' hint when more than 5 clouds available", () => { - // claude has many clouds (>5), so the hint should appear - const result = runCli(["claude", "--prompt", "Fix bugs"]); - const out = output(result); - // Check for the "see all" hint (only shown when >5 clouds available) - if (out.includes("see all")) { - expect(out).toMatch(/spawn claude/); - } - // At minimum, the error and suggestion section should be present - expect(out).toContain("Available clouds for"); - }); -}); - -// ── --prompt-file with agent but no cloud ───────────────────────────────── - -describe("suggestCloudsForPrompt (--prompt-file with agent, no cloud)", () => { - const promptFile = resolve(TEST_DIR, "prompt.txt"); - - beforeAll(() => { - writeFileSync(promptFile, "Fix all the things"); - }); - - it("should show same error as --prompt when using --prompt-file", () => { - const result = runCli(["claude", "--prompt-file", promptFile]); - const out = output(result); - expect(out).toContain("--prompt requires both"); - expect(result.exitCode).not.toBe(0); - }); - - it("should suggest available clouds even 
with --prompt-file", () => { - const result = runCli(["claude", "--prompt-file", promptFile]); - const out = output(result); - expect(out).toContain("Available clouds for"); - }); - - it("should show usage example with placeholder", () => { - const result = runCli(["claude", "-f", promptFile]); - const out = output(result); - expect(out).toContain(""); - }); -}); - -// ── handleNoCommand: --dry-run without any args ─────────────────────────── - -describe("--dry-run without agent and cloud", () => { - it("should show error that --dry-run requires both agent and cloud", () => { - const result = runCli(["--dry-run"]); - const out = output(result); - expect(out).toContain("--dry-run requires both"); - expect(result.exitCode).not.toBe(0); - }); - - it("should show usage hint with spawn --dry-run", () => { - const result = runCli(["--dry-run"]); - const out = output(result); - expect(out).toContain("spawn --dry-run"); - }); - - it("should work with -n short form", () => { - const result = runCli(["-n"]); - const out = output(result); - expect(out).toContain("--dry-run requires both"); - expect(result.exitCode).not.toBe(0); - }); -}); - -// ── handleDefaultCommand: --dry-run with agent but no cloud ─────────────── - -describe("--dry-run with agent but no cloud", () => { - it("should show error that --dry-run requires both agent and cloud", () => { - const result = runCli(["claude", "--dry-run"]); - const out = output(result); - expect(out).toContain("--dry-run requires both"); - expect(result.exitCode).not.toBe(0); - }); - - it("should show usage hint", () => { - const result = runCli(["claude", "--dry-run"]); - const out = output(result); - expect(out).toContain("spawn --dry-run"); - }); - - it("should work with -n short form and agent", () => { - const result = runCli(["claude", "-n"]); - const out = output(result); - expect(out).toContain("--dry-run requires both"); - expect(result.exitCode).not.toBe(0); - }); -}); - -// ── handleNoCommand: --prompt without any args 
──────────────────────────── - -describe("--prompt without any agent or cloud", () => { - it("should show error that --prompt requires both agent and cloud", () => { - const result = runCli(["--prompt", "Fix bugs"]); - const out = output(result); - expect(out).toContain("--prompt requires both"); - expect(result.exitCode).not.toBe(0); - }); - - it("should show usage hint", () => { - const result = runCli(["--prompt", "Fix bugs"]); - const out = output(result); - expect(out).toContain("spawn "); - }); - - it("should work with -p short form", () => { - const result = runCli(["-p", "Fix bugs"]); - const out = output(result); - expect(out).toContain("--prompt requires both"); - expect(result.exitCode).not.toBe(0); - }); -}); - -// ── Combined: --dry-run and --prompt without cloud ──────────────────────── - -describe("--dry-run combined with --prompt without cloud", () => { - it("should show dry-run error when both --dry-run and --prompt but no cloud", () => { - // --dry-run is checked first in handleDefaultCommand - const result = runCli(["claude", "--dry-run", "--prompt", "Fix bugs"]); - const out = output(result); - // Should show one of the two errors - expect(out).toMatch(/requires both/); - expect(result.exitCode).not.toBe(0); - }); -}); - -// ── Edge: unknown agent with --prompt ───────────────────────────────────── - -describe("unknown agent with --prompt", () => { - it("should show prompt-requires-cloud error even for unknown agent", () => { - // The prompt-without-cloud check happens before agent validation - const result = runCli(["fakeagent", "--prompt", "Fix bugs"]); - const out = output(result); - // Could show "requires both" or "Unknown agent" depending on routing - expect(result.exitCode).not.toBe(0); - }); - - it("should handle --prompt with agent typo gracefully", () => { - const result = runCli(["claud", "--prompt", "Fix bugs"]); - const out = output(result); - // Should not crash; should show some useful error - expect(result.exitCode).not.toBe(0); 
- expect(out.length).toBeGreaterThan(0); - }); -}); diff --git a/cli/src/__tests__/script-syntax.test.ts b/cli/src/__tests__/script-syntax.test.ts deleted file mode 100644 index ab980409..00000000 --- a/cli/src/__tests__/script-syntax.test.ts +++ /dev/null @@ -1,101 +0,0 @@ -import { describe, it, expect } from "bun:test"; -import { readFileSync, existsSync } from "fs"; -import { join, resolve } from "path"; -import { execSync } from "child_process"; -import type { Manifest } from "../manifest"; - -/** - * Shell script syntax validation tests. - * - * Runs `bash -n` on every shell script in the repository to catch syntax - * errors before they reach users. This is the automated equivalent of - * the CLAUDE.md rule: "Run `bash -n` on every changed .sh file." - * - * Coverage: - * - shared/common.sh (core library used by all clouds) - * - Every cloud's lib/common.sh (cloud-specific libraries) - * - Every implemented agent script (cloud/agent.sh) - * - * These tests catch: - * - Unclosed quotes, braces, parentheses - * - Invalid syntax from bad merges or edits - * - Bash 3.x incompatible syntax (some cases) - * - Missing heredoc terminators - * - * Agent: test-engineer - */ - -const REPO_ROOT = resolve(import.meta.dir, "../../.."); -const manifestPath = join(REPO_ROOT, "manifest.json"); -const manifestRaw = readFileSync(manifestPath, "utf-8"); -const manifest: Manifest = JSON.parse(manifestRaw); - -const matrixEntries = Object.entries(manifest.matrix); -const implementedEntries = matrixEntries.filter(([, status]) => status === "implemented"); - -/** Run `bash -n` on a script file. Returns null on success, error message on failure. 
*/ -function bashSyntaxCheck(filePath: string): string | null { - try { - execSync(`bash -n "${filePath}"`, { - encoding: "utf-8", - stdio: ["pipe", "pipe", "pipe"], - timeout: 10000, - }); - return null; - } catch (err: any) { - return (err.stderr || err.stdout || err.message || "Unknown error").trim(); - } -} - -describe("Shell Script Syntax Validation (bash -n)", () => { - // ── Core shared library ──────────────────────────────────────────── - - describe("shared/common.sh", () => { - const sharedPath = join(REPO_ROOT, "shared", "common.sh"); - - it("should exist", () => { - expect(existsSync(sharedPath)).toBe(true); - }); - - it("should pass bash -n syntax check", () => { - const error = bashSyntaxCheck(sharedPath); - if (error) { - throw new Error(`shared/common.sh has syntax errors:\n${error}`); - } - }); - }); - - // ── Implemented agent scripts ────────────────────────────────────── - - describe("implemented agent scripts", () => { - it("should have at least one implemented script to check", () => { - expect(implementedEntries.length).toBeGreaterThan(0); - }); - - for (const [key] of implementedEntries) { - const scriptPath = join(REPO_ROOT, key + ".sh"); - - it(`${key}.sh should pass bash -n`, () => { - if (!existsSync(scriptPath)) { - throw new Error(`${key}.sh does not exist but is marked as implemented`); - } - const error = bashSyntaxCheck(scriptPath); - if (error) { - throw new Error(`${key}.sh has syntax errors:\n${error}`); - } - }); - } - }); - - // ── Summary stats ────────────────────────────────────────────────── - - describe("coverage summary", () => { - it("should check all implemented scripts", () => { - const existing = implementedEntries.filter(([key]) => - existsSync(join(REPO_ROOT, key + ".sh")) - ); - // All implemented entries should have corresponding files - expect(existing.length).toBe(implementedEntries.length); - }); - }); -}); diff --git a/cli/src/__tests__/shared-common-api-classify.test.ts 
b/cli/src/__tests__/shared-common-api-classify.test.ts deleted file mode 100644 index 3644e66b..00000000 --- a/cli/src/__tests__/shared-common-api-classify.test.ts +++ /dev/null @@ -1,605 +0,0 @@ -import { describe, it, expect, beforeEach, afterEach } from "bun:test"; -import { execSync, spawnSync } from "child_process"; -import { resolve } from "path"; - -/** - * Tests for _classify_api_result and _report_api_failure in shared/common.sh. - * - * These two helpers were extracted from _cloud_api_retry_loop in PR #821 to - * reduce its cyclomatic complexity. They had zero test coverage despite being - * invoked on EVERY cloud API call across ALL providers: - * - * - _classify_api_result: Decides whether to retry based on curl exit code - * and HTTP status code. Returns a reason string or empty (success). - * A bug here could cause infinite retries or silent failures. - * - * - _report_api_failure: Generates user-facing error messages after all - * retries are exhausted. Differentiates network errors from HTTP errors - * and includes the API response body for HTTP errors only. - * - * Tests run the actual bash functions in subprocesses to catch real shell - * behavior (quoting, variable expansion, exit codes). - * - * Agent: test-engineer - */ - -const REPO_ROOT = resolve(import.meta.dir, "../../.."); -const COMMON_SH = resolve(REPO_ROOT, "shared/common.sh"); - -/** - * Run a bash snippet that sources shared/common.sh first. - * Returns { exitCode, stdout, stderr }. - */ -function runBash( - script: string, - env?: Record -): { exitCode: number; stdout: string; stderr: string } { - const fullScript = `source "${COMMON_SH}"\n${script}`; - const result = spawnSync("bash", ["-c", fullScript], { - encoding: "utf-8", - timeout: 10000, - stdio: ["pipe", "pipe", "pipe"], - env: { ...process.env, ...env }, - }); - return { - exitCode: result.status ?? 
1, - stdout: (result.stdout || "").trim(), - stderr: (result.stderr || "").trim(), - }; -} - -// ── _classify_api_result ──────────────────────────────────────────────────── - -describe("_classify_api_result", () => { - describe("network errors (curl failures)", () => { - it("should return network error message when curl exits non-zero", () => { - const result = runBash(` - API_HTTP_CODE="" - echo "$(_classify_api_result 1)" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("Cloud API network error"); - }); - - it("should return network error for curl exit code 6 (DNS failure)", () => { - const result = runBash(` - API_HTTP_CODE="" - echo "$(_classify_api_result 6)" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("Cloud API network error"); - }); - - it("should return network error for curl exit code 7 (connection refused)", () => { - const result = runBash(` - API_HTTP_CODE="" - echo "$(_classify_api_result 7)" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("Cloud API network error"); - }); - - it("should return network error for curl exit code 28 (timeout)", () => { - const result = runBash(` - API_HTTP_CODE="" - echo "$(_classify_api_result 28)" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("Cloud API network error"); - }); - - it("should prioritize curl failure over HTTP code", () => { - // If curl itself failed, the HTTP code is meaningless - const result = runBash(` - API_HTTP_CODE="200" - echo "$(_classify_api_result 7)" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("Cloud API network error"); - }); - }); - - describe("HTTP rate limiting (429)", () => { - it("should detect HTTP 429 rate limit", () => { - const result = runBash(` - API_HTTP_CODE="429" - echo "$(_classify_api_result 0)" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("Cloud API returned rate limit (HTTP 429)"); - }); - - it("should include HTTP 429 in the 
message", () => { - const result = runBash(` - API_HTTP_CODE="429" - result=$(_classify_api_result 0) - echo "$result" - `); - expect(result.stdout).toContain("429"); - expect(result.stdout).toContain("rate limit"); - }); - }); - - describe("HTTP service unavailable (503)", () => { - it("should detect HTTP 503 service unavailable", () => { - const result = runBash(` - API_HTTP_CODE="503" - echo "$(_classify_api_result 0)" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("Cloud API returned service unavailable (HTTP 503)"); - }); - - it("should include HTTP 503 in the message", () => { - const result = runBash(` - API_HTTP_CODE="503" - result=$(_classify_api_result 0) - echo "$result" - `); - expect(result.stdout).toContain("503"); - expect(result.stdout).toContain("service unavailable"); - }); - }); - - describe("success cases (no retry needed)", () => { - it("should return empty string for successful request (HTTP 200)", () => { - const result = runBash(` - API_HTTP_CODE="200" - result=$(_classify_api_result 0) - if [[ -z "$result" ]]; then - echo "EMPTY" - else - echo "$result" - fi - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("EMPTY"); - }); - - it("should return empty string for HTTP 201 (created)", () => { - const result = runBash(` - API_HTTP_CODE="201" - result=$(_classify_api_result 0) - if [[ -z "$result" ]]; then echo "EMPTY"; else echo "$result"; fi - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("EMPTY"); - }); - - it("should return empty string for HTTP 204 (no content)", () => { - const result = runBash(` - API_HTTP_CODE="204" - result=$(_classify_api_result 0) - if [[ -z "$result" ]]; then echo "EMPTY"; else echo "$result"; fi - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("EMPTY"); - }); - - it("should return empty string for HTTP 301 (redirect)", () => { - const result = runBash(` - API_HTTP_CODE="301" - result=$(_classify_api_result 0) - if [[ -z "$result" 
]]; then echo "EMPTY"; else echo "$result"; fi - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("EMPTY"); - }); - }); - - describe("non-retryable HTTP errors (not classified)", () => { - it("should return empty for HTTP 400 (bad request)", () => { - const result = runBash(` - API_HTTP_CODE="400" - result=$(_classify_api_result 0) - if [[ -z "$result" ]]; then echo "EMPTY"; else echo "$result"; fi - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("EMPTY"); - }); - - it("should return empty for HTTP 401 (unauthorized)", () => { - const result = runBash(` - API_HTTP_CODE="401" - result=$(_classify_api_result 0) - if [[ -z "$result" ]]; then echo "EMPTY"; else echo "$result"; fi - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("EMPTY"); - }); - - it("should return empty for HTTP 403 (forbidden)", () => { - const result = runBash(` - API_HTTP_CODE="403" - result=$(_classify_api_result 0) - if [[ -z "$result" ]]; then echo "EMPTY"; else echo "$result"; fi - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("EMPTY"); - }); - - it("should return empty for HTTP 404 (not found)", () => { - const result = runBash(` - API_HTTP_CODE="404" - result=$(_classify_api_result 0) - if [[ -z "$result" ]]; then echo "EMPTY"; else echo "$result"; fi - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("EMPTY"); - }); - - it("should return empty for HTTP 409 (conflict)", () => { - const result = runBash(` - API_HTTP_CODE="409" - result=$(_classify_api_result 0) - if [[ -z "$result" ]]; then echo "EMPTY"; else echo "$result"; fi - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("EMPTY"); - }); - - it("should return empty for HTTP 500 (internal server error)", () => { - const result = runBash(` - API_HTTP_CODE="500" - result=$(_classify_api_result 0) - if [[ -z "$result" ]]; then echo "EMPTY"; else echo "$result"; fi - `); - expect(result.exitCode).toBe(0); - 
expect(result.stdout).toBe("EMPTY"); - }); - - it("should return empty for HTTP 502 (bad gateway)", () => { - const result = runBash(` - API_HTTP_CODE="502" - result=$(_classify_api_result 0) - if [[ -z "$result" ]]; then echo "EMPTY"; else echo "$result"; fi - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("EMPTY"); - }); - }); - - describe("edge cases", () => { - it("should handle empty API_HTTP_CODE with curl success", () => { - const result = runBash(` - API_HTTP_CODE="" - result=$(_classify_api_result 0) - if [[ -z "$result" ]]; then echo "EMPTY"; else echo "$result"; fi - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("EMPTY"); - }); - - it("should handle unset API_HTTP_CODE with curl success", () => { - const result = runBash(` - unset API_HTTP_CODE - result=$(_classify_api_result 0) - if [[ -z "$result" ]]; then echo "EMPTY"; else echo "$result"; fi - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("EMPTY"); - }); - - it("should treat curl_ok string '0' as success", () => { - const result = runBash(` - API_HTTP_CODE="200" - result=$(_classify_api_result "0") - if [[ -z "$result" ]]; then echo "EMPTY"; else echo "$result"; fi - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("EMPTY"); - }); - - it("should treat any non-zero curl_ok as network error", () => { - const result = runBash(` - API_HTTP_CODE="200" - echo "$(_classify_api_result 99)" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("Cloud API network error"); - }); - }); -}); - -// ── _report_api_failure ───────────────────────────────────────────────────── - -describe("_report_api_failure", () => { - describe("network error reporting", () => { - it("should show retry count in error message", () => { - const result = runBash(` - API_RESPONSE_BODY="" - _report_api_failure "Cloud API network error" 3 - `); - expect(result.stderr).toContain("Cloud API network error"); - 
expect(result.stderr).toContain("3 attempts"); - }); - - it("should suggest checking internet connection for network errors", () => { - const result = runBash(` - API_RESPONSE_BODY="" - _report_api_failure "Cloud API network error" 5 - `); - expect(result.stderr).toContain("internet connection"); - }); - - it("should NOT output API response body for network errors", () => { - const result = runBash(` - API_RESPONSE_BODY='{"error": "should not appear"}' - _report_api_failure "Cloud API network error" 3 - `); - expect(result.stdout).not.toContain("should not appear"); - }); - }); - - describe("HTTP error reporting", () => { - it("should show rate limit reason in error message", () => { - const result = runBash(` - API_RESPONSE_BODY='{"error": "rate limited"}' - _report_api_failure "Cloud API returned rate limit (HTTP 429)" 3 - `); - expect(result.stderr).toContain("rate limit"); - expect(result.stderr).toContain("3 attempts"); - }); - - it("should output API response body for HTTP errors", () => { - const result = runBash(` - API_RESPONSE_BODY='{"error": "rate limited"}' - _report_api_failure "Cloud API returned rate limit (HTTP 429)" 3 - `); - expect(result.stdout).toContain("rate limited"); - }); - - it("should output API response body for 503 errors", () => { - const result = runBash(` - API_RESPONSE_BODY='{"error": "service unavailable"}' - _report_api_failure "Cloud API returned service unavailable (HTTP 503)" 3 - `); - expect(result.stdout).toContain("service unavailable"); - }); - - it("should suggest waiting and retrying for HTTP errors", () => { - const result = runBash(` - API_RESPONSE_BODY='{}' - _report_api_failure "Cloud API returned rate limit (HTTP 429)" 3 - `); - expect(result.stderr).toContain("rate limiting"); - expect(result.stderr).toContain("try again"); - }); - - it("should suggest checking status page for HTTP errors", () => { - const result = runBash(` - API_RESPONSE_BODY='{}' - _report_api_failure "Cloud API returned service unavailable (HTTP 
503)" 3 - `); - expect(result.stderr).toContain("status page"); - }); - }); - - describe("retry count display", () => { - it("should show 1 attempt for single retry", () => { - const result = runBash(` - API_RESPONSE_BODY="" - _report_api_failure "Cloud API network error" 1 - `); - expect(result.stderr).toContain("1 attempts"); - }); - - it("should show 5 attempts for max retries", () => { - const result = runBash(` - API_RESPONSE_BODY="" - _report_api_failure "Cloud API network error" 5 - `); - expect(result.stderr).toContain("5 attempts"); - }); - - it("should show 10 attempts for large retry count", () => { - const result = runBash(` - API_RESPONSE_BODY="" - _report_api_failure "Cloud API network error" 10 - `); - expect(result.stderr).toContain("10 attempts"); - }); - }); - - describe("API response body handling", () => { - it("should handle empty API response body", () => { - const result = runBash(` - API_RESPONSE_BODY="" - _report_api_failure "Cloud API returned rate limit (HTTP 429)" 3 - `); - expect(result.exitCode).toBe(0); - // Should still print the error message, just empty body - expect(result.stderr).toContain("rate limit"); - }); - - it("should handle multiline API response body", () => { - const result = runBash(` - API_RESPONSE_BODY='{"error": "rate limited", - "retry_after": 60, - "message": "Please slow down"}' - _report_api_failure "Cloud API returned rate limit (HTTP 429)" 3 - `); - expect(result.stdout).toContain("rate limited"); - expect(result.stdout).toContain("retry_after"); - }); - - it("should handle API response body with special characters", () => { - const result = runBash(` - API_RESPONSE_BODY='{"error": "quota exceeded: \$100 limit"}' - _report_api_failure "Cloud API returned rate limit (HTTP 429)" 3 - `); - expect(result.exitCode).toBe(0); - // Should not crash on special chars - expect(result.stderr).toContain("rate limit"); - }); - - it("should handle very long API response body", () => { - const result = runBash(` - 
API_RESPONSE_BODY=$(printf 'x%.0s' {1..1000}) - _report_api_failure "Cloud API returned rate limit (HTTP 429)" 3 - `); - expect(result.exitCode).toBe(0); - expect(result.stdout.length).toBeGreaterThan(500); - }); - }); -}); - -// ── Integration: _classify_api_result + _report_api_failure ───────────────── - -describe("_classify_api_result + _report_api_failure integration", () => { - it("should classify network error and report appropriately", () => { - const result = runBash(` - API_HTTP_CODE="" - API_RESPONSE_BODY="" - reason=$(_classify_api_result 7) - if [[ -n "$reason" ]]; then - _report_api_failure "$reason" 3 - echo "CLASSIFIED:$reason" - fi - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("CLASSIFIED:Cloud API network error"); - expect(result.stderr).toContain("internet connection"); - }); - - it("should classify rate limit and report with response body", () => { - const result = runBash(` - API_HTTP_CODE="429" - API_RESPONSE_BODY='{"error": "too many requests"}' - reason=$(_classify_api_result 0) - if [[ -n "$reason" ]]; then - _report_api_failure "$reason" 3 - echo "CLASSIFIED:$reason" - fi - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("too many requests"); - expect(result.stdout).toContain("CLASSIFIED:Cloud API returned rate limit (HTTP 429)"); - }); - - it("should classify 503 and report with response body", () => { - const result = runBash(` - API_HTTP_CODE="503" - API_RESPONSE_BODY='{"error": "maintenance"}' - reason=$(_classify_api_result 0) - if [[ -n "$reason" ]]; then - _report_api_failure "$reason" 5 - echo "CLASSIFIED:$reason" - fi - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("maintenance"); - expect(result.stdout).toContain("CLASSIFIED:Cloud API returned service unavailable (HTTP 503)"); - }); - - it("should return empty for successful request (no report needed)", () => { - const result = runBash(` - API_HTTP_CODE="200" - API_RESPONSE_BODY='{"id": "srv-123"}' - 
reason=$(_classify_api_result 0) - if [[ -z "$reason" ]]; then - echo "SUCCESS" - else - echo "SHOULD_RETRY:$reason" - fi - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("SUCCESS"); - }); - - it("should return empty for 404 (not retryable, caller handles)", () => { - const result = runBash(` - API_HTTP_CODE="404" - API_RESPONSE_BODY='{"error": "not found"}' - reason=$(_classify_api_result 0) - if [[ -z "$reason" ]]; then - echo "NOT_RETRYABLE" - else - echo "SHOULD_RETRY:$reason" - fi - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("NOT_RETRYABLE"); - }); -}); - -// ── Realistic cloud provider scenarios ────────────────────────────────────── - -describe("realistic cloud provider scenarios", () => { - it("should handle Hetzner rate limit response", () => { - const result = runBash(` - API_HTTP_CODE="429" - API_RESPONSE_BODY='{"error":{"message":"Rate limit exceeded","code":"rate_limit_exceeded"}}' - reason=$(_classify_api_result 0) - echo "REASON:$reason" - `); - expect(result.stdout).toContain("rate limit"); - }); - - it("should handle DigitalOcean 503 response", () => { - const result = runBash(` - API_HTTP_CODE="503" - API_RESPONSE_BODY='{"id":"service_unavailable","message":"Server Error"}' - reason=$(_classify_api_result 0) - _report_api_failure "$reason" 3 - `); - expect(result.stderr).toContain("service unavailable"); - expect(result.stderr).toContain("status page"); - expect(result.stdout).toContain("service_unavailable"); - }); - - it("should handle DNS resolution failure", () => { - const result = runBash(` - API_HTTP_CODE="" - API_RESPONSE_BODY="" - reason=$(_classify_api_result 6) - _report_api_failure "$reason" 3 - `); - expect(result.stderr).toContain("network error"); - expect(result.stderr).toContain("internet connection"); - }); - - it("should handle connection timeout", () => { - const result = runBash(` - API_HTTP_CODE="" - API_RESPONSE_BODY="" - reason=$(_classify_api_result 28) - _report_api_failure 
"$reason" 3 - `); - expect(result.stderr).toContain("network error"); - }); - - it("should not retry on auth failure (401)", () => { - const result = runBash(` - API_HTTP_CODE="401" - API_RESPONSE_BODY='{"error":"invalid_token"}' - reason=$(_classify_api_result 0) - if [[ -z "$reason" ]]; then - echo "NO_RETRY" - else - echo "RETRY:$reason" - fi - `); - expect(result.stdout).toBe("NO_RETRY"); - }); - - it("should not retry on quota exceeded (402/403)", () => { - const result = runBash(` - API_HTTP_CODE="402" - API_RESPONSE_BODY='{"error":"payment_required"}' - reason=$(_classify_api_result 0) - if [[ -z "$reason" ]]; then echo "NO_RETRY"; else echo "RETRY:$reason"; fi - `); - expect(result.stdout).toBe("NO_RETRY"); - }); - - it("should not retry on validation error (422)", () => { - const result = runBash(` - API_HTTP_CODE="422" - API_RESPONSE_BODY='{"error":"invalid_parameter"}' - reason=$(_classify_api_result 0) - if [[ -z "$reason" ]]; then echo "NO_RETRY"; else echo "RETRY:$reason"; fi - `); - expect(result.stdout).toBe("NO_RETRY"); - }); -}); diff --git a/cli/src/__tests__/shared-common-api-helpers.test.ts b/cli/src/__tests__/shared-common-api-helpers.test.ts deleted file mode 100644 index 72959b32..00000000 --- a/cli/src/__tests__/shared-common-api-helpers.test.ts +++ /dev/null @@ -1,1031 +0,0 @@ -import { describe, it, expect, beforeEach, afterEach } from "bun:test"; -import { resolve, join } from "path"; -import { mkdirSync, rmSync, existsSync, readFileSync } from "fs"; -import { tmpdir } from "os"; - -/** - * Tests for cloud API helper functions in shared/common.sh: - * - _parse_api_response: HTTP status code + body extraction from curl output - * - _update_retry_interval: exponential backoff interval doubling with cap - * - _api_should_retry_on_error: retry decision based on attempt count - * - calculate_retry_backoff: backoff with jitter - * - _cloud_api_retry_loop: full retry loop with mock request function - * - generic_cloud_api: end-to-end API call 
with Bearer auth (mocked) - * - generic_cloud_api_custom_auth: end-to-end API call with custom auth (mocked) - * - _make_api_request: Bearer auth wrapper - * - _make_api_request_custom_auth: custom auth wrapper - * - _curl_api: core curl wrapper (mocked curl) - * - * These functions were recently refactored (extracting _curl_api) and had - * zero dedicated test coverage. They are critical infrastructure used by - * every cloud provider for API communication. - * - * Agent: test-engineer - */ - -const REPO_ROOT = resolve(import.meta.dir, "../../.."); -const COMMON_SH = resolve(REPO_ROOT, "shared/common.sh"); - -let testDir: string; - -beforeEach(() => { - testDir = join(tmpdir(), `spawn-api-test-${Date.now()}-${Math.random().toString(36).slice(2)}`); - mkdirSync(testDir, { recursive: true }); -}); - -afterEach(() => { - if (existsSync(testDir)) { - rmSync(testDir, { recursive: true, force: true }); - } -}); - -/** - * Run a bash snippet that sources shared/common.sh first. - * Returns { exitCode, stdout, stderr }. - */ -function runBash(script: string): { exitCode: number; stdout: string; stderr: string } { - const fullScript = `source "${COMMON_SH}"\n${script}`; - const { spawnSync } = require("child_process"); - const result = spawnSync("bash", ["-c", fullScript], { - encoding: "utf-8", - timeout: 15000, - stdio: ["pipe", "pipe", "pipe"], - }); - return { - exitCode: result.status ?? 
1, - stdout: (result.stdout || "").trim(), - stderr: (result.stderr || "").trim(), - }; -} - -// ── _parse_api_response ───────────────────────────────────────────────── - -describe("_parse_api_response", () => { - describe("extracts HTTP code from last line", () => { - it("should extract 200 status code from response", () => { - const result = runBash(` - _parse_api_response '{"ok": true} -200' - echo "CODE:\${API_HTTP_CODE}" - echo "BODY:\${API_RESPONSE_BODY}" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("CODE:200"); - expect(result.stdout).toContain('BODY:{"ok": true}'); - }); - - it("should extract 404 status code", () => { - const result = runBash(` - _parse_api_response '{"error": "not found"} -404' - echo "CODE:\${API_HTTP_CODE}" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("CODE:404"); - }); - - it("should extract 500 status code", () => { - const result = runBash(` - _parse_api_response 'Internal Server Error -500' - echo "CODE:\${API_HTTP_CODE}" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("CODE:500"); - }); - - it("should extract 429 rate limit status code", () => { - const result = runBash(` - _parse_api_response '{"message": "rate limited"} -429' - echo "CODE:\${API_HTTP_CODE}" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("CODE:429"); - }); - - it("should extract 201 created status code", () => { - const result = runBash(` - _parse_api_response '{"id": "abc123"} -201' - echo "CODE:\${API_HTTP_CODE}" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("CODE:201"); - }); - - it("should extract 503 service unavailable status code", () => { - const result = runBash(` - _parse_api_response 'Service Unavailable -503' - echo "CODE:\${API_HTTP_CODE}" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("CODE:503"); - }); - - it("should extract 204 no content status code with empty body", () => { - 
const result = runBash(` - _parse_api_response ' -204' - echo "CODE:\${API_HTTP_CODE}" - echo "BODY:[\${API_RESPONSE_BODY}]" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("CODE:204"); - }); - }); - - describe("extracts response body correctly", () => { - it("should extract single-line JSON body", () => { - const result = runBash(` - _parse_api_response '{"key": "value"} -200' - echo "\${API_RESPONSE_BODY}" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe('{"key": "value"}'); - }); - - it("should extract multiline JSON body", () => { - const result = runBash(` - _parse_api_response '{ - "server": { - "id": 123, - "name": "test" - } -} -200' - echo "\${API_RESPONSE_BODY}" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain('"server"'); - expect(result.stdout).toContain('"id": 123'); - expect(result.stdout).toContain('"name": "test"'); - }); - - it("should handle body with multiple lines correctly", () => { - const result = runBash(` - _parse_api_response 'line1 -line2 -line3 -200' - echo "\${API_RESPONSE_BODY}" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("line1"); - expect(result.stdout).toContain("line2"); - expect(result.stdout).toContain("line3"); - expect(result.stdout).not.toContain("200"); - }); - - it("should handle body containing numbers that look like HTTP codes", () => { - const result = runBash(` - _parse_api_response '{"status": 200, "count": 404} -200' - echo "CODE:\${API_HTTP_CODE}" - echo "BODY:\${API_RESPONSE_BODY}" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("CODE:200"); - expect(result.stdout).toContain('"status": 200'); - expect(result.stdout).toContain('"count": 404'); - }); - - it("should handle HTML error body", () => { - const result = runBash(` - _parse_api_response 'Error -502' - echo "CODE:\${API_HTTP_CODE}" - echo "BODY:\${API_RESPONSE_BODY}" - `); - expect(result.exitCode).toBe(0); - 
expect(result.stdout).toContain("CODE:502"); - expect(result.stdout).toContain("Error"); - }); - }); - - describe("edge cases", () => { - it("should handle response with only HTTP code (no body)", () => { - const result = runBash(` - _parse_api_response '200' - echo "CODE:\${API_HTTP_CODE}" - echo "BODYLEN:\${#API_RESPONSE_BODY}" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("CODE:200"); - }); - - it("should set globals for subsequent calls", () => { - const result = runBash(` - _parse_api_response '{"first": true} -200' - echo "FIRST_CODE:\${API_HTTP_CODE}" - _parse_api_response '{"second": true} -500' - echo "SECOND_CODE:\${API_HTTP_CODE}" - echo "SECOND_BODY:\${API_RESPONSE_BODY}" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("FIRST_CODE:200"); - expect(result.stdout).toContain("SECOND_CODE:500"); - expect(result.stdout).toContain('SECOND_BODY:{"second": true}'); - }); - - it("should handle body with special characters", () => { - const result = runBash(` - _parse_api_response '{"msg": "hello & goodbye "} -200' - echo "\${API_RESPONSE_BODY}" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("hello & goodbye "); - }); - }); -}); - -// ── _update_retry_interval ────────────────────────────────────────────── - -describe("_update_retry_interval", () => { - it("should double the interval from 2 to 4", () => { - const result = runBash(` - interval=2 - max_interval=30 - _update_retry_interval interval max_interval - echo "\${interval}" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("4"); - }); - - it("should double the interval from 4 to 8", () => { - const result = runBash(` - interval=4 - max_interval=30 - _update_retry_interval interval max_interval - echo "\${interval}" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("8"); - }); - - it("should cap at max_interval", () => { - const result = runBash(` - interval=16 - max_interval=30 - 
_update_retry_interval interval max_interval - echo "\${interval}" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("30"); - }); - - it("should not exceed max when already at max", () => { - const result = runBash(` - interval=30 - max_interval=30 - _update_retry_interval interval max_interval - echo "\${interval}" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("30"); - }); - - it("should handle interval=1 doubling to 2", () => { - const result = runBash(` - interval=1 - max_interval=60 - _update_retry_interval interval max_interval - echo "\${interval}" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("2"); - }); - - it("should handle sequential doublings correctly", () => { - const result = runBash(` - interval=2 - max_interval=30 - _update_retry_interval interval max_interval - echo "\${interval}" - _update_retry_interval interval max_interval - echo "\${interval}" - _update_retry_interval interval max_interval - echo "\${interval}" - _update_retry_interval interval max_interval - echo "\${interval}" - `); - expect(result.exitCode).toBe(0); - const values = result.stdout.split("\n"); - expect(values[0]).toBe("4"); // 2 -> 4 - expect(values[1]).toBe("8"); // 4 -> 8 - expect(values[2]).toBe("16"); // 8 -> 16 - expect(values[3]).toBe("30"); // 16 -> 32, capped at 30 - }); - - it("should handle small max_interval", () => { - const result = runBash(` - interval=5 - max_interval=5 - _update_retry_interval interval max_interval - echo "\${interval}" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("5"); - }); - - it("should handle max_interval=1", () => { - const result = runBash(` - interval=1 - max_interval=1 - _update_retry_interval interval max_interval - echo "\${interval}" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("1"); - }); -}); - -// ── _api_should_retry_on_error ────────────────────────────────────────── - -describe("_api_should_retry_on_error", 
() => { - it("should return 0 (retry) when attempt < max_retries", () => { - const result = runBash(` - _api_should_retry_on_error 1 3 1 30 "test error" 2>/dev/null - echo "EXIT:$?" - `); - expect(result.stdout).toContain("EXIT:0"); - }); - - it("should return 1 (no retry) when attempt >= max_retries", () => { - const result = runBash(` - _api_should_retry_on_error 3 3 1 30 "test error" 2>/dev/null - echo "EXIT:$?" - `); - expect(result.stdout).toContain("EXIT:1"); - }); - - it("should return 1 when attempt exceeds max_retries", () => { - const result = runBash(` - _api_should_retry_on_error 5 3 1 30 "test error" 2>/dev/null - echo "EXIT:$?" - `); - expect(result.stdout).toContain("EXIT:1"); - }); - - it("should output retry warning to stderr", () => { - const result = runBash(` - _api_should_retry_on_error 1 3 1 30 "Cloud API returned rate limit (HTTP 429)" - `); - expect(result.exitCode).toBe(0); - expect(result.stderr).toContain("rate limit"); - expect(result.stderr).toContain("retrying"); - expect(result.stderr).toContain("attempt 1/3"); - }); - - it("should include attempt count in message", () => { - const result = runBash(` - _api_should_retry_on_error 2 5 1 30 "network error" - `); - expect(result.exitCode).toBe(0); - expect(result.stderr).toContain("attempt 2/5"); - }); -}); - -// calculate_retry_backoff tests are in shared-common-logging-utils.test.ts - -// ── _cloud_api_retry_loop ─────────────────────────────────────────────── - -describe("_cloud_api_retry_loop", () => { - it("should succeed on first attempt with 200 response", () => { - const result = runBash(` - mock_request() { - API_HTTP_CODE="200" - API_RESPONSE_BODY='{"ok": true}' - return 0 - } - output=$(_cloud_api_retry_loop mock_request 3 "GET /test") - echo "EXIT:$?" 
- echo "OUTPUT:\${output}" - `); - expect(result.stdout).toContain("EXIT:0"); - expect(result.stdout).toContain('OUTPUT:{"ok": true}'); - }); - - it("should output response body on success", () => { - const result = runBash(` - mock_request() { - API_HTTP_CODE="200" - API_RESPONSE_BODY='{"id": "server-123", "status": "active"}' - return 0 - } - output=$(_cloud_api_retry_loop mock_request 3 "GET /servers") - echo "\${output}" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain('"id": "server-123"'); - expect(result.stdout).toContain('"status": "active"'); - }); - - it("should succeed with 201 created response", () => { - const result = runBash(` - mock_request() { - API_HTTP_CODE="201" - API_RESPONSE_BODY='{"created": true}' - return 0 - } - output=$(_cloud_api_retry_loop mock_request 3 "POST /servers") - echo "EXIT:$?" - echo "\${output}" - `); - expect(result.stdout).toContain("EXIT:0"); - expect(result.stdout).toContain('{"created": true}'); - }); - - it("should fail after max retries on persistent 429", () => { - const result = runBash(` - mock_request() { - API_HTTP_CODE="429" - API_RESPONSE_BODY='{"error": "rate limited"}' - return 0 - } - output=$(_cloud_api_retry_loop mock_request 1 "GET /test" 2>/dev/null) - echo "EXIT:$?" - `); - expect(result.stdout).toContain("EXIT:1"); - }); - - it("should fail after max retries on persistent 503", () => { - const result = runBash(` - mock_request() { - API_HTTP_CODE="503" - API_RESPONSE_BODY='service unavailable' - return 0 - } - output=$(_cloud_api_retry_loop mock_request 1 "GET /test" 2>/dev/null) - echo "EXIT:$?" - `); - expect(result.stdout).toContain("EXIT:1"); - }); - - it("should fail after max retries on network error", () => { - const result = runBash(` - mock_request() { - API_HTTP_CODE="" - API_RESPONSE_BODY="" - return 1 - } - output=$(_cloud_api_retry_loop mock_request 1 "GET /test" 2>/dev/null) - echo "EXIT:$?" 
- `); - expect(result.stdout).toContain("EXIT:1"); - }); - - it("should not retry on 400 client error (returns body immediately)", () => { - // Use temp file to count attempts since $() is a subshell - const counterFile = join(testDir, "attempts"); - const result = runBash(` - echo 0 > "${counterFile}" - mock_request() { - local c; c=$(cat "${counterFile}"); echo $((c + 1)) > "${counterFile}" - API_HTTP_CODE="400" - API_RESPONSE_BODY='{"error": "bad request"}' - return 0 - } - output=$(_cloud_api_retry_loop mock_request 3 "GET /test" 2>/dev/null) - echo "EXIT:$?" - echo "\${output}" - `); - expect(result.stdout).toContain("EXIT:0"); - expect(result.stdout).toContain('{"error": "bad request"}'); - const attempts = parseInt(readFileSync(counterFile, "utf-8").trim(), 10); - expect(attempts).toBe(1); - }); - - it("should not retry on 401 unauthorized", () => { - const counterFile = join(testDir, "attempts"); - const result = runBash(` - echo 0 > "${counterFile}" - mock_request() { - local c; c=$(cat "${counterFile}"); echo $((c + 1)) > "${counterFile}" - API_HTTP_CODE="401" - API_RESPONSE_BODY='{"error": "unauthorized"}' - return 0 - } - output=$(_cloud_api_retry_loop mock_request 3 "POST /create" 2>/dev/null) - `); - const attempts = parseInt(readFileSync(counterFile, "utf-8").trim(), 10); - expect(attempts).toBe(1); - }); - - it("should not retry on 403 forbidden", () => { - const counterFile = join(testDir, "attempts"); - const result = runBash(` - echo 0 > "${counterFile}" - mock_request() { - local c; c=$(cat "${counterFile}"); echo $((c + 1)) > "${counterFile}" - API_HTTP_CODE="403" - API_RESPONSE_BODY='{"error": "forbidden"}' - return 0 - } - output=$(_cloud_api_retry_loop mock_request 3 "DELETE /server" 2>/dev/null) - `); - const attempts = parseInt(readFileSync(counterFile, "utf-8").trim(), 10); - expect(attempts).toBe(1); - }); - - it("should not retry on 404 not found", () => { - const counterFile = join(testDir, "attempts"); - const result = runBash(` - 
echo 0 > "${counterFile}" - mock_request() { - local c; c=$(cat "${counterFile}"); echo $((c + 1)) > "${counterFile}" - API_HTTP_CODE="404" - API_RESPONSE_BODY='{"error": "not found"}' - return 0 - } - output=$(_cloud_api_retry_loop mock_request 3 "GET /missing" 2>/dev/null) - echo "\${output}" - `); - const attempts = parseInt(readFileSync(counterFile, "utf-8").trim(), 10); - expect(attempts).toBe(1); - expect(result.stdout).toContain('{"error": "not found"}'); - }); - - it("should pass extra arguments through to request function", () => { - const result = runBash(` - mock_request() { - echo "ARGS:$*" >&2 - API_HTTP_CODE="200" - API_RESPONSE_BODY='ok' - return 0 - } - output=$(_cloud_api_retry_loop mock_request 3 "test" "arg1" "arg2" "arg3") - echo "EXIT:$?" - `); - expect(result.exitCode).toBe(0); - expect(result.stderr).toContain("ARGS:arg1 arg2 arg3"); - expect(result.stdout).toContain("EXIT:0"); - }); - - it("should log error on max retry exhaustion for network error", () => { - const result = runBash(` - mock_request() { - return 1 - } - _cloud_api_retry_loop mock_request 1 "GET /endpoint" - `); - expect(result.exitCode).toBe(1); - expect(result.stderr).toContain("network error"); - }); - - it("should log error on max retry exhaustion for 429", () => { - const result = runBash(` - mock_request() { - API_HTTP_CODE="429" - API_RESPONSE_BODY='rate limited' - return 0 - } - _cloud_api_retry_loop mock_request 1 "GET /endpoint" - `); - expect(result.exitCode).toBe(1); - expect(result.stderr).toContain("rate limit"); - }); -}); - -// ── generic_cloud_api (with mocked request function) ──────────────────── - -describe("generic_cloud_api (mocked request)", () => { - it("should call _make_api_request with correct arguments", () => { - const result = runBash(` - _make_api_request() { - echo "BASE:\${1}" >&2 - echo "TOKEN:\${2}" >&2 - echo "METHOD:\${3}" >&2 - echo "ENDPOINT:\${4}" >&2 - echo "BODY:\${5}" >&2 - API_HTTP_CODE="200" - API_RESPONSE_BODY='{"result": "ok"}' - 
return 0 - } - output=$(generic_cloud_api "https://api.example.com" "my-token" "GET" "/v1/servers" "" 1) - echo "EXIT:$?" - `); - expect(result.exitCode).toBe(0); - expect(result.stderr).toContain("BASE:https://api.example.com"); - expect(result.stderr).toContain("TOKEN:my-token"); - expect(result.stderr).toContain("METHOD:GET"); - expect(result.stderr).toContain("ENDPOINT:/v1/servers"); - expect(result.stdout).toContain("EXIT:0"); - }); - - it("should pass body to _make_api_request for POST", () => { - const result = runBash(` - _make_api_request() { - echo "METHOD:\${3}" >&2 - echo "BODY:\${5}" >&2 - API_HTTP_CODE="201" - API_RESPONSE_BODY='{"id": 1}' - return 0 - } - output=$(generic_cloud_api "https://api.example.com" "token" "POST" "/servers" '{"name":"test"}' 1) - echo "\${output}" - `); - expect(result.exitCode).toBe(0); - expect(result.stderr).toContain("METHOD:POST"); - expect(result.stderr).toContain('BODY:{"name":"test"}'); - expect(result.stdout).toContain('{"id": 1}'); - }); - - it("should return response body on success", () => { - const result = runBash(` - _make_api_request() { - API_HTTP_CODE="200" - API_RESPONSE_BODY='{"servers": [{"id": 1}, {"id": 2}]}' - return 0 - } - output=$(generic_cloud_api "https://api.example.com" "tok" "GET" "/servers" "" 1) - echo "\${output}" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain('"servers"'); - expect(result.stdout).toContain('"id": 1'); - }); - - it("should use default max_retries=3 when not specified", () => { - const counterFile = join(testDir, "attempts"); - const result = runBash(` - echo 0 > "${counterFile}" - _make_api_request() { - local c; c=$(cat "${counterFile}"); echo $((c + 1)) > "${counterFile}" - API_HTTP_CODE="429" - API_RESPONSE_BODY='rate limited' - return 0 - } - _api_should_retry_on_error() { return 1; } - output=$(generic_cloud_api "https://api.example.com" "tok" "GET" "/test" 2>/dev/null) - `); - // Default max_retries is 3, so the loop runs at most 3 times - 
const attempts = parseInt(readFileSync(counterFile, "utf-8").trim(), 10); - expect(attempts).toBeGreaterThanOrEqual(1); - expect(attempts).toBeLessThanOrEqual(3); - }); -}); - -// ── generic_cloud_api_custom_auth (with mocked request function) ──────── - -describe("generic_cloud_api_custom_auth (mocked request)", () => { - it("should call _make_api_request_custom_auth with full URL", () => { - const result = runBash(` - _make_api_request_custom_auth() { - echo "URL:\${1}" >&2 - echo "METHOD:\${2}" >&2 - echo "BODY:\${3}" >&2 - shift 3 - echo "AUTH_ARGS:$*" >&2 - API_HTTP_CODE="200" - API_RESPONSE_BODY='ok' - return 0 - } - output=$(generic_cloud_api_custom_auth "https://api.example.com" "GET" "/account" "" 1 -H "X-Auth-Token: secret") - echo "EXIT:$?" - `); - expect(result.exitCode).toBe(0); - expect(result.stderr).toContain("URL:https://api.example.com/account"); - expect(result.stderr).toContain("METHOD:GET"); - expect(result.stderr).toContain("AUTH_ARGS:-H X-Auth-Token: secret"); - expect(result.stdout).toContain("EXIT:0"); - }); - - it("should pass Basic Auth credentials as custom curl args", () => { - const result = runBash(` - _make_api_request_custom_auth() { - shift 3 - echo "AUTH:$*" >&2 - API_HTTP_CODE="200" - API_RESPONSE_BODY='{"user": "me"}' - return 0 - } - output=$(generic_cloud_api_custom_auth "https://api.example.com" "GET" "/me" "" 1 -u "user:pass") - echo "\${output}" - `); - expect(result.exitCode).toBe(0); - expect(result.stderr).toContain("AUTH:-u user:pass"); - expect(result.stdout).toContain('{"user": "me"}'); - }); - - it("should pass body for POST with custom auth", () => { - const result = runBash(` - _make_api_request_custom_auth() { - echo "BODY:\${3}" >&2 - API_HTTP_CODE="201" - API_RESPONSE_BODY='{"created": true}' - return 0 - } - output=$(generic_cloud_api_custom_auth "https://api.example.com" "POST" "/servers" '{"size":"small"}' 1 -H "X-Auth: tok") - echo "\${output}" - `); - expect(result.exitCode).toBe(0); - 
expect(result.stderr).toContain('BODY:{"size":"small"}'); - expect(result.stdout).toContain('{"created": true}'); - }); - - it("should support multiple custom auth headers", () => { - const result = runBash(` - _make_api_request_custom_auth() { - shift 3 - echo "ARGS:$*" >&2 - API_HTTP_CODE="200" - API_RESPONSE_BODY='ok' - return 0 - } - output=$(generic_cloud_api_custom_auth "https://api.example.com" "GET" "/test" "" 1 -H "X-Header-1: val1" -H "X-Header-2: val2") - echo "EXIT:$?" - `); - expect(result.exitCode).toBe(0); - expect(result.stderr).toContain("X-Header-1: val1"); - expect(result.stderr).toContain("X-Header-2: val2"); - }); -}); - -// ── _make_api_request (Bearer auth wrapper) ───────────────────────────── - -describe("_make_api_request (Bearer auth wrapper)", () => { - it("should call _curl_api with Bearer authorization header", () => { - const result = runBash(` - _curl_api() { - echo "URL:\${1}" >&2 - echo "METHOD:\${2}" >&2 - echo "BODY:\${3}" >&2 - shift 3 - echo "EXTRA:$*" >&2 - API_HTTP_CODE="200" - API_RESPONSE_BODY='ok' - return 0 - } - _make_api_request "https://api.example.com" "my-bearer-token" "GET" "/v1/servers" "" - echo "EXIT:$?" 
- `); - expect(result.exitCode).toBe(0); - expect(result.stderr).toContain("URL:https://api.example.com/v1/servers"); - expect(result.stderr).toContain("METHOD:GET"); - expect(result.stderr).toContain("Authorization: Bearer my-bearer-token"); - expect(result.stdout).toContain("EXIT:0"); - }); - - it("should concatenate base_url and endpoint", () => { - const result = runBash(` - _curl_api() { - echo "URL:\${1}" >&2 - API_HTTP_CODE="200" - API_RESPONSE_BODY='{}' - return 0 - } - _make_api_request "https://api.hetzner.cloud/v1" "tok" "GET" "/servers" "" - `); - expect(result.exitCode).toBe(0); - expect(result.stderr).toContain("URL:https://api.hetzner.cloud/v1/servers"); - }); - - it("should pass body for POST requests", () => { - const result = runBash(` - _curl_api() { - echo "BODY:\${3}" >&2 - API_HTTP_CODE="201" - API_RESPONSE_BODY='{"id":1}' - return 0 - } - _make_api_request "https://api.example.com" "tok" "POST" "/create" '{"name":"test"}' - `); - expect(result.exitCode).toBe(0); - expect(result.stderr).toContain('BODY:{"name":"test"}'); - }); - - it("should pass empty body for GET requests", () => { - const result = runBash(` - _curl_api() { - echo "BODY:[\${3}]" >&2 - API_HTTP_CODE="200" - API_RESPONSE_BODY='{}' - return 0 - } - _make_api_request "https://api.example.com" "tok" "GET" "/list" - `); - expect(result.exitCode).toBe(0); - expect(result.stderr).toContain("BODY:[]"); - }); -}); - -// ── _make_api_request_custom_auth (custom auth wrapper) ───────────────── - -describe("_make_api_request_custom_auth", () => { - it("should call _curl_api with custom auth args", () => { - const result = runBash(` - _curl_api() { - echo "URL:\${1}" >&2 - echo "METHOD:\${2}" >&2 - shift 3 - echo "AUTH:$*" >&2 - API_HTTP_CODE="200" - API_RESPONSE_BODY='ok' - return 0 - } - _make_api_request_custom_auth "https://api.example.com/v1/servers" "GET" "" -H "X-Auth-Token: mytoken" - echo "EXIT:$?" 
- `); - expect(result.exitCode).toBe(0); - expect(result.stderr).toContain("URL:https://api.example.com/v1/servers"); - expect(result.stderr).toContain("METHOD:GET"); - expect(result.stderr).toContain("AUTH:-H X-Auth-Token: mytoken"); - expect(result.stdout).toContain("EXIT:0"); - }); - - it("should pass body as third argument", () => { - const result = runBash(` - _curl_api() { - echo "BODY:\${3}" >&2 - API_HTTP_CODE="201" - API_RESPONSE_BODY='created' - return 0 - } - _make_api_request_custom_auth "https://api.example.com" "POST" '{"name":"s1"}' -u "user:pass" - `); - expect(result.exitCode).toBe(0); - expect(result.stderr).toContain('BODY:{"name":"s1"}'); - }); - - it("should handle multiple custom auth arguments", () => { - const result = runBash(` - _curl_api() { - shift 3 - echo "ARGS:$*" >&2 - API_HTTP_CODE="200" - API_RESPONSE_BODY='ok' - return 0 - } - _make_api_request_custom_auth "https://api.example.com" "GET" "" -H "X-A: 1" -H "X-B: 2" -u "u:p" - `); - expect(result.exitCode).toBe(0); - expect(result.stderr).toContain("X-A: 1"); - expect(result.stderr).toContain("X-B: 2"); - expect(result.stderr).toContain("-u u:p"); - }); -}); - -// ── _curl_api (core curl wrapper with mocked curl) ────────────────────── - -describe("_curl_api (core curl wrapper)", () => { - it("should set API_HTTP_CODE and API_RESPONSE_BODY via mocked curl", () => { - const result = runBash(` - curl() { printf '{"mocked":true}\n200'; return 0; } - _curl_api "https://example.com/test" "GET" "" -H "Authorization: Bearer test" - echo "CODE:\${API_HTTP_CODE}" - echo "BODY:\${API_RESPONSE_BODY}" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("CODE:200"); - expect(result.stdout).toContain('BODY:{"mocked":true}'); - }); - - it("should return curl exit code on failure", () => { - const result = runBash(` - curl() { return 7; } - _curl_api "https://unreachable.example.com" "GET" "" - echo "EXIT:$?" 
- `); - expect(result.stdout).toContain("EXIT:7"); - }); - - it("should pass arguments to curl including Content-Type", () => { - // Use a temp file to record args since _curl_api captures curl stdout - const argsFile = join(testDir, "curl-args"); - const result = runBash(` - curl() { - printf '%s\n' "$@" > "${argsFile}" - printf '{"ok":true}\n200' - return 0 - } - _curl_api "https://example.com" "GET" "" - echo "CODE:\${API_HTTP_CODE}" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("CODE:200"); - const args = readFileSync(argsFile, "utf-8"); - expect(args).toContain("Content-Type: application/json"); - expect(args).toContain("-s"); - expect(args).toContain("-X"); - expect(args).toContain("GET"); - expect(args).toContain("https://example.com"); - }); - - it("should include -d flag when body is provided", () => { - const argsFile = join(testDir, "curl-args"); - const result = runBash(` - curl() { - printf '%s\n' "$@" > "${argsFile}" - printf 'ok\n200' - return 0 - } - _curl_api "https://example.com" "POST" '{"name":"test"}' - `); - expect(result.exitCode).toBe(0); - const args = readFileSync(argsFile, "utf-8"); - expect(args).toContain("-d"); - expect(args).toContain('{"name":"test"}'); - }); - - it("should not include -d flag when body is empty", () => { - const argsFile = join(testDir, "curl-args"); - const result = runBash(` - curl() { - printf '%s\n' "$@" > "${argsFile}" - printf 'ok\n200' - return 0 - } - _curl_api "https://example.com" "GET" "" - `); - expect(result.exitCode).toBe(0); - const args = readFileSync(argsFile, "utf-8"); - expect(args).not.toContain("-d"); - }); - - it("should pass the URL as the last argument to curl", () => { - const argsFile = join(testDir, "curl-args"); - const result = runBash(` - curl() { - printf '%s\n' "$@" > "${argsFile}" - printf 'ok\n200' - return 0 - } - _curl_api "https://api.example.com/v1/endpoint" "GET" "" - `); - expect(result.exitCode).toBe(0); - const args = readFileSync(argsFile, 
"utf-8").trim().split("\n"); - expect(args[args.length - 1]).toBe("https://api.example.com/v1/endpoint"); - }); - - it("should pass extra non-auth args to curl", () => { - // Authorization headers are now passed via -K (stdin) for security, - // so we test with a non-Authorization header that passes through directly - const argsFile = join(testDir, "curl-args"); - const result = runBash(` - curl() { - printf '%s\n' "$@" > "${argsFile}" - printf 'ok\n200' - return 0 - } - _curl_api "https://example.com" "GET" "" -H "X-Custom: my-value" - `); - expect(result.exitCode).toBe(0); - const args = readFileSync(argsFile, "utf-8"); - expect(args).toContain("-H"); - expect(args).toContain("X-Custom: my-value"); - }); - - it("should use specified HTTP method", () => { - const argsFile = join(testDir, "curl-args"); - const result = runBash(` - curl() { - printf '%s\n' "$@" > "${argsFile}" - printf 'ok\n200' - return 0 - } - _curl_api "https://example.com" "DELETE" "" - `); - expect(result.exitCode).toBe(0); - const args = readFileSync(argsFile, "utf-8"); - expect(args).toContain("-X"); - expect(args).toContain("DELETE"); - }); - - it("should handle multiline response body from curl", () => { - const result = runBash(` - curl() { - printf '{"line1": true,\n"line2": false}\n200' - return 0 - } - _curl_api "https://example.com" "GET" "" - echo "CODE:\${API_HTTP_CODE}" - echo "BODY:\${API_RESPONSE_BODY}" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("CODE:200"); - expect(result.stdout).toContain('"line1": true'); - expect(result.stdout).toContain('"line2": false'); - }); - - it("should handle 500 error response from curl", () => { - const result = runBash(` - curl() { - printf '{"error": "internal"}\n500' - return 0 - } - _curl_api "https://example.com" "GET" "" - echo "CODE:\${API_HTTP_CODE}" - echo "BODY:\${API_RESPONSE_BODY}" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("CODE:500"); - 
expect(result.stdout).toContain('BODY:{"error": "internal"}'); - }); -}); diff --git a/cli/src/__tests__/shared-common-credential-mgmt.test.ts b/cli/src/__tests__/shared-common-credential-mgmt.test.ts deleted file mode 100644 index 5c04c754..00000000 --- a/cli/src/__tests__/shared-common-credential-mgmt.test.ts +++ /dev/null @@ -1,731 +0,0 @@ -import { describe, it, expect, afterEach } from "bun:test"; -import { execSync } from "child_process"; -import { resolve, join } from "path"; -import { mkdirSync, writeFileSync, readFileSync, rmSync, existsSync, statSync } from "fs"; -import { tmpdir } from "os"; - -/** - * Tests for credential management functions in shared/common.sh. - * - * These functions had zero test coverage despite being used by every cloud - * provider script. They handle API token loading from env vars, config files, - * validation via provider test functions, and saving with proper permissions. - * - * Functions tested: - * - _load_token_from_env: load token from environment variable - * - _load_token_from_config: load token from JSON config file (api_key or token field) - * - _validate_token_with_provider: validate token via a test function - * - _save_token_to_config: save token to JSON config with chmod 600 - * - _multi_creds_all_env_set: check if all env vars are set - * - _multi_creds_load_config: load multiple credentials from JSON config - * - _multi_creds_validate: validate credentials via test function, unset on failure - * - * Agent: test-engineer - */ - -const REPO_ROOT = resolve(import.meta.dir, "../../.."); -const COMMON_SH = resolve(REPO_ROOT, "shared/common.sh"); - -/** - * Run a bash snippet that sources shared/common.sh first. - * Returns { exitCode, stdout, stderr }. 
- */ -function runBash(script: string): { exitCode: number; stdout: string; stderr: string } { - const fullScript = `source "${COMMON_SH}"\n${script}`; - try { - const stdout = execSync(`bash -c '${fullScript.replace(/'/g, "'\\''")}'`, { - encoding: "utf-8", - timeout: 10000, - stdio: ["pipe", "pipe", "pipe"], - }); - return { exitCode: 0, stdout: stdout.trim(), stderr: "" }; - } catch (err: any) { - return { - exitCode: err.status ?? 1, - stdout: (err.stdout || "").trim(), - stderr: (err.stderr || "").trim(), - }; - } -} - -function createTempDir(): string { - const dir = join(tmpdir(), `spawn-cred-test-${Date.now()}-${Math.random().toString(36).slice(2)}`); - mkdirSync(dir, { recursive: true }); - return dir; -} - -const tempDirs: string[] = []; - -afterEach(() => { - for (const dir of tempDirs) { - if (existsSync(dir)) { - rmSync(dir, { recursive: true, force: true }); - } - } - tempDirs.length = 0; -}); - -function trackTempDir(): string { - const dir = createTempDir(); - tempDirs.push(dir); - return dir; -} - -// ── _load_token_from_env ────────────────────────────────────────────────── - -describe("_load_token_from_env", () => { - it("should return 0 when env var is set", () => { - const result = runBash(` - export MY_TOKEN="test-token-123" - _load_token_from_env MY_TOKEN "TestProvider" - echo "exit=$?" 
- `); - expect(result.exitCode).toBe(0); - }); - - it("should return 1 when env var is not set", () => { - const result = runBash(` - unset MY_TOKEN 2>/dev/null - _load_token_from_env MY_TOKEN "TestProvider" - `); - expect(result.exitCode).toBe(1); - }); - - it("should return 1 when env var is empty string", () => { - const result = runBash(` - export MY_TOKEN="" - _load_token_from_env MY_TOKEN "TestProvider" - `); - expect(result.exitCode).toBe(1); - }); - - it("should log info message when token found", () => { - const result = runBash(` - export MY_TOKEN="abc" - _load_token_from_env MY_TOKEN "Hetzner" 2>&1 - `); - expect(result.stdout).toContain("Hetzner"); - expect(result.stdout).toContain("environment"); - }); - - it("should work with different env var names", () => { - const result = runBash(` - export HCLOUD_TOKEN="hetzner-token-value" - _load_token_from_env HCLOUD_TOKEN "Hetzner Cloud" - echo "exit=$?" - `); - expect(result.exitCode).toBe(0); - }); - - it("should handle tokens with special characters", () => { - const result = runBash(` - export MY_TOKEN="sk-or-v1-abc123/def+ghi=" - _load_token_from_env MY_TOKEN "Provider" - echo "exit=$?" 
- `); - expect(result.exitCode).toBe(0); - }); -}); - -// ── _load_token_from_config ─────────────────────────────────────────────── - -describe("_load_token_from_config", () => { - it("should load token from api_key field in JSON config", () => { - const dir = trackTempDir(); - const configFile = join(dir, "provider.json"); - writeFileSync(configFile, JSON.stringify({ api_key: "my-api-key-123" })); - - const result = runBash(` - _load_token_from_config "${configFile}" MY_TOKEN "TestProvider" - echo "$MY_TOKEN" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("my-api-key-123"); - }); - - it("should load token from token field in JSON config", () => { - const dir = trackTempDir(); - const configFile = join(dir, "provider.json"); - writeFileSync(configFile, JSON.stringify({ token: "my-token-456" })); - - const result = runBash(` - _load_token_from_config "${configFile}" MY_TOKEN "TestProvider" - echo "$MY_TOKEN" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("my-token-456"); - }); - - it("should prefer api_key over token when both present", () => { - const dir = trackTempDir(); - const configFile = join(dir, "provider.json"); - writeFileSync(configFile, JSON.stringify({ api_key: "api-key-value", token: "token-value" })); - - const result = runBash(` - _load_token_from_config "${configFile}" MY_TOKEN "TestProvider" - echo "$MY_TOKEN" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("api-key-value"); - }); - - it("should return 1 when config file does not exist", () => { - const result = runBash(` - _load_token_from_config "/nonexistent/path/config.json" MY_TOKEN "TestProvider" - `); - expect(result.exitCode).toBe(1); - }); - - it("should return 1 when config file has empty api_key and empty token", () => { - const dir = trackTempDir(); - const configFile = join(dir, "provider.json"); - writeFileSync(configFile, JSON.stringify({ api_key: "", token: "" })); - - const result = runBash(` - 
_load_token_from_config "${configFile}" MY_TOKEN "TestProvider" - `); - expect(result.exitCode).toBe(1); - }); - - it("should return 1 for invalid JSON", () => { - const dir = trackTempDir(); - const configFile = join(dir, "provider.json"); - writeFileSync(configFile, "not valid json {{{"); - - const result = runBash(` - _load_token_from_config "${configFile}" MY_TOKEN "TestProvider" - `); - expect(result.exitCode).toBe(1); - }); - - it("should return 1 when JSON has no api_key or token field", () => { - const dir = trackTempDir(); - const configFile = join(dir, "provider.json"); - writeFileSync(configFile, JSON.stringify({ username: "user", password: "pass" })); - - const result = runBash(` - _load_token_from_config "${configFile}" MY_TOKEN "TestProvider" - `); - expect(result.exitCode).toBe(1); - }); - - it("should export the env var with the loaded value", () => { - const dir = trackTempDir(); - const configFile = join(dir, "provider.json"); - writeFileSync(configFile, JSON.stringify({ api_key: "loaded-token" })); - - const result = runBash(` - _load_token_from_config "${configFile}" HCLOUD_TOKEN "Hetzner" - echo "HCLOUD_TOKEN=$HCLOUD_TOKEN" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("HCLOUD_TOKEN=loaded-token"); - }); - - it("should log info message with config file path", () => { - const dir = trackTempDir(); - const configFile = join(dir, "provider.json"); - writeFileSync(configFile, JSON.stringify({ api_key: "test" })); - - const result = runBash(` - _load_token_from_config "${configFile}" MY_TOKEN "TestProvider" 2>&1 - `); - expect(result.stdout).toContain(configFile); - expect(result.stdout).toContain("TestProvider"); - }); - - it("should fall back to token field when api_key is empty", () => { - const dir = trackTempDir(); - const configFile = join(dir, "provider.json"); - writeFileSync(configFile, JSON.stringify({ api_key: "", token: "fallback-token" })); - - const result = runBash(` - _load_token_from_config 
"${configFile}" MY_TOKEN "TestProvider" - echo "$MY_TOKEN" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("fallback-token"); - }); -}); - -// ── _validate_token_with_provider ───────────────────────────────────────── - -describe("_validate_token_with_provider", () => { - it("should return 0 when no test function provided (empty string)", () => { - const result = runBash(` - _validate_token_with_provider "" MY_TOKEN "TestProvider" - `); - expect(result.exitCode).toBe(0); - }); - - it("should return 0 when test function succeeds", () => { - const result = runBash(` - test_success() { return 0; } - export MY_TOKEN="valid-token" - _validate_token_with_provider test_success MY_TOKEN "TestProvider" - `); - expect(result.exitCode).toBe(0); - }); - - it("should return 1 when test function fails", () => { - const result = runBash(` - test_fail() { return 1; } - export MY_TOKEN="invalid-token" - _validate_token_with_provider test_fail MY_TOKEN "TestProvider" - `); - expect(result.exitCode).toBe(1); - }); - - it("should unset the env var when validation fails", () => { - const result = runBash(` - test_fail() { return 1; } - export MY_TOKEN="will-be-unset" - _validate_token_with_provider test_fail MY_TOKEN "TestProvider" 2>/dev/null - echo "MY_TOKEN=\${MY_TOKEN:-UNSET}" - `); - expect(result.stdout).toContain("MY_TOKEN=UNSET"); - }); - - it("should log authentication failed message on failure", () => { - const result = runBash(` - test_fail() { return 1; } - export MY_TOKEN="bad" - _validate_token_with_provider test_fail MY_TOKEN "Lambda" 2>&1 - `); - expect(result.stdout).toContain("Authentication failed"); - expect(result.stdout).toContain("Lambda"); - }); - - it("should not unset env var when validation succeeds", () => { - const result = runBash(` - test_ok() { return 0; } - export MY_TOKEN="good-token" - _validate_token_with_provider test_ok MY_TOKEN "TestProvider" - echo "MY_TOKEN=$MY_TOKEN" - `); - 
expect(result.stdout).toContain("MY_TOKEN=good-token"); - }); -}); - -// ── _save_token_to_config ───────────────────────────────────────────────── - -describe("_save_token_to_config", () => { - it("should create config file with api_key and token fields", () => { - const dir = trackTempDir(); - const configFile = join(dir, "subdir", "provider.json"); - - runBash(`_save_token_to_config "${configFile}" "my-secret-token" 2>/dev/null`); - - expect(existsSync(configFile)).toBe(true); - const content = readFileSync(configFile, "utf-8"); - const parsed = JSON.parse(content); - expect(parsed.api_key).toBe("my-secret-token"); - expect(parsed.token).toBe("my-secret-token"); - }); - - it("should create parent directories if they do not exist", () => { - const dir = trackTempDir(); - const configFile = join(dir, "deep", "nested", "config.json"); - - runBash(`_save_token_to_config "${configFile}" "test-token" 2>/dev/null`); - - expect(existsSync(configFile)).toBe(true); - }); - - it("should set file permissions to 600", () => { - const dir = trackTempDir(); - const configFile = join(dir, "secure.json"); - - runBash(`_save_token_to_config "${configFile}" "secret" 2>/dev/null`); - - const stats = statSync(configFile); - const mode = (stats.mode & 0o777).toString(8); - expect(mode).toBe("600"); - }); - - it("should properly JSON-escape tokens with special characters", () => { - const dir = trackTempDir(); - const configFile = join(dir, "special.json"); - - // Token with quotes and backslashes - runBash(`_save_token_to_config "${configFile}" 'token-with-"quotes"' 2>/dev/null`); - - const content = readFileSync(configFile, "utf-8"); - // Should be valid JSON - expect(() => JSON.parse(content)).not.toThrow(); - const parsed = JSON.parse(content); - expect(parsed.api_key).toBe('token-with-"quotes"'); - }); - - it("should overwrite existing config file", () => { - const dir = trackTempDir(); - const configFile = join(dir, "provider.json"); - writeFileSync(configFile, JSON.stringify({ 
api_key: "old-token" })); - - runBash(`_save_token_to_config "${configFile}" "new-token" 2>/dev/null`); - - const content = readFileSync(configFile, "utf-8"); - const parsed = JSON.parse(content); - expect(parsed.api_key).toBe("new-token"); - }); - - it("should write valid JSON that can be re-read by _load_token_from_config", () => { - const dir = trackTempDir(); - const configFile = join(dir, "roundtrip.json"); - - // Save token - runBash(`_save_token_to_config "${configFile}" "roundtrip-value" 2>/dev/null`); - - // Load it back - const result = runBash(` - _load_token_from_config "${configFile}" LOADED_TOKEN "Test" 2>/dev/null - echo "$LOADED_TOKEN" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("roundtrip-value"); - }); - - it("should handle empty token string", () => { - const dir = trackTempDir(); - const configFile = join(dir, "empty.json"); - - runBash(`_save_token_to_config "${configFile}" "" 2>/dev/null`); - - expect(existsSync(configFile)).toBe(true); - const content = readFileSync(configFile, "utf-8"); - expect(() => JSON.parse(content)).not.toThrow(); - const parsed = JSON.parse(content); - expect(parsed.api_key).toBe(""); - }); -}); - -// ── _multi_creds_all_env_set ────────────────────────────────────────────── - -describe("_multi_creds_all_env_set", () => { - it("should return 0 when all env vars are set", () => { - const result = runBash(` - export VAR_A="a" - export VAR_B="b" - export VAR_C="c" - _multi_creds_all_env_set VAR_A VAR_B VAR_C - `); - expect(result.exitCode).toBe(0); - }); - - it("should return 1 when any env var is missing", () => { - const result = runBash(` - export VAR_A="a" - unset VAR_B 2>/dev/null - _multi_creds_all_env_set VAR_A VAR_B - `); - expect(result.exitCode).toBe(1); - }); - - it("should return 1 when any env var is empty string", () => { - const result = runBash(` - export VAR_A="a" - export VAR_B="" - _multi_creds_all_env_set VAR_A VAR_B - `); - expect(result.exitCode).toBe(1); - }); - - 
it("should return 0 with a single env var that is set", () => { - const result = runBash(` - export SINGLE_VAR="value" - _multi_creds_all_env_set SINGLE_VAR - `); - expect(result.exitCode).toBe(0); - }); - - it("should return 1 when first env var is missing but second is set", () => { - const result = runBash(` - unset FIRST_VAR 2>/dev/null - export SECOND_VAR="present" - _multi_creds_all_env_set FIRST_VAR SECOND_VAR - `); - expect(result.exitCode).toBe(1); - }); - - it("should return 1 when last env var is missing", () => { - const result = runBash(` - export VAR_A="a" - export VAR_B="b" - unset VAR_C 2>/dev/null - _multi_creds_all_env_set VAR_A VAR_B VAR_C - `); - expect(result.exitCode).toBe(1); - }); - - it("should return 0 with no arguments (vacuously true)", () => { - const result = runBash(` - _multi_creds_all_env_set - `); - expect(result.exitCode).toBe(0); - }); -}); - -// ── _multi_creds_load_config ────────────────────────────────────────────── - -describe("_multi_creds_load_config", () => { - it("should load two credentials from JSON config into env vars", () => { - const dir = trackTempDir(); - const configFile = join(dir, "multi.json"); - writeFileSync(configFile, JSON.stringify({ - client_id: "my-client-id", - client_secret: "my-secret", - })); - - const result = runBash(` - _multi_creds_load_config "${configFile}" 2 CRED_ID CRED_SECRET client_id client_secret - echo "ID=$CRED_ID" - echo "SECRET=$CRED_SECRET" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("ID=my-client-id"); - expect(result.stdout).toContain("SECRET=my-secret"); - }); - - it("should return 1 when config file does not exist", () => { - const result = runBash(` - _multi_creds_load_config "/nonexistent/config.json" 1 MY_VAR my_key - `); - expect(result.exitCode).toBe(1); - }); - - it("should return 1 when a field is empty in config", () => { - const dir = trackTempDir(); - const configFile = join(dir, "partial.json"); - writeFileSync(configFile, 
JSON.stringify({ - client_id: "has-value", - client_secret: "", - })); - - const result = runBash(` - _multi_creds_load_config "${configFile}" 2 CRED_ID CRED_SECRET client_id client_secret - `); - expect(result.exitCode).toBe(1); - }); - - it("should return 1 when a field is missing from config", () => { - const dir = trackTempDir(); - const configFile = join(dir, "missing.json"); - writeFileSync(configFile, JSON.stringify({ - client_id: "has-value", - })); - - const result = runBash(` - _multi_creds_load_config "${configFile}" 2 CRED_ID CRED_SECRET client_id client_secret - `); - expect(result.exitCode).toBe(1); - }); - - it("should load a single credential from config", () => { - const dir = trackTempDir(); - const configFile = join(dir, "single.json"); - writeFileSync(configFile, JSON.stringify({ api_key: "single-value" })); - - const result = runBash(` - _multi_creds_load_config "${configFile}" 1 MY_KEY api_key - echo "KEY=$MY_KEY" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("KEY=single-value"); - }); - - it("should load three credentials from config", () => { - const dir = trackTempDir(); - const configFile = join(dir, "three.json"); - writeFileSync(configFile, JSON.stringify({ - username: "user1", - password: "pass1", - project: "proj1", - })); - - const result = runBash(` - _multi_creds_load_config "${configFile}" 3 MY_USER MY_PASS MY_PROJ username password project - echo "USER=$MY_USER" - echo "PASS=$MY_PASS" - echo "PROJ=$MY_PROJ" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("USER=user1"); - expect(result.stdout).toContain("PASS=pass1"); - expect(result.stdout).toContain("PROJ=proj1"); - }); - - it("should return 1 for invalid JSON config", () => { - const dir = trackTempDir(); - const configFile = join(dir, "invalid.json"); - writeFileSync(configFile, "not json {{{"); - - const result = runBash(` - _multi_creds_load_config "${configFile}" 1 MY_VAR my_key - `); - expect(result.exitCode).toBe(1); 
- }); -}); - -// ── _multi_creds_validate ───────────────────────────────────────────────── - -describe("_multi_creds_validate", () => { - it("should return 0 when no test function provided (empty string)", () => { - const result = runBash(` - _multi_creds_validate "" "TestProvider" "https://example.com" VAR_A VAR_B - `); - expect(result.exitCode).toBe(0); - }); - - it("should return 0 when test function succeeds", () => { - const result = runBash(` - test_ok() { return 0; } - export VAR_A="a" - _multi_creds_validate test_ok "TestProvider" "https://example.com" VAR_A - `); - expect(result.exitCode).toBe(0); - }); - - it("should return 1 when test function fails", () => { - const result = runBash(` - test_fail() { return 1; } - export VAR_A="a" - _multi_creds_validate test_fail "TestProvider" "https://example.com" VAR_A 2>/dev/null - `); - expect(result.exitCode).toBe(1); - }); - - it("should unset all env vars when validation fails", () => { - const result = runBash(` - test_fail() { return 1; } - export VAR_A="a" - export VAR_B="b" - _multi_creds_validate test_fail "TestProvider" "https://example.com" VAR_A VAR_B 2>/dev/null - echo "A=\${VAR_A:-UNSET}" - echo "B=\${VAR_B:-UNSET}" - `); - expect(result.stdout).toContain("A=UNSET"); - expect(result.stdout).toContain("B=UNSET"); - }); - - it("should not unset env vars when validation succeeds", () => { - const result = runBash(` - test_ok() { return 0; } - export VAR_A="kept" - export VAR_B="also-kept" - _multi_creds_validate test_ok "TestProvider" "https://example.com" VAR_A VAR_B 2>/dev/null - echo "A=$VAR_A" - echo "B=$VAR_B" - `); - expect(result.stdout).toContain("A=kept"); - expect(result.stdout).toContain("B=also-kept"); - }); - - it("should log error message with provider name on failure", () => { - const result = runBash(` - test_fail() { return 1; } - export VAR_A="a" - _multi_creds_validate test_fail "Contabo" "https://example.com" VAR_A 2>&1 - `); - expect(result.stdout).toContain("Contabo"); - 
expect(result.stdout).toContain("Invalid"); - }); -}); - -// ── Integration: _save_token_to_config + _load_token_from_config ────────── - -describe("credential roundtrip integration", () => { - it("should save and reload a simple token", () => { - const dir = trackTempDir(); - const configFile = join(dir, "roundtrip.json"); - - // Save - runBash(`_save_token_to_config "${configFile}" "abc123" 2>/dev/null`); - - // Load - const result = runBash(` - _load_token_from_config "${configFile}" LOADED "Test" 2>/dev/null - echo "$LOADED" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("abc123"); - }); - - it("should reject a token with backslashes (not valid in API tokens)", () => { - const dir = trackTempDir(); - const configFile = join(dir, "special.json"); - - // Save a token with backslashes - runBash(`_save_token_to_config "${configFile}" 'token\\with\\slashes' 2>/dev/null`); - - // Load should fail — backslashes are not in the allowed character set - const result = runBash( - `_load_token_from_config "${configFile}" LOADED "Test" 2>/dev/null`, - ); - expect(result.exitCode).not.toBe(0); - }); - - it("should validate after loading from config", () => { - const dir = trackTempDir(); - const configFile = join(dir, "validated.json"); - - // Save - runBash(`_save_token_to_config "${configFile}" "valid-token" 2>/dev/null`); - - // Load and validate - const result = runBash(` - test_valid() { [[ "$MY_TOKEN" == "valid-token" ]]; } - _load_token_from_config "${configFile}" MY_TOKEN "Test" 2>/dev/null - _validate_token_with_provider test_valid MY_TOKEN "TestProvider" - `); - expect(result.exitCode).toBe(0); - }); -}); - -// ── Integration: multi-credential save and load ─────────────────────────── - -describe("multi-credential save and load integration", () => { - it("should save with _save_json_config and load with _multi_creds_load_config", () => { - const dir = trackTempDir(); - const configFile = join(dir, "multi-roundtrip.json"); - - // Save two 
credentials - runBash(`_save_json_config "${configFile}" client_id "my-id" client_secret "my-secret" 2>/dev/null`); - - // Load them back - const result = runBash(` - _multi_creds_load_config "${configFile}" 2 LOADED_ID LOADED_SECRET client_id client_secret - echo "ID=$LOADED_ID" - echo "SECRET=$LOADED_SECRET" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("ID=my-id"); - expect(result.stdout).toContain("SECRET=my-secret"); - }); - - it("should save three credentials and load all three", () => { - const dir = trackTempDir(); - const configFile = join(dir, "three-creds.json"); - - runBash(`_save_json_config "${configFile}" username "user" password "pass" project_id "proj" 2>/dev/null`); - - const result = runBash(` - _multi_creds_load_config "${configFile}" 3 U P R username password project_id - echo "U=$U P=$P R=$R" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("U=user"); - expect(result.stdout).toContain("P=pass"); - expect(result.stdout).toContain("R=proj"); - }); - - it("should save config with chmod 600", () => { - const dir = trackTempDir(); - const configFile = join(dir, "perms.json"); - - runBash(`_save_json_config "${configFile}" key "value" 2>/dev/null`); - - const stats = statSync(configFile); - const mode = (stats.mode & 0o777).toString(8); - expect(mode).toBe("600"); - }); -}); diff --git a/cli/src/__tests__/shared-common-env-inject.test.ts b/cli/src/__tests__/shared-common-env-inject.test.ts deleted file mode 100644 index c3a666b4..00000000 --- a/cli/src/__tests__/shared-common-env-inject.test.ts +++ /dev/null @@ -1,302 +0,0 @@ -import { describe, it, expect } from "bun:test"; -import { execSync } from "child_process"; -import { resolve, join } from "path"; -import { mkdirSync, writeFileSync, readFileSync, rmSync } from "fs"; -import { tmpdir } from "os"; - -/** - * Tests for environment injection, JSON extraction, SSH key check, - * and opencode install helpers in shared/common.sh: - * - * - 
inject_env_vars_ssh: Injects env vars into remote server via SSH - * - inject_env_vars_local: Injects env vars for local/container providers - * - _extract_json_field: Extracts fields from JSON using Python expressions - * - check_ssh_key_by_fingerprint: Checks SSH key registration via API - * - opencode_install_cmd: Generates robust OpenCode install command - * - * These functions had zero test coverage despite being used across - * all cloud provider scripts. - * - * Agent: test-engineer - */ - -const REPO_ROOT = resolve(import.meta.dir, "../../.."); -const COMMON_SH = resolve(REPO_ROOT, "shared/common.sh"); - -/** - * Run a bash snippet that sources shared/common.sh first. - * Returns { exitCode, stdout, stderr }. - */ -function runBash(script: string): { exitCode: number; stdout: string; stderr: string } { - const fullScript = `source "${COMMON_SH}"\n${script}`; - try { - const stdout = execSync(`bash -c '${fullScript.replace(/'/g, "'\\''")}'`, { - encoding: "utf-8", - timeout: 10000, - stdio: ["pipe", "pipe", "pipe"], - env: { ...process.env, NO_COLOR: "1" }, - }); - return { exitCode: 0, stdout: stdout.trim(), stderr: "" }; - } catch (err: any) { - return { - exitCode: err.status ?? 1, - stdout: (err.stdout || "").trim(), - stderr: (err.stderr || "").trim(), - }; - } -} - -/** Create a temporary directory for test files. 
*/ -function createTempDir(): string { - const dir = join(tmpdir(), `spawn-test-${Date.now()}-${Math.random().toString(36).slice(2)}`); - mkdirSync(dir, { recursive: true }); - return dir; -} - -// ── inject_env_vars_ssh ──────────────────────────────────────────────── - -describe("inject_env_vars_ssh", () => { - it("should call upload_func and run_func with correct arguments", () => { - const dir = createTempDir(); - try { - // Create a mock zshrc - writeFileSync(join(dir, ".zshrc"), "# existing config\n"); - - // Mock upload and run functions that log their arguments - const result = runBash(` -mock_upload() { echo "UPLOAD: \$1 \$2 \$3"; } -mock_run() { echo "RUN: \$1 \$2"; } -inject_env_vars_ssh "192.168.1.1" mock_upload mock_run "MY_KEY=my_value" -`); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("UPLOAD: 192.168.1.1"); - expect(result.stdout).toContain("/tmp/spawn_env_"); - expect(result.stdout).toContain("RUN: 192.168.1.1"); - expect(result.stdout).toContain(".zshrc"); - } finally { - rmSync(dir, { recursive: true, force: true }); - } - }); - - it("should generate correct env config content via upload", () => { - const dir = createTempDir(); - try { - // Mock that captures the uploaded file content - const result = runBash(` -mock_upload() { cat "\$2"; } -mock_run() { true; } -inject_env_vars_ssh "10.0.0.1" mock_upload mock_run "API_KEY=sk-123" "BASE_URL=https://example.com" -`); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("export API_KEY='sk-123'"); - expect(result.stdout).toContain("export BASE_URL='https://example.com'"); - expect(result.stdout).toContain("# [spawn:env]"); - } finally { - rmSync(dir, { recursive: true, force: true }); - } - }); - - it("should handle multiple env vars", () => { - const result = runBash(` -mock_upload() { cat "\$2"; } -mock_run() { true; } -inject_env_vars_ssh "10.0.0.1" mock_upload mock_run "KEY1=val1" "KEY2=val2" "KEY3=val3" -`); - expect(result.exitCode).toBe(0); - 
expect(result.stdout).toContain("export KEY1='val1'"); - expect(result.stdout).toContain("export KEY2='val2'"); - expect(result.stdout).toContain("export KEY3='val3'"); - }); - - it("should pass server_ip as first arg to upload and run functions", () => { - const result = runBash(` -mock_upload() { echo "UPLOAD_IP=\$1"; } -mock_run() { echo "RUN_IP=\$1"; } -inject_env_vars_ssh "203.0.113.42" mock_upload mock_run "K=V" -`); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("UPLOAD_IP=203.0.113.42"); - expect(result.stdout).toContain("RUN_IP=203.0.113.42"); - }); - - it("should handle values with special characters", () => { - const result = runBash(` -mock_upload() { cat "\$2"; } -mock_run() { true; } -inject_env_vars_ssh "10.0.0.1" mock_upload mock_run "URL=https://api.example.com?key=abc&token=def" -`); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("export URL='https://api.example.com?key=abc&token=def'"); - }); - - it("should create temp file with restrictive permissions", () => { - const result = runBash(` -mock_upload() { - local perms - perms=$(stat -c '%a' "\$2" 2>/dev/null || stat -f '%Lp' "\$2" 2>/dev/null) - echo "PERMS=\$perms" -} -mock_run() { true; } -inject_env_vars_ssh "10.0.0.1" mock_upload mock_run "SECRET=s3cret" -`); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("PERMS=600"); - }); -}); - -// ── inject_env_vars_local ────────────────────────────────────────────── - -describe("inject_env_vars_local", () => { - it("should call upload and run functions without server_ip", () => { - const result = runBash(` -mock_upload() { echo "UPLOAD_ARGS: \$1 \$2"; } -mock_run() { echo "RUN_ARGS: \$1"; } -inject_env_vars_local mock_upload mock_run "MY_KEY=my_value" -`); - expect(result.exitCode).toBe(0); - // inject_env_vars_local does NOT pass server_ip - upload gets (local_path, remote_path) - expect(result.stdout).toContain("UPLOAD_ARGS:"); - expect(result.stdout).toContain("/tmp/spawn_env_"); - 
expect(result.stdout).toMatch(/cat '\/tmp\/spawn_env_[^']+' >> ~\/.bashrc; cat '\/tmp\/spawn_env_[^']+' >> ~\/.zshrc/); - }); - - it("should generate correct env config content", () => { - const result = runBash(` -mock_upload() { cat "\$1"; } -mock_run() { true; } -inject_env_vars_local mock_upload mock_run "OPENROUTER_KEY=sk-or-v1-abc" -`); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("export OPENROUTER_KEY='sk-or-v1-abc'"); - expect(result.stdout).toContain("# [spawn:env]"); - }); - - it("should handle multiple env vars", () => { - const result = runBash(` -mock_upload() { cat "\$1"; } -mock_run() { true; } -inject_env_vars_local mock_upload mock_run "K1=v1" "K2=v2" -`); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("export K1='v1'"); - expect(result.stdout).toContain("export K2='v2'"); - }); - - it("should create temp file with 600 permissions", () => { - const result = runBash(` -mock_upload() { - local perms - perms=$(stat -c '%a' "\$1" 2>/dev/null || stat -f '%Lp' "\$1" 2>/dev/null) - echo "PERMS=\$perms" -} -mock_run() { true; } -inject_env_vars_local mock_upload mock_run "SECRET=hidden" -`); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("PERMS=600"); - }); - - it("should differ from inject_env_vars_ssh in argument passing", () => { - // inject_env_vars_local passes (local_path, remote_path) to upload - // inject_env_vars_ssh passes (server_ip, local_path, remote_path) to upload - const localResult = runBash(` -mock_upload() { echo "ARG_COUNT=\$#"; } -mock_run() { true; } -inject_env_vars_local mock_upload mock_run "K=V" -`); - const sshResult = runBash(` -mock_upload() { echo "ARG_COUNT=\$#"; } -mock_run() { true; } -inject_env_vars_ssh "10.0.0.1" mock_upload mock_run "K=V" -`); - // local: upload(local_path, remote_path) = 2 args - // ssh: upload(server_ip, local_path, remote_path) = 3 args - expect(localResult.stdout).toContain("ARG_COUNT=2"); - 
expect(sshResult.stdout).toContain("ARG_COUNT=3"); - }); - - it("should handle values with single quotes via escaping", () => { - const result = runBash(` -mock_upload() { cat "\$1"; } -mock_run() { true; } -inject_env_vars_local mock_upload mock_run "MSG=it'\\''s a test" -`); - expect(result.exitCode).toBe(0); - // The value should be properly escaped for bash sourcing - expect(result.stdout).toContain("export MSG="); - }); -}); - -// _extract_json_field tests are in shared-common-json-extraction.test.ts - -// ── check_ssh_key_by_fingerprint ─────────────────────────────────────── - -describe("check_ssh_key_by_fingerprint", () => { - it("should return 0 when fingerprint is found in API response", () => { - const result = runBash(` -mock_api() { echo '{"ssh_keys":[{"fingerprint":"aa:bb:cc:dd"}]}'; } -check_ssh_key_by_fingerprint mock_api "/ssh_keys" "aa:bb:cc:dd" -`); - expect(result.exitCode).toBe(0); - }); - - it("should return 1 when fingerprint is not found", () => { - const result = runBash(` -mock_api() { echo '{"ssh_keys":[{"fingerprint":"xx:yy:zz:00"}]}'; } -check_ssh_key_by_fingerprint mock_api "/ssh_keys" "aa:bb:cc:dd" -`); - expect(result.exitCode).not.toBe(0); - }); - - it("should pass endpoint to the API function", () => { - const result = runBash(` -mock_api() { echo "CALLED_WITH: \$1 \$2"; } -check_ssh_key_by_fingerprint mock_api "/v2/account/keys" "test-fp" 2>/dev/null || true -echo "DONE" -`); - expect(result.stdout).toContain("DONE"); - }); - - it("should handle multiple keys and find a match", () => { - const result = runBash(` -mock_api() { - echo '{"ssh_keys":[{"fingerprint":"11:22:33:44"},{"fingerprint":"55:66:77:88"},{"fingerprint":"aa:bb:cc:dd"}]}' -} -check_ssh_key_by_fingerprint mock_api "/ssh_keys" "55:66:77:88" -`); - expect(result.exitCode).toBe(0); - }); - - it("should return failure for empty API response", () => { - const result = runBash(` -mock_api() { echo '{"ssh_keys":[]}'; } -check_ssh_key_by_fingerprint mock_api "/ssh_keys" 
"aa:bb:cc:dd" -`); - expect(result.exitCode).not.toBe(0); - }); - - it("should handle SHA256 format fingerprints", () => { - const result = runBash(` -mock_api() { echo '{"keys":[{"fingerprint":"SHA256:abcdef1234567890ABCDEF"}]}'; } -check_ssh_key_by_fingerprint mock_api "/keys" "SHA256:abcdef1234567890ABCDEF" -`); - expect(result.exitCode).toBe(0); - }); - - it("should use GET method when calling API function", () => { - const result = runBash(` -mock_api() { - echo "METHOD=\$1 ENDPOINT=\$2" >&2 - echo '{"keys":[]}' -} -check_ssh_key_by_fingerprint mock_api "/ssh_keys" "test" 2>&1 | head -1 -`); - expect(result.stdout).toContain("METHOD=GET"); - expect(result.stdout).toContain("ENDPOINT=/ssh_keys"); - }); -}); - -// opencode_install_cmd tests are in shared-common-logging-utils.test.ts - -// track_temp_file/cleanup_temp_files tests are in shared-common-logging-utils.test.ts -// validate_resource_name tests are in shared-common-validators.test.ts diff --git a/cli/src/__tests__/shared-common-error-polling.test.ts b/cli/src/__tests__/shared-common-error-polling.test.ts deleted file mode 100644 index bdeabf70..00000000 --- a/cli/src/__tests__/shared-common-error-polling.test.ts +++ /dev/null @@ -1,297 +0,0 @@ -import { describe, it, expect } from "bun:test"; -import { spawnSync } from "child_process"; -import { resolve } from "path"; - -/** - * Tests for extract_api_error_message and generic_wait_for_instance - * in shared/common.sh. - * - * extract_api_error_message is used across 4+ cloud providers (10+ call sites) - * to parse error responses from cloud APIs. It tries common JSON error field - * patterns: error.message, error.error_message, message, reason, error (string). - * - * generic_wait_for_instance is used across 9 cloud providers as the core - * polling loop for instance provisioning. It calls an API function repeatedly - * until the target status is reached, then extracts the IP address. 
- * - * Both had zero test coverage despite being critical shared infrastructure. - * - * Agent: test-engineer - */ - -const REPO_ROOT = resolve(import.meta.dir, "../../.."); -const COMMON_SH = resolve(REPO_ROOT, "shared/common.sh"); - -/** - * Run a bash snippet that sources shared/common.sh first. - * Returns { exitCode, stdout, stderr }. - */ -function runBash(script: string): { exitCode: number; stdout: string; stderr: string } { - const fullScript = `source "${COMMON_SH}"\n${script}`; - const result = spawnSync("bash", ["-c", fullScript], { - encoding: "utf-8", - timeout: 10000, - stdio: ["pipe", "pipe", "pipe"], - }); - return { - exitCode: result.status ?? 1, - stdout: (result.stdout || "").trim(), - stderr: (result.stderr || "").trim(), - }; -} - -// ── extract_api_error_message ────────────────────────────────────────── - -describe("extract_api_error_message", () => { - describe("top-level message field", () => { - it("should extract message from top-level 'message' field", () => { - const result = runBash( - `extract_api_error_message '{"message":"Server not found"}'` - ); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("Server not found"); - }); - - it("should extract message from top-level 'reason' field", () => { - const result = runBash( - `extract_api_error_message '{"reason":"Rate limit exceeded"}'` - ); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("Rate limit exceeded"); - }); - }); - - describe("error as string", () => { - it("should extract error when it is a plain string", () => { - const result = runBash( - `extract_api_error_message '{"error":"Unauthorized"}'` - ); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("Unauthorized"); - }); - - it("should extract error string even when it is a long message", () => { - const result = runBash( - `extract_api_error_message '{"error":"The API token provided is invalid or has expired"}'` - ); - expect(result.exitCode).toBe(0); - 
expect(result.stdout).toBe("The API token provided is invalid or has expired"); - }); - }); - - describe("error as object with message field", () => { - it("should extract error.message when error is an object", () => { - const result = runBash( - `extract_api_error_message '{"error":{"message":"Instance quota exceeded"}}'` - ); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("Instance quota exceeded"); - }); - - it("should extract error.error_message when error is an object", () => { - const result = runBash( - `extract_api_error_message '{"error":{"error_message":"Invalid region specified"}}'` - ); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("Invalid region specified"); - }); - - it("should prefer error.message over error.error_message", () => { - const result = runBash( - `extract_api_error_message '{"error":{"message":"Primary msg","error_message":"Secondary msg"}}'` - ); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("Primary msg"); - }); - }); - - describe("field priority", () => { - it("should prefer error.message over top-level message", () => { - const result = runBash( - `extract_api_error_message '{"error":{"message":"Nested error"},"message":"Top-level message"}'` - ); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("Nested error"); - }); - - it("should fall back to top-level message when error is empty object", () => { - const result = runBash( - `extract_api_error_message '{"error":{},"message":"Top-level message"}'` - ); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("Top-level message"); - }); - - it("should fall back to reason when no message or error fields", () => { - const result = runBash( - `extract_api_error_message '{"reason":"Forbidden","status":403}'` - ); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("Forbidden"); - }); - - it("should prefer message over reason", () => { - const result = runBash( - `extract_api_error_message 
'{"message":"Auth failed","reason":"Forbidden"}'` - ); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("Auth failed"); - }); - - it("should prefer error string over reason", () => { - const result = runBash( - `extract_api_error_message '{"error":"Bad token","reason":"Forbidden"}'` - ); - expect(result.exitCode).toBe(0); - // error string comes after message/reason in the or-chain but before empty - // The actual priority: error.message > message > reason > error(string) - // Wait, let's re-read the code: - // msg = (isinstance(e, dict) and (e.get('message') or e.get('error_message'))) - // or d.get('message') - // or d.get('reason') - // or (isinstance(e, str) and e) - // So error string has lowest priority - expect(result.stdout).toBe("Forbidden"); - }); - }); - - describe("fallback behavior", () => { - it("should use default fallback for invalid JSON", () => { - const result = runBash( - `extract_api_error_message 'not valid json'` - ); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("Unknown error"); - }); - - it("should use custom fallback for invalid JSON", () => { - const result = runBash( - `extract_api_error_message 'not valid json' 'Custom fallback'` - ); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("Custom fallback"); - }); - - it("should use fallback when JSON has no recognized error fields", () => { - const result = runBash( - `extract_api_error_message '{"status":500,"code":"INTERNAL"}' 'Server error'` - ); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("Server error"); - }); - - it("should use default fallback for empty JSON object", () => { - const result = runBash( - `extract_api_error_message '{}'` - ); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("Unknown error"); - }); - - it("should use fallback for empty string input", () => { - const result = runBash( - `extract_api_error_message '' 'No response'` - ); - expect(result.exitCode).toBe(0); - 
expect(result.stdout).toBe("No response"); - }); - - it("should use fallback when error object has no message or error_message", () => { - const result = runBash( - `extract_api_error_message '{"error":{"code":"ERR_QUOTA"}}' 'Quota error'` - ); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("Quota error"); - }); - }); - - describe("real-world API responses", () => { - it("should parse Hetzner-style error response", () => { - const result = runBash( - `extract_api_error_message '{"error":{"message":"server_limit_exceeded","code":"limit_exceeded"}}'` - ); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("server_limit_exceeded"); - }); - - it("should parse DigitalOcean-style error response", () => { - const result = runBash( - `extract_api_error_message '{"id":"unauthorized","message":"Unable to authenticate you"}'` - ); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("Unable to authenticate you"); - }); - - it("should parse Vultr-style error response", () => { - const result = runBash( - `extract_api_error_message '{"error":"Invalid API token.","status":401}'` - ); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("Invalid API token."); - }); - - it("should parse Contabo-style error response", () => { - const result = runBash( - `extract_api_error_message '{"error":{"message":"Resource not found","code":404}}'` - ); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("Resource not found"); - }); - - it("should parse response with HTML error body as fallback", () => { - const result = runBash( - `extract_api_error_message '503 Service Unavailable' 'Service unavailable'` - ); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("Service unavailable"); - }); - }); - - describe("edge cases", () => { - it("should handle message with special characters", () => { - const result = runBash( - `extract_api_error_message '{"message":"Can'\\''t create: quota (5/5) exceeded"}'` - ); - 
expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("quota"); - expect(result.stdout).toContain("exceeded"); - }); - - it("should handle message with unicode characters", () => { - const result = runBash( - `extract_api_error_message '{"message":"Fehler: Kontingent \\u00fcberschritten"}'` - ); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("Fehler"); - }); - - it("should handle JSON array input as fallback", () => { - const result = runBash( - `extract_api_error_message '[1,2,3]' 'Not an object'` - ); - expect(result.exitCode).toBe(0); - // JSON array has no .get method, so python will throw and fall through to fallback - expect(result.stdout).toBe("Not an object"); - }); - - it("should handle null JSON value as fallback", () => { - const result = runBash( - `extract_api_error_message 'null' 'Null response'` - ); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("Null response"); - }); - - it("should handle nested error with empty message string", () => { - const result = runBash( - `extract_api_error_message '{"error":{"message":""},"reason":"Backup reason"}' 'default'` - ); - expect(result.exitCode).toBe(0); - // Empty message is falsy in Python, so it should fall through - expect(result.stdout).toBe("Backup reason"); - }); - }); -}); - -// _extract_json_field tests are in shared-common-json-extraction.test.ts -// generic_wait_for_instance tests are in shared-common-ssh-helpers.test.ts diff --git a/cli/src/__tests__/shared-common-helpers.test.ts b/cli/src/__tests__/shared-common-helpers.test.ts deleted file mode 100644 index e247c777..00000000 --- a/cli/src/__tests__/shared-common-helpers.test.ts +++ /dev/null @@ -1,589 +0,0 @@ -import { describe, it, expect } from "bun:test"; -import { execSync } from "child_process"; -import { resolve, join } from "path"; -import { mkdirSync, writeFileSync, readFileSync, rmSync, existsSync } from "fs"; -import { tmpdir } from "os"; - -/** - * Tests for untested bash helper 
functions in shared/common.sh: - * - _load_json_config_fields: JSON config field loading (used by all multi-credential providers) - * - _save_json_config: JSON config writing with json_escape - * - extract_ssh_key_ids: SSH key ID extraction from cloud API responses - * - _generate_csrf_state: CSRF state generation (security-critical) - * - interactive_pick: Interactive picker with env var override - * - * These functions had zero test coverage despite being used across all cloud - * provider scripts. Each test sources shared/common.sh and calls the function - * in a real bash subprocess. - * - * Agent: test-engineer - */ - -const REPO_ROOT = resolve(import.meta.dir, "../../.."); -const COMMON_SH = resolve(REPO_ROOT, "shared/common.sh"); - -/** - * Run a bash snippet that sources shared/common.sh first. - * Returns { exitCode, stdout, stderr }. - */ -function runBash(script: string): { exitCode: number; stdout: string; stderr: string } { - const fullScript = `source "${COMMON_SH}"\n${script}`; - try { - const stdout = execSync(`bash -c '${fullScript.replace(/'/g, "'\\''")}'`, { - encoding: "utf-8", - timeout: 10000, - stdio: ["pipe", "pipe", "pipe"], - }); - return { exitCode: 0, stdout: stdout.trim(), stderr: "" }; - } catch (err: any) { - return { - exitCode: err.status ?? 1, - stdout: (err.stdout || "").trim(), - stderr: (err.stderr || "").trim(), - }; - } -} - -/** - * Create a temporary directory for test files. 
- */ -function createTempDir(): string { - const dir = join(tmpdir(), `spawn-test-${Date.now()}-${Math.random().toString(36).slice(2)}`); - mkdirSync(dir, { recursive: true }); - return dir; -} - -// ── _load_json_config_fields ──────────────────────────────────────────── - -describe("_load_json_config_fields", () => { - it("should load a single field from JSON config", () => { - const dir = createTempDir(); - const configFile = join(dir, "config.json"); - writeFileSync(configFile, JSON.stringify({ api_key: "sk-test-123" })); - - const result = runBash(`_load_json_config_fields "${configFile}" api_key`); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("sk-test-123"); - - rmSync(dir, { recursive: true, force: true }); - }); - - it("should load multiple fields from JSON config", () => { - const dir = createTempDir(); - const configFile = join(dir, "config.json"); - writeFileSync(configFile, JSON.stringify({ - username: "admin", - password: "s3cret", - region: "us-east-1", - })); - - const result = runBash(`_load_json_config_fields "${configFile}" username password region`); - expect(result.exitCode).toBe(0); - const lines = result.stdout.split("\n"); - expect(lines[0]).toBe("admin"); - expect(lines[1]).toBe("s3cret"); - expect(lines[2]).toBe("us-east-1"); - - rmSync(dir, { recursive: true, force: true }); - }); - - it("should return empty string for missing fields", () => { - const dir = createTempDir(); - const configFile = join(dir, "config.json"); - writeFileSync(configFile, JSON.stringify({ api_key: "present" })); - - // Use the intended read pattern -- missing fields produce empty lines - const result = runBash(` - creds=$(_load_json_config_fields "${configFile}" api_key missing_field) - { read -r v1; read -r v2; } <<< "\${creds}" - echo "v1=\${v1}" - echo "v2=\${v2}" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("v1=present"); - expect(result.stdout).toContain("v2="); - - rmSync(dir, { recursive: true, force: true 
}); - }); - - it("should return exit code 1 for missing config file", () => { - const result = runBash(`_load_json_config_fields "/tmp/nonexistent-spawn-config-${Date.now()}.json" api_key`); - expect(result.exitCode).toBe(1); - }); - - it("should return exit code 1 for invalid JSON", () => { - const dir = createTempDir(); - const configFile = join(dir, "bad.json"); - writeFileSync(configFile, "{ not valid json!!!"); - - const result = runBash(`_load_json_config_fields "${configFile}" api_key`); - expect(result.exitCode).toBe(1); - - rmSync(dir, { recursive: true, force: true }); - }); - - it("should handle empty JSON object", () => { - const dir = createTempDir(); - const configFile = join(dir, "empty.json"); - writeFileSync(configFile, "{}"); - - const result = runBash(`_load_json_config_fields "${configFile}" api_key`); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe(""); - - rmSync(dir, { recursive: true, force: true }); - }); - - it("should handle values with special characters", () => { - const dir = createTempDir(); - const configFile = join(dir, "special.json"); - writeFileSync(configFile, JSON.stringify({ - token: "sk-or-v1-abc123/def+456==", - url: "https://api.example.com/v1?key=val&other=true", - })); - - const result = runBash(`_load_json_config_fields "${configFile}" token url`); - expect(result.exitCode).toBe(0); - const lines = result.stdout.split("\n"); - expect(lines[0]).toBe("sk-or-v1-abc123/def+456=="); - expect(lines[1]).toBe("https://api.example.com/v1?key=val&other=true"); - - rmSync(dir, { recursive: true, force: true }); - }); - - it("should handle numeric and boolean values", () => { - const dir = createTempDir(); - const configFile = join(dir, "types.json"); - writeFileSync(configFile, JSON.stringify({ port: 8080, enabled: true })); - - const result = runBash(`_load_json_config_fields "${configFile}" port enabled`); - expect(result.exitCode).toBe(0); - const lines = result.stdout.split("\n"); - 
expect(lines[0]).toBe("8080"); - expect(lines[1]).toBe("true"); - - rmSync(dir, { recursive: true, force: true }); - }); - - it("should handle values that are empty strings", () => { - const dir = createTempDir(); - const configFile = join(dir, "empty-val.json"); - writeFileSync(configFile, JSON.stringify({ key: "" })); - - const result = runBash(`_load_json_config_fields "${configFile}" key`); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe(""); - - rmSync(dir, { recursive: true, force: true }); - }); - - it("should handle reading results into variables via read", () => { - const dir = createTempDir(); - const configFile = join(dir, "multi.json"); - writeFileSync(configFile, JSON.stringify({ - username: "admin", - password: "hunter2", - })); - - // Test the intended usage pattern: reading into variables - const result = runBash(` - creds=$(_load_json_config_fields "${configFile}" username password) - { read -r user; read -r pass; } <<< "\${creds}" - echo "user=\${user}" - echo "pass=\${pass}" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("user=admin"); - expect(result.stdout).toContain("pass=hunter2"); - - rmSync(dir, { recursive: true, force: true }); - }); -}); - -// ── _save_json_config ─────────────────────────────────────────────────── - -describe("_save_json_config", () => { - it("should save a single key-value pair", () => { - const dir = createTempDir(); - const configFile = join(dir, "out.json"); - - const result = runBash(`_save_json_config "${configFile}" api_key sk-test-123`); - expect(result.exitCode).toBe(0); - - const content = readFileSync(configFile, "utf-8"); - const parsed = JSON.parse(content); - expect(parsed.api_key).toBe("sk-test-123"); - - rmSync(dir, { recursive: true, force: true }); - }); - - it("should save multiple key-value pairs", () => { - const dir = createTempDir(); - const configFile = join(dir, "multi.json"); - - const result = runBash(`_save_json_config "${configFile}" username admin 
password s3cret`); - expect(result.exitCode).toBe(0); - - const content = readFileSync(configFile, "utf-8"); - const parsed = JSON.parse(content); - expect(parsed.username).toBe("admin"); - expect(parsed.password).toBe("s3cret"); - - rmSync(dir, { recursive: true, force: true }); - }); - - it("should create parent directories if needed", () => { - const dir = createTempDir(); - const configFile = join(dir, "nested", "deep", "config.json"); - - const result = runBash(`_save_json_config "${configFile}" key value`); - expect(result.exitCode).toBe(0); - expect(existsSync(configFile)).toBe(true); - - const parsed = JSON.parse(readFileSync(configFile, "utf-8")); - expect(parsed.key).toBe("value"); - - rmSync(dir, { recursive: true, force: true }); - }); - - it("should set restrictive file permissions (600)", () => { - const dir = createTempDir(); - const configFile = join(dir, "perms.json"); - - runBash(`_save_json_config "${configFile}" key value`); - - const result = runBash(`stat -c %a "${configFile}" 2>/dev/null || stat -f %Lp "${configFile}"`); - expect(result.stdout).toBe("600"); - - rmSync(dir, { recursive: true, force: true }); - }); - - it("should properly escape special characters in values", () => { - const dir = createTempDir(); - const configFile = join(dir, "escape.json"); - - const result = runBash(`_save_json_config "${configFile}" token 'value"with"quotes'`); - expect(result.exitCode).toBe(0); - - const content = readFileSync(configFile, "utf-8"); - const parsed = JSON.parse(content); - expect(parsed.token).toBe('value"with"quotes'); - - rmSync(dir, { recursive: true, force: true }); - }); - - it("should handle values with backslashes", () => { - const dir = createTempDir(); - const configFile = join(dir, "backslash.json"); - - const result = runBash(`_save_json_config "${configFile}" path 'C:\\Users\\test'`); - expect(result.exitCode).toBe(0); - - const content = readFileSync(configFile, "utf-8"); - const parsed = JSON.parse(content); - 
expect(parsed.path).toBe("C:\\Users\\test"); - - rmSync(dir, { recursive: true, force: true }); - }); - - it("should handle empty values", () => { - const dir = createTempDir(); - const configFile = join(dir, "empty.json"); - - const result = runBash(`_save_json_config "${configFile}" key ""`); - expect(result.exitCode).toBe(0); - - const content = readFileSync(configFile, "utf-8"); - const parsed = JSON.parse(content); - expect(parsed.key).toBe(""); - - rmSync(dir, { recursive: true, force: true }); - }); - - it("should overwrite existing config file", () => { - const dir = createTempDir(); - const configFile = join(dir, "overwrite.json"); - writeFileSync(configFile, JSON.stringify({ old: "data" })); - - const result = runBash(`_save_json_config "${configFile}" new_key new_value`); - expect(result.exitCode).toBe(0); - - const content = readFileSync(configFile, "utf-8"); - const parsed = JSON.parse(content); - expect(parsed.new_key).toBe("new_value"); - expect(parsed.old).toBeUndefined(); - - rmSync(dir, { recursive: true, force: true }); - }); - - it("should produce valid JSON that _load_json_config_fields can read", () => { - const dir = createTempDir(); - const configFile = join(dir, "roundtrip.json"); - - runBash(`_save_json_config "${configFile}" user testuser pass "hunter2"`); - - const loadResult = runBash(`_load_json_config_fields "${configFile}" user pass`); - expect(loadResult.exitCode).toBe(0); - const lines = loadResult.stdout.split("\n"); - expect(lines[0]).toBe("testuser"); - expect(lines[1]).toBe("hunter2"); - - rmSync(dir, { recursive: true, force: true }); - }); - - it("should handle values with newlines via json_escape", () => { - const dir = createTempDir(); - const configFile = join(dir, "newline.json"); - - // Use printf to pass a value with actual newline - const result = runBash(`_save_json_config "${configFile}" key "$(printf 'line1\\nline2')"`); - expect(result.exitCode).toBe(0); - - const content = readFileSync(configFile, "utf-8"); - 
const parsed = JSON.parse(content); - expect(parsed.key).toBe("line1\nline2"); - - rmSync(dir, { recursive: true, force: true }); - }); -}); - -// ── extract_ssh_key_ids ───────────────────────────────────────────────── - -describe("extract_ssh_key_ids", () => { - it("should extract IDs from DigitalOcean-style response (ssh_keys field)", () => { - const response = JSON.stringify({ - ssh_keys: [ - { id: 12345, name: "my-key-1" }, - { id: 67890, name: "my-key-2" }, - ], - }); - - const result = runBash(`echo '${response}' | python3 -c " -import json, sys -data = json.loads(sys.stdin.read()) -ids = [k['id'] for k in data.get('ssh_keys', [])] -print(json.dumps(ids)) -"`); - expect(result.exitCode).toBe(0); - expect(JSON.parse(result.stdout)).toEqual([12345, 67890]); - }); - - it("should extract IDs from Linode-style response (data field)", () => { - const response = JSON.stringify({ - data: [ - { id: 111, label: "work-key" }, - { id: 222, label: "personal-key" }, - { id: 333, label: "deploy-key" }, - ], - }); - - // Simulate extract_ssh_key_ids with key_field="data" - const result = runBash(`extract_ssh_key_ids '${response}' data`); - expect(result.exitCode).toBe(0); - expect(JSON.parse(result.stdout)).toEqual([111, 222, 333]); - }); - - it("should default to ssh_keys field when no field specified", () => { - const response = JSON.stringify({ - ssh_keys: [{ id: 42, name: "default" }], - }); - - const result = runBash(`extract_ssh_key_ids '${response}'`); - expect(result.exitCode).toBe(0); - expect(JSON.parse(result.stdout)).toEqual([42]); - }); - - it("should return empty array when no keys present", () => { - const response = JSON.stringify({ ssh_keys: [] }); - - const result = runBash(`extract_ssh_key_ids '${response}'`); - expect(result.exitCode).toBe(0); - expect(JSON.parse(result.stdout)).toEqual([]); - }); - - it("should return empty array when field is missing", () => { - const response = JSON.stringify({ other_data: "foo" }); - - const result = 
runBash(`extract_ssh_key_ids '${response}'`); - expect(result.exitCode).toBe(0); - expect(JSON.parse(result.stdout)).toEqual([]); - }); - - it("should handle string IDs (Vultr uses string UUIDs)", () => { - const response = JSON.stringify({ - ssh_keys: [ - { id: "abc-123-def", name: "vultr-key" }, - { id: "xyz-789-uvw", name: "other-key" }, - ], - }); - - const result = runBash(`extract_ssh_key_ids '${response}'`); - expect(result.exitCode).toBe(0); - expect(JSON.parse(result.stdout)).toEqual(["abc-123-def", "xyz-789-uvw"]); - }); - - it("should handle single key in response", () => { - const response = JSON.stringify({ - ssh_keys: [{ id: 99, name: "only-key" }], - }); - - const result = runBash(`extract_ssh_key_ids '${response}'`); - expect(result.exitCode).toBe(0); - expect(JSON.parse(result.stdout)).toEqual([99]); - }); -}); - -// ── _generate_csrf_state ──────────────────────────────────────────────── - -describe("_generate_csrf_state", () => { - it("should generate a non-empty string", () => { - const result = runBash(`_generate_csrf_state`); - expect(result.exitCode).toBe(0); - expect(result.stdout.length).toBeGreaterThan(0); - }); - - it("should generate hex-only output", () => { - const result = runBash(`_generate_csrf_state`); - expect(result.exitCode).toBe(0); - // Output should only contain hexadecimal characters - expect(/^[0-9a-f]+$/.test(result.stdout)).toBe(true); - }); - - it("should generate at least 16 hex chars (64 bits of entropy)", () => { - const result = runBash(`_generate_csrf_state`); - expect(result.exitCode).toBe(0); - expect(result.stdout.length).toBeGreaterThanOrEqual(16); - }); - - it("should generate unique values on consecutive calls", () => { - const result = runBash(` - state1=$(_generate_csrf_state) - state2=$(_generate_csrf_state) - if [[ "\${state1}" == "\${state2}" ]]; then - echo "DUPLICATE" - else - echo "UNIQUE" - fi - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("UNIQUE"); - }); - - it("should work 
with openssl if available", () => { - const result = runBash(` - if command -v openssl &>/dev/null; then - state=$(_generate_csrf_state) - # openssl rand -hex 16 produces exactly 32 hex chars - echo "\${#state}" - else - echo "32" # skip test if openssl not available - fi - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("32"); - }); -}); - -// ── interactive_pick ──────────────────────────────────────────────────── - -describe("interactive_pick", () => { - it("should use environment variable value when set", () => { - const result = runBash(` - export MY_PICK_VAR="from-env" - selected=$(interactive_pick MY_PICK_VAR default-val "options" "echo dummy") - echo "\${selected}" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("from-env"); - }); - - it("should use default when env var is empty and callback returns nothing", () => { - const result = runBash(` - unset MY_PICK_VAR - list_empty() { echo ""; } - selected=$(interactive_pick MY_PICK_VAR "my-default" "options" list_empty) - echo "\${selected}" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("my-default"); - }); - - it("should prefer env var over callback results", () => { - const result = runBash(` - export REGION_VAR="eu-west-1" - list_regions() { echo "us-east-1|US East"; echo "eu-west-1|EU West"; } - selected=$(interactive_pick REGION_VAR "us-east-1" "regions" list_regions) - echo "\${selected}" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("eu-west-1"); - }); -}); - -// ── _save_json_config + _load_json_config_fields roundtrip ────────────── - -describe("_save_json_config + _load_json_config_fields roundtrip", () => { - it("should roundtrip simple credentials", () => { - const dir = createTempDir(); - const configFile = join(dir, "rt.json"); - - const result = runBash(` - _save_json_config "${configFile}" client_id "my-client" client_secret "my-secret" - creds=$(_load_json_config_fields "${configFile}" client_id 
client_secret) - { read -r cid; read -r csec; } <<< "\${creds}" - echo "id=\${cid}" - echo "secret=\${csec}" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("id=my-client"); - expect(result.stdout).toContain("secret=my-secret"); - - rmSync(dir, { recursive: true, force: true }); - }); - - it("should roundtrip values with special chars (quotes, slashes, ampersands)", () => { - const dir = createTempDir(); - const configFile = join(dir, "special-rt.json"); - - const result = runBash(` - _save_json_config "${configFile}" url "https://api.com/v1?a=1&b=2" - loaded=$(_load_json_config_fields "${configFile}" url) - echo "\${loaded}" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("https://api.com/v1?a=1&b=2"); - - rmSync(dir, { recursive: true, force: true }); - }); - - it("should roundtrip API key format values", () => { - const dir = createTempDir(); - const configFile = join(dir, "apikey-rt.json"); - - const result = runBash(` - _save_json_config "${configFile}" token "sk-or-v1-abc123def456ghi789" - loaded=$(_load_json_config_fields "${configFile}" token) - echo "\${loaded}" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("sk-or-v1-abc123def456ghi789"); - - rmSync(dir, { recursive: true, force: true }); - }); - - it("should roundtrip three credentials (UpCloud pattern)", () => { - const dir = createTempDir(); - const configFile = join(dir, "upcloud-rt.json"); - - const result = runBash(` - _save_json_config "${configFile}" username "admin" password "p@ss!w0rd" zone "fi-hel1" - creds=$(_load_json_config_fields "${configFile}" username password zone) - { read -r u; read -r p; read -r z; } <<< "\${creds}" - echo "\${u}|\${p}|\${z}" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("admin|p@ss!w0rd|fi-hel1"); - - rmSync(dir, { recursive: true, force: true }); - }); -}); diff --git a/cli/src/__tests__/shared-common-input-validation.test.ts 
b/cli/src/__tests__/shared-common-input-validation.test.ts deleted file mode 100644 index 5e150dc6..00000000 --- a/cli/src/__tests__/shared-common-input-validation.test.ts +++ /dev/null @@ -1,637 +0,0 @@ -import { describe, it, expect } from "bun:test"; -import { execSync } from "child_process"; -import { resolve } from "path"; - -/** - * Tests for interactive input validation helpers in shared/common.sh: - * - * - get_resource_name: resource name from env var (bypassing safe_read) - * - get_validated_server_name: env-var path + validate_server_name integration - * - get_model_id_interactive: MODEL_ID env var path with validation - * - interactive_pick: env var bypass path, list callback, default selection - * - show_server_name_requirements: output format - * - _display_and_select: menu rendering and default selection (non-stdin paths) - * - validated_read: validation callback contract (via stdin workaround) - * - * These functions are used by every agent/cloud script but had zero test - * coverage. Tests exercise the env-var bypass paths (most critical for - * CI/automated usage) since safe_read requires an interactive terminal. - * - * Agent: test-engineer - */ - -const REPO_ROOT = resolve(import.meta.dir, "../../.."); -const COMMON_SH = resolve(REPO_ROOT, "shared/common.sh"); - -/** - * Run a bash snippet that sources shared/common.sh first. - * Always captures both stdout and stderr (even on success). 
- */ -function runBash( - script: string, - opts?: { env?: Record } -): { exitCode: number; stdout: string; stderr: string } { - const fullScript = `source "${COMMON_SH}"\n${script}`; - const escaped = fullScript.replace(/'/g, "'\\''"); - try { - const stdout = execSync(`bash -c '${escaped}' 2>/tmp/spawn-test-stderr$$`, { - encoding: "utf-8", - timeout: 10000, - env: { ...process.env, ...opts?.env }, - }); - let stderr = ""; - try { - stderr = execSync(`cat /tmp/spawn-test-stderr$$ 2>/dev/null; rm -f /tmp/spawn-test-stderr$$`, { - encoding: "utf-8", - }); - } catch (err: any) { - // Expected: cat fails if file doesn't exist. Log unexpected command failures. - if (err.status !== 1) console.error("Unexpected error in stderr cleanup:", err); - } - return { exitCode: 0, stdout: stdout.trim(), stderr: stderr.trim() }; - } catch (err: any) { - let stderr = (err.stderr || "").trim(); - try { - const captured = execSync(`cat /tmp/spawn-test-stderr$$ 2>/dev/null; rm -f /tmp/spawn-test-stderr$$`, { - encoding: "utf-8", - }); - if (captured.trim()) stderr = captured.trim(); - } catch (captureErr: any) { - // Expected: cat fails if file doesn't exist. - if (captureErr.status !== 1) console.error("Unexpected error capturing stderr:", captureErr); - } - return { - exitCode: err.status ?? 1, - stdout: (err.stdout || "").trim(), - stderr, - }; - } -} - -/** - * Run bash with stderr captured inline via fd redirection. - * Captures both stdout and stderr reliably. 
- */ -function runBashCapture( - script: string, - opts?: { env?: Record } -): { exitCode: number; stdout: string; stderr: string } { - const stderrFile = `/tmp/spawn-test-err-${process.pid}-${Date.now()}`; - const fullScript = `source "${COMMON_SH}"\n${script}`; - const escaped = fullScript.replace(/'/g, "'\\''"); - try { - const stdout = execSync(`bash -c '${escaped}' 2>"${stderrFile}"`, { - encoding: "utf-8", - timeout: 10000, - env: { ...process.env, ...opts?.env }, - }); - let stderr = ""; - try { - stderr = execSync(`cat "${stderrFile}" 2>/dev/null`, { encoding: "utf-8" }); - } catch (err: any) { - // Expected: cat fails if file doesn't exist. - if (err.status !== 1) console.error("Unexpected error reading stderr file:", err); - } - try { execSync(`rm -f "${stderrFile}"`); } catch (err: any) { - console.error("Unexpected error removing stderr file:", err); - } - return { exitCode: 0, stdout: stdout.trim(), stderr: stderr.trim() }; - } catch (err: any) { - let stderr = (err.stderr || "").trim(); - try { - const captured = execSync(`cat "${stderrFile}" 2>/dev/null`, { encoding: "utf-8" }); - if (captured.trim()) stderr = captured.trim(); - } catch (captureErr: any) { - // Expected: cat fails if file doesn't exist. - if (captureErr.status !== 1) console.error("Unexpected error capturing stderr:", captureErr); - } - try { execSync(`rm -f "${stderrFile}"`); } catch (rmErr: any) { - console.error("Unexpected error removing stderr file:", rmErr); - } - return { - exitCode: err.status ?? 
1, - stdout: (err.stdout || "").trim(), - stderr, - }; - } -} - -// ── get_resource_name (env var path) ─────────────────────────────────────── - -describe("get_resource_name", () => { - describe("env var set (bypasses stdin)", () => { - it("should return value from env var", () => { - const result = runBash( - 'get_resource_name "MY_RESOURCE" "Enter resource name: "', - { env: { MY_RESOURCE: "from-env" } } - ); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("from-env"); - }); - - it("should log that value comes from environment", () => { - const result = runBashCapture( - 'get_resource_name "MY_SERVER" "Enter server name: "', - { env: { MY_SERVER: "test-srv" } } - ); - expect(result.exitCode).toBe(0); - expect(result.stderr).toContain("environment"); - }); - - it("should accept hyphenated names", () => { - const result = runBash( - 'get_resource_name "NAME" "Enter: "', - { env: { NAME: "my-server-01" } } - ); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("my-server-01"); - }); - - it("should accept names with underscores", () => { - const result = runBash( - 'get_resource_name "NAME" "Enter: "', - { env: { NAME: "my_server" } } - ); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("my_server"); - }); - - it("should accept names with dots", () => { - const result = runBash( - 'get_resource_name "TYPE" "Enter: "', - { env: { TYPE: "e2.micro" } } - ); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("e2.micro"); - }); - - it("should preserve spaces in env var value", () => { - const result = runBash( - 'get_resource_name "LABEL" "Enter: "', - { env: { LABEL: "My Server Label" } } - ); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("My Server Label"); - }); - }); - - describe("env var not set (stdin path fails without tty)", () => { - it("should fail in non-interactive mode with empty env var", () => { - const result = runBash( - 'get_resource_name "UNSET_VAR_XYZ" "Enter name: "', - ); - 
expect(result.exitCode).not.toBe(0); - }); - - it("should show error about non-interactive mode", () => { - const result = runBashCapture( - 'get_resource_name "UNSET_VAR_XYZ" "Enter name: "', - ); - expect(result.exitCode).not.toBe(0); - // Should mention the env var name users can set - expect(result.stderr).toContain("UNSET_VAR_XYZ"); - }); - }); -}); - -// ── get_validated_server_name (env var + validation) ─────────────────────── - -describe("get_validated_server_name", () => { - describe("valid server names from env var", () => { - it("should accept valid name", () => { - const result = runBash( - 'get_validated_server_name "SERVER_NAME" "Enter name: "', - { env: { SERVER_NAME: "my-server-01" } } - ); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("my-server-01"); - }); - - it("should accept 3-char name (minimum length)", () => { - const result = runBash( - 'get_validated_server_name "NAME" "Enter: "', - { env: { NAME: "abc" } } - ); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("abc"); - }); - - it("should accept 63-char name (maximum length)", () => { - const longName = "a".repeat(63); - const result = runBash( - 'get_validated_server_name "NAME" "Enter: "', - { env: { NAME: longName } } - ); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe(longName); - }); - - it("should accept all-numeric name", () => { - const result = runBash( - 'get_validated_server_name "NAME" "Enter: "', - { env: { NAME: "12345" } } - ); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("12345"); - }); - - it("should accept mixed case name", () => { - const result = runBash( - 'get_validated_server_name "NAME" "Enter: "', - { env: { NAME: "MyServer01" } } - ); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("MyServer01"); - }); - - it("should accept name with interior dashes", () => { - const result = runBash( - 'get_validated_server_name "NAME" "Enter: "', - { env: { NAME: "a-b-c" } } - ); - 
expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("a-b-c"); - }); - }); - - describe("invalid server names rejected from env var", () => { - it("should reject name shorter than 3 chars", () => { - const result = runBashCapture( - 'get_validated_server_name "NAME" "Enter: "', - { env: { NAME: "ab" } } - ); - expect(result.exitCode).not.toBe(0); - expect(result.stderr).toContain("too short"); - }); - - it("should reject single character name", () => { - const result = runBash( - 'get_validated_server_name "NAME" "Enter: "', - { env: { NAME: "x" } } - ); - expect(result.exitCode).not.toBe(0); - }); - - it("should reject name longer than 63 chars", () => { - const longName = "a".repeat(64); - const result = runBashCapture( - 'get_validated_server_name "NAME" "Enter: "', - { env: { NAME: longName } } - ); - expect(result.exitCode).not.toBe(0); - expect(result.stderr).toContain("too long"); - }); - - it("should reject name with special characters", () => { - const result = runBash( - 'get_validated_server_name "NAME" "Enter: "', - { env: { NAME: "server;rm" } } - ); - expect(result.exitCode).not.toBe(0); - }); - - it("should reject name starting with dash", () => { - const result = runBashCapture( - 'get_validated_server_name "NAME" "Enter: "', - { env: { NAME: "-server" } } - ); - expect(result.exitCode).not.toBe(0); - expect(result.stderr).toContain("dash"); - }); - - it("should reject name ending with dash", () => { - const result = runBashCapture( - 'get_validated_server_name "NAME" "Enter: "', - { env: { NAME: "server-" } } - ); - expect(result.exitCode).not.toBe(0); - expect(result.stderr).toContain("dash"); - }); - - it("should reject name with underscores", () => { - const result = runBash( - 'get_validated_server_name "NAME" "Enter: "', - { env: { NAME: "my_server" } } - ); - expect(result.exitCode).not.toBe(0); - }); - - it("should reject name with spaces", () => { - const result = runBash( - 'get_validated_server_name "NAME" "Enter: "', - { env: { 
NAME: "my server" } } - ); - expect(result.exitCode).not.toBe(0); - }); - - it("should reject name with dots", () => { - const result = runBash( - 'get_validated_server_name "NAME" "Enter: "', - { env: { NAME: "my.server" } } - ); - expect(result.exitCode).not.toBe(0); - }); - - it("should reject empty name", () => { - const result = runBash( - 'get_validated_server_name "NAME" "Enter: "', - { env: { NAME: "" } } - ); - expect(result.exitCode).not.toBe(0); - }); - - it("should reject injection attempt with semicolons", () => { - const result = runBash( - 'get_validated_server_name "NAME" "Enter: "', - { env: { NAME: "test;whoami" } } - ); - expect(result.exitCode).not.toBe(0); - }); - - it("should reject injection attempt with backticks", () => { - const result = runBash( - 'get_validated_server_name "NAME" "Enter: "', - { env: { NAME: "test`id`" } } - ); - expect(result.exitCode).not.toBe(0); - }); - - it("should reject path traversal attempt", () => { - const result = runBash( - 'get_validated_server_name "NAME" "Enter: "', - { env: { NAME: "../../../etc" } } - ); - expect(result.exitCode).not.toBe(0); - }); - }); -}); - -// ── get_model_id_interactive ─────────────────────────────────────────────── - -describe("get_model_id_interactive", () => { - describe("MODEL_ID env var set (bypasses stdin)", () => { - it("should return MODEL_ID from env var", () => { - const result = runBash('get_model_id_interactive "openrouter/auto" "Codex"', { - env: { MODEL_ID: "anthropic/claude-3.5-sonnet" }, - }); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("anthropic/claude-3.5-sonnet"); - }); - - it("should accept simple model ID", () => { - const result = runBash('get_model_id_interactive', { - env: { MODEL_ID: "openrouter/auto" }, - }); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("openrouter/auto"); - }); - - it("should accept model ID with version numbers", () => { - const result = runBash('get_model_id_interactive', { - env: { MODEL_ID: 
"anthropic/claude-3.5-sonnet-20241022" }, - }); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("anthropic/claude-3.5-sonnet-20241022"); - }); - - it("should accept model ID with dots", () => { - const result = runBash('get_model_id_interactive', { - env: { MODEL_ID: "google/gemini-1.5-pro" }, - }); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("google/gemini-1.5-pro"); - }); - - it("should accept model ID with colons", () => { - const result = runBash('get_model_id_interactive', { - env: { MODEL_ID: "anthropic/claude-3.5-sonnet:beta" }, - }); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("anthropic/claude-3.5-sonnet:beta"); - }); - }); - - describe("MODEL_ID env var validation failures", () => { - it("should reject MODEL_ID with semicolons (injection)", () => { - const result = runBash('get_model_id_interactive', { - env: { MODEL_ID: "model;rm -rf /" }, - }); - expect(result.exitCode).not.toBe(0); - }); - - it("should reject MODEL_ID with backticks (injection)", () => { - const result = runBash('get_model_id_interactive', { - env: { MODEL_ID: "model`whoami`" }, - }); - expect(result.exitCode).not.toBe(0); - }); - - it("should reject MODEL_ID with dollar-paren (injection)", () => { - const result = runBash('get_model_id_interactive', { - env: { MODEL_ID: "$(whoami)/model" }, - }); - expect(result.exitCode).not.toBe(0); - }); - - it("should reject MODEL_ID with pipe (injection)", () => { - const result = runBash('get_model_id_interactive', { - env: { MODEL_ID: "model|cat /etc/passwd" }, - }); - expect(result.exitCode).not.toBe(0); - }); - - it("should reject MODEL_ID with ampersand (injection)", () => { - const result = runBash('get_model_id_interactive', { - env: { MODEL_ID: "model&whoami" }, - }); - expect(result.exitCode).not.toBe(0); - }); - - it("should show error about invalid characters", () => { - const result = runBashCapture('get_model_id_interactive', { - env: { MODEL_ID: "bad;model" }, - }); - 
expect(result.exitCode).not.toBe(0); - expect(result.stderr).toContain("invalid"); - }); - }); - - describe("MODEL_ID not set (falls through to stdin)", () => { - it("should use default model in non-interactive mode without MODEL_ID", () => { - const result = runBash( - 'get_model_id_interactive "openrouter/auto" "Codex"', - ); - // Falls through to safe_read which fails without tty, - // but the function catches this and uses the default model - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("openrouter/auto"); - }); - - it("should show model browsing hint before prompting", () => { - const result = runBashCapture( - 'get_model_id_interactive "openrouter/auto" "TestAgent"', - ); - expect(result.stderr).toContain("openrouter.ai/models"); - }); - - it("should show agent name in prompt text", () => { - const result = runBashCapture( - 'get_model_id_interactive "openrouter/auto" "Codex"', - ); - expect(result.stderr).toContain("Codex"); - }); - }); -}); - -// ── interactive_pick (env var bypass) ────────────────────────────────────── - -describe("interactive_pick", () => { - describe("env var bypass (most common non-interactive path)", () => { - it("should return env var value without calling list callback", () => { - const result = runBash( - 'interactive_pick "HETZNER_LOCATION" "fsn1" "locations" "echo should-not-see-this"', - { env: { HETZNER_LOCATION: "nbg1" } } - ); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("nbg1"); - // The list callback output should NOT appear since env var takes priority - expect(result.stdout).not.toContain("should-not-see-this"); - }); - - it("should return env var for arbitrary values", () => { - const result = runBash( - 'interactive_pick "MY_ZONE" "us-east-1" "zones" "echo dummy"', - { env: { MY_ZONE: "eu-west-2" } } - ); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("eu-west-2"); - }); - - it("should accept hyphenated env var values", () => { - const result = runBash( - 
'interactive_pick "SERVER_TYPE" "cx23" "types" "echo unused"', - { env: { SERVER_TYPE: "cpx21" } } - ); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("cpx21"); - }); - - it("should accept env var with multiple words", () => { - const result = runBash( - 'interactive_pick "IMAGE_NAME" "ubuntu-22.04" "images" "echo unused"', - { env: { IMAGE_NAME: "debian-12" } } - ); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("debian-12"); - }); - }); - - describe("env var not set: list callback runs", () => { - it("should use default when list callback returns empty", () => { - const result = runBash( - 'no_items() { true; }; interactive_pick "UNSET_XYZ" "default-val" "regions" "no_items"', - ); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("default-val"); - }); - - it("should warn about using default when list is empty", () => { - const result = runBashCapture( - 'no_items() { true; }; interactive_pick "UNSET_XYZ" "fallback" "items" "no_items"', - ); - expect(result.exitCode).toBe(0); - expect(result.stderr).toContain("default"); - }); - - it("should use default even when list callback fails", () => { - const result = runBash( - 'failing_list() { return 1; }; interactive_pick "UNSET_XYZ" "safe-default" "zones" "failing_list"', - ); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("safe-default"); - }); - }); -}); - -// ── show_server_name_requirements ────────────────────────────────────────── - -describe("show_server_name_requirements", () => { - it("should output requirements mentioning character range", () => { - const result = runBashCapture("show_server_name_requirements"); - expect(result.stderr).toContain("3-63"); - }); - - it("should mention alphanumeric characters", () => { - const result = runBashCapture("show_server_name_requirements"); - expect(result.stderr).toContain("letters"); - expect(result.stderr).toContain("numbers"); - }); - - it("should mention dash restriction", () => { - const result = 
runBashCapture("show_server_name_requirements"); - expect(result.stderr).toContain("dash"); - }); -}); - -// ── _display_and_select (rendering, not stdin) ───────────────────────────── - -describe("_display_and_select", () => { - describe("menu rendering to stderr", () => { - it("should display numbered items", () => { - // Will fail on safe_read (no tty) but should still render the menu - const result = runBashCapture( - '_display_and_select "locations" "fsn1" "" <<< "fsn1|Falkenstein|DE\nnbg1|Nuremberg|DE"', - ); - expect(result.stderr).toContain("1)"); - expect(result.stderr).toContain("2)"); - expect(result.stderr).toContain("fsn1"); - expect(result.stderr).toContain("nbg1"); - }); - - it("should display Available heading with prompt text", () => { - const result = runBashCapture( - '_display_and_select "server types" "cx23" "" <<< "cx23|2 vCPU|4 GB"', - ); - expect(result.stderr).toContain("server types"); - }); - - it("should handle single-item list", () => { - const result = runBashCapture( - '_display_and_select "zones" "zone1" "" <<< "zone1|Zone One"', - ); - expect(result.stderr).toContain("1)"); - expect(result.stderr).toContain("zone1"); - }); - - it("should handle many items", () => { - // Build a list of 10 items using printf to get real newlines - const items = Array.from({ length: 10 }, (_, i) => `item${i}|Item ${i}`).join("\\n"); - const result = runBashCapture( - `_display_and_select "options" "item0" "" <<< "$(printf "${items}")"`, - ); - expect(result.stderr).toContain("1)"); - expect(result.stderr).toContain("10)"); - }); - }); - - describe("default value on stdin failure", () => { - it("should output default value when safe_read fails", () => { - // In non-tty mode, safe_read fails, so _display_and_select - // uses the default value (first item index as default) - const result = runBash( - '_display_and_select "locations" "fsn1" "" <<< "fsn1|Falkenstein\nnbg1|Nuremberg"', - ); - // It falls back to default when stdin is unavailable - 
expect(result.stdout).toBe("fsn1"); - }); - }); -}); - -// ── validated_read contract tests ────────────────────────────────────────── -// These test the validator callback contract without needing stdin, -// by verifying what validated_read would accept/reject through -// the validators themselves. - -// Standalone validator tests (validate_api_token, validate_region_name, validate_resource_name) -// are in shared-common-validators.test.ts -// get_validated_server_name boundary tests are covered above diff --git a/cli/src/__tests__/shared-common-json-extraction.test.ts b/cli/src/__tests__/shared-common-json-extraction.test.ts deleted file mode 100644 index 78e656a8..00000000 --- a/cli/src/__tests__/shared-common-json-extraction.test.ts +++ /dev/null @@ -1,277 +0,0 @@ -import { describe, it, expect, beforeEach, afterEach } from "bun:test"; -import { resolve, join } from "path"; -import { mkdirSync, rmSync, existsSync } from "fs"; -import { tmpdir } from "os"; - -/** - * Tests for JSON extraction helpers in shared/common.sh: - * - _extract_json_field: generic JSON field extraction using Python expressions - * - extract_api_error_message: API error message extraction from cloud provider responses - * - * These functions were recently extracted (PRs #673, #767) and are critical - * infrastructure used by cloud providers for JSON parsing and error reporting. - * _extract_json_field is used by generic_wait_for_instance for status polling, - * and extract_api_error_message is used by Hetzner, DigitalOcean, Vultr, and - * Contabo for surfacing actionable error messages. 
- * - * Agent: test-engineer - */ - -const REPO_ROOT = resolve(import.meta.dir, "../../.."); -const COMMON_SH = resolve(REPO_ROOT, "shared/common.sh"); - -let testDir: string; - -beforeEach(() => { - testDir = join(tmpdir(), `spawn-json-test-${Date.now()}-${Math.random().toString(36).slice(2)}`); - mkdirSync(testDir, { recursive: true }); -}); - -afterEach(() => { - if (existsSync(testDir)) { - rmSync(testDir, { recursive: true, force: true }); - } -}); - -/** - * Run a bash snippet that sources shared/common.sh first. - * Returns { exitCode, stdout, stderr }. - */ -function runBash(script: string): { exitCode: number; stdout: string; stderr: string } { - const fullScript = `source "${COMMON_SH}"\n${script}`; - const { spawnSync } = require("child_process"); - const result = spawnSync("bash", ["-c", fullScript], { - encoding: "utf-8", - timeout: 15000, - stdio: ["pipe", "pipe", "pipe"], - }); - return { - exitCode: result.status ?? 1, - stdout: (result.stdout || "").trim(), - stderr: (result.stderr || "").trim(), - }; -} - -// ── _extract_json_field ───────────────────────────────────────────────── - -describe("_extract_json_field", () => { - describe("basic field extraction", () => { - it("should extract a top-level string field", () => { - const result = runBash(` - _extract_json_field '{"name": "test"}' "d['name']" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("test"); - }); - - it("should extract a top-level integer field", () => { - const result = runBash(` - _extract_json_field '{"count": 42}' "d['count']" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("42"); - }); - - it("should extract a nested field", () => { - const result = runBash(` - _extract_json_field '{"server": {"status": "running"}}' "d['server']['status']" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("running"); - }); - - it("should extract a deeply nested field", () => { - const result = runBash(` - _extract_json_field 
'{"a": {"b": {"c": {"d": "deep"}}}}' "d['a']['b']['c']['d']" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("deep"); - }); - - it("should extract a boolean field", () => { - const result = runBash(` - _extract_json_field '{"ready": true}' "d['ready']" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("true"); - }); - - it("should extract a null field and return default", () => { - const result = runBash(` - _extract_json_field '{"value": null}' "d['value']" "fallback" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("fallback"); - }); - }); - - describe("default value handling", () => { - it("should return default when JSON is invalid", () => { - const result = runBash(` - _extract_json_field 'not-json' "d['key']" "fallback" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("fallback"); - }); - - it("should return default when key is missing", () => { - const result = runBash(` - _extract_json_field '{"other": "value"}' "d['missing']" "default-val" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("default-val"); - }); - - it("should return empty string when no default specified and key is missing", () => { - const result = runBash(` - _extract_json_field '{"other": "value"}' "d['missing']" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe(""); - }); - - it("should return default when JSON is empty string", () => { - const result = runBash(` - _extract_json_field '' "d['key']" "empty-fallback" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("empty-fallback"); - }); - - it("should return default when nested key path fails", () => { - const result = runBash(` - _extract_json_field '{"a": {"b": 1}}' "d['a']['c']['d']" "nested-default" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("nested-default"); - }); - }); - - describe("complex JS expressions", () => { - it("should support bracket access for 
existing key", () => { - const result = runBash(` - _extract_json_field '{"status": "active"}' "d['status']" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("active"); - }); - - it("should return default when key missing", () => { - const result = runBash(` - _extract_json_field '{"other": 1}' "d['status']" "unknown" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("unknown"); - }); - - it("should support array indexing", () => { - const result = runBash(` - _extract_json_field '{"ips": ["1.2.3.4", "5.6.7.8"]}' "d['ips'][0]" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("1.2.3.4"); - }); - - it("should support conditional expressions", () => { - const result = runBash(` - _extract_json_field '{"networks": {"v4": [{"ip_address": "10.0.0.1"}]}}' \ - "d['networks']['v4'][0]['ip_address']" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("10.0.0.1"); - }); - }); - - describe("real-world cloud provider patterns", () => { - it("should extract Vultr instance status", () => { - const json = '{"instance": {"status": "active", "main_ip": "203.0.113.1"}}'; - const result = runBash(` - _extract_json_field '${json}' "d['instance']['status']" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("active"); - }); - - it("should extract Vultr instance IP", () => { - const json = '{"instance": {"status": "active", "main_ip": "203.0.113.1"}}'; - const result = runBash(` - _extract_json_field '${json}' "d['instance']['main_ip']" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("203.0.113.1"); - }); - - it("should extract DigitalOcean droplet status", () => { - const json = '{"droplet": {"status": "active", "networks": {"v4": [{"ip_address": "10.0.0.5", "type": "public"}]}}}'; - const result = runBash(` - _extract_json_field '${json}' "d['droplet']['status']" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("active"); - }); - - it("should 
handle unknown status gracefully", () => { - const result = runBash(` - _extract_json_field '{}' "d['instance']['status']" "unknown" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("unknown"); - }); - }); - - describe("edge cases", () => { - it("should handle JSON with special characters in values", () => { - const result = runBash(` - _extract_json_field '{"msg": "hello world & more"}' "d['msg']" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("hello world & more"); - }); - - it("should handle JSON with unicode characters", () => { - const result = runBash(` - _extract_json_field '{"msg": "\\u00e9"}' "d['msg']" - `); - expect(result.exitCode).toBe(0); - // Python should decode the unicode escape - expect(result.stdout.length).toBeGreaterThan(0); - }); - - it("should handle large JSON responses", () => { - // Build a JSON with many keys - const pairs = Array.from({ length: 50 }, (_, i) => `"key${i}": "val${i}"`).join(", "); - const json = `{${pairs}, "target": "found"}`; - const result = runBash(` - _extract_json_field '${json}' "d['target']" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("found"); - }); - - it("should handle JSON with numeric string keys", () => { - const result = runBash(` - _extract_json_field '{"123": "numeric-key"}' "d['123']" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("numeric-key"); - }); - - it("should handle empty JSON object", () => { - const result = runBash(` - _extract_json_field '{}' "d['key']" "empty" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("empty"); - }); - - it("should handle JSON array as root", () => { - const result = runBash(` - _extract_json_field '[1, 2, 3]' "d[0]" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("1"); - }); - }); -}); - -// extract_api_error_message tests are in shared-common-error-polling.test.ts diff --git a/cli/src/__tests__/shared-common-logging-utils.test.ts 
b/cli/src/__tests__/shared-common-logging-utils.test.ts deleted file mode 100644 index 3905a77d..00000000 --- a/cli/src/__tests__/shared-common-logging-utils.test.ts +++ /dev/null @@ -1,620 +0,0 @@ -import { describe, it, expect, beforeEach, afterEach } from "bun:test"; -import { resolve, join } from "path"; -import { mkdirSync, rmSync, existsSync, readFileSync, writeFileSync } from "fs"; -import { tmpdir } from "os"; - -/** - * Tests for logging, diagnostic, temp-file management, runtime detection, - * cloud-init generation, and SSH key helpers in shared/common.sh. - * - * These utility functions had zero dedicated test coverage but are used - * pervasively across all cloud provider scripts: - * - log_step: progress messages (cyan), added in PR #757 - * - _log_diagnostic: structured error output (header + causes + fixes) - * - check_json_processor_available: JSON processor (jq/bun) dependency check - * - find_node_runtime: bun/node detection - * - track_temp_file + cleanup_temp_files: secure credential temp file cleanup - * - get_cloud_init_userdata: cloud-init YAML generation for all providers - * - generate_ssh_key_if_missing: SSH key generation - * - get_ssh_fingerprint: SSH fingerprint extraction - * - calculate_retry_backoff: jittered exponential backoff - * - opencode_install_cmd: opencode install script generation - * - * Agent: test-engineer - */ - -const REPO_ROOT = resolve(import.meta.dir, "../../.."); -const COMMON_SH = resolve(REPO_ROOT, "shared/common.sh"); - -let testDir: string; - -beforeEach(() => { - testDir = join(tmpdir(), `spawn-log-util-test-${Date.now()}-${Math.random().toString(36).slice(2)}`); - mkdirSync(testDir, { recursive: true }); -}); - -afterEach(() => { - if (existsSync(testDir)) { - rmSync(testDir, { recursive: true, force: true }); - } -}); - -/** - * Run a bash snippet that sources shared/common.sh first. - * Returns { exitCode, stdout, stderr }. 
- */ -function runBash(script: string, env?: Record): { exitCode: number; stdout: string; stderr: string } { - const fullScript = `source "${COMMON_SH}"\n${script}`; - const { spawnSync } = require("child_process"); - const result = spawnSync("bash", ["-c", fullScript], { - encoding: "utf-8", - timeout: 15000, - stdio: ["pipe", "pipe", "pipe"], - env: { ...process.env, ...env }, - }); - return { - exitCode: result.status ?? 1, - stdout: (result.stdout || "").trim(), - stderr: (result.stderr || "").trim(), - }; -} - -// ── log_step ──────────────────────────────────────────────────────────────── - -describe("log_step", () => { - it("should output message to stderr", () => { - const result = runBash('log_step "Deploying agent..."'); - expect(result.exitCode).toBe(0); - expect(result.stderr).toContain("Deploying agent..."); - }); - - it("should not output to stdout", () => { - const result = runBash('log_step "Progress message"'); - expect(result.stdout).toBe(""); - }); - - it("should use cyan color codes", () => { - const result = runBash('log_step "Step in progress"'); - // CYAN = \033[36m, NC = \033[0m - expect(result.stderr).toContain("Step in progress"); - // Verify it's different from log_warn (yellow) output - const warnResult = runBash('log_warn "Warning message"'); - // Both write to stderr but with different ANSI codes - expect(result.stderr).not.toBe(warnResult.stderr.replace("Warning message", "Step in progress")); - }); - - it("should handle empty message", () => { - const result = runBash('log_step ""'); - expect(result.exitCode).toBe(0); - }); - - it("should handle message with special characters", () => { - const result = runBash('log_step "Status: 50% done (step 1/3)"'); - expect(result.exitCode).toBe(0); - expect(result.stderr).toContain("Status: 50% done (step 1/3)"); - }); -}); - -// ── _log_diagnostic ───────────────────────────────────────────────────────── - -describe("_log_diagnostic", () => { - it("should output header, causes, and fixes in 
structured format", () => { - const result = runBash(` - _log_diagnostic "Something failed" \\ - "Cause A" \\ - "Cause B" \\ - --- \\ - "Fix 1" \\ - "Fix 2" - `); - expect(result.exitCode).toBe(0); - expect(result.stderr).toContain("Something failed"); - expect(result.stderr).toContain("Possible causes:"); - expect(result.stderr).toContain("Cause A"); - expect(result.stderr).toContain("Cause B"); - expect(result.stderr).toContain("How to fix:"); - expect(result.stderr).toContain("Fix 1"); - expect(result.stderr).toContain("Fix 2"); - }); - - it("should number fix steps sequentially", () => { - const result = runBash(` - _log_diagnostic "Error" \\ - "cause" \\ - --- \\ - "First fix" \\ - "Second fix" \\ - "Third fix" - `); - expect(result.stderr).toContain("1. First fix"); - expect(result.stderr).toContain("2. Second fix"); - expect(result.stderr).toContain("3. Third fix"); - }); - - it("should handle single cause and single fix", () => { - const result = runBash(` - _log_diagnostic "Install failed" \\ - "Network error" \\ - --- \\ - "Retry the command" - `); - expect(result.stderr).toContain("Install failed"); - expect(result.stderr).toContain("Network error"); - expect(result.stderr).toContain("1. 
Retry the command"); - }); - - it("should handle multiple causes", () => { - const result = runBash(` - _log_diagnostic "Auth failed" \\ - "Token expired" \\ - "Token invalid" \\ - "Wrong region" \\ - --- \\ - "Regenerate token" - `); - expect(result.stderr).toContain("Token expired"); - expect(result.stderr).toContain("Token invalid"); - expect(result.stderr).toContain("Wrong region"); - }); - - it("should use bullet points for causes", () => { - const result = runBash(` - _log_diagnostic "Error" \\ - "Cause 1" \\ - --- \\ - "Fix 1" - `); - expect(result.stderr).toContain("- Cause 1"); - }); - - it("should output everything to stderr", () => { - const result = runBash(` - _log_diagnostic "Header" \\ - "Cause" \\ - --- \\ - "Fix" - `); - expect(result.stdout).toBe(""); - expect(result.stderr).toContain("Header"); - }); -}); - -// ── check_json_processor_available ────────────────────────────────────────────────── - -describe("check_json_processor_available", () => { - it("should return 0 when python3 is available", () => { - const result = runBash("check_json_processor_available"); - expect(result.exitCode).toBe(0); - }); - - it("should return 1 when python3 is not in PATH", () => { - const result = runBash("check_json_processor_available", { PATH: "/nonexistent" }); - expect(result.exitCode).toBe(1); - }); - - it("should show install instructions when jq and bun are missing", () => { - // Override command to simulate jq and bun not found (can't restrict PATH since sourcing needs it) - const result = runBash(` - command() { if [[ "$2" == "jq" || "$2" == "bun" ]]; then return 1; fi; builtin command "$@"; } - check_json_processor_available - `); - expect(result.exitCode).toBe(1); - expect(result.stderr).toContain("jq or bun is required"); - expect(result.stderr).toContain("Install jq:"); - }); - - it("should mention Ubuntu, Fedora, macOS, and Arch install options", () => { - const result = runBash(` - command() { if [[ "$2" == "jq" || "$2" == "bun" ]]; then return 1; 
fi; builtin command "$@"; } - check_json_processor_available - `); - expect(result.exitCode).toBe(1); - expect(result.stderr).toContain("Ubuntu/Debian"); - expect(result.stderr).toContain("Fedora/RHEL"); - expect(result.stderr).toContain("macOS"); - expect(result.stderr).toContain("Arch Linux"); - }); -}); - -// ── find_node_runtime ─────────────────────────────────────────────────────── - -describe("find_node_runtime", () => { - it("should find a runtime in normal environment", () => { - const result = runBash("find_node_runtime"); - expect(result.exitCode).toBe(0); - expect(["bun", "node"]).toContain(result.stdout); - }); - - it("should return 1 when neither bun nor node is available", () => { - const result = runBash("find_node_runtime", { PATH: "/nonexistent" }); - expect(result.exitCode).toBe(1); - expect(result.stdout).toBe(""); - }); - - it("should prefer bun over node when both available", () => { - // In the test environment bun is available - const result = runBash("find_node_runtime"); - if (result.stdout === "bun") { - // Confirm bun is indeed preferred - expect(result.stdout).toBe("bun"); - } - // Either way, should succeed - expect(result.exitCode).toBe(0); - }); -}); - -// ── track_temp_file + cleanup_temp_files ──────────────────────────────────── - -describe("track_temp_file and cleanup_temp_files", () => { - it("should add file to CLEANUP_TEMP_FILES array", () => { - const tmpFile = join(testDir, "cred.tmp"); - writeFileSync(tmpFile, "secret-data"); - - const result = runBash(` - track_temp_file "${tmpFile}" - echo "\${#CLEANUP_TEMP_FILES[@]}" - `); - expect(result.exitCode).toBe(0); - // Array should now have at least 1 entry - expect(parseInt(result.stdout)).toBeGreaterThanOrEqual(1); - }); - - it("should clean up tracked temp files", () => { - const tmpFile = join(testDir, "cred.tmp"); - writeFileSync(tmpFile, "secret-data"); - - const result = runBash(` - track_temp_file "${tmpFile}" - cleanup_temp_files - if [[ -f "${tmpFile}" ]]; then echo 
"exists"; else echo "removed"; fi - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("removed"); - }); - - it("should handle multiple tracked files", () => { - const tmpFile1 = join(testDir, "cred1.tmp"); - const tmpFile2 = join(testDir, "cred2.tmp"); - writeFileSync(tmpFile1, "secret-1"); - writeFileSync(tmpFile2, "secret-2"); - - const result = runBash(` - track_temp_file "${tmpFile1}" - track_temp_file "${tmpFile2}" - cleanup_temp_files - f1="removed"; f2="removed" - [[ -f "${tmpFile1}" ]] && f1="exists" - [[ -f "${tmpFile2}" ]] && f2="exists" - echo "$f1 $f2" - `); - expect(result.stdout).toBe("removed removed"); - }); - - it("should not fail if tracked file does not exist", () => { - const result = runBash(` - track_temp_file "/nonexistent/path/file.tmp" - cleanup_temp_files - `); - expect(result.exitCode).toBe(0); - }); - - it("should preserve exit code through cleanup", () => { - const result = runBash(` - cleanup_exit_code_test() { - local exit_code=42 - (exit $exit_code) - cleanup_temp_files - return $? 
- } - cleanup_exit_code_test - `); - // cleanup_temp_files preserves the exit code from before it was called - expect(result.exitCode).toBe(42); - }); - - it("should try shred before rm for security", () => { - const tmpFile = join(testDir, "secure.tmp"); - writeFileSync(tmpFile, "sensitive-credentials"); - - // After cleanup, file should not exist regardless of whether shred or rm was used - const result = runBash(` - track_temp_file "${tmpFile}" - cleanup_temp_files - [[ -f "${tmpFile}" ]] && echo "exists" || echo "removed" - `); - expect(result.stdout).toBe("removed"); - }); -}); - -// ── register_cleanup_trap ─────────────────────────────────────────────────── - -describe("register_cleanup_trap", () => { - it("should register EXIT trap", () => { - const result = runBash(` - register_cleanup_trap - trap -p EXIT - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("cleanup_temp_files"); - }); - - it("should register INT trap", () => { - const result = runBash(` - register_cleanup_trap - trap -p INT - `); - expect(result.stdout).toContain("cleanup_temp_files"); - }); - - it("should register TERM trap", () => { - const result = runBash(` - register_cleanup_trap - trap -p TERM - `); - expect(result.stdout).toContain("cleanup_temp_files"); - }); - - it("should auto-register on source (common.sh sources register_cleanup_trap at bottom)", () => { - // shared/common.sh calls register_cleanup_trap at end of file - const result = runBash("trap -p EXIT"); - expect(result.stdout).toContain("cleanup_temp_files"); - }); -}); - -// ── get_cloud_init_userdata ───────────────────────────────────────────────── - -describe("get_cloud_init_userdata", () => { - it("should output valid cloud-config YAML", () => { - const result = runBash("get_cloud_init_userdata"); - expect(result.exitCode).toBe(0); - expect(result.stdout).toContain("#cloud-config"); - }); - - it("should include package_update directive", () => { - const result = 
runBash("get_cloud_init_userdata"); - expect(result.stdout).toContain("package_update: true"); - }); - - it("should include required packages", () => { - const result = runBash("get_cloud_init_userdata"); - expect(result.stdout).toContain("curl"); - expect(result.stdout).toContain("unzip"); - expect(result.stdout).toContain("git"); - expect(result.stdout).toContain("zsh"); - }); - - it("should install Bun", () => { - const result = runBash("get_cloud_init_userdata"); - expect(result.stdout).toContain("bun.sh/install"); - }); - - it("should install Claude Code", () => { - const result = runBash("get_cloud_init_userdata"); - expect(result.stdout).toContain("claude.ai/install.sh"); - }); - - it("should configure PATH in both .bashrc and .zshrc", () => { - const result = runBash("get_cloud_init_userdata"); - expect(result.stdout).toContain(".bashrc"); - expect(result.stdout).toContain(".zshrc"); - }); - - it("should include .bun/bin in PATH config", () => { - const result = runBash("get_cloud_init_userdata"); - expect(result.stdout).toContain(".bun/bin"); - }); - - it("should signal completion with touch marker", () => { - const result = runBash("get_cloud_init_userdata"); - expect(result.stdout).toContain("touch /root/.cloud-init-complete"); - }); - - it("should include runcmd section", () => { - const result = runBash("get_cloud_init_userdata"); - expect(result.stdout).toContain("runcmd:"); - }); - - it("should include packages section", () => { - const result = runBash("get_cloud_init_userdata"); - expect(result.stdout).toContain("packages:"); - }); -}); - -// ── calculate_retry_backoff ───────────────────────────────────────────────── - -describe("calculate_retry_backoff", () => { - it("should return a value within +-20% jitter of interval", () => { - // Run multiple times and check the range - const results: number[] = []; - for (let i = 0; i < 10; i++) { - const result = runBash("calculate_retry_backoff 10 60"); - results.push(parseInt(result.stdout)); - } - for 
(const val of results) { - // 10 * 0.8 = 8, 10 * 1.2 = 12 - expect(val).toBeGreaterThanOrEqual(8); - expect(val).toBeLessThanOrEqual(12); - } - }); - - it("should return next interval not exceeding max", () => { - const result = runBash("calculate_retry_backoff 50 60"); - const val = parseInt(result.stdout); - // 50 * 0.8 = 40, 50 * 1.2 = 60 - expect(val).toBeGreaterThanOrEqual(40); - expect(val).toBeLessThanOrEqual(60); - }); - - it("should handle interval of 1", () => { - const result = runBash("calculate_retry_backoff 1 60"); - const val = parseInt(result.stdout); - // 1 * 0.8 = 0.8 -> int 0 or 1; 1 * 1.2 = 1.2 -> int 1 - expect(val).toBeGreaterThanOrEqual(0); - expect(val).toBeLessThanOrEqual(2); - }); - - it("should handle equal interval and max", () => { - const result = runBash("calculate_retry_backoff 30 30"); - const val = parseInt(result.stdout); - // Already at max; jitter +-20% of 30 => [24, 36] - expect(val).toBeGreaterThanOrEqual(24); - expect(val).toBeLessThanOrEqual(36); - }); - - it("should fall back to plain interval if python3 unavailable", () => { - const result = runBash("calculate_retry_backoff 5 30", { PATH: "/usr/bin:/bin" }); - // Without python3, should fall back to echo'ing the raw interval - // But python3 might still be available at /usr/bin/python3 - expect(result.exitCode).toBe(0); - const val = parseInt(result.stdout); - expect(val).toBeGreaterThanOrEqual(0); - }); -}); - -// generate_ssh_key_if_missing and get_ssh_fingerprint tests are in shared-common-ssh-key-lifecycle.test.ts - -// ── logging functions ─────────────────────────────────────────────────────── - -describe("logging functions output to stderr", () => { - it("log_info should output to stderr with green color", () => { - const result = runBash('log_info "Info message"'); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe(""); - expect(result.stderr).toContain("Info message"); - }); - - it("log_warn should output to stderr with yellow color", () => { - const 
result = runBash('log_warn "Warning message"'); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe(""); - expect(result.stderr).toContain("Warning message"); - }); - - it("log_error should output to stderr with red color", () => { - const result = runBash('log_error "Error message"'); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe(""); - expect(result.stderr).toContain("Error message"); - }); - - it("log functions should not interfere with stdout piping", () => { - const result = runBash(` - log_info "info" - log_warn "warn" - log_error "error" - log_step "step" - echo "stdout-data" - `); - expect(result.stdout).toBe("stdout-data"); - expect(result.stderr).toContain("info"); - expect(result.stderr).toContain("warn"); - expect(result.stderr).toContain("error"); - expect(result.stderr).toContain("step"); - }); -}); - -// ── opencode_install_cmd ──────────────────────────────────────────────────── - -describe("opencode_install_cmd", () => { - it("should output a non-empty install command", () => { - const result = runBash("opencode_install_cmd"); - expect(result.exitCode).toBe(0); - expect(result.stdout.length).toBeGreaterThan(0); - }); - - it("should include architecture detection", () => { - const result = runBash("opencode_install_cmd"); - expect(result.stdout).toContain("uname -m"); - }); - - it("should include OS detection", () => { - const result = runBash("opencode_install_cmd"); - expect(result.stdout).toContain("uname -s"); - }); - - it("should download from github releases", () => { - const result = runBash("opencode_install_cmd"); - expect(result.stdout).toContain("github.com/anomalyco/opencode"); - }); - - it("should handle aarch64 to arm64 mapping", () => { - const result = runBash("opencode_install_cmd"); - expect(result.stdout).toContain("aarch64"); - expect(result.stdout).toContain("arm64"); - }); - - it("should update PATH in both .bashrc and .zshrc", () => { - const result = runBash("opencode_install_cmd"); - 
expect(result.stdout).toContain(".bashrc"); - expect(result.stdout).toContain(".zshrc"); - }); - - it("should install to $HOME/.opencode/bin", () => { - const result = runBash("opencode_install_cmd"); - expect(result.stdout).toContain(".opencode/bin"); - }); - - it("should use tar to extract the archive", () => { - const result = runBash("opencode_install_cmd"); - expect(result.stdout).toContain("tar xzf"); - }); - - it("should clean up temp install directory", () => { - const result = runBash("opencode_install_cmd"); - expect(result.stdout).toContain("rm -rf /tmp/opencode-install"); - }); -}); - -// ── POLL_INTERVAL configurable constant ───────────────────────────────────── - -describe("POLL_INTERVAL configuration", () => { - it("should default to 1 second", () => { - const result = runBash('echo "$POLL_INTERVAL"'); - expect(result.stdout).toBe("1"); - }); - - it("should respect SPAWN_POLL_INTERVAL env var", () => { - const result = runBash('echo "$POLL_INTERVAL"', { SPAWN_POLL_INTERVAL: "0.1" }); - expect(result.stdout).toBe("0.1"); - }); - - it("should allow custom poll interval for testing", () => { - const result = runBash('echo "$POLL_INTERVAL"', { SPAWN_POLL_INTERVAL: "5" }); - expect(result.stdout).toBe("5"); - }); -}); - -// ── SSH_OPTS default configuration ────────────────────────────────────────── - -describe("SSH_OPTS defaults", () => { - it("should set SSH_OPTS when not pre-defined", () => { - const result = runBash('echo "$SSH_OPTS"'); - expect(result.exitCode).toBe(0); - expect(result.stdout.length).toBeGreaterThan(0); - }); - - it("should use accept-new for strict host key checking (TOFU)", () => { - const result = runBash('echo "$SSH_OPTS"'); - expect(result.stdout).toContain("StrictHostKeyChecking=accept-new"); - }); - - it("should use /dev/null for known hosts file", () => { - const result = runBash('echo "$SSH_OPTS"'); - expect(result.stdout).toContain("UserKnownHostsFile=/dev/null"); - }); - - it("should suppress SSH logging", () => { - const 
result = runBash('echo "$SSH_OPTS"'); - expect(result.stdout).toContain("LogLevel=ERROR"); - }); - - it("should use ed25519 key by default", () => { - const result = runBash('echo "$SSH_OPTS"'); - expect(result.stdout).toContain("id_ed25519"); - }); - - it("should not override pre-existing SSH_OPTS", () => { - const result = runBash('echo "$SSH_OPTS"', { SSH_OPTS: "custom-opts" }); - expect(result.stdout).toBe("custom-opts"); - }); -}); diff --git a/cli/src/__tests__/shared-common-oauth-flow.test.ts b/cli/src/__tests__/shared-common-oauth-flow.test.ts deleted file mode 100644 index 237afc6f..00000000 --- a/cli/src/__tests__/shared-common-oauth-flow.test.ts +++ /dev/null @@ -1,852 +0,0 @@ -import { describe, it, expect, beforeEach, afterEach } from "bun:test"; -import { resolve, join } from "path"; -import { mkdirSync, rmSync, existsSync, readFileSync, writeFileSync, chmodSync } from "fs"; -import { tmpdir } from "os"; - -/** - * Tests for OAuth flow functions in shared/common.sh. - * - * The OAuth flow is the primary authentication mechanism for spawn users, - * yet its component functions had zero test coverage. This file tests: - * - * - validate_oauth_port: port range validation (1024-65535, numeric only) - * - _generate_csrf_state: CSRF token generation (security-critical) - * - _generate_oauth_html: HTML page generation for OAuth callback - * - _generate_oauth_server_script: Node.js callback server generation - * - _validate_oauth_server_args: prerequisite validation (port, state, runtime) - * - _init_oauth_session: temp directory and CSRF state file creation - * - cleanup_oauth_session: PID and directory cleanup - * - exchange_oauth_code: OAuth code-to-key exchange (json_escape security) - * - * These are SECURITY-CRITICAL: CSRF state prevents OAuth code interception, - * port validation prevents injection, and json_escape in exchange_oauth_code - * prevents JSON injection via crafted OAuth codes. 
- * - * Agent: test-engineer - */ - -const REPO_ROOT = resolve(import.meta.dir, "../../.."); -const COMMON_SH = resolve(REPO_ROOT, "shared/common.sh"); - -let testDir: string; - -beforeEach(() => { - testDir = join(tmpdir(), `spawn-oauth-test-${Date.now()}-${Math.random().toString(36).slice(2)}`); - mkdirSync(testDir, { recursive: true }); -}); - -afterEach(() => { - if (existsSync(testDir)) { - rmSync(testDir, { recursive: true, force: true }); - } -}); - -/** - * Run a bash snippet that sources shared/common.sh first. - * Returns { exitCode, stdout, stderr }. - */ -function runBash(script: string, env?: Record): { exitCode: number; stdout: string; stderr: string } { - const fullScript = `source "${COMMON_SH}"\n${script}`; - const { spawnSync } = require("child_process"); - const result = spawnSync("bash", ["-c", fullScript], { - encoding: "utf-8", - timeout: 15000, - stdio: ["pipe", "pipe", "pipe"], - env: { ...process.env, ...env }, - }); - return { - exitCode: result.status ?? 1, - stdout: (result.stdout || "").trim(), - stderr: (result.stderr || "").trim(), - }; -} - -// ── validate_oauth_port ─────────────────────────────────────────────────────── - -describe("validate_oauth_port", () => { - describe("accepts valid ports", () => { - const validPorts = ["1024", "5180", "8080", "9999", "49152", "65535"]; - for (const port of validPorts) { - it(`should accept port ${port}`, () => { - const result = runBash(`validate_oauth_port "${port}"`); - expect(result.exitCode).toBe(0); - }); - } - }); - - describe("rejects privileged ports (below 1024)", () => { - const privilegedPorts = ["0", "1", "22", "80", "443", "1023"]; - for (const port of privilegedPorts) { - it(`should reject port ${port}`, () => { - const result = runBash(`validate_oauth_port "${port}"`); - expect(result.exitCode).toBe(1); - }); - } - }); - - describe("rejects ports above 65535", () => { - it("should reject port 65536", () => { - const result = runBash(`validate_oauth_port "65536"`); - 
expect(result.exitCode).toBe(1); - }); - - it("should reject port 99999", () => { - const result = runBash(`validate_oauth_port "99999"`); - expect(result.exitCode).toBe(1); - }); - }); - - describe("rejects non-numeric input", () => { - it("should reject alphabetic string", () => { - const result = runBash(`validate_oauth_port "abc"`); - expect(result.exitCode).toBe(1); - }); - - it("should reject empty string", () => { - const result = runBash(`validate_oauth_port ""`); - expect(result.exitCode).toBe(1); - }); - - it("should reject port with spaces", () => { - const result = runBash(`validate_oauth_port "80 80"`); - expect(result.exitCode).toBe(1); - }); - - it("should reject port with special characters", () => { - const result = runBash(`validate_oauth_port "5180;echo"`); - expect(result.exitCode).toBe(1); - }); - - it("should reject negative number", () => { - const result = runBash(`validate_oauth_port "-1"`); - expect(result.exitCode).toBe(1); - }); - - it("should reject decimal number", () => { - const result = runBash(`validate_oauth_port "5180.5"`); - expect(result.exitCode).toBe(1); - }); - }); - - describe("boundary values", () => { - it("should reject port 1023 (just below valid range)", () => { - const result = runBash(`validate_oauth_port "1023"`); - expect(result.exitCode).toBe(1); - }); - - it("should accept port 1024 (lower boundary)", () => { - const result = runBash(`validate_oauth_port "1024"`); - expect(result.exitCode).toBe(0); - }); - - it("should accept port 65535 (upper boundary)", () => { - const result = runBash(`validate_oauth_port "65535"`); - expect(result.exitCode).toBe(0); - }); - - it("should reject port 65536 (just above valid range)", () => { - const result = runBash(`validate_oauth_port "65536"`); - expect(result.exitCode).toBe(1); - }); - }); - - describe("error messages", () => { - it("should show 'must be numeric' for non-numeric input", () => { - const result = runBash(`validate_oauth_port "abc"`); - 
expect(result.stderr).toContain("must be numeric"); - }); - - it("should show 'must be between' for out-of-range port", () => { - const result = runBash(`validate_oauth_port "80"`); - expect(result.stderr).toContain("must be between"); - }); - }); -}); - -// ── _generate_csrf_state ────────────────────────────────────────────────────── - -describe("_generate_csrf_state", () => { - it("should generate a non-empty string", () => { - const result = runBash("_generate_csrf_state"); - expect(result.exitCode).toBe(0); - expect(result.stdout.length).toBeGreaterThan(0); - }); - - it("should generate hex-only output", () => { - const result = runBash("_generate_csrf_state"); - expect(result.exitCode).toBe(0); - expect(result.stdout).toMatch(/^[0-9a-f]+$/); - }); - - it("should generate at least 16 hex characters (64 bits of entropy)", () => { - const result = runBash("_generate_csrf_state"); - expect(result.exitCode).toBe(0); - expect(result.stdout.length).toBeGreaterThanOrEqual(16); - }); - - it("should generate different values on consecutive calls", () => { - const result = runBash(` - state1=$(_generate_csrf_state) - state2=$(_generate_csrf_state) - if [[ "$state1" == "$state2" ]]; then - echo "SAME" - exit 1 - fi - echo "DIFFERENT" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout).toBe("DIFFERENT"); - }); - - it("should work with openssl fallback", () => { - // Test the primary openssl path (if available) - const result = runBash(` - if command -v openssl &>/dev/null; then - state=$(_generate_csrf_state) - echo "$state" - else - echo "no-openssl" - fi - `); - expect(result.exitCode).toBe(0); - if (result.stdout !== "no-openssl") { - // openssl rand -hex 16 produces exactly 32 hex chars - expect(result.stdout.length).toBe(32); - } - }); - - it("should produce output safe for embedding in URLs and filenames", () => { - const result = runBash("_generate_csrf_state"); - expect(result.exitCode).toBe(0); - // No special characters, spaces, or newlines - 
expect(result.stdout).not.toContain(" "); - expect(result.stdout).not.toContain("\n"); - expect(result.stdout).not.toContain("/"); - expect(result.stdout).not.toContain("&"); - expect(result.stdout).not.toContain("?"); - }); -}); - -// ── _generate_oauth_html ────────────────────────────────────────────────────── - -describe("_generate_oauth_html", () => { - it("should set OAUTH_SUCCESS_HTML variable", () => { - const result = runBash(` - _generate_oauth_html - echo "$OAUTH_SUCCESS_HTML" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout.length).toBeGreaterThan(0); - }); - - it("should set OAUTH_ERROR_HTML variable", () => { - const result = runBash(` - _generate_oauth_html - echo "$OAUTH_ERROR_HTML" - `); - expect(result.exitCode).toBe(0); - expect(result.stdout.length).toBeGreaterThan(0); - }); - - it("should produce valid HTML in success page", () => { - const result = runBash(` - _generate_oauth_html - echo "$OAUTH_SUCCESS_HTML" - `); - expect(result.stdout).toContain(""); - expect(result.stdout).toContain(""); - }); - - it("should include success message in success HTML", () => { - const result = runBash(` - _generate_oauth_html - echo "$OAUTH_SUCCESS_HTML" - `); - expect(result.stdout).toContain("Authentication Successful"); - }); - - it("should include auto-close script in success HTML", () => { - const result = runBash(` - _generate_oauth_html - echo "$OAUTH_SUCCESS_HTML" - `); - expect(result.stdout).toContain("window.close"); - }); - - it("should include CSRF protection message in error HTML", () => { - const result = runBash(` - _generate_oauth_html - echo "$OAUTH_ERROR_HTML" - `); - expect(result.stdout).toContain("CSRF"); - }); - - it("should include 'Authentication Failed' in error HTML", () => { - const result = runBash(` - _generate_oauth_html - echo "$OAUTH_ERROR_HTML" - `); - expect(result.stdout).toContain("Authentication Failed"); - }); - - it("should include 'try again' guidance in error HTML", () => { - const result = runBash(` - 
_generate_oauth_html - echo "$OAUTH_ERROR_HTML" - `); - expect(result.stdout).toContain("try again"); - }); - - it("should include CSS styling in both pages", () => { - const result = runBash(` - _generate_oauth_html - echo "$OAUTH_SUCCESS_HTML" - echo "---SEPARATOR---" - echo "$OAUTH_ERROR_HTML" - `); - const parts = result.stdout.split("---SEPARATOR---"); - expect(parts[0]).toContain("

Authentication Successful

You can close this tab and return to your terminal.

" - OAUTH_ERROR_HTML="

Authentication Failed

Invalid or missing state parameter (CSRF protection). Please try again.

" -} - -# Validate OAuth server prerequisites (port, state token, runtime) -# Sets OAUTH_RUNTIME and OAUTH_STATE variables on success -# $1=starting_port $2=state_file -_validate_oauth_server_args() { - local starting_port="${1}" - local state_file="${2}" - - OAUTH_RUNTIME=$(find_node_runtime) || { log_warn "No Node.js runtime found"; return 1; } - - # SECURITY: Validate port number to prevent injection - if ! validate_oauth_port "${starting_port}"; then - log_error "OAuth server port validation failed" - return 1 - fi - - # SECURITY: Read CSRF state token for validation - OAUTH_STATE=$(cat "${state_file}" 2>/dev/null || echo "") - if [[ -z "${OAUTH_STATE}" ]]; then - log_error "CSRF state token file is missing or empty" - return 1 - fi -} - -# Generate the Node.js script for the OAuth callback server -# $1=expected_state $2=success_html $3=error_html $4=code_file $5=port_file $6=starting_port -_generate_oauth_server_script() { - local expected_state="${1}" success_html="${2}" error_html="${3}" - local code_file="${4}" port_file="${5}" starting_port="${6}" - - # SECURITY: Escape single quotes in all parameters to prevent injection - # When parameters are embedded in the Node.js script string, unescaped quotes - # could break out of the string context and execute arbitrary code - expected_state="${expected_state//\'/\\\'}" - success_html="${success_html//\'/\\\'}" - error_html="${error_html//\'/\\\'}" - code_file="${code_file//\'/\\\'}" - port_file="${port_file//\'/\\\'}" - - printf '%s' " -import http from 'http'; -import fs from 'fs'; -import url from 'url'; -const expectedState = '${expected_state}'; -const html = '${success_html}'; -const errorHtml = '${error_html}'; -const server = http.createServer((req, res) => { - const parsed = url.parse(req.url, true); - if (parsed.pathname === '/callback' && parsed.query.code) { - if (!parsed.query.state || parsed.query.state !== expectedState) { - res.writeHead(403, {'Content-Type':'text/html','Connection':'close'}); - 
res.end(errorHtml); - setTimeout(() => { server.close(); process.exit(1); }, 500); - return; - } - // SECURITY: Validate OAuth code format before writing to file - // OpenRouter OAuth codes are alphanumeric with hyphens/underscores, typically 32-64 chars - const code = String(parsed.query.code || ''); - if (!/^[a-zA-Z0-9_-]{16,128}$/.test(code)) { - res.writeHead(400, {'Content-Type':'text/html','Connection':'close'}); - res.end('

Invalid OAuth Code

The authorization code format is invalid.

'); - setTimeout(() => { server.close(); process.exit(1); }, 500); - return; - } - fs.writeFileSync('${code_file}', code); - res.writeHead(200, {'Content-Type':'text/html','Connection':'close'}); - res.end(html); - setTimeout(() => { server.close(); process.exit(0); }, 500); - } else { - res.writeHead(200, {'Content-Type':'text/html'}); - res.end('Waiting for OAuth callback...'); - } -}); -let currentPort = ${starting_port}; -const maxPort = ${starting_port} + 10; -function tryListen() { - server.listen(currentPort, '127.0.0.1', () => { - fs.writeFileSync('${port_file}', currentPort.toString()); - fs.writeFileSync('/dev/fd/1', ''); - }); -} -server.on('error', (err) => { - if (err.code === 'EADDRINUSE' && currentPort < maxPort) { - currentPort++; - tryListen(); - } else { - process.exit(1); - } -}); -setTimeout(() => process.exit(0), 300000); -tryListen(); -" -} - -# Start OAuth callback server using Node.js/Bun HTTP server -# Proper HTTP server — handles multiple connections, favicon requests, etc. -# Tries a range of ports if the initial port is busy -# $1=starting_port $2=code_file $3=port_file (writes actual port used) $4=state_file (CSRF token) -# Returns: server PID -# SECURITY: Validates port number and CSRF state parameter -start_oauth_server() { - local starting_port="${1}" - local code_file="${2}" - local port_file="${3}" - local state_file="${4}" - - _validate_oauth_server_args "${starting_port}" "${state_file}" || return 1 - - _generate_oauth_html - local script - script=$(_generate_oauth_server_script "${OAUTH_STATE}" "${OAUTH_SUCCESS_HTML}" "${OAUTH_ERROR_HTML}" \ - "${code_file}" "${port_file}" "${starting_port}") - - "${OAUTH_RUNTIME}" -e "${script}" /dev/null 2>&1 & - - echo $! 
-} - -# Wait for OAuth code with timeout, returns 0 if code received -wait_for_oauth_code() { - local code_file="${1}" - local timeout="${2:-120}" - local elapsed=0 - - log_step "Waiting for authentication in browser (this usually takes 10-30 seconds, timeout: ${timeout}s)..." - while [[ ! -f "${code_file}" ]] && [[ ${elapsed} -lt ${timeout} ]]; do - sleep "${POLL_INTERVAL}" - # Use bun for float addition since bash arithmetic only handles integers - # If POLL_INTERVAL is 0.5, bash $(( )) would fail. Fallback keeps timeout working. - if command -v bun &>/dev/null; then - elapsed=$(_E="${elapsed}" _P="${POLL_INTERVAL}" bun -e "process.stdout.write(String(Math.floor(Number(process.env._E) + Number(process.env._P))))" 2>/dev/null || echo "$((elapsed + 1))") - else - # No bun available - fall back to integer seconds (may timeout early with fractional POLL_INTERVAL) - elapsed=$((elapsed + 1)) - fi - done - - [[ -f "${code_file}" ]] -} - -# Exchange OAuth code for API key -exchange_oauth_code() { - local oauth_code="${1}" - - # SECURITY: Use json_escape to prevent JSON injection via crafted OAuth codes - local escaped_code - escaped_code=$(json_escape "${oauth_code}") - - local key_response curl_exit - key_response=$(curl -s --max-time 30 -X POST "https://openrouter.ai/api/v1/auth/keys" \ - -H "Content-Type: application/json" \ - -d "{\"code\": ${escaped_code}}" 2>&1) - curl_exit=$? 
- - if [[ ${curl_exit} -ne 0 ]]; then - log_error "Failed to contact OpenRouter API (curl exit code: ${curl_exit})" - log_warn "This may indicate a network issue or temporary service outage" - log_warn "Please check your internet connection and try again" - return 1 - fi - - local api_key - api_key=$(echo "${key_response}" | grep -o '"key":"[^"]*"' | sed 's/"key":"//;s/"$//') - - if [[ -z "${api_key}" ]]; then - log_error "Failed to exchange OAuth code for API key" - log_warn "Server response: ${key_response}" - log_warn "This may indicate the OAuth code expired or was already used" - log_warn "Please try again, or set OPENROUTER_API_KEY manually" - return 1 - fi - - echo "${api_key}" -} - -# Clean up OAuth session resources -cleanup_oauth_session() { - local server_pid="${1}" - local oauth_dir="${2}" - - if [[ -n "${server_pid}" ]]; then - # Verify PID still exists before killing to prevent race conditions - if kill -0 "${server_pid}" 2>/dev/null; then - # Kill process group to catch any child processes (netcat listeners, etc) - kill -TERM "-${server_pid}" 2>/dev/null || kill "${server_pid}" 2>/dev/null || true - # Give it time to shut down gracefully - sleep 0.5 - # Force kill if still running - kill -KILL "-${server_pid}" 2>/dev/null || true - wait "${server_pid}" 2>/dev/null || true - fi - fi - - # SAFETY: Validate path before rm -rf to prevent accidental deletion of system directories - # Only delete if: - # 1. Variable is non-empty - # 2. Directory exists - # 3. Path starts with /tmp/ (mktemp always creates in /tmp) - # 4. 
Path contains more than just /tmp (prevent rm -rf /tmp) - if [[ -n "${oauth_dir}" && -d "${oauth_dir}" && "${oauth_dir}" == /tmp/* && "${oauth_dir}" != "/tmp" && "${oauth_dir}" != "/tmp/" ]]; then - rm -rf "${oauth_dir}" - fi -} - -# Check network connectivity to OpenRouter -# Returns 0 if reachable, 1 if network is unreachable -check_openrouter_connectivity() { - local host="openrouter.ai" - local port="443" - local timeout=5 - - # Try curl with short timeout if available - if command -v curl &> /dev/null; then - if curl -s --connect-timeout "${timeout}" --max-time "${timeout}" "https://${host}" -o /dev/null 2>/dev/null; then - return 0 - fi - fi - - # Fallback to nc/telnet test - if command -v nc &> /dev/null; then - if timeout "${timeout}" nc -z "${host}" "${port}" 2>/dev/null; then - return 0 - fi - elif command -v timeout &> /dev/null && command -v bash &> /dev/null; then - # Bash TCP socket test as last resort - if timeout "${timeout}" bash -c "exec 3<>/dev/tcp/${host}/${port}" 2>/dev/null; then - return 0 - fi - fi - - return 1 -} - -# Start OAuth server and wait for it to be ready -# Returns: "port_number" on success, "" on failure (cleanup handled by caller) -start_and_verify_oauth_server() { - local callback_port="${1}" - local code_file="${2}" - local port_file="${3}" - local state_file="${4}" - local server_pid="${5}" - - sleep "${POLL_INTERVAL}" - if ! kill -0 "${server_pid}" 2>/dev/null; then - log_warn "Failed to start OAuth server - ports ${callback_port}-$((callback_port + 10)) may be in use" - log_warn "Try closing other dev servers or set OPENROUTER_API_KEY to skip OAuth" - return 1 - fi - - # Wait for port file to be created (server successfully bound to a port) - local wait_count=0 - while [[ ! -f "${port_file}" ]] && [[ ${wait_count} -lt 10 ]]; do - sleep 0.2 - wait_count=$((wait_count + 1)) - done - - if [[ ! 
-f "${port_file}" ]]; then - log_warn "OAuth server failed to allocate a port after 2 seconds" - log_warn "Another process may be using ports ${callback_port}-$((callback_port + 10))" - return 1 - fi - - cat "${port_file}" -} - -# Validate OAuth prerequisites (network, Node.js runtime) -# Returns 0 if all checks pass, 1 otherwise -_check_oauth_prerequisites() { - if ! check_openrouter_connectivity; then - log_warn "Cannot reach openrouter.ai - network may be unavailable" - log_warn "Please check your internet connection and try again" - log_warn "Alternatively, set OPENROUTER_API_KEY in your environment to skip OAuth" - return 1 - fi - - local runtime - runtime=$(find_node_runtime) - if [[ -z "${runtime}" ]]; then - log_warn "No Node.js runtime (bun/node) found - required for the OAuth callback server" - log_warn "Install one with: brew install node OR curl -fsSL https://bun.sh/install | bash" - return 1 - fi - - return 0 -} - -# Start OAuth server and return actual port, cleanup on failure -# Sets server_pid and returns 0 on success, 1 on failure -_setup_oauth_server() { - local callback_port="${1}" - local code_file="${2}" - local port_file="${3}" - local state_file="${4}" - local pid_file="${5}" - - log_step "Starting local OAuth server (trying ports ${callback_port}-$((callback_port + 10)))..." 
- local server_pid - server_pid=$(start_oauth_server "${callback_port}" "${code_file}" "${port_file}" "${state_file}") - - # Persist server PID to file for reliable retrieval - if [[ -n "${pid_file}" && -n "${server_pid}" ]]; then - printf '%s' "${server_pid}" > "${pid_file}" - fi - - local actual_port - actual_port=$(start_and_verify_oauth_server "${callback_port}" "${code_file}" "${port_file}" "${state_file}" "${server_pid}") - if [[ -z "${actual_port}" ]]; then - return 1 - fi - - log_info "OAuth server listening on port ${actual_port}" - echo "${actual_port}" - return 0 -} - -# Wait for OAuth code with timeout and cleanup on failure -# Returns 0 on success, 1 on failure -_wait_for_oauth() { - local code_file="${1}" - - if ! wait_for_oauth_code "${code_file}" 120; then - log_warn "OAuth timeout - no response received" - return 1 - fi - return 0 -} - -# Try OAuth flow (orchestrates the helper functions above) -# SECURITY: Generates CSRF state token to prevent OAuth code interception -_generate_csrf_state() { - if command -v openssl &>/dev/null; then - openssl rand -hex 16 - elif [[ -r /dev/urandom ]]; then - od -An -N16 -tx1 /dev/urandom | tr -d ' \n' - else - log_error "Cannot generate secure CSRF token: neither openssl nor /dev/urandom available" - log_error "Install openssl or ensure /dev/urandom is readable" - return 1 - fi -} - -# Create temp directory with OAuth session files and CSRF state -_init_oauth_session() { - local oauth_dir - oauth_dir=$(mktemp -d) || { - log_error "Failed to create temporary directory for OAuth session" - log_error "Check disk space and /tmp permissions" - return 1 - } - - # SAFETY: Verify mktemp succeeded before proceeding - if [[ -z "${oauth_dir}" || ! 
-d "${oauth_dir}" ]]; then - log_error "Failed to create temporary directory for OAuth session" - log_error "Check disk space and /tmp permissions" - return 1 - fi - - # SECURITY: Generate random CSRF state token (32 hex chars = 128 bits) - local csrf_state - csrf_state=$(_generate_csrf_state) - printf '%s' "${csrf_state}" > "${oauth_dir}/state" || { - rm -rf "${oauth_dir}" - log_error "Failed to write OAuth state file" - return 1 - } - chmod 600 "${oauth_dir}/state" - - echo "${oauth_dir}" -} - -# Open browser and wait for OAuth callback, returning the auth code -# Outputs the OAuth code on success, returns 1 on timeout -_await_oauth_callback() { - local code_file="${1}" - local server_pid="${2}" - local oauth_dir="${3}" - local actual_port="${4}" - local csrf_state="${5}" - local spawn_agent_slug="${6:-}" - local spawn_cloud_slug="${7:-}" - - local callback_url="http://localhost:${actual_port}/callback" - local auth_url="https://openrouter.ai/auth?callback_url=${callback_url}&state=${csrf_state}" - if [[ -n "${spawn_agent_slug}" ]]; then auth_url="${auth_url}&spawn_agent=${spawn_agent_slug}"; fi - if [[ -n "${spawn_cloud_slug}" ]]; then auth_url="${auth_url}&spawn_cloud=${spawn_cloud_slug}"; fi - log_step "Opening browser to authenticate with OpenRouter..." - open_browser "${auth_url}" - - if ! _wait_for_oauth "${code_file}"; then - cleanup_oauth_session "${server_pid}" "${oauth_dir}" - log_error "OAuth authentication timed out after 120 seconds" - log_error "" - log_error "The authentication flow was not completed in time." - log_error "" - log_error "Troubleshooting:" - log_error " 1. Check if your browser opened to openrouter.ai" - log_error " 2. Complete the authentication and allow the redirect" - log_error " 3. Ensure port ${actual_port} is not blocked by firewall/proxy" - log_error "" - log_error "Alternative: Use a manual API key instead" - log_error " export OPENROUTER_API_KEY=sk-or-v1-..." 
- log_error " Get a key at: https://openrouter.ai/settings/keys" - return 1 - fi - - cat "${code_file}" -} - -# Helper: Start OAuth server and get session details -# Returns: "port|pid|oauth_dir" on success, "" on failure -_start_oauth_session_with_server() { - local callback_port="${1}" - - local oauth_dir - oauth_dir=$(_init_oauth_session) - local code_file="${oauth_dir}/code" - local pid_file="${oauth_dir}/server_pid" - - local actual_port - actual_port=$(_setup_oauth_server "${callback_port}" "${code_file}" "${oauth_dir}/port" "${oauth_dir}/state" "${pid_file}") || { - cleanup_oauth_session "" "${oauth_dir}" - return 1 - } - - local server_pid - server_pid=$(cat "${pid_file}" 2>/dev/null || echo "") - if [[ -z "${server_pid}" ]]; then - log_error "Failed to retrieve OAuth server PID" - cleanup_oauth_session "" "${oauth_dir}" - return 1 - fi - - echo "${actual_port}|${server_pid}|${oauth_dir}" -} - -try_oauth_flow() { - local callback_port=${1:-5180} - local spawn_agent_slug="${2:-}" - local spawn_cloud_slug="${3:-}" - - log_step "Attempting OAuth authentication..." - - if ! _check_oauth_prerequisites; then - return 1 - fi - - local session_info - session_info=$(_start_oauth_session_with_server "${callback_port}") || return 1 - - local actual_port server_pid oauth_dir - IFS='|' read -r actual_port server_pid oauth_dir <<< "${session_info}" - - local csrf_state - csrf_state=$(cat "${oauth_dir}/state") - - # Open browser and wait for callback - local oauth_code - oauth_code=$(_await_oauth_callback "${oauth_dir}/code" "${server_pid}" "${oauth_dir}" "${actual_port}" "${csrf_state}" "${spawn_agent_slug}" "${spawn_cloud_slug}") || return 1 - cleanup_oauth_session "${server_pid}" "${oauth_dir}" - - # Exchange code for API key - log_step "Exchanging OAuth code for API key..." - local api_key - api_key=$(exchange_oauth_code "${oauth_code}") || return 1 - - log_info "Successfully obtained OpenRouter API key via OAuth!" 
- echo "${api_key}" -} - -# Main function: Try OAuth, fallback to manual entry -get_openrouter_api_key_oauth() { - local callback_port=${1:-5180} - local spawn_agent_slug="${2:-}" - local spawn_cloud_slug="${3:-}" - - # Try OAuth flow first - local api_key - api_key=$(try_oauth_flow "${callback_port}" "${spawn_agent_slug}" "${spawn_cloud_slug}") - - if [[ -n "${api_key}" ]]; then - echo "${api_key}" - return 0 - fi - - # OAuth failed, offer manual entry - echo "" >&2 - log_warn "Browser-based OAuth login was not completed." - log_warn "This is normal on remote servers, SSH sessions, or headless environments." - log_info "You can paste an API key instead. Create one at: https://openrouter.ai/settings/keys" - echo "" >&2 - local manual_choice - manual_choice=$(safe_read "Paste your API key manually? (Y/n): ") || { - log_error "Cannot prompt for manual entry in non-interactive mode" - log_warn "Set OPENROUTER_API_KEY environment variable before running spawn" - return 1 - } - - if [[ "${manual_choice}" =~ ^[Nn]$ ]]; then - log_error "Authentication cancelled. An OpenRouter API key is required to use spawn." - log_warn "To authenticate, either:" - log_warn " - Re-run this command and complete the OAuth flow in your browser" - log_warn " - Set OPENROUTER_API_KEY=sk-or-v1-... before running spawn" - log_warn " - Create a key at: https://openrouter.ai/settings/keys" - return 1 - fi - - api_key=$(get_openrouter_api_key_manual) - echo "${api_key}" -} - -# ============================================================ -# Environment injection helpers -# ============================================================ - -# Generate environment variable config content -# Usage: generate_env_config KEY1=val1 KEY2=val2 ... -# Outputs the env config to stdout -# SECURITY: Values are single-quoted to prevent shell injection when sourced. -# Single quotes prevent all interpretation of special characters ($, `, \, etc.) 
-generate_env_config() { - echo "" - echo "# [spawn:env]" - # All spawn environments are disposable cloud VMs — mark as sandbox - echo "export IS_SANDBOX='1'" - for env_pair in "$@"; do - local key="${env_pair%%=*}" - local value="${env_pair#*=}" - - # SECURITY: Validate environment variable names to prevent injection - # Only allow uppercase letters, numbers, and underscores (standard env var format) - if [[ ! "${key}" =~ ^[A-Z_][A-Z0-9_]*$ ]]; then - log_error "SECURITY: Invalid environment variable name rejected: ${key}" - continue - fi - - # Escape any single quotes in the value: replace ' with '\'' - # Use sed instead of ${//} pattern substitution for bash 3.2 (macOS) compat - local escaped_value - escaped_value=$(printf '%s' "$value" | sed "s/'/'\\\\''/g") - echo "export ${key}='${escaped_value}'" - done -} - -# Inject environment variables into remote server's shell config (SSH-based clouds) -# Usage: inject_env_vars_ssh SERVER_IP UPLOAD_FUNC RUN_FUNC KEY1=val1 KEY2=val2 ... -# Example: inject_env_vars_ssh "$DO_SERVER_IP" upload_file run_server \ -# "OPENROUTER_API_KEY=$OPENROUTER_API_KEY" \ -# "ANTHROPIC_BASE_URL=https://openrouter.ai/api" -inject_env_vars_ssh() { - local server_ip="${1}" - local upload_func="${2}" - local run_func="${3}" - shift 3 - - local env_temp - env_temp=$(mktemp) - chmod 600 "${env_temp}" - track_temp_file "${env_temp}" - - generate_env_config "$@" > "${env_temp}" - - # SECURITY: Use unpredictable temp file name to prevent race condition - # Attacker could create symlink at /tmp/env_config to exfiltrate credentials - local rand_suffix - rand_suffix=$(basename "${env_temp}") - local temp_remote="/tmp/spawn_env_${rand_suffix}" - - # Append to .bashrc and .zshrc only — do NOT write to .profile or .bash_profile - "${upload_func}" "${server_ip}" "${env_temp}" "${temp_remote}" - "${run_func}" "${server_ip}" "cat '${temp_remote}' >> ~/.bashrc; cat '${temp_remote}' >> ~/.zshrc; rm -f '${temp_remote}'" - - # Note: temp file will be cleaned 
up by trap handler - - # Offer optional GitHub CLI setup - offer_github_auth "${run_func} ${server_ip}" -} - -# Inject environment variables for providers without SSH (modal, e2b, sprite) -# For providers where upload_file and run_server don't take server_ip as first arg -# Usage: inject_env_vars_local upload_file run_server KEY1=VAL1 KEY2=VAL2 ... -# Example: inject_env_vars_local upload_file run_server \ -# "OPENROUTER_API_KEY=$OPENROUTER_API_KEY" \ -# "ANTHROPIC_BASE_URL=https://openrouter.ai/api" -inject_env_vars_local() { - local upload_func="${1}" - local run_func="${2}" - shift 2 - - local env_temp - env_temp=$(mktemp) - chmod 600 "${env_temp}" - track_temp_file "${env_temp}" - - generate_env_config "$@" > "${env_temp}" - - # SECURITY: Use unpredictable temp file name to prevent race condition - local rand_suffix - rand_suffix=$(basename "${env_temp}") - local temp_remote="/tmp/spawn_env_${rand_suffix}" - - # Append to .bashrc and .zshrc only - "${upload_func}" "${env_temp}" "${temp_remote}" - "${run_func}" "cat '${temp_remote}' >> ~/.bashrc; cat '${temp_remote}' >> ~/.zshrc; rm -f '${temp_remote}'" - - # Note: temp file will be cleaned up by trap handler - - # Offer optional GitHub CLI setup - offer_github_auth "${run_func}" -} - -# Prompt user about GitHub CLI setup BEFORE provisioning. -# Stores the answer so the actual install can happen later (after the -# server is up) without re-prompting. -# Usage: prompt_github_auth (call before create_server) -prompt_github_auth() { - SPAWN_GITHUB_AUTH_PROMPTED=1 - - # Skip in non-interactive or if user opted out - if [[ -n "${SPAWN_SKIP_GITHUB_AUTH:-}" ]]; then - return 0 - fi - - printf '\n' - local choice - choice=$(safe_read "Set up GitHub CLI (gh) on this machine? 
(y/N): ") || return 0 - if [[ "${choice}" =~ ^[Yy]$ ]]; then - SPAWN_GITHUB_AUTH_REQUESTED=1 - - # Capture local GitHub token for passthrough to remote VM - if [[ -n "${GITHUB_TOKEN:-}" ]]; then - SPAWN_GITHUB_TOKEN="${GITHUB_TOKEN}" - elif command -v gh &>/dev/null && gh auth status &>/dev/null 2>&1; then - SPAWN_GITHUB_TOKEN="$(gh auth token 2>/dev/null)" || true - fi - fi -} - -# Run GitHub CLI setup on remote VM if previously requested via prompt_github_auth. -# If prompt_github_auth was never called, falls back to prompting interactively. -# Usage (SSH clouds): offer_github_auth "run_server SERVER_IP" -# Usage (local): offer_github_auth "run_server" -offer_github_auth() { - local run_callback="${1}" - - # Skip if user opted out via env var - if [[ -n "${SPAWN_SKIP_GITHUB_AUTH:-}" ]]; then - return 0 - fi - - # Build the remote command with optional token export. - # Prefer the local copy (running from a checkout) so fixes don't wait for - # a merge to main. Base64-encode it for safe inline transport. - local gh_cmd - local _local_gh="${SCRIPT_DIR:-}/../../shared/github-auth.sh" - if [[ -n "${SCRIPT_DIR:-}" && -f "${_local_gh}" && ! -L "${_local_gh}" ]]; then - local _gh_b64 - _gh_b64=$(base64 < "${_local_gh}" | tr -d '\n') - gh_cmd="printf '%s' '${_gh_b64}' | base64 -d | bash" - else - gh_cmd="curl -fsSL https://raw.githubusercontent.com/OpenRouterTeam/spawn/main/shared/github-auth.sh | bash" - fi - if [[ -n "${SPAWN_GITHUB_TOKEN:-}" ]]; then - local escaped_token - escaped_token=$(printf '%q' "${SPAWN_GITHUB_TOKEN}") - gh_cmd="export GITHUB_TOKEN=${escaped_token}; ${gh_cmd}" - fi - - # If prompt_github_auth was already called, use its stored answer - if [[ "${SPAWN_GITHUB_AUTH_PROMPTED:-}" == "1" ]]; then - if [[ "${SPAWN_GITHUB_AUTH_REQUESTED:-}" == "1" ]]; then - log_step "Installing and authenticating GitHub CLI..." 
- ${run_callback} "${gh_cmd}" || log_warn "GitHub CLI setup failed (non-fatal, continuing)" - fi - return 0 - fi - - # Fallback: prompt_github_auth was never called, ask now - printf '\n' - local choice - choice=$(safe_read "Set up GitHub CLI (gh) on this machine? (y/N): ") || return 0 - if [[ ! "${choice}" =~ ^[Yy]$ ]]; then - return 0 - fi - - # Attempt token capture in fallback path too - if [[ -z "${SPAWN_GITHUB_TOKEN:-}" ]]; then - if [[ -n "${GITHUB_TOKEN:-}" ]]; then - SPAWN_GITHUB_TOKEN="${GITHUB_TOKEN}" - elif command -v gh &>/dev/null && gh auth status &>/dev/null 2>&1; then - SPAWN_GITHUB_TOKEN="$(gh auth token 2>/dev/null)" || true - fi - if [[ -n "${SPAWN_GITHUB_TOKEN:-}" ]]; then - local escaped_token - escaped_token=$(printf '%q' "${SPAWN_GITHUB_TOKEN}") - gh_cmd="export GITHUB_TOKEN=${escaped_token}; ${gh_cmd}" - fi - fi - - log_step "Installing and authenticating GitHub CLI..." - ${run_callback} "${gh_cmd}" || log_warn "GitHub CLI setup failed (non-fatal, continuing)" -} - -# ============================================================ -# Resource cleanup trap handlers -# ============================================================ - -# Array to track temporary files for cleanup -CLEANUP_TEMP_FILES=() - -# Track a temporary file for cleanup on exit -# Usage: track_temp_file PATH -track_temp_file() { - local temp_file="${1}" - CLEANUP_TEMP_FILES+=("${temp_file}") -} - -# Cleanup function for temporary files -# Called automatically on EXIT, INT, TERM signals -cleanup_temp_files() { - local exit_code=$? 
- - for temp_file in "${CLEANUP_TEMP_FILES[@]}"; do - if [[ -f "${temp_file}" ]]; then - # Securely remove temp files (may contain credentials) - shred -f -u "${temp_file}" 2>/dev/null || rm -f "${temp_file}" - fi - done - - return "${exit_code}" -} - -# Register cleanup trap handler -# Call this at the start of scripts that create temp files -register_cleanup_trap() { - trap cleanup_temp_files EXIT INT TERM -} - -# ============================================================ -# Agent setup helpers (composable, callback-based) -# ============================================================ -# These helpers accept pre-applied RUN/UPLOAD/SESSION callbacks, -# following the same callback pattern used by offer_github_auth -# and setup_claude_code_config. -# -# Usage pattern in agent scripts: -# RUN="run_server ${SERVER_IP}" -# UPLOAD="upload_file ${SERVER_IP}" -# SESSION="interactive_session ${SERVER_IP}" -# -# install_agent "Codex" "npm install -g @openai/codex" "$RUN" -# verify_agent "Codex" "command -v codex" "npm install -g @openai/codex" "$RUN" -# get_or_prompt_api_key -# inject_env_vars_cb "$RUN" "$UPLOAD" "OPENROUTER_API_KEY=${OPENROUTER_API_KEY}" -# launch_session "Hetzner server" "$SESSION" "source ~/.zshrc && codex" - -# Run an agent's install command on the target machine -# Usage: install_agent AGENT_NAME INSTALL_CMD RUN_CB -install_agent() { - local agent_name="$1" install_cmd="$2" run_cb="$3" - log_step "Installing ${agent_name}..." - # Pass the raw command to the run callback — do NOT use printf '%q' + bash -c - # here. The run callback (run_server, run_sprite, ssh) already handles escaping - # for remote transport. Double-escaping breaks shell operators (&&, ||, >, |) - # inside install commands. - if ! 
${run_cb} "${install_cmd}"; then - log_install_failed "${agent_name}" "${install_cmd}" - return 1 - fi - log_info "${agent_name} installation completed" -} - -# Verify an agent installed correctly; exit 1 on failure -# Usage: verify_agent AGENT_NAME VERIFY_CMD INSTALL_CMD RUN_CB -verify_agent() { - local agent_name="$1" verify_cmd="$2" install_cmd="$3" run_cb="$4" - if ! ${run_cb} "${verify_cmd}" >/dev/null 2>&1; then - log_install_failed "${agent_name}" "${install_cmd}" - exit 1 - fi - log_info "${agent_name} installation verified successfully" -} - -# Install Claude Code with multi-method fallback and detailed error reporting. -# Tries: 1) curl installer (standalone binary) 2) bun 3) npm -# The curl installer bundles its own runtime. npm/bun install a Node.js package -# whose shebang needs 'node', so we ensure a node runtime exists after those. -# Usage: install_claude_code RUN_CB -_finalize_claude_install() { - local run_cb="$1" - local claude_path="$2" - log_step "Setting up Claude Code shell integration..." - ${run_cb} "${claude_path} && claude install --force" >/dev/null 2>&1 || true - # Write claude PATH to .bashrc and .zshrc - ${run_cb} "for rc in ~/.bashrc ~/.zshrc; do grep -q '.claude/local/bin' \"\$rc\" 2>/dev/null || printf '\\n# Claude Code PATH\\nexport PATH=\"\$HOME/.claude/local/bin:\$HOME/.local/bin:\$HOME/.bun/bin:\$PATH\"\\n' >> \"\$rc\"; done" >/dev/null 2>&1 || true -} - -_verify_claude_installed() { - local run_cb="$1" - local claude_path="$2" - ${run_cb} "${claude_path} && command -v claude" >/dev/null 2>&1 -} - -_install_via_curl() { - local run_cb="$1" - local claude_path="$2" - log_step "Installing Claude Code (method 1/2: curl installer)..." 
- if ${run_cb} "curl -fsSL https://claude.ai/install.sh | bash" 2>&1; then - if _verify_claude_installed "$run_cb" "$claude_path"; then - log_info "Claude Code installed via curl installer" - _finalize_claude_install "$run_cb" "$claude_path" - return 0 - fi - log_warn "curl installer exited 0 but claude not found on PATH" - else - log_warn "curl installer failed (site may be temporarily unavailable)" - fi - return 1 -} - -_ensure_nodejs_runtime() { - local run_cb="$1" - local claude_path="$2" - if ! ${run_cb} "${claude_path} && command -v node" >/dev/null 2>&1; then - log_step "Installing Node.js runtime (required for claude package)..." - if ${run_cb} "DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends nodejs npm && npm install -g n && n 22 && ln -sf /usr/local/bin/node /usr/bin/node && ln -sf /usr/local/bin/npm /usr/bin/npm && ln -sf /usr/local/bin/npx /usr/bin/npx" >/dev/null 2>&1; then - log_info "Node.js installed via n" - else - log_warn "Could not install Node.js - bun method may fail" - fi - fi -} - -_install_via_npm() { - local run_cb="$1" - local claude_path="$2" - log_step "Installing Claude Code (method 2/3: npm)..." - if ${run_cb} "${claude_path} && npm install -g @anthropic-ai/claude-code 2>&1" 2>&1; then - if _verify_claude_installed "$run_cb" "$claude_path"; then - log_info "Claude Code installed via npm" - _finalize_claude_install "$run_cb" "$claude_path" - return 0 - fi - log_warn "npm install exited 0 but claude binary not found" - else - log_warn "npm install failed" - fi - return 1 -} - -_install_via_bun() { - local run_cb="$1" - local claude_path="$2" - log_step "Installing Claude Code (method 3/3: bun)..." 
- if ${run_cb} "${claude_path} && bun i -g @anthropic-ai/claude-code 2>&1" 2>&1; then - if _verify_claude_installed "$run_cb" "$claude_path"; then - log_info "Claude Code installed via bun" - _finalize_claude_install "$run_cb" "$claude_path" - return 0 - fi - log_warn "bun install exited 0 but claude binary not found" - else - log_warn "bun install failed" - fi - return 1 -} - -install_claude_code() { - local run_cb="$1" - local claude_path='export PATH=$HOME/.npm-global/bin:$HOME/.claude/local/bin:$HOME/.local/bin:$HOME/.bun/bin:$PATH' - - # Clean up ~/.bash_profile if it was created by a previous broken deployment. - ${run_cb} "if [ -f ~/.bash_profile ] && grep -q 'spawn:env\|Claude Code PATH\|spawn:path' ~/.bash_profile 2>/dev/null; then rm -f ~/.bash_profile; fi" >/dev/null 2>&1 || true - - # Already installed? - if _verify_claude_installed "$run_cb" "$claude_path"; then - log_info "Claude Code already installed" - _finalize_claude_install "$run_cb" "$claude_path" - return 0 - fi - - # Try curl installer first - if _install_via_curl "$run_cb" "$claude_path"; then - return 0 - fi - - # Ensure Node.js runtime for npm/bun methods - _ensure_nodejs_runtime "$run_cb" "$claude_path" - - # Try npm (most reliable for global installs) - if _install_via_npm "$run_cb" "$claude_path"; then - return 0 - fi - - # Try bun as last resort - if _install_via_bun "$run_cb" "$claude_path"; then - return 0 - fi - - # All methods failed - log_install_failed "Claude Code" "npm install -g @anthropic-ai/claude-code" - exit 1 -} - -# Get OpenRouter API key from environment or prompt via OAuth -# Sets the global OPENROUTER_API_KEY variable -get_or_prompt_api_key() { - echo "" - if [[ -n "${OPENROUTER_API_KEY:-}" ]]; then - log_info "Using OpenRouter API key from environment" - if ! verify_openrouter_key "${OPENROUTER_API_KEY}"; then - log_warn "Environment key failed validation, prompting for a new one..." 
- OPENROUTER_API_KEY="" - fi - fi - - local max_attempts=3 attempt=0 - while [[ -z "${OPENROUTER_API_KEY:-}" ]]; do - attempt=$((attempt + 1)) - if [[ ${attempt} -gt ${max_attempts} ]]; then - log_error "No valid API key after ${max_attempts} attempts" - exit 1 - fi - OPENROUTER_API_KEY=$(get_openrouter_api_key_oauth 5180 "${SPAWN_AGENT_SLUG:-}" "${SPAWN_CLOUD_SLUG:-}") || true - if [[ -n "${OPENROUTER_API_KEY:-}" ]] && ! verify_openrouter_key "${OPENROUTER_API_KEY}"; then - OPENROUTER_API_KEY="" - fi - done -} - -# Inject environment variables using pre-applied callbacks -# Usage: inject_env_vars_cb RUN_CB UPLOAD_CB KEY1=val1 KEY2=val2 ... -# Example: inject_env_vars_cb "$RUN" "$UPLOAD" \ -# "OPENROUTER_API_KEY=$OPENROUTER_API_KEY" \ -# "ANTHROPIC_BASE_URL=https://openrouter.ai/api" -inject_env_vars_cb() { - local run_cb="$1" upload_cb="$2" - shift 2 - - log_step "Setting up environment variables..." - - local env_temp - env_temp=$(mktemp) - chmod 600 "${env_temp}" - track_temp_file "${env_temp}" - - generate_env_config "$@" > "${env_temp}" - - # SECURITY: Use unpredictable temp file name to prevent race condition - local rand_suffix - rand_suffix=$(basename "${env_temp}") - local temp_remote="/tmp/spawn_env_${rand_suffix}" - - ${upload_cb} "${env_temp}" "${temp_remote}" - ${run_cb} "cat '${temp_remote}' >> ~/.bashrc; cat '${temp_remote}' >> ~/.zshrc; rm -f '${temp_remote}'" - - # Offer optional GitHub CLI setup - offer_github_auth "${run_cb}" -} - -# Print success message and launch an interactive agent session -# Usage: launch_session CLOUD_MSG SESSION_CB LAUNCH_CMD -launch_session() { - local cloud_msg="$1" session_cb="$2" launch_cmd="$3" - echo "" - log_info "${cloud_msg} setup completed successfully!" - echo "" - log_step "Starting agent..." 
- sleep 1 - clear 2>/dev/null || true - ${session_cb} "${launch_cmd}" -} - -# ============================================================ -# Cloud adapter runner (spawn_agent) -# ============================================================ -# Orchestrates the standard agent deployment flow using cloud_* adapter -# functions. Agent scripts define hooks (agent_install, agent_env_vars, -# agent_launch_cmd, etc.) and call spawn_agent to run them. -# -# Required cloud_* functions (defined in {cloud}/lib/common.sh): -# cloud_authenticate, cloud_provision, cloud_wait_ready, -# cloud_run, cloud_upload, cloud_interactive, cloud_label -# -# Required agent hooks: -# agent_env_vars — print env config lines to stdout (via generate_env_config) -# agent_launch_cmd — print the shell command to launch the agent -# -# Optional agent hooks: -# agent_pre_provision — run before provisioning (e.g., prompt_github_auth) -# agent_install — install the agent on the server -# agent_configure — agent-specific config (settings files, etc.) -# agent_save_connection — save connection info for `spawn list` -# agent_pre_launch — run before launching (e.g., start daemon) -# -# Optional agent variables: -# AGENT_MODEL_PROMPT — if set, prompt for model selection -# AGENT_MODEL_DEFAULT — default model ID (default: openrouter/auto) - -# Check if a function is defined (bash 3.2 compatible) -_fn_exists() { type "$1" 2>/dev/null | head -1 | grep -q 'function'; } - -# Inject env vars using cloud_* adapter functions -_spawn_inject_env_vars() { - log_step "Setting up environment variables..." 
- local env_temp - env_temp=$(mktemp) - chmod 600 "${env_temp}" - track_temp_file "${env_temp}" - - agent_env_vars > "${env_temp}" - - # SECURITY: Use unpredictable temp file name to prevent symlink attacks - local rand_suffix - rand_suffix=$(basename "${env_temp}") - local temp_remote="/tmp/spawn_env_${rand_suffix}" - - cloud_upload "${env_temp}" "${temp_remote}" - - # Write env vars to ~/.spawnrc instead of inlining into .bashrc/.zshrc. - # Ubuntu's default .bashrc has an interactive-shell guard that exits early — - # anything appended after the guard is never loaded when SSH runs a command string. - cloud_run "cp '${temp_remote}' ~/.spawnrc && chmod 600 ~/.spawnrc; rm -f '${temp_remote}'" - - # Hook .spawnrc into .bashrc and .zshrc so interactive shells pick up the vars too - cloud_run "grep -q 'source ~/.spawnrc' ~/.bashrc 2>/dev/null || echo '[ -f ~/.spawnrc ] && source ~/.spawnrc' >> ~/.bashrc" || log_warn "Could not hook .spawnrc into .bashrc" - cloud_run "grep -q 'source ~/.spawnrc' ~/.zshrc 2>/dev/null || echo '[ -f ~/.spawnrc ] && source ~/.spawnrc' >> ~/.zshrc" || log_warn "Could not hook .spawnrc into .zshrc" - - offer_github_auth cloud_run -} - -# Main orchestration runner for agent deployment -# Usage: spawn_agent AGENT_DISPLAY_NAME -spawn_agent() { - local agent_name="$1" - SPAWN_AGENT_SLUG="${2:-}" - SPAWN_CLOUD_SLUG="${3:-}" - - # 1. Authenticate with cloud provider - cloud_authenticate - - # 2. Pre-provision hooks (e.g., prompt for GitHub auth) - if _fn_exists agent_pre_provision; then agent_pre_provision || true; fi - - # 3. Get API key (before provisioning so user isn't waiting on server) - get_or_prompt_api_key - - # 4. Model selection (if agent needs it) - if [[ -n "${AGENT_MODEL_PROMPT:-}" ]]; then - MODEL_ID=$(get_model_id_interactive "${AGENT_MODEL_DEFAULT:-openrouter/auto}" "${agent_name}") || exit 1 - fi - - # 5. Provision server - local server_name - server_name=$(get_server_name) - cloud_provision "${server_name}" - - # 6. 
Wait for readiness (may already be done after OAuth) - cloud_wait_ready - - # 7. Install agent - if _fn_exists agent_install; then - agent_install || exit 1 - fi - - # 8. Inject environment variables - _spawn_inject_env_vars - - # 9. Agent-specific configuration (non-fatal — agent may work with defaults) - if _fn_exists agent_configure; then agent_configure || log_warn "Agent configuration failed (continuing with defaults)"; fi - - # 10. Save connection info (non-fatal — convenience feature only) - if _fn_exists agent_save_connection; then agent_save_connection || log_warn "Could not save connection info"; fi - - # 11. Pre-launch hooks (non-fatal — e.g., gateway daemon may start slowly) - if _fn_exists agent_pre_launch; then agent_pre_launch || log_warn "Pre-launch hook failed (continuing)"; fi - - # 12. Launch interactive session - log_info "${agent_name} is ready" - local launch_cmd - launch_cmd=$(agent_launch_cmd) - - # Save the launch command to connection file for `spawn list` → "Enter agent" - _save_launch_cmd "${launch_cmd}" - - launch_session "$(cloud_label)" cloud_interactive "${launch_cmd}" -} - -# ============================================================ -# SSH configuration -# ============================================================ - -# Validate SSH_OPTS to prevent command injection -# Only allow safe SSH option patterns (dash-prefixed flags and values) -_validate_ssh_opts() { - local opts="${1}" - # Allow empty - if [[ -z "${opts}" ]]; then - return 0 - fi - # Pattern: SSH opts must start with dash and contain only safe characters - # Allows: -o Option=value -i /path/to/key -p 22 etc. 
- # Blocks: semicolons, pipes, backticks, $() and other shell metacharacters - if [[ "${opts}" =~ [\;\|\&\`\$\(\)\<\>] ]]; then - log_error "SECURITY: SSH_OPTS contains shell metacharacters" - log_error "Rejected value: ${opts}" - return 1 - fi - return 0 -} - -# Default SSH options for all cloud providers -# Clouds can override this if they need provider-specific settings -if [[ -z "${SSH_OPTS:-}" ]]; then - SSH_OPTS="-o StrictHostKeyChecking=accept-new -o UserKnownHostsFile=/dev/null -o LogLevel=ERROR -o ServerAliveInterval=15 -o ServerAliveCountMax=3 -o ConnectTimeout=10 -i ${HOME}/.ssh/id_ed25519" -else - # Validate user-provided SSH_OPTS for security - if ! _validate_ssh_opts "${SSH_OPTS}"; then - log_error "Invalid SSH_OPTS provided. Using secure defaults." - SSH_OPTS="-o StrictHostKeyChecking=accept-new -o UserKnownHostsFile=/dev/null -o LogLevel=ERROR -o ServerAliveInterval=15 -o ServerAliveCountMax=3 -o ConnectTimeout=10 -i ${HOME}/.ssh/id_ed25519" - fi -fi - -# ============================================================ -# SSH key management helpers -# ============================================================ - -# Generate SSH key if it doesn't exist -# Usage: generate_ssh_key_if_missing KEY_PATH -generate_ssh_key_if_missing() { - local key_path="${1}" - if [[ -f "${key_path}" ]]; then - return 0 - fi - log_step "Generating SSH key at ${key_path}..." - mkdir -p "$(dirname "${key_path}")" || { - log_error "Failed to create SSH key directory: $(dirname "${key_path}")" - log_error "Check that you have write permissions to this directory." - return 1 - } - ssh-keygen -t ed25519 -f "${key_path}" -N "" -q || { - log_error "Failed to generate SSH key at ${key_path}" - log_error "" - log_error "How to fix:" - log_error " 1. Check disk space: df -h $(dirname "${key_path}")" - log_error " 2. Check permissions: ls -la $(dirname "${key_path}")" - log_error " 3. 
Generate manually: ssh-keygen -t ed25519 -f ${key_path}" - return 1 - } - log_info "SSH key generated at ${key_path}" -} - -# Get MD5 fingerprint of SSH public key -# Usage: get_ssh_fingerprint PUB_KEY_PATH -get_ssh_fingerprint() { - local pub_path="${1}" - if [[ ! -f "${pub_path}" ]]; then - log_error "SSH public key not found: ${pub_path}" - log_error "Expected a public key file alongside your private key." - log_error "Regenerate with: ssh-keygen -t ed25519 -f ${pub_path%.pub}" - return 1 - fi - local fingerprint - fingerprint=$(ssh-keygen -lf "${pub_path}" -E md5 2>/dev/null | awk '{print $2}' | sed 's/MD5://') - if [[ -z "${fingerprint}" ]]; then - log_error "Failed to read SSH public key fingerprint from ${pub_path}" - log_error "The key file may be corrupted or in an unsupported format." - log_error "Regenerate with: ssh-keygen -t ed25519 -f ${pub_path%.pub}" - return 1 - fi - echo "${fingerprint}" -} - -# JSON-escape a string (for embedding in JSON bodies) -# Usage: json_escape STRING -json_escape() { - local string="${1}" - _INPUT="${string}" bun -e "process.stdout.write(JSON.stringify(process.env._INPUT) + '\n')" 2>/dev/null || { - # Fallback: manually escape backslashes, quotes, and JSON control characters - local escaped="${string//\\/\\\\}" - escaped="${escaped//\"/\\\"}" - escaped="${escaped//$'\n'/\\n}" - escaped="${escaped//$'\r'/\\r}" - escaped="${escaped//$'\t'/\\t}" - echo "\"${escaped}\"" - } -} - -# Extract SSH key IDs from cloud provider API response -# Usage: extract_ssh_key_ids API_RESPONSE KEY_FIELD -# KEY_FIELD: "ssh_keys" (DigitalOcean/Vultr) or "data" (Linode) -extract_ssh_key_ids() { - local api_response="${1}" - local key_field="${2:-ssh_keys}" - # Use jq with --arg to safely pass key_field (prevents code injection). 
# NOTE(review): the lines up to the first '}' below are the tail of a helper
# defined above this chunk (it parses SSH key IDs out of ${api_response});
# only the closing half is visible here, so it is kept unchanged.
  if command -v jq &>/dev/null; then
    printf '%s' "${api_response}" | jq --arg field "${key_field}" '[.[$field][]?.id]' 2>/dev/null || {
      log_error "Failed to parse SSH key IDs from API response"
      return 1
    }
  else
    # bun fallback when jq is unavailable; data is passed via env vars so the
    # JSON payload never hits the command line (no quoting/injection issues)
    _DATA="${api_response}" _FIELD="${key_field}" bun -e "
const d = JSON.parse(process.env._DATA);
const ids = (d[process.env._FIELD] || []).map(k => k.id);
process.stdout.write(JSON.stringify(ids) + '\n');
" 2>/dev/null || {
      log_error "Failed to parse SSH key IDs from API response"
      log_error "The API response may be malformed or bun is unavailable"
      return 1
    }
  fi
}

# ============================================================
# Cloud provisioning helpers
# ============================================================

# Generate cloud-init userdata YAML for server provisioning
# This is the default userdata used by all cloud providers
# Clouds can override this function if they need provider-specific cloud-init config
# Emits a '#cloud-config' document on stdout; the single-quoted heredoc
# delimiter prevents any local shell expansion of the ${HOME}/${PATH} text.
get_cloud_init_userdata() {
  cat << 'CLOUD_INIT_EOF'
#cloud-config
package_update: true
packages:
  - curl
  - unzip
  - git
  - zsh
  - nodejs
  - npm

runcmd:
  # Set up 2G swap to prevent OOM kills on small VMs
  - fallocate -l 2G /swapfile
  - chmod 600 /swapfile
  - mkswap /swapfile
  - swapon /swapfile
  # Upgrade Node.js to v22 LTS (apt has v18, agents like Cline need v20+)
  # n installs to /usr/local/bin but apt's v18 at /usr/bin can shadow it, so symlink over
  - npm install -g n && n 22 && ln -sf /usr/local/bin/node /usr/bin/node && ln -sf /usr/local/bin/npm /usr/bin/npm && ln -sf /usr/local/bin/npx /usr/bin/npx
  # Install Bun
  - su - root -c 'curl -fsSL https://bun.sh/install | bash'
  # Install Claude Code
  - su - root -c 'curl -fsSL https://claude.ai/install.sh | bash'
  # Mark as sandbox environment (disposable cloud VM)
  - echo 'export IS_SANDBOX=1' >> /root/.bashrc
  - echo 'export IS_SANDBOX=1' >> /root/.zshrc
  # Configure PATH in .bashrc and .zshrc (include claude installer path)
  - echo 'export PATH="${HOME}/.claude/local/bin:${HOME}/.local/bin:${HOME}/.bun/bin:${PATH}"' >> /root/.bashrc
  - echo 'export PATH="${HOME}/.claude/local/bin:${HOME}/.local/bin:${HOME}/.bun/bin:${PATH}"' >> /root/.zshrc
  # Signal completion
  - touch /root/.cloud-init-complete
CLOUD_INIT_EOF
}

# ============================================================
# Cloud API helpers
# ============================================================

# Calculate exponential backoff with jitter for retry logic
# Usage: calculate_retry_backoff CURRENT_INTERVAL MAX_INTERVAL
# Returns: backoff interval with ±20% jitter on stdout
# NOTE(review): MAX_INTERVAL is accepted but never read here — capping is done
# by _update_retry_interval. Parameter kept for interface stability; confirm
# before removing.
calculate_retry_backoff() {
  local interval="${1}"
  local max_interval="${2}"

  # Validate inputs to prevent empty or invalid intervals
  if [[ -z "${interval}" ]] || [[ "${interval}" -lt 1 ]]; then
    echo "1"
    return 0
  fi

  # Add jitter: ±20% randomization to prevent thundering herd
  # Fallback to no-jitter interval if bun is unavailable
  _INTERVAL="${interval}" bun -e "process.stdout.write(String(Math.floor(Number(process.env._INTERVAL) * (0.8 + Math.random() * 0.4))) + '\n')" 2>/dev/null || printf '%s\n' "${interval}"
}

# Handle API retry decision with backoff - extracted to reduce duplication across API wrappers
# Usage: _api_should_retry_on_error ATTEMPT MAX_RETRIES INTERVAL MAX_INTERVAL MESSAGE
# Returns: 0 to continue/retry (after logging and sleeping the jittered
# interval), 1 to fail (max attempts exhausted)
# Caller updates interval and attempt variables after success
_api_should_retry_on_error() {
  local attempt="${1}"
  local max_retries="${2}"
  local interval="${3}"
  local max_interval="${4}"
  local message="${5}"

  if [[ "${attempt}" -ge "${max_retries}" ]]; then
    return 1 # Don't retry - max attempts exhausted
  fi

  local jitter
  jitter=$(calculate_retry_backoff "${interval}" "${max_interval}")
  log_warn "${message} (attempt ${attempt}/${max_retries}), retrying in ${jitter}s..."
  sleep "${jitter}"

  return 0 # Do retry
}

# Helper to update retry interval with backoff
# Usage: _update_retry_interval INTERVAL_VAR MAX_INTERVAL_VAR
# This eliminates repeated interval update logic across API wrappers
# Doubles the variable named by INTERVAL_VAR in place (indirect expansion),
# clamped to the value of the variable named by MAX_INTERVAL_VAR.
_update_retry_interval() {
  local interval_var="${1}"
  local max_interval_var="${2}"

  local current_interval=${!interval_var}
  local max_interval=${!max_interval_var}

  current_interval=$((current_interval * 2))
  if [[ "${current_interval}" -gt "${max_interval}" ]]; then
    current_interval="${max_interval}"
  fi

  # eval is safe here: interval_var is a caller-supplied variable NAME, and all
  # call sites in this file pass literal names (interval/max_interval)
  eval "${interval_var}=\${current_interval}"
}

# Helper to extract HTTP status code and response body from curl output
# Curl is called with "-w \n%{http_code}" so last line is the code
# Returns: nothing on stdout; sets the API_HTTP_CODE and API_RESPONSE_BODY
# globals (both outputs travel via globals, not stdout)
_parse_api_response() {
  local response="${1}"
  local http_code
  http_code=$(echo "${response}" | tail -1)
  local response_body
  response_body=$(echo "${response}" | sed '$d')

  API_HTTP_CODE="${http_code}"
  API_RESPONSE_BODY="${response_body}"
}

# Core curl wrapper for API requests - builds args, executes, parses response
# Usage: _curl_api URL METHOD BODY AUTH_ARGS...
# Returns: 0 on curl success, 1 on curl failure
# Sets: API_HTTP_CODE and API_RESPONSE_BODY globals
# SECURITY: Authorization headers are passed via curl's -K (config from stdin)
# instead of command-line args, so tokens don't appear in `ps` output.
_curl_api() {
  local url="${1}"
  local method="${2}"
  local body="${3:-}"
  shift 3

  # SECURITY: Separate Authorization headers from other args so we can pass
  # them via stdin (-K -) instead of command-line, hiding tokens from `ps`.
  local auth_header=""
  local extra_args=()
  while [[ $# -gt 0 ]]; do
    if [[ "$1" == "-H" && "${2:-}" == Authorization:* ]]; then
      auth_header="$2"
      shift 2
    else
      extra_args+=("$1")
      shift
    fi
  done

  local args=(
    -s
    -w "\n%{http_code}"
    -X "${method}"
    -H "Content-Type: application/json"
    "${extra_args[@]}"
  )

  if [[ -n "${body}" ]]; then
    args+=(-d "${body}")
  fi

  local response
  if [[ -n "${auth_header}" ]]; then
    # Pass auth header via stdin to keep it out of process argument list
    response=$(printf 'header = "%s"\n' "${auth_header}" | curl "${args[@]}" -K - "${url}" 2>&1)
  else
    response=$(curl "${args[@]}" "${url}" 2>&1)
  fi
  # $? here is curl's exit status (last command of the pipeline / substitution)
  local curl_exit_code=$?

  _parse_api_response "${response}"

  return ${curl_exit_code}
}

# Helper to handle a single API request attempt with Bearer auth
# Returns: 0 on curl success, 1 on curl failure
# Sets: API_HTTP_CODE and API_RESPONSE_BODY globals
_make_api_request() {
  local base_url="${1}"
  local auth_token="${2}"
  local method="${3}"
  local endpoint="${4}"
  local body="${5:-}"

  _curl_api "${base_url}${endpoint}" "${method}" "${body}" -H "Authorization: Bearer ${auth_token}"
}

# Generic cloud API wrapper - centralized curl wrapper for all cloud providers
# Includes automatic retry logic with exponential backoff for transient failures
# Usage: generic_cloud_api BASE_URL AUTH_TOKEN METHOD ENDPOINT [BODY] [MAX_RETRIES]
# Example: generic_cloud_api "$DO_API_BASE" "$DO_API_TOKEN" GET "/account"
# Example: generic_cloud_api "$DO_API_BASE" "$DO_API_TOKEN" POST "/droplets" "$body"
# Example: generic_cloud_api "$DO_API_BASE" "$DO_API_TOKEN" GET "/account" "" 5
# Retries on: 429 (rate limit), 503 (service unavailable), network errors
# Internal retry loop shared by generic_cloud_api and generic_cloud_api_custom_auth
# Usage: _cloud_api_retry_loop REQUEST_FUNC MAX_RETRIES API_DESCRIPTION [REQUEST_FUNC_ARGS...]
# Classify the result of an API request attempt.
# Returns a retry reason string on stdout if the request failed with a retryable error,
# or empty string on success. Caller checks the return string.
# Reads the API_HTTP_CODE global set by _parse_api_response; only 429 and 503
# (plus curl-level failures) are treated as retryable.
_classify_api_result() {
  local curl_ok="${1}"
  if [[ "${curl_ok}" != "0" ]]; then
    echo "Cloud API network error"
  elif [[ "${API_HTTP_CODE}" == "429" ]]; then
    echo "Cloud API returned rate limit (HTTP 429)"
  elif [[ "${API_HTTP_CODE}" == "503" ]]; then
    echo "Cloud API returned service unavailable (HTTP 503)"
  fi
}

# Report a final API failure after retries are exhausted
# For non-network failures the provider's response body is echoed to stdout so
# callers that capture output still receive the error payload.
_report_api_failure() {
  local retry_reason="${1}"
  local max_retries="${2}"
  log_error "${retry_reason} after ${max_retries} attempts"
  if [[ "${retry_reason}" == "Cloud API network error" ]]; then
    log_warn "Could not reach the cloud provider's API."
    log_warn ""
    log_warn "How to fix:"
    log_warn " 1. Check your internet connection: curl -s https://httpbin.org/ip"
    log_warn " 2. Check DNS resolution: nslookup the provider's API hostname"
    log_warn " 3. If behind a proxy or firewall, ensure HTTPS traffic is allowed"
    log_warn " 4. Try again in a few moments (the API may be temporarily down)"
  else
    log_warn "This is usually caused by rate limiting or temporary provider issues."
    log_warn "Wait a minute and try again, or check the provider's status page."
    echo "${API_RESPONSE_BODY}"
  fi
}

# Shared retry loop: calls REQUEST_FUNC with the remaining args until it
# succeeds (non-retryable result) or MAX_RETRIES attempts are exhausted.
# On success, echoes API_RESPONSE_BODY to stdout and returns 0.
# On exhausted retries, logs diagnostics and returns 1.
_cloud_api_retry_loop() {
  local request_func="${1}"
  local max_retries="${2}"
  local api_description="${3}"
  shift 3

  local attempt=1
  local interval=2
  local max_interval=30

  while [[ "${attempt}" -le "${max_retries}" ]]; do
    local curl_ok=0
    "${request_func}" "$@" || curl_ok=$?

    local retry_reason
    retry_reason=$(_classify_api_result "${curl_ok}")

    if [[ -z "${retry_reason}" ]]; then
      # Success path: the response body is this function's stdout "return value"
      echo "${API_RESPONSE_BODY}"
      return 0
    fi

    if ! _api_should_retry_on_error "${attempt}" "${max_retries}" "${interval}" "${max_interval}" "${retry_reason}"; then
      _report_api_failure "${retry_reason}" "${max_retries}"
      return 1
    fi
    _update_retry_interval interval max_interval
    attempt=$((attempt + 1))
  done

  # NOTE(review): this tail looks unreachable because _api_should_retry_on_error
  # fails once attempt >= max_retries; kept as a defensive fallback — confirm.
  log_error "Cloud API request failed after ${max_retries} attempts (${api_description})"
  log_warn "This is usually caused by rate limiting or temporary provider issues."
  log_warn "Wait a minute and try again, or check the provider's status page."
  return 1
}

# Bearer-token entry point; usage/examples documented above _cloud_api_retry_loop.
generic_cloud_api() {
  local base_url="${1}"
  local auth_token="${2}"
  local method="${3}"
  local endpoint="${4}"
  local body="${5:-}"
  local max_retries="${6:-3}"

  _cloud_api_retry_loop _make_api_request "${max_retries}" "${method} ${endpoint}" "${base_url}" "${auth_token}" "${method}" "${endpoint}" "${body}"
}

# Helper to make API request with custom curl auth args (e.g., Basic Auth, custom headers)
# Returns: 0 on curl success, 1 on curl failure
# Sets: API_HTTP_CODE and API_RESPONSE_BODY globals
_make_api_request_custom_auth() {
  local url="${1}"
  local method="${2}"
  local body="${3:-}"
  shift 3

  _curl_api "${url}" "${method}" "${body}" "$@"
}

# Generic cloud API wrapper with custom curl auth args
# Like generic_cloud_api but accepts arbitrary curl flags for authentication
# Usage: generic_cloud_api_custom_auth BASE_URL METHOD ENDPOINT BODY MAX_RETRIES AUTH_ARGS...
# Example: generic_cloud_api_custom_auth "$API_BASE" GET "/account" "" 3 -H "X-Auth-Token: $TOKEN"
# Example: generic_cloud_api_custom_auth "$API_BASE" POST "/servers" "$body" 3 -u "$USER:$PASS"
generic_cloud_api_custom_auth() {
  local base_url="${1}"
  local method="${2}"
  local endpoint="${3}"
  local body="${4:-}"
  local max_retries="${5:-3}"
  shift 5
  # Remaining args are custom curl auth flags

  _cloud_api_retry_loop _make_api_request_custom_auth "${max_retries}" "${method} ${endpoint}" "${base_url}${endpoint}" "${method}" "${body}" "$@"
}

# ============================================================
# Agent verification helpers
# ============================================================

# Check if agent command exists in PATH
# Logs a structured diagnostic (causes before ---, fixes after) on failure.
_check_agent_in_path() {
  local agent_cmd="$1"
  local agent_name="$2"
  if ! command -v "${agent_cmd}" &> /dev/null; then
    _log_diagnostic \
      "${agent_name} installation failed: command '${agent_cmd}' not found in PATH" \
      "The installation script encountered an error (check logs above)" \
      "The binary was installed to a directory not in PATH" \
      "Network issues prevented the download from completing" \
      --- \
      "Re-run the script to retry the installation" \
      "Install ${agent_name} manually and ensure it is in PATH"
    return 1
  fi
  return 0
}

# Check if agent command executes without error
_check_agent_runs() {
  local agent_cmd="$1"
  local verify_arg="$2"
  local agent_name="$3"
  if ! "${agent_cmd}" "${verify_arg}" &> /dev/null; then
    _log_diagnostic \
      "${agent_name} verification failed: '${agent_cmd} ${verify_arg}' returned an error" \
      "Missing runtime dependencies (Python, Node.js, etc.)" \
      "Incompatible system architecture or OS version" \
      --- \
      "Check ${agent_name}'s installation docs for prerequisites" \
      "Run '${agent_cmd} ${verify_arg}' manually to see the error"
    return 1
  fi
  return 0
}

# Verify that an agent is properly installed by checking if its command exists
# Usage: verify_agent_installed AGENT_COMMAND [VERIFICATION_ARG] [AGENT_NAME]
# (third arg is the human-readable display name used in log messages;
# defaults to AGENT_COMMAND)
# Examples:
#   verify_agent_installed "claude" "--version" "Claude Code"
#   verify_agent_installed "codex" "--version" "Codex"
#   verify_agent_installed "cline" "--version" "Cline"
# Returns 0 if agent is installed and working, 1 otherwise
verify_agent_installed() {
  local agent_cmd="${1}"
  local verify_arg="${2:---version}"
  local agent_name="${3:-${agent_cmd}}"

  log_step "Verifying ${agent_name} installation..."

  _check_agent_in_path "${agent_cmd}" "${agent_name}" || return 1
  _check_agent_runs "${agent_cmd}" "${verify_arg}" "${agent_name}" || return 1

  log_info "${agent_name} installation verified successfully"
  return 0
}

# ============================================================
# Non-interactive agent execution
# ============================================================

# Execute an agent in non-interactive mode with a prompt
# Usage: execute_agent_non_interactive SPRITE_NAME AGENT_NAME AGENT_FLAGS PROMPT EXEC_CALLBACK
# Arguments:
#   SPRITE_NAME   - Name of the sprite/server to execute on
#   AGENT_NAME    - Name of the agent command (e.g., "claude", "codex")
#   AGENT_FLAGS   - Agent-specific flags for non-interactive execution (e.g., "-p" for claude, "--prompt" for codex)
#   PROMPT        - User prompt to execute
#   EXEC_CALLBACK - Function to execute commands: func(sprite_name, command)
#
# Example (Sprite):
#   execute_agent_non_interactive "$SPRITE_NAME" "claude" "-p" "$PROMPT" "sprite_exec"
#
# Example (SSH):
#   execute_agent_non_interactive "$SERVER_IP" "codex" "--prompt" "$PROMPT" "ssh_exec"
execute_agent_non_interactive() {
  local sprite_name="${1}"
  local agent_name="${2}"
  local agent_flags="${3}"
  local prompt="${4}"
  local exec_callback="${5}"

  log_step "Executing ${agent_name} with prompt in non-interactive mode..."

  # Do NOT use printf '%q' here — the run callback (run_server, sprite exec,
  # ssh) already handles escaping for remote transport. Double-escaping breaks
  # prompts containing quotes, spaces, or special characters on Fly.io.
  # Single-quote the prompt to protect it from shell expansion.
  local safe_prompt
  safe_prompt="'$(printf '%s' "${prompt}" | sed "s/'/'\\\\''/g")'"

  # Build the command based on exec callback type
  if [[ "${exec_callback}" == *"sprite"* ]]; then
    # Sprite execution (no -tty flag for non-interactive)
    sprite exec -s "${sprite_name}" -- zsh -c "source ~/.zshrc && ${agent_name} ${agent_flags} ${safe_prompt}"
  else
    # Generic SSH execution
    ${exec_callback} "${sprite_name}" "source ~/.zshrc && ${agent_name} ${agent_flags} ${safe_prompt}"
  fi
}

# ============================================================
# SSH connectivity helpers
# ============================================================

# Generic SSH wait function - polls until a remote command succeeds with exponential backoff
# Usage: generic_ssh_wait USERNAME IP SSH_OPTS TEST_CMD DESCRIPTION MAX_ATTEMPTS [INITIAL_INTERVAL]
# Implements exponential backoff: starts at INITIAL_INTERVAL (default 5s), doubles up to max 30s
# Adds jitter (±20%) to prevent thundering herd when multiple instances retry simultaneously
# Log progress message based on elapsed time
_log_ssh_wait_progress() {
  local description="${1}"
  local elapsed_time="${2}"

  if [[ ${elapsed_time} -lt 60 ]]; then
    log_step "Waiting for ${description}... (${elapsed_time}s elapsed, still within normal range)"
  elif [[ ${elapsed_time} -lt 120 ]]; then
    log_step "Waiting for ${description}... (${elapsed_time}s elapsed, taking longer than usual)"
  else
    log_warn "Still waiting for ${description}... (${elapsed_time}s elapsed, this is unusually slow)"
  fi
}

# Log timeout error message with troubleshooting steps
_log_ssh_wait_timeout_error() {
  local description="${1}"
  local elapsed_time="${2}"
  local username="${3}"
  local ip="${4}"

  log_error "${description} timed out after ${elapsed_time}s (server: ${ip})"
  log_error ""
  log_error "The server failed to become ready within the expected timeframe."
  log_error ""
  log_error "Common causes:"
  log_error " - Server is still booting (some cloud providers take 2-3 minutes)"
  log_error " - Cloud provider API delays or maintenance"
  log_error " - Firewall blocking SSH on port 22"
  log_error " - Network connectivity issues"
  log_error ""
  log_error "Troubleshooting steps:"
  log_error " 1. Test SSH manually: ssh ${username}@${ip}"
  log_error " 2. Check firewall rules in your cloud provider dashboard"
  if [[ -n "${SPAWN_DASHBOARD_URL:-}" ]]; then
    log_error " Dashboard: ${SPAWN_DASHBOARD_URL}"
  fi
  log_error " 3. Re-run this command to retry (the server may need more time)"
  if [[ -n "${SPAWN_RETRY_CMD:-}" ]]; then
    log_error " ${SPAWN_RETRY_CMD}"
  fi
}

generic_ssh_wait() {
  local username="${1}"
  local ip="${2}"
  local ssh_opts="${3}"
  local test_cmd="${4}"
  local description="${5}"
  local max_attempts="${6:-30}"
  local initial_interval="${7:-5}"

  local attempt=1
  local interval="${initial_interval}"
  local max_interval=30
  local elapsed_time=0

  log_step "Waiting for ${description} to ${ip} (this usually takes 30-90 seconds)..."
  while [[ "${attempt}" -le "${max_attempts}" ]]; do
    # shellcheck disable=SC2086
    # ssh_opts is intentionally unquoted so multiple options word-split;
    # < /dev/null stops ssh from eating the parent script's stdin
    if ssh ${ssh_opts} "${username}@${ip}" "${test_cmd}" < /dev/null >/dev/null 2>&1; then
      log_info "${description} ready (took ${elapsed_time}s)"
      return 0
    fi

    local jitter
    jitter=$(calculate_retry_backoff "${interval}" "${max_interval}")

    _log_ssh_wait_progress "${description}" "${elapsed_time}"
    sleep "${jitter}"

    # elapsed_time tracks sleep time only, not the duration of the ssh attempt
    elapsed_time=$((elapsed_time + jitter))
    _update_retry_interval interval max_interval
    attempt=$((attempt + 1))
  done

  _log_ssh_wait_timeout_error "${description}" "${elapsed_time}" "${username}" "${ip}"
  return 1
}

# Wait for cloud-init to complete on a server
# Usage: wait_for_cloud_init IP [max_attempts]
# Default max_attempts is 60 (~5 minutes with exponential backoff)
# Polls for the /root/.cloud-init-complete marker written by the last runcmd
# step of get_cloud_init_userdata.
wait_for_cloud_init() {
  local ip="${1}"
  local max_attempts=${2:-60}
  generic_ssh_wait "root" "${ip}" "${SSH_OPTS}" "test -f /root/.cloud-init-complete" "cloud-init" "${max_attempts}" 5
}

# ============================================================
# Standard SSH server operations
# ============================================================

# Most SSH-based cloud providers share identical implementations for
# run_server, upload_file, interactive_session, and verify_server_connectivity.
# These helpers let providers set SSH_USER (default: root) and get all four
# functions automatically, eliminating ~20 lines of copy-paste per provider.

# Run a command on a remote server via SSH
# Usage: ssh_run_server IP COMMAND
# Requires: SSH_USER (default: root), SSH_OPTS
# SECURITY: Command is properly quoted to prevent shell injection.
# Note: $cmd is always a shell command string (with pipes, semicolons, etc.)
# that is intentionally interpreted by the remote shell. All callers pass
# static command strings — never user-controlled input.
ssh_run_server() {
  local ip="${1}"
  local cmd="${2}"
  # Single-quoted so $HOME/$PATH expand on the remote side, not locally.
  # .npm-global/bin: user-writable npm prefix (AWS Lightsail runs as ubuntu, not root)
  local path_prefix='export PATH="$HOME/.npm-global/bin:$HOME/.local/bin:$HOME/.bun/bin:$PATH"'
  if [[ -n "${SPAWN_DEBUG:-}" ]]; then
    # Trace remote execution when SPAWN_DEBUG is set
    cmd="set -x; ${cmd}"
  fi
  # shellcheck disable=SC2086
  # < /dev/null prevents SSH from consuming the parent script's stdin.
  # Without this, sequential SSH calls can steal input meant for later
  # commands (e.g., safe_read prompts), causing hangs.
  ssh $SSH_OPTS "${SSH_USER:-root}@${ip}" -- "${path_prefix} && ${cmd}" < /dev/null
}

# Upload a file to a remote server via SCP
# Usage: ssh_upload_file IP LOCAL_PATH REMOTE_PATH
# Requires: SSH_USER (default: root), SSH_OPTS
ssh_upload_file() {
  local ip="${1}"
  local local_path="${2}"
  local remote_path="${3}"
  # shellcheck disable=SC2086
  scp $SSH_OPTS "${local_path}" "${SSH_USER:-root}@${ip}:${remote_path}"
}

# Show a post-session summary reminding the user their server is still running.
# Called automatically by ssh_interactive_session after the SSH session ends.
# Uses optional env vars for richer output:
#   SPAWN_DASHBOARD_URL - Cloud provider dashboard URL for managing servers
#   SERVER_NAME         - Server name (set by individual cloud scripts)
# Arguments: IP
_show_post_session_summary() {
  local ip="${1}"
  local dashboard_url="${SPAWN_DASHBOARD_URL:-}"
  local server_name="${SERVER_NAME:-}"

  printf '\n'
  if [[ -n "${server_name}" ]]; then
    log_warn "Session ended. Your server '${server_name}' is still running at ${ip}."
  else
    log_warn "Session ended. Your server is still running at ${ip}."
  fi
  log_warn "Remember to delete it when you're done to avoid ongoing charges."
  log_warn ""
  if [[ -n "${dashboard_url}" ]]; then
    log_warn "Manage or delete it in your dashboard:"
    log_warn " ${dashboard_url}"
  else
    log_warn "Check your cloud provider dashboard to stop or delete the server."
  fi
  log_warn ""
  log_info "To delete from CLI:"
  log_info " spawn delete"
  log_info "To reconnect:"
  log_info " ssh ${SSH_USER:-root}@${ip}"
}

# Show a post-session summary for exec-based (non-SSH) cloud providers.
# These use CLI exec commands instead of direct SSH, so the reconnect
# hint differs from the SSH variant.
# Uses optional env vars for richer output:
#   SPAWN_DASHBOARD_URL - Cloud provider dashboard URL for managing services
#   SERVER_NAME         - Service/sandbox name
#   SPAWN_RECONNECT_CMD - CLI command to reconnect (shown as reconnect hint)
_show_exec_post_session_summary() {
  local dashboard_url="${SPAWN_DASHBOARD_URL:-}"
  local server_name="${SERVER_NAME:-}"
  local reconnect_cmd="${SPAWN_RECONNECT_CMD:-}"

  printf '\n'
  if [[ -n "${server_name}" ]]; then
    log_warn "Session ended. Your service '${server_name}' is still running."
  else
    log_warn "Session ended. Your service is still running."
  fi
  log_warn "Remember to delete it when you're done to avoid ongoing charges."
  log_warn ""
  if [[ -n "${dashboard_url}" ]]; then
    log_warn "Manage or delete it in your dashboard:"
    log_warn " ${dashboard_url}"
  else
    log_warn "Check your cloud provider dashboard to stop or delete the service."
  fi
  log_warn ""
  log_info "To delete from CLI:"
  log_info " spawn delete"
  if [[ -n "${reconnect_cmd}" ]]; then
    log_info "To reconnect:"
    log_info " ${reconnect_cmd}"
  fi
}

# Start an interactive SSH session
# Usage: ssh_interactive_session IP COMMAND
# Requires: SSH_USER (default: root), SSH_OPTS
# SECURITY: Command is properly quoted to prevent shell injection
# Always shows the post-session summary, then propagates ssh's exit status.
ssh_interactive_session() {
  local ip="${1}"
  local cmd="${2}"
  local ssh_exit=0
  # shellcheck disable=SC2086
  ssh -t $SSH_OPTS "${SSH_USER:-root}@${ip}" -- "${cmd}" || ssh_exit=$?
  _show_post_session_summary "${ip}"
  return "${ssh_exit}"
}

# Wait for SSH connectivity to a server
# Usage: ssh_verify_connectivity IP [MAX_ATTEMPTS] [INITIAL_INTERVAL]
# Requires: SSH_USER (default: root), SSH_OPTS
ssh_verify_connectivity() {
  local ip="${1}"
  local max_attempts=${2:-30}
  local initial_interval=${3:-5}
  # shellcheck disable=SC2154
  generic_ssh_wait "${SSH_USER:-root}" "${ip}" "$SSH_OPTS -o ConnectTimeout=5" "echo ok" "SSH connectivity" "${max_attempts}" "${initial_interval}"
}

# Extract a value from a JSON response using bracket-notation path
# Usage: _extract_json_field JSON_STRING JS_EXPR [DEFAULT]
# The JS expression uses bracket access syntax: d['key1']['key2'][0]
# Returns DEFAULT (or empty string) on parse failure.
_extract_json_field() {
  local payload="${1}"
  local path_expr="${2}"
  local fallback_value="${3:-}"

  # Evaluate the bracket-notation path against the parsed JSON via bun.
  # Payload and expression travel through env vars so neither ever appears
  # on the command line. On any parse/lookup failure, emit the fallback.
  if ! _DATA="${payload}" _EXPR="${path_expr}" bun -e "
try {
  const d = JSON.parse(process.env._DATA);
  const expr = process.env._EXPR || '';
  // Parse bracket-notation path: d['key1']['key2'][0]
  // Extract segments from ['...'] or [N] patterns
  const segments = [];
  const re = /\[(\d+|'[^']*'|\"[^\"]*\")\]/g;
  let m;
  while ((m = re.exec(expr)) !== null) {
    let key = m[1];
    if ((key.startsWith(\"'\") && key.endsWith(\"'\")) || (key.startsWith('\"') && key.endsWith('\"'))) {
      key = key.slice(1, -1);
    } else {
      key = Number(key);
    }
    segments.push(key);
  }
  let result = d;
  for (const seg of segments) {
    if (result === null || result === undefined) { process.exit(1); }
    result = result[seg];
  }
  if (result !== undefined && result !== null) process.stdout.write(String(result) + '\n');
  else process.exit(1);
} catch { process.exit(1); }
" 2>/dev/null; then
    echo "${fallback_value}"
  fi
}

# Extract an error message from a JSON API response.
# Tries common error field patterns used by cloud provider APIs:
#   message, error, error.message, error.error_message, reason
# Falls back to the raw FALLBACK argument if no known field matches.
# Usage: extract_api_error_message JSON_STRING [FALLBACK]
extract_api_error_message() {
  local payload="${1}"
  local fallback_msg="${2:-Unknown error}"

  # Probe the well-known error fields in priority order; print the first hit.
  _DATA="${payload}" bun -e "
try {
  const d = JSON.parse(process.env._DATA);
  const e = d.error || '';
  const msg =
    (typeof e === 'object' && e !== null && (e.message || e.error_message)) ||
    d.message ||
    d.reason ||
    (typeof e === 'string' && e) ||
    '';
  if (msg) process.stdout.write(String(msg) + '\n');
  else process.exit(1);
} catch { process.exit(1); }
" 2>/dev/null && return 0

  echo "${fallback_msg}"
}

# Generic instance status polling loop
# Polls an API endpoint until the instance reaches the target status, then extracts the IP.
# Usage: generic_wait_for_instance API_FUNC ENDPOINT TARGET_STATUS STATUS_PY IP_PY IP_VAR DESCRIPTION [MAX_ATTEMPTS]
#
# Arguments:
#   API_FUNC      - Cloud API function name (e.g., "vultr_api", "do_api")
#   ENDPOINT      - API endpoint path (e.g., "/instances/$id")
#   TARGET_STATUS - Status value that means "ready" (e.g., "active", "running")
#   STATUS_PY     - Bracket-notation expression to extract status from JSON
#                   (evaluated by _extract_json_field via bun; 'd' is the parsed
#                   document — the _PY suffix is historical, this is NOT Python)
#   IP_PY         - Bracket-notation expression to extract the IP (same syntax)
#   IP_VAR        - Environment variable name to export with the IP (e.g., "VULTR_SERVER_IP")
#   DESCRIPTION   - Human-readable label for logging (e.g., "Vultr instance")
#   MAX_ATTEMPTS  - Optional, defaults to 60
#
# Example:
#   generic_wait_for_instance vultr_api "/instances/$id" "active" \
#     "d['instance']['status']" "d['instance']['main_ip']" \
#     VULTR_SERVER_IP "Instance" 60
# Single polling attempt: fetch status, check readiness, log progress.
# Returns 0 if instance is ready (IP exported), 1 to keep polling, 2 on status mismatch.
# Arguments: API_FUNC ENDPOINT TARGET_STATUS STATUS_PY IP_PY IP_VAR DESCRIPTION ATTEMPT POLL_DELAY
_poll_instance_once() {
  local api_func="${1}" endpoint="${2}" target_status="${3}"
  local status_py="${4}" ip_py="${5}" ip_var="${6}"
  local description="${7}" attempt="${8}" poll_delay="${9}"

  local response
  # || true: a transient API failure just means "poll again", not abort
  response=$("${api_func}" GET "${endpoint}" 2>/dev/null) || true

  local status
  status=$(_extract_json_field "${response}" "${status_py}" "unknown")

  if [[ "${status}" != "${target_status}" ]]; then
    log_step "${description} status: ${status} ($((attempt * poll_delay))s elapsed)"
    return 2
  fi

  local ip
  ip=$(_extract_json_field "${response}" "${ip_py}")
  if [[ -n "${ip}" ]]; then
    # SECURITY: Validate ip_var to prevent command injection
    # NOTE(review): returning 1 here makes the caller keep polling with the
    # same bad ip_var until timeout rather than aborting — confirm intended.
    if [[ ! "${ip_var}" =~ ^[A-Z_][A-Z0-9_]*$ ]]; then
      log_error "SECURITY: Invalid env var name rejected: ${ip_var}"
      return 1
    fi
    export "${ip_var}=${ip}"
    log_info "${description} ready (IP: ${ip})"
    return 0
  fi

  # Target status reached but no IP yet — keep polling
  log_step "${description} status: ${status} ($((attempt * poll_delay))s elapsed)"
  return 1
}

# Report timeout when instance polling exhausts all attempts.
_report_instance_timeout() {
  local description="${1}" target_status="${2}" total_time="${3}"
  log_error "${description} did not become ${target_status} within ${total_time}s"
  log_error ""
  log_error "The cloud provider API reported the instance is not yet ready."
  log_error ""
  log_error "This usually means:"
  log_error " - Cloud provider is experiencing delays (high load, maintenance)"
  log_error " - The region or instance type has limited capacity"
  log_error " - The instance failed to provision but the API hasn't reported it yet"
  log_error ""
  log_error "Next steps:"
  log_error " 1. Check your cloud dashboard for instance status and error messages"
  if [[ -n "${SPAWN_DASHBOARD_URL:-}" ]]; then
    log_error " ${SPAWN_DASHBOARD_URL}"
  fi
  log_error " 2. Wait 2-3 minutes and retry the spawn command"
  log_error " 3. Try a different region or instance size if this persists"
}

# Poll delay between attempts is overridable via INSTANCE_STATUS_POLL_DELAY
# (seconds, default 5); total timeout is MAX_ATTEMPTS * poll_delay.
generic_wait_for_instance() {
  local api_func="${1}" endpoint="${2}" target_status="${3}"
  local status_py="${4}" ip_py="${5}" ip_var="${6}"
  local description="${7}" max_attempts="${8:-60}"
  local poll_delay="${INSTANCE_STATUS_POLL_DELAY:-5}"

  local attempt=1
  log_step "Waiting for ${description} to become ${target_status}..."

  while [[ "${attempt}" -le "${max_attempts}" ]]; do
    _poll_instance_once "${api_func}" "${endpoint}" "${target_status}" \
      "${status_py}" "${ip_py}" "${ip_var}" \
      "${description}" "${attempt}" "${poll_delay}" && return 0
    sleep "${poll_delay}"
    attempt=$((attempt + 1))
  done

  _report_instance_timeout "${description}" "${target_status}" "$((max_attempts * poll_delay))"
  return 1
}

# ============================================================
# API token management helpers
# ============================================================

# Try to load API token from environment variable
# Returns 0 if the variable named by ENV_VAR_NAME is already set and its value
# passes character validation; the variable is NOT modified here. Returns 1
# if unset, empty, or malformed.
_load_token_from_env() {
  local env_var_name="${1}"
  local provider_name="${2}"

  local env_value="${!env_var_name}"
  if [[ -n "${env_value}" ]]; then
    # SECURITY: Validate token characters to prevent curl config injection via -K -
    # Must match the same character class used in _load_token_from_config()
    if [[ ! "${env_value}" =~ ^[a-zA-Z0-9._/@:+=,\ -]+$ ]]; then
      log_warn "${provider_name} token from environment contains invalid characters — ignoring"
      return 1
    fi
    log_info "Using ${provider_name} API token from environment"
    return 0
  fi
  return 1
}

# Try to load API token from config file
# Returns 0 if found and exports env var, 1 otherwise
_load_token_from_config() {
  local config_file="${1}"
  local env_var_name="${2}"
  local provider_name="${3}"

  # SECURITY: Validate env_var_name to prevent command injection
  if [[ ! "${env_var_name}" =~ ^[A-Z_][A-Z0-9_]*$ ]]; then
    log_error "SECURITY: Invalid env var name rejected: ${env_var_name}"
    return 1
  fi

  if [[ ! -f "${config_file}" ]]; then
    return 1
  fi

  local saved_token
  if command -v jq &>/dev/null; then
    # Prefer .api_key when non-empty, otherwise fall back to .token
    saved_token=$(jq -r '(if (.api_key // "" | length) > 0 then .api_key else (.token // "") end)' "${config_file}" 2>/dev/null)
  else
    saved_token=$(_FILE="${config_file}" bun -e "
import fs from 'fs';
const d = JSON.parse(fs.readFileSync(process.env._FILE, 'utf8'));
process.stdout.write(d.api_key || d.token || '');
" 2>/dev/null)
  fi
  if [[ -z "${saved_token}" ]]; then
    return 1
  fi

  # SECURITY: Validate token characters to prevent curl config injection via -K -
  # Similar to key-request.sh _try_load_env_var (^[a-zA-Z0-9._/@-]+$) but also
  # allows colon (:) for Fly.io FlyV1 tokens and URL-style formats,
  # plus (+) / equals (=) for base64-encoded token segments,
  # comma (,) for multi-segment macaroon tokens (fm2_...,fm2_...,fo1_...), and
  # space ( ) for Fly.io "FlyV1 " prefixed tokens.
  # Space and comma are safe inside curl -K double-quoted values.
  if [[ ! "${saved_token}" =~ ^[a-zA-Z0-9._/@:+=,\ -]+$ ]]; then
    log_warn "Saved ${provider_name} token is malformed — clearing cached credentials."
    rm -f "${config_file}" 2>/dev/null || true
    return 1
  fi

  export "${env_var_name}=${saved_token}"
  log_info "Using ${provider_name} API token from ${config_file}"
  return 0
}

# Validate token with provider API if test function provided
# Returns 0 on success, 1 on validation failure (and unsets the env var)
_validate_token_with_provider() {
  local test_func="${1}"
  local env_var_name="${2}"
  local provider_name="${3}"
  local help_url="${4:-}"

  if [[ -z "${test_func}" ]]; then
    return 0 # No validation needed
  fi

  if ! "${test_func}"; then
    log_error "Authentication failed: Invalid ${provider_name} API token"
    log_error "The token may be expired, revoked, or incorrectly copied."
    log_error ""
    log_error "How to fix:"
    if [[ -n "${help_url}" ]]; then
      log_error " 1. Get a new token from: ${help_url}"
      log_error " 2. Re-run the command and paste the new token"
      log_error " 3. Or set it directly: ${env_var_name}=your-token spawn ..."
    else
      log_error " 1. Re-run the command to enter a new token"
      log_error " 2. Or set it directly: ${env_var_name}=your-token spawn ..."
    fi
    unset "${env_var_name}"
    return 1
  fi
  return 0
}

# Save API token to config file
# Writes both .api_key and .token (same value) for compatibility with both
# field names read back by _load_token_from_config; file is chmod 600.
_save_token_to_config() {
  local config_file="${1}"
  local token="${2}"

  local config_dir
  config_dir=$(dirname "${config_file}")
  mkdir -p "${config_dir}"

  local escaped_token
  escaped_token=$(json_escape "${token}")
  printf '{\n "api_key": %s,\n "token": %s\n}\n' "${escaped_token}" "${escaped_token}" > "${config_file}"
  chmod 600 "${config_file}"
  log_info "API token saved to ${config_file}"
}

# Generic ensure API token function - eliminates duplication across providers
# Usage: ensure_api_token_with_provider PROVIDER_NAME ENV_VAR_NAME CONFIG_FILE HELP_URL TEST_FUNC
# Example: ensure_api_token_with_provider "Lambda" "LAMBDA_API_KEY" "$HOME/.config/spawn/lambda.json" \
#            "https://cloud.lambdalabs.com/api-keys" test_lambda_token
# TEST_FUNC should be a function that validates the token and returns 0 on success, 1 on failure
# TEST_FUNC is optional - if empty, no validation is performed
# Prompts go to stderr so the token (this function's stdout) can be captured
# via command substitution.
_prompt_for_api_token() {
  local provider_name="${1}"
  local help_url="${2}"

  echo "" >&2
  log_step "${provider_name} API Token Required"
  log_step "Get your token from: ${help_url}"
  echo "" >&2

  validated_read "Enter your ${provider_name} API token: " validate_api_token
}

# Reject env var names that are not plain uppercase identifiers (used before
# any indirect export to prevent command injection).
_validate_env_var_name() {
  local env_var_name="${1}"
  if [[ ! "${env_var_name}" =~ ^[A-Z_][A-Z0-9_]*$ ]]; then
    log_error "SECURITY: Invalid env var name rejected: ${env_var_name}"
    return 1
  fi
  return 0
}

ensure_api_token_with_provider() {
  local provider_name="${1}"
  local env_var_name="${2}"
  local config_file="${3}"
  local help_url="${4}"
  local test_func="${5:-}"

  check_json_processor_available || return 1

  # Try environment variable (validate if test function provided)
  if _load_token_from_env "${env_var_name}" "${provider_name}"; then
    if [[ -z "${test_func}" ]] || "${test_func}"; then
      return 0
    fi
    log_warn "${provider_name} token from environment is invalid or expired"
    unset "${env_var_name}"
  fi

  # Try config file (validate if test function provided, fall through to prompt on failure)
  if _load_token_from_config "${config_file}" "${env_var_name}" "${provider_name}"; then
    if [[ -z "${test_func}" ]] || "${test_func}"; then
      return 0
    fi
    log_warn "Saved ${provider_name} token is invalid or expired, requesting a new one..."
    unset "${env_var_name}"
  fi

  # Prompt for new token
  local token
  token=$(_prompt_for_api_token "${provider_name}" "${help_url}") || return 1

  # SECURITY: Validate env_var_name to prevent command injection
  _validate_env_var_name "${env_var_name}" || return 1

  export "${env_var_name}=${token}"

  # Validate with provider API
  if ! _validate_token_with_provider "${test_func}" "${env_var_name}" "${provider_name}" "${help_url}"; then
    return 1
  fi

  # Save to config file
  _save_token_to_config "${config_file}" "${token}"
  return 0
}

# ============================================================
# Multi-credential configuration helpers
# ============================================================

# Load multiple fields from a JSON config file in a single call.
# Outputs each field value on a separate line. Returns 1 if file missing or parse fails.
# Usage: local creds; creds=$(_load_json_config_fields CONFIG_FILE field1 field2 ...)
-# Then: { read -r var1; read -r var2; ... } <<< "${creds}" -_load_json_config_fields() { - local config_file="${1}"; shift - [[ -f "${config_file}" ]] || return 1 - - if command -v jq &>/dev/null; then - # Use jq to extract each field; output one value per line - local field - for field in "$@"; do - jq -r --arg f "${field}" '.[$f] // ""' "${config_file}" 2>/dev/null || return 1 - done - else - # SECURITY: Pass field names via env var to prevent code injection. - _FILE="${config_file}" _FIELDS="$(printf '%s\n' "$@")" bun -e " -import fs from 'fs'; -const d = JSON.parse(fs.readFileSync(process.env._FILE, 'utf8')); -for (const field of process.env._FIELDS.split('\n')) { - if (field) process.stdout.write((d[field] || '') + '\n'); -} -" 2>/dev/null || return 1 - fi -} - -# Save key-value pairs to a JSON config file using json_escape for safe encoding. -# Usage: _save_json_config CONFIG_FILE key1 val1 key2 val2 ... -_save_json_config() { - local config_file="${1}"; shift - - mkdir -p "$(dirname "${config_file}")" - - # Build JSON object from key=value pairs - local json="{" - local first=true - while [[ $# -ge 2 ]]; do - local key="${1}"; shift - local val="${1}"; shift - if [[ "${first}" == "true" ]]; then - first=false - else - json="${json}," - fi - json="${json} - \"${key}\": $(json_escape "${val}")" - done - json="${json} -} -" - - printf '%s\n' "${json}" > "${config_file}" - chmod 600 "${config_file}" - log_info "Credentials saved to ${config_file}" -} - -# Check if all env vars in a list are set (non-empty) -# Returns 0 if all set, 1 if any missing -_multi_creds_all_env_set() { - local var - for var in "$@"; do - if [[ -z "${!var:-}" ]]; then - return 1 - fi - done - return 0 -} - -# Load multi-credentials from a JSON config file into env vars. -# Returns 0 if all fields loaded, 1 if any missing. 
-# Usage: _multi_creds_load_config CONFIG_FILE env_vars[@] config_keys[@] -_multi_creds_load_config() { - local config_file="${1}" - shift - local env_count="${1}" - shift - local env_vars=("${@:1:$env_count}") - shift "${env_count}" - local config_keys=("$@") - - local creds - creds=$(_load_json_config_fields "${config_file}" "${config_keys[@]}") || return 1 - - local i=0 - while IFS= read -r value; do - if [[ -z "${value}" ]]; then - return 1 - fi - # SECURITY: Validate env var name before export - if [[ ! "${env_vars[$i]}" =~ ^[A-Z_][A-Z0-9_]*$ ]]; then - log_error "SECURITY: Invalid env var name rejected: ${env_vars[$i]}" - return 1 - fi - export "${env_vars[$i]}=${value}" - i=$((i + 1)) - done <<< "${creds}" - - [[ "${i}" -eq "${#env_vars[@]}" ]] || return 1 - return 0 -} - -# Prompt user for each credential interactively. -# Returns 1 if any input is empty or read fails. -_multi_creds_prompt() { - local provider_name="${1}" - local help_url="${2}" - shift 2 - local env_count="${1}" - shift - local env_vars=("${@:1:$env_count}") - shift "${env_count}" - local labels=("$@") - - echo "" - log_step "${provider_name} API Credentials Required" - log_step "Get your credentials from: ${help_url}" - echo "" - - local idx - for idx in $(seq 0 $((${#env_vars[@]} - 1))); do - # SECURITY: Validate env var name before export - if [[ ! "${env_vars[$idx]}" =~ ^[A-Z_][A-Z0-9_]*$ ]]; then - log_error "SECURITY: Invalid env var name rejected: ${env_vars[$idx]}" - return 1 - fi - local val - val=$(safe_read "Enter ${provider_name} ${labels[$idx]}: ") || return 1 - if [[ -z "${val}" ]]; then - log_error "${labels[$idx]} is required" - return 1 - fi - export "${env_vars[$idx]}=${val}" - done - return 0 -} - -# Validate multi-credentials using a test function. -# Unsets all env vars on failure. 
-_multi_creds_validate() { - local test_func="${1}" - local provider_name="${2}" - local help_url="${3}" - shift 3 - - if [[ -z "${test_func}" ]]; then - return 0 - fi - - log_step "Testing ${provider_name} credentials..." - if ! "${test_func}"; then - log_error "Invalid ${provider_name} credentials" - log_error "The credentials may be expired, revoked, or incorrectly copied." - log_error "" - log_error "How to fix:" - log_error " 1. Get new credentials from: ${help_url}" - log_error " 2. Re-run the command and enter the new credentials" - local v - for v in "$@"; do - unset "${v}" - done - return 1 - fi - return 0 -} - -# Generic multi-credential ensure function -# Eliminates duplicated env-var/config/prompt/test/save logic across providers -# that need more than one credential (username+password, client_id+secret, etc.) -# -# Usage: ensure_multi_credentials PROVIDER_NAME CONFIG_FILE HELP_URL TEST_FUNC \ -# "ENV_VAR:config_key:Prompt Label" ... -# -# Each credential spec is a colon-delimited triple: -# ENV_VAR - Environment variable name (e.g., CONTABO_CLIENT_ID) -# config_key - JSON key in the config file (e.g., client_id) -# Prompt Label - Human-readable label for prompting (e.g., "Client ID") -ensure_multi_credentials() { - local provider_name="${1}" - local config_file="${2}" - local help_url="${3}" - local test_func="${4:-}" - shift 4 - - check_json_processor_available || return 1 - - # Parse credential specs into parallel arrays - local env_vars=() config_keys=() labels=() - local spec - for spec in "$@"; do - env_vars+=("${spec%%:*}") - local rest="${spec#*:}" - config_keys+=("${rest%%:*}") - labels+=("${rest#*:}") - done - - local n="${#env_vars[@]}" - - # 1. All env vars already set? - if _multi_creds_all_env_set "${env_vars[@]}"; then - log_info "Using ${provider_name} credentials from environment" - return 0 - fi - - # 2. 
Try loading from config file - if _multi_creds_load_config "${config_file}" "${n}" "${env_vars[@]}" "${config_keys[@]}"; then - log_info "Using ${provider_name} credentials from ${config_file}" - return 0 - fi - - # 3. Prompt for each credential - _multi_creds_prompt "${provider_name}" "${help_url}" "${n}" "${env_vars[@]}" "${labels[@]}" || return 1 - - # 4. Validate credentials - _multi_creds_validate "${test_func}" "${provider_name}" "${help_url}" "${env_vars[@]}" || return 1 - - # 5. Save to config file - local save_args=() - local idx - for idx in $(seq 0 $((n - 1))); do - save_args+=("${config_keys[$idx]}" "${!env_vars[$idx]}") - done - _save_json_config "${config_file}" "${save_args[@]}" - return 0 -} - -# ============================================================ -# Configuration file helpers -# ============================================================ - -# Helper to create, upload, and install a config file from a heredoc or string -# Usage: upload_config_file UPLOAD_CALLBACK RUN_CALLBACK CONTENT REMOTE_PATH -# Example: upload_config_file "$upload_func" "$run_func" "$json_content" "\$HOME/.config/app.json" -upload_config_file() { - local upload_callback="${1}" - local run_callback="${2}" - local content="${3}" - local remote_path="${4}" - - local temp_file - temp_file=$(mktemp) - chmod 600 "${temp_file}" - track_temp_file "${temp_file}" - - printf '%s\n' "${content}" > "${temp_file}" - - # Use mktemp-derived randomness for the remote temp path to avoid predictable names - local rand_suffix - rand_suffix=$(basename "${temp_file}") - local temp_remote="/tmp/spawn_config_${rand_suffix}" - ${upload_callback} "${temp_file}" "${temp_remote}" - # SECURITY: remote_path must be double-quoted to prevent injection via spaces/metacharacters - # Note: Callers should use $HOME instead of ~ since tilde does not expand inside double quotes - ${run_callback} "mkdir -p \$(dirname \"${remote_path}\") && chmod 600 '${temp_remote}' && mv '${temp_remote}' 
\"${remote_path}\"" -} - -# ============================================================ -# Claude Code configuration setup -# ============================================================ - -# Setup Claude Code configuration files (settings.json, .claude.json, CLAUDE.md) -# This consolidates the config setup pattern used by all claude.sh scripts -# Usage: setup_claude_code_config OPENROUTER_KEY UPLOAD_CALLBACK RUN_CALLBACK -# -# Arguments: -# OPENROUTER_KEY - OpenRouter API key to inject into config -# UPLOAD_CALLBACK - Function to upload files: func(local_path, remote_path) -# RUN_CALLBACK - Function to run commands: func(command) -# -# Example (SSH-based clouds): -# setup_claude_code_config "$OPENROUTER_API_KEY" \ -# "upload_file $SERVER_IP" \ -# "run_server $SERVER_IP" -# -# Example (Sprite): -# setup_claude_code_config "$OPENROUTER_API_KEY" \ -# "upload_file_sprite $SPRITE_NAME" \ -# "run_sprite $SPRITE_NAME" - -# Generate Claude Code settings.json with API key -_generate_claude_code_settings() { - local openrouter_key="${1}" - local escaped_key - escaped_key=$(json_escape "${openrouter_key}") - cat << EOF -{ - "theme": "dark", - "editor": "vim", - "env": { - "CLAUDE_CODE_ENABLE_TELEMETRY": "0", - "ANTHROPIC_BASE_URL": "https://openrouter.ai/api", - "ANTHROPIC_AUTH_TOKEN": ${escaped_key} - }, - "permissions": { - "defaultMode": "bypassPermissions", - "dangerouslySkipPermissions": true - } -} -EOF -} - -# Generate Claude Code global state JSON -_generate_claude_code_state() { - cat << EOF -{ - "hasCompletedOnboarding": true, - "bypassPermissionsModeAccepted": true -} -EOF -} - -setup_claude_code_config() { - local openrouter_key="${1}" - local upload_callback="${2}" - local run_callback="${3}" - - log_step "Configuring Claude Code..." 
- - # Create ~/.claude directory - ${run_callback} "mkdir -p ~/.claude" - - # Create settings.json - local settings_json - settings_json=$(_generate_claude_code_settings "${openrouter_key}") - upload_config_file "${upload_callback}" "${run_callback}" "${settings_json}" "\$HOME/.claude/settings.json" - - # Create .claude.json global state - local global_state_json - global_state_json=$(_generate_claude_code_state) - upload_config_file "${upload_callback}" "${run_callback}" "${global_state_json}" "\$HOME/.claude.json" - - # Create empty CLAUDE.md - ${run_callback} "touch ~/.claude/CLAUDE.md" -} - -# ============================================================ -# OpenClaw configuration setup -# ============================================================ - -# Setup OpenClaw configuration files (openclaw.json) -# This consolidates the config setup pattern used by all openclaw.sh scripts -# Usage: setup_openclaw_config OPENROUTER_KEY MODEL_ID UPLOAD_CALLBACK RUN_CALLBACK -# -# Arguments: -# OPENROUTER_KEY - OpenRouter API key to inject into config -# MODEL_ID - Model ID to use (e.g., "openrouter/auto", "anthropic/claude-3.5-sonnet") -# UPLOAD_CALLBACK - Function to upload files: func(local_path, remote_path) -# RUN_CALLBACK - Function to run commands: func(command) -# -# Example (SSH-based clouds): -# setup_openclaw_config "$OPENROUTER_API_KEY" "$MODEL_ID" \ -# "upload_file $SERVER_IP" \ -# "run_server $SERVER_IP" -# -# Example (Sprite): -# setup_openclaw_config "$OPENROUTER_API_KEY" "$MODEL_ID" \ -# "upload_file_sprite $SPRITE_NAME" \ -# "run_sprite $SPRITE_NAME" -# Generate openclaw.json configuration with escaped credentials -_generate_openclaw_json() { - local openrouter_key="${1}" - local model_id="${2}" - local gateway_token="${3}" - - local escaped_key escaped_token escaped_model - escaped_key=$(json_escape "${openrouter_key}") - escaped_token=$(json_escape "${gateway_token}") - escaped_model=$(json_escape "${model_id}") - - cat << EOF -{ - "env": { - 
"OPENROUTER_API_KEY": ${escaped_key} - }, - "gateway": { - "mode": "local", - "auth": { - "token": ${escaped_token} - } - }, - "agents": { - "defaults": { - "model": { - "primary": ${escaped_model} - } - } - } -} -EOF -} - -setup_openclaw_config() { - local openrouter_key="${1}" - local model_id="${2}" - local upload_callback="${3}" - local run_callback="${4}" - - log_step "Configuring openclaw..." - - # Create ~/.openclaw directory - ${run_callback} "mkdir -p ~/.openclaw" - - # Generate a random gateway token - local gateway_token - gateway_token=$(openssl rand -hex 16) - - # Create and upload openclaw.json config - local openclaw_json - openclaw_json=$(_generate_openclaw_json "${openrouter_key}" "${model_id}" "${gateway_token}") - upload_config_file "${upload_callback}" "${run_callback}" "${openclaw_json}" "\$HOME/.openclaw/openclaw.json" -} - -# Start OpenClaw gateway as a fully detached daemon -# Usage: start_openclaw_gateway RUN_CALLBACK -# -# Arguments: -# RUN_CALLBACK - Function to run commands: func(command) -# -# SSH/exec channels hang if a backgrounded daemon inherits the session's file -# descriptors. setsid creates a new session, fully detaching the gateway so -# the channel can close. Falls back to nohup where setsid is unavailable -# (e.g. macOS local — no SSH, so the hang doesn't apply). -start_openclaw_gateway() { - local run_callback="${1}" - log_step "Starting OpenClaw gateway daemon..." 
- ${run_callback} "source ~/.spawnrc 2>/dev/null; export PATH=\$(npm prefix -g 2>/dev/null)/bin:\$HOME/.bun/bin:/.sprite/languages/bun/bin:\$HOME/.local/bin:\$PATH; if command -v setsid >/dev/null 2>&1; then setsid openclaw gateway > /tmp/openclaw-gateway.log 2>&1 < /dev/null & else nohup openclaw gateway > /tmp/openclaw-gateway.log 2>&1 < /dev/null & fi" -} - -# Wait for OpenClaw gateway to be ready -# Usage: wait_for_openclaw_gateway RUN_CALLBACK -# -# Arguments: -# RUN_CALLBACK - Function to run commands: func(command) -# -# Returns: -# 0 if gateway starts successfully, 1 if timeout -wait_for_openclaw_gateway() { - local run_callback="${1}" - local max_wait=60 - local elapsed=0 - - log_step "Waiting for OpenClaw gateway to start..." - - while [ $elapsed -lt $max_wait ]; do - if ${run_callback} "nc -z 127.0.0.1 18789 2>/dev/null || (command -v telnet >/dev/null && timeout 1 telnet 127.0.0.1 18789 2>&1 | grep -q Connected)"; then - log_info "Gateway ready after ${elapsed}s" - return 0 - fi - sleep 1 - elapsed=$((elapsed + 1)) - done - - log_error "OpenClaw gateway failed to start after ${max_wait}s" - log_info "Check gateway logs: cat /tmp/openclaw-gateway.log" - ${run_callback} "tail -10 /tmp/openclaw-gateway.log 2>/dev/null" || true - return 1 -} - -# ============================================================ -# Codex CLI configuration setup -# ============================================================ - -# Setup Codex CLI config.toml for OpenRouter -# Uses the native model_provider config instead of OPENAI_BASE_URL env var. -# Usage: setup_codex_config OPENROUTER_KEY UPLOAD_CALLBACK RUN_CALLBACK -setup_codex_config() { - local openrouter_key="${1}" - local upload_callback="${2}" - local run_callback="${3}" - - log_step "Configuring Codex CLI for OpenRouter..." 
- - local config_toml - config_toml=$(cat <&2 - ids+=("${id}") - if [[ -n "${default_id}" && "${id}" == "${default_id}" ]]; then - default_idx=${i} - fi - i=$((i + 1)) - done - - local choice - printf "\n" >&2 - choice=$(safe_read "Select ${prompt_text%s} [${default_idx}]: ") || choice="" - choice="${choice:-${default_idx}}" - - if [[ "${choice}" -ge 1 && "${choice}" -le "${#ids[@]}" ]] 2>/dev/null; then - echo "${ids[$((choice - 1))]}" - else - log_warn "Invalid selection '${choice}' (enter a number between 1 and ${#ids[@]}). Using default: ${default_value}" - echo "${default_value}" - fi -} - -_display_and_select() { - local prompt_text="${1}" - local default_value="${2}" - local default_id="${3:-}" - - # Read all items into array - local items_array=() - while IFS= read -r line; do - items_array+=("${line}") - done - - if [[ "${#items_array[@]}" -eq 0 ]]; then - log_warn "No ${prompt_text} available, using default: ${default_value}" - echo "${default_value}" - return - fi - - # Try to use fzf for interactive filtering if available and stdin is a TTY - if command -v fzf >/dev/null 2>&1 && [[ -t 0 ]]; then - _prepare_fzf_input "${default_id}" "${items_array[@]}" - _fzf_select "${prompt_text}" "${default_value}" "${default_id}" "${FZF_INPUT}" "${FZF_DEFAULT_LINE}" - return - fi - - # Try spawn pick for an arrow-key UI (available when the user ran `spawn`) - if command -v spawn >/dev/null 2>&1; then - # Convert pipe-delimited "id|label|extra..." → "id\tid\tlabel · extra · ..." - # so spawn pick shows the id as label and all detail fields as hint. - local spawn_input - spawn_input=$(printf '%s\n' "${items_array[@]}" | awk -F'|' '{ - val=$1; hint=""; - for (i=2; i<=NF; i++) { hint = hint (hint ? 
" \xc2\xb7 " : "") $i } - printf "%s\t%s\t%s\n", val, val, hint - }') - local picked - local spawn_default="${default_id:-${default_value}}" - picked=$(printf '%s\n' "${spawn_input}" | \ - spawn pick --prompt "Select ${prompt_text}" --default "${spawn_default}") && { - echo "${picked}" - return - } - fi - - # Fallback to numbered list when neither fzf nor spawn pick is available - _numbered_list_select "${prompt_text}" "${default_value}" "${default_id}" "${items_array[@]}" -} - -# Returns: selected ID via stdout -interactive_pick() { - local env_var_name="${1}" - local default_value="${2}" - local prompt_text="${3}" - local list_callback="${4}" - local default_id="${5:-}" - - # Check environment variable first - local env_value="${!env_var_name:-}" - if [[ -n "${env_value}" ]]; then - echo "${env_value}" - return - fi - - log_step "Fetching available ${prompt_text}..." - local items - items=$("${list_callback}") - - if [[ -z "${items}" ]]; then - log_warn "Could not fetch ${prompt_text}, using default: ${default_value}" - echo "${default_value}" - return - fi - - _display_and_select "${prompt_text}" "${default_value}" "${default_id}" <<< "${items}" -} - -# ============================================================ -# SSH key registration helpers -# ============================================================ - -# Generic SSH key check: queries the provider's API and greps for the fingerprint. -# Most providers follow this exact pattern. Use this to avoid duplicating 5-line -# check functions across every cloud lib. 
-# Usage: check_ssh_key_by_fingerprint API_FUNC ENDPOINT FINGERPRINT -# Example: check_ssh_key_by_fingerprint hetzner_api "/ssh_keys" "$fingerprint" -check_ssh_key_by_fingerprint() { - local api_func="${1}" - local endpoint="${2}" - local fingerprint="${3}" - - local existing_keys - existing_keys=$("${api_func}" GET "${endpoint}") - echo "${existing_keys}" | grep -q "${fingerprint}" -} - -# Generic SSH key registration pattern used by all cloud providers -# Eliminates ~220 lines of duplicate code across 5 provider libraries -# -# Usage: ensure_ssh_key_with_provider \ -# CHECK_CALLBACK \ -# REGISTER_CALLBACK \ -# PROVIDER_NAME \ -# [KEY_PATH] -# -# Arguments: -# CHECK_CALLBACK - Function that checks if SSH key exists with provider -# Should return 0 if key exists, 1 if not -# Function receives: fingerprint, pub_key_path -# REGISTER_CALLBACK - Function that registers SSH key with provider -# Should return 0 on success, 1 on error -# Function receives: key_name, pub_key_path -# PROVIDER_NAME - Display name of the provider (for logging) -# KEY_PATH - Optional: Path to SSH private key (default: $HOME/.ssh/id_ed25519) -# -# Example: -# ensure_ssh_key_with_provider \ -# hetzner_check_ssh_key \ -# hetzner_register_ssh_key \ -# "Hetzner" -# -# Callback implementations should use provider-specific API calls but follow -# this contract to enable shared logic for key generation and registration flow. 
-ensure_ssh_key_with_provider() { - local check_callback="${1}" - local register_callback="${2}" - local provider_name="${3}" - local key_path="${4:-${HOME}/.ssh/id_ed25519}" - local pub_path="${key_path}.pub" - - # Generate key if needed (shared function) - generate_ssh_key_if_missing "${key_path}" - - # Get fingerprint (shared function) - local fingerprint - fingerprint=$(get_ssh_fingerprint "${pub_path}") - - # Check if already registered (provider-specific) - if "${check_callback}" "${fingerprint}" "${pub_path}"; then - log_info "SSH key already registered with ${provider_name}" - return 0 - fi - - # Register the key (provider-specific) - log_step "Registering SSH key with ${provider_name}..." - local key_name - key_name="spawn-$(hostname)-$(date +%s)" - - if "${register_callback}" "${key_name}" "${pub_path}"; then - log_info "SSH key registered with ${provider_name}" - return 0 - else - log_error "Failed to register SSH key with ${provider_name}" - log_error "The API may have rejected the key format or the token lacks write permissions." - log_error "Verify your API token has SSH key management permissions, then try again." - return 1 - fi -} - -# ============================================================ -# Agent install commands (run remotely on provisioned servers) -# ============================================================ - -# Robust OpenCode install command that downloads to a file first instead of -# piping curl|tar, which breaks in container exec environments (Sprite, E2B, -# Modal, Daytona) where the binary stream can get corrupted through the exec -# layer. The upstream installer's "curl -#" flag also interferes in non-TTY -# environments. 
-opencode_install_cmd() { - printf '%s' 'OC_ARCH=$(uname -m); case "$OC_ARCH" in aarch64) OC_ARCH=arm64;; x86_64) OC_ARCH=x64;; esac; OC_OS=$(uname -s | tr A-Z a-z); mkdir -p /tmp/opencode-install "$HOME/.opencode/bin" && curl -fsSL -o /tmp/opencode-install/oc.tar.gz "https://github.com/anomalyco/opencode/releases/latest/download/opencode-${OC_OS}-${OC_ARCH}.tar.gz" && tar xzf /tmp/opencode-install/oc.tar.gz -C /tmp/opencode-install && mv /tmp/opencode-install/opencode "$HOME/.opencode/bin/" && rm -rf /tmp/opencode-install && grep -q ".opencode/bin" "$HOME/.bashrc" 2>/dev/null || echo '"'"'export PATH="$HOME/.opencode/bin:$PATH"'"'"' >> "$HOME/.bashrc"; grep -q ".opencode/bin" "$HOME/.zshrc" 2>/dev/null || echo '"'"'export PATH="$HOME/.opencode/bin:$PATH"'"'"' >> "$HOME/.zshrc" 2>/dev/null; export PATH="$HOME/.opencode/bin:$PATH"' -} - -# ============================================================ -# VM Connection Tracking -# ============================================================ - -# Save VM connection info for spawn list reconnect functionality. -# This allows users to reconnect to previously spawned VMs via `spawn list`. 
-# Usage: save_vm_connection IP USER [SERVER_ID] [SERVER_NAME] [CLOUD] [METADATA_JSON] -# Example: save_vm_connection "$DO_SERVER_IP" "root" "$DO_DROPLET_ID" "$DROPLET_NAME" "digitalocean" -# Example: save_vm_connection "$GCP_IP" "root" "" "$NAME" "gcp" '{"zone":"us-central1-a"}' -save_vm_connection() { - local ip="${1}" - local user="${2}" - local server_id="${3:-}" - local server_name="${4:-}" - local cloud="${5:-}" - local metadata="${6:-}" - - local spawn_dir="${HOME}/.spawn" - mkdir -p "${spawn_dir}" - - local conn_file="${spawn_dir}/last-connection.json" - - # Build JSON using json_escape to prevent injection via special characters - local json="{\"ip\":$(json_escape "${ip}"),\"user\":$(json_escape "${user}")" - if [[ -n "${server_id}" ]]; then - json="${json},\"server_id\":$(json_escape "${server_id}")" - fi - if [[ -n "${server_name}" ]]; then - json="${json},\"server_name\":$(json_escape "${server_name}")" - fi - if [[ -n "${cloud}" ]]; then - json="${json},\"cloud\":$(json_escape "${cloud}")" - fi - if [[ -n "${metadata}" ]]; then - json="${json},\"metadata\":${metadata}" - fi - json="${json}}" - - printf '%s\n' "${json}" > "${conn_file}" -} - -# Append launch_cmd to an existing last-connection.json file. -# Called by spawn_agent after computing the agent's launch command. -# Usage: _save_launch_cmd LAUNCH_CMD -_save_launch_cmd() { - local cmd="${1:-}" - if [[ -z "${cmd}" ]]; then return 0; fi - - local conn_file="${HOME}/.spawn/last-connection.json" - if [[ ! 
-f "${conn_file}" ]]; then return 0; fi - - # Read existing JSON content and inject launch_cmd before the closing brace - local existing - existing=$(cat "${conn_file}") - # Strip trailing } and add launch_cmd field - existing="${existing%\}}" - existing="${existing},\"launch_cmd\":$(json_escape "${cmd}")}" - printf '%s\n' "${existing}" > "${conn_file}" -} - -# ============================================================ -# Auto-initialization -# ============================================================ - -# Auto-register cleanup trap when this file is sourced -register_cleanup_trap diff --git a/shared/github-auth.sh b/shared/github-auth.sh index a095c129..17c75e10 100755 --- a/shared/github-auth.sh +++ b/shared/github-auth.sh @@ -11,23 +11,13 @@ # curl -fsSL https://raw.githubusercontent.com/OpenRouterTeam/spawn/main/shared/github-auth.sh | bash # ============================================================ -# Source shared/common.sh for logging (local-or-remote fallback) +# Logging helpers # ============================================================ -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" 2>/dev/null && pwd)" -if [[ -n "${SCRIPT_DIR:-}" && -f "${SCRIPT_DIR}/common.sh" ]]; then - source "${SCRIPT_DIR}/common.sh" -else - eval "$(curl -fsSL https://raw.githubusercontent.com/OpenRouterTeam/spawn/main/shared/common.sh)" -fi - -# Fallback log functions if common.sh failed to load -if ! 
type log_info &>/dev/null 2>&1; then - log_info() { printf '[github-auth] %s\n' "$*" >&2; } - log_step() { printf '[github-auth] %s\n' "$*" >&2; } - log_warn() { printf '[github-auth] WARNING: %s\n' "$*" >&2; } - log_error() { printf '[github-auth] ERROR: %s\n' "$*" >&2; } -fi +log_info() { printf '[github-auth] %s\n' "$*" >&2; } +log_step() { printf '[github-auth] %s\n' "$*" >&2; } +log_warn() { printf '[github-auth] WARNING: %s\n' "$*" >&2; } +log_error() { printf '[github-auth] ERROR: %s\n' "$*" >&2; } # ============================================================ # ensure_gh_cli — Install gh CLI if not already present diff --git a/shared/key-request.sh b/shared/key-request.sh index 5e297b40..8aa5eb79 100644 --- a/shared/key-request.sh +++ b/shared/key-request.sh @@ -108,7 +108,7 @@ process.stdout.write(d[process.env._VAR] || d.api_key || d.token || ''); # - _ . / @ (standard API key chars) # : + = (base64 segments, URL-style formats) # space (Fly.io "FlyV1 " prefixed tokens) - # Must match shared/common.sh _load_token_from_config regex + # Must match CLI's loadTokenFromConfig regex in cli/src/digitalocean/digitalocean.ts if [[ ! "${val}" =~ ^[a-zA-Z0-9._/@:+=\ -]+$ ]]; then log "SECURITY: Invalid characters in config value for ${var_name}" return 1