mirror of
https://github.com/OpenRouterTeam/spawn.git
synced 2026-04-28 03:49:31 +00:00
refactor: remove shared/common.sh and 27 subprocess-heavy test files (#1728)
shared/common.sh (3852 lines) was dead code — the entire architecture was rewritten to TypeScript in cli/src/. No agent scripts source it anymore. The only consumer was github-auth.sh which just needed 4 log functions (now inlined). Remove 27 test files that spawned ~800+ real bash/bun subprocesses per run (the root cause of slow bun test). Every shared-common-*.test.ts file forked a real bash shell per test case to source shared/common.sh. CLI subprocess tests spawned `bun run index.ts` per assertion. These were integration tests, not unit tests. Also removes: - mock-tests CI job from test.yml (ran test/mock.sh which opens browser) - Stale plan files referencing deleted infrastructure - All CLAUDE.md/README.md references to the old lib/common.sh pattern Co-authored-by: lab <6723574+louisgv@users.noreply.github.com> Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
parent
738ad18fee
commit
60986e5a05
37 changed files with 26 additions and 17853 deletions
|
|
@ -1,218 +0,0 @@
|
|||
# Refactor: Cloud Adapter + Agent Runner System
|
||||
|
||||
## Context
|
||||
|
||||
149 agent scripts across 11 clouds share ~70% identical boilerplate (auth, SSH key, provision, wait, API key). Only the agent-specific parts differ (install, env vars, config, launch). The refactor introduces a standard `cloud_*` adapter interface and a `spawn_agent` runner that eliminates this duplication.
|
||||
|
||||
## Architecture
|
||||
|
||||
### 1. Cloud Adapter Interface (added to each `{cloud}/lib/common.sh`)
|
||||
|
||||
Every cloud adds 7 standard functions at the bottom of its `lib/common.sh`. These bind cloud-specific globals (IP, sandbox ID, sprite name) so callers never need to know them:
|
||||
|
||||
```bash
|
||||
cloud_authenticate() # Ensure creds + SSH key (if applicable)
|
||||
cloud_provision(name) # Create server, set internal globals
|
||||
cloud_wait_ready() # Wait for connectivity + cloud-init
|
||||
cloud_run(cmd) # Execute command on server
|
||||
cloud_upload(local, remote) # Upload file to server
|
||||
cloud_interactive(cmd) # Start interactive session
|
||||
cloud_label() # Return display name string
|
||||
```
|
||||
|
||||
**SSH-based clouds** (hetzner, digitalocean, gcp, aws-lightsail, oracle, ovh) — thin wrappers:
|
||||
```bash
|
||||
cloud_run() { run_server "${HETZNER_SERVER_IP}" "$1"; }
|
||||
cloud_upload() { upload_file "${HETZNER_SERVER_IP}" "$1" "$2"; }
|
||||
cloud_interactive() { interactive_session "${HETZNER_SERVER_IP}" "$1"; }
|
||||
```
|
||||
|
||||
**CLI-based clouds** (fly, daytona, sprite) — delegate to their CLI wrappers:
|
||||
```bash
|
||||
cloud_run() { run_server "$1"; } # fly/daytona: no IP arg
|
||||
cloud_run() { run_sprite "${SPRITE_NAME}" "$1"; } # sprite
|
||||
```
|
||||
|
||||
**Local** — no-ops for provision/wait:
|
||||
```bash
|
||||
cloud_provision() { :; }
|
||||
cloud_wait_ready() { :; }
|
||||
cloud_run() { eval "$1"; }
|
||||
```
|
||||
|
||||
### 2. `spawn_agent` Runner (added to `shared/common.sh`)
|
||||
|
||||
~60 lines. Orchestrates the common flow, calling agent-defined hooks where needed:
|
||||
|
||||
```bash
|
||||
spawn_agent() {
|
||||
local agent_key="$1"
|
||||
|
||||
# 1. Authenticate cloud
|
||||
cloud_authenticate
|
||||
|
||||
# 2. Pre-provision prompts (github auth if agent wants it)
|
||||
if _fn_exists agent_pre_provision; then agent_pre_provision; fi
|
||||
|
||||
# 3. Provision
|
||||
local server_name
|
||||
server_name=$(get_server_name)
|
||||
cloud_provision "${server_name}"
|
||||
|
||||
# 4. Wait for readiness
|
||||
cloud_wait_ready
|
||||
|
||||
# 5. Install agent (hook or default)
|
||||
if _fn_exists agent_install; then
|
||||
agent_install
|
||||
fi
|
||||
|
||||
# 6. Get API key
|
||||
get_or_prompt_api_key
|
||||
|
||||
# 7. Model selection (if agent needs it)
|
||||
if [[ -n "${AGENT_MODEL_PROMPT:-}" ]]; then
|
||||
MODEL_ID=$(get_model_id_interactive "${AGENT_MODEL_DEFAULT:-openrouter/auto}" "${agent_key}")
|
||||
fi
|
||||
|
||||
# 8. Inject env vars (hook provides the vars)
|
||||
_spawn_inject_env_vars
|
||||
|
||||
# 9. Agent-specific config (optional hook)
|
||||
if _fn_exists agent_configure; then agent_configure; fi
|
||||
|
||||
# 10. Save connection info (optional hook)
|
||||
if _fn_exists agent_save_connection; then agent_save_connection; fi
|
||||
|
||||
# 11. Pre-launch (optional hook, e.g., start gateway daemon)
|
||||
if _fn_exists agent_pre_launch; then agent_pre_launch; fi
|
||||
|
||||
# 12. Launch
|
||||
local launch_cmd
|
||||
launch_cmd=$(agent_launch_cmd)
|
||||
launch_session "$(cloud_label)" cloud_interactive "${launch_cmd}"
|
||||
}
|
||||
```
|
||||
|
||||
Helper for env injection — uses `cloud_run`/`cloud_upload` directly:
|
||||
```bash
|
||||
_spawn_inject_env_vars() {
|
||||
log_step "Setting up environment variables..."
|
||||
local env_temp; env_temp=$(mktemp)
|
||||
chmod 600 "${env_temp}"; track_temp_file "${env_temp}"
|
||||
agent_env_vars > "${env_temp}" # Hook: agent defines this
|
||||
cloud_upload "${env_temp}" "/tmp/env_config"
|
||||
cloud_run "cat /tmp/env_config >> ~/.bashrc && cat /tmp/env_config >> ~/.zshrc && rm /tmp/env_config"
|
||||
offer_github_auth cloud_run
|
||||
}
|
||||
```
|
||||
|
||||
`_fn_exists` helper (bash 3.2 compatible):
|
||||
```bash
|
||||
_fn_exists() { type "$1" 2>/dev/null | head -1 | grep -q 'function'; }
|
||||
```
|
||||
|
||||
### 3. Agent Script Pattern (after refactor)
|
||||
|
||||
**Simple agent** — e.g., `hetzner/aider.sh` (was 37 lines → ~25 lines):
|
||||
```bash
|
||||
#!/bin/bash
|
||||
set -eo pipefail
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" 2>/dev/null && pwd)"
|
||||
if [[ -f "${SCRIPT_DIR}/lib/common.sh" ]]; then
|
||||
source "${SCRIPT_DIR}/lib/common.sh"
|
||||
else
|
||||
eval "$(curl -fsSL https://raw.githubusercontent.com/OpenRouterTeam/spawn/main/hetzner/lib/common.sh)"
|
||||
fi
|
||||
|
||||
log_info "Aider on Hetzner Cloud"
|
||||
echo ""
|
||||
|
||||
AGENT_MODEL_PROMPT=1
|
||||
AGENT_MODEL_DEFAULT="openrouter/auto"
|
||||
|
||||
agent_install() {
|
||||
install_agent "Aider" "pip install aider-chat 2>/dev/null || pip3 install aider-chat" cloud_run
|
||||
verify_agent "Aider" "command -v aider && aider --version" "pip install aider-chat" cloud_run
|
||||
}
|
||||
agent_env_vars() { generate_env_config "OPENROUTER_API_KEY=${OPENROUTER_API_KEY}"; }
|
||||
agent_launch_cmd() { printf 'source ~/.zshrc && aider --model openrouter/%s' "${MODEL_ID}"; }
|
||||
|
||||
spawn_agent "Aider"
|
||||
```
|
||||
|
||||
**Complex agent** — e.g., `hetzner/claude.sh`:
|
||||
```bash
|
||||
agent_pre_provision() { prompt_github_auth; }
|
||||
agent_install() { install_claude_code cloud_run; }
|
||||
agent_env_vars() {
|
||||
generate_env_config \
|
||||
"OPENROUTER_API_KEY=${OPENROUTER_API_KEY}" \
|
||||
"ANTHROPIC_BASE_URL=https://openrouter.ai/api" \
|
||||
"ANTHROPIC_AUTH_TOKEN=${OPENROUTER_API_KEY}" \
|
||||
"ANTHROPIC_API_KEY=" \
|
||||
"CLAUDE_CODE_SKIP_ONBOARDING=1" \
|
||||
"CLAUDE_CODE_ENABLE_TELEMETRY=0"
|
||||
}
|
||||
agent_configure() { setup_claude_code_config "${OPENROUTER_API_KEY}" cloud_upload cloud_run; }
|
||||
agent_launch_cmd() { echo 'source ~/.bashrc 2>/dev/null; export PATH=$HOME/.claude/local/bin:$HOME/.local/bin:$HOME/.bun/bin:$PATH; claude'; }
|
||||
|
||||
spawn_agent "Claude Code"
|
||||
```
|
||||
|
||||
**Edge-case agent** — e.g., `hetzner/openclaw.sh` (needs gateway daemon):
|
||||
```bash
|
||||
agent_pre_launch() {
|
||||
cloud_run "source ~/.zshrc && nohup openclaw gateway > /tmp/openclaw-gateway.log 2>&1 &"
|
||||
sleep 2
|
||||
}
|
||||
```
|
||||
|
||||
**Cross-cloud portability**: An agent's hooks are identical across all clouds. Only the source line at the top changes (e.g., `hetzner/lib/common.sh` → `fly/lib/common.sh`).
|
||||
|
||||
### 4. Special Cases
|
||||
|
||||
**Sprite `SPAWN_PROMPT`**: Handled in `cloud_interactive()` — Sprite's adapter checks `SPAWN_PROMPT` and uses non-tty exec if set.
|
||||
|
||||
**OVH no cloud-init**: OVH's `cloud_wait_ready()` calls `install_base_deps` instead of `wait_for_cloud_init`.
|
||||
|
||||
**Local (no provisioning)**: `cloud_provision()` and `cloud_wait_ready()` are no-ops. `cloud_run` uses `eval`. Local agent scripts still use `spawn_agent` — it just skips provisioning steps naturally.
|
||||
|
||||
**`save_vm_connection`**: Clouds that need it (digitalocean, sprite) call it from `cloud_provision()` or a post-provision hook.
|
||||
|
||||
## Files to Modify
|
||||
|
||||
### Core (1 file)
|
||||
- `shared/common.sh` — Add `spawn_agent()`, `_spawn_inject_env_vars()`, `_fn_exists()`
|
||||
|
||||
### Cloud Adapters (11 files)
|
||||
- `hetzner/lib/common.sh` — Add `cloud_*` functions wrapping `run_server $HETZNER_SERVER_IP` etc.
|
||||
- `digitalocean/lib/common.sh` — Same, wrapping `$DO_SERVER_IP`
|
||||
- `gcp/lib/common.sh` — Same, wrapping `$GCP_SERVER_IP`
|
||||
- `aws-lightsail/lib/common.sh` — Same, wrapping `$LIGHTSAIL_SERVER_IP`
|
||||
- `oracle/lib/common.sh` — Same, wrapping `$OCI_SERVER_IP`
|
||||
- `ovh/lib/common.sh` — Same, wrapping `$OVH_SERVER_IP`, also `cloud_wait_ready()` calls `install_base_deps`
|
||||
- `fly/lib/common.sh` — Same, no IP arg
|
||||
- `daytona/lib/common.sh` — Same, no IP arg
|
||||
- `sprite/lib/common.sh` — Same, wrapping `$SPRITE_NAME`, handles `SPAWN_PROMPT` in `cloud_interactive`
|
||||
- `local/lib/common.sh` — No-op provision/wait, `eval` for run
|
||||
|
||||
### Agent Scripts (~149 files)
|
||||
All `{cloud}/{agent}.sh` files get rewritten to use the hook pattern + `spawn_agent`. Each shrinks from ~40-80 lines to ~20-35 lines.
|
||||
|
||||
## Execution Strategy
|
||||
|
||||
Use a team of agents working in parallel:
|
||||
1. **Agent 1**: Add `spawn_agent` + `_fn_exists` + `_spawn_inject_env_vars` to `shared/common.sh`
|
||||
2. **Agent 2**: Add `cloud_*` adapter functions to all 11 cloud `lib/common.sh` files
|
||||
3. **Agents 3-5**: Convert agent scripts (split by cloud groups)
|
||||
4. **Agent 6**: Run `bash -n` on all files + run test suite
|
||||
|
||||
Work sequentially: core first (1+2), then scripts (3-5), then verify (6).
|
||||
|
||||
## Verification
|
||||
|
||||
1. `bash -n` syntax check on every modified `.sh` file
|
||||
2. `bash test/run.sh` — full mock test suite
|
||||
3. Spot-check: read 5-6 converted scripts to verify hook pattern is correct
|
||||
4. Verify `curl|bash` compatibility — source fallback pattern preserved in all files
|
||||
|
|
@ -1,17 +0,0 @@
|
|||
# Fix: GitHub CLI auth never works on remote servers
|
||||
|
||||
## Problem
|
||||
`gh auth login` (bare, no flags) tries to open a browser — always fails on headless remote servers. Also, local GitHub tokens are never passed through to the remote.
|
||||
|
||||
## Fix (2 files)
|
||||
|
||||
### 1. `shared/github-auth.sh` — Use device code flow
|
||||
Change `gh auth login` → `gh auth login --web -p https -h github.com` (shows URL + code for user to enter in local browser)
|
||||
|
||||
### 2. `shared/common.sh` — Token passthrough
|
||||
- In `prompt_github_auth`: capture local GITHUB_TOKEN or `gh auth token`
|
||||
- In `offer_github_auth`: pass captured token as env var prefix to remote command
|
||||
|
||||
## Verification
|
||||
- `bash -n` on modified files
|
||||
- `bash test/run.sh`
|
||||
|
|
@ -63,7 +63,7 @@ curl -s -H "Authorization: Bearer ${DO_API_TOKEN}" "https://api.digitalocean.com
|
|||
curl -s -H "Authorization: Bearer ${FLY_API_TOKEN}" "https://api.machines.dev/v1/apps?org_slug=personal"
|
||||
```
|
||||
|
||||
For any other cloud directories found, read their `lib/common.sh` to discover the API base URL and auth pattern, then call equivalent GET-only endpoints.
|
||||
For any other cloud directories found, read their TypeScript module in `cli/src/{cloud}/` to discover the API base URL and auth pattern, then call equivalent GET-only endpoints.
|
||||
|
||||
## Step 4 — Save Fixtures
|
||||
|
||||
|
|
|
|||
|
|
@ -94,8 +94,8 @@ cd REPO_ROOT_PLACEHOLDER && git worktree remove WORKTREE_BASE_PLACEHOLDER/TASK_N
|
|||
2. `cd` into worktree
|
||||
3. Scan for these issues:
|
||||
|
||||
**a) Dead code**: Functions in `shared/*.sh` or `*/lib/common.sh` that are never called by any script
|
||||
- Grep for the function name across all `.sh` files
|
||||
**a) Dead code**: Functions in `shared/*.sh` or `cli/src/` that are never called
|
||||
- Grep for the function name across all source files
|
||||
- If only the definition exists (no callers), remove the function
|
||||
|
||||
**b) Stale references**: Scripts or code referencing deleted files:
|
||||
|
|
@ -106,8 +106,8 @@ cd REPO_ROOT_PLACEHOLDER && git worktree remove WORKTREE_BASE_PLACEHOLDER/TASK_N
|
|||
**c) Python usage**: Any `python3 -c` or `python -c` calls in shell scripts
|
||||
- Replace with `bun eval` or `jq` as appropriate per CLAUDE.md rules
|
||||
|
||||
**d) Duplicate utilities**: Same helper function defined in multiple cloud `lib/common.sh` files
|
||||
- If identical, move to `shared/common.sh` and have cloud libs call the shared version
|
||||
**d) Duplicate utilities**: Same helper function defined in multiple TypeScript cloud modules
|
||||
- If identical, move to `cli/src/shared/` and have cloud modules import it
|
||||
|
||||
**e) Stale comments**: Comments referencing removed infrastructure, old test files, or deleted functions
|
||||
- Remove or update these comments
|
||||
|
|
|
|||
49
.github/workflows/test.yml
vendored
49
.github/workflows/test.yml
vendored
|
|
@ -30,52 +30,3 @@ jobs:
|
|||
- name: Verify cloud bundles build
|
||||
run: bun run cli/build-clouds.ts
|
||||
|
||||
mock-tests:
|
||||
name: Mock Tests
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 10
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Setup Bun
|
||||
uses: oven-sh/setup-bun@v2
|
||||
|
||||
- name: Install dependencies
|
||||
working-directory: cli
|
||||
run: bun install
|
||||
|
||||
- name: Run mock tests
|
||||
id: tests
|
||||
env:
|
||||
NO_COLOR: 1
|
||||
run: |
|
||||
set +e
|
||||
bash test/mock.sh 2>&1 | tee /tmp/mock-output.log
|
||||
echo "exit_code=${PIPESTATUS[0]}" >> "$GITHUB_OUTPUT"
|
||||
|
||||
- name: Post summary
|
||||
if: always()
|
||||
run: |
|
||||
echo '## Mock Test Results' >> "$GITHUB_STEP_SUMMARY"
|
||||
echo '```' >> "$GITHUB_STEP_SUMMARY"
|
||||
grep -E 'Results:' /tmp/mock-output.log >> "$GITHUB_STEP_SUMMARY" || true
|
||||
echo '```' >> "$GITHUB_STEP_SUMMARY"
|
||||
FAILURES=$(grep '✗' /tmp/mock-output.log | head -50)
|
||||
if [[ -n "$FAILURES" ]]; then
|
||||
echo '' >> "$GITHUB_STEP_SUMMARY"
|
||||
echo '<details><summary>Failures (first 50)</summary>' >> "$GITHUB_STEP_SUMMARY"
|
||||
echo '' >> "$GITHUB_STEP_SUMMARY"
|
||||
echo '```' >> "$GITHUB_STEP_SUMMARY"
|
||||
printf '%s\n' "$FAILURES" >> "$GITHUB_STEP_SUMMARY"
|
||||
echo '```' >> "$GITHUB_STEP_SUMMARY"
|
||||
echo '</details>' >> "$GITHUB_STEP_SUMMARY"
|
||||
fi
|
||||
|
||||
- name: Check results
|
||||
if: always()
|
||||
run: |
|
||||
if [[ "${{ steps.tests.outputs.exit_code }}" != "0" ]]; then
|
||||
echo "Mock tests failed (exit code ${{ steps.tests.outputs.exit_code }})"
|
||||
exit 1
|
||||
fi
|
||||
|
|
|
|||
95
CLAUDE.md
95
CLAUDE.md
|
|
@ -17,24 +17,10 @@ When run via `./discovery.sh`, your job is to pick ONE of these tasks and execut
|
|||
|
||||
Look at `manifest.json` → `matrix` for any `"missing"` entry. To implement it:
|
||||
|
||||
- Find the **cloud's** `lib/common.sh` — it has all the provider-specific primitives (create server, run command, upload file, interactive session)
|
||||
- Find the **agent's** existing script on another cloud — it shows the install steps, config files, env vars, and launch command
|
||||
- Combine them: use the cloud's primitives to execute the agent's setup steps
|
||||
- The agent scripts are thin bash wrappers that bootstrap bun and run the TypeScript CLI
|
||||
- The script goes at `{cloud}/{agent}.sh`
|
||||
|
||||
**Pattern for every script:**
|
||||
```
|
||||
1. Source {cloud}/lib/common.sh (local or remote fallback)
|
||||
2. Authenticate with cloud provider
|
||||
3. Provision server/VM
|
||||
4. Wait for readiness
|
||||
5. Install the agent
|
||||
6. Get OpenRouter API key (env var or OAuth)
|
||||
7. Inject env vars into shell config
|
||||
8. Write agent-specific config files
|
||||
9. Launch interactive session
|
||||
```
|
||||
|
||||
**OpenRouter injection is mandatory.** Every agent script MUST:
|
||||
- Set `OPENROUTER_API_KEY` in the shell environment
|
||||
- Set provider-specific env vars (e.g., `ANTHROPIC_BASE_URL=https://openrouter.ai/api`)
|
||||
|
|
@ -62,7 +48,7 @@ We are currently shipping with **9 curated clouds** (sorted by price):
|
|||
- **Test coverage is mandatory** (see "Mock Test Infrastructure" section below)
|
||||
|
||||
Steps to add one:
|
||||
1. Create `{cloud}/lib/common.sh` with the provider's primitives
|
||||
1. Add cloud-specific TypeScript module in `cli/src/{cloud}/`
|
||||
2. Add an entry to `manifest.json` → `clouds`
|
||||
3. Add `"missing"` entries to the matrix for every existing agent
|
||||
4. Implement at least 2-3 agent scripts to prove the lib works
|
||||
|
|
@ -116,10 +102,10 @@ spawn/
|
|||
package.json # npm package (@openrouter/spawn)
|
||||
install.sh # One-liner installer (bun → npm → auto-install bun)
|
||||
shared/
|
||||
common.sh # Provider-agnostic shared utilities
|
||||
github-auth.sh # Standalone GitHub CLI auth helper
|
||||
key-request.sh # API key provisioning helpers (used by QA)
|
||||
{cloud}/
|
||||
lib/common.sh # Cloud-specific functions (sources shared/common.sh)
|
||||
{agent}.sh # Agent deployment scripts
|
||||
{agent}.sh # Agent deployment scripts (thin bash → bun wrappers)
|
||||
.claude/skills/setup-agent-team/
|
||||
trigger-server.ts # HTTP trigger server (concurrent runs, dedup)
|
||||
discovery.sh # Discovery cycle script (fill gaps, scout new clouds/agents)
|
||||
|
|
@ -154,61 +140,13 @@ The only documentation files allowed in the repository are:
|
|||
|
||||
If you need to create documentation during development, write it to `.docs/` and add `.docs/` to `.gitignore`.
|
||||
|
||||
### Architecture: Shared Library Pattern
|
||||
### Architecture
|
||||
|
||||
**`shared/common.sh`** - Core utilities used by all clouds:
|
||||
- **Logging**: `log_info`, `log_warn`, `log_error` (colored output)
|
||||
- **Input handling**: `safe_read` (works in interactive and piped contexts)
|
||||
- **OAuth flow**: `try_oauth_flow`, `get_openrouter_api_key_oauth` (browser-based auth)
|
||||
- **Network utilities**: `nc_listen` (cross-platform netcat wrapper), `open_browser`
|
||||
- **SSH helpers**: `generate_ssh_key_if_missing`, `get_ssh_fingerprint`, `generic_ssh_wait`
|
||||
- **Security**: `validate_model_id`, `json_escape`
|
||||
All cloud provisioning and agent setup logic lives in TypeScript under `cli/src/`. Agent scripts (`{cloud}/{agent}.sh`) are thin bash wrappers that bootstrap bun and invoke the CLI.
|
||||
|
||||
**`{cloud}/lib/common.sh`** - Cloud-specific extensions:
|
||||
- Sources `shared/common.sh` at the top
|
||||
- Adds provider-specific functions:
|
||||
- **Sprite**: `ensure_sprite_installed`, `get_sprite_name`, `run_sprite`, etc.
|
||||
- **Hetzner**: API wrappers for server creation, SSH key management, etc.
|
||||
- **DigitalOcean**: Droplet provisioning, API calls, etc.
|
||||
- **Vultr**: Instance management via REST API
|
||||
- **Linode**: Linode-specific provisioning functions
|
||||
**`shared/github-auth.sh`** — Standalone GitHub CLI installer + OAuth login helper. Used by `cli/src/shared/agent-setup.ts` to set up `gh` on remote VMs.
|
||||
|
||||
**Agent scripts** (`{cloud}/{agent}.sh`):
|
||||
1. Source their cloud's `lib/common.sh` (which auto-sources `shared/common.sh`)
|
||||
2. Use shared functions for logging, OAuth, SSH setup
|
||||
3. Use cloud functions for provisioning and connecting to servers
|
||||
4. Deploy the specific agent with its configuration
|
||||
|
||||
### Why This Structure?
|
||||
|
||||
- **DRY principle**: OAuth, logging, SSH logic written once in `shared/common.sh`
|
||||
- **Consistency**: All scripts use same authentication and error handling patterns
|
||||
- **Maintainability**: Bug fixes in shared code benefit all providers automatically
|
||||
- **Extensibility**: New clouds only need to implement provider-specific logic
|
||||
- **Testability**: Shared functions can be tested independently
|
||||
|
||||
### Source Pattern
|
||||
|
||||
Every cloud's `lib/common.sh` starts with:
|
||||
|
||||
```bash
|
||||
#!/bin/bash
|
||||
# Cloud-specific functions for {provider}
|
||||
|
||||
# Source shared provider-agnostic functions
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
source "$SCRIPT_DIR/../../shared/common.sh" || {
|
||||
echo "ERROR: Failed to load shared/common.sh" >&2
|
||||
exit 1
|
||||
}
|
||||
|
||||
# ... cloud-specific functions below ...
|
||||
```
|
||||
|
||||
This pattern ensures:
|
||||
- Shared utilities are always available
|
||||
- Path resolution works when sourced from any location
|
||||
- Script fails fast if shared library is missing
|
||||
**`shared/key-request.sh`** — API key provisioning helpers sourced by the QA harness (`qa.sh`) for loading cloud credentials from `~/.config/spawn/{cloud}.json`.
|
||||
|
||||
## Shell Script Rules
|
||||
|
||||
|
|
@ -218,16 +156,6 @@ These rules are **non-negotiable** — violating them breaks remote execution fo
|
|||
Every script MUST work when executed via `bash <(curl -fsSL URL)`:
|
||||
- **NEVER** use relative paths for sourcing (`source ./lib/...`, `source ../shared/...`)
|
||||
- **NEVER** rely on `$0`, `dirname $0`, or `BASH_SOURCE` resolving to a real filesystem path
|
||||
- **ALWAYS** use the local-or-remote fallback pattern:
|
||||
```bash
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" 2>/dev/null && pwd)"
|
||||
if [[ -f "$SCRIPT_DIR/lib/common.sh" ]]; then
|
||||
source "$SCRIPT_DIR/lib/common.sh"
|
||||
else
|
||||
eval "$(curl -fsSL https://raw.githubusercontent.com/OpenRouterTeam/spawn/main/{cloud}/lib/common.sh)"
|
||||
fi
|
||||
```
|
||||
- Similarly, `{cloud}/lib/common.sh` MUST use the same fallback for `shared/common.sh`
|
||||
|
||||
### macOS bash 3.x Compatibility
|
||||
macOS ships bash 3.2. All scripts MUST work on it:
|
||||
|
|
@ -309,9 +237,8 @@ Without these, the cloud has **no test coverage**, body validation is missing, m
|
|||
When running autonomous discovery/refactoring loops (`./discovery.sh --loop`):
|
||||
|
||||
- **Run `bash -n` on every changed .sh file** before committing — syntax errors break everything
|
||||
- **NEVER revert a prior fix** — if `shared/common.sh` was changed to fix macOS compat, don't undo it
|
||||
- **NEVER re-introduce deleted functions** — if `write_oauth_response_file` was removed, don't call it
|
||||
- **NEVER change the source/eval fallback pattern** in lib/common.sh files — it's load-bearing for curl|bash
|
||||
- **NEVER revert a prior fix** — don't undo previously applied compatibility fixes
|
||||
- **NEVER re-introduce deleted functions** — if a function was removed, don't call it
|
||||
- **Test after EACH iteration** — don't batch multiple changes without verification
|
||||
- **If a change breaks tests, STOP** — revert and ask for guidance rather than compounding the regression
|
||||
|
||||
|
|
|
|||
10
README.md
10
README.md
|
|
@ -191,18 +191,16 @@ git config core.hooksPath .githooks
|
|||
### Structure
|
||||
|
||||
```
|
||||
{cloud}/lib/common.sh # Cloud provider primitives (provision, SSH, cleanup)
|
||||
{cloud}/{agent}.sh # Agent deployment script
|
||||
shared/common.sh # Shared utilities (OAuth, logging, SSH helpers)
|
||||
cli/ # TypeScript CLI (bun)
|
||||
{cloud}/{agent}.sh # Agent deployment script (thin bash → bun wrapper)
|
||||
cli/ # TypeScript CLI — all provisioning logic (bun)
|
||||
manifest.json # Source of truth for the matrix
|
||||
```
|
||||
|
||||
### Adding a new cloud
|
||||
|
||||
1. Create `{cloud}/lib/common.sh` with provisioning primitives
|
||||
1. Add cloud-specific TypeScript module in `cli/src/{cloud}/`
|
||||
2. Add to `manifest.json`
|
||||
3. Implement agent scripts using the cloud's primitives
|
||||
3. Implement agent scripts
|
||||
4. See [CLAUDE.md](CLAUDE.md) for full contributor guide
|
||||
|
||||
### Adding a new agent
|
||||
|
|
|
|||
|
|
@ -1,699 +0,0 @@
|
|||
import { describe, it, expect } from "bun:test";
|
||||
import { execSync } from "child_process";
|
||||
import { resolve } from "path";
|
||||
import { mkdirSync, readFileSync, rmSync, existsSync } from "fs";
|
||||
import { tmpdir } from "os";
|
||||
import { join } from "path";
|
||||
|
||||
/**
|
||||
* Tests for agent configuration and verification functions in shared/common.sh:
|
||||
* - verify_agent_installed: command existence and version check
|
||||
* - upload_config_file: temp file creation and callback invocation
|
||||
* - setup_claude_code_config: Claude Code settings.json + .claude.json generation
|
||||
* - setup_openclaw_config: OpenClaw openclaw.json generation
|
||||
* - setup_continue_config: Continue config.json generation
|
||||
*
|
||||
* These functions had zero test coverage despite being used by every agent
|
||||
* script across all cloud providers. They are security-relevant because they
|
||||
* inject API keys into JSON config files using json_escape.
|
||||
*
|
||||
* Each test sources shared/common.sh and calls the function in a real bash
|
||||
* subprocess to catch actual shell behavior (quoting, escaping, JSON structure).
|
||||
*
|
||||
* Agent: test-engineer
|
||||
*/
|
||||
|
||||
const REPO_ROOT = resolve(import.meta.dir, "../../..");
|
||||
const COMMON_SH = resolve(REPO_ROOT, "shared/common.sh");
|
||||
|
||||
/**
|
||||
* Run a bash snippet that sources shared/common.sh first.
|
||||
* Returns { exitCode, stdout, stderr }.
|
||||
*
|
||||
* Note: Scripts run in a bash subprocess, so template variables must be
|
||||
* properly escaped or injected via environment variables.
|
||||
*/
|
||||
function runBash(script: string, env?: Record<string, string>): { exitCode: number; stdout: string; stderr: string } {
|
||||
const fullScript = `source "${COMMON_SH}"\n${script}`;
|
||||
const { spawnSync } = require("child_process");
|
||||
const result = spawnSync("bash", ["-c", fullScript], {
|
||||
encoding: "utf-8",
|
||||
timeout: 10000,
|
||||
stdio: ["pipe", "pipe", "pipe"],
|
||||
env: { ...process.env, ...env },
|
||||
});
|
||||
return {
|
||||
exitCode: result.status ?? 1,
|
||||
stdout: (result.stdout || "").trim(),
|
||||
stderr: (result.stderr || "").trim(),
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a temporary directory for test files.
|
||||
*/
|
||||
function createTempDir(): string {
|
||||
const dir = join(tmpdir(), `spawn-config-test-${Date.now()}-${Math.random().toString(36).slice(2)}`);
|
||||
mkdirSync(dir, { recursive: true });
|
||||
return dir;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a bash script that sets up mock_upload and mock_run callbacks
|
||||
* that redirect to a temp directory instead of the user's home directory.
|
||||
* This allows tests to verify file creation without affecting the real filesystem.
|
||||
*/
|
||||
function createMockSetup(tempDir: string, configDir: string): string {
|
||||
return `
|
||||
mock_upload() { cp "$1" "$TEMP_DIR/\$(basename "$2")"; }
|
||||
mock_run() {
|
||||
local cmd="$1"
|
||||
# Replace $HOME with $TEMP_DIR
|
||||
cmd=\$(echo "$cmd" | sed "s|\\\$HOME|$TEMP_DIR|g")
|
||||
# Replace /tmp/spawn_config_* with $TEMP_DIR/spawn_config_*
|
||||
cmd=\$(echo "$cmd" | sed "s|/tmp/spawn_config_|$TEMP_DIR/spawn_config_|g")
|
||||
eval "$cmd"
|
||||
}
|
||||
HOME="$TEMP_DIR"
|
||||
`;
|
||||
}
|
||||
|
||||
|
||||
// ── verify_agent_installed ──────────────────────────────────────────────────
|
||||
|
||||
describe("verify_agent_installed", () => {
|
||||
describe("command found and verifies", () => {
|
||||
it("should return 0 for a known command (bash)", () => {
|
||||
const result = runBash('verify_agent_installed "bash" "--version" "Bash"');
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stderr).toContain("verified successfully");
|
||||
});
|
||||
|
||||
it("should return 0 for ls with --help", () => {
|
||||
const result = runBash('verify_agent_installed "ls" "--help" "ls"');
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should use --version as default verify arg", () => {
|
||||
// bash supports --version without second arg
|
||||
const result = runBash('verify_agent_installed "bash"');
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should use command name as default agent name", () => {
|
||||
const result = runBash('verify_agent_installed "bash"');
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stderr).toContain("bash");
|
||||
expect(result.stderr).toContain("verified successfully");
|
||||
});
|
||||
|
||||
it("should display custom agent name in messages", () => {
|
||||
const result = runBash('verify_agent_installed "bash" "--version" "My Custom Agent"');
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stderr).toContain("My Custom Agent");
|
||||
});
|
||||
});
|
||||
|
||||
describe("command not found", () => {
|
||||
it("should return 1 for non-existent command", () => {
|
||||
const result = runBash('verify_agent_installed "definitely_not_a_real_command_xyz"');
|
||||
expect(result.exitCode).toBe(1);
|
||||
});
|
||||
|
||||
it("should show not found error message", () => {
|
||||
const result = runBash('verify_agent_installed "nonexistent_cmd_abc" "--version" "TestAgent"');
|
||||
expect(result.exitCode).toBe(1);
|
||||
expect(result.stderr).toContain("not found in PATH");
|
||||
expect(result.stderr).toContain("TestAgent");
|
||||
});
|
||||
|
||||
it("should show troubleshooting hints on failure", () => {
|
||||
const result = runBash('verify_agent_installed "nonexistent_cmd_abc" "--version" "TestAgent"');
|
||||
expect(result.exitCode).toBe(1);
|
||||
expect(result.stderr).toContain("Possible causes");
|
||||
expect(result.stderr).toContain("How to fix");
|
||||
});
|
||||
|
||||
it("should include command name in error output", () => {
|
||||
const result = runBash('verify_agent_installed "fake_agent_xyz" "--version" "FakeAgent"');
|
||||
expect(result.exitCode).toBe(1);
|
||||
expect(result.stderr).toContain("fake_agent_xyz");
|
||||
});
|
||||
});
|
||||
|
||||
describe("command exists but verification fails", () => {
|
||||
it("should return 1 when verify command fails", () => {
|
||||
// 'false' is a valid command that always returns 1
|
||||
const result = runBash('verify_agent_installed "true" "--nonexistent-flag-xyz" "TrueCmd"');
|
||||
// true command ignores flags and succeeds, so test with a script
|
||||
// Use a custom script that exists but fails verification
|
||||
const tempDir = createTempDir();
|
||||
try {
|
||||
const scriptPath = join(tempDir, "fake-agent");
|
||||
execSync(`echo '#!/bin/bash\nif [ "$1" = "--version" ]; then exit 1; fi' > "${scriptPath}" && chmod +x "${scriptPath}"`, {
|
||||
encoding: "utf-8",
|
||||
});
|
||||
const result2 = runBash(`PATH="${tempDir}:$PATH" verify_agent_installed "fake-agent" "--version" "FakeAgent"`);
|
||||
expect(result2.exitCode).toBe(1);
|
||||
expect(result2.stderr).toContain("verification failed");
|
||||
expect(result2.stderr).toContain("returned an error");
|
||||
} finally {
|
||||
rmSync(tempDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
it("should show prerequisite hints on verification failure", () => {
|
||||
const tempDir = createTempDir();
|
||||
try {
|
||||
const scriptPath = join(tempDir, "bad-agent");
|
||||
execSync(`echo '#!/bin/bash\nexit 1' > "${scriptPath}" && chmod +x "${scriptPath}"`, {
|
||||
encoding: "utf-8",
|
||||
});
|
||||
const result = runBash(`PATH="${tempDir}:$PATH" verify_agent_installed "bad-agent" "--version" "BadAgent"`);
|
||||
expect(result.exitCode).toBe(1);
|
||||
expect(result.stderr).toContain("Missing runtime dependencies");
|
||||
} finally {
|
||||
rmSync(tempDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
// ── upload_config_file ──────────────────────────────────────────────────────
|
||||
|
||||
describe("upload_config_file", () => {
|
||||
it("should create a temp file with correct content", () => {
|
||||
const tempDir = createTempDir();
|
||||
try {
|
||||
// Use mock callbacks that record their arguments
|
||||
const result = runBash(`
|
||||
mock_upload() { cp "$1" "${tempDir}/uploaded_file"; echo "UPLOAD:$1:$2"; }
|
||||
mock_run() { echo "RUN:$1"; }
|
||||
upload_config_file "mock_upload" "mock_run" "hello world content" "/remote/path/config.json"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
// Verify the content was uploaded
|
||||
const uploadedContent = readFileSync(join(tempDir, "uploaded_file"), "utf-8");
|
||||
expect(uploadedContent.trim()).toBe("hello world content");
|
||||
} finally {
|
||||
rmSync(tempDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
it("should invoke upload callback with temp file and remote temp path", () => {
|
||||
const result = runBash(`
|
||||
mock_upload() { echo "UPLOAD:$1:$2"; }
|
||||
mock_run() { echo "RUN:$1"; }
|
||||
upload_config_file "mock_upload" "mock_run" "test content" "~/.config/app.json"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
// Should contain UPLOAD line
|
||||
expect(result.stdout).toContain("UPLOAD:");
|
||||
// Remote temp path should contain spawn_config prefix
|
||||
expect(result.stdout).toContain("spawn_config");
|
||||
// Should have a basename of the remote path
|
||||
expect(result.stdout).toContain("app.json");
|
||||
});
|
||||
|
||||
it("should invoke run callback with mv command", () => {
|
||||
const result = runBash(`
|
||||
mock_upload() { echo "UPLOAD"; }
|
||||
mock_run() { echo "RUN:$1"; }
|
||||
upload_config_file "mock_upload" "mock_run" "test" "~/.config/test.json"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
// Should run mv to move temp file to final path with chmod for permissions
|
||||
expect(result.stdout).toContain("mv");
|
||||
expect(result.stdout).toContain("~/.config/test.json");
|
||||
});
|
||||
|
||||
it("should preserve multiline content", () => {
|
||||
const tempDir = createTempDir();
|
||||
try {
|
||||
const result = runBash(`
|
||||
mock_upload() { cp "$1" "${tempDir}/uploaded"; }
|
||||
mock_run() { :; }
|
||||
upload_config_file "mock_upload" "mock_run" '{"key": "value",
|
||||
"nested": true}' "/remote/config.json"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
const content = readFileSync(join(tempDir, "uploaded"), "utf-8").trim();
|
||||
expect(content).toContain('"key": "value"');
|
||||
expect(content).toContain('"nested": true');
|
||||
} finally {
|
||||
rmSync(tempDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
it("should handle special characters in content", () => {
|
||||
const tempDir = createTempDir();
|
||||
try {
|
||||
const result = runBash(`
|
||||
mock_upload() { cp "$1" "${tempDir}/uploaded"; }
|
||||
mock_run() { :; }
|
||||
upload_config_file "mock_upload" "mock_run" 'key with $dollar and "quotes"' "/remote/config"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
const content = readFileSync(join(tempDir, "uploaded"), "utf-8").trim();
|
||||
expect(content).toContain("$dollar");
|
||||
expect(content).toContain('"quotes"');
|
||||
} finally {
|
||||
rmSync(tempDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
// ── setup_claude_code_config ────────────────────────────────────────────────
|
||||
|
||||
describe("setup_claude_code_config", () => {
|
||||
describe("generates valid JSON", () => {
|
||||
it("should produce valid settings.json", () => {
|
||||
const result = runBash(`
|
||||
mock_upload() { echo "UPLOAD $2"; }
|
||||
mock_run() { echo "RUN: $1"; }
|
||||
setup_claude_code_config "sk-or-v1-test-key-123" "mock_upload" "mock_run"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("settings.json");
|
||||
});
|
||||
|
||||
it("should include OpenRouter base URL in settings", () => {
|
||||
const result = runBash(`
|
||||
mock_upload() { cat "$1"; }
|
||||
mock_run() { :; }
|
||||
setup_claude_code_config "sk-or-v1-test" "mock_upload" "mock_run"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("https://openrouter.ai/api");
|
||||
});
|
||||
|
||||
it("should include API key in settings via json_escape", () => {
|
||||
const result = runBash(`
|
||||
mock_upload() { cat "$1"; }
|
||||
mock_run() { :; }
|
||||
setup_claude_code_config "my-test-api-key-value" "mock_upload" "mock_run"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("my-test-api-key-value");
|
||||
});
|
||||
|
||||
it("should set bypass permissions in settings", () => {
|
||||
const result = runBash(`
|
||||
mock_upload() { cat "$1"; }
|
||||
mock_run() { :; }
|
||||
setup_claude_code_config "key123" "mock_upload" "mock_run"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("dangerouslySkipPermissions");
|
||||
});
|
||||
|
||||
it("should disable telemetry in settings", () => {
|
||||
const result = runBash(`
|
||||
mock_upload() { cat "$1"; }
|
||||
mock_run() { :; }
|
||||
setup_claude_code_config "key123" "mock_upload" "mock_run"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("CLAUDE_CODE_ENABLE_TELEMETRY");
|
||||
});
|
||||
|
||||
it("should produce valid .claude.json with onboarding completed", () => {
|
||||
const tempDir = createTempDir();
|
||||
try {
|
||||
const result = runBash(`
|
||||
mock_upload() { cp "$1" "$TEMP_DIR/\$(basename "$2")"; }
|
||||
mock_run() {
|
||||
local cmd="$1"
|
||||
# Replace $HOME with $TEMP_DIR
|
||||
cmd=\$(echo "$cmd" | sed "s|\\\$HOME|$TEMP_DIR|g")
|
||||
# Replace /tmp/spawn_config_* with $TEMP_DIR/spawn_config_*
|
||||
cmd=\$(echo "$cmd" | sed "s|/tmp/spawn_config_|$TEMP_DIR/spawn_config_|g")
|
||||
eval "$cmd"
|
||||
}
|
||||
HOME="$TEMP_DIR"
|
||||
setup_claude_code_config "key" "mock_upload" "mock_run"
|
||||
`, { TEMP_DIR: tempDir });
|
||||
expect(result.exitCode).toBe(0);
|
||||
// List all files recursively to find .claude.json
|
||||
const output = execSync(`find "${tempDir}" -type f 2>/dev/null`, { encoding: "utf-8" }).trim();
|
||||
const files = output.split("\n").filter(f => f);
|
||||
const claudeFile = files.find(f => f.includes(".claude.json"));
|
||||
expect(claudeFile).toBeDefined();
|
||||
const content = readFileSync(claudeFile!, "utf-8");
|
||||
const parsed = JSON.parse(content);
|
||||
expect(parsed.hasCompletedOnboarding).toBe(true);
|
||||
} finally {
|
||||
rmSync(tempDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
it("should create both settings.json and .claude.json files", () => {
|
||||
const tempDir = createTempDir();
|
||||
try {
|
||||
const result = runBash(`
|
||||
mock_upload() { cp "$1" "$TEMP_DIR/\$(basename "$2")"; }
|
||||
mock_run() {
|
||||
local cmd="$1"
|
||||
# Replace $HOME with $TEMP_DIR
|
||||
cmd=\$(echo "$cmd" | sed "s|\\\$HOME|$TEMP_DIR|g")
|
||||
# Replace /tmp/spawn_config_* with $TEMP_DIR/spawn_config_*
|
||||
cmd=\$(echo "$cmd" | sed "s|/tmp/spawn_config_|$TEMP_DIR/spawn_config_|g")
|
||||
eval "$cmd"
|
||||
}
|
||||
HOME="$TEMP_DIR"
|
||||
setup_claude_code_config "key" "mock_upload" "mock_run"
|
||||
`, { TEMP_DIR: tempDir });
|
||||
expect(result.exitCode).toBe(0);
|
||||
const files = execSync(`find "${tempDir}" -type f`, { encoding: "utf-8" }).trim().split("\n").filter(f => f);
|
||||
expect(files.some(f => f.includes("settings.json"))).toBe(true);
|
||||
expect(files.some(f => f.includes(".claude.json"))).toBe(true);
|
||||
} finally {
|
||||
rmSync(tempDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
it("should invoke run callback to create .claude directory", () => {
|
||||
const result = runBash(`
|
||||
mock_upload() { :; }
|
||||
mock_run() { echo "CMD:$1"; }
|
||||
setup_claude_code_config "key" "mock_upload" "mock_run"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("CMD:mkdir -p ~/.claude");
|
||||
});
|
||||
|
||||
it("should invoke run callback to create CLAUDE.md", () => {
|
||||
const result = runBash(`
|
||||
mock_upload() { :; }
|
||||
mock_run() { echo "CMD:$1"; }
|
||||
setup_claude_code_config "key" "mock_upload" "mock_run"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("CMD:touch ~/.claude/CLAUDE.md");
|
||||
});
|
||||
});
|
||||
|
||||
describe("json_escape security", () => {
|
||||
it("should safely escape API key with double quotes", () => {
|
||||
const tempDir = createTempDir();
|
||||
try {
|
||||
const result = runBash(`
|
||||
mock_upload() { cp "$1" "$TEMP_DIR/\$(basename "$2")"; }
|
||||
mock_run() {
|
||||
local cmd="$1"
|
||||
# Replace $HOME with $TEMP_DIR
|
||||
cmd=\$(echo "$cmd" | sed "s|\\\$HOME|$TEMP_DIR|g")
|
||||
# Replace /tmp/spawn_config_* with $TEMP_DIR/spawn_config_*
|
||||
cmd=\$(echo "$cmd" | sed "s|/tmp/spawn_config_|$TEMP_DIR/spawn_config_|g")
|
||||
eval "$cmd"
|
||||
}
|
||||
HOME="$TEMP_DIR"
|
||||
setup_claude_code_config 'key-with-"quotes"-inside' "mock_upload" "mock_run"
|
||||
`, { TEMP_DIR: tempDir });
|
||||
expect(result.exitCode).toBe(0);
|
||||
const files = execSync(`find "${tempDir}" -name "*settings.json"`, { encoding: "utf-8" }).trim().split("\n").filter(f => f);
|
||||
const settingsFile = files[0];
|
||||
expect(settingsFile).toBeDefined();
|
||||
const content = readFileSync(settingsFile, "utf-8");
|
||||
// Should be valid JSON even with quotes in the key
|
||||
const parsed = JSON.parse(content);
|
||||
expect(parsed.env.ANTHROPIC_AUTH_TOKEN).toContain("quotes");
|
||||
} finally {
|
||||
rmSync(tempDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
it("should safely escape API key with backslashes", () => {
|
||||
const tempDir = createTempDir();
|
||||
try {
|
||||
const result = runBash(`
|
||||
mock_upload() { cp "$1" "$TEMP_DIR/\$(basename "$2")"; }
|
||||
mock_run() {
|
||||
local cmd="$1"
|
||||
# Replace $HOME with $TEMP_DIR
|
||||
cmd=\$(echo "$cmd" | sed "s|\\\$HOME|$TEMP_DIR|g")
|
||||
# Replace /tmp/spawn_config_* with $TEMP_DIR/spawn_config_*
|
||||
cmd=\$(echo "$cmd" | sed "s|/tmp/spawn_config_|$TEMP_DIR/spawn_config_|g")
|
||||
eval "$cmd"
|
||||
}
|
||||
HOME="$TEMP_DIR"
|
||||
setup_claude_code_config 'key\\\\with\\\\backslashes' "mock_upload" "mock_run"
|
||||
`, { TEMP_DIR: tempDir });
|
||||
expect(result.exitCode).toBe(0);
|
||||
const files = execSync(`find "${tempDir}" -name "*settings.json"`, { encoding: "utf-8" }).trim().split("\n").filter(f => f);
|
||||
const settingsFile = files[0];
|
||||
expect(settingsFile).toBeDefined();
|
||||
const content = readFileSync(settingsFile, "utf-8");
|
||||
const parsed = JSON.parse(content);
|
||||
expect(parsed.env.ANTHROPIC_AUTH_TOKEN).toBeDefined();
|
||||
} finally {
|
||||
rmSync(tempDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
// ── setup_openclaw_config ───────────────────────────────────────────────────
|
||||
|
||||
describe("setup_openclaw_config", () => {
|
||||
it("should produce valid openclaw.json", () => {
|
||||
const tempDir = createTempDir();
|
||||
try {
|
||||
const result = runBash(`
|
||||
${createMockSetup(tempDir, ".openclaw")}
|
||||
setup_openclaw_config "sk-or-v1-test-key" "openrouter/auto" "mock_upload" "mock_run"
|
||||
`, { TEMP_DIR: tempDir });
|
||||
expect(result.exitCode).toBe(0);
|
||||
const files = execSync(`find "${tempDir}" -name "*openclaw.json"`, { encoding: "utf-8" }).trim().split("\n").filter(f => f);
|
||||
const opClawFile = files[0];
|
||||
expect(opClawFile).toBeDefined();
|
||||
const content = readFileSync(opClawFile, "utf-8");
|
||||
const parsed = JSON.parse(content);
|
||||
expect(parsed).toBeDefined();
|
||||
} finally {
|
||||
rmSync(tempDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
it("should include OPENROUTER_API_KEY in env section", () => {
|
||||
const tempDir = createTempDir();
|
||||
try {
|
||||
const result = runBash(`
|
||||
${createMockSetup(tempDir, ".openclaw")}
|
||||
setup_openclaw_config "my-api-key-123" "openrouter/auto" "mock_upload" "mock_run"
|
||||
`, { TEMP_DIR: tempDir });
|
||||
expect(result.exitCode).toBe(0);
|
||||
const files = execSync(`find "${tempDir}" -name "*openclaw.json"`, { encoding: "utf-8" }).trim().split("\n").filter(f => f);
|
||||
const opClawFile = files[0];
|
||||
expect(opClawFile).toBeDefined();
|
||||
const content = readFileSync(opClawFile, "utf-8");
|
||||
const parsed = JSON.parse(content);
|
||||
expect(parsed.env.OPENROUTER_API_KEY).toBe("my-api-key-123");
|
||||
} finally {
|
||||
rmSync(tempDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
it("should include model ID in agents.defaults.model.primary", () => {
|
||||
const tempDir = createTempDir();
|
||||
try {
|
||||
const result = runBash(`
|
||||
${createMockSetup(tempDir, ".openclaw")}
|
||||
setup_openclaw_config "key" "anthropic/claude-3.5-sonnet" "mock_upload" "mock_run"
|
||||
`, { TEMP_DIR: tempDir });
|
||||
expect(result.exitCode).toBe(0);
|
||||
const files = execSync(`find "${tempDir}" -name "*openclaw.json"`, { encoding: "utf-8" }).trim().split("\n").filter(f => f);
|
||||
const opClawFile = files[0];
|
||||
expect(opClawFile).toBeDefined();
|
||||
const content = readFileSync(opClawFile, "utf-8");
|
||||
const parsed = JSON.parse(content);
|
||||
expect(parsed.agents.defaults.model.primary).toBe("anthropic/claude-3.5-sonnet");
|
||||
} finally {
|
||||
rmSync(tempDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
it("should include gateway config with local mode", () => {
|
||||
const tempDir = createTempDir();
|
||||
try {
|
||||
const result = runBash(`
|
||||
${createMockSetup(tempDir, ".openclaw")}
|
||||
setup_openclaw_config "key" "auto" "mock_upload" "mock_run"
|
||||
`, { TEMP_DIR: tempDir });
|
||||
expect(result.exitCode).toBe(0);
|
||||
const files = execSync(`find "${tempDir}" -name "*openclaw.json"`, { encoding: "utf-8" }).trim().split("\n").filter(f => f);
|
||||
const opClawFile = files[0];
|
||||
expect(opClawFile).toBeDefined();
|
||||
const content = readFileSync(opClawFile, "utf-8");
|
||||
const parsed = JSON.parse(content);
|
||||
expect(parsed.gateway.mode).toBe("local");
|
||||
} finally {
|
||||
rmSync(tempDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
it("should generate a gateway auth token", () => {
|
||||
const tempDir = createTempDir();
|
||||
try {
|
||||
const result = runBash(`
|
||||
${createMockSetup(tempDir, ".openclaw")}
|
||||
setup_openclaw_config "key" "auto" "mock_upload" "mock_run"
|
||||
`, { TEMP_DIR: tempDir });
|
||||
expect(result.exitCode).toBe(0);
|
||||
const files = execSync(`find "${tempDir}" -name "*openclaw.json"`, { encoding: "utf-8" }).trim().split("\n").filter(f => f);
|
||||
const opClawFile = files[0];
|
||||
expect(opClawFile).toBeDefined();
|
||||
const content = readFileSync(opClawFile, "utf-8");
|
||||
const parsed = JSON.parse(content);
|
||||
// Gateway token should be a 32-char hex string (openssl rand -hex 16)
|
||||
expect(parsed.gateway.auth.token).toBeDefined();
|
||||
expect(typeof parsed.gateway.auth.token).toBe("string");
|
||||
expect(parsed.gateway.auth.token.length).toBe(32);
|
||||
expect(parsed.gateway.auth.token).toMatch(/^[0-9a-f]+$/);
|
||||
} finally {
|
||||
rmSync(tempDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
it("should invoke run callback to clean and create .openclaw directory", () => {
|
||||
const result = runBash(`
|
||||
mock_upload() { :; }
|
||||
mock_run() { echo "CMD:$1"; }
|
||||
setup_openclaw_config "key" "auto" "mock_upload" "mock_run"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("CMD:mkdir -p ~/.openclaw");
|
||||
});
|
||||
});
|
||||
|
||||
// ── setup_continue_config ───────────────────────────────────────────────────
|
||||
|
||||
describe("setup_continue_config", () => {
|
||||
it("should produce valid config.json", () => {
|
||||
const tempDir = createTempDir();
|
||||
try {
|
||||
const result = runBash(`
|
||||
${createMockSetup(tempDir, ".continue")}
|
||||
setup_continue_config "sk-or-v1-test-key" "mock_upload" "mock_run"
|
||||
`, { TEMP_DIR: tempDir });
|
||||
expect(result.exitCode).toBe(0);
|
||||
const files = execSync(`find "${tempDir}" -name "*config.json"`, { encoding: "utf-8" }).trim().split("\n").filter(f => f);
|
||||
const configFile = files[0];
|
||||
expect(configFile).toBeDefined();
|
||||
const content = readFileSync(configFile, "utf-8");
|
||||
const parsed = JSON.parse(content);
|
||||
expect(parsed).toBeDefined();
|
||||
} finally {
|
||||
rmSync(tempDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
it("should include OpenRouter model config", () => {
|
||||
const tempDir = createTempDir();
|
||||
try {
|
||||
const result = runBash(`
|
||||
${createMockSetup(tempDir, ".continue")}
|
||||
setup_continue_config "test-key" "mock_upload" "mock_run"
|
||||
`, { TEMP_DIR: tempDir });
|
||||
expect(result.exitCode).toBe(0);
|
||||
const files = execSync(`find "${tempDir}" -name "*config.json"`, { encoding: "utf-8" }).trim().split("\n").filter(f => f);
|
||||
const configFile = files[0];
|
||||
expect(configFile).toBeDefined();
|
||||
const content = readFileSync(configFile, "utf-8");
|
||||
const parsed = JSON.parse(content);
|
||||
expect(parsed.models).toBeArray();
|
||||
expect(parsed.models.length).toBeGreaterThan(0);
|
||||
expect(parsed.models[0].provider).toBe("openrouter");
|
||||
expect(parsed.models[0].model).toBe("openrouter/auto");
|
||||
} finally {
|
||||
rmSync(tempDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
it("should include API key from json_escape", () => {
|
||||
const tempDir = createTempDir();
|
||||
try {
|
||||
const result = runBash(`
|
||||
${createMockSetup(tempDir, ".continue")}
|
||||
setup_continue_config "my-continue-api-key" "mock_upload" "mock_run"
|
||||
`, { TEMP_DIR: tempDir });
|
||||
expect(result.exitCode).toBe(0);
|
||||
const files = execSync(`find "${tempDir}" -name "*config.json"`, { encoding: "utf-8" }).trim().split("\n").filter(f => f);
|
||||
const configFile = files[0];
|
||||
expect(configFile).toBeDefined();
|
||||
const content = readFileSync(configFile, "utf-8");
|
||||
const parsed = JSON.parse(content);
|
||||
expect(parsed.models[0].apiKey).toBe("my-continue-api-key");
|
||||
} finally {
|
||||
rmSync(tempDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
it("should set apiBase to OpenRouter API v1", () => {
|
||||
const tempDir = createTempDir();
|
||||
try {
|
||||
const result = runBash(`
|
||||
${createMockSetup(tempDir, ".continue")}
|
||||
setup_continue_config "key" "mock_upload" "mock_run"
|
||||
`, { TEMP_DIR: tempDir });
|
||||
expect(result.exitCode).toBe(0);
|
||||
const files = execSync(`find "${tempDir}" -name "*config.json"`, { encoding: "utf-8" }).trim().split("\n").filter(f => f);
|
||||
const configFile = files[0];
|
||||
expect(configFile).toBeDefined();
|
||||
const content = readFileSync(configFile, "utf-8");
|
||||
const parsed = JSON.parse(content);
|
||||
expect(parsed.models[0].apiBase).toBe("https://openrouter.ai/api/v1");
|
||||
} finally {
|
||||
rmSync(tempDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
it("should set title to OpenRouter", () => {
|
||||
const tempDir = createTempDir();
|
||||
try {
|
||||
const result = runBash(`
|
||||
${createMockSetup(tempDir, ".continue")}
|
||||
setup_continue_config "key" "mock_upload" "mock_run"
|
||||
`, { TEMP_DIR: tempDir });
|
||||
expect(result.exitCode).toBe(0);
|
||||
const files = execSync(`find "${tempDir}" -name "*config.json"`, { encoding: "utf-8" }).trim().split("\n").filter(f => f);
|
||||
const configFile = files[0];
|
||||
expect(configFile).toBeDefined();
|
||||
const content = readFileSync(configFile, "utf-8");
|
||||
const parsed = JSON.parse(content);
|
||||
expect(parsed.models[0].title).toBe("OpenRouter");
|
||||
} finally {
|
||||
rmSync(tempDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
it("should invoke run callback to create .continue directory", () => {
|
||||
const result = runBash(`
|
||||
mock_upload() { :; }
|
||||
mock_run() { echo "CMD:$1"; }
|
||||
setup_continue_config "key" "mock_upload" "mock_run"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("CMD:mkdir -p ~/.continue");
|
||||
});
|
||||
|
||||
it("should safely handle API key with special JSON characters", () => {
|
||||
const tempDir = createTempDir();
|
||||
try {
|
||||
const result = runBash(`
|
||||
${createMockSetup(tempDir, ".continue")}
|
||||
setup_continue_config 'key-with-"quotes"-and\\backslash' "mock_upload" "mock_run"
|
||||
`, { TEMP_DIR: tempDir });
|
||||
expect(result.exitCode).toBe(0);
|
||||
const files = execSync(`find "${tempDir}" -name "*config.json"`, { encoding: "utf-8" }).trim().split("\n").filter(f => f);
|
||||
const configFile = files[0];
|
||||
expect(configFile).toBeDefined();
|
||||
const content = readFileSync(configFile, "utf-8");
|
||||
// Must be valid JSON even with special characters
|
||||
const parsed = JSON.parse(content);
|
||||
expect(parsed.models[0].apiKey).toContain("quotes");
|
||||
} finally {
|
||||
rmSync(tempDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
});
|
||||
|
|
@ -1,556 +0,0 @@
|
|||
import { describe, it, expect } from "bun:test";
|
||||
import { spawnSync } from "child_process";
|
||||
import { resolve } from "path";
|
||||
|
||||
/**
|
||||
* Edge case tests for the CLI entry point (index.ts).
|
||||
*
|
||||
* Tests paths that are not covered by other test files:
|
||||
* - handleError formatting for various thrown value types (non-Error, number, etc.)
|
||||
* - Flag ordering edge cases (flags before, between, and after positional args)
|
||||
* - Multiple positional args beyond expected count (extra args ignored)
|
||||
* - Unknown flags combined with valid subcommands
|
||||
* - --prompt interaction with subcommands (list, agents, clouds)
|
||||
* - --prompt-file with a real file on disk (subprocess-level verification)
|
||||
* - Version flag combined with other flags
|
||||
* - Empty string and whitespace positional args
|
||||
* - isInteractiveTTY: non-TTY stdin shows help instead of interactive picker
|
||||
* - SPAWN_NO_UPDATE_CHECK actually prevents update check in subprocess
|
||||
*
|
||||
* Agent: test-engineer
|
||||
*/
|
||||
|
||||
const CLI_DIR = resolve(import.meta.dir, "../..");
|
||||
const PROJECT_ROOT = resolve(CLI_DIR, "..");
|
||||
|
||||
function runCli(
|
||||
args: string[],
|
||||
env: Record<string, string> = {}
|
||||
): { stdout: string; stderr: string; exitCode: number } {
|
||||
const result = spawnSync(
|
||||
"bun",
|
||||
["run", `${CLI_DIR}/src/index.ts`, ...args],
|
||||
{
|
||||
cwd: PROJECT_ROOT,
|
||||
env: {
|
||||
PATH: `${process.env.HOME}/.bun/bin:${process.env.PATH}`,
|
||||
HOME: process.env.HOME,
|
||||
SHELL: process.env.SHELL,
|
||||
TERM: process.env.TERM || "xterm",
|
||||
// Prevent OAuth browser from opening during tests — if OPENROUTER_API_KEY
|
||||
// is set, get_or_prompt_api_key() skips the entire OAuth flow.
|
||||
OPENROUTER_API_KEY: "sk-or-test-fake",
|
||||
...env,
|
||||
SPAWN_NO_UPDATE_CHECK: "1",
|
||||
NODE_ENV: "",
|
||||
BUN_ENV: "",
|
||||
},
|
||||
encoding: "utf-8",
|
||||
timeout: 5000,
|
||||
}
|
||||
);
|
||||
return {
|
||||
stdout: result.stdout || "",
|
||||
stderr: result.stderr || "",
|
||||
exitCode: result.status ?? 1,
|
||||
};
|
||||
}
|
||||
|
||||
function output(result: { stdout: string; stderr: string }): string {
|
||||
return result.stdout + result.stderr;
|
||||
}
|
||||
|
||||
// ── handleError output formatting ─────────────────────────────────────────
|
||||
|
||||
describe("error output formatting", () => {
|
||||
it("should show error with valid names hint for invalid identifier", () => {
|
||||
const result = runCli(["../hack", "sprite"]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("can only contain");
|
||||
expect(out).toContain("spawn agents");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should format error message for semicolon injection", () => {
|
||||
const result = runCli(["agent;rm", "sprite"]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("can only contain");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should format error message for dollar sign injection", () => {
|
||||
const result = runCli(["agent$var", "sprite"]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("can only contain");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should format error message for backtick injection", () => {
|
||||
const result = runCli(["agent`cmd`", "sprite"]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("can only contain");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should show identifier rules in error message", () => {
|
||||
const result = runCli(["Agent!", "sprite"]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("Lowercase letters");
|
||||
expect(out).toContain("Numbers");
|
||||
expect(out).toContain("Hyphens");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
});
|
||||
|
||||
// ── Flag ordering edge cases ──────────────────────────────────────────────
|
||||
|
||||
describe("flag ordering edge cases", () => {
|
||||
it("should handle --prompt before positional args", () => {
|
||||
const result = runCli(["--prompt", "Fix bugs", "claude", "sprite", "--dry-run"]);
|
||||
const out = output(result);
|
||||
// Should attempt to run (not error about prompt)
|
||||
expect(out).not.toContain("--prompt requires both");
|
||||
});
|
||||
|
||||
it("should handle -p between positional args", () => {
|
||||
const result = runCli(["claude", "-p", "Fix bugs", "sprite", "--dry-run"]);
|
||||
const out = output(result);
|
||||
// Should attempt to run
|
||||
expect(out).not.toContain("--prompt requires both");
|
||||
});
|
||||
|
||||
it("should handle --prompt after positional args", () => {
|
||||
const result = runCli(["claude", "sprite", "--prompt", "Fix bugs", "--dry-run"]);
|
||||
const out = output(result);
|
||||
expect(out).not.toContain("--prompt requires both");
|
||||
});
|
||||
|
||||
it("should reject --prompt with no cloud regardless of flag position", () => {
|
||||
const result = runCli(["--prompt", "Fix bugs", "claude"]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("--prompt requires both");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should reject -p with no cloud regardless of flag position", () => {
|
||||
const result = runCli(["-p", "Fix bugs", "claude"]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("--prompt requires both");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
});
|
||||
|
||||
// ── Unknown flags with subcommands ────────────────────────────────────────
|
||||
|
||||
describe("unknown flags with subcommands", () => {
|
||||
it("should reject --json with list command", () => {
|
||||
const result = runCli(["list", "--json"]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("Unknown flag");
|
||||
expect(out).toContain("--json");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should reject --format with agents command", () => {
|
||||
const result = runCli(["agents", "--format"]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("Unknown flag");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should handle --dry-run with valid agent and cloud", () => {
|
||||
const result = runCli(["claude", "sprite", "--dry-run"]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("Dry run");
|
||||
expect(out).toContain("no resources");
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should show supported flags list in unknown flag error", () => {
|
||||
const result = runCli(["list", "--json"]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("Supported flags");
|
||||
expect(out).toContain("--prompt");
|
||||
expect(out).toContain("--help");
|
||||
expect(out).toContain("--version");
|
||||
});
|
||||
|
||||
it("should not reject flags that look like negative numbers", () => {
|
||||
// -1, -42 etc should NOT be treated as unknown flags
|
||||
const result = runCli(["-1"]);
|
||||
const out = output(result);
|
||||
// Should be treated as a positional arg, not as a flag
|
||||
expect(out).not.toContain("Unknown flag");
|
||||
});
|
||||
});
|
||||
|
||||
// ── --prompt interaction with subcommands ──────────────────────────────────
|
||||
|
||||
describe("--prompt interaction with subcommands", () => {
|
||||
it("should error when --prompt is used with no args at all", () => {
|
||||
const result = runCli(["--prompt", "Fix bugs"]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("--prompt requires both");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should error when --prompt is used with 'list' subcommand", () => {
|
||||
// "spawn list --prompt 'text'" - list doesn't take a prompt
|
||||
// After extracting --prompt, filtered args become ["list"]
|
||||
// which dispatches to cmdList (no error about prompt, but --prompt value is ignored)
|
||||
const result = runCli(["list", "--prompt", "text"]);
|
||||
// cmdList will run since "list" is a subcommand and prompt is not passed to it
|
||||
// This should succeed (prompt is simply ignored for subcommands)
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should show agent info when --prompt used with single agent arg", () => {
|
||||
// "spawn claude --prompt 'text'" - only agent, no cloud
|
||||
const result = runCli(["claude", "--prompt", "Fix bugs"]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("--prompt requires both");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
});
|
||||
|
||||
// ── Version flag edge cases ───────────────────────────────────────────────
|
||||
|
||||
describe("version flag edge cases", () => {
|
||||
it("should show version for 'version' as first arg regardless of other args", () => {
|
||||
const result = runCli(["version"]);
|
||||
const out = output(result);
|
||||
expect(out).toMatch(/spawn v\d+\.\d+\.\d+/);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should show version for --version flag", () => {
|
||||
const result = runCli(["--version"]);
|
||||
const out = output(result);
|
||||
expect(out).toMatch(/spawn v\d+\.\d+\.\d+/);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should show version and exit for -V flag", () => {
|
||||
const result = runCli(["-V"]);
|
||||
const out = output(result);
|
||||
expect(out).toMatch(/spawn v\d+\.\d+\.\d+/);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should handle 'version' command and ignore extra args", () => {
|
||||
// "spawn version extra" - immediateCommands[cmd] fires for "version"
|
||||
const result = runCli(["version"]);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
});
|
||||
|
||||
// ── Non-TTY behavior ──────────────────────────────────────────────────────
|
||||
|
||||
describe("non-TTY behavior", () => {
|
||||
it("should show non-TTY hint when no args in non-TTY (subprocess) mode", () => {
|
||||
// Subprocesses don't have TTY stdin, so isInteractiveTTY returns false
|
||||
const result = runCli([]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("Cannot run interactive picker");
|
||||
expect(result.exitCode).toBe(1);
|
||||
});
|
||||
|
||||
it("should include launch hint in non-TTY output", () => {
|
||||
const result = runCli([]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("spawn <agent> <cloud>");
|
||||
});
|
||||
|
||||
it("should include help hint in non-TTY output", () => {
|
||||
const result = runCli([]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("spawn help");
|
||||
});
|
||||
});
|
||||
|
||||
// ── Alias commands ────────────────────────────────────────────────────────
|
||||
|
||||
describe("command aliases", () => {
|
||||
it("should treat 'ls' as alias for 'list'", () => {
|
||||
const result = runCli(["ls"]);
|
||||
const out = output(result);
|
||||
// 'ls' should produce spawn history output
|
||||
expect(out).toMatch(/AGENT|No spawns recorded/);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should treat 'm' as alias for 'matrix'", () => {
|
||||
const result = runCli(["m"]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("combinations implemented");
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should show help for 'ls --help'", () => {
|
||||
const result = runCli(["ls", "--help"]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("USAGE");
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should show help for 'ls -h'", () => {
|
||||
const result = runCli(["ls", "-h"]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("USAGE");
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
});
|
||||
|
||||
// ── --prompt-file with real file ──────────────────────────────────────────
|
||||
|
||||
describe("--prompt-file with real files", () => {
|
||||
it("should error for non-existent prompt file", () => {
|
||||
const result = runCli([
|
||||
"claude",
|
||||
"sprite",
|
||||
"--prompt-file",
|
||||
"/tmp/spawn-nonexistent-test-file-12345.txt",
|
||||
]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("Prompt file not found");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should include filename in error message for missing file", () => {
|
||||
const result = runCli([
|
||||
"claude",
|
||||
"sprite",
|
||||
"--prompt-file",
|
||||
"/tmp/spawn-missing-file.txt",
|
||||
]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("spawn-missing-file.txt");
|
||||
});
|
||||
|
||||
it("should include hint about file existence in error", () => {
|
||||
const result = runCli([
|
||||
"claude",
|
||||
"sprite",
|
||||
"--prompt-file",
|
||||
"/tmp/spawn-missing-file.txt",
|
||||
]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("Check the path and try again");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
});
|
||||
|
||||
// ── Multiple agent/cloud resolution ───────────────────────────────────────
|
||||
|
||||
describe("agent and cloud display name resolution in cmdRun", () => {
|
||||
it("should resolve uppercase agent key and show resolution message", () => {
|
||||
const result = runCli(["CLAUDE", "sprite", "--dry-run"]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("Resolved");
|
||||
expect(out).not.toContain("Unknown agent");
|
||||
});
|
||||
|
||||
it("should resolve uppercase cloud key and show resolution message", () => {
|
||||
const result = runCli(["claude", "SPRITE", "--dry-run"]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("Resolved");
|
||||
expect(out).not.toContain("Unknown cloud");
|
||||
});
|
||||
|
||||
it("should resolve both uppercase agent and cloud", () => {
|
||||
const result = runCli(["CLAUDE", "SPRITE", "--dry-run"]);
|
||||
const out = output(result);
|
||||
// Both should be resolved
|
||||
expect(out).toContain("Resolved");
|
||||
expect(out).not.toContain("Unknown");
|
||||
});
|
||||
|
||||
it("should not show resolution for exact lowercase keys", () => {
|
||||
const result = runCli(["claude", "sprite", "--dry-run"]);
|
||||
const out = output(result);
|
||||
expect(out).not.toContain("Resolved");
|
||||
});
|
||||
});
|
||||
|
||||
// ── Subcommand list and agents output format ──────────────────────────────
|
||||
|
||||
describe("subcommand output format verification", () => {
|
||||
it("'agents' should list all agents in manifest", () => {
|
||||
const result = runCli(["agents"]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("Agents");
|
||||
expect(out).toContain("claude");
|
||||
expect(out).toContain("codex");
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("'clouds' should list all clouds in manifest", () => {
|
||||
const result = runCli(["clouds"]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("Cloud Providers");
|
||||
expect(out).toContain("sprite");
|
||||
expect(out).toContain("hetzner");
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("'list' should show spawn history", () => {
|
||||
const result = runCli(["list"]);
|
||||
const out = output(result);
|
||||
// list now shows spawn history (may be empty or have entries)
|
||||
expect(out).toMatch(/AGENT|No spawns recorded/);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("'matrix' should show the availability matrix", () => {
|
||||
const result = runCli(["matrix"]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("combinations implemented");
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("'matrix' should show usage hints at the bottom", () => {
|
||||
const result = runCli(["matrix"]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("spawn <agent>");
|
||||
expect(out).toContain("spawn <cloud>");
|
||||
});
|
||||
});
|
||||
|
||||
// ── Fuzzy match edge cases ────────────────────────────────────────────────
|
||||
|
||||
describe("fuzzy matching edge cases in showInfoOrError", () => {
|
||||
it("should suggest close agent match for 2-char typo", () => {
|
||||
// "cloude" is distance 1 from "claude"
|
||||
const result = runCli(["cloude"]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("Did you mean");
|
||||
expect(out).toContain("claude");
|
||||
});
|
||||
|
||||
it("should suggest close cloud match for 1-char typo", () => {
|
||||
// "hetzne" is distance 1 from "hetzner"
|
||||
const result = runCli(["hetzne"]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("Did you mean");
|
||||
expect(out).toContain("hetzner");
|
||||
});
|
||||
|
||||
it("should not suggest for string with distance > 3", () => {
|
||||
// "abcdefgh" is far from any agent/cloud
|
||||
const result = runCli(["abcdefgh"]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("Unknown agent or cloud");
|
||||
expect(out).not.toContain("Did you mean");
|
||||
});
|
||||
|
||||
it("should show both agent and cloud suggestions when both match", () => {
|
||||
// Need a string close to both an agent and a cloud name
|
||||
// "sprit" is close to "sprite" (cloud, distance 1)
|
||||
const result = runCli(["sprit"]);
|
||||
const out = output(result);
|
||||
// Should suggest sprite as a cloud
|
||||
expect(out).toContain("sprite");
|
||||
expect(out).toContain("(cloud:");
|
||||
});
|
||||
});
|
||||
|
||||
// ── SPAWN_NO_UNICODE env var ──────────────────────────────────────────────
|
||||
|
||||
describe("SPAWN_NO_UNICODE environment variable", () => {
|
||||
it("should work normally with SPAWN_NO_UNICODE=1", () => {
|
||||
const result = runCli(["help"], { SPAWN_NO_UNICODE: "1" });
|
||||
const out = output(result);
|
||||
expect(out).toContain("USAGE");
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should work normally with SPAWN_ASCII=1", () => {
|
||||
const result = runCli(["version"], { SPAWN_ASCII: "1" });
|
||||
const out = output(result);
|
||||
expect(out).toMatch(/spawn v\d+\.\d+\.\d+/);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
});
|
||||
|
||||
// ── SPAWN_NO_UPDATE_CHECK env var ─────────────────────────────────────────
|
||||
|
||||
describe("SPAWN_NO_UPDATE_CHECK behavior", () => {
|
||||
it("should skip update check and run command immediately", () => {
|
||||
const start = Date.now();
|
||||
const result = runCli(["version"], { SPAWN_NO_UPDATE_CHECK: "1" });
|
||||
const elapsed = Date.now() - start;
|
||||
expect(output(result)).toMatch(/spawn v\d+\.\d+\.\d+/);
|
||||
expect(result.exitCode).toBe(0);
|
||||
// With update check skipped, should be fast (< 10s)
|
||||
expect(elapsed).toBeLessThan(10000);
|
||||
});
|
||||
});
|
||||
|
||||
// ── Extra positional argument warnings ──────────────────────────────────
|
||||
|
||||
describe("extra positional argument warnings", () => {
|
||||
it("should warn when 3 positional args given (agent cloud extra)", () => {
|
||||
const result = runCli(["claude", "sprite", "hetzner", "--dry-run"]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("Extra argument ignored");
|
||||
expect(out).toContain("hetzner");
|
||||
expect(out).toContain("Usage:");
|
||||
});
|
||||
|
||||
it("should warn about multiple extra args", () => {
|
||||
const result = runCli(["claude", "sprite", "foo", "bar", "--dry-run"]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("Extra arguments ignored");
|
||||
expect(out).toContain("foo");
|
||||
expect(out).toContain("bar");
|
||||
});
|
||||
|
||||
it("should still work for subcommands with extra args (warning on stderr)", () => {
|
||||
// "spawn matrix extra" runs successfully - the warning goes to stderr
|
||||
// which isn't captured by execSync on success, but the command should still work
|
||||
const result = runCli(["matrix", "extra"]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("combinations implemented");
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should still work for version with extra args (warning on stderr)", () => {
|
||||
// "spawn version extra" runs successfully - the warning goes to stderr
|
||||
const result = runCli(["version", "extra"]);
|
||||
const out = output(result);
|
||||
expect(out).toMatch(/spawn v\d+\.\d+\.\d+/);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should NOT warn when exactly 2 positional args given", () => {
|
||||
const result = runCli(["claude", "sprite", "--dry-run"]);
|
||||
const out = output(result);
|
||||
expect(out).not.toContain("extra argument");
|
||||
});
|
||||
|
||||
it("should NOT warn when exactly 1 positional arg given", () => {
|
||||
const result = runCli(["claude"]);
|
||||
const out = output(result);
|
||||
expect(out).not.toContain("extra argument");
|
||||
});
|
||||
});
|
||||
|
||||
// ── Mismatched argument type errors ─────────────────────────────────────
|
||||
|
||||
describe("mismatched argument type detection", () => {
|
||||
it("should detect two agents passed as agent+cloud", () => {
|
||||
const result = runCli(["claude", "codex"]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("is an agent");
|
||||
expect(out).toContain("spawn <agent> <cloud>");
|
||||
});
|
||||
|
||||
it("should detect two clouds passed as agent+cloud", () => {
|
||||
const result = runCli(["hetzner", "sprite"]);
|
||||
const out = output(result);
|
||||
// The swap detection won't fire (sprite is not an agent), so validateAgent
|
||||
// catches that hetzner is a cloud
|
||||
expect(out).toContain("is a cloud provider");
|
||||
expect(out).toContain("spawn <agent> <cloud>");
|
||||
});
|
||||
});
|
||||
|
|
@ -1,528 +0,0 @@
|
|||
import { describe, it, expect, beforeEach, afterEach, spyOn } from "bun:test";
|
||||
import { resolve, join } from "path";
|
||||
|
||||
/**
|
||||
* Tests for CLI version output and dispatch routing via subprocess execution.
|
||||
*
|
||||
* These tests exercise the ACTUAL index.ts entry point by running it as a
|
||||
* subprocess, verifying the real behavior users see when they run spawn commands.
|
||||
* This catches integration issues that unit tests with mocked modules miss:
|
||||
*
|
||||
* - showVersion: output format, runtime info (bun/node, platform, arch)
|
||||
* - Version flags: --version, -v, -V, and "version" subcommand
|
||||
* - Help flags: --help, -h, and "help" subcommand
|
||||
* - handleNoCommand: --dry-run and --prompt without agent/cloud
|
||||
* - Subcommand aliases: "m" for "matrix", "ls"/"history" for "list"
|
||||
* - Verb alias routing: "run", "launch", "start", "deploy", "exec"
|
||||
* - Unknown flag error messaging
|
||||
* - Extra args warning
|
||||
* - showInfoOrError: unknown command with fuzzy suggestions
|
||||
*
|
||||
* Agent: test-engineer
|
||||
*/
|
||||
|
||||
const CLI_PATH = resolve(import.meta.dir, "../../src/index.ts");
|
||||
const REPO_ROOT = resolve(import.meta.dir, "../../..");
|
||||
|
||||
/**
|
||||
* Run the CLI with given args as a subprocess.
|
||||
* Sets SPAWN_NO_UPDATE_CHECK to skip auto-update and BUN_ENV=test to skip
|
||||
* local manifest loading. Returns { stdout, stderr, exitCode }.
|
||||
*/
|
||||
function runCLI(
|
||||
args: string[],
|
||||
env?: Record<string, string>,
|
||||
): { stdout: string; stderr: string; exitCode: number } {
|
||||
const { spawnSync } = require("child_process");
|
||||
const result = spawnSync("bun", ["run", CLI_PATH, ...args], {
|
||||
cwd: REPO_ROOT,
|
||||
encoding: "utf-8",
|
||||
timeout: 15000,
|
||||
env: {
|
||||
...process.env,
|
||||
// Ensure bun is in PATH for child processes
|
||||
PATH: `${process.env.HOME}/.bun/bin:${process.env.PATH}`,
|
||||
SPAWN_NO_UPDATE_CHECK: "1",
|
||||
BUN_ENV: "test",
|
||||
// Avoid terminal-dependent output
|
||||
TERM: "dumb",
|
||||
SPAWN_NO_UNICODE: "1",
|
||||
// Ensure no color codes in output for easier assertion
|
||||
NO_COLOR: "1",
|
||||
...env,
|
||||
},
|
||||
stdio: ["pipe", "pipe", "pipe"],
|
||||
});
|
||||
return {
|
||||
stdout: (result.stdout || "").toString(),
|
||||
stderr: (result.stderr || "").toString(),
|
||||
exitCode: result.status ?? 1,
|
||||
};
|
||||
}
|
||||
|
||||
// ── showVersion output ──────────────────────────────────────────────────────
|
||||
|
||||
describe("showVersion via CLI subprocess", () => {
|
||||
it("should show version string with 'spawn v' prefix", () => {
|
||||
const { stdout, exitCode } = runCLI(["version"]);
|
||||
expect(exitCode).toBe(0);
|
||||
expect(stdout).toMatch(/spawn v\d+\.\d+\.\d+/);
|
||||
});
|
||||
|
||||
it("should show bun runtime info", () => {
|
||||
const { stdout, exitCode } = runCLI(["version"]);
|
||||
expect(exitCode).toBe(0);
|
||||
expect(stdout).toContain("bun");
|
||||
});
|
||||
|
||||
it("should show platform info", () => {
|
||||
const { stdout, exitCode } = runCLI(["version"]);
|
||||
expect(exitCode).toBe(0);
|
||||
expect(stdout).toContain(process.platform);
|
||||
});
|
||||
|
||||
it("should show arch info", () => {
|
||||
const { stdout, exitCode } = runCLI(["version"]);
|
||||
expect(exitCode).toBe(0);
|
||||
expect(stdout).toContain(process.arch);
|
||||
});
|
||||
|
||||
it("should suggest 'spawn update' command", () => {
|
||||
const { stdout, exitCode } = runCLI(["version"]);
|
||||
expect(exitCode).toBe(0);
|
||||
expect(stdout).toContain("spawn update");
|
||||
});
|
||||
|
||||
it("should show binary path", () => {
|
||||
const { stdout, exitCode } = runCLI(["version"]);
|
||||
expect(exitCode).toBe(0);
|
||||
// The binary path should contain the path to index.ts
|
||||
expect(stdout).toContain("index.ts");
|
||||
});
|
||||
});
|
||||
|
||||
// ── Version flag aliases ────────────────────────────────────────────────────
|
||||
|
||||
describe("version flag aliases", () => {
|
||||
it("--version should produce same version line as 'version'", () => {
|
||||
const versionResult = runCLI(["version"]);
|
||||
const flagResult = runCLI(["--version"]);
|
||||
expect(flagResult.exitCode).toBe(0);
|
||||
// Both should contain the version string
|
||||
const versionMatch = versionResult.stdout.match(/spawn v[\d.]+/);
|
||||
const flagMatch = flagResult.stdout.match(/spawn v[\d.]+/);
|
||||
expect(versionMatch).not.toBeNull();
|
||||
expect(flagMatch).not.toBeNull();
|
||||
expect(versionMatch![0]).toBe(flagMatch![0]);
|
||||
});
|
||||
|
||||
it("-v should produce same version line as 'version'", () => {
|
||||
const { stdout, exitCode } = runCLI(["-v"]);
|
||||
expect(exitCode).toBe(0);
|
||||
expect(stdout).toMatch(/spawn v\d+\.\d+\.\d+/);
|
||||
});
|
||||
|
||||
it("-V should produce same version line as 'version'", () => {
|
||||
const { stdout, exitCode } = runCLI(["-V"]);
|
||||
expect(exitCode).toBe(0);
|
||||
expect(stdout).toMatch(/spawn v\d+\.\d+\.\d+/);
|
||||
});
|
||||
});
|
||||
|
||||
// ── Help flags ──────────────────────────────────────────────────────────────
|
||||
|
||||
describe("help command and flags", () => {
|
||||
it("'help' should show USAGE section", () => {
|
||||
const { stdout, exitCode } = runCLI(["help"]);
|
||||
expect(exitCode).toBe(0);
|
||||
expect(stdout).toContain("USAGE");
|
||||
});
|
||||
|
||||
it("--help should show USAGE section", () => {
|
||||
const { stdout, exitCode } = runCLI(["--help"]);
|
||||
expect(exitCode).toBe(0);
|
||||
expect(stdout).toContain("USAGE");
|
||||
});
|
||||
|
||||
it("-h should show USAGE section", () => {
|
||||
const { stdout, exitCode } = runCLI(["-h"]);
|
||||
expect(exitCode).toBe(0);
|
||||
expect(stdout).toContain("USAGE");
|
||||
});
|
||||
|
||||
it("help should include EXAMPLES section", () => {
|
||||
const { stdout } = runCLI(["help"]);
|
||||
expect(stdout).toContain("EXAMPLES");
|
||||
});
|
||||
|
||||
it("help should include AUTHENTICATION section", () => {
|
||||
const { stdout } = runCLI(["help"]);
|
||||
expect(stdout).toContain("AUTHENTICATION");
|
||||
});
|
||||
|
||||
it("help should include ENVIRONMENT VARIABLES section", () => {
|
||||
const { stdout } = runCLI(["help"]);
|
||||
expect(stdout).toContain("ENVIRONMENT VARIABLES");
|
||||
});
|
||||
|
||||
it("help should include TROUBLESHOOTING section", () => {
|
||||
const { stdout } = runCLI(["help"]);
|
||||
expect(stdout).toContain("TROUBLESHOOTING");
|
||||
});
|
||||
|
||||
it("help should mention --dry-run flag", () => {
|
||||
const { stdout } = runCLI(["help"]);
|
||||
expect(stdout).toContain("--dry-run");
|
||||
});
|
||||
|
||||
it("help should mention --prompt-file flag", () => {
|
||||
const { stdout } = runCLI(["help"]);
|
||||
expect(stdout).toContain("--prompt-file");
|
||||
});
|
||||
|
||||
it("help should mention list aliases (ls, history)", () => {
|
||||
const { stdout } = runCLI(["help"]);
|
||||
expect(stdout).toContain("ls");
|
||||
expect(stdout).toContain("history");
|
||||
});
|
||||
|
||||
it("help should mention matrix alias (m)", () => {
|
||||
const { stdout } = runCLI(["help"]);
|
||||
expect(stdout).toContain("matrix");
|
||||
});
|
||||
});
|
||||
|
||||
// ── Trailing help flag on subcommands ───────────────────────────────────────
|
||||
|
||||
describe("trailing help flag on subcommands", () => {
|
||||
it("'agents --help' should show help, not agents list", () => {
|
||||
const { stdout, exitCode } = runCLI(["agents", "--help"]);
|
||||
expect(exitCode).toBe(0);
|
||||
expect(stdout).toContain("USAGE");
|
||||
});
|
||||
|
||||
it("'clouds -h' should show help", () => {
|
||||
const { stdout, exitCode } = runCLI(["clouds", "-h"]);
|
||||
expect(exitCode).toBe(0);
|
||||
expect(stdout).toContain("USAGE");
|
||||
});
|
||||
|
||||
it("'matrix --help' should show help", () => {
|
||||
const { stdout, exitCode } = runCLI(["matrix", "--help"]);
|
||||
expect(exitCode).toBe(0);
|
||||
expect(stdout).toContain("USAGE");
|
||||
});
|
||||
|
||||
it("'list --help' should show help", () => {
|
||||
const { stdout, exitCode } = runCLI(["list", "--help"]);
|
||||
expect(exitCode).toBe(0);
|
||||
expect(stdout).toContain("USAGE");
|
||||
});
|
||||
|
||||
it("'update --help' should show help", () => {
|
||||
const { stdout, exitCode } = runCLI(["update", "--help"]);
|
||||
expect(exitCode).toBe(0);
|
||||
expect(stdout).toContain("USAGE");
|
||||
});
|
||||
});
|
||||
|
||||
// ── handleNoCommand: --dry-run and --prompt without agent/cloud ─────────────
|
||||
|
||||
describe("handleNoCommand error paths", () => {
|
||||
it("--dry-run without agent/cloud should error", () => {
|
||||
const { stderr, exitCode } = runCLI(["--dry-run"]);
|
||||
expect(exitCode).toBe(1);
|
||||
expect(stderr).toContain("--dry-run requires both");
|
||||
});
|
||||
|
||||
it("-n without agent/cloud should error", () => {
|
||||
const { stderr, exitCode } = runCLI(["-n"]);
|
||||
expect(exitCode).toBe(1);
|
||||
expect(stderr).toContain("--dry-run requires both");
|
||||
});
|
||||
|
||||
it("--prompt without agent/cloud should error", () => {
|
||||
const { stderr, exitCode } = runCLI(["--prompt", "hello"]);
|
||||
expect(exitCode).toBe(1);
|
||||
expect(stderr).toContain("--prompt requires both");
|
||||
});
|
||||
|
||||
it("--prompt-file with nonexistent file should error with file-not-found", () => {
|
||||
const { stderr, exitCode } = runCLI(["--prompt-file", "/tmp/nonexistent-spawn-test"]);
|
||||
expect(exitCode).toBe(1);
|
||||
// The file read error occurs before the no-agent/cloud check
|
||||
expect(stderr).toContain("not found");
|
||||
});
|
||||
});
|
||||
|
||||
// ── --dry-run with only agent (no cloud) ────────────────────────────────────
|
||||
|
||||
describe("--dry-run with only agent", () => {
|
||||
it("should error when --dry-run is used with agent only", () => {
|
||||
const { stderr, exitCode } = runCLI(["claude", "--dry-run"]);
|
||||
expect(exitCode).toBe(1);
|
||||
expect(stderr).toContain("--dry-run requires both");
|
||||
});
|
||||
});
|
||||
|
||||
// ── --prompt with only agent (no cloud) ─────────────────────────────────────
|
||||
|
||||
describe("--prompt with only agent (no cloud)", () => {
|
||||
it("should error when --prompt is used with agent only", () => {
|
||||
const { stderr, exitCode } = runCLI(["claude", "--prompt", "hello"]);
|
||||
expect(exitCode).toBe(1);
|
||||
expect(stderr).toContain("--prompt requires both");
|
||||
});
|
||||
|
||||
it("should suggest available clouds for the agent", () => {
|
||||
const { stderr, exitCode } = runCLI(["claude", "--prompt", "hello"]);
|
||||
expect(exitCode).toBe(1);
|
||||
// Should suggest cloud options
|
||||
expect(stderr).toContain("spawn claude");
|
||||
});
|
||||
});
|
||||
|
||||
// ── Unknown flag detection ──────────────────────────────────────────────────
|
||||
|
||||
describe("unknown flag detection", () => {
|
||||
it("should error on --unknown flag", () => {
|
||||
const { stderr, exitCode } = runCLI(["--unknown"]);
|
||||
expect(exitCode).toBe(1);
|
||||
expect(stderr).toContain("Unknown flag");
|
||||
expect(stderr).toContain("--unknown");
|
||||
});
|
||||
|
||||
it("should show supported flags in error message", () => {
|
||||
const { stderr, exitCode } = runCLI(["--xyz"]);
|
||||
expect(exitCode).toBe(1);
|
||||
expect(stderr).toContain("Supported flags");
|
||||
expect(stderr).toContain("--prompt");
|
||||
expect(stderr).toContain("--dry-run");
|
||||
expect(stderr).toContain("--help");
|
||||
expect(stderr).toContain("--version");
|
||||
});
|
||||
|
||||
it("should suggest 'spawn help' when unknown flag is used", () => {
|
||||
const { stderr, exitCode } = runCLI(["--foo"]);
|
||||
expect(exitCode).toBe(1);
|
||||
expect(stderr).toContain("spawn help");
|
||||
});
|
||||
|
||||
it("should not treat -1 as a flag (numeric prefix)", () => {
|
||||
// -1 starts with - but matches /^-\d/, so it should not be caught as unknown flag
|
||||
// It will fail for other reasons (not a valid agent) but not as "unknown flag"
|
||||
const { stderr, exitCode } = runCLI(["-1"]);
|
||||
expect(stderr).not.toContain("Unknown flag");
|
||||
});
|
||||
|
||||
it("should treat --prompt-files (typo) as unknown flag", () => {
|
||||
const { stderr, exitCode } = runCLI(["--prompt-files", "test.txt"]);
|
||||
expect(exitCode).toBe(1);
|
||||
expect(stderr).toContain("Unknown flag");
|
||||
expect(stderr).toContain("--prompt-files");
|
||||
});
|
||||
});
|
||||
|
||||
// ── Flag value requirements ─────────────────────────────────────────────────
|
||||
|
||||
describe("flag value requirements", () => {
|
||||
it("--prompt without value should error", () => {
|
||||
const { stderr, exitCode } = runCLI(["claude", "sprite", "--prompt"]);
|
||||
expect(exitCode).toBe(1);
|
||||
expect(stderr).toContain("--prompt");
|
||||
expect(stderr).toContain("requires a value");
|
||||
});
|
||||
|
||||
it("-p without value should error", () => {
|
||||
const { stderr, exitCode } = runCLI(["claude", "sprite", "-p"]);
|
||||
expect(exitCode).toBe(1);
|
||||
expect(stderr).toContain("-p");
|
||||
expect(stderr).toContain("requires a value");
|
||||
});
|
||||
|
||||
it("--prompt-file without value should error", () => {
|
||||
const { stderr, exitCode } = runCLI(["claude", "sprite", "--prompt-file"]);
|
||||
expect(exitCode).toBe(1);
|
||||
expect(stderr).toContain("--prompt-file");
|
||||
expect(stderr).toContain("requires a value");
|
||||
});
|
||||
|
||||
it("-f without value should error", () => {
|
||||
const { stderr, exitCode } = runCLI(["claude", "sprite", "-f"]);
|
||||
expect(exitCode).toBe(1);
|
||||
expect(stderr).toContain("-f");
|
||||
expect(stderr).toContain("requires a value");
|
||||
});
|
||||
|
||||
it("--prompt and --prompt-file together should error", () => {
|
||||
const { stderr, exitCode } = runCLI([
|
||||
"claude", "sprite",
|
||||
"--prompt", "hello",
|
||||
"--prompt-file", "/tmp/test.txt",
|
||||
]);
|
||||
expect(exitCode).toBe(1);
|
||||
expect(stderr).toContain("cannot be used together");
|
||||
});
|
||||
});
|
||||
|
||||
// ── Verb alias routing ──────────────────────────────────────────────────────
|
||||
|
||||
describe("verb alias routing", () => {
|
||||
it("'run' without args should error with usage hint", () => {
|
||||
const { stderr, exitCode } = runCLI(["run"]);
|
||||
expect(exitCode).toBe(1);
|
||||
expect(stderr).toContain("requires an agent and cloud");
|
||||
});
|
||||
|
||||
it("'launch' without args should error with usage hint", () => {
|
||||
const { stderr, exitCode } = runCLI(["launch"]);
|
||||
expect(exitCode).toBe(1);
|
||||
expect(stderr).toContain("requires an agent and cloud");
|
||||
});
|
||||
|
||||
it("'start' without args should error with usage hint", () => {
|
||||
const { stderr, exitCode } = runCLI(["start"]);
|
||||
expect(exitCode).toBe(1);
|
||||
expect(stderr).toContain("requires an agent and cloud");
|
||||
});
|
||||
|
||||
it("'deploy' without args should error with usage hint", () => {
|
||||
const { stderr, exitCode } = runCLI(["deploy"]);
|
||||
expect(exitCode).toBe(1);
|
||||
expect(stderr).toContain("requires an agent and cloud");
|
||||
});
|
||||
|
||||
it("'exec' without args should error with usage hint", () => {
|
||||
const { stderr, exitCode } = runCLI(["exec"]);
|
||||
expect(exitCode).toBe(1);
|
||||
expect(stderr).toContain("requires an agent and cloud");
|
||||
});
|
||||
|
||||
it("verb alias error should mention it's optional", () => {
|
||||
const { stderr } = runCLI(["run"]);
|
||||
expect(stderr).toContain("optional");
|
||||
expect(stderr).toContain("spawn <agent> <cloud>");
|
||||
});
|
||||
});
|
||||
|
||||
// ── Extra args warning ──────────────────────────────────────────────────────
|
||||
|
||||
describe("extra arguments warning", () => {
|
||||
it("should warn about extra args after version command", () => {
|
||||
const { stderr, stdout, exitCode } = runCLI(["version", "extra"]);
|
||||
expect(exitCode).toBe(0);
|
||||
expect(stderr.toLowerCase()).toContain("extra argument");
|
||||
expect(stderr).toContain("ignored");
|
||||
// Should still show version
|
||||
expect(stdout).toMatch(/spawn v\d+\.\d+/);
|
||||
});
|
||||
|
||||
it("should warn about multiple extra args", () => {
|
||||
const { stderr, exitCode } = runCLI(["version", "a", "b", "c"]);
|
||||
expect(exitCode).toBe(0);
|
||||
expect(stderr.toLowerCase()).toContain("extra arguments");
|
||||
expect(stderr).toContain("ignored");
|
||||
});
|
||||
|
||||
it("should not warn when no extra args", () => {
|
||||
const { stderr } = runCLI(["version"]);
|
||||
expect(stderr.toLowerCase()).not.toContain("extra argument");
|
||||
});
|
||||
});
|
||||
|
||||
// ── Prompt file errors ──────────────────────────────────────────────────────
|
||||
|
||||
describe("prompt file error handling", () => {
|
||||
it("should show file-not-found error for nonexistent prompt file", () => {
|
||||
const { stderr, exitCode } = runCLI([
|
||||
"claude", "sprite",
|
||||
"--prompt-file", "/tmp/spawn-test-nonexistent-file-xyz123",
|
||||
]);
|
||||
expect(exitCode).toBe(1);
|
||||
expect(stderr).toContain("not found");
|
||||
});
|
||||
|
||||
it("should show directory error when prompt-file is a directory", () => {
|
||||
const { stderr, exitCode } = runCLI([
|
||||
"claude", "sprite",
|
||||
"--prompt-file", "/tmp",
|
||||
]);
|
||||
expect(exitCode).toBe(1);
|
||||
expect(stderr).toContain("directory");
|
||||
});
|
||||
});
|
||||
|
||||
// ── Non-interactive terminal without command ────────────────────────────────
|
||||
|
||||
describe("non-interactive terminal handling", () => {
|
||||
it("should show usage hint when no args and no TTY", () => {
|
||||
// Running as subprocess inherently lacks a TTY for stdin
|
||||
const { stderr, exitCode } = runCLI([]);
|
||||
expect(exitCode).toBe(1);
|
||||
expect(stderr).toContain("Cannot run interactive picker: not a terminal");
|
||||
expect(stderr).toContain("spawn <agent> <cloud>");
|
||||
expect(stderr).toContain("spawn agents");
|
||||
expect(stderr).toContain("spawn clouds");
|
||||
expect(stderr).toContain("spawn help");
|
||||
});
|
||||
});
|
||||
|
||||
// ── Subcommand alias routing ────────────────────────────────────────────────
|
||||
|
||||
describe("subcommand alias routing", () => {
|
||||
it("'m' should work as alias for 'matrix'", () => {
|
||||
const { stdout, exitCode } = runCLI(["m"]);
|
||||
expect(exitCode).toBe(0);
|
||||
expect(stdout).toContain("Availability Matrix");
|
||||
});
|
||||
|
||||
it("'agents' should list agents", () => {
|
||||
const { stdout, exitCode } = runCLI(["agents"]);
|
||||
expect(exitCode).toBe(0);
|
||||
expect(stdout).toContain("Agents");
|
||||
});
|
||||
|
||||
it("'clouds' should list clouds", () => {
|
||||
const { stdout, exitCode } = runCLI(["clouds"]);
|
||||
expect(exitCode).toBe(0);
|
||||
expect(stdout).toContain("Cloud Providers");
|
||||
});
|
||||
});
|
||||
|
||||
// ── List command aliases ────────────────────────────────────────────────────
|
||||
|
||||
describe("list command aliases", () => {
|
||||
it("'list' should not crash with empty history", () => {
|
||||
const { homedir } = require("os");
|
||||
const { exitCode } = runCLI(["list"], { SPAWN_HOME: join(homedir(), ".spawn-test-empty-home-" + Date.now()) });
|
||||
// May exit 0 (shows "no spawns") or run interactive picker in non-TTY
|
||||
// The important thing is it doesn't crash
|
||||
expect(exitCode).toBeDefined();
|
||||
});
|
||||
|
||||
it("'ls' should work as alias for 'list'", () => {
|
||||
const { homedir } = require("os");
|
||||
const { exitCode } = runCLI(["ls"], { SPAWN_HOME: join(homedir(), ".spawn-test-empty-home-" + Date.now()) });
|
||||
expect(exitCode).toBeDefined();
|
||||
});
|
||||
|
||||
it("'history' should work as alias for 'list'", () => {
|
||||
const { homedir } = require("os");
|
||||
const { exitCode } = runCLI(["history"], { SPAWN_HOME: join(homedir(), ".spawn-test-empty-home-" + Date.now()) });
|
||||
expect(exitCode).toBeDefined();
|
||||
});
|
||||
|
||||
it("'list -a' without value should error", () => {
|
||||
const { stderr, exitCode } = runCLI(["list", "-a"]);
|
||||
expect(exitCode).toBe(1);
|
||||
expect(stderr).toContain("-a");
|
||||
expect(stderr).toContain("requires");
|
||||
});
|
||||
|
||||
it("'list -c' without value should error", () => {
|
||||
const { stderr, exitCode } = runCLI(["list", "-c"]);
|
||||
expect(exitCode).toBe(1);
|
||||
expect(stderr).toContain("-c");
|
||||
expect(stderr).toContain("requires");
|
||||
});
|
||||
});
|
||||
|
|
@ -1,291 +0,0 @@
|
|||
import { describe, it, expect } from "bun:test";
|
||||
import { execSync } from "child_process";
|
||||
import { resolve } from "path";
|
||||
|
||||
/**
|
||||
* Tests for cmdRun argument resolution paths:
|
||||
* - Display name resolution ("Claude Code" -> "claude")
|
||||
* - Case-insensitive key resolution ("Claude" -> "claude")
|
||||
* - Argument swapping detection (cloud/agent -> agent/cloud)
|
||||
* - showInfoOrError display name resolution ("Hetzner Cloud" -> cloud info)
|
||||
*
|
||||
* These paths in commands.ts cmdRun() (lines 252-304) and index.ts
|
||||
* showInfoOrError() (lines 87-128) have zero E2E test coverage.
|
||||
*
|
||||
* Uses subprocess approach since cmdRun calls process.exit on errors.
|
||||
*
|
||||
* Agent: test-engineer
|
||||
*/
|
||||
|
||||
const CLI_DIR = resolve(import.meta.dir, "../..");
|
||||
const PROJECT_ROOT = resolve(CLI_DIR, "..");
|
||||
|
||||
function runCli(
|
||||
args: string[],
|
||||
env: Record<string, string> = {}
|
||||
): { stdout: string; stderr: string; exitCode: number } {
|
||||
const quotedArgs = args.map(a => `'${a.replace(/'/g, "'\\''")}'`).join(" ");
|
||||
const cmd = `bun run ${CLI_DIR}/src/index.ts ${quotedArgs}`;
|
||||
try {
|
||||
const stdout = execSync(cmd, {
|
||||
cwd: PROJECT_ROOT,
|
||||
env: {
|
||||
PATH: `${process.env.HOME}/.bun/bin:${process.env.PATH}`,
|
||||
HOME: process.env.HOME,
|
||||
SHELL: process.env.SHELL,
|
||||
TERM: process.env.TERM || "xterm",
|
||||
// Prevent OAuth browser from opening during tests — if OPENROUTER_API_KEY
|
||||
// is set, get_or_prompt_api_key() skips the entire OAuth flow.
|
||||
OPENROUTER_API_KEY: "sk-or-test-fake",
|
||||
...env,
|
||||
SPAWN_NO_UPDATE_CHECK: "1",
|
||||
NODE_ENV: "",
|
||||
BUN_ENV: "",
|
||||
},
|
||||
encoding: "utf-8",
|
||||
timeout: 15000,
|
||||
stdio: ["pipe", "pipe", "pipe"],
|
||||
});
|
||||
return { stdout, stderr: "", exitCode: 0 };
|
||||
} catch (err: any) {
|
||||
return {
|
||||
stdout: err.stdout || "",
|
||||
stderr: err.stderr || "",
|
||||
exitCode: err.status ?? 1,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
// ── cmdRun: argument swapping detection ───────────────────────────────────
|
||||
|
||||
describe("cmdRun argument swapping", () => {
|
||||
it("should detect swapped cloud/agent and show swap warning", () => {
|
||||
// "spawn sprite claude" should be detected as swapped -> "spawn claude sprite"
|
||||
// cmdRun will swap and try to launch, which will fail at download (no network)
|
||||
// but the swap message should appear in output
|
||||
const result = runCli(["sprite", "claude", "--dry-run"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("swapped");
|
||||
});
|
||||
|
||||
it("should show corrected command after swap detection", () => {
|
||||
const result = runCli(["sprite", "claude", "--dry-run"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("spawn claude sprite");
|
||||
});
|
||||
|
||||
it("should swap hetzner/codex to codex/hetzner", () => {
|
||||
const result = runCli(["hetzner", "codex", "--dry-run"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("swapped");
|
||||
});
|
||||
|
||||
it("should not swap when arguments are in correct order", () => {
|
||||
// "spawn claude sprite" is correct order - no swap message
|
||||
const result = runCli(["claude", "sprite", "--dry-run"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).not.toContain("swapped");
|
||||
});
|
||||
|
||||
it("should not swap when both args are unknown", () => {
|
||||
const result = runCli(["fakething", "otherfake"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).not.toContain("swapped");
|
||||
});
|
||||
});
|
||||
|
||||
// ── cmdRun: display name resolution ───────────────────────────────────────
|
||||
|
||||
describe("cmdRun display name resolution", () => {
|
||||
it("should resolve case-insensitive agent key", () => {
|
||||
// "Claude" should resolve to "claude"
|
||||
const result = runCli(["Claude", "sprite", "--dry-run"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
// Should resolve and proceed (may show "Resolved" message)
|
||||
// Should NOT show "Unknown agent" error
|
||||
expect(output).not.toContain("Unknown agent");
|
||||
});
|
||||
|
||||
it("should resolve case-insensitive cloud key", () => {
|
||||
// "Sprite" should resolve to "sprite"
|
||||
const result = runCli(["claude", "Sprite", "--dry-run"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).not.toContain("Unknown cloud");
|
||||
});
|
||||
|
||||
it("should show resolution message when name is resolved", () => {
|
||||
// "CLAUDE" -> "claude" should trigger "Resolved" message
|
||||
const result = runCli(["CLAUDE", "sprite", "--dry-run"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("Resolved");
|
||||
expect(output).toContain("claude");
|
||||
});
|
||||
|
||||
it("should resolve agent display name to key", () => {
|
||||
// "Claude Code" is the display name for agent key "claude"
|
||||
const result = runCli(["Claude Code", "sprite", "--dry-run"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("Resolved");
|
||||
expect(output).toContain("claude");
|
||||
});
|
||||
|
||||
it("should resolve cloud display name to key", () => {
|
||||
// "Hetzner Cloud" is the display name for cloud key "hetzner"
|
||||
const result = runCli(["claude", "Hetzner Cloud", "--dry-run"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("Resolved");
|
||||
expect(output).toContain("hetzner");
|
||||
});
|
||||
|
||||
it("should not show resolution message for exact key match", () => {
|
||||
// "claude" is already the exact key - no resolution needed
|
||||
const result = runCli(["claude", "sprite", "--dry-run"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).not.toContain("Resolved");
|
||||
});
|
||||
|
||||
it("should show unknown agent error for truly invalid agent", () => {
|
||||
const result = runCli(["notarealagent", "sprite"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("Unknown agent");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should show unknown cloud error for truly invalid cloud", () => {
|
||||
const result = runCli(["claude", "notarealcloud"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("Unknown cloud");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
});
|
||||
|
||||
// ── showInfoOrError: display name resolution ──────────────────────────────
|
||||
|
||||
describe("showInfoOrError display name resolution", () => {
|
||||
it("should resolve agent display name to agent info", () => {
|
||||
// "Claude Code" -> resolves to "claude" via resolveAgentKey -> shows agent info
|
||||
const result = runCli(["Claude Code"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("Available clouds");
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should resolve cloud display name to cloud info", () => {
|
||||
// "Hetzner Cloud" -> resolves to "hetzner" via resolveCloudKey -> shows cloud info
|
||||
const result = runCli(["Hetzner Cloud"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("Available agents");
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should resolve case-insensitive agent display name", () => {
|
||||
// "claude code" (lowercase) -> resolves to agent info
|
||||
const result = runCli(["claude code"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("Available clouds");
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should resolve case-insensitive cloud display name", () => {
|
||||
// "hetzner cloud" (lowercase) -> resolves to cloud info
|
||||
const result = runCli(["hetzner cloud"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("Available agents");
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should resolve uppercase agent key", () => {
|
||||
// "CLAUDE" -> resolves to "claude" key
|
||||
const result = runCli(["CLAUDE"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("Available clouds");
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should resolve uppercase cloud key", () => {
|
||||
// "HETZNER" -> resolves to "hetzner" key
|
||||
const result = runCli(["HETZNER"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("Available agents");
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should resolve mixed case agent key", () => {
|
||||
const result = runCli(["Codex"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("Available clouds");
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
});
|
||||
|
||||
// ── cmdRun: "did you mean" suggestions ────────────────────────────────────
|
||||
|
||||
describe("cmdRun did-you-mean suggestions", () => {
|
||||
it("should suggest closest agent match for typo", () => {
|
||||
// "claud" is close to "claude" (distance 1)
|
||||
const result = runCli(["claud", "sprite"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("Did you mean");
|
||||
expect(output).toContain("claude");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should suggest closest cloud match for typo", () => {
|
||||
// "sprte" is close to "sprite" (distance 1)
|
||||
const result = runCli(["claude", "sprte"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("Did you mean");
|
||||
expect(output).toContain("sprite");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should not suggest anything for completely different agent", () => {
|
||||
const result = runCli(["kubernetes", "sprite"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("Unknown agent");
|
||||
expect(output).not.toContain("Did you mean");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should show spawn agents hint for unknown agent", () => {
|
||||
const result = runCli(["notreal", "sprite"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("spawn agents");
|
||||
});
|
||||
|
||||
it("should show spawn clouds hint for unknown cloud", () => {
|
||||
const result = runCli(["claude", "notreal"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("spawn clouds");
|
||||
});
|
||||
});
|
||||
|
||||
// ── validateImplementation: not-implemented error paths ───────────────────
|
||||
|
||||
describe("cmdRun not-implemented error", () => {
|
||||
it("should show not implemented error for missing matrix entry", () => {
|
||||
// Find a known missing combination from the manifest
|
||||
// We check a combination that exists in the manifest as "missing"
|
||||
// This tests validateImplementation's error messaging
|
||||
const result = runCli(["claude", "cherry-servers"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
// Should either succeed (if implemented) or show useful error
|
||||
// The key thing is it doesn't crash
|
||||
if (result.exitCode !== 0) {
|
||||
// If not implemented, should show helpful alternatives
|
||||
expect(output.length).toBeGreaterThan(0);
|
||||
}
|
||||
});
|
||||
|
||||
it("should suggest alternative clouds when agent is not on specified cloud", () => {
|
||||
// We need a cloud that exists but doesn't have all agents
|
||||
// Test the "available on N clouds" message path
|
||||
// Using a known agent with a cloud that may not have it
|
||||
const result = runCli(["claude", "cherry-servers"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
if (output.includes("not yet implemented")) {
|
||||
// Should suggest alternative clouds
|
||||
expect(output).toMatch(/available on|Try one of these/);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
|
@ -1,290 +0,0 @@
|
|||
import { describe, it, expect, beforeEach, afterEach, mock, spyOn } from "bun:test";
|
||||
import { execSync } from "child_process";
|
||||
import { resolve } from "path";
|
||||
|
||||
/**
|
||||
* Tests for index.ts main() routing, handleError, and isInteractiveTTY.
|
||||
*
|
||||
* These functions have zero direct test coverage:
|
||||
* - handleError: formats errors and exits with code 1
|
||||
* - isInteractiveTTY: checks stdin/stdout TTY status
|
||||
* - main() routing: the actual switch statement that dispatches commands
|
||||
*
|
||||
* Since index.ts calls process.exit and has module-level side effects,
|
||||
* we test it by spawning bun subprocesses with controlled environments
|
||||
* (same approach as unicode-detect.test.ts).
|
||||
*
|
||||
* Agent: test-engineer
|
||||
*/
|
||||
|
||||
const CLI_DIR = resolve(import.meta.dir, "../..");
|
||||
|
||||
// Helper: run the CLI with given args and return { stdout, stderr, exitCode }
|
||||
function runCli(
|
||||
args: string[],
|
||||
env: Record<string, string> = {}
|
||||
): { stdout: string; stderr: string; exitCode: number } {
|
||||
const cmd = `bun run src/index.ts ${args.join(" ")}`;
|
||||
try {
|
||||
const stdout = execSync(cmd, {
|
||||
cwd: CLI_DIR,
|
||||
env: {
|
||||
...process.env,
|
||||
...env,
|
||||
// Ensure bun is in PATH for child processes
|
||||
PATH: `${process.env.HOME}/.bun/bin:${process.env.PATH}`,
|
||||
// Prevent auto-update from running during tests
|
||||
SPAWN_NO_UPDATE_CHECK: "1",
|
||||
// Prevent local manifest.json from being used
|
||||
NODE_ENV: "test",
|
||||
BUN_ENV: "test",
|
||||
// Prevent ANSI color codes in output (CI sets FORCE_COLOR/CI vars)
|
||||
NO_COLOR: "1",
|
||||
},
|
||||
encoding: "utf-8",
|
||||
timeout: 15000,
|
||||
stdio: ["pipe", "pipe", "pipe"],
|
||||
});
|
||||
return { stdout, stderr: "", exitCode: 0 };
|
||||
} catch (err: any) {
|
||||
return {
|
||||
stdout: err.stdout || "",
|
||||
stderr: err.stderr || "",
|
||||
exitCode: err.status ?? 1,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
describe("index.ts main() routing", () => {
|
||||
// ── help command routing ──────────────────────────────────────────────
|
||||
|
||||
describe("help command", () => {
|
||||
it("should show help with 'help' command", () => {
|
||||
const result = runCli(["help"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("USAGE");
|
||||
expect(output).toContain("spawn");
|
||||
});
|
||||
|
||||
it("should show help with '--help' flag", () => {
|
||||
const result = runCli(["--help"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("USAGE");
|
||||
});
|
||||
|
||||
it("should show help with '-h' flag", () => {
|
||||
const result = runCli(["-h"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("USAGE");
|
||||
});
|
||||
|
||||
it("should include all sections in help output", () => {
|
||||
const result = runCli(["help"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("USAGE");
|
||||
expect(output).toContain("EXAMPLES");
|
||||
expect(output).toContain("AUTHENTICATION");
|
||||
expect(output).toContain("TROUBLESHOOTING");
|
||||
expect(output).toContain("INSTALL");
|
||||
expect(output).toContain("MORE INFO");
|
||||
});
|
||||
|
||||
it("should include --prompt and --prompt-file in help", () => {
|
||||
const result = runCli(["help"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("--prompt");
|
||||
expect(output).toContain("--prompt-file");
|
||||
});
|
||||
});
|
||||
|
||||
// ── version command routing ─────────────────────────────────────────
|
||||
|
||||
describe("version command", () => {
|
||||
it("should show version with 'version' command", () => {
|
||||
const result = runCli(["version"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toMatch(/spawn v\d+\.\d+\.\d+/);
|
||||
});
|
||||
|
||||
it("should show version with '--version' flag", () => {
|
||||
const result = runCli(["--version"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toMatch(/spawn v\d+\.\d+\.\d+/);
|
||||
});
|
||||
|
||||
it("should show version with '-v' flag", () => {
|
||||
const result = runCli(["-v"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toMatch(/spawn v\d+\.\d+\.\d+/);
|
||||
});
|
||||
|
||||
it("should show version with '-V' flag", () => {
|
||||
const result = runCli(["-V"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toMatch(/spawn v\d+\.\d+\.\d+/);
|
||||
});
|
||||
});
|
||||
|
||||
// ── subcommand --help routing ───────────────────────────────────────
|
||||
|
||||
describe("subcommand --help shows general help", () => {
|
||||
it("should show help for 'list --help'", () => {
|
||||
const result = runCli(["list", "--help"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("USAGE");
|
||||
});
|
||||
|
||||
it("should show help for 'agents --help'", () => {
|
||||
const result = runCli(["agents", "--help"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("USAGE");
|
||||
});
|
||||
|
||||
it("should show help for 'clouds --help'", () => {
|
||||
const result = runCli(["clouds", "--help"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("USAGE");
|
||||
});
|
||||
|
||||
it("should show help for 'update --help'", () => {
|
||||
const result = runCli(["update", "--help"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("USAGE");
|
||||
});
|
||||
|
||||
it("should show help for 'list -h'", () => {
|
||||
const result = runCli(["list", "-h"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("USAGE");
|
||||
});
|
||||
|
||||
it("should show help for 'agents help'", () => {
|
||||
const result = runCli(["agents", "help"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("USAGE");
|
||||
});
|
||||
});
|
||||
|
||||
// ── ls alias routing ───────────────────────────────────────────────
|
||||
|
||||
describe("ls alias", () => {
|
||||
it("should show help for 'ls --help'", () => {
|
||||
const result = runCli(["ls", "--help"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("USAGE");
|
||||
});
|
||||
});
|
||||
|
||||
// ── non-TTY mode with no args ──────────────────────────────────────
|
||||
|
||||
describe("non-TTY mode", () => {
|
||||
it("should show non-TTY hint when run without args in non-TTY mode", () => {
|
||||
// When stdin is not a TTY (piped), and no args, it shows the non-TTY hint
|
||||
const result = runCli([]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("Cannot run interactive picker: not a terminal");
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe("handleError formatting", () => {
|
||||
// handleError is not exported, so we test it through the actual CLI
|
||||
|
||||
describe("error with Error object", () => {
|
||||
it("should show error message for invalid identifier", () => {
|
||||
const result = runCli(["../hack", "sprite"]);
|
||||
const output = result.stderr + result.stdout;
|
||||
expect(output).toContain("can only contain");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should show help hint in error output", () => {
|
||||
const result = runCli(["../hack", "sprite"]);
|
||||
const output = result.stderr + result.stdout;
|
||||
// handleError appends: Run 'spawn help' for usage information.
|
||||
// But the error may come from validateIdentifier before handleError
|
||||
// Either way, the CLI should provide helpful error messaging
|
||||
expect(output.length).toBeGreaterThan(0);
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe("error for empty input", () => {
|
||||
it("should exit with error for empty agent name in run command", () => {
|
||||
// This tests the "prompt requires both agent and cloud" path
|
||||
const result = runCli(["--prompt", "test text"]);
|
||||
const output = result.stderr + result.stdout;
|
||||
expect(output).toContain("--prompt requires both");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe("extractFlagValue in actual CLI", () => {
|
||||
describe("--prompt flag missing value", () => {
|
||||
it("should error when --prompt is last argument", () => {
|
||||
const result = runCli(["claude", "sprite", "--prompt"]);
|
||||
const output = result.stderr + result.stdout;
|
||||
expect(output).toContain("--prompt requires a value");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should error when -p is last argument", () => {
|
||||
const result = runCli(["claude", "sprite", "-p"]);
|
||||
const output = result.stderr + result.stdout;
|
||||
expect(output).toContain("-p requires a value");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should error when --prompt-file is last argument", () => {
|
||||
const result = runCli(["claude", "sprite", "--prompt-file"]);
|
||||
const output = result.stderr + result.stdout;
|
||||
expect(output).toContain("--prompt-file requires a value");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should error when --prompt value starts with -", () => {
|
||||
const result = runCli(["claude", "sprite", "--prompt", "--verbose"]);
|
||||
const output = result.stderr + result.stdout;
|
||||
expect(output).toContain("--prompt requires a value");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe("--prompt and --prompt-file mutual exclusion", () => {
|
||||
it("should error when both --prompt and --prompt-file are given", () => {
|
||||
const result = runCli(["claude", "sprite", "--prompt", "text", "--prompt-file", "file.txt"]);
|
||||
const output = result.stderr + result.stdout;
|
||||
expect(output).toContain("cannot be used together");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe("--prompt-file with missing file", () => {
|
||||
it("should error when prompt file does not exist", () => {
|
||||
const result = runCli(["claude", "sprite", "--prompt-file", "/tmp/nonexistent-spawn-test-file.txt"]);
|
||||
const output = result.stderr + result.stdout;
|
||||
expect(output).toContain("Prompt file not found");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe("prompt-only-without-cloud error", () => {
|
||||
it("should error when --prompt is given without any agent/cloud", () => {
|
||||
// When no positional args, prompt-without-cloud error triggers
|
||||
const result = runCli(["--prompt", "Fix bugs"]);
|
||||
const output = result.stderr + result.stdout;
|
||||
expect(output).toContain("--prompt requires both");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should include usage hint in prompt-only error", () => {
|
||||
const result = runCli(["--prompt", "Fix bugs"]);
|
||||
const output = result.stderr + result.stdout;
|
||||
// Should mention that both agent and cloud are required
|
||||
expect(output).toContain("<agent>");
|
||||
expect(output).toContain("<cloud>");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
});
|
||||
|
|
@ -1,518 +0,0 @@
|
|||
import { describe, it, expect, beforeEach, afterEach } from "bun:test";
|
||||
import { execSync } from "child_process";
|
||||
import { resolve, join } from "path";
|
||||
import { mkdirSync, writeFileSync, rmSync, existsSync, chmodSync } from "fs";
|
||||
import { tmpdir } from "os";
|
||||
|
||||
/**
|
||||
* Tests for install.sh bash helper functions.
|
||||
*
|
||||
* install.sh is the entry point for all new users (`curl ... | bash`).
|
||||
* It has been modified in 3 of the last 5 commits and its helper functions
|
||||
* had zero test coverage. These tests exercise:
|
||||
*
|
||||
* - version_gte: Semver comparison (determines if bun upgrade is needed)
|
||||
* - find_install_dir: Install directory resolution (PATH-aware)
|
||||
* - ensure_in_path: PATH detection and shell-specific instructions
|
||||
*
|
||||
* Each test sources the relevant functions from install.sh in an isolated
|
||||
* bash subprocess with controlled PATH and HOME environment.
|
||||
*
|
||||
* Agent: test-engineer
|
||||
*/
|
||||
|
||||
const REPO_ROOT = resolve(import.meta.dir, "../../..");
|
||||
const INSTALL_SH = resolve(REPO_ROOT, "cli/install.sh");
|
||||
|
||||
/**
|
||||
* Extract and run just the helper functions from install.sh.
|
||||
* We source the function definitions without running the main body
|
||||
* by extracting them into a separate script.
|
||||
*/
|
||||
function runBashWithHelpers(
|
||||
script: string,
|
||||
env?: Record<string, string>
|
||||
): { exitCode: number; stdout: string; stderr: string } {
|
||||
// Extract the function definitions from install.sh (before the main body)
|
||||
// The main body starts after the last function definition
|
||||
const helperScript = `
|
||||
set -eo pipefail
|
||||
|
||||
# Color codes (from install.sh)
|
||||
RED='\\033[0;31m'
|
||||
GREEN='\\033[0;32m'
|
||||
YELLOW='\\033[1;33m'
|
||||
BOLD='\\033[1m'
|
||||
NC='\\033[0m'
|
||||
|
||||
log_info() { echo -e "\${GREEN}[spawn]\${NC} $1"; }
|
||||
log_warn() { echo -e "\${YELLOW}[spawn]\${NC} $1"; }
|
||||
log_error() { echo -e "\${RED}[spawn]\${NC} $1"; }
|
||||
|
||||
# version_gte from install.sh
|
||||
version_gte() {
|
||||
local IFS='.'
|
||||
local a=($1) b=($2)
|
||||
local i=0
|
||||
while [ $i -lt \${#b[@]} ]; do
|
||||
local av="\${a[$i]:-0}"
|
||||
local bv="\${b[$i]:-0}"
|
||||
if [ "$av" -lt "$bv" ]; then
|
||||
return 1
|
||||
elif [ "$av" -gt "$bv" ]; then
|
||||
return 0
|
||||
fi
|
||||
i=$((i + 1))
|
||||
done
|
||||
return 0
|
||||
}
|
||||
|
||||
# find_install_dir from install.sh (needs bun mock)
|
||||
find_install_dir() {
|
||||
if [ -n "\${SPAWN_INSTALL_DIR:-}" ]; then
|
||||
echo "\${SPAWN_INSTALL_DIR}"
|
||||
return
|
||||
fi
|
||||
local dirs=(
|
||||
"\${HOME}/.local/bin"
|
||||
"\$(bun pm bin -g 2>/dev/null)"
|
||||
"\${HOME}/.bun/bin"
|
||||
"\${HOME}/bin"
|
||||
)
|
||||
for dir in "\${dirs[@]}"; do
|
||||
[ -z "$dir" ] && continue
|
||||
if echo "\${PATH}" | tr ':' '\\n' | grep -qx "$dir"; then
|
||||
echo "$dir"
|
||||
return
|
||||
fi
|
||||
done
|
||||
echo "\${HOME}/.local/bin"
|
||||
}
|
||||
|
||||
# ensure_in_path from install.sh
|
||||
ensure_in_path() {
|
||||
local install_dir="$1"
|
||||
if echo "\${PATH}" | tr ':' '\\n' | grep -qx "\${install_dir}"; then
|
||||
echo "IN_PATH"
|
||||
else
|
||||
echo "NOT_IN_PATH"
|
||||
case "\${SHELL:-/bin/bash}" in
|
||||
*/zsh)
|
||||
echo "SHELL_TYPE=zsh"
|
||||
;;
|
||||
*/fish)
|
||||
echo "SHELL_TYPE=fish"
|
||||
;;
|
||||
*)
|
||||
echo "SHELL_TYPE=bash"
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
}
|
||||
|
||||
${script}
|
||||
`;
|
||||
|
||||
const defaultEnv: Record<string, string> = {
|
||||
PATH: process.env.PATH || "/usr/bin:/bin",
|
||||
HOME: env?.HOME || "/tmp/test-home",
|
||||
SHELL: env?.SHELL || "/bin/bash",
|
||||
};
|
||||
|
||||
const mergedEnv = { ...defaultEnv, ...env };
|
||||
|
||||
try {
|
||||
const stdout = execSync(`bash -c '${helperScript.replace(/'/g, "'\\''")}'`, {
|
||||
encoding: "utf-8",
|
||||
timeout: 10000,
|
||||
stdio: ["pipe", "pipe", "pipe"],
|
||||
env: mergedEnv,
|
||||
});
|
||||
return { exitCode: 0, stdout: stdout.trim(), stderr: "" };
|
||||
} catch (err: any) {
|
||||
return {
|
||||
exitCode: err.status ?? 1,
|
||||
stdout: (err.stdout || "").trim(),
|
||||
stderr: (err.stderr || "").trim(),
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
// ── version_gte tests ──────────────────────────────────────────────────────
|
||||
|
||||
describe("install.sh version_gte", () => {
|
||||
describe("equal versions", () => {
|
||||
it("should return true (0) for identical versions", () => {
|
||||
const result = runBashWithHelpers('version_gte "1.2.3" "1.2.3" && echo "YES" || echo "NO"');
|
||||
expect(result.stdout).toBe("YES");
|
||||
});
|
||||
|
||||
it("should return true (0) for 0.0.0 == 0.0.0", () => {
|
||||
const result = runBashWithHelpers('version_gte "0.0.0" "0.0.0" && echo "YES" || echo "NO"');
|
||||
expect(result.stdout).toBe("YES");
|
||||
});
|
||||
});
|
||||
|
||||
describe("greater versions", () => {
|
||||
it("should return true when major is greater", () => {
|
||||
const result = runBashWithHelpers('version_gte "2.0.0" "1.0.0" && echo "YES" || echo "NO"');
|
||||
expect(result.stdout).toBe("YES");
|
||||
});
|
||||
|
||||
it("should return true when minor is greater", () => {
|
||||
const result = runBashWithHelpers('version_gte "1.3.0" "1.2.0" && echo "YES" || echo "NO"');
|
||||
expect(result.stdout).toBe("YES");
|
||||
});
|
||||
|
||||
it("should return true when patch is greater", () => {
|
||||
const result = runBashWithHelpers('version_gte "1.2.4" "1.2.3" && echo "YES" || echo "NO"');
|
||||
expect(result.stdout).toBe("YES");
|
||||
});
|
||||
|
||||
it("should return true when major is greater despite lower minor", () => {
|
||||
const result = runBashWithHelpers('version_gte "2.0.0" "1.9.9" && echo "YES" || echo "NO"');
|
||||
expect(result.stdout).toBe("YES");
|
||||
});
|
||||
|
||||
it("should return true when minor is greater despite lower patch", () => {
|
||||
const result = runBashWithHelpers('version_gte "1.5.0" "1.4.9" && echo "YES" || echo "NO"');
|
||||
expect(result.stdout).toBe("YES");
|
||||
});
|
||||
});
|
||||
|
||||
describe("lesser versions", () => {
|
||||
it("should return false when major is less", () => {
|
||||
const result = runBashWithHelpers('version_gte "1.0.0" "2.0.0" && echo "YES" || echo "NO"');
|
||||
expect(result.stdout).toBe("NO");
|
||||
});
|
||||
|
||||
it("should return false when minor is less", () => {
|
||||
const result = runBashWithHelpers('version_gte "1.1.0" "1.2.0" && echo "YES" || echo "NO"');
|
||||
expect(result.stdout).toBe("NO");
|
||||
});
|
||||
|
||||
it("should return false when patch is less", () => {
|
||||
const result = runBashWithHelpers('version_gte "1.2.2" "1.2.3" && echo "YES" || echo "NO"');
|
||||
expect(result.stdout).toBe("NO");
|
||||
});
|
||||
});
|
||||
|
||||
describe("realistic bun version checks", () => {
|
||||
it("should pass for bun 1.2.0 >= MIN_BUN_VERSION 1.2.0", () => {
|
||||
const result = runBashWithHelpers('version_gte "1.2.0" "1.2.0" && echo "YES" || echo "NO"');
|
||||
expect(result.stdout).toBe("YES");
|
||||
});
|
||||
|
||||
it("should pass for bun 1.2.5 >= MIN_BUN_VERSION 1.2.0", () => {
|
||||
const result = runBashWithHelpers('version_gte "1.2.5" "1.2.0" && echo "YES" || echo "NO"');
|
||||
expect(result.stdout).toBe("YES");
|
||||
});
|
||||
|
||||
it("should fail for bun 1.1.0 >= MIN_BUN_VERSION 1.2.0", () => {
|
||||
const result = runBashWithHelpers('version_gte "1.1.0" "1.2.0" && echo "YES" || echo "NO"');
|
||||
expect(result.stdout).toBe("NO");
|
||||
});
|
||||
|
||||
it("should fail for bun 1.0.33 >= MIN_BUN_VERSION 1.2.0", () => {
|
||||
const result = runBashWithHelpers('version_gte "1.0.33" "1.2.0" && echo "YES" || echo "NO"');
|
||||
expect(result.stdout).toBe("NO");
|
||||
});
|
||||
|
||||
it("should pass for bun 1.3.0 >= MIN_BUN_VERSION 1.2.0", () => {
|
||||
const result = runBashWithHelpers('version_gte "1.3.0" "1.2.0" && echo "YES" || echo "NO"');
|
||||
expect(result.stdout).toBe("YES");
|
||||
});
|
||||
});
|
||||
|
||||
describe("segment edge cases", () => {
|
||||
it("should handle two-segment version against three-segment", () => {
|
||||
// "1.2" means a=(1 2), b=(1 2 0), missing a[2] defaults to 0
|
||||
const result = runBashWithHelpers('version_gte "1.2" "1.2.0" && echo "YES" || echo "NO"');
|
||||
expect(result.stdout).toBe("YES");
|
||||
});
|
||||
|
||||
it("should handle three-segment against two-segment", () => {
|
||||
// b has only 2 parts, loop only runs twice, so 1.2.5 >= 1.2
|
||||
const result = runBashWithHelpers('version_gte "1.2.5" "1.2" && echo "YES" || echo "NO"');
|
||||
expect(result.stdout).toBe("YES");
|
||||
});
|
||||
|
||||
it("should handle single-segment versions", () => {
|
||||
const result = runBashWithHelpers('version_gte "2" "1" && echo "YES" || echo "NO"');
|
||||
expect(result.stdout).toBe("YES");
|
||||
});
|
||||
|
||||
it("should handle single less than single", () => {
|
||||
const result = runBashWithHelpers('version_gte "1" "2" && echo "YES" || echo "NO"');
|
||||
expect(result.stdout).toBe("NO");
|
||||
});
|
||||
|
||||
it("should handle large version numbers", () => {
|
||||
const result = runBashWithHelpers('version_gte "100.200.300" "100.200.299" && echo "YES" || echo "NO"');
|
||||
expect(result.stdout).toBe("YES");
|
||||
});
|
||||
|
||||
it("should handle version with extra trailing segments (only compares up to b length)", () => {
|
||||
// b=(1 2), loop runs 2 times. a=(1 2 9) - extra segment ignored
|
||||
const result = runBashWithHelpers('version_gte "1.2.9" "1.2" && echo "YES" || echo "NO"');
|
||||
expect(result.stdout).toBe("YES");
|
||||
});
|
||||
|
||||
it("should handle missing segment in a as 0 when comparing", () => {
|
||||
// a=(1 2), b=(1 2 1), loop runs 3 times, a[2]=0 < b[2]=1
|
||||
const result = runBashWithHelpers('version_gte "1.2" "1.2.1" && echo "YES" || echo "NO"');
|
||||
expect(result.stdout).toBe("NO");
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
// ── find_install_dir tests ──────────────────────────────────────────────────
|
||||
|
||||
describe("install.sh find_install_dir", () => {
|
||||
let testDir: string;
|
||||
|
||||
beforeEach(() => {
|
||||
testDir = join(tmpdir(), `spawn-install-test-${Date.now()}-${Math.random()}`);
|
||||
mkdirSync(testDir, { recursive: true });
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
if (existsSync(testDir)) {
|
||||
rmSync(testDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
it("should use SPAWN_INSTALL_DIR when set", () => {
|
||||
const customDir = join(testDir, "custom-bin");
|
||||
mkdirSync(customDir, { recursive: true });
|
||||
const result = runBashWithHelpers("find_install_dir", {
|
||||
HOME: testDir,
|
||||
SPAWN_INSTALL_DIR: customDir,
|
||||
});
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe(customDir);
|
||||
});
|
||||
|
||||
it("should prefer ~/.local/bin when it is in PATH", () => {
|
||||
const localBin = join(testDir, ".local", "bin");
|
||||
mkdirSync(localBin, { recursive: true });
|
||||
const result = runBashWithHelpers("find_install_dir", {
|
||||
HOME: testDir,
|
||||
PATH: `${localBin}:/usr/bin:/bin`,
|
||||
});
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe(localBin);
|
||||
});
|
||||
|
||||
it("should fall back to ~/.bun/bin when ~/.local/bin is not in PATH", () => {
|
||||
const bunBin = join(testDir, ".bun", "bin");
|
||||
mkdirSync(bunBin, { recursive: true });
|
||||
const result = runBashWithHelpers("find_install_dir", {
|
||||
HOME: testDir,
|
||||
PATH: `${bunBin}:/usr/bin:/bin`,
|
||||
});
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe(bunBin);
|
||||
});
|
||||
|
||||
it("should fall back to ~/bin when other options not in PATH", () => {
|
||||
const homeBin = join(testDir, "bin");
|
||||
mkdirSync(homeBin, { recursive: true });
|
||||
const result = runBashWithHelpers("find_install_dir", {
|
||||
HOME: testDir,
|
||||
PATH: `${homeBin}:/usr/bin:/bin`,
|
||||
});
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe(homeBin);
|
||||
});
|
||||
|
||||
it("should default to ~/.local/bin when nothing matches PATH", () => {
|
||||
const result = runBashWithHelpers("find_install_dir", {
|
||||
HOME: testDir,
|
||||
PATH: "/usr/bin:/bin",
|
||||
});
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe(join(testDir, ".local", "bin"));
|
||||
});
|
||||
|
||||
it("should override all heuristics with SPAWN_INSTALL_DIR", () => {
|
||||
const localBin = join(testDir, ".local", "bin");
|
||||
mkdirSync(localBin, { recursive: true });
|
||||
const override = join(testDir, "my-override");
|
||||
mkdirSync(override, { recursive: true });
|
||||
const result = runBashWithHelpers("find_install_dir", {
|
||||
HOME: testDir,
|
||||
PATH: `${localBin}:/usr/bin:/bin`,
|
||||
SPAWN_INSTALL_DIR: override,
|
||||
});
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe(override);
|
||||
});
|
||||
});
|
||||
|
||||
// ── ensure_in_path tests ────────────────────────────────────────────────────
|
||||
|
||||
describe("install.sh ensure_in_path", () => {
|
||||
let testDir: string;
|
||||
|
||||
beforeEach(() => {
|
||||
testDir = join(tmpdir(), `spawn-path-test-${Date.now()}-${Math.random()}`);
|
||||
mkdirSync(testDir, { recursive: true });
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
if (existsSync(testDir)) {
|
||||
rmSync(testDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
it("should detect when install dir IS in PATH", () => {
|
||||
const binDir = join(testDir, "bin");
|
||||
mkdirSync(binDir, { recursive: true });
|
||||
const result = runBashWithHelpers(`ensure_in_path "${binDir}"`, {
|
||||
HOME: testDir,
|
||||
PATH: `${binDir}:/usr/bin:/bin`,
|
||||
});
|
||||
expect(result.stdout).toContain("IN_PATH");
|
||||
expect(result.stdout).not.toContain("NOT_IN_PATH");
|
||||
});
|
||||
|
||||
it("should detect when install dir is NOT in PATH", () => {
|
||||
const binDir = join(testDir, "bin");
|
||||
mkdirSync(binDir, { recursive: true });
|
||||
const result = runBashWithHelpers(`ensure_in_path "${binDir}"`, {
|
||||
HOME: testDir,
|
||||
PATH: "/usr/bin:/bin",
|
||||
});
|
||||
expect(result.stdout).toContain("NOT_IN_PATH");
|
||||
});
|
||||
|
||||
it("should suggest .bashrc for bash shell", () => {
|
||||
const binDir = join(testDir, "bin");
|
||||
mkdirSync(binDir, { recursive: true });
|
||||
const result = runBashWithHelpers(`ensure_in_path "${binDir}"`, {
|
||||
HOME: testDir,
|
||||
PATH: "/usr/bin:/bin",
|
||||
SHELL: "/bin/bash",
|
||||
});
|
||||
expect(result.stdout).toContain("SHELL_TYPE=bash");
|
||||
});
|
||||
|
||||
it("should suggest .zshrc for zsh shell", () => {
|
||||
const binDir = join(testDir, "bin");
|
||||
mkdirSync(binDir, { recursive: true });
|
||||
const result = runBashWithHelpers(`ensure_in_path "${binDir}"`, {
|
||||
HOME: testDir,
|
||||
PATH: "/usr/bin:/bin",
|
||||
SHELL: "/bin/zsh",
|
||||
});
|
||||
expect(result.stdout).toContain("SHELL_TYPE=zsh");
|
||||
});
|
||||
|
||||
it("should suggest fish_add_path for fish shell", () => {
|
||||
const binDir = join(testDir, "bin");
|
||||
mkdirSync(binDir, { recursive: true });
|
||||
const result = runBashWithHelpers(`ensure_in_path "${binDir}"`, {
|
||||
HOME: testDir,
|
||||
PATH: "/usr/bin:/bin",
|
||||
SHELL: "/usr/bin/fish",
|
||||
});
|
||||
expect(result.stdout).toContain("SHELL_TYPE=fish");
|
||||
});
|
||||
|
||||
it("should default to bash when SHELL is unset", () => {
|
||||
const binDir = join(testDir, "bin");
|
||||
mkdirSync(binDir, { recursive: true });
|
||||
// Explicitly unset SHELL
|
||||
const result = runBashWithHelpers(`unset SHELL; ensure_in_path "${binDir}"`, {
|
||||
HOME: testDir,
|
||||
PATH: "/usr/bin:/bin",
|
||||
});
|
||||
expect(result.stdout).toContain("SHELL_TYPE=bash");
|
||||
});
|
||||
|
||||
it("should handle PATH with many entries", () => {
|
||||
const binDir = join(testDir, "bin");
|
||||
mkdirSync(binDir, { recursive: true });
|
||||
const longPath = Array.from({ length: 20 }, (_, i) => `/fake/path/${i}`).join(":");
|
||||
const result = runBashWithHelpers(`ensure_in_path "${binDir}"`, {
|
||||
HOME: testDir,
|
||||
PATH: `${longPath}:${binDir}:/usr/bin`,
|
||||
});
|
||||
expect(result.stdout).toContain("IN_PATH");
|
||||
});
|
||||
|
||||
it("should not match partial path prefixes", () => {
|
||||
const binDir = join(testDir, "bin");
|
||||
const binDirExtra = join(testDir, "bin-extra");
|
||||
mkdirSync(binDir, { recursive: true });
|
||||
mkdirSync(binDirExtra, { recursive: true });
|
||||
// PATH contains bin-extra but not bin
|
||||
const result = runBashWithHelpers(`ensure_in_path "${binDir}"`, {
|
||||
HOME: testDir,
|
||||
PATH: `${binDirExtra}:/usr/bin:/bin`,
|
||||
});
|
||||
expect(result.stdout).toContain("NOT_IN_PATH");
|
||||
});
|
||||
});
|
||||
|
||||
// ── install.sh syntax check ────────────────────────────────────────────────
|
||||
|
||||
describe("install.sh syntax", () => {
|
||||
it("should pass bash -n syntax check", () => {
|
||||
const result = execSync(`bash -n "${INSTALL_SH}" 2>&1`, {
|
||||
encoding: "utf-8",
|
||||
timeout: 5000,
|
||||
});
|
||||
// bash -n produces no output on success
|
||||
expect(result.trim()).toBe("");
|
||||
});
|
||||
|
||||
it("should have a valid shebang line", () => {
|
||||
const { readFileSync } = require("fs");
|
||||
const content = readFileSync(INSTALL_SH, "utf-8");
|
||||
expect(content.startsWith("#!/bin/bash")).toBe(true);
|
||||
});
|
||||
|
||||
it("should use set -eo pipefail", () => {
|
||||
const { readFileSync } = require("fs");
|
||||
const content = readFileSync(INSTALL_SH, "utf-8");
|
||||
expect(content).toContain("set -eo pipefail");
|
||||
});
|
||||
|
||||
it("should define MIN_BUN_VERSION constant", () => {
|
||||
const { readFileSync } = require("fs");
|
||||
const content = readFileSync(INSTALL_SH, "utf-8");
|
||||
expect(content).toMatch(/MIN_BUN_VERSION="[0-9]+\.[0-9]+\.[0-9]+"/);
|
||||
});
|
||||
|
||||
it("should define version_gte function", () => {
|
||||
const { readFileSync } = require("fs");
|
||||
const content = readFileSync(INSTALL_SH, "utf-8");
|
||||
expect(content).toContain("version_gte()");
|
||||
});
|
||||
|
||||
it("should define find_install_dir function", () => {
|
||||
const { readFileSync } = require("fs");
|
||||
const content = readFileSync(INSTALL_SH, "utf-8");
|
||||
expect(content).toContain("find_install_dir()");
|
||||
});
|
||||
|
||||
it("should define ensure_in_path function", () => {
|
||||
const { readFileSync } = require("fs");
|
||||
const content = readFileSync(INSTALL_SH, "utf-8");
|
||||
expect(content).toContain("ensure_in_path()");
|
||||
});
|
||||
|
||||
it("should define build_and_install function", () => {
|
||||
const { readFileSync } = require("fs");
|
||||
const content = readFileSync(INSTALL_SH, "utf-8");
|
||||
expect(content).toContain("build_and_install()");
|
||||
});
|
||||
|
||||
it("should define clone_cli function", () => {
|
||||
const { readFileSync } = require("fs");
|
||||
const content = readFileSync(INSTALL_SH, "utf-8");
|
||||
expect(content).toContain("clone_cli()");
|
||||
});
|
||||
});
|
||||
|
|
@ -1,288 +0,0 @@
|
|||
import { describe, it, expect, beforeAll, afterAll } from "bun:test";
|
||||
import { execSync } from "child_process";
|
||||
import { resolve } from "path";
|
||||
import { writeFileSync, mkdirSync, rmSync, existsSync } from "fs";
|
||||
|
||||
/**
|
||||
* Tests for error paths when agent/cloud arguments are missing.
|
||||
*
|
||||
* These paths in index.ts have zero test coverage:
|
||||
* - suggestCloudsForPrompt (lines 154-178): shows available clouds when
|
||||
* --prompt is used with agent but no cloud
|
||||
* - handleNoCommand dry-run error (lines 238-242): --dry-run without agent/cloud
|
||||
* - handleNoCommand prompt error (lines 243-247): --prompt without agent/cloud
|
||||
* - handleDefaultCommand dry-run error (lines 141-145): --dry-run with agent but no cloud
|
||||
*
|
||||
* These are user-facing error messages that guide users to correct usage.
|
||||
*
|
||||
* Agent: test-engineer
|
||||
*/
|
||||
|
||||
const CLI_DIR = resolve(import.meta.dir, "../..");
|
||||
const PROJECT_ROOT = resolve(CLI_DIR, "..");
|
||||
const TEST_DIR = resolve("/tmp", `spawn-no-cloud-test-${Date.now()}`);
|
||||
|
||||
function runCli(
|
||||
args: string[],
|
||||
env: Record<string, string> = {}
|
||||
): { stdout: string; stderr: string; exitCode: number } {
|
||||
const quotedArgs = args
|
||||
.map((a) => `'${a.replace(/'/g, "'\\''")}'`)
|
||||
.join(" ");
|
||||
const cmd = `bun run ${CLI_DIR}/src/index.ts ${quotedArgs}`;
|
||||
try {
|
||||
const stdout = execSync(cmd, {
|
||||
cwd: PROJECT_ROOT,
|
||||
env: {
|
||||
PATH: `${process.env.HOME}/.bun/bin:${process.env.PATH}`,
|
||||
HOME: process.env.HOME,
|
||||
SHELL: process.env.SHELL,
|
||||
TERM: process.env.TERM || "xterm",
|
||||
...env,
|
||||
SPAWN_NO_UPDATE_CHECK: "1",
|
||||
NODE_ENV: "",
|
||||
BUN_ENV: "",
|
||||
},
|
||||
encoding: "utf-8",
|
||||
timeout: 15000,
|
||||
stdio: ["pipe", "pipe", "pipe"],
|
||||
});
|
||||
return { stdout, stderr: "", exitCode: 0 };
|
||||
} catch (err: any) {
|
||||
return {
|
||||
stdout: err.stdout || "",
|
||||
stderr: err.stderr || "",
|
||||
exitCode: err.status ?? 1,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
function output(result: { stdout: string; stderr: string }): string {
|
||||
return result.stdout + result.stderr;
|
||||
}
|
||||
|
||||
beforeAll(() => {
|
||||
mkdirSync(TEST_DIR, { recursive: true });
|
||||
});
|
||||
|
||||
afterAll(() => {
|
||||
if (existsSync(TEST_DIR)) {
|
||||
rmSync(TEST_DIR, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
// ── suggestCloudsForPrompt: --prompt with agent but no cloud ──────────────
|
||||
|
||||
describe("suggestCloudsForPrompt (--prompt with agent, no cloud)", () => {
|
||||
it("should show error that --prompt requires both agent and cloud", () => {
|
||||
const result = runCli(["claude", "--prompt", "Fix all bugs"]);
|
||||
expect(output(result)).toContain("--prompt requires both");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should show usage example with the agent name", () => {
|
||||
const result = runCli(["claude", "--prompt", "Fix all bugs"]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("spawn claude <cloud>");
|
||||
});
|
||||
|
||||
it("should suggest available clouds for the agent", () => {
|
||||
const result = runCli(["claude", "--prompt", "Fix all bugs"]);
|
||||
const out = output(result);
|
||||
// suggestCloudsForPrompt fetches the manifest and lists available clouds
|
||||
expect(out).toContain("Available clouds for");
|
||||
});
|
||||
|
||||
it("should show example spawn commands with specific clouds", () => {
|
||||
const result = runCli(["claude", "--prompt", "Fix all bugs"]);
|
||||
const out = output(result);
|
||||
// Should suggest at least one concrete spawn command with a real cloud
|
||||
expect(out).toMatch(/spawn claude \S+ --prompt/);
|
||||
});
|
||||
|
||||
it("should work with -p short form", () => {
|
||||
const result = runCli(["claude", "-p", "Fix bugs"]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("--prompt requires both");
|
||||
expect(out).toContain("Available clouds for");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should work with codex agent", () => {
|
||||
const result = runCli(["codex", "--prompt", "Add tests"]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("--prompt requires both");
|
||||
expect(out).toContain("spawn codex <cloud>");
|
||||
});
|
||||
|
||||
it("should suggest clouds for codex agent", () => {
|
||||
const result = runCli(["codex", "--prompt", "Refactor"]);
|
||||
const out = output(result);
|
||||
// codex has multiple implemented clouds
|
||||
expect(out).toContain("Available clouds for");
|
||||
});
|
||||
|
||||
it("should show at most 5 concrete cloud suggestions", () => {
|
||||
const result = runCli(["claude", "--prompt", "Fix bugs"]);
|
||||
const out = output(result);
|
||||
// suggestCloudsForPrompt shows max 5 examples with real cloud names
|
||||
// Filter for lines with "spawn claude <real-cloud-name> --prompt"
|
||||
// but exclude the usage hint line which has "<cloud>" placeholder
|
||||
const spawnLines = out
|
||||
.split("\n")
|
||||
.filter(
|
||||
(l) =>
|
||||
l.includes("spawn claude") &&
|
||||
l.includes("--prompt") &&
|
||||
!l.includes("<cloud>")
|
||||
);
|
||||
// Should have at most 5 example lines
|
||||
expect(spawnLines.length).toBeLessThanOrEqual(5);
|
||||
});
|
||||
|
||||
it("should show 'see all N clouds' hint when more than 5 clouds available", () => {
|
||||
// claude has many clouds (>5), so the hint should appear
|
||||
const result = runCli(["claude", "--prompt", "Fix bugs"]);
|
||||
const out = output(result);
|
||||
// Check for the "see all" hint (only shown when >5 clouds available)
|
||||
if (out.includes("see all")) {
|
||||
expect(out).toMatch(/spawn claude/);
|
||||
}
|
||||
// At minimum, the error and suggestion section should be present
|
||||
expect(out).toContain("Available clouds for");
|
||||
});
|
||||
});
|
||||
|
||||
// ── --prompt-file with agent but no cloud ─────────────────────────────────
|
||||
|
||||
describe("suggestCloudsForPrompt (--prompt-file with agent, no cloud)", () => {
|
||||
const promptFile = resolve(TEST_DIR, "prompt.txt");
|
||||
|
||||
beforeAll(() => {
|
||||
writeFileSync(promptFile, "Fix all the things");
|
||||
});
|
||||
|
||||
it("should show same error as --prompt when using --prompt-file", () => {
|
||||
const result = runCli(["claude", "--prompt-file", promptFile]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("--prompt requires both");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should suggest available clouds even with --prompt-file", () => {
|
||||
const result = runCli(["claude", "--prompt-file", promptFile]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("Available clouds for");
|
||||
});
|
||||
|
||||
it("should show usage example with <cloud> placeholder", () => {
|
||||
const result = runCli(["claude", "-f", promptFile]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("<cloud>");
|
||||
});
|
||||
});
|
||||
|
||||
// ── handleNoCommand: --dry-run without any args ───────────────────────────
|
||||
|
||||
describe("--dry-run without agent and cloud", () => {
|
||||
it("should show error that --dry-run requires both agent and cloud", () => {
|
||||
const result = runCli(["--dry-run"]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("--dry-run requires both");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should show usage hint with spawn <agent> <cloud> --dry-run", () => {
|
||||
const result = runCli(["--dry-run"]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("spawn <agent> <cloud> --dry-run");
|
||||
});
|
||||
|
||||
it("should work with -n short form", () => {
|
||||
const result = runCli(["-n"]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("--dry-run requires both");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
});
|
||||
|
||||
// ── handleDefaultCommand: --dry-run with agent but no cloud ───────────────
|
||||
|
||||
describe("--dry-run with agent but no cloud", () => {
|
||||
it("should show error that --dry-run requires both agent and cloud", () => {
|
||||
const result = runCli(["claude", "--dry-run"]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("--dry-run requires both");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should show usage hint", () => {
|
||||
const result = runCli(["claude", "--dry-run"]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("spawn <agent> <cloud> --dry-run");
|
||||
});
|
||||
|
||||
it("should work with -n short form and agent", () => {
|
||||
const result = runCli(["claude", "-n"]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("--dry-run requires both");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
});
|
||||
|
||||
// ── handleNoCommand: --prompt without any args ────────────────────────────
|
||||
|
||||
describe("--prompt without any agent or cloud", () => {
|
||||
it("should show error that --prompt requires both agent and cloud", () => {
|
||||
const result = runCli(["--prompt", "Fix bugs"]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("--prompt requires both");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should show usage hint", () => {
|
||||
const result = runCli(["--prompt", "Fix bugs"]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("spawn <agent> <cloud>");
|
||||
});
|
||||
|
||||
it("should work with -p short form", () => {
|
||||
const result = runCli(["-p", "Fix bugs"]);
|
||||
const out = output(result);
|
||||
expect(out).toContain("--prompt requires both");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
});
|
||||
|
||||
// ── Combined: --dry-run and --prompt without cloud ────────────────────────
|
||||
|
||||
describe("--dry-run combined with --prompt without cloud", () => {
|
||||
it("should show dry-run error when both --dry-run and --prompt but no cloud", () => {
|
||||
// --dry-run is checked first in handleDefaultCommand
|
||||
const result = runCli(["claude", "--dry-run", "--prompt", "Fix bugs"]);
|
||||
const out = output(result);
|
||||
// Should show one of the two errors
|
||||
expect(out).toMatch(/requires both/);
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
});
|
||||
|
||||
// ── Edge: unknown agent with --prompt ─────────────────────────────────────
|
||||
|
||||
describe("unknown agent with --prompt", () => {
|
||||
it("should show prompt-requires-cloud error even for unknown agent", () => {
|
||||
// The prompt-without-cloud check happens before agent validation
|
||||
const result = runCli(["fakeagent", "--prompt", "Fix bugs"]);
|
||||
const out = output(result);
|
||||
// Could show "requires both" or "Unknown agent" depending on routing
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should handle --prompt with agent typo gracefully", () => {
|
||||
const result = runCli(["claud", "--prompt", "Fix bugs"]);
|
||||
const out = output(result);
|
||||
// Should not crash; should show some useful error
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
expect(out.length).toBeGreaterThan(0);
|
||||
});
|
||||
});
|
||||
|
|
@ -1,101 +0,0 @@
|
|||
import { describe, it, expect } from "bun:test";
|
||||
import { readFileSync, existsSync } from "fs";
|
||||
import { join, resolve } from "path";
|
||||
import { execSync } from "child_process";
|
||||
import type { Manifest } from "../manifest";
|
||||
|
||||
/**
|
||||
* Shell script syntax validation tests.
|
||||
*
|
||||
* Runs `bash -n` on every shell script in the repository to catch syntax
|
||||
* errors before they reach users. This is the automated equivalent of
|
||||
* the CLAUDE.md rule: "Run `bash -n` on every changed .sh file."
|
||||
*
|
||||
* Coverage:
|
||||
* - shared/common.sh (core library used by all clouds)
|
||||
* - Every cloud's lib/common.sh (cloud-specific libraries)
|
||||
* - Every implemented agent script (cloud/agent.sh)
|
||||
*
|
||||
* These tests catch:
|
||||
* - Unclosed quotes, braces, parentheses
|
||||
* - Invalid syntax from bad merges or edits
|
||||
* - Bash 3.x incompatible syntax (some cases)
|
||||
* - Missing heredoc terminators
|
||||
*
|
||||
* Agent: test-engineer
|
||||
*/
|
||||
|
||||
const REPO_ROOT = resolve(import.meta.dir, "../../..");
|
||||
const manifestPath = join(REPO_ROOT, "manifest.json");
|
||||
const manifestRaw = readFileSync(manifestPath, "utf-8");
|
||||
const manifest: Manifest = JSON.parse(manifestRaw);
|
||||
|
||||
const matrixEntries = Object.entries(manifest.matrix);
|
||||
const implementedEntries = matrixEntries.filter(([, status]) => status === "implemented");
|
||||
|
||||
/** Run `bash -n` on a script file. Returns null on success, error message on failure. */
|
||||
function bashSyntaxCheck(filePath: string): string | null {
|
||||
try {
|
||||
execSync(`bash -n "${filePath}"`, {
|
||||
encoding: "utf-8",
|
||||
stdio: ["pipe", "pipe", "pipe"],
|
||||
timeout: 10000,
|
||||
});
|
||||
return null;
|
||||
} catch (err: any) {
|
||||
return (err.stderr || err.stdout || err.message || "Unknown error").trim();
|
||||
}
|
||||
}
|
||||
|
||||
describe("Shell Script Syntax Validation (bash -n)", () => {
|
||||
// ── Core shared library ────────────────────────────────────────────
|
||||
|
||||
describe("shared/common.sh", () => {
|
||||
const sharedPath = join(REPO_ROOT, "shared", "common.sh");
|
||||
|
||||
it("should exist", () => {
|
||||
expect(existsSync(sharedPath)).toBe(true);
|
||||
});
|
||||
|
||||
it("should pass bash -n syntax check", () => {
|
||||
const error = bashSyntaxCheck(sharedPath);
|
||||
if (error) {
|
||||
throw new Error(`shared/common.sh has syntax errors:\n${error}`);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
// ── Implemented agent scripts ──────────────────────────────────────
|
||||
|
||||
describe("implemented agent scripts", () => {
|
||||
it("should have at least one implemented script to check", () => {
|
||||
expect(implementedEntries.length).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
for (const [key] of implementedEntries) {
|
||||
const scriptPath = join(REPO_ROOT, key + ".sh");
|
||||
|
||||
it(`${key}.sh should pass bash -n`, () => {
|
||||
if (!existsSync(scriptPath)) {
|
||||
throw new Error(`${key}.sh does not exist but is marked as implemented`);
|
||||
}
|
||||
const error = bashSyntaxCheck(scriptPath);
|
||||
if (error) {
|
||||
throw new Error(`${key}.sh has syntax errors:\n${error}`);
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
// ── Summary stats ──────────────────────────────────────────────────
|
||||
|
||||
describe("coverage summary", () => {
|
||||
it("should check all implemented scripts", () => {
|
||||
const existing = implementedEntries.filter(([key]) =>
|
||||
existsSync(join(REPO_ROOT, key + ".sh"))
|
||||
);
|
||||
// All implemented entries should have corresponding files
|
||||
expect(existing.length).toBe(implementedEntries.length);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
|
@ -1,605 +0,0 @@
|
|||
import { describe, it, expect, beforeEach, afterEach } from "bun:test";
|
||||
import { execSync, spawnSync } from "child_process";
|
||||
import { resolve } from "path";
|
||||
|
||||
/**
|
||||
* Tests for _classify_api_result and _report_api_failure in shared/common.sh.
|
||||
*
|
||||
* These two helpers were extracted from _cloud_api_retry_loop in PR #821 to
|
||||
* reduce its cyclomatic complexity. They had zero test coverage despite being
|
||||
* invoked on EVERY cloud API call across ALL providers:
|
||||
*
|
||||
* - _classify_api_result: Decides whether to retry based on curl exit code
|
||||
* and HTTP status code. Returns a reason string or empty (success).
|
||||
* A bug here could cause infinite retries or silent failures.
|
||||
*
|
||||
* - _report_api_failure: Generates user-facing error messages after all
|
||||
* retries are exhausted. Differentiates network errors from HTTP errors
|
||||
* and includes the API response body for HTTP errors only.
|
||||
*
|
||||
* Tests run the actual bash functions in subprocesses to catch real shell
|
||||
* behavior (quoting, variable expansion, exit codes).
|
||||
*
|
||||
* Agent: test-engineer
|
||||
*/
|
||||
|
||||
const REPO_ROOT = resolve(import.meta.dir, "../../..");
|
||||
const COMMON_SH = resolve(REPO_ROOT, "shared/common.sh");
|
||||
|
||||
/**
|
||||
* Run a bash snippet that sources shared/common.sh first.
|
||||
* Returns { exitCode, stdout, stderr }.
|
||||
*/
|
||||
function runBash(
|
||||
script: string,
|
||||
env?: Record<string, string>
|
||||
): { exitCode: number; stdout: string; stderr: string } {
|
||||
const fullScript = `source "${COMMON_SH}"\n${script}`;
|
||||
const result = spawnSync("bash", ["-c", fullScript], {
|
||||
encoding: "utf-8",
|
||||
timeout: 10000,
|
||||
stdio: ["pipe", "pipe", "pipe"],
|
||||
env: { ...process.env, ...env },
|
||||
});
|
||||
return {
|
||||
exitCode: result.status ?? 1,
|
||||
stdout: (result.stdout || "").trim(),
|
||||
stderr: (result.stderr || "").trim(),
|
||||
};
|
||||
}
|
||||
|
||||
// ── _classify_api_result ────────────────────────────────────────────────────
|
||||
|
||||
describe("_classify_api_result", () => {
|
||||
describe("network errors (curl failures)", () => {
|
||||
it("should return network error message when curl exits non-zero", () => {
|
||||
const result = runBash(`
|
||||
API_HTTP_CODE=""
|
||||
echo "$(_classify_api_result 1)"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("Cloud API network error");
|
||||
});
|
||||
|
||||
it("should return network error for curl exit code 6 (DNS failure)", () => {
|
||||
const result = runBash(`
|
||||
API_HTTP_CODE=""
|
||||
echo "$(_classify_api_result 6)"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("Cloud API network error");
|
||||
});
|
||||
|
||||
it("should return network error for curl exit code 7 (connection refused)", () => {
|
||||
const result = runBash(`
|
||||
API_HTTP_CODE=""
|
||||
echo "$(_classify_api_result 7)"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("Cloud API network error");
|
||||
});
|
||||
|
||||
it("should return network error for curl exit code 28 (timeout)", () => {
|
||||
const result = runBash(`
|
||||
API_HTTP_CODE=""
|
||||
echo "$(_classify_api_result 28)"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("Cloud API network error");
|
||||
});
|
||||
|
||||
it("should prioritize curl failure over HTTP code", () => {
|
||||
// If curl itself failed, the HTTP code is meaningless
|
||||
const result = runBash(`
|
||||
API_HTTP_CODE="200"
|
||||
echo "$(_classify_api_result 7)"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("Cloud API network error");
|
||||
});
|
||||
});
|
||||
|
||||
describe("HTTP rate limiting (429)", () => {
|
||||
it("should detect HTTP 429 rate limit", () => {
|
||||
const result = runBash(`
|
||||
API_HTTP_CODE="429"
|
||||
echo "$(_classify_api_result 0)"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("Cloud API returned rate limit (HTTP 429)");
|
||||
});
|
||||
|
||||
it("should include HTTP 429 in the message", () => {
|
||||
const result = runBash(`
|
||||
API_HTTP_CODE="429"
|
||||
result=$(_classify_api_result 0)
|
||||
echo "$result"
|
||||
`);
|
||||
expect(result.stdout).toContain("429");
|
||||
expect(result.stdout).toContain("rate limit");
|
||||
});
|
||||
});
|
||||
|
||||
describe("HTTP service unavailable (503)", () => {
|
||||
it("should detect HTTP 503 service unavailable", () => {
|
||||
const result = runBash(`
|
||||
API_HTTP_CODE="503"
|
||||
echo "$(_classify_api_result 0)"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("Cloud API returned service unavailable (HTTP 503)");
|
||||
});
|
||||
|
||||
it("should include HTTP 503 in the message", () => {
|
||||
const result = runBash(`
|
||||
API_HTTP_CODE="503"
|
||||
result=$(_classify_api_result 0)
|
||||
echo "$result"
|
||||
`);
|
||||
expect(result.stdout).toContain("503");
|
||||
expect(result.stdout).toContain("service unavailable");
|
||||
});
|
||||
});
|
||||
|
||||
describe("success cases (no retry needed)", () => {
|
||||
it("should return empty string for successful request (HTTP 200)", () => {
|
||||
const result = runBash(`
|
||||
API_HTTP_CODE="200"
|
||||
result=$(_classify_api_result 0)
|
||||
if [[ -z "$result" ]]; then
|
||||
echo "EMPTY"
|
||||
else
|
||||
echo "$result"
|
||||
fi
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("EMPTY");
|
||||
});
|
||||
|
||||
it("should return empty string for HTTP 201 (created)", () => {
|
||||
const result = runBash(`
|
||||
API_HTTP_CODE="201"
|
||||
result=$(_classify_api_result 0)
|
||||
if [[ -z "$result" ]]; then echo "EMPTY"; else echo "$result"; fi
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("EMPTY");
|
||||
});
|
||||
|
||||
it("should return empty string for HTTP 204 (no content)", () => {
|
||||
const result = runBash(`
|
||||
API_HTTP_CODE="204"
|
||||
result=$(_classify_api_result 0)
|
||||
if [[ -z "$result" ]]; then echo "EMPTY"; else echo "$result"; fi
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("EMPTY");
|
||||
});
|
||||
|
||||
it("should return empty string for HTTP 301 (redirect)", () => {
|
||||
const result = runBash(`
|
||||
API_HTTP_CODE="301"
|
||||
result=$(_classify_api_result 0)
|
||||
if [[ -z "$result" ]]; then echo "EMPTY"; else echo "$result"; fi
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("EMPTY");
|
||||
});
|
||||
});
|
||||
|
||||
describe("non-retryable HTTP errors (not classified)", () => {
|
||||
it("should return empty for HTTP 400 (bad request)", () => {
|
||||
const result = runBash(`
|
||||
API_HTTP_CODE="400"
|
||||
result=$(_classify_api_result 0)
|
||||
if [[ -z "$result" ]]; then echo "EMPTY"; else echo "$result"; fi
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("EMPTY");
|
||||
});
|
||||
|
||||
it("should return empty for HTTP 401 (unauthorized)", () => {
|
||||
const result = runBash(`
|
||||
API_HTTP_CODE="401"
|
||||
result=$(_classify_api_result 0)
|
||||
if [[ -z "$result" ]]; then echo "EMPTY"; else echo "$result"; fi
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("EMPTY");
|
||||
});
|
||||
|
||||
it("should return empty for HTTP 403 (forbidden)", () => {
|
||||
const result = runBash(`
|
||||
API_HTTP_CODE="403"
|
||||
result=$(_classify_api_result 0)
|
||||
if [[ -z "$result" ]]; then echo "EMPTY"; else echo "$result"; fi
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("EMPTY");
|
||||
});
|
||||
|
||||
it("should return empty for HTTP 404 (not found)", () => {
|
||||
const result = runBash(`
|
||||
API_HTTP_CODE="404"
|
||||
result=$(_classify_api_result 0)
|
||||
if [[ -z "$result" ]]; then echo "EMPTY"; else echo "$result"; fi
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("EMPTY");
|
||||
});
|
||||
|
||||
it("should return empty for HTTP 409 (conflict)", () => {
|
||||
const result = runBash(`
|
||||
API_HTTP_CODE="409"
|
||||
result=$(_classify_api_result 0)
|
||||
if [[ -z "$result" ]]; then echo "EMPTY"; else echo "$result"; fi
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("EMPTY");
|
||||
});
|
||||
|
||||
it("should return empty for HTTP 500 (internal server error)", () => {
|
||||
const result = runBash(`
|
||||
API_HTTP_CODE="500"
|
||||
result=$(_classify_api_result 0)
|
||||
if [[ -z "$result" ]]; then echo "EMPTY"; else echo "$result"; fi
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("EMPTY");
|
||||
});
|
||||
|
||||
it("should return empty for HTTP 502 (bad gateway)", () => {
|
||||
const result = runBash(`
|
||||
API_HTTP_CODE="502"
|
||||
result=$(_classify_api_result 0)
|
||||
if [[ -z "$result" ]]; then echo "EMPTY"; else echo "$result"; fi
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("EMPTY");
|
||||
});
|
||||
});
|
||||
|
||||
describe("edge cases", () => {
|
||||
it("should handle empty API_HTTP_CODE with curl success", () => {
|
||||
const result = runBash(`
|
||||
API_HTTP_CODE=""
|
||||
result=$(_classify_api_result 0)
|
||||
if [[ -z "$result" ]]; then echo "EMPTY"; else echo "$result"; fi
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("EMPTY");
|
||||
});
|
||||
|
||||
it("should handle unset API_HTTP_CODE with curl success", () => {
|
||||
const result = runBash(`
|
||||
unset API_HTTP_CODE
|
||||
result=$(_classify_api_result 0)
|
||||
if [[ -z "$result" ]]; then echo "EMPTY"; else echo "$result"; fi
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("EMPTY");
|
||||
});
|
||||
|
||||
it("should treat curl_ok string '0' as success", () => {
|
||||
const result = runBash(`
|
||||
API_HTTP_CODE="200"
|
||||
result=$(_classify_api_result "0")
|
||||
if [[ -z "$result" ]]; then echo "EMPTY"; else echo "$result"; fi
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("EMPTY");
|
||||
});
|
||||
|
||||
it("should treat any non-zero curl_ok as network error", () => {
|
||||
const result = runBash(`
|
||||
API_HTTP_CODE="200"
|
||||
echo "$(_classify_api_result 99)"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("Cloud API network error");
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
// ── _report_api_failure ─────────────────────────────────────────────────────
|
||||
|
||||
describe("_report_api_failure", () => {
|
||||
describe("network error reporting", () => {
|
||||
it("should show retry count in error message", () => {
|
||||
const result = runBash(`
|
||||
API_RESPONSE_BODY=""
|
||||
_report_api_failure "Cloud API network error" 3
|
||||
`);
|
||||
expect(result.stderr).toContain("Cloud API network error");
|
||||
expect(result.stderr).toContain("3 attempts");
|
||||
});
|
||||
|
||||
it("should suggest checking internet connection for network errors", () => {
|
||||
const result = runBash(`
|
||||
API_RESPONSE_BODY=""
|
||||
_report_api_failure "Cloud API network error" 5
|
||||
`);
|
||||
expect(result.stderr).toContain("internet connection");
|
||||
});
|
||||
|
||||
it("should NOT output API response body for network errors", () => {
|
||||
const result = runBash(`
|
||||
API_RESPONSE_BODY='{"error": "should not appear"}'
|
||||
_report_api_failure "Cloud API network error" 3
|
||||
`);
|
||||
expect(result.stdout).not.toContain("should not appear");
|
||||
});
|
||||
});
|
||||
|
||||
describe("HTTP error reporting", () => {
|
||||
it("should show rate limit reason in error message", () => {
|
||||
const result = runBash(`
|
||||
API_RESPONSE_BODY='{"error": "rate limited"}'
|
||||
_report_api_failure "Cloud API returned rate limit (HTTP 429)" 3
|
||||
`);
|
||||
expect(result.stderr).toContain("rate limit");
|
||||
expect(result.stderr).toContain("3 attempts");
|
||||
});
|
||||
|
||||
it("should output API response body for HTTP errors", () => {
|
||||
const result = runBash(`
|
||||
API_RESPONSE_BODY='{"error": "rate limited"}'
|
||||
_report_api_failure "Cloud API returned rate limit (HTTP 429)" 3
|
||||
`);
|
||||
expect(result.stdout).toContain("rate limited");
|
||||
});
|
||||
|
||||
it("should output API response body for 503 errors", () => {
|
||||
const result = runBash(`
|
||||
API_RESPONSE_BODY='{"error": "service unavailable"}'
|
||||
_report_api_failure "Cloud API returned service unavailable (HTTP 503)" 3
|
||||
`);
|
||||
expect(result.stdout).toContain("service unavailable");
|
||||
});
|
||||
|
||||
it("should suggest waiting and retrying for HTTP errors", () => {
|
||||
const result = runBash(`
|
||||
API_RESPONSE_BODY='{}'
|
||||
_report_api_failure "Cloud API returned rate limit (HTTP 429)" 3
|
||||
`);
|
||||
expect(result.stderr).toContain("rate limiting");
|
||||
expect(result.stderr).toContain("try again");
|
||||
});
|
||||
|
||||
it("should suggest checking status page for HTTP errors", () => {
|
||||
const result = runBash(`
|
||||
API_RESPONSE_BODY='{}'
|
||||
_report_api_failure "Cloud API returned service unavailable (HTTP 503)" 3
|
||||
`);
|
||||
expect(result.stderr).toContain("status page");
|
||||
});
|
||||
});
|
||||
|
||||
describe("retry count display", () => {
|
||||
it("should show 1 attempt for single retry", () => {
|
||||
const result = runBash(`
|
||||
API_RESPONSE_BODY=""
|
||||
_report_api_failure "Cloud API network error" 1
|
||||
`);
|
||||
expect(result.stderr).toContain("1 attempts");
|
||||
});
|
||||
|
||||
it("should show 5 attempts for max retries", () => {
|
||||
const result = runBash(`
|
||||
API_RESPONSE_BODY=""
|
||||
_report_api_failure "Cloud API network error" 5
|
||||
`);
|
||||
expect(result.stderr).toContain("5 attempts");
|
||||
});
|
||||
|
||||
it("should show 10 attempts for large retry count", () => {
|
||||
const result = runBash(`
|
||||
API_RESPONSE_BODY=""
|
||||
_report_api_failure "Cloud API network error" 10
|
||||
`);
|
||||
expect(result.stderr).toContain("10 attempts");
|
||||
});
|
||||
});
|
||||
|
||||
describe("API response body handling", () => {
|
||||
it("should handle empty API response body", () => {
|
||||
const result = runBash(`
|
||||
API_RESPONSE_BODY=""
|
||||
_report_api_failure "Cloud API returned rate limit (HTTP 429)" 3
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
// Should still print the error message, just empty body
|
||||
expect(result.stderr).toContain("rate limit");
|
||||
});
|
||||
|
||||
it("should handle multiline API response body", () => {
|
||||
const result = runBash(`
|
||||
API_RESPONSE_BODY='{"error": "rate limited",
|
||||
"retry_after": 60,
|
||||
"message": "Please slow down"}'
|
||||
_report_api_failure "Cloud API returned rate limit (HTTP 429)" 3
|
||||
`);
|
||||
expect(result.stdout).toContain("rate limited");
|
||||
expect(result.stdout).toContain("retry_after");
|
||||
});
|
||||
|
||||
it("should handle API response body with special characters", () => {
|
||||
const result = runBash(`
|
||||
API_RESPONSE_BODY='{"error": "quota exceeded: \$100 limit"}'
|
||||
_report_api_failure "Cloud API returned rate limit (HTTP 429)" 3
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
// Should not crash on special chars
|
||||
expect(result.stderr).toContain("rate limit");
|
||||
});
|
||||
|
||||
it("should handle very long API response body", () => {
|
||||
const result = runBash(`
|
||||
API_RESPONSE_BODY=$(printf 'x%.0s' {1..1000})
|
||||
_report_api_failure "Cloud API returned rate limit (HTTP 429)" 3
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout.length).toBeGreaterThan(500);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
// ── Integration: _classify_api_result + _report_api_failure ─────────────────
|
||||
|
||||
describe("_classify_api_result + _report_api_failure integration", () => {
|
||||
it("should classify network error and report appropriately", () => {
|
||||
const result = runBash(`
|
||||
API_HTTP_CODE=""
|
||||
API_RESPONSE_BODY=""
|
||||
reason=$(_classify_api_result 7)
|
||||
if [[ -n "$reason" ]]; then
|
||||
_report_api_failure "$reason" 3
|
||||
echo "CLASSIFIED:$reason"
|
||||
fi
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("CLASSIFIED:Cloud API network error");
|
||||
expect(result.stderr).toContain("internet connection");
|
||||
});
|
||||
|
||||
it("should classify rate limit and report with response body", () => {
|
||||
const result = runBash(`
|
||||
API_HTTP_CODE="429"
|
||||
API_RESPONSE_BODY='{"error": "too many requests"}'
|
||||
reason=$(_classify_api_result 0)
|
||||
if [[ -n "$reason" ]]; then
|
||||
_report_api_failure "$reason" 3
|
||||
echo "CLASSIFIED:$reason"
|
||||
fi
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("too many requests");
|
||||
expect(result.stdout).toContain("CLASSIFIED:Cloud API returned rate limit (HTTP 429)");
|
||||
});
|
||||
|
||||
it("should classify 503 and report with response body", () => {
|
||||
const result = runBash(`
|
||||
API_HTTP_CODE="503"
|
||||
API_RESPONSE_BODY='{"error": "maintenance"}'
|
||||
reason=$(_classify_api_result 0)
|
||||
if [[ -n "$reason" ]]; then
|
||||
_report_api_failure "$reason" 5
|
||||
echo "CLASSIFIED:$reason"
|
||||
fi
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("maintenance");
|
||||
expect(result.stdout).toContain("CLASSIFIED:Cloud API returned service unavailable (HTTP 503)");
|
||||
});
|
||||
|
||||
it("should return empty for successful request (no report needed)", () => {
|
||||
const result = runBash(`
|
||||
API_HTTP_CODE="200"
|
||||
API_RESPONSE_BODY='{"id": "srv-123"}'
|
||||
reason=$(_classify_api_result 0)
|
||||
if [[ -z "$reason" ]]; then
|
||||
echo "SUCCESS"
|
||||
else
|
||||
echo "SHOULD_RETRY:$reason"
|
||||
fi
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("SUCCESS");
|
||||
});
|
||||
|
||||
it("should return empty for 404 (not retryable, caller handles)", () => {
|
||||
const result = runBash(`
|
||||
API_HTTP_CODE="404"
|
||||
API_RESPONSE_BODY='{"error": "not found"}'
|
||||
reason=$(_classify_api_result 0)
|
||||
if [[ -z "$reason" ]]; then
|
||||
echo "NOT_RETRYABLE"
|
||||
else
|
||||
echo "SHOULD_RETRY:$reason"
|
||||
fi
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("NOT_RETRYABLE");
|
||||
});
|
||||
});
|
||||
|
||||
// ── Realistic cloud provider scenarios ──────────────────────────────────────
|
||||
|
||||
describe("realistic cloud provider scenarios", () => {
|
||||
it("should handle Hetzner rate limit response", () => {
|
||||
const result = runBash(`
|
||||
API_HTTP_CODE="429"
|
||||
API_RESPONSE_BODY='{"error":{"message":"Rate limit exceeded","code":"rate_limit_exceeded"}}'
|
||||
reason=$(_classify_api_result 0)
|
||||
echo "REASON:$reason"
|
||||
`);
|
||||
expect(result.stdout).toContain("rate limit");
|
||||
});
|
||||
|
||||
it("should handle DigitalOcean 503 response", () => {
|
||||
const result = runBash(`
|
||||
API_HTTP_CODE="503"
|
||||
API_RESPONSE_BODY='{"id":"service_unavailable","message":"Server Error"}'
|
||||
reason=$(_classify_api_result 0)
|
||||
_report_api_failure "$reason" 3
|
||||
`);
|
||||
expect(result.stderr).toContain("service unavailable");
|
||||
expect(result.stderr).toContain("status page");
|
||||
expect(result.stdout).toContain("service_unavailable");
|
||||
});
|
||||
|
||||
it("should handle DNS resolution failure", () => {
|
||||
const result = runBash(`
|
||||
API_HTTP_CODE=""
|
||||
API_RESPONSE_BODY=""
|
||||
reason=$(_classify_api_result 6)
|
||||
_report_api_failure "$reason" 3
|
||||
`);
|
||||
expect(result.stderr).toContain("network error");
|
||||
expect(result.stderr).toContain("internet connection");
|
||||
});
|
||||
|
||||
it("should handle connection timeout", () => {
|
||||
const result = runBash(`
|
||||
API_HTTP_CODE=""
|
||||
API_RESPONSE_BODY=""
|
||||
reason=$(_classify_api_result 28)
|
||||
_report_api_failure "$reason" 3
|
||||
`);
|
||||
expect(result.stderr).toContain("network error");
|
||||
});
|
||||
|
||||
it("should not retry on auth failure (401)", () => {
|
||||
const result = runBash(`
|
||||
API_HTTP_CODE="401"
|
||||
API_RESPONSE_BODY='{"error":"invalid_token"}'
|
||||
reason=$(_classify_api_result 0)
|
||||
if [[ -z "$reason" ]]; then
|
||||
echo "NO_RETRY"
|
||||
else
|
||||
echo "RETRY:$reason"
|
||||
fi
|
||||
`);
|
||||
expect(result.stdout).toBe("NO_RETRY");
|
||||
});
|
||||
|
||||
it("should not retry on quota exceeded (402/403)", () => {
|
||||
const result = runBash(`
|
||||
API_HTTP_CODE="402"
|
||||
API_RESPONSE_BODY='{"error":"payment_required"}'
|
||||
reason=$(_classify_api_result 0)
|
||||
if [[ -z "$reason" ]]; then echo "NO_RETRY"; else echo "RETRY:$reason"; fi
|
||||
`);
|
||||
expect(result.stdout).toBe("NO_RETRY");
|
||||
});
|
||||
|
||||
it("should not retry on validation error (422)", () => {
|
||||
const result = runBash(`
|
||||
API_HTTP_CODE="422"
|
||||
API_RESPONSE_BODY='{"error":"invalid_parameter"}'
|
||||
reason=$(_classify_api_result 0)
|
||||
if [[ -z "$reason" ]]; then echo "NO_RETRY"; else echo "RETRY:$reason"; fi
|
||||
`);
|
||||
expect(result.stdout).toBe("NO_RETRY");
|
||||
});
|
||||
});
|
||||
File diff suppressed because it is too large
Load diff
|
|
@ -1,731 +0,0 @@
|
|||
import { describe, it, expect, afterEach } from "bun:test";
|
||||
import { execSync } from "child_process";
|
||||
import { resolve, join } from "path";
|
||||
import { mkdirSync, writeFileSync, readFileSync, rmSync, existsSync, statSync } from "fs";
|
||||
import { tmpdir } from "os";
|
||||
|
||||
/**
|
||||
* Tests for credential management functions in shared/common.sh.
|
||||
*
|
||||
* These functions had zero test coverage despite being used by every cloud
|
||||
* provider script. They handle API token loading from env vars, config files,
|
||||
* validation via provider test functions, and saving with proper permissions.
|
||||
*
|
||||
* Functions tested:
|
||||
* - _load_token_from_env: load token from environment variable
|
||||
* - _load_token_from_config: load token from JSON config file (api_key or token field)
|
||||
* - _validate_token_with_provider: validate token via a test function
|
||||
* - _save_token_to_config: save token to JSON config with chmod 600
|
||||
* - _multi_creds_all_env_set: check if all env vars are set
|
||||
* - _multi_creds_load_config: load multiple credentials from JSON config
|
||||
* - _multi_creds_validate: validate credentials via test function, unset on failure
|
||||
*
|
||||
* Agent: test-engineer
|
||||
*/
|
||||
|
||||
const REPO_ROOT = resolve(import.meta.dir, "../../..");
|
||||
const COMMON_SH = resolve(REPO_ROOT, "shared/common.sh");
|
||||
|
||||
/**
|
||||
* Run a bash snippet that sources shared/common.sh first.
|
||||
* Returns { exitCode, stdout, stderr }.
|
||||
*/
|
||||
function runBash(script: string): { exitCode: number; stdout: string; stderr: string } {
|
||||
const fullScript = `source "${COMMON_SH}"\n${script}`;
|
||||
try {
|
||||
const stdout = execSync(`bash -c '${fullScript.replace(/'/g, "'\\''")}'`, {
|
||||
encoding: "utf-8",
|
||||
timeout: 10000,
|
||||
stdio: ["pipe", "pipe", "pipe"],
|
||||
});
|
||||
return { exitCode: 0, stdout: stdout.trim(), stderr: "" };
|
||||
} catch (err: any) {
|
||||
return {
|
||||
exitCode: err.status ?? 1,
|
||||
stdout: (err.stdout || "").trim(),
|
||||
stderr: (err.stderr || "").trim(),
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
function createTempDir(): string {
|
||||
const dir = join(tmpdir(), `spawn-cred-test-${Date.now()}-${Math.random().toString(36).slice(2)}`);
|
||||
mkdirSync(dir, { recursive: true });
|
||||
return dir;
|
||||
}
|
||||
|
||||
const tempDirs: string[] = [];
|
||||
|
||||
afterEach(() => {
|
||||
for (const dir of tempDirs) {
|
||||
if (existsSync(dir)) {
|
||||
rmSync(dir, { recursive: true, force: true });
|
||||
}
|
||||
}
|
||||
tempDirs.length = 0;
|
||||
});
|
||||
|
||||
function trackTempDir(): string {
|
||||
const dir = createTempDir();
|
||||
tempDirs.push(dir);
|
||||
return dir;
|
||||
}
|
||||
|
||||
// ── _load_token_from_env ──────────────────────────────────────────────────
|
||||
|
||||
describe("_load_token_from_env", () => {
|
||||
it("should return 0 when env var is set", () => {
|
||||
const result = runBash(`
|
||||
export MY_TOKEN="test-token-123"
|
||||
_load_token_from_env MY_TOKEN "TestProvider"
|
||||
echo "exit=$?"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should return 1 when env var is not set", () => {
|
||||
const result = runBash(`
|
||||
unset MY_TOKEN 2>/dev/null
|
||||
_load_token_from_env MY_TOKEN "TestProvider"
|
||||
`);
|
||||
expect(result.exitCode).toBe(1);
|
||||
});
|
||||
|
||||
it("should return 1 when env var is empty string", () => {
|
||||
const result = runBash(`
|
||||
export MY_TOKEN=""
|
||||
_load_token_from_env MY_TOKEN "TestProvider"
|
||||
`);
|
||||
expect(result.exitCode).toBe(1);
|
||||
});
|
||||
|
||||
it("should log info message when token found", () => {
|
||||
const result = runBash(`
|
||||
export MY_TOKEN="abc"
|
||||
_load_token_from_env MY_TOKEN "Hetzner" 2>&1
|
||||
`);
|
||||
expect(result.stdout).toContain("Hetzner");
|
||||
expect(result.stdout).toContain("environment");
|
||||
});
|
||||
|
||||
it("should work with different env var names", () => {
|
||||
const result = runBash(`
|
||||
export HCLOUD_TOKEN="hetzner-token-value"
|
||||
_load_token_from_env HCLOUD_TOKEN "Hetzner Cloud"
|
||||
echo "exit=$?"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should handle tokens with special characters", () => {
|
||||
const result = runBash(`
|
||||
export MY_TOKEN="sk-or-v1-abc123/def+ghi="
|
||||
_load_token_from_env MY_TOKEN "Provider"
|
||||
echo "exit=$?"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
});
|
||||
|
||||
// ── _load_token_from_config ───────────────────────────────────────────────
|
||||
|
||||
describe("_load_token_from_config", () => {
|
||||
it("should load token from api_key field in JSON config", () => {
|
||||
const dir = trackTempDir();
|
||||
const configFile = join(dir, "provider.json");
|
||||
writeFileSync(configFile, JSON.stringify({ api_key: "my-api-key-123" }));
|
||||
|
||||
const result = runBash(`
|
||||
_load_token_from_config "${configFile}" MY_TOKEN "TestProvider"
|
||||
echo "$MY_TOKEN"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("my-api-key-123");
|
||||
});
|
||||
|
||||
it("should load token from token field in JSON config", () => {
|
||||
const dir = trackTempDir();
|
||||
const configFile = join(dir, "provider.json");
|
||||
writeFileSync(configFile, JSON.stringify({ token: "my-token-456" }));
|
||||
|
||||
const result = runBash(`
|
||||
_load_token_from_config "${configFile}" MY_TOKEN "TestProvider"
|
||||
echo "$MY_TOKEN"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("my-token-456");
|
||||
});
|
||||
|
||||
it("should prefer api_key over token when both present", () => {
|
||||
const dir = trackTempDir();
|
||||
const configFile = join(dir, "provider.json");
|
||||
writeFileSync(configFile, JSON.stringify({ api_key: "api-key-value", token: "token-value" }));
|
||||
|
||||
const result = runBash(`
|
||||
_load_token_from_config "${configFile}" MY_TOKEN "TestProvider"
|
||||
echo "$MY_TOKEN"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("api-key-value");
|
||||
});
|
||||
|
||||
it("should return 1 when config file does not exist", () => {
|
||||
const result = runBash(`
|
||||
_load_token_from_config "/nonexistent/path/config.json" MY_TOKEN "TestProvider"
|
||||
`);
|
||||
expect(result.exitCode).toBe(1);
|
||||
});
|
||||
|
||||
it("should return 1 when config file has empty api_key and empty token", () => {
|
||||
const dir = trackTempDir();
|
||||
const configFile = join(dir, "provider.json");
|
||||
writeFileSync(configFile, JSON.stringify({ api_key: "", token: "" }));
|
||||
|
||||
const result = runBash(`
|
||||
_load_token_from_config "${configFile}" MY_TOKEN "TestProvider"
|
||||
`);
|
||||
expect(result.exitCode).toBe(1);
|
||||
});
|
||||
|
||||
it("should return 1 for invalid JSON", () => {
|
||||
const dir = trackTempDir();
|
||||
const configFile = join(dir, "provider.json");
|
||||
writeFileSync(configFile, "not valid json {{{");
|
||||
|
||||
const result = runBash(`
|
||||
_load_token_from_config "${configFile}" MY_TOKEN "TestProvider"
|
||||
`);
|
||||
expect(result.exitCode).toBe(1);
|
||||
});
|
||||
|
||||
it("should return 1 when JSON has no api_key or token field", () => {
|
||||
const dir = trackTempDir();
|
||||
const configFile = join(dir, "provider.json");
|
||||
writeFileSync(configFile, JSON.stringify({ username: "user", password: "pass" }));
|
||||
|
||||
const result = runBash(`
|
||||
_load_token_from_config "${configFile}" MY_TOKEN "TestProvider"
|
||||
`);
|
||||
expect(result.exitCode).toBe(1);
|
||||
});
|
||||
|
||||
it("should export the env var with the loaded value", () => {
|
||||
const dir = trackTempDir();
|
||||
const configFile = join(dir, "provider.json");
|
||||
writeFileSync(configFile, JSON.stringify({ api_key: "loaded-token" }));
|
||||
|
||||
const result = runBash(`
|
||||
_load_token_from_config "${configFile}" HCLOUD_TOKEN "Hetzner"
|
||||
echo "HCLOUD_TOKEN=$HCLOUD_TOKEN"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("HCLOUD_TOKEN=loaded-token");
|
||||
});
|
||||
|
||||
it("should log info message with config file path", () => {
|
||||
const dir = trackTempDir();
|
||||
const configFile = join(dir, "provider.json");
|
||||
writeFileSync(configFile, JSON.stringify({ api_key: "test" }));
|
||||
|
||||
const result = runBash(`
|
||||
_load_token_from_config "${configFile}" MY_TOKEN "TestProvider" 2>&1
|
||||
`);
|
||||
expect(result.stdout).toContain(configFile);
|
||||
expect(result.stdout).toContain("TestProvider");
|
||||
});
|
||||
|
||||
it("should fall back to token field when api_key is empty", () => {
|
||||
const dir = trackTempDir();
|
||||
const configFile = join(dir, "provider.json");
|
||||
writeFileSync(configFile, JSON.stringify({ api_key: "", token: "fallback-token" }));
|
||||
|
||||
const result = runBash(`
|
||||
_load_token_from_config "${configFile}" MY_TOKEN "TestProvider"
|
||||
echo "$MY_TOKEN"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("fallback-token");
|
||||
});
|
||||
});
|
||||
|
||||
// ── _validate_token_with_provider ─────────────────────────────────────────
|
||||
|
||||
describe("_validate_token_with_provider", () => {
|
||||
it("should return 0 when no test function provided (empty string)", () => {
|
||||
const result = runBash(`
|
||||
_validate_token_with_provider "" MY_TOKEN "TestProvider"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should return 0 when test function succeeds", () => {
|
||||
const result = runBash(`
|
||||
test_success() { return 0; }
|
||||
export MY_TOKEN="valid-token"
|
||||
_validate_token_with_provider test_success MY_TOKEN "TestProvider"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should return 1 when test function fails", () => {
|
||||
const result = runBash(`
|
||||
test_fail() { return 1; }
|
||||
export MY_TOKEN="invalid-token"
|
||||
_validate_token_with_provider test_fail MY_TOKEN "TestProvider"
|
||||
`);
|
||||
expect(result.exitCode).toBe(1);
|
||||
});
|
||||
|
||||
it("should unset the env var when validation fails", () => {
|
||||
const result = runBash(`
|
||||
test_fail() { return 1; }
|
||||
export MY_TOKEN="will-be-unset"
|
||||
_validate_token_with_provider test_fail MY_TOKEN "TestProvider" 2>/dev/null
|
||||
echo "MY_TOKEN=\${MY_TOKEN:-UNSET}"
|
||||
`);
|
||||
expect(result.stdout).toContain("MY_TOKEN=UNSET");
|
||||
});
|
||||
|
||||
it("should log authentication failed message on failure", () => {
|
||||
const result = runBash(`
|
||||
test_fail() { return 1; }
|
||||
export MY_TOKEN="bad"
|
||||
_validate_token_with_provider test_fail MY_TOKEN "Lambda" 2>&1
|
||||
`);
|
||||
expect(result.stdout).toContain("Authentication failed");
|
||||
expect(result.stdout).toContain("Lambda");
|
||||
});
|
||||
|
||||
it("should not unset env var when validation succeeds", () => {
|
||||
const result = runBash(`
|
||||
test_ok() { return 0; }
|
||||
export MY_TOKEN="good-token"
|
||||
_validate_token_with_provider test_ok MY_TOKEN "TestProvider"
|
||||
echo "MY_TOKEN=$MY_TOKEN"
|
||||
`);
|
||||
expect(result.stdout).toContain("MY_TOKEN=good-token");
|
||||
});
|
||||
});
|
||||
|
||||
// ── _save_token_to_config ─────────────────────────────────────────────────
|
||||
|
||||
describe("_save_token_to_config", () => {
|
||||
it("should create config file with api_key and token fields", () => {
|
||||
const dir = trackTempDir();
|
||||
const configFile = join(dir, "subdir", "provider.json");
|
||||
|
||||
runBash(`_save_token_to_config "${configFile}" "my-secret-token" 2>/dev/null`);
|
||||
|
||||
expect(existsSync(configFile)).toBe(true);
|
||||
const content = readFileSync(configFile, "utf-8");
|
||||
const parsed = JSON.parse(content);
|
||||
expect(parsed.api_key).toBe("my-secret-token");
|
||||
expect(parsed.token).toBe("my-secret-token");
|
||||
});
|
||||
|
||||
it("should create parent directories if they do not exist", () => {
|
||||
const dir = trackTempDir();
|
||||
const configFile = join(dir, "deep", "nested", "config.json");
|
||||
|
||||
runBash(`_save_token_to_config "${configFile}" "test-token" 2>/dev/null`);
|
||||
|
||||
expect(existsSync(configFile)).toBe(true);
|
||||
});
|
||||
|
||||
it("should set file permissions to 600", () => {
|
||||
const dir = trackTempDir();
|
||||
const configFile = join(dir, "secure.json");
|
||||
|
||||
runBash(`_save_token_to_config "${configFile}" "secret" 2>/dev/null`);
|
||||
|
||||
const stats = statSync(configFile);
|
||||
const mode = (stats.mode & 0o777).toString(8);
|
||||
expect(mode).toBe("600");
|
||||
});
|
||||
|
||||
it("should properly JSON-escape tokens with special characters", () => {
|
||||
const dir = trackTempDir();
|
||||
const configFile = join(dir, "special.json");
|
||||
|
||||
// Token with quotes and backslashes
|
||||
runBash(`_save_token_to_config "${configFile}" 'token-with-"quotes"' 2>/dev/null`);
|
||||
|
||||
const content = readFileSync(configFile, "utf-8");
|
||||
// Should be valid JSON
|
||||
expect(() => JSON.parse(content)).not.toThrow();
|
||||
const parsed = JSON.parse(content);
|
||||
expect(parsed.api_key).toBe('token-with-"quotes"');
|
||||
});
|
||||
|
||||
it("should overwrite existing config file", () => {
|
||||
const dir = trackTempDir();
|
||||
const configFile = join(dir, "provider.json");
|
||||
writeFileSync(configFile, JSON.stringify({ api_key: "old-token" }));
|
||||
|
||||
runBash(`_save_token_to_config "${configFile}" "new-token" 2>/dev/null`);
|
||||
|
||||
const content = readFileSync(configFile, "utf-8");
|
||||
const parsed = JSON.parse(content);
|
||||
expect(parsed.api_key).toBe("new-token");
|
||||
});
|
||||
|
||||
it("should write valid JSON that can be re-read by _load_token_from_config", () => {
|
||||
const dir = trackTempDir();
|
||||
const configFile = join(dir, "roundtrip.json");
|
||||
|
||||
// Save token
|
||||
runBash(`_save_token_to_config "${configFile}" "roundtrip-value" 2>/dev/null`);
|
||||
|
||||
// Load it back
|
||||
const result = runBash(`
|
||||
_load_token_from_config "${configFile}" LOADED_TOKEN "Test" 2>/dev/null
|
||||
echo "$LOADED_TOKEN"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("roundtrip-value");
|
||||
});
|
||||
|
||||
it("should handle empty token string", () => {
|
||||
const dir = trackTempDir();
|
||||
const configFile = join(dir, "empty.json");
|
||||
|
||||
runBash(`_save_token_to_config "${configFile}" "" 2>/dev/null`);
|
||||
|
||||
expect(existsSync(configFile)).toBe(true);
|
||||
const content = readFileSync(configFile, "utf-8");
|
||||
expect(() => JSON.parse(content)).not.toThrow();
|
||||
const parsed = JSON.parse(content);
|
||||
expect(parsed.api_key).toBe("");
|
||||
});
|
||||
});
|
||||
|
||||
// ── _multi_creds_all_env_set ──────────────────────────────────────────────
|
||||
|
||||
describe("_multi_creds_all_env_set", () => {
|
||||
it("should return 0 when all env vars are set", () => {
|
||||
const result = runBash(`
|
||||
export VAR_A="a"
|
||||
export VAR_B="b"
|
||||
export VAR_C="c"
|
||||
_multi_creds_all_env_set VAR_A VAR_B VAR_C
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should return 1 when any env var is missing", () => {
|
||||
const result = runBash(`
|
||||
export VAR_A="a"
|
||||
unset VAR_B 2>/dev/null
|
||||
_multi_creds_all_env_set VAR_A VAR_B
|
||||
`);
|
||||
expect(result.exitCode).toBe(1);
|
||||
});
|
||||
|
||||
it("should return 1 when any env var is empty string", () => {
|
||||
const result = runBash(`
|
||||
export VAR_A="a"
|
||||
export VAR_B=""
|
||||
_multi_creds_all_env_set VAR_A VAR_B
|
||||
`);
|
||||
expect(result.exitCode).toBe(1);
|
||||
});
|
||||
|
||||
it("should return 0 with a single env var that is set", () => {
|
||||
const result = runBash(`
|
||||
export SINGLE_VAR="value"
|
||||
_multi_creds_all_env_set SINGLE_VAR
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should return 1 when first env var is missing but second is set", () => {
|
||||
const result = runBash(`
|
||||
unset FIRST_VAR 2>/dev/null
|
||||
export SECOND_VAR="present"
|
||||
_multi_creds_all_env_set FIRST_VAR SECOND_VAR
|
||||
`);
|
||||
expect(result.exitCode).toBe(1);
|
||||
});
|
||||
|
||||
it("should return 1 when last env var is missing", () => {
|
||||
const result = runBash(`
|
||||
export VAR_A="a"
|
||||
export VAR_B="b"
|
||||
unset VAR_C 2>/dev/null
|
||||
_multi_creds_all_env_set VAR_A VAR_B VAR_C
|
||||
`);
|
||||
expect(result.exitCode).toBe(1);
|
||||
});
|
||||
|
||||
it("should return 0 with no arguments (vacuously true)", () => {
|
||||
const result = runBash(`
|
||||
_multi_creds_all_env_set
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
});
|
||||
|
||||
// ── _multi_creds_load_config ──────────────────────────────────────────────
|
||||
|
||||
describe("_multi_creds_load_config", () => {
|
||||
it("should load two credentials from JSON config into env vars", () => {
|
||||
const dir = trackTempDir();
|
||||
const configFile = join(dir, "multi.json");
|
||||
writeFileSync(configFile, JSON.stringify({
|
||||
client_id: "my-client-id",
|
||||
client_secret: "my-secret",
|
||||
}));
|
||||
|
||||
const result = runBash(`
|
||||
_multi_creds_load_config "${configFile}" 2 CRED_ID CRED_SECRET client_id client_secret
|
||||
echo "ID=$CRED_ID"
|
||||
echo "SECRET=$CRED_SECRET"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("ID=my-client-id");
|
||||
expect(result.stdout).toContain("SECRET=my-secret");
|
||||
});
|
||||
|
||||
it("should return 1 when config file does not exist", () => {
|
||||
const result = runBash(`
|
||||
_multi_creds_load_config "/nonexistent/config.json" 1 MY_VAR my_key
|
||||
`);
|
||||
expect(result.exitCode).toBe(1);
|
||||
});
|
||||
|
||||
it("should return 1 when a field is empty in config", () => {
|
||||
const dir = trackTempDir();
|
||||
const configFile = join(dir, "partial.json");
|
||||
writeFileSync(configFile, JSON.stringify({
|
||||
client_id: "has-value",
|
||||
client_secret: "",
|
||||
}));
|
||||
|
||||
const result = runBash(`
|
||||
_multi_creds_load_config "${configFile}" 2 CRED_ID CRED_SECRET client_id client_secret
|
||||
`);
|
||||
expect(result.exitCode).toBe(1);
|
||||
});
|
||||
|
||||
it("should return 1 when a field is missing from config", () => {
|
||||
const dir = trackTempDir();
|
||||
const configFile = join(dir, "missing.json");
|
||||
writeFileSync(configFile, JSON.stringify({
|
||||
client_id: "has-value",
|
||||
}));
|
||||
|
||||
const result = runBash(`
|
||||
_multi_creds_load_config "${configFile}" 2 CRED_ID CRED_SECRET client_id client_secret
|
||||
`);
|
||||
expect(result.exitCode).toBe(1);
|
||||
});
|
||||
|
||||
it("should load a single credential from config", () => {
|
||||
const dir = trackTempDir();
|
||||
const configFile = join(dir, "single.json");
|
||||
writeFileSync(configFile, JSON.stringify({ api_key: "single-value" }));
|
||||
|
||||
const result = runBash(`
|
||||
_multi_creds_load_config "${configFile}" 1 MY_KEY api_key
|
||||
echo "KEY=$MY_KEY"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("KEY=single-value");
|
||||
});
|
||||
|
||||
it("should load three credentials from config", () => {
|
||||
const dir = trackTempDir();
|
||||
const configFile = join(dir, "three.json");
|
||||
writeFileSync(configFile, JSON.stringify({
|
||||
username: "user1",
|
||||
password: "pass1",
|
||||
project: "proj1",
|
||||
}));
|
||||
|
||||
const result = runBash(`
|
||||
_multi_creds_load_config "${configFile}" 3 MY_USER MY_PASS MY_PROJ username password project
|
||||
echo "USER=$MY_USER"
|
||||
echo "PASS=$MY_PASS"
|
||||
echo "PROJ=$MY_PROJ"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("USER=user1");
|
||||
expect(result.stdout).toContain("PASS=pass1");
|
||||
expect(result.stdout).toContain("PROJ=proj1");
|
||||
});
|
||||
|
||||
it("should return 1 for invalid JSON config", () => {
|
||||
const dir = trackTempDir();
|
||||
const configFile = join(dir, "invalid.json");
|
||||
writeFileSync(configFile, "not json {{{");
|
||||
|
||||
const result = runBash(`
|
||||
_multi_creds_load_config "${configFile}" 1 MY_VAR my_key
|
||||
`);
|
||||
expect(result.exitCode).toBe(1);
|
||||
});
|
||||
});
|
||||
|
||||
// ── _multi_creds_validate ─────────────────────────────────────────────────
|
||||
|
||||
describe("_multi_creds_validate", () => {
|
||||
it("should return 0 when no test function provided (empty string)", () => {
|
||||
const result = runBash(`
|
||||
_multi_creds_validate "" "TestProvider" "https://example.com" VAR_A VAR_B
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should return 0 when test function succeeds", () => {
|
||||
const result = runBash(`
|
||||
test_ok() { return 0; }
|
||||
export VAR_A="a"
|
||||
_multi_creds_validate test_ok "TestProvider" "https://example.com" VAR_A
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should return 1 when test function fails", () => {
|
||||
const result = runBash(`
|
||||
test_fail() { return 1; }
|
||||
export VAR_A="a"
|
||||
_multi_creds_validate test_fail "TestProvider" "https://example.com" VAR_A 2>/dev/null
|
||||
`);
|
||||
expect(result.exitCode).toBe(1);
|
||||
});
|
||||
|
||||
it("should unset all env vars when validation fails", () => {
|
||||
const result = runBash(`
|
||||
test_fail() { return 1; }
|
||||
export VAR_A="a"
|
||||
export VAR_B="b"
|
||||
_multi_creds_validate test_fail "TestProvider" "https://example.com" VAR_A VAR_B 2>/dev/null
|
||||
echo "A=\${VAR_A:-UNSET}"
|
||||
echo "B=\${VAR_B:-UNSET}"
|
||||
`);
|
||||
expect(result.stdout).toContain("A=UNSET");
|
||||
expect(result.stdout).toContain("B=UNSET");
|
||||
});
|
||||
|
||||
it("should not unset env vars when validation succeeds", () => {
|
||||
const result = runBash(`
|
||||
test_ok() { return 0; }
|
||||
export VAR_A="kept"
|
||||
export VAR_B="also-kept"
|
||||
_multi_creds_validate test_ok "TestProvider" "https://example.com" VAR_A VAR_B 2>/dev/null
|
||||
echo "A=$VAR_A"
|
||||
echo "B=$VAR_B"
|
||||
`);
|
||||
expect(result.stdout).toContain("A=kept");
|
||||
expect(result.stdout).toContain("B=also-kept");
|
||||
});
|
||||
|
||||
it("should log error message with provider name on failure", () => {
|
||||
const result = runBash(`
|
||||
test_fail() { return 1; }
|
||||
export VAR_A="a"
|
||||
_multi_creds_validate test_fail "Contabo" "https://example.com" VAR_A 2>&1
|
||||
`);
|
||||
expect(result.stdout).toContain("Contabo");
|
||||
expect(result.stdout).toContain("Invalid");
|
||||
});
|
||||
});
|
||||
|
||||
// ── Integration: _save_token_to_config + _load_token_from_config ──────────
|
||||
|
||||
describe("credential roundtrip integration", () => {
|
||||
it("should save and reload a simple token", () => {
|
||||
const dir = trackTempDir();
|
||||
const configFile = join(dir, "roundtrip.json");
|
||||
|
||||
// Save
|
||||
runBash(`_save_token_to_config "${configFile}" "abc123" 2>/dev/null`);
|
||||
|
||||
// Load
|
||||
const result = runBash(`
|
||||
_load_token_from_config "${configFile}" LOADED "Test" 2>/dev/null
|
||||
echo "$LOADED"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("abc123");
|
||||
});
|
||||
|
||||
it("should reject a token with backslashes (not valid in API tokens)", () => {
|
||||
const dir = trackTempDir();
|
||||
const configFile = join(dir, "special.json");
|
||||
|
||||
// Save a token with backslashes
|
||||
runBash(`_save_token_to_config "${configFile}" 'token\\with\\slashes' 2>/dev/null`);
|
||||
|
||||
// Load should fail — backslashes are not in the allowed character set
|
||||
const result = runBash(
|
||||
`_load_token_from_config "${configFile}" LOADED "Test" 2>/dev/null`,
|
||||
);
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should validate after loading from config", () => {
|
||||
const dir = trackTempDir();
|
||||
const configFile = join(dir, "validated.json");
|
||||
|
||||
// Save
|
||||
runBash(`_save_token_to_config "${configFile}" "valid-token" 2>/dev/null`);
|
||||
|
||||
// Load and validate
|
||||
const result = runBash(`
|
||||
test_valid() { [[ "$MY_TOKEN" == "valid-token" ]]; }
|
||||
_load_token_from_config "${configFile}" MY_TOKEN "Test" 2>/dev/null
|
||||
_validate_token_with_provider test_valid MY_TOKEN "TestProvider"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
});
|
||||
|
||||
// ── Integration: multi-credential save and load ───────────────────────────
|
||||
|
||||
describe("multi-credential save and load integration", () => {
|
||||
it("should save with _save_json_config and load with _multi_creds_load_config", () => {
|
||||
const dir = trackTempDir();
|
||||
const configFile = join(dir, "multi-roundtrip.json");
|
||||
|
||||
// Save two credentials
|
||||
runBash(`_save_json_config "${configFile}" client_id "my-id" client_secret "my-secret" 2>/dev/null`);
|
||||
|
||||
// Load them back
|
||||
const result = runBash(`
|
||||
_multi_creds_load_config "${configFile}" 2 LOADED_ID LOADED_SECRET client_id client_secret
|
||||
echo "ID=$LOADED_ID"
|
||||
echo "SECRET=$LOADED_SECRET"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("ID=my-id");
|
||||
expect(result.stdout).toContain("SECRET=my-secret");
|
||||
});
|
||||
|
||||
it("should save three credentials and load all three", () => {
|
||||
const dir = trackTempDir();
|
||||
const configFile = join(dir, "three-creds.json");
|
||||
|
||||
runBash(`_save_json_config "${configFile}" username "user" password "pass" project_id "proj" 2>/dev/null`);
|
||||
|
||||
const result = runBash(`
|
||||
_multi_creds_load_config "${configFile}" 3 U P R username password project_id
|
||||
echo "U=$U P=$P R=$R"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("U=user");
|
||||
expect(result.stdout).toContain("P=pass");
|
||||
expect(result.stdout).toContain("R=proj");
|
||||
});
|
||||
|
||||
it("should save config with chmod 600", () => {
|
||||
const dir = trackTempDir();
|
||||
const configFile = join(dir, "perms.json");
|
||||
|
||||
runBash(`_save_json_config "${configFile}" key "value" 2>/dev/null`);
|
||||
|
||||
const stats = statSync(configFile);
|
||||
const mode = (stats.mode & 0o777).toString(8);
|
||||
expect(mode).toBe("600");
|
||||
});
|
||||
});
|
||||
|
|
@ -1,302 +0,0 @@
|
|||
import { describe, it, expect } from "bun:test";
|
||||
import { execSync } from "child_process";
|
||||
import { resolve, join } from "path";
|
||||
import { mkdirSync, writeFileSync, readFileSync, rmSync } from "fs";
|
||||
import { tmpdir } from "os";
|
||||
|
||||
/**
|
||||
* Tests for environment injection, JSON extraction, SSH key check,
|
||||
* and opencode install helpers in shared/common.sh:
|
||||
*
|
||||
* - inject_env_vars_ssh: Injects env vars into remote server via SSH
|
||||
* - inject_env_vars_local: Injects env vars for local/container providers
|
||||
* - _extract_json_field: Extracts fields from JSON using Python expressions
|
||||
* - check_ssh_key_by_fingerprint: Checks SSH key registration via API
|
||||
* - opencode_install_cmd: Generates robust OpenCode install command
|
||||
*
|
||||
* These functions had zero test coverage despite being used across
|
||||
* all cloud provider scripts.
|
||||
*
|
||||
* Agent: test-engineer
|
||||
*/
|
||||
|
||||
const REPO_ROOT = resolve(import.meta.dir, "../../..");
|
||||
const COMMON_SH = resolve(REPO_ROOT, "shared/common.sh");
|
||||
|
||||
/**
|
||||
* Run a bash snippet that sources shared/common.sh first.
|
||||
* Returns { exitCode, stdout, stderr }.
|
||||
*/
|
||||
function runBash(script: string): { exitCode: number; stdout: string; stderr: string } {
|
||||
const fullScript = `source "${COMMON_SH}"\n${script}`;
|
||||
try {
|
||||
const stdout = execSync(`bash -c '${fullScript.replace(/'/g, "'\\''")}'`, {
|
||||
encoding: "utf-8",
|
||||
timeout: 10000,
|
||||
stdio: ["pipe", "pipe", "pipe"],
|
||||
env: { ...process.env, NO_COLOR: "1" },
|
||||
});
|
||||
return { exitCode: 0, stdout: stdout.trim(), stderr: "" };
|
||||
} catch (err: any) {
|
||||
return {
|
||||
exitCode: err.status ?? 1,
|
||||
stdout: (err.stdout || "").trim(),
|
||||
stderr: (err.stderr || "").trim(),
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/** Create a temporary directory for test files. */
|
||||
function createTempDir(): string {
|
||||
const dir = join(tmpdir(), `spawn-test-${Date.now()}-${Math.random().toString(36).slice(2)}`);
|
||||
mkdirSync(dir, { recursive: true });
|
||||
return dir;
|
||||
}
|
||||
|
||||
// ── inject_env_vars_ssh ────────────────────────────────────────────────
|
||||
|
||||
describe("inject_env_vars_ssh", () => {
|
||||
it("should call upload_func and run_func with correct arguments", () => {
|
||||
const dir = createTempDir();
|
||||
try {
|
||||
// Create a mock zshrc
|
||||
writeFileSync(join(dir, ".zshrc"), "# existing config\n");
|
||||
|
||||
// Mock upload and run functions that log their arguments
|
||||
const result = runBash(`
|
||||
mock_upload() { echo "UPLOAD: \$1 \$2 \$3"; }
|
||||
mock_run() { echo "RUN: \$1 \$2"; }
|
||||
inject_env_vars_ssh "192.168.1.1" mock_upload mock_run "MY_KEY=my_value"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("UPLOAD: 192.168.1.1");
|
||||
expect(result.stdout).toContain("/tmp/spawn_env_");
|
||||
expect(result.stdout).toContain("RUN: 192.168.1.1");
|
||||
expect(result.stdout).toContain(".zshrc");
|
||||
} finally {
|
||||
rmSync(dir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
it("should generate correct env config content via upload", () => {
|
||||
const dir = createTempDir();
|
||||
try {
|
||||
// Mock that captures the uploaded file content
|
||||
const result = runBash(`
|
||||
mock_upload() { cat "\$2"; }
|
||||
mock_run() { true; }
|
||||
inject_env_vars_ssh "10.0.0.1" mock_upload mock_run "API_KEY=sk-123" "BASE_URL=https://example.com"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("export API_KEY='sk-123'");
|
||||
expect(result.stdout).toContain("export BASE_URL='https://example.com'");
|
||||
expect(result.stdout).toContain("# [spawn:env]");
|
||||
} finally {
|
||||
rmSync(dir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
it("should handle multiple env vars", () => {
|
||||
const result = runBash(`
|
||||
mock_upload() { cat "\$2"; }
|
||||
mock_run() { true; }
|
||||
inject_env_vars_ssh "10.0.0.1" mock_upload mock_run "KEY1=val1" "KEY2=val2" "KEY3=val3"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("export KEY1='val1'");
|
||||
expect(result.stdout).toContain("export KEY2='val2'");
|
||||
expect(result.stdout).toContain("export KEY3='val3'");
|
||||
});
|
||||
|
||||
it("should pass server_ip as first arg to upload and run functions", () => {
|
||||
const result = runBash(`
|
||||
mock_upload() { echo "UPLOAD_IP=\$1"; }
|
||||
mock_run() { echo "RUN_IP=\$1"; }
|
||||
inject_env_vars_ssh "203.0.113.42" mock_upload mock_run "K=V"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("UPLOAD_IP=203.0.113.42");
|
||||
expect(result.stdout).toContain("RUN_IP=203.0.113.42");
|
||||
});
|
||||
|
||||
it("should handle values with special characters", () => {
|
||||
const result = runBash(`
|
||||
mock_upload() { cat "\$2"; }
|
||||
mock_run() { true; }
|
||||
inject_env_vars_ssh "10.0.0.1" mock_upload mock_run "URL=https://api.example.com?key=abc&token=def"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("export URL='https://api.example.com?key=abc&token=def'");
|
||||
});
|
||||
|
||||
it("should create temp file with restrictive permissions", () => {
|
||||
const result = runBash(`
|
||||
mock_upload() {
|
||||
local perms
|
||||
perms=$(stat -c '%a' "\$2" 2>/dev/null || stat -f '%Lp' "\$2" 2>/dev/null)
|
||||
echo "PERMS=\$perms"
|
||||
}
|
||||
mock_run() { true; }
|
||||
inject_env_vars_ssh "10.0.0.1" mock_upload mock_run "SECRET=s3cret"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("PERMS=600");
|
||||
});
|
||||
});
|
||||
|
||||
// ── inject_env_vars_local ──────────────────────────────────────────────
|
||||
|
||||
describe("inject_env_vars_local", () => {
|
||||
it("should call upload and run functions without server_ip", () => {
|
||||
const result = runBash(`
|
||||
mock_upload() { echo "UPLOAD_ARGS: \$1 \$2"; }
|
||||
mock_run() { echo "RUN_ARGS: \$1"; }
|
||||
inject_env_vars_local mock_upload mock_run "MY_KEY=my_value"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
// inject_env_vars_local does NOT pass server_ip - upload gets (local_path, remote_path)
|
||||
expect(result.stdout).toContain("UPLOAD_ARGS:");
|
||||
expect(result.stdout).toContain("/tmp/spawn_env_");
|
||||
expect(result.stdout).toMatch(/cat '\/tmp\/spawn_env_[^']+' >> ~\/.bashrc; cat '\/tmp\/spawn_env_[^']+' >> ~\/.zshrc/);
|
||||
});
|
||||
|
||||
it("should generate correct env config content", () => {
|
||||
const result = runBash(`
|
||||
mock_upload() { cat "\$1"; }
|
||||
mock_run() { true; }
|
||||
inject_env_vars_local mock_upload mock_run "OPENROUTER_KEY=sk-or-v1-abc"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("export OPENROUTER_KEY='sk-or-v1-abc'");
|
||||
expect(result.stdout).toContain("# [spawn:env]");
|
||||
});
|
||||
|
||||
it("should handle multiple env vars", () => {
|
||||
const result = runBash(`
|
||||
mock_upload() { cat "\$1"; }
|
||||
mock_run() { true; }
|
||||
inject_env_vars_local mock_upload mock_run "K1=v1" "K2=v2"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("export K1='v1'");
|
||||
expect(result.stdout).toContain("export K2='v2'");
|
||||
});
|
||||
|
||||
it("should create temp file with 600 permissions", () => {
|
||||
const result = runBash(`
|
||||
mock_upload() {
|
||||
local perms
|
||||
perms=$(stat -c '%a' "\$1" 2>/dev/null || stat -f '%Lp' "\$1" 2>/dev/null)
|
||||
echo "PERMS=\$perms"
|
||||
}
|
||||
mock_run() { true; }
|
||||
inject_env_vars_local mock_upload mock_run "SECRET=hidden"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("PERMS=600");
|
||||
});
|
||||
|
||||
it("should differ from inject_env_vars_ssh in argument passing", () => {
|
||||
// inject_env_vars_local passes (local_path, remote_path) to upload
|
||||
// inject_env_vars_ssh passes (server_ip, local_path, remote_path) to upload
|
||||
const localResult = runBash(`
|
||||
mock_upload() { echo "ARG_COUNT=\$#"; }
|
||||
mock_run() { true; }
|
||||
inject_env_vars_local mock_upload mock_run "K=V"
|
||||
`);
|
||||
const sshResult = runBash(`
|
||||
mock_upload() { echo "ARG_COUNT=\$#"; }
|
||||
mock_run() { true; }
|
||||
inject_env_vars_ssh "10.0.0.1" mock_upload mock_run "K=V"
|
||||
`);
|
||||
// local: upload(local_path, remote_path) = 2 args
|
||||
// ssh: upload(server_ip, local_path, remote_path) = 3 args
|
||||
expect(localResult.stdout).toContain("ARG_COUNT=2");
|
||||
expect(sshResult.stdout).toContain("ARG_COUNT=3");
|
||||
});
|
||||
|
||||
it("should handle values with single quotes via escaping", () => {
|
||||
const result = runBash(`
|
||||
mock_upload() { cat "\$1"; }
|
||||
mock_run() { true; }
|
||||
inject_env_vars_local mock_upload mock_run "MSG=it'\\''s a test"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
// The value should be properly escaped for bash sourcing
|
||||
expect(result.stdout).toContain("export MSG=");
|
||||
});
|
||||
});
|
||||
|
||||
// _extract_json_field tests are in shared-common-json-extraction.test.ts
|
||||
|
||||
// ── check_ssh_key_by_fingerprint ───────────────────────────────────────
|
||||
|
||||
describe("check_ssh_key_by_fingerprint", () => {
|
||||
it("should return 0 when fingerprint is found in API response", () => {
|
||||
const result = runBash(`
|
||||
mock_api() { echo '{"ssh_keys":[{"fingerprint":"aa:bb:cc:dd"}]}'; }
|
||||
check_ssh_key_by_fingerprint mock_api "/ssh_keys" "aa:bb:cc:dd"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should return 1 when fingerprint is not found", () => {
|
||||
const result = runBash(`
|
||||
mock_api() { echo '{"ssh_keys":[{"fingerprint":"xx:yy:zz:00"}]}'; }
|
||||
check_ssh_key_by_fingerprint mock_api "/ssh_keys" "aa:bb:cc:dd"
|
||||
`);
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should pass endpoint to the API function", () => {
|
||||
const result = runBash(`
|
||||
mock_api() { echo "CALLED_WITH: \$1 \$2"; }
|
||||
check_ssh_key_by_fingerprint mock_api "/v2/account/keys" "test-fp" 2>/dev/null || true
|
||||
echo "DONE"
|
||||
`);
|
||||
expect(result.stdout).toContain("DONE");
|
||||
});
|
||||
|
||||
it("should handle multiple keys and find a match", () => {
|
||||
const result = runBash(`
|
||||
mock_api() {
|
||||
echo '{"ssh_keys":[{"fingerprint":"11:22:33:44"},{"fingerprint":"55:66:77:88"},{"fingerprint":"aa:bb:cc:dd"}]}'
|
||||
}
|
||||
check_ssh_key_by_fingerprint mock_api "/ssh_keys" "55:66:77:88"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should return failure for empty API response", () => {
|
||||
const result = runBash(`
|
||||
mock_api() { echo '{"ssh_keys":[]}'; }
|
||||
check_ssh_key_by_fingerprint mock_api "/ssh_keys" "aa:bb:cc:dd"
|
||||
`);
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should handle SHA256 format fingerprints", () => {
|
||||
const result = runBash(`
|
||||
mock_api() { echo '{"keys":[{"fingerprint":"SHA256:abcdef1234567890ABCDEF"}]}'; }
|
||||
check_ssh_key_by_fingerprint mock_api "/keys" "SHA256:abcdef1234567890ABCDEF"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should use GET method when calling API function", () => {
|
||||
const result = runBash(`
|
||||
mock_api() {
|
||||
echo "METHOD=\$1 ENDPOINT=\$2" >&2
|
||||
echo '{"keys":[]}'
|
||||
}
|
||||
check_ssh_key_by_fingerprint mock_api "/ssh_keys" "test" 2>&1 | head -1
|
||||
`);
|
||||
expect(result.stdout).toContain("METHOD=GET");
|
||||
expect(result.stdout).toContain("ENDPOINT=/ssh_keys");
|
||||
});
|
||||
});
|
||||
|
||||
// opencode_install_cmd tests are in shared-common-logging-utils.test.ts
|
||||
|
||||
// track_temp_file/cleanup_temp_files tests are in shared-common-logging-utils.test.ts
|
||||
// validate_resource_name tests are in shared-common-validators.test.ts
|
||||
|
|
@ -1,297 +0,0 @@
|
|||
import { describe, it, expect } from "bun:test";
|
||||
import { spawnSync } from "child_process";
|
||||
import { resolve } from "path";
|
||||
|
||||
/**
|
||||
* Tests for extract_api_error_message and generic_wait_for_instance
|
||||
* in shared/common.sh.
|
||||
*
|
||||
* extract_api_error_message is used across 4+ cloud providers (10+ call sites)
|
||||
* to parse error responses from cloud APIs. It tries common JSON error field
|
||||
* patterns: error.message, error.error_message, message, reason, error (string).
|
||||
*
|
||||
* generic_wait_for_instance is used across 9 cloud providers as the core
|
||||
* polling loop for instance provisioning. It calls an API function repeatedly
|
||||
* until the target status is reached, then extracts the IP address.
|
||||
*
|
||||
* Both had zero test coverage despite being critical shared infrastructure.
|
||||
*
|
||||
* Agent: test-engineer
|
||||
*/
|
||||
|
||||
const REPO_ROOT = resolve(import.meta.dir, "../../..");
|
||||
const COMMON_SH = resolve(REPO_ROOT, "shared/common.sh");
|
||||
|
||||
/**
|
||||
* Run a bash snippet that sources shared/common.sh first.
|
||||
* Returns { exitCode, stdout, stderr }.
|
||||
*/
|
||||
function runBash(script: string): { exitCode: number; stdout: string; stderr: string } {
|
||||
const fullScript = `source "${COMMON_SH}"\n${script}`;
|
||||
const result = spawnSync("bash", ["-c", fullScript], {
|
||||
encoding: "utf-8",
|
||||
timeout: 10000,
|
||||
stdio: ["pipe", "pipe", "pipe"],
|
||||
});
|
||||
return {
|
||||
exitCode: result.status ?? 1,
|
||||
stdout: (result.stdout || "").trim(),
|
||||
stderr: (result.stderr || "").trim(),
|
||||
};
|
||||
}
|
||||
|
||||
// ── extract_api_error_message ──────────────────────────────────────────
|
||||
|
||||
describe("extract_api_error_message", () => {
|
||||
describe("top-level message field", () => {
|
||||
it("should extract message from top-level 'message' field", () => {
|
||||
const result = runBash(
|
||||
`extract_api_error_message '{"message":"Server not found"}'`
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("Server not found");
|
||||
});
|
||||
|
||||
it("should extract message from top-level 'reason' field", () => {
|
||||
const result = runBash(
|
||||
`extract_api_error_message '{"reason":"Rate limit exceeded"}'`
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("Rate limit exceeded");
|
||||
});
|
||||
});
|
||||
|
||||
describe("error as string", () => {
|
||||
it("should extract error when it is a plain string", () => {
|
||||
const result = runBash(
|
||||
`extract_api_error_message '{"error":"Unauthorized"}'`
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("Unauthorized");
|
||||
});
|
||||
|
||||
it("should extract error string even when it is a long message", () => {
|
||||
const result = runBash(
|
||||
`extract_api_error_message '{"error":"The API token provided is invalid or has expired"}'`
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("The API token provided is invalid or has expired");
|
||||
});
|
||||
});
|
||||
|
||||
describe("error as object with message field", () => {
|
||||
it("should extract error.message when error is an object", () => {
|
||||
const result = runBash(
|
||||
`extract_api_error_message '{"error":{"message":"Instance quota exceeded"}}'`
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("Instance quota exceeded");
|
||||
});
|
||||
|
||||
it("should extract error.error_message when error is an object", () => {
|
||||
const result = runBash(
|
||||
`extract_api_error_message '{"error":{"error_message":"Invalid region specified"}}'`
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("Invalid region specified");
|
||||
});
|
||||
|
||||
it("should prefer error.message over error.error_message", () => {
|
||||
const result = runBash(
|
||||
`extract_api_error_message '{"error":{"message":"Primary msg","error_message":"Secondary msg"}}'`
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("Primary msg");
|
||||
});
|
||||
});
|
||||
|
||||
describe("field priority", () => {
|
||||
it("should prefer error.message over top-level message", () => {
|
||||
const result = runBash(
|
||||
`extract_api_error_message '{"error":{"message":"Nested error"},"message":"Top-level message"}'`
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("Nested error");
|
||||
});
|
||||
|
||||
it("should fall back to top-level message when error is empty object", () => {
|
||||
const result = runBash(
|
||||
`extract_api_error_message '{"error":{},"message":"Top-level message"}'`
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("Top-level message");
|
||||
});
|
||||
|
||||
it("should fall back to reason when no message or error fields", () => {
|
||||
const result = runBash(
|
||||
`extract_api_error_message '{"reason":"Forbidden","status":403}'`
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("Forbidden");
|
||||
});
|
||||
|
||||
it("should prefer message over reason", () => {
|
||||
const result = runBash(
|
||||
`extract_api_error_message '{"message":"Auth failed","reason":"Forbidden"}'`
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("Auth failed");
|
||||
});
|
||||
|
||||
it("should prefer error string over reason", () => {
|
||||
const result = runBash(
|
||||
`extract_api_error_message '{"error":"Bad token","reason":"Forbidden"}'`
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
// error string comes after message/reason in the or-chain but before empty
|
||||
// The actual priority: error.message > message > reason > error(string)
|
||||
// Wait, let's re-read the code:
|
||||
// msg = (isinstance(e, dict) and (e.get('message') or e.get('error_message')))
|
||||
// or d.get('message')
|
||||
// or d.get('reason')
|
||||
// or (isinstance(e, str) and e)
|
||||
// So error string has lowest priority
|
||||
expect(result.stdout).toBe("Forbidden");
|
||||
});
|
||||
});
|
||||
|
||||
describe("fallback behavior", () => {
|
||||
it("should use default fallback for invalid JSON", () => {
|
||||
const result = runBash(
|
||||
`extract_api_error_message 'not valid json'`
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("Unknown error");
|
||||
});
|
||||
|
||||
it("should use custom fallback for invalid JSON", () => {
|
||||
const result = runBash(
|
||||
`extract_api_error_message 'not valid json' 'Custom fallback'`
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("Custom fallback");
|
||||
});
|
||||
|
||||
it("should use fallback when JSON has no recognized error fields", () => {
|
||||
const result = runBash(
|
||||
`extract_api_error_message '{"status":500,"code":"INTERNAL"}' 'Server error'`
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("Server error");
|
||||
});
|
||||
|
||||
it("should use default fallback for empty JSON object", () => {
|
||||
const result = runBash(
|
||||
`extract_api_error_message '{}'`
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("Unknown error");
|
||||
});
|
||||
|
||||
it("should use fallback for empty string input", () => {
|
||||
const result = runBash(
|
||||
`extract_api_error_message '' 'No response'`
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("No response");
|
||||
});
|
||||
|
||||
it("should use fallback when error object has no message or error_message", () => {
|
||||
const result = runBash(
|
||||
`extract_api_error_message '{"error":{"code":"ERR_QUOTA"}}' 'Quota error'`
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("Quota error");
|
||||
});
|
||||
});
|
||||
|
||||
describe("real-world API responses", () => {
|
||||
it("should parse Hetzner-style error response", () => {
|
||||
const result = runBash(
|
||||
`extract_api_error_message '{"error":{"message":"server_limit_exceeded","code":"limit_exceeded"}}'`
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("server_limit_exceeded");
|
||||
});
|
||||
|
||||
it("should parse DigitalOcean-style error response", () => {
|
||||
const result = runBash(
|
||||
`extract_api_error_message '{"id":"unauthorized","message":"Unable to authenticate you"}'`
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("Unable to authenticate you");
|
||||
});
|
||||
|
||||
it("should parse Vultr-style error response", () => {
|
||||
const result = runBash(
|
||||
`extract_api_error_message '{"error":"Invalid API token.","status":401}'`
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("Invalid API token.");
|
||||
});
|
||||
|
||||
it("should parse Contabo-style error response", () => {
|
||||
const result = runBash(
|
||||
`extract_api_error_message '{"error":{"message":"Resource not found","code":404}}'`
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("Resource not found");
|
||||
});
|
||||
|
||||
it("should parse response with HTML error body as fallback", () => {
|
||||
const result = runBash(
|
||||
`extract_api_error_message '<html>503 Service Unavailable</html>' 'Service unavailable'`
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("Service unavailable");
|
||||
});
|
||||
});
|
||||
|
||||
describe("edge cases", () => {
|
||||
it("should handle message with special characters", () => {
|
||||
const result = runBash(
|
||||
`extract_api_error_message '{"message":"Can'\\''t create: quota (5/5) exceeded"}'`
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("quota");
|
||||
expect(result.stdout).toContain("exceeded");
|
||||
});
|
||||
|
||||
it("should handle message with unicode characters", () => {
|
||||
const result = runBash(
|
||||
`extract_api_error_message '{"message":"Fehler: Kontingent \\u00fcberschritten"}'`
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("Fehler");
|
||||
});
|
||||
|
||||
it("should handle JSON array input as fallback", () => {
|
||||
const result = runBash(
|
||||
`extract_api_error_message '[1,2,3]' 'Not an object'`
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
// JSON array has no .get method, so python will throw and fall through to fallback
|
||||
expect(result.stdout).toBe("Not an object");
|
||||
});
|
||||
|
||||
it("should handle null JSON value as fallback", () => {
|
||||
const result = runBash(
|
||||
`extract_api_error_message 'null' 'Null response'`
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("Null response");
|
||||
});
|
||||
|
||||
it("should handle nested error with empty message string", () => {
|
||||
const result = runBash(
|
||||
`extract_api_error_message '{"error":{"message":""},"reason":"Backup reason"}' 'default'`
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
// Empty message is falsy in Python, so it should fall through
|
||||
expect(result.stdout).toBe("Backup reason");
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
// _extract_json_field tests are in shared-common-json-extraction.test.ts
|
||||
// generic_wait_for_instance tests are in shared-common-ssh-helpers.test.ts
|
||||
|
|
@ -1,589 +0,0 @@
|
|||
import { describe, it, expect } from "bun:test";
|
||||
import { execSync } from "child_process";
|
||||
import { resolve, join } from "path";
|
||||
import { mkdirSync, writeFileSync, readFileSync, rmSync, existsSync } from "fs";
|
||||
import { tmpdir } from "os";
|
||||
|
||||
/**
|
||||
* Tests for untested bash helper functions in shared/common.sh:
|
||||
* - _load_json_config_fields: JSON config field loading (used by all multi-credential providers)
|
||||
* - _save_json_config: JSON config writing with json_escape
|
||||
* - extract_ssh_key_ids: SSH key ID extraction from cloud API responses
|
||||
* - _generate_csrf_state: CSRF state generation (security-critical)
|
||||
* - interactive_pick: Interactive picker with env var override
|
||||
*
|
||||
* These functions had zero test coverage despite being used across all cloud
|
||||
* provider scripts. Each test sources shared/common.sh and calls the function
|
||||
* in a real bash subprocess.
|
||||
*
|
||||
* Agent: test-engineer
|
||||
*/
|
||||
|
||||
const REPO_ROOT = resolve(import.meta.dir, "../../..");
|
||||
const COMMON_SH = resolve(REPO_ROOT, "shared/common.sh");
|
||||
|
||||
/**
|
||||
* Run a bash snippet that sources shared/common.sh first.
|
||||
* Returns { exitCode, stdout, stderr }.
|
||||
*/
|
||||
function runBash(script: string): { exitCode: number; stdout: string; stderr: string } {
|
||||
const fullScript = `source "${COMMON_SH}"\n${script}`;
|
||||
try {
|
||||
const stdout = execSync(`bash -c '${fullScript.replace(/'/g, "'\\''")}'`, {
|
||||
encoding: "utf-8",
|
||||
timeout: 10000,
|
||||
stdio: ["pipe", "pipe", "pipe"],
|
||||
});
|
||||
return { exitCode: 0, stdout: stdout.trim(), stderr: "" };
|
||||
} catch (err: any) {
|
||||
return {
|
||||
exitCode: err.status ?? 1,
|
||||
stdout: (err.stdout || "").trim(),
|
||||
stderr: (err.stderr || "").trim(),
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a temporary directory for test files.
|
||||
*/
|
||||
function createTempDir(): string {
|
||||
const dir = join(tmpdir(), `spawn-test-${Date.now()}-${Math.random().toString(36).slice(2)}`);
|
||||
mkdirSync(dir, { recursive: true });
|
||||
return dir;
|
||||
}
|
||||
|
||||
// ── _load_json_config_fields ────────────────────────────────────────────
|
||||
|
||||
describe("_load_json_config_fields", () => {
|
||||
it("should load a single field from JSON config", () => {
|
||||
const dir = createTempDir();
|
||||
const configFile = join(dir, "config.json");
|
||||
writeFileSync(configFile, JSON.stringify({ api_key: "sk-test-123" }));
|
||||
|
||||
const result = runBash(`_load_json_config_fields "${configFile}" api_key`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("sk-test-123");
|
||||
|
||||
rmSync(dir, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
it("should load multiple fields from JSON config", () => {
|
||||
const dir = createTempDir();
|
||||
const configFile = join(dir, "config.json");
|
||||
writeFileSync(configFile, JSON.stringify({
|
||||
username: "admin",
|
||||
password: "s3cret",
|
||||
region: "us-east-1",
|
||||
}));
|
||||
|
||||
const result = runBash(`_load_json_config_fields "${configFile}" username password region`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
const lines = result.stdout.split("\n");
|
||||
expect(lines[0]).toBe("admin");
|
||||
expect(lines[1]).toBe("s3cret");
|
||||
expect(lines[2]).toBe("us-east-1");
|
||||
|
||||
rmSync(dir, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
it("should return empty string for missing fields", () => {
|
||||
const dir = createTempDir();
|
||||
const configFile = join(dir, "config.json");
|
||||
writeFileSync(configFile, JSON.stringify({ api_key: "present" }));
|
||||
|
||||
// Use the intended read pattern -- missing fields produce empty lines
|
||||
const result = runBash(`
|
||||
creds=$(_load_json_config_fields "${configFile}" api_key missing_field)
|
||||
{ read -r v1; read -r v2; } <<< "\${creds}"
|
||||
echo "v1=\${v1}"
|
||||
echo "v2=\${v2}"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("v1=present");
|
||||
expect(result.stdout).toContain("v2=");
|
||||
|
||||
rmSync(dir, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
it("should return exit code 1 for missing config file", () => {
|
||||
const result = runBash(`_load_json_config_fields "/tmp/nonexistent-spawn-config-${Date.now()}.json" api_key`);
|
||||
expect(result.exitCode).toBe(1);
|
||||
});
|
||||
|
||||
it("should return exit code 1 for invalid JSON", () => {
|
||||
const dir = createTempDir();
|
||||
const configFile = join(dir, "bad.json");
|
||||
writeFileSync(configFile, "{ not valid json!!!");
|
||||
|
||||
const result = runBash(`_load_json_config_fields "${configFile}" api_key`);
|
||||
expect(result.exitCode).toBe(1);
|
||||
|
||||
rmSync(dir, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
it("should handle empty JSON object", () => {
|
||||
const dir = createTempDir();
|
||||
const configFile = join(dir, "empty.json");
|
||||
writeFileSync(configFile, "{}");
|
||||
|
||||
const result = runBash(`_load_json_config_fields "${configFile}" api_key`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("");
|
||||
|
||||
rmSync(dir, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
it("should handle values with special characters", () => {
|
||||
const dir = createTempDir();
|
||||
const configFile = join(dir, "special.json");
|
||||
writeFileSync(configFile, JSON.stringify({
|
||||
token: "sk-or-v1-abc123/def+456==",
|
||||
url: "https://api.example.com/v1?key=val&other=true",
|
||||
}));
|
||||
|
||||
const result = runBash(`_load_json_config_fields "${configFile}" token url`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
const lines = result.stdout.split("\n");
|
||||
expect(lines[0]).toBe("sk-or-v1-abc123/def+456==");
|
||||
expect(lines[1]).toBe("https://api.example.com/v1?key=val&other=true");
|
||||
|
||||
rmSync(dir, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
it("should handle numeric and boolean values", () => {
|
||||
const dir = createTempDir();
|
||||
const configFile = join(dir, "types.json");
|
||||
writeFileSync(configFile, JSON.stringify({ port: 8080, enabled: true }));
|
||||
|
||||
const result = runBash(`_load_json_config_fields "${configFile}" port enabled`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
const lines = result.stdout.split("\n");
|
||||
expect(lines[0]).toBe("8080");
|
||||
expect(lines[1]).toBe("true");
|
||||
|
||||
rmSync(dir, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
it("should handle values that are empty strings", () => {
|
||||
const dir = createTempDir();
|
||||
const configFile = join(dir, "empty-val.json");
|
||||
writeFileSync(configFile, JSON.stringify({ key: "" }));
|
||||
|
||||
const result = runBash(`_load_json_config_fields "${configFile}" key`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("");
|
||||
|
||||
rmSync(dir, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
it("should handle reading results into variables via read", () => {
|
||||
const dir = createTempDir();
|
||||
const configFile = join(dir, "multi.json");
|
||||
writeFileSync(configFile, JSON.stringify({
|
||||
username: "admin",
|
||||
password: "hunter2",
|
||||
}));
|
||||
|
||||
// Test the intended usage pattern: reading into variables
|
||||
const result = runBash(`
|
||||
creds=$(_load_json_config_fields "${configFile}" username password)
|
||||
{ read -r user; read -r pass; } <<< "\${creds}"
|
||||
echo "user=\${user}"
|
||||
echo "pass=\${pass}"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("user=admin");
|
||||
expect(result.stdout).toContain("pass=hunter2");
|
||||
|
||||
rmSync(dir, { recursive: true, force: true });
|
||||
});
|
||||
});
|
||||
|
||||
// ── _save_json_config ───────────────────────────────────────────────────
|
||||
|
||||
describe("_save_json_config", () => {
|
||||
it("should save a single key-value pair", () => {
|
||||
const dir = createTempDir();
|
||||
const configFile = join(dir, "out.json");
|
||||
|
||||
const result = runBash(`_save_json_config "${configFile}" api_key sk-test-123`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
|
||||
const content = readFileSync(configFile, "utf-8");
|
||||
const parsed = JSON.parse(content);
|
||||
expect(parsed.api_key).toBe("sk-test-123");
|
||||
|
||||
rmSync(dir, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
it("should save multiple key-value pairs", () => {
|
||||
const dir = createTempDir();
|
||||
const configFile = join(dir, "multi.json");
|
||||
|
||||
const result = runBash(`_save_json_config "${configFile}" username admin password s3cret`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
|
||||
const content = readFileSync(configFile, "utf-8");
|
||||
const parsed = JSON.parse(content);
|
||||
expect(parsed.username).toBe("admin");
|
||||
expect(parsed.password).toBe("s3cret");
|
||||
|
||||
rmSync(dir, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
it("should create parent directories if needed", () => {
|
||||
const dir = createTempDir();
|
||||
const configFile = join(dir, "nested", "deep", "config.json");
|
||||
|
||||
const result = runBash(`_save_json_config "${configFile}" key value`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(existsSync(configFile)).toBe(true);
|
||||
|
||||
const parsed = JSON.parse(readFileSync(configFile, "utf-8"));
|
||||
expect(parsed.key).toBe("value");
|
||||
|
||||
rmSync(dir, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
it("should set restrictive file permissions (600)", () => {
|
||||
const dir = createTempDir();
|
||||
const configFile = join(dir, "perms.json");
|
||||
|
||||
runBash(`_save_json_config "${configFile}" key value`);
|
||||
|
||||
const result = runBash(`stat -c %a "${configFile}" 2>/dev/null || stat -f %Lp "${configFile}"`);
|
||||
expect(result.stdout).toBe("600");
|
||||
|
||||
rmSync(dir, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
it("should properly escape special characters in values", () => {
|
||||
const dir = createTempDir();
|
||||
const configFile = join(dir, "escape.json");
|
||||
|
||||
const result = runBash(`_save_json_config "${configFile}" token 'value"with"quotes'`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
|
||||
const content = readFileSync(configFile, "utf-8");
|
||||
const parsed = JSON.parse(content);
|
||||
expect(parsed.token).toBe('value"with"quotes');
|
||||
|
||||
rmSync(dir, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
it("should handle values with backslashes", () => {
|
||||
const dir = createTempDir();
|
||||
const configFile = join(dir, "backslash.json");
|
||||
|
||||
const result = runBash(`_save_json_config "${configFile}" path 'C:\\Users\\test'`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
|
||||
const content = readFileSync(configFile, "utf-8");
|
||||
const parsed = JSON.parse(content);
|
||||
expect(parsed.path).toBe("C:\\Users\\test");
|
||||
|
||||
rmSync(dir, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
it("should handle empty values", () => {
|
||||
const dir = createTempDir();
|
||||
const configFile = join(dir, "empty.json");
|
||||
|
||||
const result = runBash(`_save_json_config "${configFile}" key ""`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
|
||||
const content = readFileSync(configFile, "utf-8");
|
||||
const parsed = JSON.parse(content);
|
||||
expect(parsed.key).toBe("");
|
||||
|
||||
rmSync(dir, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
it("should overwrite existing config file", () => {
|
||||
const dir = createTempDir();
|
||||
const configFile = join(dir, "overwrite.json");
|
||||
writeFileSync(configFile, JSON.stringify({ old: "data" }));
|
||||
|
||||
const result = runBash(`_save_json_config "${configFile}" new_key new_value`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
|
||||
const content = readFileSync(configFile, "utf-8");
|
||||
const parsed = JSON.parse(content);
|
||||
expect(parsed.new_key).toBe("new_value");
|
||||
expect(parsed.old).toBeUndefined();
|
||||
|
||||
rmSync(dir, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
it("should produce valid JSON that _load_json_config_fields can read", () => {
|
||||
const dir = createTempDir();
|
||||
const configFile = join(dir, "roundtrip.json");
|
||||
|
||||
runBash(`_save_json_config "${configFile}" user testuser pass "hunter2"`);
|
||||
|
||||
const loadResult = runBash(`_load_json_config_fields "${configFile}" user pass`);
|
||||
expect(loadResult.exitCode).toBe(0);
|
||||
const lines = loadResult.stdout.split("\n");
|
||||
expect(lines[0]).toBe("testuser");
|
||||
expect(lines[1]).toBe("hunter2");
|
||||
|
||||
rmSync(dir, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
it("should handle values with newlines via json_escape", () => {
|
||||
const dir = createTempDir();
|
||||
const configFile = join(dir, "newline.json");
|
||||
|
||||
// Use printf to pass a value with actual newline
|
||||
const result = runBash(`_save_json_config "${configFile}" key "$(printf 'line1\\nline2')"`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
|
||||
const content = readFileSync(configFile, "utf-8");
|
||||
const parsed = JSON.parse(content);
|
||||
expect(parsed.key).toBe("line1\nline2");
|
||||
|
||||
rmSync(dir, { recursive: true, force: true });
|
||||
});
|
||||
});
|
||||
|
||||
// ── extract_ssh_key_ids ─────────────────────────────────────────────────
|
||||
|
||||
describe("extract_ssh_key_ids", () => {
|
||||
it("should extract IDs from DigitalOcean-style response (ssh_keys field)", () => {
|
||||
const response = JSON.stringify({
|
||||
ssh_keys: [
|
||||
{ id: 12345, name: "my-key-1" },
|
||||
{ id: 67890, name: "my-key-2" },
|
||||
],
|
||||
});
|
||||
|
||||
const result = runBash(`echo '${response}' | python3 -c "
|
||||
import json, sys
|
||||
data = json.loads(sys.stdin.read())
|
||||
ids = [k['id'] for k in data.get('ssh_keys', [])]
|
||||
print(json.dumps(ids))
|
||||
"`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(JSON.parse(result.stdout)).toEqual([12345, 67890]);
|
||||
});
|
||||
|
||||
it("should extract IDs from Linode-style response (data field)", () => {
|
||||
const response = JSON.stringify({
|
||||
data: [
|
||||
{ id: 111, label: "work-key" },
|
||||
{ id: 222, label: "personal-key" },
|
||||
{ id: 333, label: "deploy-key" },
|
||||
],
|
||||
});
|
||||
|
||||
// Simulate extract_ssh_key_ids with key_field="data"
|
||||
const result = runBash(`extract_ssh_key_ids '${response}' data`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(JSON.parse(result.stdout)).toEqual([111, 222, 333]);
|
||||
});
|
||||
|
||||
it("should default to ssh_keys field when no field specified", () => {
|
||||
const response = JSON.stringify({
|
||||
ssh_keys: [{ id: 42, name: "default" }],
|
||||
});
|
||||
|
||||
const result = runBash(`extract_ssh_key_ids '${response}'`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(JSON.parse(result.stdout)).toEqual([42]);
|
||||
});
|
||||
|
||||
it("should return empty array when no keys present", () => {
|
||||
const response = JSON.stringify({ ssh_keys: [] });
|
||||
|
||||
const result = runBash(`extract_ssh_key_ids '${response}'`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(JSON.parse(result.stdout)).toEqual([]);
|
||||
});
|
||||
|
||||
it("should return empty array when field is missing", () => {
|
||||
const response = JSON.stringify({ other_data: "foo" });
|
||||
|
||||
const result = runBash(`extract_ssh_key_ids '${response}'`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(JSON.parse(result.stdout)).toEqual([]);
|
||||
});
|
||||
|
||||
it("should handle string IDs (Vultr uses string UUIDs)", () => {
|
||||
const response = JSON.stringify({
|
||||
ssh_keys: [
|
||||
{ id: "abc-123-def", name: "vultr-key" },
|
||||
{ id: "xyz-789-uvw", name: "other-key" },
|
||||
],
|
||||
});
|
||||
|
||||
const result = runBash(`extract_ssh_key_ids '${response}'`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(JSON.parse(result.stdout)).toEqual(["abc-123-def", "xyz-789-uvw"]);
|
||||
});
|
||||
|
||||
it("should handle single key in response", () => {
|
||||
const response = JSON.stringify({
|
||||
ssh_keys: [{ id: 99, name: "only-key" }],
|
||||
});
|
||||
|
||||
const result = runBash(`extract_ssh_key_ids '${response}'`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(JSON.parse(result.stdout)).toEqual([99]);
|
||||
});
|
||||
});
|
||||
|
||||
// ── _generate_csrf_state ────────────────────────────────────────────────
|
||||
|
||||
describe("_generate_csrf_state", () => {
|
||||
it("should generate a non-empty string", () => {
|
||||
const result = runBash(`_generate_csrf_state`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout.length).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
it("should generate hex-only output", () => {
|
||||
const result = runBash(`_generate_csrf_state`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
// Output should only contain hexadecimal characters
|
||||
expect(/^[0-9a-f]+$/.test(result.stdout)).toBe(true);
|
||||
});
|
||||
|
||||
it("should generate at least 16 hex chars (64 bits of entropy)", () => {
|
||||
const result = runBash(`_generate_csrf_state`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout.length).toBeGreaterThanOrEqual(16);
|
||||
});
|
||||
|
||||
it("should generate unique values on consecutive calls", () => {
|
||||
const result = runBash(`
|
||||
state1=$(_generate_csrf_state)
|
||||
state2=$(_generate_csrf_state)
|
||||
if [[ "\${state1}" == "\${state2}" ]]; then
|
||||
echo "DUPLICATE"
|
||||
else
|
||||
echo "UNIQUE"
|
||||
fi
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("UNIQUE");
|
||||
});
|
||||
|
||||
it("should work with openssl if available", () => {
|
||||
const result = runBash(`
|
||||
if command -v openssl &>/dev/null; then
|
||||
state=$(_generate_csrf_state)
|
||||
# openssl rand -hex 16 produces exactly 32 hex chars
|
||||
echo "\${#state}"
|
||||
else
|
||||
echo "32" # skip test if openssl not available
|
||||
fi
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("32");
|
||||
});
|
||||
});
|
||||
|
||||
// ── interactive_pick ────────────────────────────────────────────────────
|
||||
|
||||
describe("interactive_pick", () => {
|
||||
it("should use environment variable value when set", () => {
|
||||
const result = runBash(`
|
||||
export MY_PICK_VAR="from-env"
|
||||
selected=$(interactive_pick MY_PICK_VAR default-val "options" "echo dummy")
|
||||
echo "\${selected}"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("from-env");
|
||||
});
|
||||
|
||||
it("should use default when env var is empty and callback returns nothing", () => {
|
||||
const result = runBash(`
|
||||
unset MY_PICK_VAR
|
||||
list_empty() { echo ""; }
|
||||
selected=$(interactive_pick MY_PICK_VAR "my-default" "options" list_empty)
|
||||
echo "\${selected}"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("my-default");
|
||||
});
|
||||
|
||||
it("should prefer env var over callback results", () => {
|
||||
const result = runBash(`
|
||||
export REGION_VAR="eu-west-1"
|
||||
list_regions() { echo "us-east-1|US East"; echo "eu-west-1|EU West"; }
|
||||
selected=$(interactive_pick REGION_VAR "us-east-1" "regions" list_regions)
|
||||
echo "\${selected}"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("eu-west-1");
|
||||
});
|
||||
});
|
||||
|
||||
// ── _save_json_config + _load_json_config_fields roundtrip ──────────────
|
||||
|
||||
describe("_save_json_config + _load_json_config_fields roundtrip", () => {
|
||||
it("should roundtrip simple credentials", () => {
|
||||
const dir = createTempDir();
|
||||
const configFile = join(dir, "rt.json");
|
||||
|
||||
const result = runBash(`
|
||||
_save_json_config "${configFile}" client_id "my-client" client_secret "my-secret"
|
||||
creds=$(_load_json_config_fields "${configFile}" client_id client_secret)
|
||||
{ read -r cid; read -r csec; } <<< "\${creds}"
|
||||
echo "id=\${cid}"
|
||||
echo "secret=\${csec}"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("id=my-client");
|
||||
expect(result.stdout).toContain("secret=my-secret");
|
||||
|
||||
rmSync(dir, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
it("should roundtrip values with special chars (quotes, slashes, ampersands)", () => {
|
||||
const dir = createTempDir();
|
||||
const configFile = join(dir, "special-rt.json");
|
||||
|
||||
const result = runBash(`
|
||||
_save_json_config "${configFile}" url "https://api.com/v1?a=1&b=2"
|
||||
loaded=$(_load_json_config_fields "${configFile}" url)
|
||||
echo "\${loaded}"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("https://api.com/v1?a=1&b=2");
|
||||
|
||||
rmSync(dir, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
it("should roundtrip API key format values", () => {
|
||||
const dir = createTempDir();
|
||||
const configFile = join(dir, "apikey-rt.json");
|
||||
|
||||
const result = runBash(`
|
||||
_save_json_config "${configFile}" token "sk-or-v1-abc123def456ghi789"
|
||||
loaded=$(_load_json_config_fields "${configFile}" token)
|
||||
echo "\${loaded}"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("sk-or-v1-abc123def456ghi789");
|
||||
|
||||
rmSync(dir, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
it("should roundtrip three credentials (UpCloud pattern)", () => {
|
||||
const dir = createTempDir();
|
||||
const configFile = join(dir, "upcloud-rt.json");
|
||||
|
||||
const result = runBash(`
|
||||
_save_json_config "${configFile}" username "admin" password "p@ss!w0rd" zone "fi-hel1"
|
||||
creds=$(_load_json_config_fields "${configFile}" username password zone)
|
||||
{ read -r u; read -r p; read -r z; } <<< "\${creds}"
|
||||
echo "\${u}|\${p}|\${z}"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("admin|p@ss!w0rd|fi-hel1");
|
||||
|
||||
rmSync(dir, { recursive: true, force: true });
|
||||
});
|
||||
});
|
||||
|
|
@ -1,637 +0,0 @@
|
|||
import { describe, it, expect } from "bun:test";
|
||||
import { execSync } from "child_process";
|
||||
import { resolve } from "path";
|
||||
|
||||
/**
|
||||
* Tests for interactive input validation helpers in shared/common.sh:
|
||||
*
|
||||
* - get_resource_name: resource name from env var (bypassing safe_read)
|
||||
* - get_validated_server_name: env-var path + validate_server_name integration
|
||||
* - get_model_id_interactive: MODEL_ID env var path with validation
|
||||
* - interactive_pick: env var bypass path, list callback, default selection
|
||||
* - show_server_name_requirements: output format
|
||||
* - _display_and_select: menu rendering and default selection (non-stdin paths)
|
||||
* - validated_read: validation callback contract (via stdin workaround)
|
||||
*
|
||||
* These functions are used by every agent/cloud script but had zero test
|
||||
* coverage. Tests exercise the env-var bypass paths (most critical for
|
||||
* CI/automated usage) since safe_read requires an interactive terminal.
|
||||
*
|
||||
* Agent: test-engineer
|
||||
*/
|
||||
|
||||
const REPO_ROOT = resolve(import.meta.dir, "../../..");
|
||||
const COMMON_SH = resolve(REPO_ROOT, "shared/common.sh");
|
||||
|
||||
/**
|
||||
* Run a bash snippet that sources shared/common.sh first.
|
||||
* Always captures both stdout and stderr (even on success).
|
||||
*/
|
||||
function runBash(
|
||||
script: string,
|
||||
opts?: { env?: Record<string, string> }
|
||||
): { exitCode: number; stdout: string; stderr: string } {
|
||||
const fullScript = `source "${COMMON_SH}"\n${script}`;
|
||||
const escaped = fullScript.replace(/'/g, "'\\''");
|
||||
try {
|
||||
const stdout = execSync(`bash -c '${escaped}' 2>/tmp/spawn-test-stderr$$`, {
|
||||
encoding: "utf-8",
|
||||
timeout: 10000,
|
||||
env: { ...process.env, ...opts?.env },
|
||||
});
|
||||
let stderr = "";
|
||||
try {
|
||||
stderr = execSync(`cat /tmp/spawn-test-stderr$$ 2>/dev/null; rm -f /tmp/spawn-test-stderr$$`, {
|
||||
encoding: "utf-8",
|
||||
});
|
||||
} catch (err: any) {
|
||||
// Expected: cat fails if file doesn't exist. Log unexpected command failures.
|
||||
if (err.status !== 1) console.error("Unexpected error in stderr cleanup:", err);
|
||||
}
|
||||
return { exitCode: 0, stdout: stdout.trim(), stderr: stderr.trim() };
|
||||
} catch (err: any) {
|
||||
let stderr = (err.stderr || "").trim();
|
||||
try {
|
||||
const captured = execSync(`cat /tmp/spawn-test-stderr$$ 2>/dev/null; rm -f /tmp/spawn-test-stderr$$`, {
|
||||
encoding: "utf-8",
|
||||
});
|
||||
if (captured.trim()) stderr = captured.trim();
|
||||
} catch (captureErr: any) {
|
||||
// Expected: cat fails if file doesn't exist.
|
||||
if (captureErr.status !== 1) console.error("Unexpected error capturing stderr:", captureErr);
|
||||
}
|
||||
return {
|
||||
exitCode: err.status ?? 1,
|
||||
stdout: (err.stdout || "").trim(),
|
||||
stderr,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Run bash with stderr captured inline via fd redirection.
|
||||
* Captures both stdout and stderr reliably.
|
||||
*/
|
||||
function runBashCapture(
|
||||
script: string,
|
||||
opts?: { env?: Record<string, string> }
|
||||
): { exitCode: number; stdout: string; stderr: string } {
|
||||
const stderrFile = `/tmp/spawn-test-err-${process.pid}-${Date.now()}`;
|
||||
const fullScript = `source "${COMMON_SH}"\n${script}`;
|
||||
const escaped = fullScript.replace(/'/g, "'\\''");
|
||||
try {
|
||||
const stdout = execSync(`bash -c '${escaped}' 2>"${stderrFile}"`, {
|
||||
encoding: "utf-8",
|
||||
timeout: 10000,
|
||||
env: { ...process.env, ...opts?.env },
|
||||
});
|
||||
let stderr = "";
|
||||
try {
|
||||
stderr = execSync(`cat "${stderrFile}" 2>/dev/null`, { encoding: "utf-8" });
|
||||
} catch (err: any) {
|
||||
// Expected: cat fails if file doesn't exist.
|
||||
if (err.status !== 1) console.error("Unexpected error reading stderr file:", err);
|
||||
}
|
||||
try { execSync(`rm -f "${stderrFile}"`); } catch (err: any) {
|
||||
console.error("Unexpected error removing stderr file:", err);
|
||||
}
|
||||
return { exitCode: 0, stdout: stdout.trim(), stderr: stderr.trim() };
|
||||
} catch (err: any) {
|
||||
let stderr = (err.stderr || "").trim();
|
||||
try {
|
||||
const captured = execSync(`cat "${stderrFile}" 2>/dev/null`, { encoding: "utf-8" });
|
||||
if (captured.trim()) stderr = captured.trim();
|
||||
} catch (captureErr: any) {
|
||||
// Expected: cat fails if file doesn't exist.
|
||||
if (captureErr.status !== 1) console.error("Unexpected error capturing stderr:", captureErr);
|
||||
}
|
||||
try { execSync(`rm -f "${stderrFile}"`); } catch (rmErr: any) {
|
||||
console.error("Unexpected error removing stderr file:", rmErr);
|
||||
}
|
||||
return {
|
||||
exitCode: err.status ?? 1,
|
||||
stdout: (err.stdout || "").trim(),
|
||||
stderr,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
// ── get_resource_name (env var path) ───────────────────────────────────────
|
||||
|
||||
// get_resource_name: resolves a resource name from the named env var, falling
// back to an interactive stdin prompt. The test runner has no tty, so only the
// env-var path and the non-interactive failure mode are exercised here.
describe("get_resource_name", () => {
  describe("env var set (bypasses stdin)", () => {
    it("should return value from env var", () => {
      const result = runBash(
        'get_resource_name "MY_RESOURCE" "Enter resource name: "',
        { env: { MY_RESOURCE: "from-env" } }
      );
      expect(result.exitCode).toBe(0);
      expect(result.stdout).toBe("from-env");
    });

    it("should log that value comes from environment", () => {
      // runBashCapture keeps stderr separate so the log line can be inspected.
      const result = runBashCapture(
        'get_resource_name "MY_SERVER" "Enter server name: "',
        { env: { MY_SERVER: "test-srv" } }
      );
      expect(result.exitCode).toBe(0);
      expect(result.stderr).toContain("environment");
    });

    it("should accept hyphenated names", () => {
      const result = runBash(
        'get_resource_name "NAME" "Enter: "',
        { env: { NAME: "my-server-01" } }
      );
      expect(result.exitCode).toBe(0);
      expect(result.stdout).toBe("my-server-01");
    });

    it("should accept names with underscores", () => {
      const result = runBash(
        'get_resource_name "NAME" "Enter: "',
        { env: { NAME: "my_server" } }
      );
      expect(result.exitCode).toBe(0);
      expect(result.stdout).toBe("my_server");
    });

    it("should accept names with dots", () => {
      // e.g. machine-type style identifiers such as "e2.micro".
      const result = runBash(
        'get_resource_name "TYPE" "Enter: "',
        { env: { TYPE: "e2.micro" } }
      );
      expect(result.exitCode).toBe(0);
      expect(result.stdout).toBe("e2.micro");
    });

    it("should preserve spaces in env var value", () => {
      // get_resource_name does no validation of its own, so spaces pass through.
      const result = runBash(
        'get_resource_name "LABEL" "Enter: "',
        { env: { LABEL: "My Server Label" } }
      );
      expect(result.exitCode).toBe(0);
      expect(result.stdout).toBe("My Server Label");
    });
  });

  describe("env var not set (stdin path fails without tty)", () => {
    it("should fail in non-interactive mode with empty env var", () => {
      const result = runBash(
        'get_resource_name "UNSET_VAR_XYZ" "Enter name: "',
      );
      expect(result.exitCode).not.toBe(0);
    });

    it("should show error about non-interactive mode", () => {
      const result = runBashCapture(
        'get_resource_name "UNSET_VAR_XYZ" "Enter name: "',
      );
      expect(result.exitCode).not.toBe(0);
      // Should mention the env var name users can set
      expect(result.stderr).toContain("UNSET_VAR_XYZ");
    });
  });
});
|
||||
|
||||
// ── get_validated_server_name (env var + validation) ───────────────────────
|
||||
|
||||
// get_validated_server_name: like get_resource_name but enforces server-name
// rules (length 3-63, letters/numbers, dashes only in the interior — the
// boundary and rejection cases below pin down that contract). Only the
// env-var path is testable without a tty.
describe("get_validated_server_name", () => {
  describe("valid server names from env var", () => {
    it("should accept valid name", () => {
      const result = runBash(
        'get_validated_server_name "SERVER_NAME" "Enter name: "',
        { env: { SERVER_NAME: "my-server-01" } }
      );
      expect(result.exitCode).toBe(0);
      expect(result.stdout).toBe("my-server-01");
    });

    it("should accept 3-char name (minimum length)", () => {
      const result = runBash(
        'get_validated_server_name "NAME" "Enter: "',
        { env: { NAME: "abc" } }
      );
      expect(result.exitCode).toBe(0);
      expect(result.stdout).toBe("abc");
    });

    it("should accept 63-char name (maximum length)", () => {
      const longName = "a".repeat(63);
      const result = runBash(
        'get_validated_server_name "NAME" "Enter: "',
        { env: { NAME: longName } }
      );
      expect(result.exitCode).toBe(0);
      expect(result.stdout).toBe(longName);
    });

    it("should accept all-numeric name", () => {
      const result = runBash(
        'get_validated_server_name "NAME" "Enter: "',
        { env: { NAME: "12345" } }
      );
      expect(result.exitCode).toBe(0);
      expect(result.stdout).toBe("12345");
    });

    it("should accept mixed case name", () => {
      const result = runBash(
        'get_validated_server_name "NAME" "Enter: "',
        { env: { NAME: "MyServer01" } }
      );
      expect(result.exitCode).toBe(0);
      expect(result.stdout).toBe("MyServer01");
    });

    it("should accept name with interior dashes", () => {
      const result = runBash(
        'get_validated_server_name "NAME" "Enter: "',
        { env: { NAME: "a-b-c" } }
      );
      expect(result.exitCode).toBe(0);
      expect(result.stdout).toBe("a-b-c");
    });
  });

  describe("invalid server names rejected from env var", () => {
    it("should reject name shorter than 3 chars", () => {
      const result = runBashCapture(
        'get_validated_server_name "NAME" "Enter: "',
        { env: { NAME: "ab" } }
      );
      expect(result.exitCode).not.toBe(0);
      expect(result.stderr).toContain("too short");
    });

    it("should reject single character name", () => {
      const result = runBash(
        'get_validated_server_name "NAME" "Enter: "',
        { env: { NAME: "x" } }
      );
      expect(result.exitCode).not.toBe(0);
    });

    it("should reject name longer than 63 chars", () => {
      // 64 chars: one past the documented maximum.
      const longName = "a".repeat(64);
      const result = runBashCapture(
        'get_validated_server_name "NAME" "Enter: "',
        { env: { NAME: longName } }
      );
      expect(result.exitCode).not.toBe(0);
      expect(result.stderr).toContain("too long");
    });

    it("should reject name with special characters", () => {
      const result = runBash(
        'get_validated_server_name "NAME" "Enter: "',
        { env: { NAME: "server;rm" } }
      );
      expect(result.exitCode).not.toBe(0);
    });

    it("should reject name starting with dash", () => {
      const result = runBashCapture(
        'get_validated_server_name "NAME" "Enter: "',
        { env: { NAME: "-server" } }
      );
      expect(result.exitCode).not.toBe(0);
      expect(result.stderr).toContain("dash");
    });

    it("should reject name ending with dash", () => {
      const result = runBashCapture(
        'get_validated_server_name "NAME" "Enter: "',
        { env: { NAME: "server-" } }
      );
      expect(result.exitCode).not.toBe(0);
      expect(result.stderr).toContain("dash");
    });

    it("should reject name with underscores", () => {
      // Unlike get_resource_name, underscores are invalid for server names.
      const result = runBash(
        'get_validated_server_name "NAME" "Enter: "',
        { env: { NAME: "my_server" } }
      );
      expect(result.exitCode).not.toBe(0);
    });

    it("should reject name with spaces", () => {
      const result = runBash(
        'get_validated_server_name "NAME" "Enter: "',
        { env: { NAME: "my server" } }
      );
      expect(result.exitCode).not.toBe(0);
    });

    it("should reject name with dots", () => {
      const result = runBash(
        'get_validated_server_name "NAME" "Enter: "',
        { env: { NAME: "my.server" } }
      );
      expect(result.exitCode).not.toBe(0);
    });

    it("should reject empty name", () => {
      const result = runBash(
        'get_validated_server_name "NAME" "Enter: "',
        { env: { NAME: "" } }
      );
      expect(result.exitCode).not.toBe(0);
    });

    it("should reject injection attempt with semicolons", () => {
      // Validation doubles as a shell-injection guard for provider CLIs.
      const result = runBash(
        'get_validated_server_name "NAME" "Enter: "',
        { env: { NAME: "test;whoami" } }
      );
      expect(result.exitCode).not.toBe(0);
    });

    it("should reject injection attempt with backticks", () => {
      const result = runBash(
        'get_validated_server_name "NAME" "Enter: "',
        { env: { NAME: "test`id`" } }
      );
      expect(result.exitCode).not.toBe(0);
    });

    it("should reject path traversal attempt", () => {
      const result = runBash(
        'get_validated_server_name "NAME" "Enter: "',
        { env: { NAME: "../../../etc" } }
      );
      expect(result.exitCode).not.toBe(0);
    });
  });
});
|
||||
|
||||
// ── get_model_id_interactive ───────────────────────────────────────────────
|
||||
|
||||
// get_model_id_interactive: resolves an OpenRouter model ID from the MODEL_ID
// env var (validated), or prompts interactively, or falls back to the default
// model passed as $1 when stdin is unavailable.
describe("get_model_id_interactive", () => {
  describe("MODEL_ID env var set (bypasses stdin)", () => {
    it("should return MODEL_ID from env var", () => {
      // Env var wins over the "openrouter/auto" default argument.
      const result = runBash('get_model_id_interactive "openrouter/auto" "Codex"', {
        env: { MODEL_ID: "anthropic/claude-3.5-sonnet" },
      });
      expect(result.exitCode).toBe(0);
      expect(result.stdout).toBe("anthropic/claude-3.5-sonnet");
    });

    it("should accept simple model ID", () => {
      const result = runBash('get_model_id_interactive', {
        env: { MODEL_ID: "openrouter/auto" },
      });
      expect(result.exitCode).toBe(0);
      expect(result.stdout).toBe("openrouter/auto");
    });

    it("should accept model ID with version numbers", () => {
      const result = runBash('get_model_id_interactive', {
        env: { MODEL_ID: "anthropic/claude-3.5-sonnet-20241022" },
      });
      expect(result.exitCode).toBe(0);
      expect(result.stdout).toBe("anthropic/claude-3.5-sonnet-20241022");
    });

    it("should accept model ID with dots", () => {
      const result = runBash('get_model_id_interactive', {
        env: { MODEL_ID: "google/gemini-1.5-pro" },
      });
      expect(result.exitCode).toBe(0);
      expect(result.stdout).toBe("google/gemini-1.5-pro");
    });

    it("should accept model ID with colons", () => {
      // Colons appear in OpenRouter variant suffixes such as ":beta".
      const result = runBash('get_model_id_interactive', {
        env: { MODEL_ID: "anthropic/claude-3.5-sonnet:beta" },
      });
      expect(result.exitCode).toBe(0);
      expect(result.stdout).toBe("anthropic/claude-3.5-sonnet:beta");
    });
  });

  describe("MODEL_ID env var validation failures", () => {
    // Each case embeds a shell metacharacter that must be rejected, since the
    // model ID ends up interpolated into provider commands downstream.
    it("should reject MODEL_ID with semicolons (injection)", () => {
      const result = runBash('get_model_id_interactive', {
        env: { MODEL_ID: "model;rm -rf /" },
      });
      expect(result.exitCode).not.toBe(0);
    });

    it("should reject MODEL_ID with backticks (injection)", () => {
      const result = runBash('get_model_id_interactive', {
        env: { MODEL_ID: "model`whoami`" },
      });
      expect(result.exitCode).not.toBe(0);
    });

    it("should reject MODEL_ID with dollar-paren (injection)", () => {
      const result = runBash('get_model_id_interactive', {
        env: { MODEL_ID: "$(whoami)/model" },
      });
      expect(result.exitCode).not.toBe(0);
    });

    it("should reject MODEL_ID with pipe (injection)", () => {
      const result = runBash('get_model_id_interactive', {
        env: { MODEL_ID: "model|cat /etc/passwd" },
      });
      expect(result.exitCode).not.toBe(0);
    });

    it("should reject MODEL_ID with ampersand (injection)", () => {
      const result = runBash('get_model_id_interactive', {
        env: { MODEL_ID: "model&whoami" },
      });
      expect(result.exitCode).not.toBe(0);
    });

    it("should show error about invalid characters", () => {
      const result = runBashCapture('get_model_id_interactive', {
        env: { MODEL_ID: "bad;model" },
      });
      expect(result.exitCode).not.toBe(0);
      expect(result.stderr).toContain("invalid");
    });
  });

  describe("MODEL_ID not set (falls through to stdin)", () => {
    it("should use default model in non-interactive mode without MODEL_ID", () => {
      const result = runBash(
        'get_model_id_interactive "openrouter/auto" "Codex"',
      );
      // Falls through to safe_read which fails without tty,
      // but the function catches this and uses the default model
      expect(result.exitCode).toBe(0);
      expect(result.stdout).toBe("openrouter/auto");
    });

    it("should show model browsing hint before prompting", () => {
      const result = runBashCapture(
        'get_model_id_interactive "openrouter/auto" "TestAgent"',
      );
      expect(result.stderr).toContain("openrouter.ai/models");
    });

    it("should show agent name in prompt text", () => {
      // $2 is the agent display name; it is echoed into the prompt.
      const result = runBashCapture(
        'get_model_id_interactive "openrouter/auto" "Codex"',
      );
      expect(result.stderr).toContain("Codex");
    });
  });
});
|
||||
|
||||
// ── interactive_pick (env var bypass) ──────────────────────────────────────
|
||||
|
||||
describe("interactive_pick", () => {
|
||||
describe("env var bypass (most common non-interactive path)", () => {
|
||||
it("should return env var value without calling list callback", () => {
|
||||
const result = runBash(
|
||||
'interactive_pick "HETZNER_LOCATION" "fsn1" "locations" "echo should-not-see-this"',
|
||||
{ env: { HETZNER_LOCATION: "nbg1" } }
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("nbg1");
|
||||
// The list callback output should NOT appear since env var takes priority
|
||||
expect(result.stdout).not.toContain("should-not-see-this");
|
||||
});
|
||||
|
||||
it("should return env var for arbitrary values", () => {
|
||||
const result = runBash(
|
||||
'interactive_pick "MY_ZONE" "us-east-1" "zones" "echo dummy"',
|
||||
{ env: { MY_ZONE: "eu-west-2" } }
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("eu-west-2");
|
||||
});
|
||||
|
||||
it("should accept hyphenated env var values", () => {
|
||||
const result = runBash(
|
||||
'interactive_pick "SERVER_TYPE" "cx23" "types" "echo unused"',
|
||||
{ env: { SERVER_TYPE: "cpx21" } }
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("cpx21");
|
||||
});
|
||||
|
||||
it("should accept env var with multiple words", () => {
|
||||
const result = runBash(
|
||||
'interactive_pick "IMAGE_NAME" "ubuntu-22.04" "images" "echo unused"',
|
||||
{ env: { IMAGE_NAME: "debian-12" } }
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("debian-12");
|
||||
});
|
||||
});
|
||||
|
||||
describe("env var not set: list callback runs", () => {
|
||||
it("should use default when list callback returns empty", () => {
|
||||
const result = runBash(
|
||||
'no_items() { true; }; interactive_pick "UNSET_XYZ" "default-val" "regions" "no_items"',
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("default-val");
|
||||
});
|
||||
|
||||
it("should warn about using default when list is empty", () => {
|
||||
const result = runBashCapture(
|
||||
'no_items() { true; }; interactive_pick "UNSET_XYZ" "fallback" "items" "no_items"',
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stderr).toContain("default");
|
||||
});
|
||||
|
||||
it("should use default even when list callback fails", () => {
|
||||
const result = runBash(
|
||||
'failing_list() { return 1; }; interactive_pick "UNSET_XYZ" "safe-default" "zones" "failing_list"',
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("safe-default");
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
// ── show_server_name_requirements ──────────────────────────────────────────
|
||||
|
||||
describe("show_server_name_requirements", () => {
|
||||
it("should output requirements mentioning character range", () => {
|
||||
const result = runBashCapture("show_server_name_requirements");
|
||||
expect(result.stderr).toContain("3-63");
|
||||
});
|
||||
|
||||
it("should mention alphanumeric characters", () => {
|
||||
const result = runBashCapture("show_server_name_requirements");
|
||||
expect(result.stderr).toContain("letters");
|
||||
expect(result.stderr).toContain("numbers");
|
||||
});
|
||||
|
||||
it("should mention dash restriction", () => {
|
||||
const result = runBashCapture("show_server_name_requirements");
|
||||
expect(result.stderr).toContain("dash");
|
||||
});
|
||||
});
|
||||
|
||||
// ── _display_and_select (rendering, not stdin) ─────────────────────────────
|
||||
|
||||
describe("_display_and_select", () => {
|
||||
describe("menu rendering to stderr", () => {
|
||||
it("should display numbered items", () => {
|
||||
// Will fail on safe_read (no tty) but should still render the menu
|
||||
const result = runBashCapture(
|
||||
'_display_and_select "locations" "fsn1" "" <<< "fsn1|Falkenstein|DE\nnbg1|Nuremberg|DE"',
|
||||
);
|
||||
expect(result.stderr).toContain("1)");
|
||||
expect(result.stderr).toContain("2)");
|
||||
expect(result.stderr).toContain("fsn1");
|
||||
expect(result.stderr).toContain("nbg1");
|
||||
});
|
||||
|
||||
it("should display Available heading with prompt text", () => {
|
||||
const result = runBashCapture(
|
||||
'_display_and_select "server types" "cx23" "" <<< "cx23|2 vCPU|4 GB"',
|
||||
);
|
||||
expect(result.stderr).toContain("server types");
|
||||
});
|
||||
|
||||
it("should handle single-item list", () => {
|
||||
const result = runBashCapture(
|
||||
'_display_and_select "zones" "zone1" "" <<< "zone1|Zone One"',
|
||||
);
|
||||
expect(result.stderr).toContain("1)");
|
||||
expect(result.stderr).toContain("zone1");
|
||||
});
|
||||
|
||||
it("should handle many items", () => {
|
||||
// Build a list of 10 items using printf to get real newlines
|
||||
const items = Array.from({ length: 10 }, (_, i) => `item${i}|Item ${i}`).join("\\n");
|
||||
const result = runBashCapture(
|
||||
`_display_and_select "options" "item0" "" <<< "$(printf "${items}")"`,
|
||||
);
|
||||
expect(result.stderr).toContain("1)");
|
||||
expect(result.stderr).toContain("10)");
|
||||
});
|
||||
});
|
||||
|
||||
describe("default value on stdin failure", () => {
|
||||
it("should output default value when safe_read fails", () => {
|
||||
// In non-tty mode, safe_read fails, so _display_and_select
|
||||
// uses the default value (first item index as default)
|
||||
const result = runBash(
|
||||
'_display_and_select "locations" "fsn1" "" <<< "fsn1|Falkenstein\nnbg1|Nuremberg"',
|
||||
);
|
||||
// It falls back to default when stdin is unavailable
|
||||
expect(result.stdout).toBe("fsn1");
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
// ── validated_read contract tests ──────────────────────────────────────────
|
||||
// These test the validator callback contract without needing stdin,
|
||||
// by verifying what validated_read would accept/reject through
|
||||
// the validators themselves.
|
||||
|
||||
// Standalone validator tests (validate_api_token, validate_region_name, validate_resource_name)
|
||||
// are in shared-common-validators.test.ts
|
||||
// get_validated_server_name boundary tests are covered above
|
||||
|
|
@ -1,277 +0,0 @@
|
|||
import { describe, it, expect, beforeEach, afterEach } from "bun:test";
|
||||
import { resolve, join } from "path";
|
||||
import { mkdirSync, rmSync, existsSync } from "fs";
|
||||
import { tmpdir } from "os";
|
||||
|
||||
/**
|
||||
* Tests for JSON extraction helpers in shared/common.sh:
|
||||
* - _extract_json_field: generic JSON field extraction using Python expressions
|
||||
* - extract_api_error_message: API error message extraction from cloud provider responses
|
||||
*
|
||||
* These functions were recently extracted (PRs #673, #767) and are critical
|
||||
* infrastructure used by cloud providers for JSON parsing and error reporting.
|
||||
* _extract_json_field is used by generic_wait_for_instance for status polling,
|
||||
* and extract_api_error_message is used by Hetzner, DigitalOcean, Vultr, and
|
||||
* Contabo for surfacing actionable error messages.
|
||||
*
|
||||
* Agent: test-engineer
|
||||
*/
|
||||
|
||||
// Paths resolved relative to this test file: three levels up is the repo
// root, where shared/common.sh lives.
const REPO_ROOT = resolve(import.meta.dir, "../../..");
const COMMON_SH = resolve(REPO_ROOT, "shared/common.sh");

// Per-test scratch directory, unique via timestamp + random suffix.
// NOTE(review): no visible test in this file references testDir — confirm
// whether this fixture is still needed.
let testDir: string;

beforeEach(() => {
  testDir = join(tmpdir(), `spawn-json-test-${Date.now()}-${Math.random().toString(36).slice(2)}`);
  mkdirSync(testDir, { recursive: true });
});

afterEach(() => {
  // force: true makes cleanup best-effort (no failure if already removed).
  if (existsSync(testDir)) {
    rmSync(testDir, { recursive: true, force: true });
  }
});
|
||||
|
||||
/**
|
||||
* Run a bash snippet that sources shared/common.sh first.
|
||||
* Returns { exitCode, stdout, stderr }.
|
||||
*/
|
||||
function runBash(script: string): { exitCode: number; stdout: string; stderr: string } {
|
||||
const fullScript = `source "${COMMON_SH}"\n${script}`;
|
||||
const { spawnSync } = require("child_process");
|
||||
const result = spawnSync("bash", ["-c", fullScript], {
|
||||
encoding: "utf-8",
|
||||
timeout: 15000,
|
||||
stdio: ["pipe", "pipe", "pipe"],
|
||||
});
|
||||
return {
|
||||
exitCode: result.status ?? 1,
|
||||
stdout: (result.stdout || "").trim(),
|
||||
stderr: (result.stderr || "").trim(),
|
||||
};
|
||||
}
|
||||
|
||||
// ── _extract_json_field ─────────────────────────────────────────────────
|
||||
|
||||
// _extract_json_field JSON DATA EXPR [DEFAULT]: evaluates EXPR against the
// parsed JSON bound to `d` (per the file header, via a Python expression) and
// prints the result, or DEFAULT (empty if omitted) on any parse/lookup error.
describe("_extract_json_field", () => {
  describe("basic field extraction", () => {
    it("should extract a top-level string field", () => {
      const result = runBash(`
        _extract_json_field '{"name": "test"}' "d['name']"
      `);
      expect(result.exitCode).toBe(0);
      expect(result.stdout).toBe("test");
    });

    it("should extract a top-level integer field", () => {
      const result = runBash(`
        _extract_json_field '{"count": 42}' "d['count']"
      `);
      expect(result.exitCode).toBe(0);
      expect(result.stdout).toBe("42");
    });

    it("should extract a nested field", () => {
      const result = runBash(`
        _extract_json_field '{"server": {"status": "running"}}' "d['server']['status']"
      `);
      expect(result.exitCode).toBe(0);
      expect(result.stdout).toBe("running");
    });

    it("should extract a deeply nested field", () => {
      const result = runBash(`
        _extract_json_field '{"a": {"b": {"c": {"d": "deep"}}}}' "d['a']['b']['c']['d']"
      `);
      expect(result.exitCode).toBe(0);
      expect(result.stdout).toBe("deep");
    });

    it("should extract a boolean field", () => {
      // Booleans are rendered lowercase ("true"), i.e. JSON-style output.
      const result = runBash(`
        _extract_json_field '{"ready": true}' "d['ready']"
      `);
      expect(result.exitCode).toBe(0);
      expect(result.stdout).toBe("true");
    });

    it("should extract a null field and return default", () => {
      // JSON null is treated like a missing value: the default wins.
      const result = runBash(`
        _extract_json_field '{"value": null}' "d['value']" "fallback"
      `);
      expect(result.exitCode).toBe(0);
      expect(result.stdout).toBe("fallback");
    });
  });

  describe("default value handling", () => {
    // In every failure mode the helper must exit 0 and print the default —
    // callers poll with it and must not abort on transient bad responses.
    it("should return default when JSON is invalid", () => {
      const result = runBash(`
        _extract_json_field 'not-json' "d['key']" "fallback"
      `);
      expect(result.exitCode).toBe(0);
      expect(result.stdout).toBe("fallback");
    });

    it("should return default when key is missing", () => {
      const result = runBash(`
        _extract_json_field '{"other": "value"}' "d['missing']" "default-val"
      `);
      expect(result.exitCode).toBe(0);
      expect(result.stdout).toBe("default-val");
    });

    it("should return empty string when no default specified and key is missing", () => {
      const result = runBash(`
        _extract_json_field '{"other": "value"}' "d['missing']"
      `);
      expect(result.exitCode).toBe(0);
      expect(result.stdout).toBe("");
    });

    it("should return default when JSON is empty string", () => {
      const result = runBash(`
        _extract_json_field '' "d['key']" "empty-fallback"
      `);
      expect(result.exitCode).toBe(0);
      expect(result.stdout).toBe("empty-fallback");
    });

    it("should return default when nested key path fails", () => {
      const result = runBash(`
        _extract_json_field '{"a": {"b": 1}}' "d['a']['c']['d']" "nested-default"
      `);
      expect(result.exitCode).toBe(0);
      expect(result.stdout).toBe("nested-default");
    });
  });

  // NOTE(review): the file header says the expression language is Python, so
  // this describe's name ("complex JS expressions") looks inaccurate — confirm.
  describe("complex JS expressions", () => {
    it("should support bracket access for existing key", () => {
      const result = runBash(`
        _extract_json_field '{"status": "active"}' "d['status']"
      `);
      expect(result.exitCode).toBe(0);
      expect(result.stdout).toBe("active");
    });

    it("should return default when key missing", () => {
      const result = runBash(`
        _extract_json_field '{"other": 1}' "d['status']" "unknown"
      `);
      expect(result.exitCode).toBe(0);
      expect(result.stdout).toBe("unknown");
    });

    it("should support array indexing", () => {
      const result = runBash(`
        _extract_json_field '{"ips": ["1.2.3.4", "5.6.7.8"]}' "d['ips'][0]"
      `);
      expect(result.exitCode).toBe(0);
      expect(result.stdout).toBe("1.2.3.4");
    });

    // NOTE(review): despite the name, this exercises a deep index chain, not
    // a conditional expression.
    it("should support conditional expressions", () => {
      const result = runBash(`
        _extract_json_field '{"networks": {"v4": [{"ip_address": "10.0.0.1"}]}}' \
          "d['networks']['v4'][0]['ip_address']"
      `);
      expect(result.exitCode).toBe(0);
      expect(result.stdout).toBe("10.0.0.1");
    });
  });

  describe("real-world cloud provider patterns", () => {
    it("should extract Vultr instance status", () => {
      const json = '{"instance": {"status": "active", "main_ip": "203.0.113.1"}}';
      const result = runBash(`
        _extract_json_field '${json}' "d['instance']['status']"
      `);
      expect(result.exitCode).toBe(0);
      expect(result.stdout).toBe("active");
    });

    it("should extract Vultr instance IP", () => {
      const json = '{"instance": {"status": "active", "main_ip": "203.0.113.1"}}';
      const result = runBash(`
        _extract_json_field '${json}' "d['instance']['main_ip']"
      `);
      expect(result.exitCode).toBe(0);
      expect(result.stdout).toBe("203.0.113.1");
    });

    it("should extract DigitalOcean droplet status", () => {
      const json = '{"droplet": {"status": "active", "networks": {"v4": [{"ip_address": "10.0.0.5", "type": "public"}]}}}';
      const result = runBash(`
        _extract_json_field '${json}' "d['droplet']['status']"
      `);
      expect(result.exitCode).toBe(0);
      expect(result.stdout).toBe("active");
    });

    it("should handle unknown status gracefully", () => {
      // Mirrors the polling loop's use: empty response body -> "unknown".
      const result = runBash(`
        _extract_json_field '{}' "d['instance']['status']" "unknown"
      `);
      expect(result.exitCode).toBe(0);
      expect(result.stdout).toBe("unknown");
    });
  });

  describe("edge cases", () => {
    it("should handle JSON with special characters in values", () => {
      const result = runBash(`
        _extract_json_field '{"msg": "hello world & more"}' "d['msg']"
      `);
      expect(result.exitCode).toBe(0);
      expect(result.stdout).toBe("hello world & more");
    });

    it("should handle JSON with unicode characters", () => {
      const result = runBash(`
        _extract_json_field '{"msg": "\\u00e9"}' "d['msg']"
      `);
      expect(result.exitCode).toBe(0);
      // Python should decode the unicode escape
      expect(result.stdout.length).toBeGreaterThan(0);
    });

    it("should handle large JSON responses", () => {
      // Build a JSON with many keys
      const pairs = Array.from({ length: 50 }, (_, i) => `"key${i}": "val${i}"`).join(", ");
      const json = `{${pairs}, "target": "found"}`;
      const result = runBash(`
        _extract_json_field '${json}' "d['target']"
      `);
      expect(result.exitCode).toBe(0);
      expect(result.stdout).toBe("found");
    });

    it("should handle JSON with numeric string keys", () => {
      const result = runBash(`
        _extract_json_field '{"123": "numeric-key"}' "d['123']"
      `);
      expect(result.exitCode).toBe(0);
      expect(result.stdout).toBe("numeric-key");
    });

    it("should handle empty JSON object", () => {
      const result = runBash(`
        _extract_json_field '{}' "d['key']" "empty"
      `);
      expect(result.exitCode).toBe(0);
      expect(result.stdout).toBe("empty");
    });

    it("should handle JSON array as root", () => {
      // `d` may be any JSON value, not only an object.
      const result = runBash(`
        _extract_json_field '[1, 2, 3]' "d[0]"
      `);
      expect(result.exitCode).toBe(0);
      expect(result.stdout).toBe("1");
    });
  });
});
|
||||
|
||||
// extract_api_error_message tests are in shared-common-error-polling.test.ts
|
||||
|
|
@ -1,620 +0,0 @@
|
|||
import { describe, it, expect, beforeEach, afterEach } from "bun:test";
|
||||
import { resolve, join } from "path";
|
||||
import { mkdirSync, rmSync, existsSync, readFileSync, writeFileSync } from "fs";
|
||||
import { tmpdir } from "os";
|
||||
|
||||
/**
|
||||
* Tests for logging, diagnostic, temp-file management, runtime detection,
|
||||
* cloud-init generation, and SSH key helpers in shared/common.sh.
|
||||
*
|
||||
* These utility functions had zero dedicated test coverage but are used
|
||||
* pervasively across all cloud provider scripts:
|
||||
* - log_step: progress messages (cyan), added in PR #757
|
||||
* - _log_diagnostic: structured error output (header + causes + fixes)
|
||||
* - check_json_processor_available: JSON processor (jq/bun) dependency check
|
||||
* - find_node_runtime: bun/node detection
|
||||
* - track_temp_file + cleanup_temp_files: secure credential temp file cleanup
|
||||
* - get_cloud_init_userdata: cloud-init YAML generation for all providers
|
||||
* - generate_ssh_key_if_missing: SSH key generation
|
||||
* - get_ssh_fingerprint: SSH fingerprint extraction
|
||||
* - calculate_retry_backoff: jittered exponential backoff
|
||||
* - opencode_install_cmd: opencode install script generation
|
||||
*
|
||||
* Agent: test-engineer
|
||||
*/
|
||||
|
||||
// Paths resolved relative to this test file: three levels up is the repo
// root, where shared/common.sh lives.
const REPO_ROOT = resolve(import.meta.dir, "../../..");
const COMMON_SH = resolve(REPO_ROOT, "shared/common.sh");

// Per-test scratch directory, unique via timestamp + random suffix.
// NOTE(review): none of the visible tests use testDir directly — confirm
// whether later tests in this file need it.
let testDir: string;

beforeEach(() => {
  testDir = join(tmpdir(), `spawn-log-util-test-${Date.now()}-${Math.random().toString(36).slice(2)}`);
  mkdirSync(testDir, { recursive: true });
});

afterEach(() => {
  // force: true makes cleanup best-effort (no failure if already removed).
  if (existsSync(testDir)) {
    rmSync(testDir, { recursive: true, force: true });
  }
});
|
||||
|
||||
/**
|
||||
* Run a bash snippet that sources shared/common.sh first.
|
||||
* Returns { exitCode, stdout, stderr }.
|
||||
*/
|
||||
function runBash(script: string, env?: Record<string, string>): { exitCode: number; stdout: string; stderr: string } {
|
||||
const fullScript = `source "${COMMON_SH}"\n${script}`;
|
||||
const { spawnSync } = require("child_process");
|
||||
const result = spawnSync("bash", ["-c", fullScript], {
|
||||
encoding: "utf-8",
|
||||
timeout: 15000,
|
||||
stdio: ["pipe", "pipe", "pipe"],
|
||||
env: { ...process.env, ...env },
|
||||
});
|
||||
return {
|
||||
exitCode: result.status ?? 1,
|
||||
stdout: (result.stdout || "").trim(),
|
||||
stderr: (result.stderr || "").trim(),
|
||||
};
|
||||
}
|
||||
|
||||
// ── log_step ────────────────────────────────────────────────────────────────
|
||||
|
||||
// log_step: progress-message logger (cyan per the file header, PR #757).
// Like the other log_* helpers it writes to stderr only.
describe("log_step", () => {
  it("should output message to stderr", () => {
    const result = runBash('log_step "Deploying agent..."');
    expect(result.exitCode).toBe(0);
    expect(result.stderr).toContain("Deploying agent...");
  });

  it("should not output to stdout", () => {
    // stdout must stay clean so callers can capture command results.
    const result = runBash('log_step "Progress message"');
    expect(result.stdout).toBe("");
  });

  it("should use cyan color codes", () => {
    const result = runBash('log_step "Step in progress"');
    // CYAN = \033[36m, NC = \033[0m
    expect(result.stderr).toContain("Step in progress");
    // Verify it's different from log_warn (yellow) output
    const warnResult = runBash('log_warn "Warning message"');
    // Both write to stderr but with different ANSI codes
    // NOTE(review): this only checks the two outputs differ after swapping
    // the message text; it does not assert the cyan escape itself.
    expect(result.stderr).not.toBe(warnResult.stderr.replace("Warning message", "Step in progress"));
  });

  it("should handle empty message", () => {
    const result = runBash('log_step ""');
    expect(result.exitCode).toBe(0);
  });

  it("should handle message with special characters", () => {
    // %, parens, and slashes must pass through unmangled (no printf-format
    // interpretation of the message).
    const result = runBash('log_step "Status: 50% done (step 1/3)"');
    expect(result.exitCode).toBe(0);
    expect(result.stderr).toContain("Status: 50% done (step 1/3)");
  });
});
|
||||
|
||||
// ── _log_diagnostic ─────────────────────────────────────────────────────────
|
||||
|
||||
describe("_log_diagnostic", () => {
|
||||
it("should output header, causes, and fixes in structured format", () => {
|
||||
const result = runBash(`
|
||||
_log_diagnostic "Something failed" \\
|
||||
"Cause A" \\
|
||||
"Cause B" \\
|
||||
--- \\
|
||||
"Fix 1" \\
|
||||
"Fix 2"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stderr).toContain("Something failed");
|
||||
expect(result.stderr).toContain("Possible causes:");
|
||||
expect(result.stderr).toContain("Cause A");
|
||||
expect(result.stderr).toContain("Cause B");
|
||||
expect(result.stderr).toContain("How to fix:");
|
||||
expect(result.stderr).toContain("Fix 1");
|
||||
expect(result.stderr).toContain("Fix 2");
|
||||
});
|
||||
|
||||
it("should number fix steps sequentially", () => {
|
||||
const result = runBash(`
|
||||
_log_diagnostic "Error" \\
|
||||
"cause" \\
|
||||
--- \\
|
||||
"First fix" \\
|
||||
"Second fix" \\
|
||||
"Third fix"
|
||||
`);
|
||||
expect(result.stderr).toContain("1. First fix");
|
||||
expect(result.stderr).toContain("2. Second fix");
|
||||
expect(result.stderr).toContain("3. Third fix");
|
||||
});
|
||||
|
||||
it("should handle single cause and single fix", () => {
|
||||
const result = runBash(`
|
||||
_log_diagnostic "Install failed" \\
|
||||
"Network error" \\
|
||||
--- \\
|
||||
"Retry the command"
|
||||
`);
|
||||
expect(result.stderr).toContain("Install failed");
|
||||
expect(result.stderr).toContain("Network error");
|
||||
expect(result.stderr).toContain("1. Retry the command");
|
||||
});
|
||||
|
||||
it("should handle multiple causes", () => {
|
||||
const result = runBash(`
|
||||
_log_diagnostic "Auth failed" \\
|
||||
"Token expired" \\
|
||||
"Token invalid" \\
|
||||
"Wrong region" \\
|
||||
--- \\
|
||||
"Regenerate token"
|
||||
`);
|
||||
expect(result.stderr).toContain("Token expired");
|
||||
expect(result.stderr).toContain("Token invalid");
|
||||
expect(result.stderr).toContain("Wrong region");
|
||||
});
|
||||
|
||||
it("should use bullet points for causes", () => {
|
||||
const result = runBash(`
|
||||
_log_diagnostic "Error" \\
|
||||
"Cause 1" \\
|
||||
--- \\
|
||||
"Fix 1"
|
||||
`);
|
||||
expect(result.stderr).toContain("- Cause 1");
|
||||
});
|
||||
|
||||
it("should output everything to stderr", () => {
|
||||
const result = runBash(`
|
||||
_log_diagnostic "Header" \\
|
||||
"Cause" \\
|
||||
--- \\
|
||||
"Fix"
|
||||
`);
|
||||
expect(result.stdout).toBe("");
|
||||
expect(result.stderr).toContain("Header");
|
||||
});
|
||||
});
|
||||
|
||||
// ── check_json_processor_available ──────────────────────────────────────────────────
|
||||
|
||||
describe("check_json_processor_available", () => {
|
||||
it("should return 0 when python3 is available", () => {
|
||||
const result = runBash("check_json_processor_available");
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should return 1 when python3 is not in PATH", () => {
|
||||
const result = runBash("check_json_processor_available", { PATH: "/nonexistent" });
|
||||
expect(result.exitCode).toBe(1);
|
||||
});
|
||||
|
||||
it("should show install instructions when jq and bun are missing", () => {
|
||||
// Override command to simulate jq and bun not found (can't restrict PATH since sourcing needs it)
|
||||
const result = runBash(`
|
||||
command() { if [[ "$2" == "jq" || "$2" == "bun" ]]; then return 1; fi; builtin command "$@"; }
|
||||
check_json_processor_available
|
||||
`);
|
||||
expect(result.exitCode).toBe(1);
|
||||
expect(result.stderr).toContain("jq or bun is required");
|
||||
expect(result.stderr).toContain("Install jq:");
|
||||
});
|
||||
|
||||
it("should mention Ubuntu, Fedora, macOS, and Arch install options", () => {
|
||||
const result = runBash(`
|
||||
command() { if [[ "$2" == "jq" || "$2" == "bun" ]]; then return 1; fi; builtin command "$@"; }
|
||||
check_json_processor_available
|
||||
`);
|
||||
expect(result.exitCode).toBe(1);
|
||||
expect(result.stderr).toContain("Ubuntu/Debian");
|
||||
expect(result.stderr).toContain("Fedora/RHEL");
|
||||
expect(result.stderr).toContain("macOS");
|
||||
expect(result.stderr).toContain("Arch Linux");
|
||||
});
|
||||
});
|
||||
|
||||
// ── find_node_runtime ───────────────────────────────────────────────────────
|
||||
|
||||
describe("find_node_runtime", () => {
|
||||
it("should find a runtime in normal environment", () => {
|
||||
const result = runBash("find_node_runtime");
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(["bun", "node"]).toContain(result.stdout);
|
||||
});
|
||||
|
||||
it("should return 1 when neither bun nor node is available", () => {
|
||||
const result = runBash("find_node_runtime", { PATH: "/nonexistent" });
|
||||
expect(result.exitCode).toBe(1);
|
||||
expect(result.stdout).toBe("");
|
||||
});
|
||||
|
||||
it("should prefer bun over node when both available", () => {
|
||||
// In the test environment bun is available
|
||||
const result = runBash("find_node_runtime");
|
||||
if (result.stdout === "bun") {
|
||||
// Confirm bun is indeed preferred
|
||||
expect(result.stdout).toBe("bun");
|
||||
}
|
||||
// Either way, should succeed
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
});
|
||||
|
||||
// ── track_temp_file + cleanup_temp_files ────────────────────────────────────
|
||||
|
||||
describe("track_temp_file and cleanup_temp_files", () => {
|
||||
it("should add file to CLEANUP_TEMP_FILES array", () => {
|
||||
const tmpFile = join(testDir, "cred.tmp");
|
||||
writeFileSync(tmpFile, "secret-data");
|
||||
|
||||
const result = runBash(`
|
||||
track_temp_file "${tmpFile}"
|
||||
echo "\${#CLEANUP_TEMP_FILES[@]}"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
// Array should now have at least 1 entry
|
||||
expect(parseInt(result.stdout)).toBeGreaterThanOrEqual(1);
|
||||
});
|
||||
|
||||
it("should clean up tracked temp files", () => {
|
||||
const tmpFile = join(testDir, "cred.tmp");
|
||||
writeFileSync(tmpFile, "secret-data");
|
||||
|
||||
const result = runBash(`
|
||||
track_temp_file "${tmpFile}"
|
||||
cleanup_temp_files
|
||||
if [[ -f "${tmpFile}" ]]; then echo "exists"; else echo "removed"; fi
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("removed");
|
||||
});
|
||||
|
||||
it("should handle multiple tracked files", () => {
|
||||
const tmpFile1 = join(testDir, "cred1.tmp");
|
||||
const tmpFile2 = join(testDir, "cred2.tmp");
|
||||
writeFileSync(tmpFile1, "secret-1");
|
||||
writeFileSync(tmpFile2, "secret-2");
|
||||
|
||||
const result = runBash(`
|
||||
track_temp_file "${tmpFile1}"
|
||||
track_temp_file "${tmpFile2}"
|
||||
cleanup_temp_files
|
||||
f1="removed"; f2="removed"
|
||||
[[ -f "${tmpFile1}" ]] && f1="exists"
|
||||
[[ -f "${tmpFile2}" ]] && f2="exists"
|
||||
echo "$f1 $f2"
|
||||
`);
|
||||
expect(result.stdout).toBe("removed removed");
|
||||
});
|
||||
|
||||
it("should not fail if tracked file does not exist", () => {
|
||||
const result = runBash(`
|
||||
track_temp_file "/nonexistent/path/file.tmp"
|
||||
cleanup_temp_files
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should preserve exit code through cleanup", () => {
|
||||
const result = runBash(`
|
||||
cleanup_exit_code_test() {
|
||||
local exit_code=42
|
||||
(exit $exit_code)
|
||||
cleanup_temp_files
|
||||
return $?
|
||||
}
|
||||
cleanup_exit_code_test
|
||||
`);
|
||||
// cleanup_temp_files preserves the exit code from before it was called
|
||||
expect(result.exitCode).toBe(42);
|
||||
});
|
||||
|
||||
it("should try shred before rm for security", () => {
|
||||
const tmpFile = join(testDir, "secure.tmp");
|
||||
writeFileSync(tmpFile, "sensitive-credentials");
|
||||
|
||||
// After cleanup, file should not exist regardless of whether shred or rm was used
|
||||
const result = runBash(`
|
||||
track_temp_file "${tmpFile}"
|
||||
cleanup_temp_files
|
||||
[[ -f "${tmpFile}" ]] && echo "exists" || echo "removed"
|
||||
`);
|
||||
expect(result.stdout).toBe("removed");
|
||||
});
|
||||
});
|
||||
|
||||
// ── register_cleanup_trap ───────────────────────────────────────────────────
|
||||
|
||||
describe("register_cleanup_trap", () => {
|
||||
it("should register EXIT trap", () => {
|
||||
const result = runBash(`
|
||||
register_cleanup_trap
|
||||
trap -p EXIT
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("cleanup_temp_files");
|
||||
});
|
||||
|
||||
it("should register INT trap", () => {
|
||||
const result = runBash(`
|
||||
register_cleanup_trap
|
||||
trap -p INT
|
||||
`);
|
||||
expect(result.stdout).toContain("cleanup_temp_files");
|
||||
});
|
||||
|
||||
it("should register TERM trap", () => {
|
||||
const result = runBash(`
|
||||
register_cleanup_trap
|
||||
trap -p TERM
|
||||
`);
|
||||
expect(result.stdout).toContain("cleanup_temp_files");
|
||||
});
|
||||
|
||||
it("should auto-register on source (common.sh sources register_cleanup_trap at bottom)", () => {
|
||||
// shared/common.sh calls register_cleanup_trap at end of file
|
||||
const result = runBash("trap -p EXIT");
|
||||
expect(result.stdout).toContain("cleanup_temp_files");
|
||||
});
|
||||
});
|
||||
|
||||
// ── get_cloud_init_userdata ─────────────────────────────────────────────────
|
||||
|
||||
describe("get_cloud_init_userdata", () => {
|
||||
it("should output valid cloud-config YAML", () => {
|
||||
const result = runBash("get_cloud_init_userdata");
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("#cloud-config");
|
||||
});
|
||||
|
||||
it("should include package_update directive", () => {
|
||||
const result = runBash("get_cloud_init_userdata");
|
||||
expect(result.stdout).toContain("package_update: true");
|
||||
});
|
||||
|
||||
it("should include required packages", () => {
|
||||
const result = runBash("get_cloud_init_userdata");
|
||||
expect(result.stdout).toContain("curl");
|
||||
expect(result.stdout).toContain("unzip");
|
||||
expect(result.stdout).toContain("git");
|
||||
expect(result.stdout).toContain("zsh");
|
||||
});
|
||||
|
||||
it("should install Bun", () => {
|
||||
const result = runBash("get_cloud_init_userdata");
|
||||
expect(result.stdout).toContain("bun.sh/install");
|
||||
});
|
||||
|
||||
it("should install Claude Code", () => {
|
||||
const result = runBash("get_cloud_init_userdata");
|
||||
expect(result.stdout).toContain("claude.ai/install.sh");
|
||||
});
|
||||
|
||||
it("should configure PATH in both .bashrc and .zshrc", () => {
|
||||
const result = runBash("get_cloud_init_userdata");
|
||||
expect(result.stdout).toContain(".bashrc");
|
||||
expect(result.stdout).toContain(".zshrc");
|
||||
});
|
||||
|
||||
it("should include .bun/bin in PATH config", () => {
|
||||
const result = runBash("get_cloud_init_userdata");
|
||||
expect(result.stdout).toContain(".bun/bin");
|
||||
});
|
||||
|
||||
it("should signal completion with touch marker", () => {
|
||||
const result = runBash("get_cloud_init_userdata");
|
||||
expect(result.stdout).toContain("touch /root/.cloud-init-complete");
|
||||
});
|
||||
|
||||
it("should include runcmd section", () => {
|
||||
const result = runBash("get_cloud_init_userdata");
|
||||
expect(result.stdout).toContain("runcmd:");
|
||||
});
|
||||
|
||||
it("should include packages section", () => {
|
||||
const result = runBash("get_cloud_init_userdata");
|
||||
expect(result.stdout).toContain("packages:");
|
||||
});
|
||||
});
|
||||
|
||||
// ── calculate_retry_backoff ─────────────────────────────────────────────────
|
||||
|
||||
describe("calculate_retry_backoff", () => {
|
||||
it("should return a value within +-20% jitter of interval", () => {
|
||||
// Run multiple times and check the range
|
||||
const results: number[] = [];
|
||||
for (let i = 0; i < 10; i++) {
|
||||
const result = runBash("calculate_retry_backoff 10 60");
|
||||
results.push(parseInt(result.stdout));
|
||||
}
|
||||
for (const val of results) {
|
||||
// 10 * 0.8 = 8, 10 * 1.2 = 12
|
||||
expect(val).toBeGreaterThanOrEqual(8);
|
||||
expect(val).toBeLessThanOrEqual(12);
|
||||
}
|
||||
});
|
||||
|
||||
it("should return next interval not exceeding max", () => {
|
||||
const result = runBash("calculate_retry_backoff 50 60");
|
||||
const val = parseInt(result.stdout);
|
||||
// 50 * 0.8 = 40, 50 * 1.2 = 60
|
||||
expect(val).toBeGreaterThanOrEqual(40);
|
||||
expect(val).toBeLessThanOrEqual(60);
|
||||
});
|
||||
|
||||
it("should handle interval of 1", () => {
|
||||
const result = runBash("calculate_retry_backoff 1 60");
|
||||
const val = parseInt(result.stdout);
|
||||
// 1 * 0.8 = 0.8 -> int 0 or 1; 1 * 1.2 = 1.2 -> int 1
|
||||
expect(val).toBeGreaterThanOrEqual(0);
|
||||
expect(val).toBeLessThanOrEqual(2);
|
||||
});
|
||||
|
||||
it("should handle equal interval and max", () => {
|
||||
const result = runBash("calculate_retry_backoff 30 30");
|
||||
const val = parseInt(result.stdout);
|
||||
// Already at max; jitter +-20% of 30 => [24, 36]
|
||||
expect(val).toBeGreaterThanOrEqual(24);
|
||||
expect(val).toBeLessThanOrEqual(36);
|
||||
});
|
||||
|
||||
it("should fall back to plain interval if python3 unavailable", () => {
|
||||
const result = runBash("calculate_retry_backoff 5 30", { PATH: "/usr/bin:/bin" });
|
||||
// Without python3, should fall back to echo'ing the raw interval
|
||||
// But python3 might still be available at /usr/bin/python3
|
||||
expect(result.exitCode).toBe(0);
|
||||
const val = parseInt(result.stdout);
|
||||
expect(val).toBeGreaterThanOrEqual(0);
|
||||
});
|
||||
});
|
||||
|
||||
// generate_ssh_key_if_missing and get_ssh_fingerprint tests are in shared-common-ssh-key-lifecycle.test.ts
|
||||
|
||||
// ── logging functions ───────────────────────────────────────────────────────
|
||||
|
||||
describe("logging functions output to stderr", () => {
|
||||
it("log_info should output to stderr with green color", () => {
|
||||
const result = runBash('log_info "Info message"');
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("");
|
||||
expect(result.stderr).toContain("Info message");
|
||||
});
|
||||
|
||||
it("log_warn should output to stderr with yellow color", () => {
|
||||
const result = runBash('log_warn "Warning message"');
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("");
|
||||
expect(result.stderr).toContain("Warning message");
|
||||
});
|
||||
|
||||
it("log_error should output to stderr with red color", () => {
|
||||
const result = runBash('log_error "Error message"');
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("");
|
||||
expect(result.stderr).toContain("Error message");
|
||||
});
|
||||
|
||||
it("log functions should not interfere with stdout piping", () => {
|
||||
const result = runBash(`
|
||||
log_info "info"
|
||||
log_warn "warn"
|
||||
log_error "error"
|
||||
log_step "step"
|
||||
echo "stdout-data"
|
||||
`);
|
||||
expect(result.stdout).toBe("stdout-data");
|
||||
expect(result.stderr).toContain("info");
|
||||
expect(result.stderr).toContain("warn");
|
||||
expect(result.stderr).toContain("error");
|
||||
expect(result.stderr).toContain("step");
|
||||
});
|
||||
});
|
||||
|
||||
// ── opencode_install_cmd ────────────────────────────────────────────────────
|
||||
|
||||
describe("opencode_install_cmd", () => {
|
||||
it("should output a non-empty install command", () => {
|
||||
const result = runBash("opencode_install_cmd");
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout.length).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
it("should include architecture detection", () => {
|
||||
const result = runBash("opencode_install_cmd");
|
||||
expect(result.stdout).toContain("uname -m");
|
||||
});
|
||||
|
||||
it("should include OS detection", () => {
|
||||
const result = runBash("opencode_install_cmd");
|
||||
expect(result.stdout).toContain("uname -s");
|
||||
});
|
||||
|
||||
it("should download from github releases", () => {
|
||||
const result = runBash("opencode_install_cmd");
|
||||
expect(result.stdout).toContain("github.com/anomalyco/opencode");
|
||||
});
|
||||
|
||||
it("should handle aarch64 to arm64 mapping", () => {
|
||||
const result = runBash("opencode_install_cmd");
|
||||
expect(result.stdout).toContain("aarch64");
|
||||
expect(result.stdout).toContain("arm64");
|
||||
});
|
||||
|
||||
it("should update PATH in both .bashrc and .zshrc", () => {
|
||||
const result = runBash("opencode_install_cmd");
|
||||
expect(result.stdout).toContain(".bashrc");
|
||||
expect(result.stdout).toContain(".zshrc");
|
||||
});
|
||||
|
||||
it("should install to $HOME/.opencode/bin", () => {
|
||||
const result = runBash("opencode_install_cmd");
|
||||
expect(result.stdout).toContain(".opencode/bin");
|
||||
});
|
||||
|
||||
it("should use tar to extract the archive", () => {
|
||||
const result = runBash("opencode_install_cmd");
|
||||
expect(result.stdout).toContain("tar xzf");
|
||||
});
|
||||
|
||||
it("should clean up temp install directory", () => {
|
||||
const result = runBash("opencode_install_cmd");
|
||||
expect(result.stdout).toContain("rm -rf /tmp/opencode-install");
|
||||
});
|
||||
});
|
||||
|
||||
// ── POLL_INTERVAL configurable constant ─────────────────────────────────────
|
||||
|
||||
describe("POLL_INTERVAL configuration", () => {
|
||||
it("should default to 1 second", () => {
|
||||
const result = runBash('echo "$POLL_INTERVAL"');
|
||||
expect(result.stdout).toBe("1");
|
||||
});
|
||||
|
||||
it("should respect SPAWN_POLL_INTERVAL env var", () => {
|
||||
const result = runBash('echo "$POLL_INTERVAL"', { SPAWN_POLL_INTERVAL: "0.1" });
|
||||
expect(result.stdout).toBe("0.1");
|
||||
});
|
||||
|
||||
it("should allow custom poll interval for testing", () => {
|
||||
const result = runBash('echo "$POLL_INTERVAL"', { SPAWN_POLL_INTERVAL: "5" });
|
||||
expect(result.stdout).toBe("5");
|
||||
});
|
||||
});
|
||||
|
||||
// ── SSH_OPTS default configuration ──────────────────────────────────────────
|
||||
|
||||
describe("SSH_OPTS defaults", () => {
|
||||
it("should set SSH_OPTS when not pre-defined", () => {
|
||||
const result = runBash('echo "$SSH_OPTS"');
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout.length).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
it("should use accept-new for strict host key checking (TOFU)", () => {
|
||||
const result = runBash('echo "$SSH_OPTS"');
|
||||
expect(result.stdout).toContain("StrictHostKeyChecking=accept-new");
|
||||
});
|
||||
|
||||
it("should use /dev/null for known hosts file", () => {
|
||||
const result = runBash('echo "$SSH_OPTS"');
|
||||
expect(result.stdout).toContain("UserKnownHostsFile=/dev/null");
|
||||
});
|
||||
|
||||
it("should suppress SSH logging", () => {
|
||||
const result = runBash('echo "$SSH_OPTS"');
|
||||
expect(result.stdout).toContain("LogLevel=ERROR");
|
||||
});
|
||||
|
||||
it("should use ed25519 key by default", () => {
|
||||
const result = runBash('echo "$SSH_OPTS"');
|
||||
expect(result.stdout).toContain("id_ed25519");
|
||||
});
|
||||
|
||||
it("should not override pre-existing SSH_OPTS", () => {
|
||||
const result = runBash('echo "$SSH_OPTS"', { SSH_OPTS: "custom-opts" });
|
||||
expect(result.stdout).toBe("custom-opts");
|
||||
});
|
||||
});
|
||||
|
|
@ -1,852 +0,0 @@
|
|||
import { describe, it, expect, beforeEach, afterEach } from "bun:test";
|
||||
import { resolve, join } from "path";
|
||||
import { mkdirSync, rmSync, existsSync, readFileSync, writeFileSync, chmodSync } from "fs";
|
||||
import { tmpdir } from "os";
|
||||
|
||||
/**
|
||||
* Tests for OAuth flow functions in shared/common.sh.
|
||||
*
|
||||
* The OAuth flow is the primary authentication mechanism for spawn users,
|
||||
* yet its component functions had zero test coverage. This file tests:
|
||||
*
|
||||
* - validate_oauth_port: port range validation (1024-65535, numeric only)
|
||||
* - _generate_csrf_state: CSRF token generation (security-critical)
|
||||
* - _generate_oauth_html: HTML page generation for OAuth callback
|
||||
* - _generate_oauth_server_script: Node.js callback server generation
|
||||
* - _validate_oauth_server_args: prerequisite validation (port, state, runtime)
|
||||
* - _init_oauth_session: temp directory and CSRF state file creation
|
||||
* - cleanup_oauth_session: PID and directory cleanup
|
||||
* - exchange_oauth_code: OAuth code-to-key exchange (json_escape security)
|
||||
*
|
||||
* These are SECURITY-CRITICAL: CSRF state prevents OAuth code interception,
|
||||
* port validation prevents injection, and json_escape in exchange_oauth_code
|
||||
* prevents JSON injection via crafted OAuth codes.
|
||||
*
|
||||
* Agent: test-engineer
|
||||
*/
|
||||
|
||||
const REPO_ROOT = resolve(import.meta.dir, "../../..");
|
||||
const COMMON_SH = resolve(REPO_ROOT, "shared/common.sh");
|
||||
|
||||
let testDir: string;
|
||||
|
||||
beforeEach(() => {
|
||||
testDir = join(tmpdir(), `spawn-oauth-test-${Date.now()}-${Math.random().toString(36).slice(2)}`);
|
||||
mkdirSync(testDir, { recursive: true });
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
if (existsSync(testDir)) {
|
||||
rmSync(testDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* Run a bash snippet that sources shared/common.sh first.
|
||||
* Returns { exitCode, stdout, stderr }.
|
||||
*/
|
||||
function runBash(script: string, env?: Record<string, string>): { exitCode: number; stdout: string; stderr: string } {
|
||||
const fullScript = `source "${COMMON_SH}"\n${script}`;
|
||||
const { spawnSync } = require("child_process");
|
||||
const result = spawnSync("bash", ["-c", fullScript], {
|
||||
encoding: "utf-8",
|
||||
timeout: 15000,
|
||||
stdio: ["pipe", "pipe", "pipe"],
|
||||
env: { ...process.env, ...env },
|
||||
});
|
||||
return {
|
||||
exitCode: result.status ?? 1,
|
||||
stdout: (result.stdout || "").trim(),
|
||||
stderr: (result.stderr || "").trim(),
|
||||
};
|
||||
}
|
||||
|
||||
// ── validate_oauth_port ───────────────────────────────────────────────────────
|
||||
|
||||
describe("validate_oauth_port", () => {
|
||||
describe("accepts valid ports", () => {
|
||||
const validPorts = ["1024", "5180", "8080", "9999", "49152", "65535"];
|
||||
for (const port of validPorts) {
|
||||
it(`should accept port ${port}`, () => {
|
||||
const result = runBash(`validate_oauth_port "${port}"`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
describe("rejects privileged ports (below 1024)", () => {
|
||||
const privilegedPorts = ["0", "1", "22", "80", "443", "1023"];
|
||||
for (const port of privilegedPorts) {
|
||||
it(`should reject port ${port}`, () => {
|
||||
const result = runBash(`validate_oauth_port "${port}"`);
|
||||
expect(result.exitCode).toBe(1);
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
describe("rejects ports above 65535", () => {
|
||||
it("should reject port 65536", () => {
|
||||
const result = runBash(`validate_oauth_port "65536"`);
|
||||
expect(result.exitCode).toBe(1);
|
||||
});
|
||||
|
||||
it("should reject port 99999", () => {
|
||||
const result = runBash(`validate_oauth_port "99999"`);
|
||||
expect(result.exitCode).toBe(1);
|
||||
});
|
||||
});
|
||||
|
||||
describe("rejects non-numeric input", () => {
|
||||
it("should reject alphabetic string", () => {
|
||||
const result = runBash(`validate_oauth_port "abc"`);
|
||||
expect(result.exitCode).toBe(1);
|
||||
});
|
||||
|
||||
it("should reject empty string", () => {
|
||||
const result = runBash(`validate_oauth_port ""`);
|
||||
expect(result.exitCode).toBe(1);
|
||||
});
|
||||
|
||||
it("should reject port with spaces", () => {
|
||||
const result = runBash(`validate_oauth_port "80 80"`);
|
||||
expect(result.exitCode).toBe(1);
|
||||
});
|
||||
|
||||
it("should reject port with special characters", () => {
|
||||
const result = runBash(`validate_oauth_port "5180;echo"`);
|
||||
expect(result.exitCode).toBe(1);
|
||||
});
|
||||
|
||||
it("should reject negative number", () => {
|
||||
const result = runBash(`validate_oauth_port "-1"`);
|
||||
expect(result.exitCode).toBe(1);
|
||||
});
|
||||
|
||||
it("should reject decimal number", () => {
|
||||
const result = runBash(`validate_oauth_port "5180.5"`);
|
||||
expect(result.exitCode).toBe(1);
|
||||
});
|
||||
});
|
||||
|
||||
describe("boundary values", () => {
|
||||
it("should reject port 1023 (just below valid range)", () => {
|
||||
const result = runBash(`validate_oauth_port "1023"`);
|
||||
expect(result.exitCode).toBe(1);
|
||||
});
|
||||
|
||||
it("should accept port 1024 (lower boundary)", () => {
|
||||
const result = runBash(`validate_oauth_port "1024"`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should accept port 65535 (upper boundary)", () => {
|
||||
const result = runBash(`validate_oauth_port "65535"`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should reject port 65536 (just above valid range)", () => {
|
||||
const result = runBash(`validate_oauth_port "65536"`);
|
||||
expect(result.exitCode).toBe(1);
|
||||
});
|
||||
});
|
||||
|
||||
describe("error messages", () => {
|
||||
it("should show 'must be numeric' for non-numeric input", () => {
|
||||
const result = runBash(`validate_oauth_port "abc"`);
|
||||
expect(result.stderr).toContain("must be numeric");
|
||||
});
|
||||
|
||||
it("should show 'must be between' for out-of-range port", () => {
|
||||
const result = runBash(`validate_oauth_port "80"`);
|
||||
expect(result.stderr).toContain("must be between");
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
// ── _generate_csrf_state ──────────────────────────────────────────────────────
|
||||
|
||||
describe("_generate_csrf_state", () => {
|
||||
it("should generate a non-empty string", () => {
|
||||
const result = runBash("_generate_csrf_state");
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout.length).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
it("should generate hex-only output", () => {
|
||||
const result = runBash("_generate_csrf_state");
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toMatch(/^[0-9a-f]+$/);
|
||||
});
|
||||
|
||||
it("should generate at least 16 hex characters (64 bits of entropy)", () => {
|
||||
const result = runBash("_generate_csrf_state");
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout.length).toBeGreaterThanOrEqual(16);
|
||||
});
|
||||
|
||||
it("should generate different values on consecutive calls", () => {
|
||||
const result = runBash(`
|
||||
state1=$(_generate_csrf_state)
|
||||
state2=$(_generate_csrf_state)
|
||||
if [[ "$state1" == "$state2" ]]; then
|
||||
echo "SAME"
|
||||
exit 1
|
||||
fi
|
||||
echo "DIFFERENT"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("DIFFERENT");
|
||||
});
|
||||
|
||||
it("should work with openssl fallback", () => {
|
||||
// Test the primary openssl path (if available)
|
||||
const result = runBash(`
|
||||
if command -v openssl &>/dev/null; then
|
||||
state=$(_generate_csrf_state)
|
||||
echo "$state"
|
||||
else
|
||||
echo "no-openssl"
|
||||
fi
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
if (result.stdout !== "no-openssl") {
|
||||
// openssl rand -hex 16 produces exactly 32 hex chars
|
||||
expect(result.stdout.length).toBe(32);
|
||||
}
|
||||
});
|
||||
|
||||
it("should produce output safe for embedding in URLs and filenames", () => {
|
||||
const result = runBash("_generate_csrf_state");
|
||||
expect(result.exitCode).toBe(0);
|
||||
// No special characters, spaces, or newlines
|
||||
expect(result.stdout).not.toContain(" ");
|
||||
expect(result.stdout).not.toContain("\n");
|
||||
expect(result.stdout).not.toContain("/");
|
||||
expect(result.stdout).not.toContain("&");
|
||||
expect(result.stdout).not.toContain("?");
|
||||
});
|
||||
});
|
||||
|
||||
// ── _generate_oauth_html ──────────────────────────────────────────────────────
|
||||
|
||||
describe("_generate_oauth_html", () => {
|
||||
it("should set OAUTH_SUCCESS_HTML variable", () => {
|
||||
const result = runBash(`
|
||||
_generate_oauth_html
|
||||
echo "$OAUTH_SUCCESS_HTML"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout.length).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
it("should set OAUTH_ERROR_HTML variable", () => {
|
||||
const result = runBash(`
|
||||
_generate_oauth_html
|
||||
echo "$OAUTH_ERROR_HTML"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout.length).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
it("should produce valid HTML in success page", () => {
|
||||
const result = runBash(`
|
||||
_generate_oauth_html
|
||||
echo "$OAUTH_SUCCESS_HTML"
|
||||
`);
|
||||
expect(result.stdout).toContain("<html");
|
||||
expect(result.stdout).toContain("</html>");
|
||||
expect(result.stdout).toContain("<body>");
|
||||
});
|
||||
|
||||
it("should include success message in success HTML", () => {
|
||||
const result = runBash(`
|
||||
_generate_oauth_html
|
||||
echo "$OAUTH_SUCCESS_HTML"
|
||||
`);
|
||||
expect(result.stdout).toContain("Authentication Successful");
|
||||
});
|
||||
|
||||
it("should include auto-close script in success HTML", () => {
|
||||
const result = runBash(`
|
||||
_generate_oauth_html
|
||||
echo "$OAUTH_SUCCESS_HTML"
|
||||
`);
|
||||
expect(result.stdout).toContain("window.close");
|
||||
});
|
||||
|
||||
it("should include CSRF protection message in error HTML", () => {
|
||||
const result = runBash(`
|
||||
_generate_oauth_html
|
||||
echo "$OAUTH_ERROR_HTML"
|
||||
`);
|
||||
expect(result.stdout).toContain("CSRF");
|
||||
});
|
||||
|
||||
it("should include 'Authentication Failed' in error HTML", () => {
|
||||
const result = runBash(`
|
||||
_generate_oauth_html
|
||||
echo "$OAUTH_ERROR_HTML"
|
||||
`);
|
||||
expect(result.stdout).toContain("Authentication Failed");
|
||||
});
|
||||
|
||||
it("should include 'try again' guidance in error HTML", () => {
|
||||
const result = runBash(`
|
||||
_generate_oauth_html
|
||||
echo "$OAUTH_ERROR_HTML"
|
||||
`);
|
||||
expect(result.stdout).toContain("try again");
|
||||
});
|
||||
|
||||
it("should include CSS styling in both pages", () => {
|
||||
const result = runBash(`
|
||||
_generate_oauth_html
|
||||
echo "$OAUTH_SUCCESS_HTML"
|
||||
echo "---SEPARATOR---"
|
||||
echo "$OAUTH_ERROR_HTML"
|
||||
`);
|
||||
const parts = result.stdout.split("---SEPARATOR---");
|
||||
expect(parts[0]).toContain("<style>");
|
||||
expect(parts[1]).toContain("<style>");
|
||||
});
|
||||
});
|
||||
|
||||
// ── _generate_oauth_server_script ─────────────────────────────────────────────
|
||||
|
||||
describe("_generate_oauth_server_script", () => {
|
||||
it("should generate valid JavaScript", () => {
|
||||
const result = runBash(`
|
||||
_generate_oauth_server_script "test-state" "<html>ok</html>" "<html>err</html>" \\
|
||||
"${testDir}/code" "${testDir}/port" 5180
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("import http from 'http'");
|
||||
});
|
||||
|
||||
it("should embed the expected CSRF state", () => {
|
||||
const result = runBash(`
|
||||
_generate_oauth_server_script "my-csrf-token-abc123" "<html>ok</html>" "<html>err</html>" \\
|
||||
"${testDir}/code" "${testDir}/port" 5180
|
||||
`);
|
||||
expect(result.stdout).toContain("my-csrf-token-abc123");
|
||||
});
|
||||
|
||||
it("should embed the starting port", () => {
|
||||
const result = runBash(`
|
||||
_generate_oauth_server_script "state" "<html>ok</html>" "<html>err</html>" \\
|
||||
"${testDir}/code" "${testDir}/port" 9876
|
||||
`);
|
||||
expect(result.stdout).toContain("9876");
|
||||
});
|
||||
|
||||
it("should include CSRF state validation in the callback handler", () => {
|
||||
const result = runBash(`
|
||||
_generate_oauth_server_script "state" "<html>ok</html>" "<html>err</html>" \\
|
||||
"${testDir}/code" "${testDir}/port" 5180
|
||||
`);
|
||||
// Should check parsed.query.state against expectedState
|
||||
expect(result.stdout).toContain("expectedState");
|
||||
expect(result.stdout).toContain("parsed.query.state");
|
||||
});
|
||||
|
||||
it("should write the OAuth code to the code file", () => {
|
||||
const result = runBash(`
|
||||
_generate_oauth_server_script "state" "<html>ok</html>" "<html>err</html>" \\
|
||||
"${testDir}/code" "${testDir}/port" 5180
|
||||
`);
|
||||
expect(result.stdout).toContain("writeFileSync");
|
||||
expect(result.stdout).toContain("parsed.query.code");
|
||||
});
|
||||
|
||||
it("should write the actual port to the port file path", () => {
|
||||
const result = runBash(`
|
||||
_generate_oauth_server_script "state" "<html>ok</html>" "<html>err</html>" \\
|
||||
"${testDir}/code" "${testDir}/port" 5180
|
||||
`);
|
||||
// The script writes currentPort.toString() to the port file path
|
||||
expect(result.stdout).toContain("currentPort.toString()");
|
||||
expect(result.stdout).toContain(`${testDir}/port`);
|
||||
});
|
||||
|
||||
it("should handle EADDRINUSE by trying the next port", () => {
|
||||
const result = runBash(`
|
||||
_generate_oauth_server_script "state" "<html>ok</html>" "<html>err</html>" \\
|
||||
"${testDir}/code" "${testDir}/port" 5180
|
||||
`);
|
||||
expect(result.stdout).toContain("EADDRINUSE");
|
||||
expect(result.stdout).toContain("currentPort++");
|
||||
});
|
||||
|
||||
it("should have a 5-minute timeout (300000ms)", () => {
|
||||
const result = runBash(`
|
||||
_generate_oauth_server_script "state" "<html>ok</html>" "<html>err</html>" \\
|
||||
"${testDir}/code" "${testDir}/port" 5180
|
||||
`);
|
||||
expect(result.stdout).toContain("300000");
|
||||
});
|
||||
|
||||
it("should listen on localhost only (127.0.0.1)", () => {
|
||||
const result = runBash(`
|
||||
_generate_oauth_server_script "state" "<html>ok</html>" "<html>err</html>" \\
|
||||
"${testDir}/code" "${testDir}/port" 5180
|
||||
`);
|
||||
expect(result.stdout).toContain("127.0.0.1");
|
||||
});
|
||||
|
||||
it("should try a range of 10 ports", () => {
|
||||
const result = runBash(`
|
||||
_generate_oauth_server_script "state" "<html>ok</html>" "<html>err</html>" \\
|
||||
"${testDir}/code" "${testDir}/port" 5180
|
||||
`);
|
||||
// maxPort = starting_port + 10
|
||||
expect(result.stdout).toContain("maxPort");
|
||||
});
|
||||
|
||||
it("should return 403 for invalid CSRF state", () => {
|
||||
const result = runBash(`
|
||||
_generate_oauth_server_script "state" "<html>ok</html>" "<html>err</html>" \\
|
||||
"${testDir}/code" "${testDir}/port" 5180
|
||||
`);
|
||||
expect(result.stdout).toContain("403");
|
||||
});
|
||||
|
||||
it("should close server after successful callback", () => {
|
||||
const result = runBash(`
|
||||
_generate_oauth_server_script "state" "<html>ok</html>" "<html>err</html>" \\
|
||||
"${testDir}/code" "${testDir}/port" 5180
|
||||
`);
|
||||
expect(result.stdout).toContain("server.close()");
|
||||
});
|
||||
});
|
||||
|
||||
// ── _validate_oauth_server_args ───────────────────────────────────────────────
|
||||
|
||||
describe("_validate_oauth_server_args", () => {
|
||||
it("should succeed with valid port and state file", () => {
|
||||
const stateFile = join(testDir, "state");
|
||||
writeFileSync(stateFile, "valid-csrf-token");
|
||||
|
||||
const result = runBash(`_validate_oauth_server_args 5180 "${stateFile}"`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should fail with invalid port number", () => {
|
||||
const stateFile = join(testDir, "state");
|
||||
writeFileSync(stateFile, "valid-csrf-token");
|
||||
|
||||
const result = runBash(`_validate_oauth_server_args 80 "${stateFile}"`);
|
||||
expect(result.exitCode).toBe(1);
|
||||
});
|
||||
|
||||
it("should fail when state file does not exist", () => {
|
||||
const result = runBash(`_validate_oauth_server_args 5180 "${testDir}/nonexistent"`);
|
||||
expect(result.exitCode).toBe(1);
|
||||
expect(result.stderr).toContain("CSRF");
|
||||
});
|
||||
|
||||
it("should fail when state file is empty", () => {
|
||||
const stateFile = join(testDir, "state");
|
||||
writeFileSync(stateFile, "");
|
||||
|
||||
const result = runBash(`_validate_oauth_server_args 5180 "${stateFile}"`);
|
||||
expect(result.exitCode).toBe(1);
|
||||
expect(result.stderr).toContain("CSRF");
|
||||
});
|
||||
|
||||
it("should set OAUTH_STATE variable on success", () => {
|
||||
const stateFile = join(testDir, "state");
|
||||
writeFileSync(stateFile, "my-unique-csrf-token");
|
||||
|
||||
const result = runBash(`
|
||||
_validate_oauth_server_args 5180 "${stateFile}"
|
||||
echo "$OAUTH_STATE"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("my-unique-csrf-token");
|
||||
});
|
||||
|
||||
it("should set OAUTH_RUNTIME variable on success", () => {
|
||||
const stateFile = join(testDir, "state");
|
||||
writeFileSync(stateFile, "token");
|
||||
|
||||
const result = runBash(`
|
||||
_validate_oauth_server_args 5180 "${stateFile}"
|
||||
echo "$OAUTH_RUNTIME"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
// Should be one of: bun, node
|
||||
expect(result.stdout).toMatch(/bun|node/);
|
||||
});
|
||||
|
||||
it("should fail with non-numeric port", () => {
|
||||
const stateFile = join(testDir, "state");
|
||||
writeFileSync(stateFile, "token");
|
||||
|
||||
const result = runBash(`_validate_oauth_server_args "abc" "${stateFile}"`);
|
||||
expect(result.exitCode).toBe(1);
|
||||
});
|
||||
|
||||
it("should show port validation failure message", () => {
|
||||
const stateFile = join(testDir, "state");
|
||||
writeFileSync(stateFile, "token");
|
||||
|
||||
const result = runBash(`_validate_oauth_server_args 80 "${stateFile}"`);
|
||||
expect(result.stderr).toContain("port validation failed");
|
||||
});
|
||||
});
|
||||
|
||||
// ── _init_oauth_session ───────────────────────────────────────────────────────
|
||||
|
||||
describe("_init_oauth_session", () => {
|
||||
it("should create a temp directory", () => {
|
||||
const result = runBash("_init_oauth_session");
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout.length).toBeGreaterThan(0);
|
||||
// Clean up the created dir
|
||||
if (existsSync(result.stdout)) {
|
||||
rmSync(result.stdout, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
it("should create a state file inside the directory", () => {
|
||||
const result = runBash("_init_oauth_session");
|
||||
expect(result.exitCode).toBe(0);
|
||||
const oauthDir = result.stdout;
|
||||
|
||||
const stateFile = join(oauthDir, "state");
|
||||
expect(existsSync(stateFile)).toBe(true);
|
||||
|
||||
// Clean up
|
||||
rmSync(oauthDir, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
it("should populate state file with a non-empty CSRF token", () => {
|
||||
const result = runBash("_init_oauth_session");
|
||||
expect(result.exitCode).toBe(0);
|
||||
const oauthDir = result.stdout;
|
||||
|
||||
const stateContent = readFileSync(join(oauthDir, "state"), "utf-8");
|
||||
expect(stateContent.length).toBeGreaterThan(0);
|
||||
expect(stateContent).toMatch(/^[0-9a-f]+$/);
|
||||
|
||||
// Clean up
|
||||
rmSync(oauthDir, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
it("should set restrictive permissions (600) on state file", () => {
|
||||
const result = runBash(`
|
||||
dir=$(_init_oauth_session)
|
||||
stat -c '%a' "$dir/state" 2>/dev/null || stat -f '%Lp' "$dir/state"
|
||||
rm -rf "$dir"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("600");
|
||||
});
|
||||
|
||||
it("should generate unique directories on consecutive calls", () => {
|
||||
const result = runBash(`
|
||||
dir1=$(_init_oauth_session)
|
||||
dir2=$(_init_oauth_session)
|
||||
if [[ "$dir1" == "$dir2" ]]; then
|
||||
echo "SAME"
|
||||
else
|
||||
echo "DIFFERENT"
|
||||
fi
|
||||
rm -rf "$dir1" "$dir2"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("DIFFERENT");
|
||||
});
|
||||
});
|
||||
|
||||
// ── cleanup_oauth_session ─────────────────────────────────────────────────────
|
||||
|
||||
describe("cleanup_oauth_session", () => {
|
||||
it("should remove the oauth directory", () => {
|
||||
const oauthDir = join(testDir, "oauth-session");
|
||||
mkdirSync(oauthDir, { recursive: true });
|
||||
writeFileSync(join(oauthDir, "code"), "test-code");
|
||||
writeFileSync(join(oauthDir, "state"), "test-state");
|
||||
|
||||
const result = runBash(`cleanup_oauth_session "" "${oauthDir}"`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(existsSync(oauthDir)).toBe(false);
|
||||
});
|
||||
|
||||
it("should handle non-existent directory gracefully", () => {
|
||||
const result = runBash(`cleanup_oauth_session "" "${testDir}/nonexistent"`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should handle empty server_pid gracefully", () => {
|
||||
const oauthDir = join(testDir, "oauth-cleanup");
|
||||
mkdirSync(oauthDir, { recursive: true });
|
||||
|
||||
const result = runBash(`cleanup_oauth_session "" "${oauthDir}"`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should handle empty oauth_dir gracefully", () => {
|
||||
const result = runBash(`cleanup_oauth_session "" ""`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should handle both empty pid and dir gracefully", () => {
|
||||
const result = runBash(`cleanup_oauth_session "" ""`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should attempt to kill the specified PID", () => {
|
||||
// Verify cleanup_oauth_session calls kill on a PID by using a known-dead PID.
|
||||
// We can't easily test live process killing in spawnSync (wait hangs),
|
||||
// but we verify it handles the kill attempt without error.
|
||||
const result = runBash(`
|
||||
# Use PID of a process we know will already be dead
|
||||
bash -c 'exit 0' &
|
||||
bg_pid=$!
|
||||
wait "$bg_pid" # Ensure it's fully done
|
||||
cleanup_oauth_session "$bg_pid" ""
|
||||
echo "OK"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("OK");
|
||||
});
|
||||
|
||||
it("should handle invalid PID gracefully", () => {
|
||||
const result = runBash(`cleanup_oauth_session "999999999" ""`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should clean up both PID and directory", () => {
|
||||
const oauthDir = join(testDir, "oauth-both");
|
||||
mkdirSync(oauthDir, { recursive: true });
|
||||
writeFileSync(join(oauthDir, "code"), "test");
|
||||
|
||||
const result = runBash(`
|
||||
# Use a short-lived process that finishes before cleanup
|
||||
bash -c 'exit 0' &
|
||||
bg_pid=$!
|
||||
wait "$bg_pid"
|
||||
cleanup_oauth_session "$bg_pid" "${oauthDir}"
|
||||
# Check directory is cleaned up
|
||||
if [[ -d "${oauthDir}" ]]; then
|
||||
echo "DIR_EXISTS"
|
||||
exit 1
|
||||
fi
|
||||
echo "BOTH_CLEANED"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("BOTH_CLEANED");
|
||||
});
|
||||
});
|
||||
|
||||
// ── exchange_oauth_code (input sanitization) ──────────────────────────────────
|
||||
|
||||
describe("exchange_oauth_code", () => {
|
||||
// Note: We can't easily mock curl in a child bash process, but we can
|
||||
// create a fake curl script on PATH that returns controlled responses.
|
||||
|
||||
it("should extract the API key from the response", () => {
|
||||
// Create a fake curl that returns a known response
|
||||
writeFileSync(join(testDir, "curl"), '#!/bin/bash\necho \'{"key":"sk-or-v1-test-api-key-12345"}\'\n');
|
||||
chmodSync(join(testDir, "curl"), 0o755);
|
||||
|
||||
const result = runBash(
|
||||
`exchange_oauth_code "test-oauth-code"`,
|
||||
{ PATH: `${testDir}:${process.env.PATH}` }
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("sk-or-v1-test-api-key-12345");
|
||||
});
|
||||
|
||||
it("should return error when response has no key field", () => {
|
||||
writeFileSync(join(testDir, "curl"), '#!/bin/bash\necho \'{"error":"invalid_code"}\'\n');
|
||||
chmodSync(join(testDir, "curl"), 0o755);
|
||||
|
||||
const result = runBash(
|
||||
`exchange_oauth_code "bad-code"`,
|
||||
{ PATH: `${testDir}:${process.env.PATH}` }
|
||||
);
|
||||
expect(result.exitCode).toBe(1);
|
||||
expect(result.stderr).toContain("Failed to exchange");
|
||||
});
|
||||
|
||||
it("should return error when response is empty", () => {
|
||||
writeFileSync(join(testDir, "curl"), '#!/bin/bash\necho ""\n');
|
||||
chmodSync(join(testDir, "curl"), 0o755);
|
||||
|
||||
const result = runBash(
|
||||
`exchange_oauth_code "code"`,
|
||||
{ PATH: `${testDir}:${process.env.PATH}` }
|
||||
);
|
||||
expect(result.exitCode).toBe(1);
|
||||
});
|
||||
|
||||
it("should return error when curl returns invalid JSON", () => {
|
||||
writeFileSync(join(testDir, "curl"), '#!/bin/bash\necho "not json at all"\n');
|
||||
chmodSync(join(testDir, "curl"), 0o755);
|
||||
|
||||
const result = runBash(
|
||||
`exchange_oauth_code "code"`,
|
||||
{ PATH: `${testDir}:${process.env.PATH}` }
|
||||
);
|
||||
expect(result.exitCode).toBe(1);
|
||||
});
|
||||
|
||||
it("should handle OAuth code with double quotes safely via json_escape", () => {
|
||||
writeFileSync(join(testDir, "curl"), '#!/bin/bash\necho \'{"key":"sk-or-v1-safe-key"}\'\n');
|
||||
chmodSync(join(testDir, "curl"), 0o755);
|
||||
|
||||
const result = runBash(
|
||||
`exchange_oauth_code 'code-with-"quotes"'`,
|
||||
{ PATH: `${testDir}:${process.env.PATH}` }
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("sk-or-v1-safe-key");
|
||||
});
|
||||
|
||||
it("should show the server response in error message on failure", () => {
|
||||
writeFileSync(join(testDir, "curl"), '#!/bin/bash\necho \'{"error":"code_expired","message":"OAuth code has expired"}\'\n');
|
||||
chmodSync(join(testDir, "curl"), 0o755);
|
||||
|
||||
const result = runBash(
|
||||
`exchange_oauth_code "expired-code"`,
|
||||
{ PATH: `${testDir}:${process.env.PATH}` }
|
||||
);
|
||||
expect(result.exitCode).toBe(1);
|
||||
expect(result.stderr).toContain("code_expired");
|
||||
});
|
||||
|
||||
it("should suggest manual API key as alternative on failure", () => {
|
||||
writeFileSync(join(testDir, "curl"), '#!/bin/bash\necho \'{"error":"invalid"}\'\n');
|
||||
chmodSync(join(testDir, "curl"), 0o755);
|
||||
|
||||
const result = runBash(
|
||||
`exchange_oauth_code "bad-code"`,
|
||||
{ PATH: `${testDir}:${process.env.PATH}` }
|
||||
);
|
||||
expect(result.exitCode).toBe(1);
|
||||
expect(result.stderr).toContain("OPENROUTER_API_KEY");
|
||||
});
|
||||
});
|
||||
|
||||
// ── check_openrouter_connectivity (offline-safe) ──────────────────────────────
|
||||
|
||||
describe("check_openrouter_connectivity", () => {
|
||||
it("should return 0 when curl can reach the host", () => {
|
||||
// This test may fail in truly offline environments
|
||||
// but should pass in CI and normal dev environments
|
||||
const result = runBash("check_openrouter_connectivity");
|
||||
// We just verify it doesn't crash; actual connectivity depends on environment
|
||||
expect(result.exitCode === 0 || result.exitCode === 1).toBe(true);
|
||||
});
|
||||
|
||||
it("should return 1 when no tools are available", () => {
|
||||
const result = runBash(
|
||||
"check_openrouter_connectivity",
|
||||
{ PATH: "/nonexistent" }
|
||||
);
|
||||
expect(result.exitCode).toBe(1);
|
||||
});
|
||||
});
|
||||
|
||||
// ── Integration: _init_oauth_session + _validate_oauth_server_args ────────────
|
||||
|
||||
describe("OAuth session lifecycle integration", () => {
|
||||
it("should create session and validate its state file", () => {
|
||||
const result = runBash(`
|
||||
oauth_dir=$(_init_oauth_session)
|
||||
|
||||
# The state file created by _init_oauth_session should pass validation
|
||||
_validate_oauth_server_args 5180 "$oauth_dir/state"
|
||||
exit_code=$?
|
||||
|
||||
# Verify OAUTH_STATE matches the file content
|
||||
file_content=$(cat "$oauth_dir/state")
|
||||
if [[ "$OAUTH_STATE" != "$file_content" ]]; then
|
||||
echo "STATE_MISMATCH"
|
||||
rm -rf "$oauth_dir"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
rm -rf "$oauth_dir"
|
||||
echo "OK"
|
||||
exit $exit_code
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("OK");
|
||||
});
|
||||
|
||||
it("should create session, use it, and clean up", () => {
|
||||
const result = runBash(`
|
||||
# Create session
|
||||
oauth_dir=$(_init_oauth_session)
|
||||
|
||||
# Verify files exist
|
||||
[[ -d "$oauth_dir" ]] || { echo "NO_DIR"; exit 1; }
|
||||
[[ -f "$oauth_dir/state" ]] || { echo "NO_STATE"; exit 1; }
|
||||
|
||||
# Clean up
|
||||
cleanup_oauth_session "" "$oauth_dir"
|
||||
|
||||
# Verify cleanup
|
||||
[[ -d "$oauth_dir" ]] && { echo "DIR_STILL_EXISTS"; exit 1; }
|
||||
|
||||
echo "LIFECYCLE_OK"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("LIFECYCLE_OK");
|
||||
});
|
||||
});
|
||||
|
||||
// ── CSRF state security properties ────────────────────────────────────────────
|
||||
|
||||
describe("CSRF state security properties", () => {
|
||||
it("should generate 128 bits (32 hex chars) of entropy via openssl", () => {
|
||||
const result = runBash(`
|
||||
if command -v openssl &>/dev/null; then
|
||||
state=$(_generate_csrf_state)
|
||||
printf '%s' "$state" | wc -c | tr -d ' '
|
||||
else
|
||||
echo "32" # Skip test if no openssl
|
||||
fi
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("32");
|
||||
});
|
||||
|
||||
it("should not contain predictable patterns", () => {
|
||||
// Collect 5 CSRF states and check they all differ
|
||||
const result = runBash(`
|
||||
state1=$(_generate_csrf_state)
|
||||
state2=$(_generate_csrf_state)
|
||||
state3=$(_generate_csrf_state)
|
||||
state4=$(_generate_csrf_state)
|
||||
state5=$(_generate_csrf_state)
|
||||
|
||||
# Check all are unique using sort -u
|
||||
unique=$(printf '%s\\n' "$state1" "$state2" "$state3" "$state4" "$state5" | sort -u | wc -l | tr -d ' ')
|
||||
echo "$unique"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(parseInt(result.stdout)).toBe(5);
|
||||
});
|
||||
|
||||
it("should generate state that survives file write/read round-trip", () => {
|
||||
const result = runBash(`
|
||||
state=$(_generate_csrf_state)
|
||||
state_file="${testDir}/roundtrip_state"
|
||||
printf '%s' "$state" > "$state_file"
|
||||
chmod 600 "$state_file"
|
||||
|
||||
read_back=$(cat "$state_file")
|
||||
if [[ "$state" == "$read_back" ]]; then
|
||||
echo "MATCH"
|
||||
else
|
||||
echo "MISMATCH"
|
||||
fi
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("MATCH");
|
||||
});
|
||||
});
|
||||
|
|
@ -1,385 +0,0 @@
|
|||
import { describe, it, expect, beforeEach, afterEach } from "bun:test";
|
||||
import { resolve, join } from "path";
|
||||
import { mkdirSync, rmSync, existsSync, writeFileSync, readFileSync } from "fs";
|
||||
import { tmpdir } from "os";
|
||||
import { spawnSync } from "child_process";
|
||||
|
||||
/**
|
||||
* Tests for the post-session summary feature (PR #1037):
|
||||
*
|
||||
* - _show_post_session_summary: warns user their server is still running,
|
||||
* shows dashboard URL (if available), and provides reconnect command
|
||||
* - ssh_interactive_session: now calls _show_post_session_summary after
|
||||
* the SSH session ends, and preserves the SSH exit code
|
||||
* - SPAWN_DASHBOARD_URL convention: all SSH-based cloud providers must
|
||||
* set this variable so users get actionable dashboard links
|
||||
*
|
||||
* Agent: test-engineer
|
||||
*/
|
||||
|
||||
const REPO_ROOT = resolve(import.meta.dir, "../../..");
|
||||
const COMMON_SH = resolve(REPO_ROOT, "shared/common.sh");
|
||||
|
||||
let testDir: string;
|
||||
let mockBinDir: string;
|
||||
|
||||
beforeEach(() => {
|
||||
testDir = join(
|
||||
tmpdir(),
|
||||
`spawn-post-session-${Date.now()}-${Math.random().toString(36).slice(2)}`
|
||||
);
|
||||
mockBinDir = join(testDir, "bin");
|
||||
mkdirSync(mockBinDir, { recursive: true });
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
if (existsSync(testDir)) {
|
||||
rmSync(testDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* Run a bash snippet that sources shared/common.sh first.
|
||||
*/
|
||||
function runBash(
|
||||
script: string,
|
||||
opts?: { useMockPath?: boolean; env?: Record<string, string> }
|
||||
): { exitCode: number; stdout: string; stderr: string } {
|
||||
let prefix = "";
|
||||
if (opts?.useMockPath) {
|
||||
prefix = `export PATH="${mockBinDir}:$PATH"\n`;
|
||||
}
|
||||
const fullScript = `${prefix}source "${COMMON_SH}"\n${script}`;
|
||||
const result = spawnSync("bash", ["-c", fullScript], {
|
||||
encoding: "utf-8",
|
||||
timeout: 15000,
|
||||
stdio: ["pipe", "pipe", "pipe"],
|
||||
env: { ...process.env, ...opts?.env },
|
||||
});
|
||||
return {
|
||||
exitCode: result.status ?? 1,
|
||||
stdout: (result.stdout || "").trim(),
|
||||
stderr: (result.stderr || "").trim(),
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a mock executable in the mock bin directory.
|
||||
*/
|
||||
function createMockCommand(name: string, script: string): void {
|
||||
const path = join(mockBinDir, name);
|
||||
writeFileSync(path, `#!/bin/bash\n${script}`, { mode: 0o755 });
|
||||
}
|
||||
|
||||
// ── _show_post_session_summary ──────────────────────────────────────────────
|
||||
|
||||
describe("_show_post_session_summary", () => {
|
||||
it("should warn that the server is still running at the given IP", () => {
|
||||
const { stderr } = runBash(
|
||||
'_show_post_session_summary "203.0.113.42"'
|
||||
);
|
||||
expect(stderr).toContain("still running");
|
||||
expect(stderr).toContain("203.0.113.42");
|
||||
});
|
||||
|
||||
it("should show dashboard URL when SPAWN_DASHBOARD_URL is set", () => {
|
||||
const { stderr } = runBash(
|
||||
'SPAWN_DASHBOARD_URL="https://console.example.com/servers"\n_show_post_session_summary "10.0.0.1"'
|
||||
);
|
||||
expect(stderr).toContain("https://console.example.com/servers");
|
||||
expect(stderr).toContain("dashboard");
|
||||
});
|
||||
|
||||
it("should show generic message when SPAWN_DASHBOARD_URL is not set", () => {
|
||||
const { stderr } = runBash(
|
||||
'unset SPAWN_DASHBOARD_URL\n_show_post_session_summary "10.0.0.1"'
|
||||
);
|
||||
expect(stderr).toContain("cloud provider dashboard");
|
||||
expect(stderr).not.toContain("https://");
|
||||
});
|
||||
|
||||
it("should show reconnect command with default SSH_USER=root", () => {
|
||||
const { stderr } = runBash(
|
||||
'_show_post_session_summary "192.168.1.100"'
|
||||
);
|
||||
expect(stderr).toContain("ssh root@192.168.1.100");
|
||||
});
|
||||
|
||||
it("should show reconnect command with custom SSH_USER", () => {
|
||||
const { stderr } = runBash(
|
||||
'SSH_USER=ubuntu\n_show_post_session_summary "192.168.1.100"'
|
||||
);
|
||||
expect(stderr).toContain("ssh ubuntu@192.168.1.100");
|
||||
});
|
||||
|
||||
it("should use log_warn for all output lines (yellow warning styling)", () => {
|
||||
const { stderr } = runBash(
|
||||
'_show_post_session_summary "10.0.0.1"'
|
||||
);
|
||||
// log_warn outputs to stderr with WARNING prefix or yellow color
|
||||
// Every substantive line should go through log_warn
|
||||
expect(stderr).toContain("Session ended");
|
||||
expect(stderr).toContain("reconnect");
|
||||
});
|
||||
|
||||
it("should handle empty SPAWN_DASHBOARD_URL same as unset", () => {
|
||||
const { stderr } = runBash(
|
||||
'SPAWN_DASHBOARD_URL=""\n_show_post_session_summary "10.0.0.1"'
|
||||
);
|
||||
expect(stderr).toContain("cloud provider dashboard");
|
||||
expect(stderr).not.toContain("visit your dashboard");
|
||||
});
|
||||
|
||||
it("should handle IPv6 addresses", () => {
|
||||
const { stderr } = runBash(
|
||||
'_show_post_session_summary "2001:db8::1"'
|
||||
);
|
||||
expect(stderr).toContain("2001:db8::1");
|
||||
expect(stderr).toContain("still running");
|
||||
});
|
||||
});
|
||||
|
||||
// ── _show_exec_post_session_summary ─────────────────────────────────────────
|
||||
|
||||
describe("_show_exec_post_session_summary", () => {
|
||||
it("should warn that the service is still running", () => {
|
||||
const { stderr } = runBash(
|
||||
'_show_exec_post_session_summary'
|
||||
);
|
||||
expect(stderr).toContain("still running");
|
||||
expect(stderr).toContain("Session ended");
|
||||
});
|
||||
|
||||
it("should show service name when SERVER_NAME is set", () => {
|
||||
const { stderr } = runBash(
|
||||
'SERVER_NAME="my-app"\n_show_exec_post_session_summary'
|
||||
);
|
||||
expect(stderr).toContain("my-app");
|
||||
expect(stderr).toContain("still running");
|
||||
});
|
||||
|
||||
it("should show dashboard URL when SPAWN_DASHBOARD_URL is set", () => {
|
||||
const { stderr } = runBash(
|
||||
'SPAWN_DASHBOARD_URL="https://fly.io/dashboard"\n_show_exec_post_session_summary'
|
||||
);
|
||||
expect(stderr).toContain("https://fly.io/dashboard");
|
||||
expect(stderr).toContain("dashboard");
|
||||
});
|
||||
|
||||
it("should show generic message when SPAWN_DASHBOARD_URL is not set", () => {
|
||||
const { stderr } = runBash(
|
||||
'unset SPAWN_DASHBOARD_URL\n_show_exec_post_session_summary'
|
||||
);
|
||||
expect(stderr).toContain("cloud provider dashboard");
|
||||
});
|
||||
|
||||
it("should show reconnect command when SPAWN_RECONNECT_CMD is set", () => {
|
||||
const { stderr } = runBash(
|
||||
'SPAWN_RECONNECT_CMD="fly ssh console -a my-app"\n_show_exec_post_session_summary'
|
||||
);
|
||||
expect(stderr).toContain("fly ssh console -a my-app");
|
||||
expect(stderr).toContain("reconnect");
|
||||
});
|
||||
|
||||
it("should not show reconnect section when SPAWN_RECONNECT_CMD is not set", () => {
|
||||
const { stderr } = runBash(
|
||||
'unset SPAWN_RECONNECT_CMD\n_show_exec_post_session_summary'
|
||||
);
|
||||
expect(stderr).not.toContain("reconnect");
|
||||
});
|
||||
|
||||
it("should use 'service' instead of 'server' in messages", () => {
|
||||
const { stderr } = runBash(
|
||||
'_show_exec_post_session_summary'
|
||||
);
|
||||
expect(stderr).toContain("service");
|
||||
});
|
||||
|
||||
it("should not crash with no env vars set", () => {
|
||||
const { exitCode } = runBash(
|
||||
'unset SPAWN_DASHBOARD_URL SERVER_NAME SPAWN_RECONNECT_CMD\n_show_exec_post_session_summary'
|
||||
);
|
||||
expect(exitCode).toBe(0);
|
||||
});
|
||||
});
|
||||
|
||||
// ── ssh_interactive_session with post-session summary ───────────────────────
|
||||
|
||||
describe("ssh_interactive_session post-session integration", () => {
|
||||
it("should show post-session summary after SSH session ends", () => {
|
||||
createMockCommand("ssh", "exit 0");
|
||||
const { stderr } = runBash(
|
||||
'SSH_OPTS=""\nssh_interactive_session "10.0.0.1" "bash"',
|
||||
{ useMockPath: true }
|
||||
);
|
||||
expect(stderr).toContain("Session ended");
|
||||
expect(stderr).toContain("still running");
|
||||
expect(stderr).toContain("10.0.0.1");
|
||||
});
|
||||
|
||||
it("should preserve SSH exit code 0 on success", () => {
|
||||
createMockCommand("ssh", "exit 0");
|
||||
const { exitCode } = runBash(
|
||||
'SSH_OPTS=""\nssh_interactive_session "10.0.0.1" "bash"',
|
||||
{ useMockPath: true }
|
||||
);
|
||||
expect(exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should preserve non-zero SSH exit code on failure", () => {
|
||||
createMockCommand("ssh", "exit 42");
|
||||
const { exitCode, stderr } = runBash(
|
||||
'SSH_OPTS=""\nset +e\nssh_interactive_session "10.0.0.1" "bash"\necho "EXIT=$?"',
|
||||
{ useMockPath: true }
|
||||
);
|
||||
// The summary should still appear even on failure
|
||||
expect(stderr).toContain("still running");
|
||||
});
|
||||
|
||||
it("should show summary even when SSH exits with error", () => {
|
||||
createMockCommand("ssh", "exit 1");
|
||||
const { stderr } = runBash(
|
||||
'SSH_OPTS=""\nset +e\nresult=0\nssh_interactive_session "10.0.0.1" "bash" || result=$?\necho "EXIT=$result"',
|
||||
{ useMockPath: true }
|
||||
);
|
||||
expect(stderr).toContain("Session ended");
|
||||
expect(stderr).toContain("reconnect");
|
||||
});
|
||||
|
||||
it("should include dashboard URL when SPAWN_DASHBOARD_URL is set", () => {
|
||||
createMockCommand("ssh", "exit 0");
|
||||
const { stderr } = runBash(
|
||||
'SSH_OPTS=""\nSPAWN_DASHBOARD_URL="https://console.hetzner.cloud/"\nssh_interactive_session "10.0.0.1" "bash"',
|
||||
{ useMockPath: true }
|
||||
);
|
||||
expect(stderr).toContain("https://console.hetzner.cloud/");
|
||||
});
|
||||
|
||||
it("should show reconnect command with correct user and IP", () => {
|
||||
createMockCommand("ssh", "exit 0");
|
||||
const { stderr } = runBash(
|
||||
'SSH_OPTS=""\nSSH_USER=deploy\nssh_interactive_session "172.16.0.5" "tmux"',
|
||||
{ useMockPath: true }
|
||||
);
|
||||
expect(stderr).toContain("ssh deploy@172.16.0.5");
|
||||
});
|
||||
|
||||
it("should still pass -t flag and correct SSH args", () => {
|
||||
createMockCommand("ssh", 'echo "ARGS: $@"');
|
||||
const { stdout } = runBash(
|
||||
'SSH_OPTS="-o StrictHostKeyChecking=no"\nssh_interactive_session "10.0.0.1" "bash"',
|
||||
{ useMockPath: true }
|
||||
);
|
||||
expect(stdout).toContain("-t");
|
||||
expect(stdout).toContain("root@10.0.0.1");
|
||||
expect(stdout).toContain("bash");
|
||||
});
|
||||
});
|
||||
|
||||
|
||||
// ── _show_post_session_summary does not use SPAWN_DASHBOARD_URL from function scope ─
|
||||
|
||||
describe("_show_post_session_summary env var handling", () => {
|
||||
it("should read SPAWN_DASHBOARD_URL from environment, not from arguments", () => {
|
||||
// Verify it uses env var, not positional args for the dashboard URL
|
||||
const { stderr } = runBash(
|
||||
'export SPAWN_DASHBOARD_URL="https://test.example.com"\n_show_post_session_summary "10.0.0.1"'
|
||||
);
|
||||
expect(stderr).toContain("https://test.example.com");
|
||||
});
|
||||
|
||||
it("should not crash when called with only IP argument", () => {
|
||||
const { exitCode } = runBash(
|
||||
'_show_post_session_summary "10.0.0.1"'
|
||||
);
|
||||
expect(exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should handle SPAWN_DASHBOARD_URL with trailing slash", () => {
|
||||
const { stderr } = runBash(
|
||||
'SPAWN_DASHBOARD_URL="https://console.example.com/"\n_show_post_session_summary "10.0.0.1"'
|
||||
);
|
||||
expect(stderr).toContain("https://console.example.com/");
|
||||
});
|
||||
|
||||
it("should handle SPAWN_DASHBOARD_URL with path components", () => {
|
||||
const { stderr } = runBash(
|
||||
'SPAWN_DASHBOARD_URL="https://cloud.oracle.com/compute/instances"\n_show_post_session_summary "10.0.0.1"'
|
||||
);
|
||||
expect(stderr).toContain("https://cloud.oracle.com/compute/instances");
|
||||
});
|
||||
});
|
||||
|
||||
// ── shared/common.sh function definitions ───────────────────────────────────
|
||||
|
||||
describe("function definitions in shared/common.sh", () => {
|
||||
const sharedContent = readFileSync(COMMON_SH, "utf-8");
|
||||
|
||||
it("should define _show_post_session_summary", () => {
|
||||
expect(sharedContent).toContain("_show_post_session_summary()");
|
||||
});
|
||||
|
||||
it("should define _show_exec_post_session_summary", () => {
|
||||
expect(sharedContent).toContain("_show_exec_post_session_summary()");
|
||||
});
|
||||
|
||||
it("should define ssh_interactive_session that calls _show_post_session_summary", () => {
|
||||
// Find the ssh_interactive_session function body
|
||||
const lines = sharedContent.split("\n");
|
||||
let inFunc = false;
|
||||
let braceDepth = 0;
|
||||
const bodyLines: string[] = [];
|
||||
|
||||
for (const line of lines) {
|
||||
if (!inFunc) {
|
||||
if (line.match(/^ssh_interactive_session\(\)\s*\{/)) {
|
||||
inFunc = true;
|
||||
braceDepth = 1;
|
||||
continue;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
for (const ch of line) {
|
||||
if (ch === "{") braceDepth++;
|
||||
if (ch === "}") braceDepth--;
|
||||
}
|
||||
if (braceDepth <= 0) break;
|
||||
bodyLines.push(line);
|
||||
}
|
||||
|
||||
const body = bodyLines.join("\n");
|
||||
expect(body).toContain("_show_post_session_summary");
|
||||
expect(body).toContain('ssh_exit');
|
||||
});
|
||||
|
||||
it("ssh_interactive_session should capture ssh exit code instead of failing immediately", () => {
|
||||
const lines = sharedContent.split("\n");
|
||||
let inFunc = false;
|
||||
let braceDepth = 0;
|
||||
const bodyLines: string[] = [];
|
||||
|
||||
for (const line of lines) {
|
||||
if (!inFunc) {
|
||||
if (line.match(/^ssh_interactive_session\(\)\s*\{/)) {
|
||||
inFunc = true;
|
||||
braceDepth = 1;
|
||||
continue;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
for (const ch of line) {
|
||||
if (ch === "{") braceDepth++;
|
||||
if (ch === "}") braceDepth--;
|
||||
}
|
||||
if (braceDepth <= 0) break;
|
||||
bodyLines.push(line);
|
||||
}
|
||||
|
||||
const body = bodyLines.join("\n");
|
||||
// Should use || ssh_exit=$? pattern instead of letting set -e kill the script
|
||||
expect(body).toContain("|| ssh_exit=$?");
|
||||
// Should return the captured exit code
|
||||
expect(body).toContain("return");
|
||||
expect(body).toContain("ssh_exit");
|
||||
});
|
||||
});
|
||||
|
|
@@ -1,616 +0,0 @@
|
|||
import { describe, it, expect, beforeEach, afterEach } from "bun:test";
|
||||
import { resolve, join } from "path";
|
||||
import { mkdirSync, rmSync, existsSync, writeFileSync, readFileSync } from "fs";
|
||||
import { tmpdir } from "os";
|
||||
import { spawnSync } from "child_process";
|
||||
|
||||
/**
|
||||
* Tests for SSH helper and instance polling functions in shared/common.sh:
|
||||
*
|
||||
* - generic_ssh_wait: exponential-backoff SSH polling loop
|
||||
* - wait_for_cloud_init: cloud-init completion checker (thin wrapper)
|
||||
* - ssh_run_server: remote command execution via SSH
|
||||
* - ssh_upload_file: file upload via SCP
|
||||
* - ssh_interactive_session: interactive SSH session (-t flag)
|
||||
* - ssh_verify_connectivity: SSH connectivity check (thin wrapper)
|
||||
* - generic_wait_for_instance: API-based instance status polling
|
||||
*
|
||||
* These are CRITICAL infrastructure functions used by every cloud provider.
|
||||
* Tests use mock SSH/SCP commands to verify argument construction, variable
|
||||
* defaults (SSH_USER, SSH_OPTS), and failure/success behavior without
|
||||
* requiring actual SSH connectivity.
|
||||
*
|
||||
* Agent: test-engineer
|
||||
*/
|
||||
|
||||
const REPO_ROOT = resolve(import.meta.dir, "../../..");
|
||||
const COMMON_SH = resolve(REPO_ROOT, "shared/common.sh");
|
||||
|
||||
let testDir: string;
|
||||
let mockBinDir: string;
|
||||
|
||||
beforeEach(() => {
|
||||
testDir = join(tmpdir(), `spawn-ssh-test-${Date.now()}-${Math.random().toString(36).slice(2)}`);
|
||||
mockBinDir = join(testDir, "bin");
|
||||
mkdirSync(mockBinDir, { recursive: true });
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
if (existsSync(testDir)) {
|
||||
rmSync(testDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
/**
|
||||
* Run a bash snippet that sources shared/common.sh first.
|
||||
* Optionally prepends mockBinDir to PATH for mock commands.
|
||||
*/
|
||||
function runBash(script: string, opts?: { useMockPath?: boolean }): { exitCode: number; stdout: string; stderr: string } {
|
||||
let prefix = "";
|
||||
if (opts?.useMockPath) {
|
||||
prefix = `export PATH="${mockBinDir}:$PATH"\n`;
|
||||
}
|
||||
const fullScript = `${prefix}source "${COMMON_SH}"\n${script}`;
|
||||
const result = spawnSync("bash", ["-c", fullScript], {
|
||||
encoding: "utf-8",
|
||||
timeout: 15000,
|
||||
stdio: ["pipe", "pipe", "pipe"],
|
||||
});
|
||||
return {
|
||||
exitCode: result.status ?? 1,
|
||||
stdout: (result.stdout || "").trim(),
|
||||
stderr: (result.stderr || "").trim(),
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a mock executable script in the mock bin directory.
|
||||
*/
|
||||
function createMockCommand(name: string, script: string): void {
|
||||
const path = join(mockBinDir, name);
|
||||
writeFileSync(path, `#!/bin/bash\n${script}`, { mode: 0o755 });
|
||||
}
|
||||
|
||||
// ── ssh_run_server ──────────────────────────────────────────────────────────
|
||||
|
||||
describe("ssh_run_server", () => {
|
||||
it("should construct correct SSH command with default SSH_USER=root", () => {
|
||||
// Use a mock ssh that prints its arguments
|
||||
createMockCommand("ssh", 'echo "ARGS: $@"');
|
||||
const { stdout, exitCode } = runBash(
|
||||
'SSH_OPTS="-o StrictHostKeyChecking=no"\nssh_run_server "192.168.1.1" "uptime"',
|
||||
{ useMockPath: true }
|
||||
);
|
||||
expect(exitCode).toBe(0);
|
||||
expect(stdout).toContain("-o StrictHostKeyChecking=no");
|
||||
expect(stdout).toContain("root@192.168.1.1");
|
||||
expect(stdout).toContain("uptime");
|
||||
});
|
||||
|
||||
it("should use SSH_USER when set", () => {
|
||||
createMockCommand("ssh", 'echo "ARGS: $@"');
|
||||
const { stdout, exitCode } = runBash(
|
||||
'SSH_OPTS="-o StrictHostKeyChecking=no"\nSSH_USER=ubuntu\nssh_run_server "10.0.0.1" "ls -la"',
|
||||
{ useMockPath: true }
|
||||
);
|
||||
expect(exitCode).toBe(0);
|
||||
expect(stdout).toContain("ubuntu@10.0.0.1");
|
||||
expect(stdout).toContain("ls -la");
|
||||
});
|
||||
|
||||
it("should pass through SSH exit code on failure", () => {
|
||||
createMockCommand("ssh", "exit 1");
|
||||
const { exitCode } = runBash(
|
||||
'SSH_OPTS=""\nssh_run_server "10.0.0.1" "false"',
|
||||
{ useMockPath: true }
|
||||
);
|
||||
expect(exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should pass SSH_OPTS as unquoted options", () => {
|
||||
// This tests that SSH_OPTS is word-split (not quoted) per the SC2086 disable comment
|
||||
createMockCommand("ssh", 'echo "ARGS: $@"');
|
||||
const { stdout, exitCode } = runBash(
|
||||
'SSH_OPTS="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null"\nssh_run_server "10.0.0.1" "echo hello"',
|
||||
{ useMockPath: true }
|
||||
);
|
||||
expect(exitCode).toBe(0);
|
||||
// Both options should appear as separate arguments
|
||||
expect(stdout).toContain("StrictHostKeyChecking=no");
|
||||
expect(stdout).toContain("UserKnownHostsFile=/dev/null");
|
||||
});
|
||||
|
||||
it("should handle empty SSH_OPTS", () => {
|
||||
createMockCommand("ssh", 'echo "ARGS: $@"');
|
||||
const { stdout, exitCode } = runBash(
|
||||
'SSH_OPTS=""\nssh_run_server "10.0.0.1" "hostname"',
|
||||
{ useMockPath: true }
|
||||
);
|
||||
expect(exitCode).toBe(0);
|
||||
expect(stdout).toContain("root@10.0.0.1");
|
||||
expect(stdout).toContain("hostname");
|
||||
});
|
||||
|
||||
it("should handle command with spaces and special characters", () => {
|
||||
createMockCommand("ssh", 'echo "CMD: $@"');
|
||||
const { stdout, exitCode } = runBash(
|
||||
'SSH_OPTS=""\nssh_run_server "10.0.0.1" "cat /etc/os-release | grep NAME"',
|
||||
{ useMockPath: true }
|
||||
);
|
||||
expect(exitCode).toBe(0);
|
||||
expect(stdout).toContain("cat /etc/os-release | grep NAME");
|
||||
});
|
||||
});
|
||||
|
||||
// ── ssh_upload_file ──────────────────────────────────────────────────────────
|
||||
|
||||
describe("ssh_upload_file", () => {
|
||||
it("should construct correct SCP command with default SSH_USER=root", () => {
|
||||
createMockCommand("scp", 'echo "SCP: $@"');
|
||||
const { stdout, exitCode } = runBash(
|
||||
'SSH_OPTS="-o StrictHostKeyChecking=no"\nssh_upload_file "192.168.1.1" "/tmp/local.txt" "/remote/path.txt"',
|
||||
{ useMockPath: true }
|
||||
);
|
||||
expect(exitCode).toBe(0);
|
||||
expect(stdout).toContain("-o StrictHostKeyChecking=no");
|
||||
expect(stdout).toContain("/tmp/local.txt");
|
||||
expect(stdout).toContain("root@192.168.1.1:/remote/path.txt");
|
||||
});
|
||||
|
||||
it("should use SSH_USER when set", () => {
|
||||
createMockCommand("scp", 'echo "SCP: $@"');
|
||||
const { stdout, exitCode } = runBash(
|
||||
'SSH_OPTS=""\nSSH_USER=admin\nssh_upload_file "10.0.0.1" "/local/file" "/home/admin/file"',
|
||||
{ useMockPath: true }
|
||||
);
|
||||
expect(exitCode).toBe(0);
|
||||
expect(stdout).toContain("admin@10.0.0.1:/home/admin/file");
|
||||
});
|
||||
|
||||
it("should pass through SCP exit code on failure", () => {
|
||||
createMockCommand("scp", "exit 1");
|
||||
const { exitCode } = runBash(
|
||||
'SSH_OPTS=""\nssh_upload_file "10.0.0.1" "/local" "/remote"',
|
||||
{ useMockPath: true }
|
||||
);
|
||||
expect(exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should pass SSH_OPTS as word-split options to SCP", () => {
|
||||
createMockCommand("scp", 'echo "SCP: $@"');
|
||||
const { stdout } = runBash(
|
||||
'SSH_OPTS="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null"\nssh_upload_file "10.0.0.1" "/a" "/b"',
|
||||
{ useMockPath: true }
|
||||
);
|
||||
expect(stdout).toContain("StrictHostKeyChecking=no");
|
||||
expect(stdout).toContain("UserKnownHostsFile=/dev/null");
|
||||
});
|
||||
});
|
||||
|
||||
// ── ssh_interactive_session ──────────────────────────────────────────────────
|
||||
|
||||
describe("ssh_interactive_session", () => {
|
||||
it("should include -t flag for interactive/TTY allocation", () => {
|
||||
createMockCommand("ssh", 'echo "ARGS: $@"');
|
||||
const { stdout, exitCode } = runBash(
|
||||
'SSH_OPTS="-o StrictHostKeyChecking=no"\nssh_interactive_session "192.168.1.1" "bash"',
|
||||
{ useMockPath: true }
|
||||
);
|
||||
expect(exitCode).toBe(0);
|
||||
expect(stdout).toContain("-t");
|
||||
expect(stdout).toContain("root@192.168.1.1");
|
||||
expect(stdout).toContain("bash");
|
||||
});
|
||||
|
||||
it("should use SSH_USER when set", () => {
|
||||
createMockCommand("ssh", 'echo "ARGS: $@"');
|
||||
const { stdout } = runBash(
|
||||
'SSH_OPTS=""\nSSH_USER=deploy\nssh_interactive_session "10.0.0.1" "tmux"',
|
||||
{ useMockPath: true }
|
||||
);
|
||||
expect(stdout).toContain("deploy@10.0.0.1");
|
||||
expect(stdout).toContain("-t");
|
||||
});
|
||||
|
||||
it("should differ from ssh_run_server by having -t flag", () => {
|
||||
createMockCommand("ssh", 'echo "ARGS: $@"');
|
||||
|
||||
const interactive = runBash(
|
||||
'SSH_OPTS=""\nssh_interactive_session "10.0.0.1" "bash"',
|
||||
{ useMockPath: true }
|
||||
);
|
||||
const nonInteractive = runBash(
|
||||
'SSH_OPTS=""\nssh_run_server "10.0.0.1" "bash"',
|
||||
{ useMockPath: true }
|
||||
);
|
||||
|
||||
expect(interactive.stdout).toContain("-t");
|
||||
expect(nonInteractive.stdout).not.toContain("-t");
|
||||
});
|
||||
});
|
||||
|
||||
// ── ssh_verify_connectivity ──────────────────────────────────────────────────
|
||||
|
||||
describe("ssh_verify_connectivity", () => {
|
||||
it("should add ConnectTimeout=5 to SSH options", () => {
|
||||
// generic_ssh_wait redirects ssh output to /dev/null, so use a log file
|
||||
const logFile = join(testDir, "ssh_args_log");
|
||||
createMockCommand("ssh", `echo "$@" >> "${logFile}"; exit 0`);
|
||||
const { exitCode } = runBash(
|
||||
`SSH_OPTS="-o StrictHostKeyChecking=no"\nssh_verify_connectivity "10.0.0.1" 1 1`,
|
||||
{ useMockPath: true }
|
||||
);
|
||||
expect(exitCode).toBe(0);
|
||||
const log = readFileSync(logFile, "utf-8");
|
||||
expect(log).toContain("ConnectTimeout=5");
|
||||
});
|
||||
|
||||
it("should use SSH_USER default of root", () => {
|
||||
const logFile = join(testDir, "ssh_args_log");
|
||||
createMockCommand("ssh", `echo "$@" >> "${logFile}"; exit 0`);
|
||||
runBash(
|
||||
`SSH_OPTS=""\nssh_verify_connectivity "10.0.0.1" 1 1`,
|
||||
{ useMockPath: true }
|
||||
);
|
||||
const log = readFileSync(logFile, "utf-8");
|
||||
expect(log).toContain("root@10.0.0.1");
|
||||
});
|
||||
|
||||
it("should use custom SSH_USER", () => {
|
||||
const logFile = join(testDir, "ssh_args_log");
|
||||
createMockCommand("ssh", `echo "$@" >> "${logFile}"; exit 0`);
|
||||
runBash(
|
||||
`SSH_OPTS=""\nSSH_USER=ec2-user\nssh_verify_connectivity "10.0.0.1" 1 1`,
|
||||
{ useMockPath: true }
|
||||
);
|
||||
const log = readFileSync(logFile, "utf-8");
|
||||
expect(log).toContain("ec2-user@10.0.0.1");
|
||||
});
|
||||
|
||||
it("should fail after max_attempts when SSH never succeeds", () => {
|
||||
// Mock SSH to always fail and sleep to be instant
|
||||
createMockCommand("ssh", "exit 1");
|
||||
createMockCommand("sleep", "exit 0");
|
||||
const { exitCode } = runBash(
|
||||
'SSH_OPTS=""\nssh_verify_connectivity "10.0.0.1" 2 1',
|
||||
{ useMockPath: true }
|
||||
);
|
||||
expect(exitCode).toBe(1);
|
||||
});
|
||||
|
||||
it("should pass 'echo ok' as the test command", () => {
|
||||
const logFile = join(testDir, "ssh_args_log");
|
||||
createMockCommand("ssh", `echo "$@" >> "${logFile}"; exit 0`);
|
||||
runBash(
|
||||
`SSH_OPTS=""\nssh_verify_connectivity "10.0.0.1" 1 1`,
|
||||
{ useMockPath: true }
|
||||
);
|
||||
const log = readFileSync(logFile, "utf-8");
|
||||
expect(log).toContain("echo ok");
|
||||
});
|
||||
});
|
||||
|
||||
// ── generic_ssh_wait ─────────────────────────────────────────────────────────
|
||||
|
||||
describe("generic_ssh_wait", () => {
|
||||
it("should succeed immediately when SSH command succeeds on first try", () => {
|
||||
createMockCommand("ssh", "exit 0");
|
||||
const { exitCode, stderr } = runBash(
|
||||
'generic_ssh_wait root 10.0.0.1 "" "echo ok" "SSH connectivity" 5 1',
|
||||
{ useMockPath: true }
|
||||
);
|
||||
expect(exitCode).toBe(0);
|
||||
expect(stderr).toContain("SSH connectivity ready");
|
||||
});
|
||||
|
||||
it("should fail after max_attempts when SSH never succeeds", () => {
|
||||
createMockCommand("ssh", "exit 1");
|
||||
createMockCommand("sleep", "exit 0");
|
||||
const { exitCode, stderr } = runBash(
|
||||
'generic_ssh_wait root 10.0.0.1 "" "echo ok" "SSH connectivity" 2 1',
|
||||
{ useMockPath: true }
|
||||
);
|
||||
expect(exitCode).toBe(1);
|
||||
expect(stderr).toContain("SSH connectivity timed out after");
|
||||
});
|
||||
|
||||
it("should succeed on the second attempt", () => {
|
||||
// Create a mock SSH that fails on first call, succeeds on second
|
||||
const counterFile = join(testDir, "ssh_counter");
|
||||
writeFileSync(counterFile, "0");
|
||||
createMockCommand("sleep", "exit 0");
|
||||
createMockCommand("ssh", `
|
||||
count=$(cat "${counterFile}")
|
||||
count=$((count + 1))
|
||||
echo "$count" > "${counterFile}"
|
||||
if [ "$count" -ge 2 ]; then
|
||||
exit 0
|
||||
else
|
||||
exit 1
|
||||
fi
|
||||
`);
|
||||
const { exitCode, stderr } = runBash(
|
||||
'generic_ssh_wait root 10.0.0.1 "" "echo ok" "SSH test" 5 1',
|
||||
{ useMockPath: true }
|
||||
);
|
||||
expect(exitCode).toBe(0);
|
||||
expect(stderr).toContain("SSH test ready");
|
||||
});
|
||||
|
||||
it("should log elapsed time and attempt count", () => {
|
||||
createMockCommand("ssh", "exit 0");
|
||||
const { stderr } = runBash(
|
||||
'generic_ssh_wait root 10.0.0.1 "" "echo ok" "Connection" 3 1',
|
||||
{ useMockPath: true }
|
||||
);
|
||||
expect(stderr).toContain("Connection ready");
|
||||
});
|
||||
|
||||
it("should pass username and IP to SSH command", () => {
|
||||
const logFile = join(testDir, "ssh_log");
|
||||
createMockCommand("ssh", `echo "$@" >> "${logFile}"; exit 0`);
|
||||
const { exitCode } = runBash(
|
||||
`generic_ssh_wait myuser 203.0.113.1 "-o StrictHostKeyChecking=no" "echo ok" "test" 1 1`,
|
||||
{ useMockPath: true }
|
||||
);
|
||||
expect(exitCode).toBe(0);
|
||||
const log = readFileSync(logFile, "utf-8");
|
||||
expect(log).toContain("-o StrictHostKeyChecking=no");
|
||||
expect(log).toContain("myuser@203.0.113.1");
|
||||
expect(log).toContain("echo ok");
|
||||
});
|
||||
|
||||
it("should use default max_attempts=30 when not specified", () => {
|
||||
// Just verify it doesn't crash with default params
|
||||
createMockCommand("ssh", "exit 0");
|
||||
const { exitCode } = runBash(
|
||||
'generic_ssh_wait root 10.0.0.1 "" "echo ok" "test"',
|
||||
{ useMockPath: true }
|
||||
);
|
||||
expect(exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should log failure message with server IP for user guidance", () => {
|
||||
createMockCommand("ssh", "exit 1");
|
||||
createMockCommand("sleep", "exit 0");
|
||||
const { stderr } = runBash(
|
||||
'generic_ssh_wait root 10.0.0.1 "" "echo ok" "SSH" 2 1',
|
||||
{ useMockPath: true }
|
||||
);
|
||||
expect(stderr).toContain("10.0.0.1");
|
||||
expect(stderr).toContain("Server is still booting");
|
||||
});
|
||||
});
|
||||
|
||||
// ── wait_for_cloud_init ──────────────────────────────────────────────────────
|
||||
|
||||
describe("wait_for_cloud_init", () => {
|
||||
it("should pass correct arguments to generic_ssh_wait", () => {
|
||||
const logFile = join(testDir, "ssh_log");
|
||||
createMockCommand("ssh", `echo "$@" >> "${logFile}"; exit 0`);
|
||||
const { exitCode } = runBash(
|
||||
`wait_for_cloud_init "10.0.0.1" 2`,
|
||||
{ useMockPath: true }
|
||||
);
|
||||
expect(exitCode).toBe(0);
|
||||
const log = readFileSync(logFile, "utf-8");
|
||||
expect(log).toContain("root@10.0.0.1");
|
||||
expect(log).toContain("test -f /root/.cloud-init-complete");
|
||||
});
|
||||
|
||||
it("should use SSH_OPTS for SSH options", () => {
|
||||
const logFile = join(testDir, "ssh_log");
|
||||
createMockCommand("ssh", `echo "$@" >> "${logFile}"; exit 0`);
|
||||
runBash(
|
||||
`SSH_OPTS="-o StrictHostKeyChecking=no"\nwait_for_cloud_init "10.0.0.1" 1`,
|
||||
{ useMockPath: true }
|
||||
);
|
||||
const log = readFileSync(logFile, "utf-8");
|
||||
expect(log).toContain("StrictHostKeyChecking=no");
|
||||
});
|
||||
|
||||
it("should fail when cloud-init never completes", () => {
|
||||
createMockCommand("ssh", "exit 1");
|
||||
createMockCommand("sleep", "exit 0");
|
||||
const { exitCode } = runBash(
|
||||
'wait_for_cloud_init "10.0.0.1" 2',
|
||||
{ useMockPath: true }
|
||||
);
|
||||
expect(exitCode).toBe(1);
|
||||
});
|
||||
});
|
||||
|
||||
// ── generic_wait_for_instance ────────────────────────────────────────────────
|
||||
|
||||
describe("generic_wait_for_instance", () => {
|
||||
it("should succeed when API returns target status and IP on first poll", () => {
|
||||
const { exitCode, stderr, stdout } = runBash(`
|
||||
# Mock API function that returns a JSON response
|
||||
mock_api() {
|
||||
echo '{"instance": {"status": "active", "main_ip": "203.0.113.42"}}'
|
||||
}
|
||||
INSTANCE_STATUS_POLL_DELAY=0
|
||||
generic_wait_for_instance mock_api "/instances/123" "active" \\
|
||||
"d['instance']['status']" "d['instance']['main_ip']" \\
|
||||
TEST_SERVER_IP "Test instance" 5
|
||||
echo "IP=$TEST_SERVER_IP"
|
||||
`);
|
||||
expect(exitCode).toBe(0);
|
||||
expect(stdout).toContain("IP=203.0.113.42");
|
||||
expect(stderr).toContain("Test instance ready (IP: 203.0.113.42)");
|
||||
});
|
||||
|
||||
it("should poll until target status is reached", () => {
|
||||
const counterFile = join(testDir, "poll_counter");
|
||||
writeFileSync(counterFile, "0");
|
||||
const { exitCode, stdout } = runBash(`
|
||||
mock_api() {
|
||||
local count
|
||||
count=$(cat "${counterFile}")
|
||||
count=$((count + 1))
|
||||
echo "$count" > "${counterFile}"
|
||||
if [ "$count" -ge 3 ]; then
|
||||
echo '{"server": {"status": "running", "ip": "10.0.0.5"}}'
|
||||
else
|
||||
echo '{"server": {"status": "provisioning", "ip": ""}}'
|
||||
fi
|
||||
}
|
||||
INSTANCE_STATUS_POLL_DELAY=0
|
||||
generic_wait_for_instance mock_api "/servers/1" "running" \\
|
||||
"d['server']['status']" "d['server']['ip']" \\
|
||||
MY_SERVER_IP "Server" 5
|
||||
echo "RESULT=$MY_SERVER_IP"
|
||||
`);
|
||||
expect(exitCode).toBe(0);
|
||||
expect(stdout).toContain("RESULT=10.0.0.5");
|
||||
const count = parseInt(readFileSync(counterFile, "utf-8").trim());
|
||||
expect(count).toBe(3);
|
||||
});
|
||||
|
||||
it("should fail after max_attempts when status never reaches target", () => {
|
||||
const { exitCode, stderr } = runBash(`
|
||||
mock_api() {
|
||||
echo '{"instance": {"status": "pending", "ip": ""}}'
|
||||
}
|
||||
INSTANCE_STATUS_POLL_DELAY=0
|
||||
generic_wait_for_instance mock_api "/instances/1" "active" \\
|
||||
"d['instance']['status']" "d['instance']['ip']" \\
|
||||
TEST_IP "Instance" 3
|
||||
`);
|
||||
expect(exitCode).toBe(1);
|
||||
expect(stderr).toContain("Instance did not become active within");
|
||||
});
|
||||
|
||||
it("should export the IP variable to the environment", () => {
|
||||
const { exitCode, stdout } = runBash(`
|
||||
mock_api() {
|
||||
echo '{"vm": {"state": "ready", "address": "172.16.0.1"}}'
|
||||
}
|
||||
INSTANCE_STATUS_POLL_DELAY=0
|
||||
generic_wait_for_instance mock_api "/vms/abc" "ready" \\
|
||||
"d['vm']['state']" "d['vm']['address']" \\
|
||||
VM_IP "VM" 2
|
||||
echo "EXPORTED=$VM_IP"
|
||||
`);
|
||||
expect(exitCode).toBe(0);
|
||||
expect(stdout).toContain("EXPORTED=172.16.0.1");
|
||||
});
|
||||
|
||||
it("should handle empty IP even when status matches (keep polling)", () => {
|
||||
const counterFile = join(testDir, "ip_counter");
|
||||
writeFileSync(counterFile, "0");
|
||||
const { exitCode, stdout } = runBash(`
|
||||
mock_api() {
|
||||
local count
|
||||
count=$(cat "${counterFile}")
|
||||
count=$((count + 1))
|
||||
echo "$count" > "${counterFile}"
|
||||
if [ "$count" -ge 2 ]; then
|
||||
echo '{"i": {"s": "active", "ip": "1.2.3.4"}}'
|
||||
else
|
||||
echo '{"i": {"s": "active", "ip": ""}}'
|
||||
fi
|
||||
}
|
||||
INSTANCE_STATUS_POLL_DELAY=0
|
||||
generic_wait_for_instance mock_api "/i/1" "active" \\
|
||||
"d['i']['s']" "d['i']['ip']" \\
|
||||
GOT_IP "Instance" 5
|
||||
echo "IP=$GOT_IP"
|
||||
`);
|
||||
expect(exitCode).toBe(0);
|
||||
expect(stdout).toContain("IP=1.2.3.4");
|
||||
});
|
||||
|
||||
it("should handle API errors gracefully (response extraction fails)", () => {
|
||||
const { exitCode } = runBash(`
|
||||
mock_api() {
|
||||
echo "not valid json"
|
||||
}
|
||||
INSTANCE_STATUS_POLL_DELAY=0
|
||||
generic_wait_for_instance mock_api "/e/1" "active" \\
|
||||
"d['status']" "d['ip']" \\
|
||||
FAIL_IP "Broken" 2
|
||||
`);
|
||||
expect(exitCode).toBe(1);
|
||||
});
|
||||
|
||||
it("should default max_attempts to 60 when not specified", () => {
|
||||
// Just verify the function accepts 7 args without crashing
|
||||
const { exitCode } = runBash(`
|
||||
mock_api() {
|
||||
echo '{"s": {"status": "active", "ip": "1.1.1.1"}}'
|
||||
}
|
||||
INSTANCE_STATUS_POLL_DELAY=0
|
||||
generic_wait_for_instance mock_api "/x" "active" \\
|
||||
"d['s']['status']" "d['s']['ip']" \\
|
||||
X_IP "X"
|
||||
echo "OK=$X_IP"
|
||||
`);
|
||||
expect(exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should use INSTANCE_STATUS_POLL_DELAY for delay between polls", () => {
|
||||
const counterFile = join(testDir, "delay_counter");
|
||||
writeFileSync(counterFile, "0");
|
||||
const { exitCode } = runBash(`
|
||||
mock_api() {
|
||||
local count
|
||||
count=$(cat "${counterFile}")
|
||||
count=$((count + 1))
|
||||
echo "$count" > "${counterFile}"
|
||||
if [ "$count" -ge 2 ]; then
|
||||
echo '{"r": {"status": "done", "ip": "5.5.5.5"}}'
|
||||
else
|
||||
echo '{"r": {"status": "waiting", "ip": ""}}'
|
||||
fi
|
||||
}
|
||||
INSTANCE_STATUS_POLL_DELAY=0
|
||||
generic_wait_for_instance mock_api "/r/1" "done" \\
|
||||
"d['r']['status']" "d['r']['ip']" \\
|
||||
R_IP "Resource" 5
|
||||
`);
|
||||
expect(exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should show helpful guidance when polling times out", () => {
|
||||
const { stderr } = runBash(`
|
||||
mock_api() {
|
||||
echo '{"x": {"status": "creating"}}'
|
||||
}
|
||||
INSTANCE_STATUS_POLL_DELAY=0
|
||||
generic_wait_for_instance mock_api "/x/1" "ready" \\
|
||||
"d['x']['status']" "d['x']['ip']" \\
|
||||
X_IP "Droplet" 2
|
||||
`);
|
||||
expect(stderr).toContain("Check your cloud dashboard");
|
||||
expect(stderr).toContain("Wait 2-3 minutes and retry");
|
||||
expect(stderr).toContain("Try a different region");
|
||||
});
|
||||
|
||||
it("should log current status during polling", () => {
|
||||
const counterFile = join(testDir, "status_counter");
|
||||
writeFileSync(counterFile, "0");
|
||||
const { stderr, exitCode } = runBash(`
|
||||
mock_api() {
|
||||
local count
|
||||
count=$(cat "${counterFile}")
|
||||
count=$((count + 1))
|
||||
echo "$count" > "${counterFile}"
|
||||
if [ "$count" -ge 3 ]; then
|
||||
echo '{"s": "running", "ip": "9.9.9.9"}'
|
||||
else
|
||||
echo '{"s": "booting", "ip": ""}'
|
||||
fi
|
||||
}
|
||||
INSTANCE_STATUS_POLL_DELAY=0
|
||||
generic_wait_for_instance mock_api "/s/1" "running" \\
|
||||
"d['s']" "d['ip']" \\
|
||||
S_IP "Server" 5
|
||||
`);
|
||||
expect(exitCode).toBe(0);
|
||||
// Should show intermediate status during polling
|
||||
expect(stderr).toContain("booting");
|
||||
});
|
||||
});
|
||||
|
||||
// extract_api_error_message tests are in shared-common-error-polling.test.ts
|
||||
|
|
@@ -1,624 +0,0 @@
|
|||
import { describe, it, expect, beforeEach, afterEach } from "bun:test";
|
||||
import { spawnSync } from "child_process";
|
||||
import { resolve, join } from "path";
|
||||
import {
|
||||
mkdirSync,
|
||||
writeFileSync,
|
||||
rmSync,
|
||||
existsSync,
|
||||
readFileSync,
|
||||
} from "fs";
|
||||
import { tmpdir } from "os";
|
||||
|
||||
/**
|
||||
* Tests for the SSH key lifecycle functions in shared/common.sh:
|
||||
*
|
||||
* - ensure_ssh_key_with_provider: Generic SSH key registration flow using
|
||||
* provider-specific callbacks. Used by all cloud providers to handle the
|
||||
* full generate -> check -> register lifecycle. ZERO prior test coverage.
|
||||
*
|
||||
* - generate_ssh_key_if_missing: Generates ed25519 key if not present.
|
||||
* Edge cases around existing keys, nested directories, permissions.
|
||||
*
|
||||
* - get_ssh_fingerprint: Extracts MD5 fingerprint from public key.
|
||||
* Edge cases around key formats and error handling.
|
||||
*
|
||||
* These functions are security-critical (SSH key management) and are invoked
|
||||
* by every cloud provider's lib/common.sh. The callback-based pattern in
|
||||
* ensure_ssh_key_with_provider can have subtle bugs around:
|
||||
* - Check callback returning unexpected exit codes
|
||||
* - Register callback failing after key generation succeeded
|
||||
* - Custom key paths vs default key paths
|
||||
* - Key already registered vs needs registration
|
||||
*
|
||||
* Agent: test-engineer
|
||||
*/
|
||||
|
||||
const REPO_ROOT = resolve(import.meta.dir, "../../..");
|
||||
const COMMON_SH = resolve(REPO_ROOT, "shared/common.sh");
|
||||
|
||||
/**
|
||||
* Run a bash snippet that sources shared/common.sh first.
|
||||
* Uses spawnSync to properly capture both stdout and stderr
|
||||
* (execSync only captures stderr in the error path).
|
||||
*/
|
||||
function runBash(
|
||||
script: string,
|
||||
env?: Record<string, string>
|
||||
): { exitCode: number; stdout: string; stderr: string } {
|
||||
const fullScript = `source "${COMMON_SH}"\n${script}`;
|
||||
const result = spawnSync("bash", ["-c", fullScript], {
|
||||
encoding: "utf-8",
|
||||
timeout: 15000,
|
||||
stdio: ["pipe", "pipe", "pipe"],
|
||||
env: { ...process.env, ...env },
|
||||
});
|
||||
return {
|
||||
exitCode: result.status ?? 1,
|
||||
stdout: (result.stdout || "").trim(),
|
||||
stderr: (result.stderr || "").trim(),
|
||||
};
|
||||
}
|
||||
|
||||
/** Create a temporary directory for test files. */
|
||||
function createTempDir(): string {
|
||||
const dir = join(
|
||||
tmpdir(),
|
||||
`spawn-ssh-test-${Date.now()}-${Math.random().toString(36).slice(2)}`
|
||||
);
|
||||
mkdirSync(dir, { recursive: true });
|
||||
return dir;
|
||||
}
|
||||
|
||||
// ── generate_ssh_key_if_missing ──────────────────────────────────────────
|
||||
|
||||
describe("generate_ssh_key_if_missing", () => {
|
||||
let tempDir: string;
|
||||
|
||||
beforeEach(() => {
|
||||
tempDir = createTempDir();
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
if (existsSync(tempDir)) {
|
||||
rmSync(tempDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
it("should generate an ed25519 key when none exists", () => {
|
||||
const keyPath = join(tempDir, "test_key");
|
||||
const result = runBash(`generate_ssh_key_if_missing "${keyPath}"`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(existsSync(keyPath)).toBe(true);
|
||||
expect(existsSync(`${keyPath}.pub`)).toBe(true);
|
||||
});
|
||||
|
||||
it("should generate key content that starts with openssh private key header", () => {
|
||||
const keyPath = join(tempDir, "test_key");
|
||||
runBash(`generate_ssh_key_if_missing "${keyPath}"`);
|
||||
const content = readFileSync(keyPath, "utf-8");
|
||||
expect(content).toContain("OPENSSH PRIVATE KEY");
|
||||
});
|
||||
|
||||
it("should generate public key with ssh-ed25519 prefix", () => {
|
||||
const keyPath = join(tempDir, "test_key");
|
||||
runBash(`generate_ssh_key_if_missing "${keyPath}"`);
|
||||
const pubContent = readFileSync(`${keyPath}.pub`, "utf-8");
|
||||
expect(pubContent).toContain("ssh-ed25519");
|
||||
});
|
||||
|
||||
it("should not overwrite an existing key", () => {
|
||||
const keyPath = join(tempDir, "existing_key");
|
||||
writeFileSync(keyPath, "existing-content");
|
||||
|
||||
const result = runBash(`generate_ssh_key_if_missing "${keyPath}"`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
|
||||
// Content should be unchanged
|
||||
const content = readFileSync(keyPath, "utf-8");
|
||||
expect(content).toBe("existing-content");
|
||||
});
|
||||
|
||||
it("should create nested directories if they do not exist", () => {
|
||||
const keyPath = join(tempDir, "deep", "nested", "dir", "test_key");
|
||||
const result = runBash(`generate_ssh_key_if_missing "${keyPath}"`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(existsSync(keyPath)).toBe(true);
|
||||
});
|
||||
|
||||
it("should generate key with empty passphrase (no password)", () => {
|
||||
const keyPath = join(tempDir, "no_pass_key");
|
||||
runBash(`generate_ssh_key_if_missing "${keyPath}"`);
|
||||
|
||||
// Verify the key can be read without a passphrase by getting its fingerprint
|
||||
const result = runBash(`ssh-keygen -lf "${keyPath}.pub" -E md5`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("MD5:");
|
||||
});
|
||||
|
||||
it("should log a step message when generating", () => {
|
||||
const keyPath = join(tempDir, "test_key_log");
|
||||
const result = runBash(`generate_ssh_key_if_missing "${keyPath}"`);
|
||||
expect(result.stderr).toContain("Generating SSH key");
|
||||
});
|
||||
|
||||
it("should log info message after successful generation", () => {
|
||||
const keyPath = join(tempDir, "test_key_info");
|
||||
const result = runBash(`generate_ssh_key_if_missing "${keyPath}"`);
|
||||
expect(result.stderr).toContain("SSH key generated at");
|
||||
expect(result.stderr).toContain(keyPath);
|
||||
});
|
||||
|
||||
it("should not log generation messages when key already exists", () => {
|
||||
const keyPath = join(tempDir, "existing_key2");
|
||||
writeFileSync(keyPath, "existing");
|
||||
|
||||
const result = runBash(`generate_ssh_key_if_missing "${keyPath}"`);
|
||||
expect(result.stderr).not.toContain("Generating SSH key");
|
||||
expect(result.stderr).not.toContain("SSH key generated");
|
||||
});
|
||||
});
|
||||
|
||||
// ── get_ssh_fingerprint ──────────────────────────────────────────────────
|
||||
|
||||
describe("get_ssh_fingerprint", () => {
|
||||
let tempDir: string;
|
||||
|
||||
beforeEach(() => {
|
||||
tempDir = createTempDir();
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
if (existsSync(tempDir)) {
|
||||
rmSync(tempDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
it("should return MD5 fingerprint for a valid ed25519 public key", () => {
|
||||
const keyPath = join(tempDir, "fp_test_key");
|
||||
runBash(`ssh-keygen -t ed25519 -f "${keyPath}" -N "" -q`);
|
||||
|
||||
const result = runBash(`get_ssh_fingerprint "${keyPath}.pub"`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
// MD5 fingerprint format: aa:bb:cc:dd:...
|
||||
expect(result.stdout).toMatch(/^[0-9a-f]{2}(:[0-9a-f]{2})+$/);
|
||||
});
|
||||
|
||||
it("should strip MD5: prefix from fingerprint", () => {
|
||||
const keyPath = join(tempDir, "fp_strip_key");
|
||||
runBash(`ssh-keygen -t ed25519 -f "${keyPath}" -N "" -q`);
|
||||
|
||||
const result = runBash(`get_ssh_fingerprint "${keyPath}.pub"`);
|
||||
expect(result.stdout).not.toContain("MD5:");
|
||||
});
|
||||
|
||||
it("should return consistent fingerprint for same key", () => {
|
||||
const keyPath = join(tempDir, "fp_consistent_key");
|
||||
runBash(`ssh-keygen -t ed25519 -f "${keyPath}" -N "" -q`);
|
||||
|
||||
const result1 = runBash(`get_ssh_fingerprint "${keyPath}.pub"`);
|
||||
const result2 = runBash(`get_ssh_fingerprint "${keyPath}.pub"`);
|
||||
expect(result1.stdout).toBe(result2.stdout);
|
||||
});
|
||||
|
||||
it("should return different fingerprints for different keys", () => {
|
||||
const keyPath1 = join(tempDir, "fp_key1");
|
||||
const keyPath2 = join(tempDir, "fp_key2");
|
||||
runBash(`ssh-keygen -t ed25519 -f "${keyPath1}" -N "" -q`);
|
||||
runBash(`ssh-keygen -t ed25519 -f "${keyPath2}" -N "" -q`);
|
||||
|
||||
const fp1 = runBash(`get_ssh_fingerprint "${keyPath1}.pub"`);
|
||||
const fp2 = runBash(`get_ssh_fingerprint "${keyPath2}.pub"`);
|
||||
expect(fp1.stdout).not.toBe(fp2.stdout);
|
||||
});
|
||||
|
||||
it("should work with RSA public keys", () => {
|
||||
const keyPath = join(tempDir, "fp_rsa_key");
|
||||
runBash(`ssh-keygen -t rsa -b 2048 -f "${keyPath}" -N "" -q`);
|
||||
|
||||
const result = runBash(`get_ssh_fingerprint "${keyPath}.pub"`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toMatch(/^[0-9a-f]{2}(:[0-9a-f]{2})+$/);
|
||||
});
|
||||
});
|
||||
|
||||
// ── ensure_ssh_key_with_provider ──────────────────────────────────────────
|
||||
|
||||
describe("ensure_ssh_key_with_provider", () => {
|
||||
let tempDir: string;
|
||||
|
||||
beforeEach(() => {
|
||||
tempDir = createTempDir();
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
if (existsSync(tempDir)) {
|
||||
rmSync(tempDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
describe("key already registered", () => {
|
||||
it("should succeed when check callback returns 0 (key exists)", () => {
|
||||
const keyPath = join(tempDir, "existing_key");
|
||||
// Pre-generate a key so we skip generation
|
||||
runBash(`ssh-keygen -t ed25519 -f "${keyPath}" -N "" -q`);
|
||||
|
||||
const result = runBash(`
|
||||
check_always_exists() { return 0; }
|
||||
register_noop() { return 0; }
|
||||
ensure_ssh_key_with_provider check_always_exists register_noop "TestCloud" "${keyPath}"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stderr).toContain("already registered");
|
||||
expect(result.stderr).toContain("TestCloud");
|
||||
});
|
||||
|
||||
it("should not call register callback when key already registered", () => {
|
||||
const keyPath = join(tempDir, "noreg_key");
|
||||
runBash(`ssh-keygen -t ed25519 -f "${keyPath}" -N "" -q`);
|
||||
|
||||
const markerFile = join(tempDir, "register_called");
|
||||
const result = runBash(`
|
||||
check_exists() { return 0; }
|
||||
register_with_marker() { touch "${markerFile}"; return 0; }
|
||||
ensure_ssh_key_with_provider check_exists register_with_marker "Cloud" "${keyPath}"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(existsSync(markerFile)).toBe(false);
|
||||
});
|
||||
|
||||
it("should pass fingerprint to check callback", () => {
|
||||
const keyPath = join(tempDir, "fp_check_key");
|
||||
runBash(`ssh-keygen -t ed25519 -f "${keyPath}" -N "" -q`);
|
||||
|
||||
const fpFile = join(tempDir, "fingerprint.txt");
|
||||
const result = runBash(`
|
||||
check_save_fp() {
|
||||
echo "\$1" > "${fpFile}"
|
||||
return 0
|
||||
}
|
||||
register_noop() { return 0; }
|
||||
ensure_ssh_key_with_provider check_save_fp register_noop "Cloud" "${keyPath}"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
const savedFp = readFileSync(fpFile, "utf-8").trim();
|
||||
// Fingerprint should be in MD5 hex format
|
||||
expect(savedFp).toMatch(/^[0-9a-f]{2}(:[0-9a-f]{2})+$/);
|
||||
});
|
||||
|
||||
it("should pass pub key path to check callback", () => {
|
||||
const keyPath = join(tempDir, "pub_check_key");
|
||||
runBash(`ssh-keygen -t ed25519 -f "${keyPath}" -N "" -q`);
|
||||
|
||||
const pathFile = join(tempDir, "pub_path.txt");
|
||||
const result = runBash(`
|
||||
check_save_path() {
|
||||
echo "\$2" > "${pathFile}"
|
||||
return 0
|
||||
}
|
||||
register_noop() { return 0; }
|
||||
ensure_ssh_key_with_provider check_save_path register_noop "Cloud" "${keyPath}"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
const savedPath = readFileSync(pathFile, "utf-8").trim();
|
||||
expect(savedPath).toBe(`${keyPath}.pub`);
|
||||
});
|
||||
});
|
||||
|
||||
describe("key not registered - successful registration", () => {
|
||||
it("should register key when check callback returns 1 (not found)", () => {
|
||||
const keyPath = join(tempDir, "new_reg_key");
|
||||
runBash(`ssh-keygen -t ed25519 -f "${keyPath}" -N "" -q`);
|
||||
|
||||
const result = runBash(`
|
||||
check_not_found() { return 1; }
|
||||
register_success() { return 0; }
|
||||
ensure_ssh_key_with_provider check_not_found register_success "Hetzner" "${keyPath}"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stderr).toContain("Registering SSH key");
|
||||
expect(result.stderr).toContain("Hetzner");
|
||||
expect(result.stderr).toContain("SSH key registered");
|
||||
});
|
||||
|
||||
it("should pass key name to register callback", () => {
|
||||
const keyPath = join(tempDir, "name_reg_key");
|
||||
runBash(`ssh-keygen -t ed25519 -f "${keyPath}" -N "" -q`);
|
||||
|
||||
const nameFile = join(tempDir, "key_name.txt");
|
||||
const result = runBash(`
|
||||
check_not_found() { return 1; }
|
||||
register_save_name() {
|
||||
echo "\$1" > "${nameFile}"
|
||||
return 0
|
||||
}
|
||||
ensure_ssh_key_with_provider check_not_found register_save_name "Cloud" "${keyPath}"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
const keyName = readFileSync(nameFile, "utf-8").trim();
|
||||
// Key name format: spawn-<hostname>-<timestamp>
|
||||
expect(keyName).toMatch(/^spawn-/);
|
||||
});
|
||||
|
||||
it("should pass pub key path to register callback", () => {
|
||||
const keyPath = join(tempDir, "path_reg_key");
|
||||
runBash(`ssh-keygen -t ed25519 -f "${keyPath}" -N "" -q`);
|
||||
|
||||
const pathFile = join(tempDir, "reg_path.txt");
|
||||
const result = runBash(`
|
||||
check_not_found() { return 1; }
|
||||
register_save_path() {
|
||||
echo "\$2" > "${pathFile}"
|
||||
return 0
|
||||
}
|
||||
ensure_ssh_key_with_provider check_not_found register_save_path "Cloud" "${keyPath}"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
const savedPath = readFileSync(pathFile, "utf-8").trim();
|
||||
expect(savedPath).toBe(`${keyPath}.pub`);
|
||||
});
|
||||
});
|
||||
|
||||
describe("key not registered - failed registration", () => {
|
||||
it("should return error when register callback fails", () => {
|
||||
const keyPath = join(tempDir, "fail_reg_key");
|
||||
runBash(`ssh-keygen -t ed25519 -f "${keyPath}" -N "" -q`);
|
||||
|
||||
const result = runBash(`
|
||||
check_not_found() { return 1; }
|
||||
register_fail() { return 1; }
|
||||
ensure_ssh_key_with_provider check_not_found register_fail "DigitalOcean" "${keyPath}"
|
||||
`);
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
expect(result.stderr).toContain("Failed to register SSH key");
|
||||
expect(result.stderr).toContain("DigitalOcean");
|
||||
});
|
||||
});
|
||||
|
||||
describe("key generation during the flow", () => {
|
||||
it("should auto-generate key when key file does not exist", () => {
|
||||
const keyPath = join(tempDir, "auto_gen_key");
|
||||
// Do NOT pre-generate the key
|
||||
|
||||
const result = runBash(`
|
||||
check_not_found() { return 1; }
|
||||
register_success() { return 0; }
|
||||
ensure_ssh_key_with_provider check_not_found register_success "Vultr" "${keyPath}"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
// Key should have been generated
|
||||
expect(existsSync(keyPath)).toBe(true);
|
||||
expect(existsSync(`${keyPath}.pub`)).toBe(true);
|
||||
expect(result.stderr).toContain("Generating SSH key");
|
||||
});
|
||||
|
||||
it("should auto-generate key and then register it", () => {
|
||||
const keyPath = join(tempDir, "gen_and_reg_key");
|
||||
const fpFile = join(tempDir, "auto_fp.txt");
|
||||
|
||||
const result = runBash(`
|
||||
check_not_found() { return 1; }
|
||||
register_save_fp() {
|
||||
# Verify the pub key exists at this point
|
||||
if [[ -f "${keyPath}.pub" ]]; then
|
||||
echo "pub_exists" > "${fpFile}"
|
||||
fi
|
||||
return 0
|
||||
}
|
||||
ensure_ssh_key_with_provider check_not_found register_save_fp "Cloud" "${keyPath}"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
// Register callback should have been called after key was generated
|
||||
expect(readFileSync(fpFile, "utf-8").trim()).toBe("pub_exists");
|
||||
});
|
||||
});
|
||||
|
||||
describe("default key path", () => {
|
||||
it("should use default key path when not specified", () => {
|
||||
// Create a mock HOME with an existing SSH key to avoid generating in real ~/.ssh
|
||||
const fakeHome = join(tempDir, "fakehome");
|
||||
const sshDir = join(fakeHome, ".ssh");
|
||||
mkdirSync(sshDir, { recursive: true });
|
||||
const defaultKeyPath = join(sshDir, "id_ed25519");
|
||||
runBash(`ssh-keygen -t ed25519 -f "${defaultKeyPath}" -N "" -q`);
|
||||
|
||||
const pathFile = join(tempDir, "default_path.txt");
|
||||
const result = runBash(
|
||||
`
|
||||
check_save_path() {
|
||||
echo "\$2" > "${pathFile}"
|
||||
return 0
|
||||
}
|
||||
register_noop() { return 0; }
|
||||
ensure_ssh_key_with_provider check_save_path register_noop "Cloud"
|
||||
`,
|
||||
{ HOME: fakeHome }
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
const savedPath = readFileSync(pathFile, "utf-8").trim();
|
||||
expect(savedPath).toBe(`${defaultKeyPath}.pub`);
|
||||
});
|
||||
});
|
||||
|
||||
describe("provider name in messages", () => {
|
||||
it("should include provider name in 'already registered' message", () => {
|
||||
const keyPath = join(tempDir, "prov_name_key1");
|
||||
runBash(`ssh-keygen -t ed25519 -f "${keyPath}" -N "" -q`);
|
||||
|
||||
const result = runBash(`
|
||||
check_exists() { return 0; }
|
||||
reg() { return 0; }
|
||||
ensure_ssh_key_with_provider check_exists reg "Lambda Cloud" "${keyPath}"
|
||||
`);
|
||||
expect(result.stderr).toContain("Lambda Cloud");
|
||||
});
|
||||
|
||||
it("should include provider name in 'registering' message", () => {
|
||||
const keyPath = join(tempDir, "prov_name_key2");
|
||||
runBash(`ssh-keygen -t ed25519 -f "${keyPath}" -N "" -q`);
|
||||
|
||||
const result = runBash(`
|
||||
check_nf() { return 1; }
|
||||
reg_ok() { return 0; }
|
||||
ensure_ssh_key_with_provider check_nf reg_ok "Linode" "${keyPath}"
|
||||
`);
|
||||
expect(result.stderr).toContain("Registering SSH key with Linode");
|
||||
});
|
||||
|
||||
it("should include provider name in 'registered' message", () => {
|
||||
const keyPath = join(tempDir, "prov_name_key3");
|
||||
runBash(`ssh-keygen -t ed25519 -f "${keyPath}" -N "" -q`);
|
||||
|
||||
const result = runBash(`
|
||||
check_nf() { return 1; }
|
||||
reg_ok() { return 0; }
|
||||
ensure_ssh_key_with_provider check_nf reg_ok "UpCloud" "${keyPath}"
|
||||
`);
|
||||
expect(result.stderr).toContain("SSH key registered with UpCloud");
|
||||
});
|
||||
|
||||
it("should include provider name in failure message", () => {
|
||||
const keyPath = join(tempDir, "prov_name_key4");
|
||||
runBash(`ssh-keygen -t ed25519 -f "${keyPath}" -N "" -q`);
|
||||
|
||||
const result = runBash(`
|
||||
check_nf() { return 1; }
|
||||
reg_fail() { return 1; }
|
||||
ensure_ssh_key_with_provider check_nf reg_fail "Kamatera" "${keyPath}"
|
||||
`);
|
||||
expect(result.stderr).toContain("Failed to register SSH key with Kamatera");
|
||||
});
|
||||
});
|
||||
|
||||
describe("callback contract", () => {
|
||||
it("should call check callback exactly once when key is registered", () => {
|
||||
const keyPath = join(tempDir, "once_check_key");
|
||||
runBash(`ssh-keygen -t ed25519 -f "${keyPath}" -N "" -q`);
|
||||
|
||||
const countFile = join(tempDir, "check_count.txt");
|
||||
writeFileSync(countFile, "0");
|
||||
|
||||
const result = runBash(`
|
||||
check_count() {
|
||||
local current
|
||||
current=$(cat "${countFile}")
|
||||
echo $((current + 1)) > "${countFile}"
|
||||
return 0
|
||||
}
|
||||
register_noop() { return 0; }
|
||||
ensure_ssh_key_with_provider check_count register_noop "Cloud" "${keyPath}"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(readFileSync(countFile, "utf-8").trim()).toBe("1");
|
||||
});
|
||||
|
||||
it("should call register callback exactly once when key needs registration", () => {
|
||||
const keyPath = join(tempDir, "once_reg_key");
|
||||
runBash(`ssh-keygen -t ed25519 -f "${keyPath}" -N "" -q`);
|
||||
|
||||
const countFile = join(tempDir, "reg_count.txt");
|
||||
writeFileSync(countFile, "0");
|
||||
|
||||
const result = runBash(`
|
||||
check_not_found() { return 1; }
|
||||
register_count() {
|
||||
local current
|
||||
current=$(cat "${countFile}")
|
||||
echo $((current + 1)) > "${countFile}"
|
||||
return 0
|
||||
}
|
||||
ensure_ssh_key_with_provider check_not_found register_count "Cloud" "${keyPath}"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(readFileSync(countFile, "utf-8").trim()).toBe("1");
|
||||
});
|
||||
|
||||
it("should not call register callback when check succeeds", () => {
|
||||
const keyPath = join(tempDir, "no_reg_key");
|
||||
runBash(`ssh-keygen -t ed25519 -f "${keyPath}" -N "" -q`);
|
||||
|
||||
const countFile = join(tempDir, "no_reg_count.txt");
|
||||
writeFileSync(countFile, "0");
|
||||
|
||||
const result = runBash(`
|
||||
check_found() { return 0; }
|
||||
register_count() {
|
||||
local current
|
||||
current=$(cat "${countFile}")
|
||||
echo $((current + 1)) > "${countFile}"
|
||||
return 0
|
||||
}
|
||||
ensure_ssh_key_with_provider check_found register_count "Cloud" "${keyPath}"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(readFileSync(countFile, "utf-8").trim()).toBe("0");
|
||||
});
|
||||
});
|
||||
|
||||
describe("key name generation", () => {
|
||||
it("should generate key name with spawn- prefix", () => {
|
||||
const keyPath = join(tempDir, "kname_key");
|
||||
runBash(`ssh-keygen -t ed25519 -f "${keyPath}" -N "" -q`);
|
||||
|
||||
const nameFile = join(tempDir, "kname.txt");
|
||||
const result = runBash(`
|
||||
check_nf() { return 1; }
|
||||
register_save() {
|
||||
echo "\$1" > "${nameFile}"
|
||||
return 0
|
||||
}
|
||||
ensure_ssh_key_with_provider check_nf register_save "Cloud" "${keyPath}"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
const keyName = readFileSync(nameFile, "utf-8").trim();
|
||||
expect(keyName.startsWith("spawn-")).toBe(true);
|
||||
});
|
||||
|
||||
it("should include hostname in key name", () => {
|
||||
const keyPath = join(tempDir, "hostname_key");
|
||||
runBash(`ssh-keygen -t ed25519 -f "${keyPath}" -N "" -q`);
|
||||
|
||||
const nameFile = join(tempDir, "hostname_name.txt");
|
||||
const result = runBash(`
|
||||
check_nf() { return 1; }
|
||||
register_save() {
|
||||
echo "\$1" > "${nameFile}"
|
||||
return 0
|
||||
}
|
||||
ensure_ssh_key_with_provider check_nf register_save "Cloud" "${keyPath}"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
const keyName = readFileSync(nameFile, "utf-8").trim();
|
||||
// Format: spawn-<hostname>-<timestamp>
|
||||
const parts = keyName.split("-");
|
||||
// At minimum: spawn, hostname (might contain dashes), timestamp
|
||||
expect(parts.length).toBeGreaterThanOrEqual(3);
|
||||
});
|
||||
|
||||
it("should include timestamp in key name", () => {
|
||||
const keyPath = join(tempDir, "ts_key");
|
||||
runBash(`ssh-keygen -t ed25519 -f "${keyPath}" -N "" -q`);
|
||||
|
||||
const nameFile = join(tempDir, "ts_name.txt");
|
||||
const result = runBash(`
|
||||
check_nf() { return 1; }
|
||||
register_save() {
|
||||
echo "\$1" > "${nameFile}"
|
||||
return 0
|
||||
}
|
||||
ensure_ssh_key_with_provider check_nf register_save "Cloud" "${keyPath}"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
const keyName = readFileSync(nameFile, "utf-8").trim();
|
||||
// Last segment should be a unix timestamp (numeric)
|
||||
const lastPart = keyName.split("-").pop() || "";
|
||||
expect(lastPart).toMatch(/^\d+$/);
|
||||
// Should be a reasonable timestamp (after 2020)
|
||||
const ts = parseInt(lastPart, 10);
|
||||
expect(ts).toBeGreaterThan(1577836800); // 2020-01-01
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
// extract_ssh_key_ids tests are in shared-common-untested-helpers.test.ts
|
||||
// check_ssh_key_by_fingerprint tests are in shared-common-env-inject.test.ts
|
||||
|
|
@ -1,162 +0,0 @@
|
|||
import { describe, it, expect } from "bun:test";
|
||||
import { execSync } from "child_process";
|
||||
import { resolve } from "path";
|
||||
|
||||
/**
|
||||
* Tests for shared/common.sh helper functions:
|
||||
*
|
||||
* - log_install_failed: Actionable error guidance for agent installation failures
|
||||
* - ensure_jq: Cross-platform jq installation
|
||||
* - verify_agent_installed: Agent binary verification
|
||||
* - generate_env_config: Shell export statement generation
|
||||
*
|
||||
* Other functions previously here are now tested in their canonical locations:
|
||||
* - _multi_creds_validate -> shared-common-credential-mgmt.test.ts
|
||||
* - _load_json_config_fields / _save_json_config -> shared-common-helpers.test.ts
|
||||
* - extract_ssh_key_ids -> shared-common-helpers.test.ts
|
||||
* - interactive_pick -> shared-common-input-validation.test.ts
|
||||
*
|
||||
* Agent: test-engineer
|
||||
*/
|
||||
|
||||
const REPO_ROOT = resolve(import.meta.dir, "../../..");
|
||||
const COMMON_SH = resolve(REPO_ROOT, "shared/common.sh");
|
||||
|
||||
function runBash(script: string): { exitCode: number; stdout: string; stderr: string } {
|
||||
const fullScript = `source "${COMMON_SH}"\n${script}`;
|
||||
try {
|
||||
const stdout = execSync(`bash -c '${fullScript.replace(/'/g, "'\\''")}'`, {
|
||||
encoding: "utf-8",
|
||||
timeout: 10000,
|
||||
stdio: ["pipe", "pipe", "pipe"],
|
||||
});
|
||||
return { exitCode: 0, stdout: stdout.trim(), stderr: "" };
|
||||
} catch (err: any) {
|
||||
return {
|
||||
exitCode: err.status ?? 1,
|
||||
stdout: (err.stdout ?? "").toString().trim(),
|
||||
stderr: (err.stderr ?? "").toString().trim(),
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// log_install_failed
|
||||
// ============================================================================
|
||||
|
||||
describe("log_install_failed", () => {
|
||||
it("should include agent name and install failed message", () => {
|
||||
const result = runBash(`log_install_failed "Claude Code" 2>&1`);
|
||||
expect(result.stdout).toContain("Claude Code");
|
||||
expect(result.stdout).toContain("installation failed");
|
||||
});
|
||||
|
||||
it("should show SSH hint when server IP is provided", () => {
|
||||
const result = runBash(
|
||||
`log_install_failed "Codex" "" "10.0.0.5" 2>&1`
|
||||
);
|
||||
expect(result.stdout).toContain("ssh root@10.0.0.5");
|
||||
});
|
||||
|
||||
it("should not show SSH hint when server IP is empty", () => {
|
||||
const result = runBash(`log_install_failed "Codex" "npm install -g codex" "" 2>&1`);
|
||||
expect(result.stdout).not.toContain("ssh root@");
|
||||
});
|
||||
|
||||
it("should show install command hint when provided", () => {
|
||||
const result = runBash(
|
||||
`log_install_failed "Cline" "npm install -g cline" 2>&1`
|
||||
);
|
||||
expect(result.stdout).toContain("Try manual installation");
|
||||
expect(result.stdout).toContain("npm install -g cline");
|
||||
});
|
||||
|
||||
it("should always show common causes section", () => {
|
||||
const result = runBash(`log_install_failed "Test" 2>&1`);
|
||||
expect(result.stdout).toContain("Common causes");
|
||||
});
|
||||
|
||||
it("should not exit with an error code (informational only)", () => {
|
||||
const result = runBash(`log_install_failed "Test" "cmd" "1.2.3.4"`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
});
|
||||
|
||||
// ============================================================================
|
||||
// ensure_jq
|
||||
// ============================================================================
|
||||
|
||||
describe("ensure_jq", () => {
|
||||
it("should return 0 when jq is already installed", () => {
|
||||
const checkResult = runBash("command -v jq &>/dev/null && echo found || echo missing");
|
||||
if (checkResult.stdout === "found") {
|
||||
const result = runBash("ensure_jq 2>/dev/null");
|
||||
expect(result.exitCode).toBe(0);
|
||||
}
|
||||
});
|
||||
|
||||
it("should check for jq using command -v", () => {
|
||||
const result = runBash("type ensure_jq | head -5");
|
||||
expect(result.stdout).toContain("command -v jq");
|
||||
});
|
||||
});
|
||||
|
||||
// ============================================================================
|
||||
// verify_agent_installed
|
||||
// ============================================================================
|
||||
|
||||
describe("verify_agent_installed", () => {
|
||||
it("should return 0 when command exists", () => {
|
||||
const result = runBash(`verify_agent_installed "bash"`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should return 1 when command does not exist", () => {
|
||||
const result = runBash(
|
||||
`verify_agent_installed "nonexistent_cmd_12345" 2>/dev/null`
|
||||
);
|
||||
expect(result.exitCode).toBe(1);
|
||||
});
|
||||
|
||||
it("should show diagnostic error on failure", () => {
|
||||
const result = runBash(
|
||||
`verify_agent_installed "nonexistent_cmd_12345" "--version" "Claude Code" 2>&1`
|
||||
);
|
||||
expect(result.exitCode).toBe(1);
|
||||
expect(result.stdout).toContain("Claude Code");
|
||||
expect(result.stdout).toContain("installation failed");
|
||||
});
|
||||
});
|
||||
|
||||
// ============================================================================
|
||||
// generate_env_config
|
||||
// ============================================================================
|
||||
|
||||
describe("generate_env_config", () => {
|
||||
it("should generate export statements", () => {
|
||||
const result = runBash(`generate_env_config "MY_KEY=my_value"`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("export MY_KEY='my_value'");
|
||||
});
|
||||
|
||||
it("should include spawn:env marker", () => {
|
||||
const result = runBash(`generate_env_config "K=V"`);
|
||||
expect(result.stdout).toContain("# [spawn:env]");
|
||||
});
|
||||
|
||||
it("should handle values containing equals signs", () => {
|
||||
const result = runBash(`generate_env_config "API_URL=https://example.com?key=abc"`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("export API_URL='https://example.com?key=abc'");
|
||||
});
|
||||
|
||||
it("should produce sourceable bash output", () => {
|
||||
const result = runBash(`
|
||||
OUTPUT=$(generate_env_config "TEST_VAR=hello_world")
|
||||
eval "$OUTPUT"
|
||||
echo "$TEST_VAR"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("hello_world");
|
||||
});
|
||||
});
|
||||
|
|
@ -1,517 +0,0 @@
|
|||
import { describe, it, expect } from "bun:test";
|
||||
import { execSync } from "child_process";
|
||||
import { resolve } from "path";
|
||||
|
||||
/**
|
||||
* Tests for security-critical bash validation functions in shared/common.sh.
|
||||
*
|
||||
* These functions prevent injection attacks across ALL cloud provider scripts:
|
||||
* - validate_model_id: prevents command injection via model ID parameters
|
||||
* - validate_server_name: prevents injection via server/instance names
|
||||
* - validate_api_token: prevents injection via API tokens
|
||||
* - validate_region_name: prevents injection via region/zone parameters
|
||||
* - validate_resource_name: prevents injection via resource type/size params
|
||||
* - json_escape: safe JSON string encoding
|
||||
*
|
||||
* Each function is tested by sourcing shared/common.sh and calling it directly
|
||||
* in a bash subprocess. This catches real bash behavior (regex engine quirks,
|
||||
* quoting edge cases) that TypeScript replica tests would miss.
|
||||
*
|
||||
* Agent: test-engineer
|
||||
*/
|
||||
|
||||
const REPO_ROOT = resolve(import.meta.dir, "../../..");
|
||||
const COMMON_SH = resolve(REPO_ROOT, "shared/common.sh");
|
||||
|
||||
/**
|
||||
* Run a bash validator function and return the exit code.
|
||||
* Sources shared/common.sh, then calls the named function with the given argument.
|
||||
*/
|
||||
function runValidator(funcName: string, arg: string): { exitCode: number; stderr: string } {
|
||||
// Use printf to safely pass the argument without shell interpretation
|
||||
// The argument is base64-encoded to avoid any shell quoting issues
|
||||
const b64 = Buffer.from(arg).toString("base64");
|
||||
const script = `
|
||||
source "${COMMON_SH}"
|
||||
ARG="$(echo "${b64}" | base64 -d)"
|
||||
${funcName} "$ARG"
|
||||
`;
|
||||
try {
|
||||
execSync(`bash -c '${script.replace(/'/g, "'\\''")}'`, {
|
||||
encoding: "utf-8",
|
||||
timeout: 5000,
|
||||
stdio: ["pipe", "pipe", "pipe"],
|
||||
});
|
||||
return { exitCode: 0, stderr: "" };
|
||||
} catch (err: any) {
|
||||
return { exitCode: err.status ?? 1, stderr: err.stderr || "" };
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Run json_escape and return the output string.
|
||||
*/
|
||||
function runJsonEscape(input: string): string {
|
||||
const b64 = Buffer.from(input).toString("base64");
|
||||
const script = `
|
||||
source "${COMMON_SH}"
|
||||
INPUT="$(echo "${b64}" | base64 -d)"
|
||||
json_escape "$INPUT"
|
||||
`;
|
||||
try {
|
||||
const result = execSync(`bash -c '${script.replace(/'/g, "'\\''")}'`, {
|
||||
encoding: "utf-8",
|
||||
timeout: 5000,
|
||||
stdio: ["pipe", "pipe", "pipe"],
|
||||
});
|
||||
return result.trim();
|
||||
} catch (err: any) {
|
||||
return err.stdout?.trim() || "";
|
||||
}
|
||||
}
|
||||
|
||||
// ── validate_model_id ───────────────────────────────────────────────────
|
||||
|
||||
describe("validate_model_id", () => {
|
||||
describe("accepts valid model IDs", () => {
|
||||
const validModels = [
|
||||
"anthropic/claude-3.5-sonnet",
|
||||
"openai/gpt-4o",
|
||||
"google/gemini-pro-1.5",
|
||||
"meta-llama/llama-3.1-70b-instruct",
|
||||
"mistralai/mixtral-8x7b:free",
|
||||
"deepseek/deepseek-coder-v2",
|
||||
"cohere/command-r-plus",
|
||||
"nousresearch/hermes-3-llama-3.1-405b",
|
||||
"openrouter/auto",
|
||||
];
|
||||
|
||||
for (const model of validModels) {
|
||||
it(`should accept "${model}"`, () => {
|
||||
const result = runValidator("validate_model_id", model);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
it("should accept empty string (optional parameter)", () => {
|
||||
const result = runValidator("validate_model_id", "");
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
describe("rejects injection attempts", () => {
|
||||
const malicious = [
|
||||
"model; rm -rf /",
|
||||
"model$(whoami)",
|
||||
"model`id`",
|
||||
'model"injection',
|
||||
"model'injection",
|
||||
"model|cat /etc/passwd",
|
||||
"model&background",
|
||||
"model<input",
|
||||
"model>output",
|
||||
"model\\escape",
|
||||
"model\nnewline",
|
||||
"model space",
|
||||
"model\ttab",
|
||||
];
|
||||
|
||||
for (const input of malicious) {
|
||||
it(`should reject "${input.replace(/\n/g, "\\n").replace(/\t/g, "\\t")}"`, () => {
|
||||
const result = runValidator("validate_model_id", input);
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
// ── validate_server_name ────────────────────────────────────────────────
|
||||
|
||||
describe("validate_server_name", () => {
|
||||
describe("accepts valid server names", () => {
|
||||
const valid = [
|
||||
"spawn-claude-abc",
|
||||
"my-server-123",
|
||||
"test",
|
||||
"a".repeat(63), // max length
|
||||
"abc", // min length
|
||||
"ABC", // uppercase allowed
|
||||
"Server-Name-123",
|
||||
"123-numbers-first",
|
||||
];
|
||||
|
||||
for (const name of valid) {
|
||||
it(`should accept "${name.length > 30 ? name.substring(0, 27) + "..." : name}"`, () => {
|
||||
const result = runValidator("validate_server_name", name);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
describe("rejects invalid server names", () => {
|
||||
it("should reject empty string", () => {
|
||||
const result = runValidator("validate_server_name", "");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should reject name too short (2 chars)", () => {
|
||||
const result = runValidator("validate_server_name", "ab");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should reject name too long (64 chars)", () => {
|
||||
const result = runValidator("validate_server_name", "a".repeat(64));
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should reject name with leading dash", () => {
|
||||
const result = runValidator("validate_server_name", "-server");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should reject name with trailing dash", () => {
|
||||
const result = runValidator("validate_server_name", "server-");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should reject name with spaces", () => {
|
||||
const result = runValidator("validate_server_name", "my server");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should reject name with special characters", () => {
|
||||
const result = runValidator("validate_server_name", "server;rm");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should reject name with dots", () => {
|
||||
const result = runValidator("validate_server_name", "server.name");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should reject name with underscores", () => {
|
||||
const result = runValidator("validate_server_name", "server_name");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should reject name with shell metacharacters", () => {
|
||||
const result = runValidator("validate_server_name", "server$(id)");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
});
|
||||
|
||||
describe("boundary cases", () => {
|
||||
it("should accept exactly 3 characters", () => {
|
||||
const result = runValidator("validate_server_name", "abc");
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should accept exactly 63 characters", () => {
|
||||
const result = runValidator("validate_server_name", "a".repeat(63));
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should reject exactly 2 characters", () => {
|
||||
const result = runValidator("validate_server_name", "ab");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should reject exactly 64 characters", () => {
|
||||
const result = runValidator("validate_server_name", "a".repeat(64));
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
// ── validate_api_token ──────────────────────────────────────────────────
|
||||
|
||||
describe("validate_api_token", () => {
|
||||
describe("accepts valid tokens", () => {
|
||||
const valid = [
|
||||
"sk-or-v1-abc123def456",
|
||||
"hcloud_abcdef1234567890",
|
||||
"dop_v1_abcdef1234567890abcdef",
|
||||
"ABCDEFGHIJKLMNOP",
|
||||
"simple-token-123",
|
||||
"token_with_underscores",
|
||||
"a".repeat(200), // long tokens OK
|
||||
"token.with.dots",
|
||||
"token=with=equals",
|
||||
"token+with+plus",
|
||||
"token/with/slashes",
|
||||
"token:with:colons",
|
||||
"token@with@at",
|
||||
"token~with~tilde",
|
||||
];
|
||||
|
||||
for (const token of valid) {
|
||||
it(`should accept "${token.length > 30 ? token.substring(0, 27) + "..." : token}"`, () => {
|
||||
const result = runValidator("validate_api_token", token);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
describe("rejects empty and injection tokens", () => {
|
||||
it("should reject empty string", () => {
|
||||
const result = runValidator("validate_api_token", "");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
expect(result.stderr).toContain("empty");
|
||||
});
|
||||
|
||||
const injectionTokens = [
|
||||
{ input: "token;rm -rf /", desc: "semicolon" },
|
||||
{ input: "token'injection", desc: "single quote" },
|
||||
{ input: 'token"injection', desc: "double quote" },
|
||||
{ input: "token<input", desc: "angle bracket <" },
|
||||
{ input: "token>output", desc: "angle bracket >" },
|
||||
{ input: "token|pipe", desc: "pipe" },
|
||||
{ input: "token&background", desc: "ampersand" },
|
||||
{ input: "token$VAR", desc: "dollar sign" },
|
||||
{ input: "token`id`", desc: "backtick" },
|
||||
{ input: "token\\escape", desc: "backslash" },
|
||||
{ input: "token(sub)", desc: "open paren" },
|
||||
{ input: "token)sub", desc: "close paren" },
|
||||
];
|
||||
|
||||
for (const { input, desc } of injectionTokens) {
|
||||
it(`should reject token with ${desc}`, () => {
|
||||
const result = runValidator("validate_api_token", input);
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
expect(result.stderr).toContain("special characters");
|
||||
});
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
// ── validate_region_name ────────────────────────────────────────────────
|
||||
|
||||
describe("validate_region_name", () => {
|
||||
describe("accepts valid regions", () => {
|
||||
const valid = [
|
||||
"us-east-1",
|
||||
"eu-west-1",
|
||||
"ap-southeast-2",
|
||||
"fsn1",
|
||||
"nbg1-dc3",
|
||||
"nyc1",
|
||||
"sfo3",
|
||||
"lon1",
|
||||
"us_east_1",
|
||||
"EU-WEST",
|
||||
];
|
||||
|
||||
for (const region of valid) {
|
||||
it(`should accept "${region}"`, () => {
|
||||
const result = runValidator("validate_region_name", region);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
describe("rejects invalid regions", () => {
|
||||
it("should reject empty string", () => {
|
||||
const result = runValidator("validate_region_name", "");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should reject region with spaces", () => {
|
||||
const result = runValidator("validate_region_name", "us east");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should reject region with semicolons", () => {
|
||||
const result = runValidator("validate_region_name", "us;rm");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should reject region exceeding 63 characters", () => {
|
||||
const result = runValidator("validate_region_name", "a".repeat(64));
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should accept region at exactly 63 characters", () => {
|
||||
const result = runValidator("validate_region_name", "a".repeat(63));
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should accept single character region", () => {
|
||||
const result = runValidator("validate_region_name", "a");
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
// ── validate_resource_name ──────────────────────────────────────────────
|
||||
|
||||
describe("validate_resource_name", () => {
|
||||
describe("accepts valid resource names", () => {
|
||||
const valid = [
|
||||
"cx21",
|
||||
"s-1vcpu-1gb",
|
||||
"vc2-1c-1gb",
|
||||
"g6-nanode-1",
|
||||
"e2-micro",
|
||||
"t3.micro",
|
||||
"Standard_B1s",
|
||||
"n1-standard-1",
|
||||
];
|
||||
|
||||
for (const name of valid) {
|
||||
it(`should accept "${name}"`, () => {
|
||||
const result = runValidator("validate_resource_name", name);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
describe("rejects invalid resource names", () => {
|
||||
it("should reject empty string", () => {
|
||||
const result = runValidator("validate_resource_name", "");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should reject name with spaces", () => {
|
||||
const result = runValidator("validate_resource_name", "my resource");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should reject name with shell metacharacters", () => {
|
||||
const result = runValidator("validate_resource_name", "cx21;rm");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should reject name exceeding 63 characters", () => {
|
||||
const result = runValidator("validate_resource_name", "a".repeat(64));
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should accept name at exactly 63 characters", () => {
|
||||
const result = runValidator("validate_resource_name", "a".repeat(63));
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should accept dots in name", () => {
|
||||
// validate_resource_name allows dots (unlike validate_server_name)
|
||||
const result = runValidator("validate_resource_name", "t3.micro");
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
// ── json_escape ─────────────────────────────────────────────────────────
|
||||
|
||||
describe("json_escape", () => {
|
||||
it("should escape a simple string", () => {
|
||||
const result = runJsonEscape("hello world");
|
||||
expect(result).toBe('"hello world"');
|
||||
});
|
||||
|
||||
it("should escape double quotes", () => {
|
||||
const result = runJsonEscape('say "hello"');
|
||||
expect(result).toBe('"say \\"hello\\""');
|
||||
});
|
||||
|
||||
it("should escape backslashes", () => {
|
||||
const result = runJsonEscape("path\\to\\file");
|
||||
expect(result).toBe('"path\\\\to\\\\file"');
|
||||
});
|
||||
|
||||
it("should escape newlines", () => {
|
||||
const result = runJsonEscape("line1\nline2");
|
||||
expect(result).toBe('"line1\\nline2"');
|
||||
});
|
||||
|
||||
it("should escape tabs", () => {
|
||||
const result = runJsonEscape("col1\tcol2");
|
||||
expect(result).toBe('"col1\\tcol2"');
|
||||
});
|
||||
|
||||
it("should handle empty string", () => {
|
||||
const result = runJsonEscape("");
|
||||
expect(result).toBe('""');
|
||||
});
|
||||
|
||||
it("should handle string with only special characters", () => {
|
||||
const result = runJsonEscape('"\\');
|
||||
const parsed = JSON.parse(result);
|
||||
expect(parsed).toBe('"\\');
|
||||
});
|
||||
|
||||
it("should produce valid JSON for various inputs", () => {
|
||||
const inputs = [
|
||||
"simple text",
|
||||
"text with 'single quotes'",
|
||||
'text with "double quotes"',
|
||||
"text with\nnewlines\nand\ttabs",
|
||||
"path/to/file",
|
||||
"special: !@#%^*(){}[]",
|
||||
"unicode: cafe\u0301",
|
||||
];
|
||||
|
||||
for (const input of inputs) {
|
||||
const result = runJsonEscape(input);
|
||||
// Should be parseable as valid JSON
|
||||
expect(() => JSON.parse(result)).not.toThrow();
|
||||
// Should round-trip back to the original string
|
||||
expect(JSON.parse(result)).toBe(input);
|
||||
}
|
||||
});
|
||||
|
||||
it("should handle SSH key content safely", () => {
|
||||
// This was a security finding: triple-quote injection in SSH keys
|
||||
const sshKey = 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQ""" + __import__("os").system("id") + """';
|
||||
const result = runJsonEscape(sshKey);
|
||||
expect(() => JSON.parse(result)).not.toThrow();
|
||||
expect(JSON.parse(result)).toBe(sshKey);
|
||||
});
|
||||
|
||||
it("should handle long strings", () => {
|
||||
const longStr = "a".repeat(10000);
|
||||
const result = runJsonEscape(longStr);
|
||||
expect(() => JSON.parse(result)).not.toThrow();
|
||||
expect(JSON.parse(result)).toBe(longStr);
|
||||
});
|
||||
});
|
||||
|
||||
// ── Cross-function security: combined validation ────────────────────────
|
||||
|
||||
describe("combined security validation", () => {
|
||||
describe("common injection patterns blocked by all validators", () => {
|
||||
const injections = [
|
||||
"$(whoami)",
|
||||
"`id`",
|
||||
"; cat /etc/passwd",
|
||||
];
|
||||
|
||||
const validators = [
|
||||
"validate_server_name",
|
||||
"validate_api_token",
|
||||
];
|
||||
|
||||
for (const validator of validators) {
|
||||
for (const injection of injections) {
|
||||
it(`${validator} should block "${injection}"`, () => {
|
||||
const result = runValidator(validator, injection);
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
describe("validate_model_id blocks non-alphanumeric patterns", () => {
|
||||
const injections = [
|
||||
"model; rm -rf /",
|
||||
"model$(whoami)",
|
||||
"model`id`",
|
||||
"model | cat /etc/passwd",
|
||||
];
|
||||
|
||||
for (const injection of injections) {
|
||||
it(`should block "${injection}"`, () => {
|
||||
const result = runValidator("validate_model_id", injection);
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
}
|
||||
});
|
||||
});
|
||||
|
|
@ -1,875 +0,0 @@
|
|||
import { describe, it, expect } from "bun:test";
|
||||
import { execSync } from "child_process";
|
||||
import { resolve, join } from "path";
|
||||
import { mkdirSync, writeFileSync, rmSync, chmodSync } from "fs";
|
||||
import { tmpdir } from "os";
|
||||
|
||||
/**
|
||||
* Tests for shared/github-auth.sh — standalone GitHub auth helper.
|
||||
*
|
||||
* This file was merged in PR #824 and has zero test coverage.
|
||||
* Tests cover:
|
||||
* - ensure_gh_cli: gh CLI detection and installation dispatch
|
||||
* - _install_gh_binary: OS/arch detection and binary fallback paths
|
||||
* - ensure_gh_auth: authentication via GITHUB_TOKEN and gh auth status
|
||||
* - ensure_github_auth: combined wrapper
|
||||
* - Fallback log functions when common.sh is unavailable
|
||||
* - Direct execution mode (BASH_SOURCE == $0)
|
||||
* - Source pattern and curl|bash fallback
|
||||
*
|
||||
* Agent: test-engineer
|
||||
*/
|
||||
|
||||
const REPO_ROOT = resolve(import.meta.dir, "../../..");
|
||||
const GITHUB_AUTH_SH = resolve(REPO_ROOT, "shared/github-auth.sh");
|
||||
const COMMON_SH = resolve(REPO_ROOT, "shared/common.sh");
|
||||
|
||||
/**
|
||||
* Run a bash snippet that sources github-auth.sh.
|
||||
* Mocks are applied via PATH manipulation and function overrides.
|
||||
*/
|
||||
function runBash(
|
||||
script: string,
|
||||
opts?: { env?: Record<string, string>; timeout?: number }
|
||||
): { exitCode: number; stdout: string; stderr: string } {
|
||||
const fullScript = `source "${GITHUB_AUTH_SH}"\n${script}`;
|
||||
const envVars = { ...process.env, ...opts?.env };
|
||||
try {
|
||||
const stdout = execSync(`bash -c '${fullScript.replace(/'/g, "'\\''")}'`, {
|
||||
encoding: "utf-8",
|
||||
timeout: opts?.timeout ?? 10000,
|
||||
stdio: ["pipe", "pipe", "pipe"],
|
||||
env: envVars,
|
||||
});
|
||||
return { exitCode: 0, stdout: stdout.trim(), stderr: "" };
|
||||
} catch (err: any) {
|
||||
return {
|
||||
exitCode: err.status ?? 1,
|
||||
stdout: (err.stdout || "").trim(),
|
||||
stderr: (err.stderr || "").trim(),
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Run a bash snippet without sourcing github-auth.sh first.
|
||||
*/
|
||||
function runRawBash(
|
||||
script: string,
|
||||
opts?: { env?: Record<string, string>; timeout?: number }
|
||||
): { exitCode: number; stdout: string; stderr: string } {
|
||||
const envVars = { ...process.env, ...opts?.env };
|
||||
try {
|
||||
const stdout = execSync(`bash -c '${fullEscape(script)}'`, {
|
||||
encoding: "utf-8",
|
||||
timeout: opts?.timeout ?? 10000,
|
||||
stdio: ["pipe", "pipe", "pipe"],
|
||||
env: envVars,
|
||||
});
|
||||
return { exitCode: 0, stdout: stdout.trim(), stderr: "" };
|
||||
} catch (err: any) {
|
||||
return {
|
||||
exitCode: err.status ?? 1,
|
||||
stdout: (err.stdout || "").trim(),
|
||||
stderr: (err.stderr || "").trim(),
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
function fullEscape(s: string): string {
|
||||
return s.replace(/'/g, "'\\''");
|
||||
}
|
||||
|
||||
function createTempDir(): string {
|
||||
const dir = join(
|
||||
tmpdir(),
|
||||
`spawn-test-${Date.now()}-${Math.random().toString(36).slice(2)}`
|
||||
);
|
||||
mkdirSync(dir, { recursive: true });
|
||||
return dir;
|
||||
}
|
||||
|
||||
// ── Source Pattern ───────────────────────────────────────────────────────
|
||||
|
||||
describe("shared/github-auth.sh source pattern", () => {
|
||||
it("should pass bash syntax check", () => {
|
||||
const result = runRawBash(`bash -n "${GITHUB_AUTH_SH}"`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should source shared/common.sh and make log functions available", () => {
|
||||
const result = runBash("type log_info");
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("function");
|
||||
});
|
||||
|
||||
it("should make ensure_gh_cli available after sourcing", () => {
|
||||
const result = runBash("type ensure_gh_cli");
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("function");
|
||||
});
|
||||
|
||||
it("should make ensure_gh_auth available after sourcing", () => {
|
||||
const result = runBash("type ensure_gh_auth");
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("function");
|
||||
});
|
||||
|
||||
it("should make ensure_github_auth available after sourcing", () => {
|
||||
const result = runBash("type ensure_github_auth");
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("function");
|
||||
});
|
||||
|
||||
it("should make _install_gh_binary available after sourcing", () => {
|
||||
const result = runBash("type _install_gh_binary");
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("function");
|
||||
});
|
||||
|
||||
it("should have log_step available (from common.sh)", () => {
|
||||
const result = runBash("type log_step");
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("function");
|
||||
});
|
||||
|
||||
it("should have log_warn available (from common.sh)", () => {
|
||||
const result = runBash("type log_warn");
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("function");
|
||||
});
|
||||
|
||||
it("should have log_error available (from common.sh)", () => {
|
||||
const result = runBash("type log_error");
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("function");
|
||||
});
|
||||
});
|
||||
|
||||
// ── Fallback Log Functions ──────────────────────────────────────────────
|
||||
|
||||
describe("fallback log functions", () => {
|
||||
it("should define fallback log_info if common.sh loading is bypassed", () => {
|
||||
// Simulate common.sh failing to load by overriding SCRIPT_DIR
|
||||
// and checking that the fallback definitions are set up
|
||||
const script = `
|
||||
# Unset log functions to simulate common.sh failing
|
||||
unset -f log_info log_step log_warn log_error 2>/dev/null
|
||||
# Re-evaluate the fallback block from the script
|
||||
if ! type log_info &>/dev/null 2>&1; then
|
||||
log_info() { printf '[github-auth] %s\\n' "$*" >&2; }
|
||||
log_step() { printf '[github-auth] %s\\n' "$*" >&2; }
|
||||
log_warn() { printf '[github-auth] WARNING: %s\\n' "$*" >&2; }
|
||||
log_error() { printf '[github-auth] ERROR: %s\\n' "$*" >&2; }
|
||||
fi
|
||||
log_info "test message" 2>&1
|
||||
`;
|
||||
const result = runRawBash(script);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("[github-auth] test message");
|
||||
});
|
||||
|
||||
it("should prefix fallback log_warn with WARNING:", () => {
|
||||
const script = `
|
||||
unset -f log_info log_step log_warn log_error 2>/dev/null
|
||||
log_warn() { printf '[github-auth] WARNING: %s\\n' "$*" >&2; }
|
||||
log_warn "something wrong" 2>&1
|
||||
`;
|
||||
const result = runRawBash(script);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("[github-auth] WARNING: something wrong");
|
||||
});
|
||||
|
||||
it("should prefix fallback log_error with ERROR:", () => {
|
||||
const script = `
|
||||
unset -f log_info log_step log_warn log_error 2>/dev/null
|
||||
log_error() { printf '[github-auth] ERROR: %s\\n' "$*" >&2; }
|
||||
log_error "fatal problem" 2>&1
|
||||
`;
|
||||
const result = runRawBash(script);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("[github-auth] ERROR: fatal problem");
|
||||
});
|
||||
});
|
||||
|
||||
// ── ensure_gh_cli ───────────────────────────────────────────────────────
|
||||
|
||||
describe("ensure_gh_cli", () => {
|
||||
it("should succeed when gh is already on PATH", () => {
|
||||
// Redirect stderr to stdout so we can capture log messages
|
||||
const result = runBash("ensure_gh_cli 2>&1");
|
||||
// It should succeed since gh is installed in CI/dev
|
||||
if (result.exitCode === 0) {
|
||||
expect(result.stdout).toContain("gh");
|
||||
}
|
||||
// If gh is not installed, the function would try to install
|
||||
// Either way it should not hang
|
||||
});
|
||||
|
||||
it("should report gh version when already installed", () => {
|
||||
const result = runBash(`
|
||||
# Mock command -v to always succeed for gh, and gh --version
|
||||
gh() { if [[ "$1" == "--version" ]]; then echo "gh version 2.50.0 (2024-05-01)"; fi; }
|
||||
export -f gh
|
||||
command() { if [[ "$1" == "-v" && "$2" == "gh" ]]; then return 0; fi; builtin command "$@"; }
|
||||
ensure_gh_cli 2>&1
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should try homebrew on macOS when gh is missing", () => {
|
||||
// Test the macOS code path by checking the function body
|
||||
const result = runBash(`
|
||||
# Check that the function references brew install gh
|
||||
type ensure_gh_cli | grep -q "brew install gh"
|
||||
echo "found_brew_path"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("found_brew_path");
|
||||
});
|
||||
|
||||
it("should try apt-get on Debian/Ubuntu when gh is missing", () => {
|
||||
const result = runBash(`
|
||||
type ensure_gh_cli | grep -q "apt-get"
|
||||
echo "found_apt_path"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("found_apt_path");
|
||||
});
|
||||
|
||||
it("should try dnf on Fedora/RHEL when gh is missing", () => {
|
||||
const result = runBash(`
|
||||
type ensure_gh_cli | grep -q "dnf"
|
||||
echo "found_dnf_path"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("found_dnf_path");
|
||||
});
|
||||
|
||||
it("should fall back to binary installer on unknown systems", () => {
|
||||
const result = runBash(`
|
||||
type ensure_gh_cli | grep -q "_install_gh_binary"
|
||||
echo "found_binary_fallback"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("found_binary_fallback");
|
||||
});
|
||||
|
||||
it("should fail if gh not found after installation attempt", () => {
|
||||
const result = runBash(`
|
||||
# Set OSTYPE to linux
|
||||
export OSTYPE="linux-gnu"
|
||||
# Override command -v to fail for gh, apt-get, dnf
|
||||
command() {
|
||||
if [[ "$1" == "-v" ]]; then
|
||||
case "$2" in
|
||||
gh|apt-get|dnf) return 1 ;;
|
||||
esac
|
||||
fi
|
||||
builtin command "$@"
|
||||
}
|
||||
# Override _install_gh_binary to fail with an error
|
||||
_install_gh_binary() {
|
||||
log_error "Failed to install gh"
|
||||
return 1
|
||||
}
|
||||
ensure_gh_cli 2>&1
|
||||
`);
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
expect(result.stdout + result.stderr).toContain("Failed to install");
|
||||
});
|
||||
});
|
||||
|
||||
// ── _install_gh_binary ──────────────────────────────────────────────────
|
||||
|
||||
describe("_install_gh_binary", () => {
|
||||
it("should detect Linux as gh_os=linux", () => {
|
||||
const result = runBash(`
|
||||
# Override uname to return Linux
|
||||
uname() {
|
||||
if [[ "$1" == "-s" ]]; then echo "Linux";
|
||||
elif [[ "$1" == "-m" ]]; then echo "x86_64";
|
||||
fi
|
||||
}
|
||||
# Override curl to avoid network calls
|
||||
curl() { echo '{"tag_name": "v2.50.0"}'; }
|
||||
# Check the case statement logic by sourcing and inspecting
|
||||
type _install_gh_binary | grep -q 'linux'
|
||||
echo "linux_detected"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("linux_detected");
|
||||
});
|
||||
|
||||
it("should detect Darwin as gh_os=macOS", () => {
|
||||
const result = runBash(`
|
||||
type _install_gh_binary | grep -q 'macOS'
|
||||
echo "macos_detected"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("macos_detected");
|
||||
});
|
||||
|
||||
it("should map x86_64 to amd64", () => {
|
||||
const result = runBash(`
|
||||
type _install_gh_binary | grep -q 'amd64'
|
||||
echo "amd64_mapped"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("amd64_mapped");
|
||||
});
|
||||
|
||||
it("should map aarch64 to arm64", () => {
|
||||
const result = runBash(`
|
||||
type _install_gh_binary | grep -q 'arm64'
|
||||
echo "arm64_mapped"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("arm64_mapped");
|
||||
});
|
||||
|
||||
it("should fail for unsupported OS", () => {
|
||||
const result = runBash(`
|
||||
uname() {
|
||||
if [[ "$1" == "-s" ]]; then echo "FreeBSD";
|
||||
elif [[ "$1" == "-m" ]]; then echo "x86_64";
|
||||
fi
|
||||
}
|
||||
_install_gh_binary 2>&1
|
||||
`);
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
expect(result.stdout + result.stderr).toContain("Unsupported OS");
|
||||
});
|
||||
|
||||
it("should fail for unsupported architecture", () => {
|
||||
const result = runBash(`
|
||||
uname() {
|
||||
if [[ "$1" == "-s" ]]; then echo "Linux";
|
||||
elif [[ "$1" == "-m" ]]; then echo "mips64";
|
||||
fi
|
||||
}
|
||||
_install_gh_binary 2>&1
|
||||
`);
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
expect(result.stdout + result.stderr).toContain("Unsupported architecture");
|
||||
});
|
||||
|
||||
it("should fail when version fetch returns empty", () => {
|
||||
const result = runBash(`
|
||||
uname() {
|
||||
if [[ "$1" == "-s" ]]; then echo "Linux";
|
||||
elif [[ "$1" == "-m" ]]; then echo "x86_64";
|
||||
fi
|
||||
}
|
||||
# Mock curl to return empty/bad JSON
|
||||
curl() { echo '{}'; return 0; }
|
||||
_install_gh_binary 2>&1
|
||||
`);
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
expect(result.stdout + result.stderr).toContain("version");
|
||||
});
|
||||
|
||||
it("should install to ~/.local/bin", () => {
|
||||
const result = runBash(`
|
||||
type _install_gh_binary | grep -q '.local/bin'
|
||||
echo "local_bin_found"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("local_bin_found");
|
||||
});
|
||||
|
||||
it("should add ~/.local/bin to PATH if not already there", () => {
|
||||
const result = runBash(`
|
||||
type _install_gh_binary | grep -q 'export PATH'
|
||||
echo "path_export_found"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("path_export_found");
|
||||
});
|
||||
|
||||
it("should use GitHub releases API URL", () => {
|
||||
const result = runBash(`
|
||||
type _install_gh_binary | grep -q 'api.github.com/repos/cli/cli/releases/latest'
|
||||
echo "api_url_found"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("api_url_found");
|
||||
});
|
||||
|
||||
it("should clean up temp dir on download failure", () => {
|
||||
const result = runBash(`
|
||||
type _install_gh_binary | grep -q 'rm -rf'
|
||||
echo "cleanup_found"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("cleanup_found");
|
||||
});
|
||||
});
|
||||
|
||||
// ── ensure_gh_auth ──────────────────────────────────────────────────────
|
||||
|
||||
describe("ensure_gh_auth", () => {
|
||||
it("should succeed when gh auth status passes", () => {
|
||||
const result = runBash(`
|
||||
# Mock gh to succeed on auth status
|
||||
gh() {
|
||||
if [[ "$1" == "auth" && "$2" == "status" ]]; then return 0; fi
|
||||
return 1
|
||||
}
|
||||
ensure_gh_auth 2>&1
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout + result.stderr).toContain("Authenticated");
|
||||
});
|
||||
|
||||
it("should use GITHUB_TOKEN when set and auth status fails", () => {
|
||||
const result = runBash(`
|
||||
export GITHUB_TOKEN="ghp_test123"
|
||||
auth_attempted=0
|
||||
# Mock gh
|
||||
gh() {
|
||||
if [[ "$1" == "auth" && "$2" == "status" ]]; then
|
||||
if [[ "$auth_attempted" == "0" ]]; then
|
||||
auth_attempted=1
|
||||
return 1 # First check fails
|
||||
fi
|
||||
return 0 # After login, status succeeds
|
||||
fi
|
||||
if [[ "$1" == "auth" && "$2" == "login" && "$3" == "--with-token" ]]; then
|
||||
read token
|
||||
if [[ "$token" == "ghp_test123" ]]; then
|
||||
return 0
|
||||
fi
|
||||
return 1
|
||||
fi
|
||||
return 1
|
||||
}
|
||||
ensure_gh_auth 2>&1
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should fail when GITHUB_TOKEN auth fails", () => {
|
||||
const result = runBash(`
|
||||
export GITHUB_TOKEN="ghp_badtoken"
|
||||
# Mock gh
|
||||
gh() {
|
||||
if [[ "$1" == "auth" && "$2" == "status" ]]; then return 1; fi
|
||||
if [[ "$1" == "auth" && "$2" == "login" && "$3" == "--with-token" ]]; then
|
||||
read token # consume stdin
|
||||
return 1
|
||||
fi
|
||||
return 1
|
||||
}
|
||||
ensure_gh_auth 2>&1
|
||||
`);
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
expect(result.stdout + result.stderr).toContain("Failed to authenticate");
|
||||
});
|
||||
|
||||
it("should attempt interactive login when GITHUB_TOKEN is not set", () => {
|
||||
const result = runBash(`
|
||||
unset GITHUB_TOKEN
|
||||
# Mock gh
|
||||
login_called=0
|
||||
gh() {
|
||||
if [[ "$1" == "auth" && "$2" == "status" ]]; then
|
||||
if [[ "$login_called" == "0" ]]; then return 1; fi
|
||||
return 0
|
||||
fi
|
||||
if [[ "$1" == "auth" && "$2" == "login" ]]; then
|
||||
login_called=1
|
||||
return 0
|
||||
fi
|
||||
return 1
|
||||
}
|
||||
ensure_gh_auth 2>&1
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should fail when interactive login fails", () => {
|
||||
const result = runBash(`
|
||||
unset GITHUB_TOKEN
|
||||
# Mock gh
|
||||
gh() {
|
||||
if [[ "$1" == "auth" && "$2" == "status" ]]; then return 1; fi
|
||||
if [[ "$1" == "auth" && "$2" == "login" ]]; then return 1; fi
|
||||
return 1
|
||||
}
|
||||
ensure_gh_auth 2>&1
|
||||
`);
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
expect(result.stdout + result.stderr).toContain("authentication failed");
|
||||
});
|
||||
|
||||
it("should fail when post-login auth status check fails", () => {
|
||||
const result = runBash(`
|
||||
unset GITHUB_TOKEN
|
||||
# Mock gh - login succeeds but status always fails
|
||||
gh() {
|
||||
if [[ "$1" == "auth" && "$2" == "status" ]]; then return 1; fi
|
||||
if [[ "$1" == "auth" && "$2" == "login" ]]; then return 0; fi
|
||||
return 1
|
||||
}
|
||||
ensure_gh_auth 2>&1
|
||||
`);
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
expect(result.stdout + result.stderr).toContain("auth status check failed");
|
||||
});
|
||||
|
||||
it("should log step message when not authenticated", () => {
|
||||
const result = runBash(`
|
||||
export GITHUB_TOKEN="ghp_good"
|
||||
call_count=0
|
||||
gh() {
|
||||
if [[ "$1" == "auth" && "$2" == "status" ]]; then
|
||||
call_count=$((call_count + 1))
|
||||
if [[ "$call_count" -le 1 ]]; then return 1; fi
|
||||
return 0
|
||||
fi
|
||||
if [[ "$1" == "auth" && "$2" == "login" ]]; then
|
||||
read token
|
||||
return 0
|
||||
fi
|
||||
return 1
|
||||
}
|
||||
ensure_gh_auth 2>&1
|
||||
`);
|
||||
expect(result.stdout + result.stderr).toContain("Persisting GITHUB_TOKEN");
|
||||
});
|
||||
|
||||
it("should mention GITHUB_TOKEN in log when using token auth", () => {
|
||||
const result = runBash(`
|
||||
export GITHUB_TOKEN="ghp_token123"
|
||||
call_count=0
|
||||
gh() {
|
||||
if [[ "$1" == "auth" && "$2" == "status" ]]; then
|
||||
call_count=$((call_count + 1))
|
||||
if [[ "$call_count" -le 1 ]]; then return 1; fi
|
||||
return 0
|
||||
fi
|
||||
if [[ "$1" == "auth" && "$2" == "login" && "$3" == "--with-token" ]]; then
|
||||
read token
|
||||
return 0
|
||||
fi
|
||||
return 1
|
||||
}
|
||||
ensure_gh_auth 2>&1
|
||||
`);
|
||||
expect(result.stdout + result.stderr).toContain("GITHUB_TOKEN");
|
||||
});
|
||||
});
|
||||
|
||||
// ── ensure_github_auth (combined wrapper) ───────────────────────────────
|
||||
|
||||
describe("ensure_github_auth", () => {
|
||||
it("should call ensure_gh_cli then ensure_gh_auth", () => {
|
||||
const result = runBash(`
|
||||
# Mock gh to be available and authenticated
|
||||
gh() {
|
||||
if [[ "$1" == "--version" ]]; then echo "gh version 2.50.0"; return 0; fi
|
||||
if [[ "$1" == "auth" && "$2" == "status" ]]; then return 0; fi
|
||||
return 1
|
||||
}
|
||||
command() {
|
||||
if [[ "$1" == "-v" && "$2" == "gh" ]]; then return 0; fi
|
||||
builtin command "$@"
|
||||
}
|
||||
ensure_github_auth 2>&1
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should fail if ensure_gh_cli fails", () => {
|
||||
const result = runBash(`
|
||||
# Override ensure_gh_cli to fail
|
||||
ensure_gh_cli() { return 1; }
|
||||
ensure_github_auth 2>&1
|
||||
`);
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should fail if ensure_gh_auth fails after ensure_gh_cli succeeds", () => {
|
||||
const result = runBash(`
|
||||
# Override ensure_gh_cli to succeed
|
||||
ensure_gh_cli() { return 0; }
|
||||
# Override ensure_gh_auth to fail
|
||||
ensure_gh_auth() { return 1; }
|
||||
ensure_github_auth 2>&1
|
||||
`);
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should succeed when both steps succeed", () => {
|
||||
const result = runBash(`
|
||||
ensure_gh_cli() { return 0; }
|
||||
ensure_gh_auth() { return 0; }
|
||||
ensure_github_auth 2>&1
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
});
|
||||
|
||||
// ── Direct Execution Mode ───────────────────────────────────────────────
|
||||
|
||||
describe("direct execution mode", () => {
|
||||
it("should run ensure_github_auth when executed directly", () => {
|
||||
// The script has a check: if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
|
||||
// When executed directly, it should set -eo pipefail and call ensure_github_auth
|
||||
const result = runBash(`
|
||||
# Check that the direct execution block exists
|
||||
grep -q 'BASH_SOURCE\\[0\\].*==.*\\$.*0' "${GITHUB_AUTH_SH}"
|
||||
echo "direct_exec_check_found"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("direct_exec_check_found");
|
||||
});
|
||||
|
||||
it("should set -eo pipefail in direct execution mode", () => {
|
||||
const result = runBash(`
|
||||
grep -q 'set -eo pipefail' "${GITHUB_AUTH_SH}"
|
||||
echo "pipefail_found"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("pipefail_found");
|
||||
});
|
||||
});
|
||||
|
||||
// ── Script Structure and Conventions ────────────────────────────────────
|
||||
|
||||
describe("script structure and conventions", () => {
|
||||
it("should start with bash shebang", () => {
|
||||
const result = runRawBash(`head -1 "${GITHUB_AUTH_SH}"`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("#!/bin/bash");
|
||||
});
|
||||
|
||||
it("should source shared/common.sh with local-or-remote fallback", () => {
|
||||
const result = runRawBash(`
|
||||
grep -q 'source.*common.sh' "${GITHUB_AUTH_SH}" && \
|
||||
grep -q 'curl.*common.sh' "${GITHUB_AUTH_SH}" && \
|
||||
echo "fallback_pattern_found"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("fallback_pattern_found");
|
||||
});
|
||||
|
||||
it("should use SCRIPT_DIR for path resolution", () => {
|
||||
const result = runRawBash(`grep -q 'SCRIPT_DIR=' "${GITHUB_AUTH_SH}" && echo "found"`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("found");
|
||||
});
|
||||
|
||||
it("should use raw.githubusercontent.com for remote fallback", () => {
|
||||
const result = runRawBash(
|
||||
`grep -q 'raw.githubusercontent.com/OpenRouterTeam/spawn/main' "${GITHUB_AUTH_SH}" && echo "found"`
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("found");
|
||||
});
|
||||
|
||||
it("should not use echo -e (macOS bash 3.x compat)", () => {
|
||||
const result = runRawBash(`grep -n 'echo -e' "${GITHUB_AUTH_SH}" | wc -l`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout.trim()).toBe("0");
|
||||
});
|
||||
|
||||
it("should not use source <(cmd) pattern (macOS compat)", () => {
|
||||
const result = runRawBash(`grep -n 'source <(' "${GITHUB_AUTH_SH}" | wc -l`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout.trim()).toBe("0");
|
||||
});
|
||||
|
||||
it("should use printf instead of echo -e for formatted output", () => {
|
||||
const result = runRawBash(`grep -c 'printf' "${GITHUB_AUTH_SH}"`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
const count = parseInt(result.stdout.trim(), 10);
|
||||
expect(count).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
it("should not use set -u (nounset) flag", () => {
|
||||
const result = runRawBash(`grep -n 'set.*-.*u' "${GITHUB_AUTH_SH}" | grep -v 'set -eo' | wc -l`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
// Should have 0 lines with set -u (the set -eo pipefail line is fine)
|
||||
expect(result.stdout.trim()).toBe("0");
|
||||
});
|
||||
|
||||
it("should use ${VAR:-} for optional env var checks", () => {
|
||||
// Check GITHUB_TOKEN is accessed safely
|
||||
const result = runRawBash(`grep -q 'GITHUB_TOKEN:-' "${GITHUB_AUTH_SH}" && echo "safe"`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("safe");
|
||||
});
|
||||
|
||||
it("should use ${SCRIPT_DIR:-} for optional SCRIPT_DIR check", () => {
|
||||
const result = runRawBash(`grep -q 'SCRIPT_DIR:-' "${GITHUB_AUTH_SH}" && echo "safe"`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("safe");
|
||||
});
|
||||
});
|
||||
|
||||
// ── ensure_gh_cli installation paths ────────────────────────────────────
|
||||
|
||||
describe("ensure_gh_cli installation paths", () => {
|
||||
it("should check OSTYPE for macOS detection", () => {
|
||||
const result = runRawBash(`grep -q 'OSTYPE.*darwin' "${GITHUB_AUTH_SH}" && echo "found"`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("found");
|
||||
});
|
||||
|
||||
it("should check OSTYPE for linux detection", () => {
|
||||
const result = runRawBash(`grep -q 'OSTYPE.*linux-gnu' "${GITHUB_AUTH_SH}" && echo "found"`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("found");
|
||||
});
|
||||
|
||||
it("should add GitHub CLI APT repository on Debian/Ubuntu", () => {
|
||||
const result = runRawBash(
|
||||
`grep -q 'githubcli-archive-keyring.gpg' "${GITHUB_AUTH_SH}" && echo "found"`
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("found");
|
||||
});
|
||||
|
||||
it("should handle Homebrew not being installed on macOS", () => {
|
||||
const result = runRawBash(
|
||||
`grep -q 'Homebrew not found' "${GITHUB_AUTH_SH}" && echo "found"`
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("found");
|
||||
});
|
||||
});
|
||||
|
||||
// ── Error handling edge cases ───────────────────────────────────────────
|
||||
|
||||
describe("error handling edge cases", () => {
|
||||
it("should return 1 from ensure_gh_cli when brew install fails", () => {
|
||||
const result = runBash(`
|
||||
type ensure_gh_cli | grep -q 'return 1'
|
||||
echo "return_1_found"
|
||||
`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("return_1_found");
|
||||
});
|
||||
|
||||
it("should provide helpful error message when installation fails on macOS without brew", () => {
|
||||
const result = runRawBash(
|
||||
`grep -q 'Install Homebrew first' "${GITHUB_AUTH_SH}" && echo "found"`
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("found");
|
||||
});
|
||||
|
||||
it("should suggest manual install URL for unsupported platforms", () => {
|
||||
const result = runRawBash(
|
||||
`grep -q 'cli.github.com' "${GITHUB_AUTH_SH}" && echo "found"`
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("found");
|
||||
});
|
||||
|
||||
it("should suggest gh auth login in error message on auth failure", () => {
|
||||
const result = runRawBash(
|
||||
`grep -q 'gh auth login' "${GITHUB_AUTH_SH}" && echo "found"`
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("found");
|
||||
});
|
||||
|
||||
it("should handle _install_gh_binary curl failure for version fetch", () => {
|
||||
const result = runBash(`
|
||||
uname() {
|
||||
if [[ "$1" == "-s" ]]; then echo "Linux";
|
||||
elif [[ "$1" == "-m" ]]; then echo "x86_64";
|
||||
fi
|
||||
}
|
||||
curl() { return 1; }
|
||||
_install_gh_binary 2>&1
|
||||
`);
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should handle _install_gh_binary tarball download failure", () => {
|
||||
const result = runBash(`
|
||||
uname() {
|
||||
if [[ "$1" == "-s" ]]; then echo "Linux";
|
||||
elif [[ "$1" == "-m" ]]; then echo "x86_64";
|
||||
fi
|
||||
}
|
||||
call_count=0
|
||||
curl() {
|
||||
call_count=$((call_count + 1))
|
||||
if [[ "$call_count" -eq 1 ]]; then
|
||||
# First call: version fetch succeeds
|
||||
echo '"tag_name": "v2.50.0"'
|
||||
return 0
|
||||
fi
|
||||
# Second call: download fails
|
||||
return 1
|
||||
}
|
||||
_install_gh_binary 2>&1
|
||||
`);
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should handle _install_gh_binary tar extraction failure", () => {
|
||||
const tmpDir = createTempDir();
|
||||
const fakeTarball = join(tmpDir, "fake.tar.gz");
|
||||
writeFileSync(fakeTarball, "not a real tarball");
|
||||
|
||||
const result = runBash(`
|
||||
uname() {
|
||||
if [[ "$1" == "-s" ]]; then echo "Linux";
|
||||
elif [[ "$1" == "-m" ]]; then echo "x86_64";
|
||||
fi
|
||||
}
|
||||
call_count=0
|
||||
curl() {
|
||||
call_count=$((call_count + 1))
|
||||
if [[ "$1" == "-fsSL" && "$2" =~ api.github.com ]]; then
|
||||
echo '"tag_name": "v2.50.0"'
|
||||
return 0
|
||||
fi
|
||||
# Download: write garbage to the output file
|
||||
local outfile=""
|
||||
while [[ $# -gt 0 ]]; do
|
||||
if [[ "$1" == "-o" ]]; then outfile="$2"; shift; shift; continue; fi
|
||||
shift
|
||||
done
|
||||
if [[ -n "$outfile" ]]; then
|
||||
echo "not a tarball" > "$outfile"
|
||||
fi
|
||||
return 0
|
||||
}
|
||||
_install_gh_binary 2>&1
|
||||
`);
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
|
||||
rmSync(tmpDir, { recursive: true, force: true });
|
||||
});
|
||||
});
|
||||
|
||||
// ── GITHUB_TOKEN piping security ────────────────────────────────────────
|
||||
|
||||
describe("GITHUB_TOKEN handling security", () => {
|
||||
it("should pipe token via printf (not command line arg)", () => {
|
||||
// The script uses: printf '%s\n' "${_gh_token}" | gh auth login --with-token
|
||||
// (_gh_token is a local copy of GITHUB_TOKEN, used after unsetting the env var)
|
||||
// This avoids exposing the token in process args
|
||||
const result = runRawBash(
|
||||
`grep -q "printf.*_gh_token.*gh auth login --with-token" "${GITHUB_AUTH_SH}" && echo "piped"`
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("piped");
|
||||
});
|
||||
|
||||
it("should not pass GITHUB_TOKEN as a command line argument", () => {
|
||||
// Ensure the token is never directly on the gh command line
|
||||
const result = runRawBash(
|
||||
`grep 'gh auth login.*GITHUB_TOKEN' "${GITHUB_AUTH_SH}" | wc -l`
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout.trim()).toBe("0");
|
||||
});
|
||||
});
|
||||
|
|
@ -1,776 +0,0 @@
|
|||
import { describe, it, expect, beforeEach, afterEach } from "bun:test";
|
||||
import { execSync } from "child_process";
|
||||
import { resolve, join } from "path";
|
||||
import { mkdirSync, writeFileSync, rmSync, existsSync, readFileSync, chmodSync } from "fs";
|
||||
import { tmpdir } from "os";
|
||||
|
||||
/**
|
||||
* Tests for shared/key-request.sh — credential loading and key server helpers.
|
||||
*
|
||||
* This file has zero existing test coverage. It provides:
|
||||
* - get_cloud_env_vars: Extract env var names for a cloud from manifest.json
|
||||
* - _parse_cloud_auths: Parse manifest for cloud auth specs (cloud_key|auth_string)
|
||||
* - _try_load_env_var: Load a single env var from a JSON config file
|
||||
* - _load_cloud_credentials: Load all env vars for one cloud provider
|
||||
* - load_cloud_keys_from_config: Full credential loader from ~/.config/spawn/
|
||||
* - request_missing_cloud_keys: Fire-and-forget POST to key server
|
||||
* - invalidate_cloud_key: Delete a cloud's config file (with path traversal guard)
|
||||
*
|
||||
* Each test sources shared/key-request.sh in a real bash subprocess.
|
||||
*
|
||||
* Agent: test-engineer
|
||||
*/
|
||||
|
||||
const REPO_ROOT = resolve(import.meta.dir, "../../..");
|
||||
const KEY_REQUEST_SH = resolve(REPO_ROOT, "shared/key-request.sh");
|
||||
|
||||
// ── Test helpers ────────────────────────────────────────────────────────────
|
||||
|
||||
let testDir: string;
|
||||
|
||||
beforeEach(() => {
|
||||
testDir = join(tmpdir(), `spawn-keyreq-test-${Date.now()}-${Math.random().toString(36).slice(2)}`);
|
||||
mkdirSync(testDir, { recursive: true });
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
if (existsSync(testDir)) {
|
||||
rmSync(testDir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
/** Create a minimal manifest.json for testing */
|
||||
function createTestManifest(clouds: Record<string, { auth: string }>): string {
|
||||
const manifest: any = {
|
||||
agents: { claude: { name: "Claude", description: "test", url: "", install: "", launch: "", env: {} } },
|
||||
clouds: {} as any,
|
||||
matrix: {},
|
||||
};
|
||||
for (const [key, def] of Object.entries(clouds)) {
|
||||
manifest.clouds[key] = {
|
||||
name: key,
|
||||
description: "test",
|
||||
url: "",
|
||||
type: "vm",
|
||||
auth: def.auth,
|
||||
provision_method: "api",
|
||||
exec_method: "ssh",
|
||||
interactive_method: "ssh",
|
||||
};
|
||||
}
|
||||
const path = join(testDir, "manifest.json");
|
||||
writeFileSync(path, JSON.stringify(manifest, null, 2));
|
||||
return path;
|
||||
}
|
||||
|
||||
/** Run a bash snippet that sources key-request.sh. */
|
||||
function runBash(
|
||||
script: string,
|
||||
env?: Record<string, string>,
|
||||
): { exitCode: number; stdout: string; stderr: string } {
|
||||
const fullScript = `source "${KEY_REQUEST_SH}"\n${script}`;
|
||||
const mergedEnv = {
|
||||
...process.env,
|
||||
HOME: testDir,
|
||||
REPO_ROOT: testDir,
|
||||
...(env || {}),
|
||||
};
|
||||
try {
|
||||
const stdout = execSync(`bash -c '${fullScript.replace(/'/g, "'\\''")}'`, {
|
||||
encoding: "utf-8",
|
||||
timeout: 10000,
|
||||
stdio: ["pipe", "pipe", "pipe"],
|
||||
env: mergedEnv,
|
||||
});
|
||||
return { exitCode: 0, stdout: stdout.trim(), stderr: "" };
|
||||
} catch (err: any) {
|
||||
return {
|
||||
exitCode: err.status ?? 1,
|
||||
stdout: (err.stdout ?? "").toString().trim(),
|
||||
stderr: (err.stderr ?? "").toString().trim(),
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// get_cloud_env_vars
|
||||
// ============================================================================
|
||||
|
||||
describe("get_cloud_env_vars", () => {
|
||||
it("should return single env var for simple auth", () => {
|
||||
const manifestPath = createTestManifest({ hetzner: { auth: "HCLOUD_TOKEN" } });
|
||||
const result = runBash(`get_cloud_env_vars "hetzner"`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("HCLOUD_TOKEN");
|
||||
});
|
||||
|
||||
it("should return multiple env vars for multi-auth clouds", () => {
|
||||
const manifestPath = createTestManifest({ upcloud: { auth: "UPCLOUD_USERNAME + UPCLOUD_PASSWORD" } });
|
||||
const result = runBash(`get_cloud_env_vars "upcloud"`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
const lines = result.stdout.split("\n").filter(Boolean);
|
||||
expect(lines).toContain("UPCLOUD_USERNAME");
|
||||
expect(lines).toContain("UPCLOUD_PASSWORD");
|
||||
});
|
||||
|
||||
it("should return empty output for CLI-based auth (login)", () => {
|
||||
const manifestPath = createTestManifest({ sprite: { auth: "sprite login" } });
|
||||
const result = runBash(`get_cloud_env_vars "sprite"`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("");
|
||||
});
|
||||
|
||||
it("should return empty output for CLI-based auth (configure)", () => {
|
||||
const manifestPath = createTestManifest({ aws: { auth: "aws configure" } });
|
||||
const result = runBash(`get_cloud_env_vars "aws"`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("");
|
||||
});
|
||||
|
||||
it("should return empty output for CLI-based auth (setup)", () => {
|
||||
const manifestPath = createTestManifest({ local: { auth: "local setup" } });
|
||||
const result = runBash(`get_cloud_env_vars "local"`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("");
|
||||
});
|
||||
|
||||
it("should return empty for nonexistent cloud key", () => {
|
||||
createTestManifest({ hetzner: { auth: "HCLOUD_TOKEN" } });
|
||||
const result = runBash(`get_cloud_env_vars "nonexistent"`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("");
|
||||
});
|
||||
|
||||
it("should handle cloud with empty auth field", () => {
|
||||
createTestManifest({ noauth: { auth: "" } });
|
||||
const result = runBash(`get_cloud_env_vars "noauth"`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("");
|
||||
});
|
||||
});
|
||||
|
||||
// ============================================================================
|
||||
// _parse_cloud_auths
|
||||
// ============================================================================
|
||||
|
||||
describe("_parse_cloud_auths", () => {
|
||||
it("should output cloud_key|auth_string for API-token clouds", () => {
|
||||
const manifestPath = createTestManifest({
|
||||
hetzner: { auth: "HCLOUD_TOKEN" },
|
||||
vultr: { auth: "VULTR_API_KEY" },
|
||||
});
|
||||
const result = runBash(`_parse_cloud_auths "${manifestPath}"`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
const lines = result.stdout.split("\n").filter(Boolean);
|
||||
expect(lines).toContain("hetzner|HCLOUD_TOKEN");
|
||||
expect(lines).toContain("vultr|VULTR_API_KEY");
|
||||
});
|
||||
|
||||
it("should skip CLI-based auth clouds", () => {
|
||||
const manifestPath = createTestManifest({
|
||||
hetzner: { auth: "HCLOUD_TOKEN" },
|
||||
sprite: { auth: "sprite login" },
|
||||
aws: { auth: "aws configure" },
|
||||
});
|
||||
const result = runBash(`_parse_cloud_auths "${manifestPath}"`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("hetzner|HCLOUD_TOKEN");
|
||||
expect(result.stdout).not.toContain("sprite");
|
||||
expect(result.stdout).not.toContain("aws");
|
||||
});
|
||||
|
||||
it("should skip clouds with empty auth", () => {
|
||||
const manifestPath = createTestManifest({
|
||||
hetzner: { auth: "HCLOUD_TOKEN" },
|
||||
noauth: { auth: "" },
|
||||
});
|
||||
const result = runBash(`_parse_cloud_auths "${manifestPath}"`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("hetzner|HCLOUD_TOKEN");
|
||||
expect(result.stdout).not.toContain("noauth");
|
||||
});
|
||||
|
||||
it("should handle multi-credential auth strings", () => {
|
||||
const manifestPath = createTestManifest({
|
||||
upcloud: { auth: "UPCLOUD_USERNAME + UPCLOUD_PASSWORD" },
|
||||
});
|
||||
const result = runBash(`_parse_cloud_auths "${manifestPath}"`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("upcloud|UPCLOUD_USERNAME + UPCLOUD_PASSWORD");
|
||||
});
|
||||
|
||||
it("should return empty output for empty manifest clouds", () => {
|
||||
const manifestPath = createTestManifest({});
|
||||
const result = runBash(`_parse_cloud_auths "${manifestPath}"`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toBe("");
|
||||
});
|
||||
|
||||
it("should return empty for missing manifest file", () => {
|
||||
const result = runBash(`_parse_cloud_auths "/nonexistent/manifest.json"`);
|
||||
// python3 will fail silently due to 2>/dev/null
|
||||
expect(result.stdout).toBe("");
|
||||
});
|
||||
});
|
||||
|
||||
// ============================================================================
|
||||
// _try_load_env_var
|
||||
// ============================================================================
|
||||
|
||||
describe("_try_load_env_var", () => {
|
||||
it("should return 0 when env var is already set", () => {
|
||||
const result = runBash(
|
||||
`export MY_TOKEN="already-set"
|
||||
_try_load_env_var "MY_TOKEN" "/nonexistent/config.json"
|
||||
echo "exit=$?"`,
|
||||
{ MY_TOKEN: "already-set" },
|
||||
);
|
||||
expect(result.stdout).toContain("exit=0");
|
||||
});
|
||||
|
||||
it("should load value from config file when env var is not set", () => {
|
||||
const configDir = join(testDir, ".config", "spawn");
|
||||
mkdirSync(configDir, { recursive: true });
|
||||
writeFileSync(join(configDir, "test.json"), JSON.stringify({ MY_TOKEN: "from-config" }));
|
||||
|
||||
const result = runBash(
|
||||
`unset MY_TOKEN 2>/dev/null
|
||||
_try_load_env_var "MY_TOKEN" "${configDir}/test.json"
|
||||
echo "val=\${MY_TOKEN}"`,
|
||||
);
|
||||
expect(result.stdout).toContain("val=from-config");
|
||||
});
|
||||
|
||||
it("should return 1 when env var is not set and config file is missing", () => {
|
||||
const result = runBash(
|
||||
`unset MY_TOKEN 2>/dev/null
|
||||
_try_load_env_var "MY_TOKEN" "/nonexistent/config.json"`,
|
||||
);
|
||||
expect(result.exitCode).toBe(1);
|
||||
});
|
||||
|
||||
it("should return 1 when env var is not set and config file has no matching key", () => {
|
||||
const configDir = join(testDir, ".config", "spawn");
|
||||
mkdirSync(configDir, { recursive: true });
|
||||
writeFileSync(join(configDir, "test.json"), JSON.stringify({ OTHER_KEY: "value" }));
|
||||
|
||||
const result = runBash(
|
||||
`unset MY_TOKEN 2>/dev/null
|
||||
_try_load_env_var "MY_TOKEN" "${configDir}/test.json"`,
|
||||
);
|
||||
expect(result.exitCode).toBe(1);
|
||||
});
|
||||
|
||||
it("should fall back to api_key field in config file", () => {
|
||||
const configDir = join(testDir, ".config", "spawn");
|
||||
mkdirSync(configDir, { recursive: true });
|
||||
writeFileSync(join(configDir, "test.json"), JSON.stringify({ api_key: "fallback-key" }));
|
||||
|
||||
const result = runBash(
|
||||
`unset MY_TOKEN 2>/dev/null
|
||||
_try_load_env_var "MY_TOKEN" "${configDir}/test.json"
|
||||
echo "val=\${MY_TOKEN}"`,
|
||||
);
|
||||
expect(result.stdout).toContain("val=fallback-key");
|
||||
});
|
||||
|
||||
it("should fall back to token field in config file", () => {
|
||||
const configDir = join(testDir, ".config", "spawn");
|
||||
mkdirSync(configDir, { recursive: true });
|
||||
writeFileSync(join(configDir, "test.json"), JSON.stringify({ token: "token-value" }));
|
||||
|
||||
const result = runBash(
|
||||
`unset MY_TOKEN 2>/dev/null
|
||||
_try_load_env_var "MY_TOKEN" "${configDir}/test.json"
|
||||
echo "val=\${MY_TOKEN}"`,
|
||||
);
|
||||
expect(result.stdout).toContain("val=token-value");
|
||||
});
|
||||
|
||||
it("should prefer exact var name over api_key fallback", () => {
|
||||
const configDir = join(testDir, ".config", "spawn");
|
||||
mkdirSync(configDir, { recursive: true });
|
||||
writeFileSync(
|
||||
join(configDir, "test.json"),
|
||||
JSON.stringify({ MY_TOKEN: "exact-match", api_key: "fallback" }),
|
||||
);
|
||||
|
||||
const result = runBash(
|
||||
`unset MY_TOKEN 2>/dev/null
|
||||
_try_load_env_var "MY_TOKEN" "${configDir}/test.json"
|
||||
echo "val=\${MY_TOKEN}"`,
|
||||
);
|
||||
expect(result.stdout).toContain("val=exact-match");
|
||||
});
|
||||
|
||||
it("should export the loaded variable", () => {
|
||||
const configDir = join(testDir, ".config", "spawn");
|
||||
mkdirSync(configDir, { recursive: true });
|
||||
writeFileSync(join(configDir, "test.json"), JSON.stringify({ MY_TOKEN: "exported-val" }));
|
||||
|
||||
const result = runBash(
|
||||
`unset MY_TOKEN 2>/dev/null
|
||||
_try_load_env_var "MY_TOKEN" "${configDir}/test.json"
|
||||
# Verify it's exported (available in subshell)
|
||||
bash -c 'echo "sub=\${MY_TOKEN}"'`,
|
||||
);
|
||||
expect(result.stdout).toContain("sub=exported-val");
|
||||
});
|
||||
|
||||
it("should return 1 when config file contains empty value for the key", () => {
|
||||
const configDir = join(testDir, ".config", "spawn");
|
||||
mkdirSync(configDir, { recursive: true });
|
||||
writeFileSync(join(configDir, "test.json"), JSON.stringify({ MY_TOKEN: "" }));
|
||||
|
||||
const result = runBash(
|
||||
`unset MY_TOKEN 2>/dev/null
|
||||
_try_load_env_var "MY_TOKEN" "${configDir}/test.json"`,
|
||||
);
|
||||
expect(result.exitCode).toBe(1);
|
||||
});
|
||||
|
||||
it("should handle malformed JSON gracefully", () => {
|
||||
const configDir = join(testDir, ".config", "spawn");
|
||||
mkdirSync(configDir, { recursive: true });
|
||||
writeFileSync(join(configDir, "test.json"), "not valid json{{{");
|
||||
|
||||
const result = runBash(
|
||||
`unset MY_TOKEN 2>/dev/null
|
||||
_try_load_env_var "MY_TOKEN" "${configDir}/test.json"`,
|
||||
);
|
||||
expect(result.exitCode).toBe(1);
|
||||
});
|
||||
});
|
||||
|
||||
// ============================================================================
|
||||
// _load_cloud_credentials
|
||||
// ============================================================================
|
||||
|
||||
describe("_load_cloud_credentials", () => {
|
||||
it("should load single-var credentials from config file", () => {
|
||||
const configDir = join(testDir, ".config", "spawn");
|
||||
mkdirSync(configDir, { recursive: true });
|
||||
writeFileSync(join(configDir, "hetzner.json"), JSON.stringify({ HCLOUD_TOKEN: "test-token" }));
|
||||
|
||||
const result = runBash(
|
||||
`unset HCLOUD_TOKEN 2>/dev/null
|
||||
_load_cloud_credentials "hetzner" "HCLOUD_TOKEN"
|
||||
echo "exit=$?"
|
||||
echo "val=\${HCLOUD_TOKEN}"`,
|
||||
);
|
||||
expect(result.stdout).toContain("exit=0");
|
||||
expect(result.stdout).toContain("val=test-token");
|
||||
});
|
||||
|
||||
it("should load multi-var credentials from config file", () => {
|
||||
const configDir = join(testDir, ".config", "spawn");
|
||||
mkdirSync(configDir, { recursive: true });
|
||||
writeFileSync(
|
||||
join(configDir, "upcloud.json"),
|
||||
JSON.stringify({ UPCLOUD_USERNAME: "user", UPCLOUD_PASSWORD: "pass" }),
|
||||
);
|
||||
|
||||
const result = runBash(
|
||||
`unset UPCLOUD_USERNAME UPCLOUD_PASSWORD 2>/dev/null
|
||||
_load_cloud_credentials "upcloud" "UPCLOUD_USERNAME + UPCLOUD_PASSWORD"
|
||||
echo "exit=$?"
|
||||
echo "u=\${UPCLOUD_USERNAME}"
|
||||
echo "p=\${UPCLOUD_PASSWORD}"`,
|
||||
);
|
||||
expect(result.stdout).toContain("exit=0");
|
||||
expect(result.stdout).toContain("u=user");
|
||||
expect(result.stdout).toContain("p=pass");
|
||||
});
|
||||
|
||||
it("should return 1 when config file is missing", () => {
|
||||
const result = runBash(
|
||||
`unset HCLOUD_TOKEN 2>/dev/null
|
||||
_load_cloud_credentials "hetzner" "HCLOUD_TOKEN"`,
|
||||
);
|
||||
expect(result.exitCode).toBe(1);
|
||||
});
|
||||
|
||||
it("should return 0 when env vars are already set", () => {
|
||||
const result = runBash(
|
||||
`export HCLOUD_TOKEN="already-set"
|
||||
_load_cloud_credentials "hetzner" "HCLOUD_TOKEN"`,
|
||||
{ HCLOUD_TOKEN: "already-set" },
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should return 1 when only some multi-cred vars are available", () => {
|
||||
const configDir = join(testDir, ".config", "spawn");
|
||||
mkdirSync(configDir, { recursive: true });
|
||||
writeFileSync(
|
||||
join(configDir, "upcloud.json"),
|
||||
JSON.stringify({ UPCLOUD_USERNAME: "user" }),
|
||||
// Note: UPCLOUD_PASSWORD is missing
|
||||
);
|
||||
|
||||
const result = runBash(
|
||||
`unset UPCLOUD_USERNAME UPCLOUD_PASSWORD 2>/dev/null
|
||||
_load_cloud_credentials "upcloud" "UPCLOUD_USERNAME + UPCLOUD_PASSWORD"`,
|
||||
);
|
||||
expect(result.exitCode).toBe(1);
|
||||
});
|
||||
});
|
||||
|
||||
// ============================================================================
|
||||
// load_cloud_keys_from_config
|
||||
// ============================================================================
|
||||
|
||||
describe("load_cloud_keys_from_config", () => {
|
||||
it("should log key preflight count", () => {
|
||||
createTestManifest({
|
||||
hetzner: { auth: "HCLOUD_TOKEN" },
|
||||
});
|
||||
const configDir = join(testDir, ".config", "spawn");
|
||||
mkdirSync(configDir, { recursive: true });
|
||||
writeFileSync(join(configDir, "hetzner.json"), JSON.stringify({ HCLOUD_TOKEN: "token123" }));
|
||||
|
||||
const result = runBash(
|
||||
`unset HCLOUD_TOKEN 2>/dev/null
|
||||
load_cloud_keys_from_config 2>&1`,
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).toContain("Key preflight:");
|
||||
expect(result.stdout).toContain("1/1");
|
||||
});
|
||||
|
||||
it("should set MISSING_KEY_PROVIDERS for clouds without keys", () => {
|
||||
createTestManifest({
|
||||
hetzner: { auth: "HCLOUD_TOKEN" },
|
||||
vultr: { auth: "VULTR_API_KEY" },
|
||||
});
|
||||
|
||||
const result = runBash(
|
||||
`unset HCLOUD_TOKEN VULTR_API_KEY 2>/dev/null
|
||||
load_cloud_keys_from_config 2>/dev/null
|
||||
echo "missing=\${MISSING_KEY_PROVIDERS}"`,
|
||||
);
|
||||
expect(result.stdout).toContain("hetzner");
|
||||
expect(result.stdout).toContain("vultr");
|
||||
});
|
||||
|
||||
it("should not include loaded clouds in MISSING_KEY_PROVIDERS", () => {
|
||||
createTestManifest({
|
||||
hetzner: { auth: "HCLOUD_TOKEN" },
|
||||
vultr: { auth: "VULTR_API_KEY" },
|
||||
});
|
||||
const configDir = join(testDir, ".config", "spawn");
|
||||
mkdirSync(configDir, { recursive: true });
|
||||
writeFileSync(join(configDir, "hetzner.json"), JSON.stringify({ HCLOUD_TOKEN: "token123" }));
|
||||
|
||||
const result = runBash(
|
||||
`unset HCLOUD_TOKEN VULTR_API_KEY 2>/dev/null
|
||||
load_cloud_keys_from_config 2>/dev/null
|
||||
echo "missing=\${MISSING_KEY_PROVIDERS}"`,
|
||||
);
|
||||
expect(result.stdout).not.toContain("hetzner");
|
||||
expect(result.stdout).toContain("vultr");
|
||||
});
|
||||
|
||||
it("should skip CLI-based auth clouds", () => {
|
||||
createTestManifest({
|
||||
hetzner: { auth: "HCLOUD_TOKEN" },
|
||||
sprite: { auth: "sprite login" },
|
||||
});
|
||||
|
||||
const result = runBash(
|
||||
`unset HCLOUD_TOKEN 2>/dev/null
|
||||
load_cloud_keys_from_config 2>/dev/null
|
||||
echo "missing=\${MISSING_KEY_PROVIDERS}"`,
|
||||
);
|
||||
// sprite should not appear since it uses CLI-based auth
|
||||
expect(result.stdout).not.toContain("sprite");
|
||||
expect(result.stdout).toContain("hetzner");
|
||||
});
|
||||
|
||||
it("should return 1 when manifest.json is missing", () => {
|
||||
// Don't create a manifest
|
||||
const result = runBash(`load_cloud_keys_from_config 2>&1`);
|
||||
expect(result.exitCode).toBe(1);
|
||||
expect(result.stdout).toContain("manifest.json not found");
|
||||
});
|
||||
|
||||
it("should export loaded env vars", () => {
|
||||
createTestManifest({ hetzner: { auth: "HCLOUD_TOKEN" } });
|
||||
const configDir = join(testDir, ".config", "spawn");
|
||||
mkdirSync(configDir, { recursive: true });
|
||||
writeFileSync(join(configDir, "hetzner.json"), JSON.stringify({ HCLOUD_TOKEN: "loaded-token" }));
|
||||
|
||||
const result = runBash(
|
||||
`unset HCLOUD_TOKEN 2>/dev/null
|
||||
load_cloud_keys_from_config 2>/dev/null
|
||||
echo "token=\${HCLOUD_TOKEN}"`,
|
||||
);
|
||||
expect(result.stdout).toContain("token=loaded-token");
|
||||
});
|
||||
|
||||
it("should handle empty MISSING_KEY_PROVIDERS when all keys are loaded", () => {
|
||||
createTestManifest({ hetzner: { auth: "HCLOUD_TOKEN" } });
|
||||
const configDir = join(testDir, ".config", "spawn");
|
||||
mkdirSync(configDir, { recursive: true });
|
||||
writeFileSync(join(configDir, "hetzner.json"), JSON.stringify({ HCLOUD_TOKEN: "token123" }));
|
||||
|
||||
const result = runBash(
|
||||
`unset HCLOUD_TOKEN 2>/dev/null
|
||||
load_cloud_keys_from_config 2>/dev/null
|
||||
echo "missing=[\${MISSING_KEY_PROVIDERS}]"`,
|
||||
);
|
||||
expect(result.stdout).toContain("missing=[]");
|
||||
});
|
||||
|
||||
it("should count correctly with multiple clouds", () => {
|
||||
createTestManifest({
|
||||
hetzner: { auth: "HCLOUD_TOKEN" },
|
||||
vultr: { auth: "VULTR_API_KEY" },
|
||||
linode: { auth: "LINODE_TOKEN" },
|
||||
});
|
||||
const configDir = join(testDir, ".config", "spawn");
|
||||
mkdirSync(configDir, { recursive: true });
|
||||
writeFileSync(join(configDir, "hetzner.json"), JSON.stringify({ HCLOUD_TOKEN: "t1" }));
|
||||
writeFileSync(join(configDir, "vultr.json"), JSON.stringify({ VULTR_API_KEY: "t2" }));
|
||||
|
||||
const result = runBash(
|
||||
`unset HCLOUD_TOKEN VULTR_API_KEY LINODE_TOKEN 2>/dev/null
|
||||
load_cloud_keys_from_config 2>&1`,
|
||||
);
|
||||
expect(result.stdout).toContain("2/3");
|
||||
expect(result.stdout).toContain("linode");
|
||||
});
|
||||
});
|
||||
|
||||
// ============================================================================
|
||||
// invalidate_cloud_key
|
||||
// ============================================================================
|
||||
|
||||
describe("invalidate_cloud_key", () => {
|
||||
it("should delete the config file for a valid provider", () => {
|
||||
const configDir = join(testDir, ".config", "spawn");
|
||||
mkdirSync(configDir, { recursive: true });
|
||||
const configFile = join(configDir, "hetzner.json");
|
||||
writeFileSync(configFile, JSON.stringify({ HCLOUD_TOKEN: "secret" }));
|
||||
|
||||
const result = runBash(`invalidate_cloud_key "hetzner" 2>&1`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(existsSync(configFile)).toBe(false);
|
||||
expect(result.stdout).toContain("Invalidated key config for hetzner");
|
||||
});
|
||||
|
||||
it("should succeed silently when config file does not exist", () => {
|
||||
const result = runBash(`invalidate_cloud_key "hetzner" 2>&1`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should reject path traversal attempts (..)", () => {
|
||||
const result = runBash(`invalidate_cloud_key "../etc/passwd" 2>&1`);
|
||||
expect(result.exitCode).toBe(1);
|
||||
expect(result.stdout).toContain("invalid provider name");
|
||||
});
|
||||
|
||||
it("should reject provider names starting with a hyphen", () => {
|
||||
const result = runBash(`invalidate_cloud_key "-badname" 2>&1`);
|
||||
expect(result.exitCode).toBe(1);
|
||||
expect(result.stdout).toContain("invalid provider name");
|
||||
});
|
||||
|
||||
it("should reject provider names with slashes", () => {
|
||||
const result = runBash(`invalidate_cloud_key "foo/bar" 2>&1`);
|
||||
expect(result.exitCode).toBe(1);
|
||||
expect(result.stdout).toContain("invalid provider name");
|
||||
});
|
||||
|
||||
it("should reject empty provider name", () => {
|
||||
const result = runBash(`invalidate_cloud_key "" 2>&1`);
|
||||
expect(result.exitCode).toBe(1);
|
||||
expect(result.stdout).toContain("invalid provider name");
|
||||
});
|
||||
|
||||
it("should reject provider names longer than 64 characters", () => {
|
||||
const longName = "a".repeat(65);
|
||||
const result = runBash(`invalidate_cloud_key "${longName}" 2>&1`);
|
||||
expect(result.exitCode).toBe(1);
|
||||
expect(result.stdout).toContain("invalid provider name");
|
||||
});
|
||||
|
||||
it("should accept valid provider names with dots, hyphens, underscores", () => {
|
||||
const configDir = join(testDir, ".config", "spawn");
|
||||
mkdirSync(configDir, { recursive: true });
|
||||
const configFile = join(configDir, "my-cloud_v2.json");
|
||||
writeFileSync(configFile, JSON.stringify({ TOKEN: "val" }));
|
||||
|
||||
const result = runBash(`invalidate_cloud_key "my-cloud_v2" 2>&1`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(existsSync(configFile)).toBe(false);
|
||||
});
|
||||
|
||||
it("should accept exactly 64-character provider names", () => {
|
||||
const name = "a".repeat(64);
|
||||
const result = runBash(`invalidate_cloud_key "${name}" 2>&1`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should reject provider names with spaces", () => {
|
||||
const result = runBash(`invalidate_cloud_key "bad name" 2>&1`);
|
||||
expect(result.exitCode).toBe(1);
|
||||
expect(result.stdout).toContain("invalid provider name");
|
||||
});
|
||||
|
||||
it("should reject provider names with uppercase letters", () => {
|
||||
const result = runBash(`invalidate_cloud_key "BadName" 2>&1`);
|
||||
expect(result.exitCode).toBe(1);
|
||||
expect(result.stdout).toContain("invalid provider name");
|
||||
});
|
||||
|
||||
it("should not delete non-JSON files or directories", () => {
|
||||
const configDir = join(testDir, ".config", "spawn");
|
||||
mkdirSync(configDir, { recursive: true });
|
||||
// Only hetzner.json should be targeted, not a directory
|
||||
const dirToPreserve = join(configDir, "hetzner");
|
||||
mkdirSync(dirToPreserve, { recursive: true });
|
||||
|
||||
const result = runBash(`invalidate_cloud_key "hetzner" 2>&1`);
|
||||
expect(result.exitCode).toBe(0);
|
||||
// The function uses -f (regular file check), so directory should be preserved
|
||||
expect(existsSync(dirToPreserve)).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
// ============================================================================
|
||||
// request_missing_cloud_keys
|
||||
// ============================================================================
|
||||
|
||||
describe("request_missing_cloud_keys", () => {
|
||||
it("should skip silently when KEY_SERVER_URL is not set", () => {
|
||||
const result = runBash(
|
||||
`unset KEY_SERVER_URL 2>/dev/null
|
||||
MISSING_KEY_PROVIDERS="hetzner vultr"
|
||||
request_missing_cloud_keys 2>&1`,
|
||||
{ KEY_SERVER_URL: "" },
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).not.toContain("Requesting keys");
|
||||
});
|
||||
|
||||
it("should warn when KEY_SERVER_SECRET is empty", () => {
|
||||
const result = runBash(
|
||||
`export KEY_SERVER_URL="http://localhost:9999"
|
||||
unset KEY_SERVER_SECRET 2>/dev/null
|
||||
MISSING_KEY_PROVIDERS="hetzner"
|
||||
request_missing_cloud_keys 2>&1`,
|
||||
{ KEY_SERVER_URL: "http://localhost:9999", KEY_SERVER_SECRET: "" },
|
||||
);
|
||||
expect(result.stdout).toContain("KEY_SERVER_SECRET is empty");
|
||||
});
|
||||
|
||||
it("should skip when MISSING_KEY_PROVIDERS is empty", () => {
|
||||
const result = runBash(
|
||||
`export KEY_SERVER_URL="http://localhost:9999"
|
||||
export KEY_SERVER_SECRET="secret123"
|
||||
MISSING_KEY_PROVIDERS=""
|
||||
request_missing_cloud_keys 2>&1`,
|
||||
{ KEY_SERVER_URL: "http://localhost:9999", KEY_SERVER_SECRET: "secret123" },
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
expect(result.stdout).not.toContain("Requesting keys");
|
||||
});
|
||||
|
||||
it("should log the request when all params are set", () => {
|
||||
// Override curl to avoid network calls; the log message happens before the background curl
|
||||
const result = runBash(
|
||||
`export KEY_SERVER_URL="http://localhost:1"
|
||||
export KEY_SERVER_SECRET="secret123"
|
||||
MISSING_KEY_PROVIDERS="hetzner vultr"
|
||||
# Override curl so the background job completes instantly
|
||||
curl() { return 0; }
|
||||
export -f curl
|
||||
request_missing_cloud_keys 2>&1
|
||||
wait 2>/dev/null`,
|
||||
{ KEY_SERVER_URL: "http://localhost:1", KEY_SERVER_SECRET: "secret123" },
|
||||
);
|
||||
expect(result.stdout).toContain("Requesting keys for: hetzner vultr");
|
||||
});
|
||||
});
|
||||
|
||||
// ============================================================================
|
||||
// Integration: end-to-end key loading
|
||||
// ============================================================================
|
||||
|
||||
describe("key-request integration", () => {
|
||||
it("should load env vars that persist for subsequent commands", () => {
|
||||
createTestManifest({
|
||||
hetzner: { auth: "HCLOUD_TOKEN" },
|
||||
});
|
||||
const configDir = join(testDir, ".config", "spawn");
|
||||
mkdirSync(configDir, { recursive: true });
|
||||
writeFileSync(join(configDir, "hetzner.json"), JSON.stringify({ HCLOUD_TOKEN: "my-token" }));
|
||||
|
||||
const result = runBash(
|
||||
`unset HCLOUD_TOKEN 2>/dev/null
|
||||
load_cloud_keys_from_config 2>/dev/null
|
||||
# Verify env var is available for child processes
|
||||
bash -c 'echo "child-token=\${HCLOUD_TOKEN}"'`,
|
||||
);
|
||||
expect(result.stdout).toContain("child-token=my-token");
|
||||
});
|
||||
|
||||
it("should handle manifest with mixed auth types", () => {
|
||||
createTestManifest({
|
||||
hetzner: { auth: "HCLOUD_TOKEN" },
|
||||
sprite: { auth: "sprite login" },
|
||||
upcloud: { auth: "UPCLOUD_USERNAME + UPCLOUD_PASSWORD" },
|
||||
});
|
||||
const configDir = join(testDir, ".config", "spawn");
|
||||
mkdirSync(configDir, { recursive: true });
|
||||
writeFileSync(join(configDir, "hetzner.json"), JSON.stringify({ HCLOUD_TOKEN: "t1" }));
|
||||
writeFileSync(join(configDir, "upcloud.json"), JSON.stringify({ UPCLOUD_USERNAME: "u", UPCLOUD_PASSWORD: "p" }));
|
||||
|
||||
const result = runBash(
|
||||
`unset HCLOUD_TOKEN UPCLOUD_USERNAME UPCLOUD_PASSWORD 2>/dev/null
|
||||
load_cloud_keys_from_config 2>&1`,
|
||||
);
|
||||
expect(result.exitCode).toBe(0);
|
||||
// 2 API-token clouds (hetzner + upcloud), both loaded; sprite skipped (CLI auth)
|
||||
expect(result.stdout).toContain("2/2");
|
||||
});
|
||||
|
||||
it("should invalidate and then fail to load a key", () => {
|
||||
createTestManifest({ hetzner: { auth: "HCLOUD_TOKEN" } });
|
||||
const configDir = join(testDir, ".config", "spawn");
|
||||
mkdirSync(configDir, { recursive: true });
|
||||
writeFileSync(join(configDir, "hetzner.json"), JSON.stringify({ HCLOUD_TOKEN: "token123" }));
|
||||
|
||||
const result = runBash(
|
||||
`unset HCLOUD_TOKEN 2>/dev/null
|
||||
# First load should succeed
|
||||
load_cloud_keys_from_config 2>/dev/null
|
||||
echo "before=\${HCLOUD_TOKEN}"
|
||||
# Invalidate the key
|
||||
unset HCLOUD_TOKEN 2>/dev/null
|
||||
invalidate_cloud_key "hetzner" 2>/dev/null
|
||||
# Second load should fail (config file deleted)
|
||||
load_cloud_keys_from_config 2>/dev/null
|
||||
echo "after=\${HCLOUD_TOKEN:-missing}"`,
|
||||
);
|
||||
expect(result.stdout).toContain("before=token123");
|
||||
expect(result.stdout).toContain("after=missing");
|
||||
});
|
||||
|
||||
it("should prefer env vars over config file values", () => {
|
||||
createTestManifest({ hetzner: { auth: "HCLOUD_TOKEN" } });
|
||||
const configDir = join(testDir, ".config", "spawn");
|
||||
mkdirSync(configDir, { recursive: true });
|
||||
writeFileSync(join(configDir, "hetzner.json"), JSON.stringify({ HCLOUD_TOKEN: "from-config" }));
|
||||
|
||||
const result = runBash(
|
||||
`export HCLOUD_TOKEN="from-env"
|
||||
load_cloud_keys_from_config 2>/dev/null
|
||||
echo "val=\${HCLOUD_TOKEN}"`,
|
||||
{ HCLOUD_TOKEN: "from-env" },
|
||||
);
|
||||
// Env var should win over config file
|
||||
expect(result.stdout).toContain("val=from-env");
|
||||
});
|
||||
});
|
||||
|
|
@ -1,258 +0,0 @@
|
|||
import { describe, it, expect } from "bun:test";
|
||||
import { execSync } from "child_process";
|
||||
import { resolve } from "path";
|
||||
|
||||
/**
|
||||
* Tests for showInfoOrError in index.ts (lines 85-110).
|
||||
*
|
||||
* This function has zero direct test coverage. It handles the single-argument
|
||||
* case where a user types "spawn <name>" and the name could be:
|
||||
* - A valid agent key -> shows agent info (cmdAgentInfo)
|
||||
* - A valid cloud key -> shows cloud info (cmdCloudInfo)
|
||||
* - An unknown name -> shows "Unknown agent or cloud" with fuzzy suggestions
|
||||
*
|
||||
* Since showInfoOrError is not exported and calls loadManifest + process.exit,
|
||||
* we test it by spawning bun subprocesses (same approach as index-main-routing.test.ts).
|
||||
*
|
||||
* These tests use the local manifest.json by explicitly unsetting NODE_ENV/BUN_ENV
|
||||
* in the subprocess environment so that loadManifest reads the project manifest.
|
||||
*
|
||||
* Agent: test-engineer
|
||||
*/
|
||||
|
||||
const CLI_DIR = resolve(import.meta.dir, "../..");
|
||||
|
||||
// Use the project root (which has manifest.json) as cwd
|
||||
const PROJECT_ROOT = resolve(CLI_DIR, "..");
|
||||
|
||||
function runCli(
|
||||
args: string[],
|
||||
env: Record<string, string> = {}
|
||||
): { stdout: string; stderr: string; exitCode: number } {
|
||||
// Quote each arg to handle spaces properly
|
||||
const quotedArgs = args.map(a => `'${a.replace(/'/g, "'\\''")}'`).join(" ");
|
||||
const cmd = `bun run ${CLI_DIR}/src/index.ts ${quotedArgs}`;
|
||||
try {
|
||||
const stdout = execSync(cmd, {
|
||||
cwd: PROJECT_ROOT,
|
||||
env: {
|
||||
// Start with clean env to avoid bun test's NODE_ENV=test leaking
|
||||
PATH: `${process.env.HOME}/.bun/bin:${process.env.PATH}`,
|
||||
HOME: process.env.HOME,
|
||||
SHELL: process.env.SHELL,
|
||||
TERM: process.env.TERM || "xterm",
|
||||
...env,
|
||||
SPAWN_NO_UPDATE_CHECK: "1",
|
||||
// Explicitly unset test env vars so local manifest.json is loaded
|
||||
NODE_ENV: "",
|
||||
BUN_ENV: "",
|
||||
},
|
||||
encoding: "utf-8",
|
||||
timeout: 15000,
|
||||
stdio: ["pipe", "pipe", "pipe"],
|
||||
});
|
||||
return { stdout, stderr: "", exitCode: 0 };
|
||||
} catch (err: any) {
|
||||
return {
|
||||
stdout: err.stdout || "",
|
||||
stderr: err.stderr || "",
|
||||
exitCode: err.status ?? 1,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
describe("showInfoOrError - single argument routing", () => {
|
||||
// ── Valid agent name: shows agent info ──────────────────────────────────
|
||||
|
||||
describe("valid agent name shows agent info", () => {
|
||||
it("should show agent info for 'claude'", () => {
|
||||
const result = runCli(["claude"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("Available clouds");
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should show agent info for 'codex'", () => {
|
||||
const result = runCli(["codex"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("Available clouds");
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should show launch commands in agent info", () => {
|
||||
const result = runCli(["claude"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("spawn claude");
|
||||
});
|
||||
|
||||
it("should show agent description", () => {
|
||||
const result = runCli(["claude"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("Claude Code");
|
||||
});
|
||||
});
|
||||
|
||||
// ── Valid cloud name: shows cloud info ─────────────────────────────────
|
||||
|
||||
describe("valid cloud name shows cloud info", () => {
|
||||
it("should show cloud info for 'hetzner'", () => {
|
||||
const result = runCli(["hetzner"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("Available agents");
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should show cloud info for 'sprite'", () => {
|
||||
const result = runCli(["sprite"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("Available agents");
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should show cloud type in cloud info", () => {
|
||||
const result = runCli(["hetzner"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("Type:");
|
||||
});
|
||||
|
||||
it("should show cloud description", () => {
|
||||
const result = runCli(["hetzner"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("Hetzner");
|
||||
});
|
||||
});
|
||||
|
||||
// ── Unknown agent or cloud: error output ──────────────────────────────────────
|
||||
|
||||
describe("unknown single argument", () => {
|
||||
it("should show 'Unknown agent or cloud' for an unrecognized name", () => {
|
||||
const result = runCli(["xyzzyplugh"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("Unknown agent or cloud");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should include the unknown name in the error", () => {
|
||||
const result = runCli(["xyzzyplugh"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("xyzzyplugh");
|
||||
});
|
||||
|
||||
it("should suggest 'spawn agents' in error output", () => {
|
||||
const result = runCli(["xyzzyplugh"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("spawn agents");
|
||||
});
|
||||
|
||||
it("should suggest 'spawn clouds' in error output", () => {
|
||||
const result = runCli(["xyzzyplugh"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("spawn clouds");
|
||||
});
|
||||
|
||||
it("should suggest 'spawn help' in error output", () => {
|
||||
const result = runCli(["xyzzyplugh"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("spawn help");
|
||||
});
|
||||
|
||||
it("should exit with non-zero for unknown command", () => {
|
||||
const result = runCli(["totallyunknown"]);
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
});
|
||||
|
||||
// ── Fuzzy matching suggestions ─────────────────────────────────────────
|
||||
|
||||
describe("fuzzy match suggestions", () => {
|
||||
it("should suggest a close agent match for a typo", () => {
|
||||
// "codx" is close to "codex" (distance 1)
|
||||
const result = runCli(["codx"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("Did you mean");
|
||||
expect(output).toContain("codex");
|
||||
});
|
||||
|
||||
it("should suggest a close cloud match for a typo", () => {
|
||||
// "sprte" is close to "sprite" (distance 1)
|
||||
const result = runCli(["sprte"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("Did you mean");
|
||||
expect(output).toContain("sprite");
|
||||
});
|
||||
|
||||
it("should NOT suggest a match for a completely different string", () => {
|
||||
// "kubernetes" is far from any agent or cloud name
|
||||
const result = runCli(["kubernetes"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("Unknown agent or cloud");
|
||||
expect(output).not.toContain("Did you mean");
|
||||
});
|
||||
|
||||
it("should label the suggestion type (agent or cloud)", () => {
|
||||
// "codx" should match "codex" (an agent)
|
||||
const result = runCli(["codx"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
// showInfoOrError labels suggestions as "(agent: Name)" or "(cloud: Name)"
|
||||
expect(output).toMatch(/\(agent:|\(cloud:/);
|
||||
});
|
||||
});
|
||||
|
||||
// ── handleDefaultCommand help flag routing ─────────────────────────────
|
||||
|
||||
describe("agent with help flag", () => {
|
||||
it("should show agent info when agent followed by --help", () => {
|
||||
const result = runCli(["claude", "--help"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
// handleDefaultCommand routes "spawn claude --help" to showInfoOrError
|
||||
expect(output).toContain("Available clouds");
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should show agent info when agent followed by -h", () => {
|
||||
const result = runCli(["claude", "-h"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("Available clouds");
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
|
||||
it("should show agent info when agent followed by 'help'", () => {
|
||||
const result = runCli(["claude", "help"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("Available clouds");
|
||||
expect(result.exitCode).toBe(0);
|
||||
});
|
||||
});
|
||||
|
||||
// ── Edge cases ─────────────────────────────────────────────────────────
|
||||
|
||||
describe("edge cases", () => {
|
||||
it("should not treat numeric-only input as a valid agent or cloud", () => {
|
||||
const result = runCli(["12345"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("Unknown agent or cloud");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should handle hyphenated names that are not real entries", () => {
|
||||
const result = runCli(["not-a-real-entry"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("Unknown agent or cloud");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should error when --prompt is given with agent but no cloud", () => {
|
||||
const result = runCli(["claude", "--prompt", "Fix bugs"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("--prompt requires both");
|
||||
expect(result.exitCode).not.toBe(0);
|
||||
});
|
||||
|
||||
it("should include usage hint in prompt-without-cloud error", () => {
|
||||
const result = runCli(["claude", "--prompt", "Fix bugs"]);
|
||||
const output = result.stdout + result.stderr;
|
||||
expect(output).toContain("spawn claude");
|
||||
expect(output).toContain("<cloud>");
|
||||
});
|
||||
});
|
||||
});
|
||||
|
|
@ -1,181 +0,0 @@
|
|||
import { describe, it, expect } from "bun:test";
|
||||
import { execSync } from "child_process";
|
||||
import { resolve } from "path";
|
||||
|
||||
/**
|
||||
* Tests for unicode-detect.ts side-effect module.
|
||||
*
|
||||
* unicode-detect.ts runs at import time and sets process.env.TERM
|
||||
* based on environment variables. Since it has side effects, we test
|
||||
* it by spawning subprocesses with controlled environments.
|
||||
*
|
||||
* Agent: test-engineer
|
||||
*/
|
||||
|
||||
const CLI_DIR = resolve(import.meta.dir, "../..");
|
||||
|
||||
// Helper: run a small bun script that imports unicode-detect and prints TERM
|
||||
function detectTerm(env: Record<string, string>): string {
|
||||
const script = `
|
||||
import "./src/unicode-detect.ts";
|
||||
console.log(process.env.TERM);
|
||||
`;
|
||||
const result = execSync(`bun -e '${script}'`, {
|
||||
cwd: CLI_DIR,
|
||||
env: { ...env, PATH: process.env.PATH, HOME: process.env.HOME },
|
||||
encoding: "utf-8",
|
||||
timeout: 5000,
|
||||
});
|
||||
return result.trim();
|
||||
}
|
||||
|
||||
describe("unicode-detect", () => {
|
||||
describe("shouldForceAscii logic", () => {
|
||||
it("should force ASCII (TERM=linux) when TERM is dumb", () => {
|
||||
const term = detectTerm({ TERM: "dumb" });
|
||||
expect(term).toBe("linux");
|
||||
});
|
||||
|
||||
it("should force ASCII (TERM=linux) when TERM is unset", () => {
|
||||
const term = detectTerm({});
|
||||
expect(term).toBe("linux");
|
||||
});
|
||||
|
||||
it("should force ASCII (TERM=linux) when SSH_CONNECTION is set", () => {
|
||||
const term = detectTerm({ TERM: "xterm-256color", SSH_CONNECTION: "1.2.3.4 5678 5.6.7.8 22" });
|
||||
expect(term).toBe("linux");
|
||||
});
|
||||
|
||||
it("should force ASCII (TERM=linux) when SSH_CLIENT is set", () => {
|
||||
const term = detectTerm({ TERM: "xterm-256color", SSH_CLIENT: "1.2.3.4 5678 22" });
|
||||
expect(term).toBe("linux");
|
||||
});
|
||||
|
||||
it("should force ASCII (TERM=linux) when SSH_TTY is set", () => {
|
||||
const term = detectTerm({ TERM: "xterm-256color", SSH_TTY: "/dev/pts/0" });
|
||||
expect(term).toBe("linux");
|
||||
});
|
||||
|
||||
it("should keep Unicode when TERM is a modern terminal", () => {
|
||||
const term = detectTerm({ TERM: "xterm-256color" });
|
||||
expect(term).toBe("xterm-256color");
|
||||
});
|
||||
|
||||
it("should keep Unicode when SPAWN_UNICODE=1, even with SSH", () => {
|
||||
const term = detectTerm({
|
||||
TERM: "xterm-256color",
|
||||
SSH_CONNECTION: "1.2.3.4 5678 5.6.7.8 22",
|
||||
SPAWN_UNICODE: "1",
|
||||
});
|
||||
expect(term).toBe("xterm-256color");
|
||||
});
|
||||
|
||||
it("should force ASCII when SPAWN_NO_UNICODE=1, even with modern terminal", () => {
|
||||
const term = detectTerm({ TERM: "xterm-256color", SPAWN_NO_UNICODE: "1" });
|
||||
expect(term).toBe("linux");
|
||||
});
|
||||
|
||||
it("should force ASCII when SPAWN_ASCII=1", () => {
|
||||
const term = detectTerm({ TERM: "xterm-256color", SPAWN_ASCII: "1" });
|
||||
expect(term).toBe("linux");
|
||||
});
|
||||
|
||||
it("should prioritize SPAWN_UNICODE=1 over SPAWN_ASCII=1", () => {
|
||||
// SPAWN_UNICODE is checked first in the code
|
||||
const term = detectTerm({
|
||||
TERM: "xterm-256color",
|
||||
SPAWN_UNICODE: "1",
|
||||
SPAWN_ASCII: "1",
|
||||
});
|
||||
expect(term).toBe("xterm-256color");
|
||||
});
|
||||
|
||||
it("should prioritize SPAWN_UNICODE=1 over dumb TERM", () => {
|
||||
const term = detectTerm({ TERM: "dumb", SPAWN_UNICODE: "1" });
|
||||
expect(term).toBe("dumb");
|
||||
});
|
||||
});
|
||||
|
||||
describe("LANG environment variable", () => {
|
||||
it("should not modify LANG when Unicode is enabled", () => {
|
||||
const script = `
|
||||
import "./src/unicode-detect.ts";
|
||||
console.log(process.env.LANG ?? "undefined");
|
||||
`;
|
||||
const result = execSync(`bun -e '${script}'`, {
|
||||
cwd: CLI_DIR,
|
||||
env: { TERM: "xterm-256color", PATH: process.env.PATH, HOME: process.env.HOME },
|
||||
encoding: "utf-8",
|
||||
timeout: 5000,
|
||||
});
|
||||
expect(result.trim()).toBe("undefined");
|
||||
});
|
||||
|
||||
it("should preserve existing LANG with UTF-8", () => {
|
||||
const script = `
|
||||
import "./src/unicode-detect.ts";
|
||||
console.log(process.env.LANG);
|
||||
`;
|
||||
const result = execSync(`bun -e '${script}'`, {
|
||||
cwd: CLI_DIR,
|
||||
env: { TERM: "xterm-256color", LANG: "fr_FR.UTF-8", PATH: process.env.PATH, HOME: process.env.HOME },
|
||||
encoding: "utf-8",
|
||||
timeout: 5000,
|
||||
});
|
||||
expect(result.trim()).toBe("fr_FR.UTF-8");
|
||||
});
|
||||
|
||||
it("should preserve LANG without UTF-8 when Unicode is enabled", () => {
|
||||
const script = `
|
||||
import "./src/unicode-detect.ts";
|
||||
console.log(process.env.LANG);
|
||||
`;
|
||||
const result = execSync(`bun -e '${script}'`, {
|
||||
cwd: CLI_DIR,
|
||||
env: { TERM: "xterm-256color", LANG: "C", PATH: process.env.PATH, HOME: process.env.HOME },
|
||||
encoding: "utf-8",
|
||||
timeout: 5000,
|
||||
});
|
||||
expect(result.trim()).toBe("C");
|
||||
});
|
||||
});
|
||||
|
||||
describe("debug output", () => {
|
||||
it("should print debug info to stderr when SPAWN_DEBUG=1", () => {
|
||||
const script = `
|
||||
import "./src/unicode-detect.ts";
|
||||
`;
|
||||
// Debug output goes to console.error (stderr), so redirect stderr to stdout
|
||||
const result = execSync(`bun -e '${script}' 2>&1`, {
|
||||
cwd: CLI_DIR,
|
||||
env: {
|
||||
TERM: "xterm-256color",
|
||||
SPAWN_DEBUG: "1",
|
||||
PATH: process.env.PATH,
|
||||
HOME: process.env.HOME,
|
||||
},
|
||||
encoding: "utf-8",
|
||||
timeout: 5000,
|
||||
});
|
||||
expect(result).toContain("[unicode-detect]");
|
||||
expect(result).toContain("TERM:");
|
||||
expect(result).toContain("Force ASCII:");
|
||||
});
|
||||
|
||||
it("should not print debug info without SPAWN_DEBUG", () => {
|
||||
const script = `
|
||||
import "./src/unicode-detect.ts";
|
||||
console.log("done");
|
||||
`;
|
||||
// Capture both stdout and stderr
|
||||
const result = execSync(`bun -e '${script}' 2>&1`, {
|
||||
cwd: CLI_DIR,
|
||||
env: { TERM: "xterm-256color", PATH: process.env.PATH, HOME: process.env.HOME },
|
||||
encoding: "utf-8",
|
||||
timeout: 5000,
|
||||
});
|
||||
expect(result).not.toContain("[unicode-detect]");
|
||||
expect(result.trim()).toBe("done");
|
||||
});
|
||||
});
|
||||
});
|
||||
3852
shared/common.sh
3852
shared/common.sh
File diff suppressed because it is too large
Load diff
|
|
@ -11,23 +11,13 @@
|
|||
# curl -fsSL https://raw.githubusercontent.com/OpenRouterTeam/spawn/main/shared/github-auth.sh | bash
|
||||
|
||||
# ============================================================
|
||||
# Source shared/common.sh for logging (local-or-remote fallback)
|
||||
# Logging helpers
|
||||
# ============================================================
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" 2>/dev/null && pwd)"
|
||||
if [[ -n "${SCRIPT_DIR:-}" && -f "${SCRIPT_DIR}/common.sh" ]]; then
|
||||
source "${SCRIPT_DIR}/common.sh"
|
||||
else
|
||||
eval "$(curl -fsSL https://raw.githubusercontent.com/OpenRouterTeam/spawn/main/shared/common.sh)"
|
||||
fi
|
||||
|
||||
# Fallback log functions if common.sh failed to load
|
||||
if ! type log_info &>/dev/null 2>&1; then
|
||||
log_info() { printf '[github-auth] %s\n' "$*" >&2; }
|
||||
log_step() { printf '[github-auth] %s\n' "$*" >&2; }
|
||||
log_warn() { printf '[github-auth] WARNING: %s\n' "$*" >&2; }
|
||||
log_error() { printf '[github-auth] ERROR: %s\n' "$*" >&2; }
|
||||
fi
|
||||
log_info() { printf '[github-auth] %s\n' "$*" >&2; }
|
||||
log_step() { printf '[github-auth] %s\n' "$*" >&2; }
|
||||
log_warn() { printf '[github-auth] WARNING: %s\n' "$*" >&2; }
|
||||
log_error() { printf '[github-auth] ERROR: %s\n' "$*" >&2; }
|
||||
|
||||
# ============================================================
|
||||
# ensure_gh_cli — Install gh CLI if not already present
|
||||
|
|
|
|||
|
|
@ -108,7 +108,7 @@ process.stdout.write(d[process.env._VAR] || d.api_key || d.token || '');
|
|||
# - _ . / @ (standard API key chars)
|
||||
# : + = (base64 segments, URL-style formats)
|
||||
# space (Fly.io "FlyV1 <macaroon>" prefixed tokens)
|
||||
# Must match shared/common.sh _load_token_from_config regex
|
||||
# Must match CLI's loadTokenFromConfig regex in cli/src/digitalocean/digitalocean.ts
|
||||
if [[ ! "${val}" =~ ^[a-zA-Z0-9._/@:+=\ -]+$ ]]; then
|
||||
log "SECURITY: Invalid characters in config value for ${var_name}"
|
||||
return 1
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue