#!/bin/bash
# Record real API responses from cloud providers as test fixtures
#
# Hits safe GET-only endpoints using each cloud's existing API wrapper,
# validates the response, and saves it as pretty-printed JSON.
#
# Usage:
#   bash test/record.sh hetzner          # Record one cloud
#   bash test/record.sh hetzner vultr    # Record multiple
#   bash test/record.sh all              # All clouds with available credentials
#   bash test/record.sh --list           # Show recordable clouds + credential status
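#
# Fixtures land in test/fixtures/<cloud>/<fixture_name>.json, plus a per-cloud
# _metadata.json describing each recorded endpoint (see record_cloud below).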

set -eo pipefail

REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
FIXTURES_DIR="${REPO_ROOT}/test/fixtures"

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
CYAN='\033[0;36m'
NC='\033[0m'

# Counters
RECORDED=0
SKIPPED=0
ERRORS=0

# Whether to prompt for missing credentials (set by 'all' vs 'allsaved')
PROMPT_FOR_CREDS=true

# All clouds with REST APIs that we can record from
ALL_RECORDABLE_CLOUDS="hetzner digitalocean vultr linode civo upcloud binarylane ovh scaleway genesiscloud kamatera latitude hyperstack"

# --- Endpoint registry ---
# Format: "fixture_name:endpoint"
get_endpoints() {
  local cloud="$1"
  case "$cloud" in
    hetzner)
      printf '%s\n' \
        "server_types:/server_types?per_page=50" \
        "locations:/locations" \
        "ssh_keys:/ssh_keys" \
        "servers:/servers"
      ;;
    digitalocean)
      printf '%s\n' \
        "account:/account" \
        "ssh_keys:/account/keys" \
        "droplets:/droplets" \
        "sizes:/sizes" \
        "regions:/regions"
      ;;
    vultr)
      printf '%s\n' \
        "account:/account" \
        "ssh_keys:/ssh-keys" \
        "instances:/instances" \
        "plans:/plans" \
        "regions:/regions"
      ;;
    linode)
      printf '%s\n' \
        "profile:/profile" \
        "ssh_keys:/profile/sshkeys" \
        "instances:/linode/instances" \
        "types:/linode/types" \
        "regions:/regions"
      ;;
    lambda)
      printf '%s\n' \
        "instances:/instances" \
        "ssh_keys:/ssh-keys" \
        "instance_types:/instance-types"
      ;;
    civo)
      printf '%s\n' \
        "regions:/regions" \
        "instances:/instances" \
        "ssh_keys:/sshkeys"
      ;;
    upcloud)
      printf '%s\n' \
        "servers:/server" \
        "server_sizes:/server_size"
      ;;
    binarylane)
      printf '%s\n' \
        "sizes:/sizes" \
        "regions:/regions" \
        "servers:/servers"
      ;;
    ovh)
      printf '%s\n' \
        "flavors:/cloud/project/${OVH_PROJECT_ID:-MISSING}/flavor" \
        "images:/cloud/project/${OVH_PROJECT_ID:-MISSING}/image" \
        "ssh_keys:/cloud/project/${OVH_PROJECT_ID:-MISSING}/sshkey"
      ;;
    scaleway)
      printf '%s\n' \
        "servers:/servers" \
        "images:/images?per_page=10"
      ;;
    genesiscloud)
      printf '%s\n' \
        "ssh_keys:/ssh-keys" \
        "instances:/instances"
      ;;
    kamatera)
      printf '%s\n' \
        "server_options:/service/server"
      ;;
    latitude)
      printf '%s\n' \
        "ssh_keys:/ssh_keys" \
        "plans:/plans" \
        "regions:/regions"
      ;;
    hyperstack)
      printf '%s\n' \
        "flavors:/core/flavors" \
        "ssh_keys:/core/keypairs"
      ;;
  esac
}

# --- Auth env var check ---
get_auth_env_var() {
  local cloud="$1"
  case "$cloud" in
    hetzner) printf "HCLOUD_TOKEN" ;;
    digitalocean) printf "DO_API_TOKEN" ;;
    vultr) printf "VULTR_API_KEY" ;;
    linode) printf "LINODE_API_TOKEN" ;;
    lambda) printf "LAMBDA_API_KEY" ;;
    civo) printf "CIVO_API_TOKEN" ;;
    upcloud) printf "UPCLOUD_USERNAME" ;;
    binarylane) printf "BINARYLANE_API_TOKEN" ;;
    ovh) printf "OVH_APPLICATION_KEY" ;;
    scaleway) printf "SCW_SECRET_KEY" ;;
    genesiscloud) printf "GENESIS_API_KEY" ;;
    kamatera) printf "KAMATERA_API_CLIENT_ID" ;;
    latitude) printf "LATITUDE_API_KEY" ;;
    hyperstack) printf "HYPERSTACK_API_KEY" ;;
  esac
}

# Try loading token from ~/.config/spawn/{cloud}.json (same config the agent scripts use)
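# Illustrative config shapes (keys are what the loaders below read; values hypothetical):
#   hetzner.json: {"api_key": "<token>"}   (a "token" key is also accepted)
#   ovh.json:     {"application_key": "...", "application_secret": "...",
#                  "consumer_key": "...", "project_id": "..."}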
try_load_config() {
  local cloud="$1"
  local env_var
  env_var=$(get_auth_env_var "$cloud")

  # Already set via env var — nothing to do
  eval "local current_val=\"\${${env_var}:-}\""
  if [[ -n "$current_val" ]]; then
    return 0
  fi

  # Map cloud name to config file
  local config_file="$HOME/.config/spawn/${cloud}.json"

  # OVH uses separate config with multiple fields
  if [[ "$cloud" == "ovh" ]]; then
    if [[ -f "$config_file" ]]; then
      eval "$(python3 -c "
import json, sys
try:
    d = json.load(open(sys.argv[1]))
    for k, e in [('application_key','OVH_APPLICATION_KEY'), ('application_secret','OVH_APPLICATION_SECRET'),
                 ('consumer_key','OVH_CONSUMER_KEY'), ('project_id','OVH_PROJECT_ID')]:
        v = d.get(k, '')
        if v: print(f'export {e}=\"{v}\"')
except: pass
" "$config_file" 2>/dev/null)" || true
    fi
    return 0
  fi

  # Standard single-token config
  if [[ -f "$config_file" ]]; then
    local token
    token=$(python3 -c "import json,sys; d=json.load(open(sys.argv[1])); print(d.get('api_key','') or d.get('token',''))" "$config_file" 2>/dev/null) || true
    if [[ -n "${token:-}" ]]; then
      export "${env_var}=${token}"
    fi
  fi
}

has_credentials() {
  local cloud="$1"

  # Try loading from config file first
  try_load_config "$cloud"

  case "$cloud" in
    upcloud)
      [[ -n "${UPCLOUD_USERNAME:-}" ]] && [[ -n "${UPCLOUD_PASSWORD:-}" ]]
      ;;
    ovh)
      [[ -n "${OVH_APPLICATION_KEY:-}" ]] && [[ -n "${OVH_APPLICATION_SECRET:-}" ]] && \
        [[ -n "${OVH_CONSUMER_KEY:-}" ]] && [[ -n "${OVH_PROJECT_ID:-}" ]]
      ;;
    kamatera)
      [[ -n "${KAMATERA_API_CLIENT_ID:-}" ]] && [[ -n "${KAMATERA_API_SECRET:-}" ]]
      ;;
    *)
      local env_var
      env_var=$(get_auth_env_var "$cloud")
      eval "[[ -n \"\${${env_var}:-}\" ]]"
      ;;
  esac
}

# Save credentials to ~/.config/spawn/{cloud}.json for future use
save_config() {
  local cloud="$1"
  local config_dir="$HOME/.config/spawn"
  local config_file="${config_dir}/${cloud}.json"
  mkdir -p "$config_dir"

  case "$cloud" in
    ovh)
      python3 -c "
import json
d = {'application_key': '${OVH_APPLICATION_KEY:-}', 'application_secret': '${OVH_APPLICATION_SECRET:-}',
     'consumer_key': '${OVH_CONSUMER_KEY:-}', 'project_id': '${OVH_PROJECT_ID:-}'}
print(json.dumps(d, indent=2))
" > "$config_file"
      ;;
    upcloud)
      python3 -c "
import json
print(json.dumps({'username': '${UPCLOUD_USERNAME:-}', 'password': '${UPCLOUD_PASSWORD:-}'}, indent=2))
" > "$config_file"
      ;;
    kamatera)
      python3 -c "
import json
print(json.dumps({'client_id': '${KAMATERA_API_CLIENT_ID:-}', 'secret': '${KAMATERA_API_SECRET:-}'}, indent=2))
" > "$config_file"
      ;;
    *)
      local env_var
      env_var=$(get_auth_env_var "$cloud")
      eval "local val=\"\${${env_var}:-}\""
      python3 -c "import json; print(json.dumps({'api_key': '${val}'}, indent=2))" > "$config_file"
      ;;
  esac
  printf '%b\n' " ${GREEN}saved${NC} → ${config_file}"
}

# Prompt user for missing credentials, export them, and save to config
prompt_credentials() {
  local cloud="$1"
  local vars_needed=""
  local val=""

  case "$cloud" in
    ovh)
      vars_needed="OVH_APPLICATION_KEY OVH_APPLICATION_SECRET OVH_CONSUMER_KEY OVH_PROJECT_ID"
      ;;
    upcloud)
      vars_needed="UPCLOUD_USERNAME UPCLOUD_PASSWORD"
      ;;
    kamatera)
      vars_needed="KAMATERA_API_CLIENT_ID KAMATERA_API_SECRET"
      ;;
    *)
      vars_needed=$(get_auth_env_var "$cloud")
      ;;
  esac

  for var_name in $vars_needed; do
    eval "local current=\"\${${var_name}:-}\""
    if [[ -n "$current" ]]; then
      continue
    fi
    printf " Enter %s (press Enter to skip %s): " "$var_name" "$cloud" >&2
    read -r val
    if [[ -z "$val" ]]; then
      return 1
    fi
    export "${var_name}=${val}"
  done

  # Save so they don't have to enter again
  save_config "$cloud"
  return 0
}

# --- API call dispatcher ---
# Each cloud sources its lib and calls its wrapper function
call_api() {
  local cloud="$1"
  local endpoint="$2"
  case "$cloud" in
    hetzner) hetzner_api GET "$endpoint" ;;
    digitalocean) do_api GET "$endpoint" ;;
    vultr) vultr_api GET "$endpoint" ;;
    linode) linode_api GET "$endpoint" ;;
    lambda) lambda_api GET "$endpoint" "" ;;
    civo) civo_api GET "$endpoint" ;;
    upcloud) upcloud_api GET "$endpoint" ;;
    binarylane) binarylane_api GET "$endpoint" ;;
    ovh) ovh_api_call GET "$endpoint" ;;
    scaleway) scaleway_instance_api GET "$endpoint" ;;
    genesiscloud) genesis_api GET "$endpoint" ;;
    kamatera) kamatera_api GET "$endpoint" ;;
    latitude) latitude_api GET "$endpoint" ;;
    hyperstack) hyperstack_api GET "$endpoint" ;;
  esac
}

# --- Validation ---
is_valid_json() {
  python3 -c "import json,sys; json.loads(sys.stdin.read())" 2>/dev/null
}

has_api_error() {
  local cloud="$1"
  local response="$2"

  echo "$response" | python3 -c "
import json, sys
d = json.loads(sys.stdin.read())
cloud = '$cloud'

if cloud == 'hetzner':
    err = d.get('error')
    sys.exit(0 if err and isinstance(err, dict) else 1)
elif cloud == 'digitalocean':
    sys.exit(0 if 'id' in d and isinstance(d.get('id'), str) and 'message' in d else 1)
elif cloud in ('vultr', 'genesiscloud', 'hyperstack'):
    sys.exit(0 if 'error' in d and d['error'] else 1)
elif cloud == 'linode':
    sys.exit(0 if 'errors' in d and d['errors'] else 1)
elif cloud in ('ovh', 'scaleway', 'binarylane'):
    # These use 'message' for errors, but some success responses also have 'message'
    sys.exit(0 if 'message' in d and len(d) <= 3 and not any(k in d for k in ('servers','images','ssh_keys','flavors','sizes','regions')) else 1)
elif cloud == 'civo':
    sys.exit(0 if 'reason' in d and 'result' in d and d['result'] == 'failed' else 1)
elif cloud == 'lambda':
    err = d.get('error')
    sys.exit(0 if err and isinstance(err, dict) else 1)
elif cloud == 'kamatera':
    sys.exit(0 if d.get('status') == 'error' else 1)
elif cloud == 'latitude':
    sys.exit(0 if 'error' in d or ('errors' in d and d['errors']) else 1)
else:
    sys.exit(1)
" 2>/dev/null
}

# --- Pretty print JSON ---
pretty_json() {
  python3 -c "import json,sys; print(json.dumps(json.loads(sys.stdin.read()), indent=2, sort_keys=True))"
}

# --- Live create+delete cycle (captures real POST/DELETE responses) ---
# Creates a server with a timestamped name, records the response, then deletes it.
# These functions access cloud_recorded, cloud_errors, metadata_entries from the
# calling scope (record_cloud) via bash dynamic scoping — no namerefs needed.
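# (In practice: record_cloud declares `local cloud_errors=0` and then calls into these
#  helpers, so a plain `cloud_errors=$((cloud_errors + 1))` here increments that same
#  variable; this relies on record_cloud always being the caller.)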
_record_live_cycle() {
  local cloud="$1"
  local fixture_dir="$2"

  case "$cloud" in
    hetzner|digitalocean|vultr|linode|lambda|civo|upcloud|binarylane|scaleway|genesiscloud|latitude)
      source "${REPO_ROOT}/${cloud}/lib/common.sh" 2>/dev/null || true
      "_live_${cloud}" "$fixture_dir"
      ;;
    *) return 0 ;; # No live cycle for this cloud yet
  esac
}

# Save a live fixture and update the caller's counters/metadata
_save_live_fixture() {
  local fixture_dir="$1"
  local fixture_name="$2"
  local endpoint="$3"
  local response="$4"

  if [[ -z "$response" ]]; then
    printf '%b\n' " ${RED}fail${NC} ${fixture_name} — empty response"
    cloud_errors=$((cloud_errors + 1))
    return 1
  fi

  if ! echo "$response" | is_valid_json; then
    printf '%b\n' " ${RED}fail${NC} ${fixture_name} — invalid JSON"
    cloud_errors=$((cloud_errors + 1))
    return 1
  fi

  # Reject obvious API error responses (small JSON with error-like keys, no data payload)
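  # Illustrative (hypothetical payloads): {"error": {"message": "rate limited"}} is
  # rejected, while {"server": {...}, "action": {...}} is kept because it carries a data key.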
  if echo "$response" | python3 -c "
import json, sys
d = json.loads(sys.stdin.read())
error_keys = {'error', 'code', 'reason', 'message', 'errors'}
data_keys = {'server', 'droplet', 'instance', 'data', 'action', 'task'}
if (error_keys & set(d.keys())) and not (data_keys & set(d.keys())) and len(d) <= 4:
    sys.exit(0)
sys.exit(1)
" 2>/dev/null; then
    local err_msg
    err_msg=$(echo "$response" | python3 -c "
import json, sys
d = json.loads(sys.stdin.read())
print(d.get('reason', d.get('message', d.get('error', {}).get('message', str(d)[:100]))) if isinstance(d.get('error'), dict) else d.get('reason', d.get('message', str(d)[:100])))
" 2>/dev/null || echo "unknown error")
    printf '%b\n' " ${RED}fail${NC} ${fixture_name} — API error: ${err_msg}"
    cloud_errors=$((cloud_errors + 1))
    return 1
  fi

  echo "$response" | pretty_json > "${fixture_dir}/${fixture_name}.json"
  printf '%b\n' " ${GREEN} ok${NC} ${fixture_name} (live)"

  local ts
  ts=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
  metadata_entries="${metadata_entries} \"${fixture_name}\": {\"endpoint\": \"${endpoint}\", \"type\": \"live\", \"recorded_at\": \"${ts}\"},
"
  cloud_recorded=$((cloud_recorded + 1))
  return 0
}

_live_hetzner() {
  local fixture_dir="$1"
  local server_name="spawn-record-$(date +%s)"
  local server_type="cx23"
  local location="nbg1"
  local image="ubuntu-24.04"

  printf '%b\n' " ${CYAN}live${NC} Creating test server '${server_name}' (${server_type}, ${location})..."

  # Get SSH key IDs for the create request
  local ssh_keys_response
  ssh_keys_response=$(hetzner_api GET "/ssh_keys")
  local ssh_key_ids
  ssh_key_ids=$(echo "$ssh_keys_response" | python3 -c "
import json, sys
d = json.loads(sys.stdin.read())
ids = [k['id'] for k in d.get('ssh_keys', [])]
print(json.dumps(ids))
" 2>/dev/null) || ssh_key_ids="[]"

  # Create server (minimal — no cloud-init userdata to speed up)
  local body
  body=$(python3 -c "
import json
body = {
    'name': '${server_name}',
    'server_type': '${server_type}',
    'location': '${location}',
    'image': '${image}',
    'ssh_keys': ${ssh_key_ids},
    'start_after_create': True
}
print(json.dumps(body))
")

  local create_response
  create_response=$(hetzner_api POST "/servers" "$body")

  # Save create response
  _save_live_fixture "$fixture_dir" "create_server" "POST /servers" "$create_response" || {
    printf '%b\n' " ${RED}fail${NC} Could not create server — skipping delete fixture"
    return 0
  }

  # Extract server ID for deletion
  local server_id
  server_id=$(echo "$create_response" | python3 -c "import json,sys; print(json.loads(sys.stdin.read())['server']['id'])" 2>/dev/null) || true

  if [[ -z "${server_id:-}" ]]; then
    printf '%b\n' " ${RED}fail${NC} Could not extract server ID from create response"
    cloud_errors=$((cloud_errors + 1))
    return 0
  fi

  printf '%b\n' " ${CYAN}live${NC} Server created (ID: ${server_id}). Deleting..."

  # Brief pause to let the server register
  sleep 2

  # Delete server
  local delete_response
  delete_response=$(hetzner_api DELETE "/servers/${server_id}")

  _save_live_fixture "$fixture_dir" "delete_server" "DELETE /servers/{id}" "$delete_response"

  printf '%b\n' " ${CYAN}live${NC} Server ${server_id} deleted"
}

_live_digitalocean() {
  local fixture_dir="$1"
  local server_name="spawn-record-$(date +%s)"
  local size="s-1vcpu-512mb-10gb"
  local region="nyc3"
  local image="ubuntu-24-04-x64"

  printf '%b\n' " ${CYAN}live${NC} Creating test droplet '${server_name}' (${size}, ${region})..."

  local ssh_keys_response
  ssh_keys_response=$(do_api GET "/account/keys")
  local ssh_key_ids
  ssh_key_ids=$(echo "$ssh_keys_response" | python3 -c "
import json, sys
d = json.loads(sys.stdin.read())
ids = [k['id'] for k in d.get('ssh_keys', [])]
print(json.dumps(ids))
" 2>/dev/null) || ssh_key_ids="[]"

  local body
  body=$(python3 -c "
import json
body = {
    'name': '${server_name}',
    'region': '${region}',
    'size': '${size}',
    'image': '${image}',
    'ssh_keys': ${ssh_key_ids}
}
print(json.dumps(body))
")

  local create_response
  create_response=$(do_api POST "/droplets" "$body")

  _save_live_fixture "$fixture_dir" "create_server" "POST /droplets" "$create_response" || {
    printf '%b\n' " ${RED}fail${NC} Could not create droplet — skipping delete fixture"
    return 0
  }

  local server_id
  server_id=$(echo "$create_response" | python3 -c "import json,sys; print(json.loads(sys.stdin.read())['droplet']['id'])" 2>/dev/null) || true

  if [[ -z "${server_id:-}" ]]; then
    printf '%b\n' " ${RED}fail${NC} Could not extract droplet ID from create response"
    cloud_errors=$((cloud_errors + 1))
    return 0
  fi

  printf '%b\n' " ${CYAN}live${NC} Droplet created (ID: ${server_id}). Deleting..."
  sleep 3

  local delete_response
  delete_response=$(do_api DELETE "/droplets/${server_id}") || true
  # DO returns 204 No Content — normalize to empty JSON
  if [[ -z "$delete_response" ]]; then
    delete_response='{}'
  fi
  _save_live_fixture "$fixture_dir" "delete_server" "DELETE /droplets/{id}" "$delete_response"

  printf '%b\n' " ${CYAN}live${NC} Droplet ${server_id} deleted"
}

_live_vultr() {
  local fixture_dir="$1"
  local server_name="spawn-record-$(date +%s)"
  local plan="vc2-1c-1gb"
  local region="ewr"
  local os_id=2284 # Ubuntu 24.04 LTS x64

  printf '%b\n' " ${CYAN}live${NC} Creating test instance '${server_name}' (${plan}, ${region})..."

  local ssh_keys_response
  ssh_keys_response=$(vultr_api GET "/ssh-keys")
  local ssh_key_ids
  ssh_key_ids=$(echo "$ssh_keys_response" | python3 -c "
import json, sys
d = json.loads(sys.stdin.read())
ids = [k['id'] for k in d.get('ssh_keys', [])]
print(json.dumps(ids))
" 2>/dev/null) || ssh_key_ids="[]"

  local body
  body=$(python3 -c "
import json
body = {
    'region': '${region}',
    'plan': '${plan}',
    'os_id': ${os_id},
    'label': '${server_name}',
    'sshkey_id': ${ssh_key_ids}
}
print(json.dumps(body))
")

  local create_response
  create_response=$(vultr_api POST "/instances" "$body")

  _save_live_fixture "$fixture_dir" "create_server" "POST /instances" "$create_response" || {
    printf '%b\n' " ${RED}fail${NC} Could not create Vultr instance — skipping delete fixture"
    return 0
  }

  local server_id
  server_id=$(echo "$create_response" | python3 -c "import json,sys; print(json.loads(sys.stdin.read())['instance']['id'])" 2>/dev/null) || true

  if [[ -z "${server_id:-}" ]]; then
    printf '%b\n' " ${RED}fail${NC} Could not extract Vultr instance ID from create response"
    cloud_errors=$((cloud_errors + 1))
    return 0
  fi

  printf '%b\n' " ${CYAN}live${NC} Instance created (ID: ${server_id}). Waiting for it to become active before deleting..."

  # Vultr requires the instance to be fully active before it can be deleted
  local attempt=0
  local max_attempts=12
  local delete_response=""
  while [[ "$attempt" -lt "$max_attempts" ]]; do
    sleep 10
    attempt=$((attempt + 1))
    delete_response=$(vultr_api DELETE "/instances/${server_id}" 2>/dev/null) || true
    if [[ -z "$delete_response" ]]; then
      delete_response='{}'
    fi
    # Vultr returns {"error":"..."} on failure, empty/{} on success
    if echo "$delete_response" | python3 -c "
import json, sys
d = json.loads(sys.stdin.read())
sys.exit(0 if not d or 'error' not in d else 1)
" 2>/dev/null; then
      break
    fi
    printf '%b\n' " ${YELLOW}retry${NC} Delete attempt ${attempt}/${max_attempts} — instance not ready yet"
  done

  _save_live_fixture "$fixture_dir" "delete_server" "DELETE /instances/{id}" "$delete_response" || true

  if [[ "$attempt" -ge "$max_attempts" ]]; then
    printf '%b\n' " ${RED}WARNING: Could not delete Vultr instance ${server_id} — delete it manually!${NC}"
  else
    printf '%b\n' " ${CYAN}live${NC} Instance ${server_id} deleted"
  fi
}

_live_linode() {
  local fixture_dir="$1"
  local server_name="spawn-record-$(date +%s)"
  local type="g6-nanode-1"
  local region="us-east"
  local image="linode/ubuntu24.04"

  printf '%b\n' " ${CYAN}live${NC} Creating test linode '${server_name}' (${type}, ${region})..."

  local ssh_keys_response
  ssh_keys_response=$(linode_api GET "/profile/sshkeys")
  local ssh_keys
  ssh_keys=$(echo "$ssh_keys_response" | python3 -c "
import json, sys
d = json.loads(sys.stdin.read())
keys = [k['ssh_key'] for k in d.get('data', [])]
print(json.dumps(keys))
" 2>/dev/null) || ssh_keys="[]"

  local root_pass
  root_pass="Sp4wn-$(openssl rand -hex 12)"

  local body
  body=$(python3 -c "
import json
body = {
    'label': '${server_name}',
    'region': '${region}',
    'type': '${type}',
    'image': '${image}',
    'root_pass': '${root_pass}',
    'authorized_keys': ${ssh_keys}
}
print(json.dumps(body))
")

  local create_response
  create_response=$(linode_api POST "/linode/instances" "$body")

  _save_live_fixture "$fixture_dir" "create_server" "POST /linode/instances" "$create_response" || {
    printf '%b\n' " ${RED}fail${NC} Could not create linode — skipping delete fixture"
    return 0
  }

  local server_id
  server_id=$(echo "$create_response" | python3 -c "import json,sys; print(json.loads(sys.stdin.read())['id'])" 2>/dev/null) || true

  if [[ -z "${server_id:-}" ]]; then
    printf '%b\n' " ${RED}fail${NC} Could not extract linode ID from create response"
    cloud_errors=$((cloud_errors + 1))
    return 0
  fi

  printf '%b\n' " ${CYAN}live${NC} Linode created (ID: ${server_id}). Deleting..."
  sleep 3

  local delete_response
  delete_response=$(linode_api DELETE "/linode/instances/${server_id}") || true
  if [[ -z "$delete_response" ]]; then
    delete_response='{}'
  fi
  _save_live_fixture "$fixture_dir" "delete_server" "DELETE /linode/instances/{id}" "$delete_response"

  printf '%b\n' " ${CYAN}live${NC} Linode ${server_id} deleted"
}

_live_lambda() {
  local fixture_dir="$1"
  local server_name="spawn-record-$(date +%s)"
  local instance_type="gpu_1x_a10"
  local region="us-east-1"

  printf '%b\n' " ${CYAN}live${NC} Creating test Lambda instance '${server_name}' (${instance_type}, ${region})..."

  local ssh_keys_response
  ssh_keys_response=$(lambda_api GET "/ssh-keys")
  local ssh_key_names
  ssh_key_names=$(echo "$ssh_keys_response" | python3 -c "
import json, sys
data = json.loads(sys.stdin.read())
names = [k['name'] for k in data.get('data', [])]
print(json.dumps(names))
" 2>/dev/null) || ssh_key_names="[]"

  local body
  body=$(python3 -c "
import json
body = {
    'name': '${server_name}',
    'instance_type_name': '${instance_type}',
    'region_name': '${region}',
    'ssh_key_names': ${ssh_key_names}
}
print(json.dumps(body))
")

  local create_response
  create_response=$(lambda_api POST "/instance-operations/launch" "$body")

  _save_live_fixture "$fixture_dir" "create_server" "POST /instance-operations/launch" "$create_response" || {
    printf '%b\n' " ${YELLOW}warn${NC} Could not launch Lambda instance (GPU — may be out of capacity)"
    return 0
  }

  local server_id
  server_id=$(echo "$create_response" | python3 -c "import json,sys; print(json.loads(sys.stdin.read())['data']['instance_ids'][0])" 2>/dev/null) || true

  if [[ -z "${server_id:-}" ]]; then
    printf '%b\n' " ${YELLOW}warn${NC} Could not extract Lambda instance ID — may be out of GPU capacity"
    return 0
  fi

  printf '%b\n' " ${CYAN}live${NC} Lambda instance launched (ID: ${server_id}). Terminating..."
  sleep 3

  local delete_response
  delete_response=$(lambda_api POST "/instance-operations/terminate" "{\"instance_ids\":[\"${server_id}\"]}")
  if [[ -z "$delete_response" ]]; then
    delete_response='{"terminated_instances":["'"${server_id}"'"]}'
  fi
  _save_live_fixture "$fixture_dir" "delete_server" "POST /instance-operations/terminate" "$delete_response"

  printf '%b\n' " ${CYAN}live${NC} Lambda instance ${server_id} terminated"
}

_live_civo() {
  local fixture_dir="$1"
  local server_name="spawn-record-$(date +%s)"
  local size="g4s.xsmall"
  local region="LON1"

  printf '%b\n' " ${CYAN}live${NC} Creating test Civo instance '${server_name}' (${size}, ${region})..."

  local network_id template_id
  network_id=$(get_default_network_id "$region") || {
    printf '%b\n' " ${RED}fail${NC} Could not find Civo network in ${region}"
    cloud_errors=$((cloud_errors + 1))
    return 0
  }
  template_id=$(get_ubuntu_template_id "$region") || {
    printf '%b\n' " ${RED}fail${NC} Could not find Ubuntu template for Civo"
    cloud_errors=$((cloud_errors + 1))
    return 0
  }

  local ssh_keys_response
  ssh_keys_response=$(civo_api GET "/sshkeys")
  local ssh_key_id
  ssh_key_id=$(echo "$ssh_keys_response" | python3 -c "
import json, sys
data = json.loads(sys.stdin.read())
items = data if isinstance(data, list) else data.get('items', [])
if items:
    print(items[0]['id'])
else:
    print('')
" 2>/dev/null) || ssh_key_id=""

  local body
  body=$(python3 -c "
import json
body = {
    'hostname': '${server_name}',
    'size': '${size}',
    'region': '${region}',
    'network_id': '${network_id}',
    'template_id': '${template_id}',
    'initial_user': 'root',
    'public_ip': 'create'
}
ssh_key = '${ssh_key_id}'
if ssh_key:
    body['ssh_key_id'] = ssh_key
print(json.dumps(body))
")

  local create_response
  create_response=$(civo_api POST "/instances" "$body")

  _save_live_fixture "$fixture_dir" "create_server" "POST /instances" "$create_response" || {
    printf '%b\n' " ${RED}fail${NC} Could not create Civo instance — skipping delete fixture"
    return 0
  }

  local server_id
  server_id=$(echo "$create_response" | python3 -c "import json,sys; print(json.loads(sys.stdin.read())['id'])" 2>/dev/null) || true

  if [[ -z "${server_id:-}" ]]; then
    printf '%b\n' " ${RED}fail${NC} Could not extract Civo instance ID from create response"
    cloud_errors=$((cloud_errors + 1))
    return 0
  fi

  printf '%b\n' " ${CYAN}live${NC} Civo instance created (ID: ${server_id}). Waiting for it to register before deleting..."

  # Civo needs time to register the instance in its internal DB before deletion works
  sleep 15

  local attempt=0
  local max_attempts=12
  local delete_response=""
  while [[ "$attempt" -lt "$max_attempts" ]]; do
    attempt=$((attempt + 1))
    delete_response=$(civo_api DELETE "/instances/${server_id}?region=${region}" 2>/dev/null) || true
    if [[ -z "$delete_response" ]]; then
      delete_response='{}'
    fi
    # Civo returns {"result":"failed","reason":"..."} or just {"reason":"..."} on error
    # Success is empty {} or {"result":"success"}
    if echo "$delete_response" | python3 -c "
import json, sys
d = json.loads(sys.stdin.read())
if not d:
    sys.exit(0) # empty = success
if d.get('result') == 'failed' or 'reason' in d:
    sys.exit(1) # error
sys.exit(0)
" 2>/dev/null; then
      break
    fi
    printf '%b\n' " ${YELLOW}retry${NC} Delete attempt ${attempt}/${max_attempts} — instance not ready yet"
    sleep 10
  done

  _save_live_fixture "$fixture_dir" "delete_server" "DELETE /instances/{id}" "$delete_response" || true

  if [[ "$attempt" -ge "$max_attempts" ]]; then
    printf '%b\n' " ${RED}WARNING: Could not delete Civo instance ${server_id} — delete it manually!${NC}"
  else
    printf '%b\n' " ${CYAN}live${NC} Civo instance ${server_id} deleted"
  fi
}

_live_upcloud() {
  local fixture_dir="$1"
  local server_name="spawn-record-$(date +%s)"
  local plan="1xCPU-1GB"
  local zone="de-fra1"

  printf '%b\n' " ${CYAN}live${NC} Creating test UpCloud server '${server_name}' (${plan}, ${zone})..."

  local template_uuid
  template_uuid=$(find_ubuntu_template) || {
    printf '%b\n' " ${RED}fail${NC} Could not find Ubuntu template for UpCloud"
    cloud_errors=$((cloud_errors + 1))
    return 0
  }

  # Read SSH public key if available
  local ssh_pub_key=""
  local key_path="${HOME}/.ssh/id_ed25519.pub"
  if [[ -f "${key_path}" ]]; then
    ssh_pub_key=$(cat "${key_path}")
  elif [[ -f "${HOME}/.ssh/id_rsa.pub" ]]; then
    ssh_pub_key=$(cat "${HOME}/.ssh/id_rsa.pub")
  fi

  local body
  if [[ -n "$ssh_pub_key" ]]; then
    local json_ssh_key
    json_ssh_key=$(json_escape "$ssh_pub_key")
    body=$(_build_upcloud_server_body "$server_name" "$zone" "$plan" "$template_uuid" "$json_ssh_key")
  else
    body=$(python3 -c "
import json
body = {
    'server': {
        'zone': '${zone}',
        'title': '${server_name}',
        'hostname': '${server_name}',
        'plan': '${plan}',
        'storage_devices': {
            'storage_device': [{
                'action': 'clone',
                'storage': '${template_uuid}',
                'title': '${server_name}-os',
                'size': 25,
                'tier': 'maxiops'
            }]
        }
    }
}
print(json.dumps(body))
")
  fi

  local create_response
  create_response=$(upcloud_api POST "/server" "$body")

  _save_live_fixture "$fixture_dir" "create_server" "POST /server" "$create_response" || {
    printf '%b\n' " ${RED}fail${NC} Could not create UpCloud server — skipping delete fixture"
    return 0
  }

  local server_uuid
  server_uuid=$(echo "$create_response" | python3 -c "import json,sys; print(json.loads(sys.stdin.read())['server']['uuid'])" 2>/dev/null) || true

  if [[ -z "${server_uuid:-}" ]]; then
    printf '%b\n' " ${RED}fail${NC} Could not extract UpCloud server UUID from create response"
    cloud_errors=$((cloud_errors + 1))
    return 0
  fi

  printf '%b\n' " ${CYAN}live${NC} UpCloud server created (UUID: ${server_uuid}). Stopping..."

  # UpCloud requires stop before delete
  upcloud_api POST "/server/$server_uuid/stop" '{"stop_server":{"stop_type":"soft","timeout":"60"}}' >/dev/null 2>&1 || true

  # Wait for server to stop (up to 30s)
  local attempt=1
  while [[ "$attempt" -le 15 ]]; do
    local status_response
    status_response=$(upcloud_api GET "/server/$server_uuid" 2>/dev/null) || true
    local state
    state=$(echo "$status_response" | python3 -c "import json,sys; print(json.loads(sys.stdin.read())['server']['state'])" 2>/dev/null || echo "unknown")
    if [[ "$state" == "stopped" ]]; then
      break
    fi
    sleep 2
    attempt=$((attempt + 1))
  done

  printf '%b\n' " ${CYAN}live${NC} UpCloud server stopped. Deleting..."

  local delete_response
  delete_response=$(upcloud_api DELETE "/server/${server_uuid}?storages=1") || true
  if [[ -z "$delete_response" ]]; then
    delete_response='{}'
  fi
  _save_live_fixture "$fixture_dir" "delete_server" "DELETE /server/{uuid}" "$delete_response"

  printf '%b\n' " ${CYAN}live${NC} UpCloud server ${server_uuid} deleted"
}

_live_binarylane() {
  local fixture_dir="$1"
  local server_name="spawn-record-$(date +%s)"
  local size="std-1vcpu"
  local region="syd"
  local image="ubuntu-24.04"

  printf '%b\n' " ${CYAN}live${NC} Creating test BinaryLane server '${server_name}' (${size}, ${region})..."

  local ssh_keys_response
  ssh_keys_response=$(binarylane_api GET "/account/keys")
  local ssh_key_ids
  ssh_key_ids=$(echo "$ssh_keys_response" | python3 -c "
import json, sys
d = json.loads(sys.stdin.read())
ids = [k['id'] for k in d.get('ssh_keys', [])]
print(json.dumps(ids))
" 2>/dev/null) || ssh_key_ids="[]"

  local body
  body=$(python3 -c "
import json
body = {
    'name': '${server_name}',
    'region': '${region}',
    'size': '${size}',
    'image': '${image}',
    'ssh_keys': ${ssh_key_ids}
}
print(json.dumps(body))
")

  local create_response
  create_response=$(binarylane_api POST "/servers" "$body")

  _save_live_fixture "$fixture_dir" "create_server" "POST /servers" "$create_response" || {
    printf '%b\n' " ${RED}fail${NC} Could not create BinaryLane server — skipping delete fixture"
    return 0
  }

  local server_id
  server_id=$(echo "$create_response" | python3 -c "import json,sys; print(json.loads(sys.stdin.read())['server']['id'])" 2>/dev/null) || true

  if [[ -z "${server_id:-}" ]]; then
    printf '%b\n' " ${RED}fail${NC} Could not extract BinaryLane server ID from create response"
    cloud_errors=$((cloud_errors + 1))
    return 0
  fi

  printf '%b\n' " ${CYAN}live${NC} BinaryLane server created (ID: ${server_id}). Deleting..."
  sleep 3

  local delete_response
  delete_response=$(binarylane_api DELETE "/servers/${server_id}") || true
  if [[ -z "$delete_response" ]]; then
    delete_response='{}'
  fi
  _save_live_fixture "$fixture_dir" "delete_server" "DELETE /servers/{id}" "$delete_response"

  printf '%b\n' " ${CYAN}live${NC} BinaryLane server ${server_id} deleted"
}

_live_scaleway() {
  local fixture_dir="$1"
  local server_name="spawn-record-$(date +%s)"
  local commercial_type="DEV1-S"

  printf '%b\n' " ${CYAN}live${NC} Creating test Scaleway instance '${server_name}' (${commercial_type})..."

  local project_id
  project_id=$(get_scaleway_project_id) || {
    printf '%b\n' " ${RED}fail${NC} Could not get Scaleway project ID"
    cloud_errors=$((cloud_errors + 1))
    return 0
  }

  local image_id
  image_id=$(get_ubuntu_image_id) || {
    printf '%b\n' " ${RED}fail${NC} Could not find Ubuntu image for Scaleway"
    cloud_errors=$((cloud_errors + 1))
    return 0
  }

  local body
  body=$(python3 -c "
import json
body = {
    'name': '${server_name}',
    'commercial_type': '${commercial_type}',
    'image': '${image_id}',
    'project': '${project_id}',
    'dynamic_ip_required': True
}
print(json.dumps(body))
")

  local create_response
  create_response=$(scaleway_instance_api POST "/servers" "$body")

  _save_live_fixture "$fixture_dir" "create_server" "POST /servers" "$create_response" || {
    printf '%b\n' " ${RED}fail${NC} Could not create Scaleway instance — skipping delete fixture"
    return 0
  }

  local server_id
  server_id=$(echo "$create_response" | python3 -c "import json,sys; print(json.loads(sys.stdin.read())['server']['id'])" 2>/dev/null) || true

  if [[ -z "${server_id:-}" ]]; then
    printf '%b\n' " ${RED}fail${NC} Could not extract Scaleway server ID from create response"
    cloud_errors=$((cloud_errors + 1))
    return 0
  fi

  printf '%b\n' " ${CYAN}live${NC} Scaleway instance created (ID: ${server_id}). Terminating..."
  sleep 2

  # Scaleway: poweroff then terminate
  scaleway_instance_api POST "/servers/$server_id/action" '{"action":"poweroff"}' >/dev/null 2>&1 || true
  sleep 5

  local delete_response
  delete_response=$(scaleway_instance_api POST "/servers/$server_id/action" '{"action":"terminate"}') || true
  if [[ -z "$delete_response" ]]; then
    delete_response='{"task":{"status":"success"}}'
  fi
  _save_live_fixture "$fixture_dir" "delete_server" "POST /servers/{id}/action terminate" "$delete_response"

  printf '%b\n' " ${CYAN}live${NC} Scaleway instance ${server_id} terminated"
}

_live_genesiscloud() {
  local fixture_dir="$1"
  local server_name="spawn-record-$(date +%s)"
  local instance_type="vcpu-4_memory-12g_nvidia-rtx-3080-1"
  local region="ARC-IS-HAF-1"
  local image="Ubuntu 24.04"

  printf '%b\n' " ${CYAN}live${NC} Creating test Genesis Cloud instance '${server_name}' (GPU — ${instance_type})..."

  local ssh_keys_response
  ssh_keys_response=$(genesis_api GET "/ssh-keys")
  local ssh_key_ids
  ssh_key_ids=$(echo "$ssh_keys_response" | python3 -c "
import json, sys
data = json.loads(sys.stdin.read())
ids = [k['id'] for k in data.get('ssh_keys', [])]
print(json.dumps(ids))
" 2>/dev/null) || ssh_key_ids="[]"

  local body
  body=$(python3 -c "
import json
body = {
    'name': '${server_name}',
    'type': '${instance_type}',
    'image': '${image}',
    'region': '${region}',
    'ssh_key_ids': ${ssh_key_ids}
}
print(json.dumps(body))
")

  local create_response
  create_response=$(genesis_api POST "/instances" "$body")

  _save_live_fixture "$fixture_dir" "create_server" "POST /instances" "$create_response" || {
    printf '%b\n' " ${YELLOW}warn${NC} Could not create Genesis Cloud instance (GPU — may be out of capacity)"
    return 0
  }

  local server_id
  server_id=$(echo "$create_response" | python3 -c "import json,sys; print(json.loads(sys.stdin.read())['instance']['id'])" 2>/dev/null) || true

  if [[ -z "${server_id:-}" ]]; then
    printf '%b\n' " ${YELLOW}warn${NC} Could not extract Genesis Cloud instance ID — may be out of GPU capacity"
    return 0
  fi

  printf '%b\n' " ${CYAN}live${NC} Genesis Cloud instance created (ID: ${server_id}). Deleting..."
  sleep 3

  local delete_response
  delete_response=$(genesis_api DELETE "/instances/${server_id}") || true
  if [[ -z "$delete_response" ]]; then
    delete_response='{}'
  fi
  _save_live_fixture "$fixture_dir" "delete_server" "DELETE /instances/{id}" "$delete_response"

  printf '%b\n' " ${CYAN}live${NC} Genesis Cloud instance ${server_id} deleted"
}

_live_latitude() {
  local fixture_dir="$1"
  local hostname="spawn-record-$(date +%s)"
  local plan="vm.tiny"
  local site="DAL2"
  local os="ubuntu_24_04_x64_lts"

  printf '%b\n' " ${CYAN}live${NC} Creating test Latitude server '${hostname}' (${plan}, ${site})..."

  local project_id
  project_id=$(get_latitude_project_id) || {
    printf '%b\n' " ${RED}fail${NC} Could not get Latitude project ID"
    cloud_errors=$((cloud_errors + 1))
    return 0
  }

  local ssh_keys_response
  ssh_keys_response=$(latitude_api GET "/ssh_keys")
  local ssh_key_ids
  ssh_key_ids=$(echo "$ssh_keys_response" | python3 -c "
import json, sys
data = json.loads(sys.stdin.read())
ids = [str(k['data']['id']) if 'data' in k else str(k['id']) for k in data.get('data', [])]
print(json.dumps(ids))
" 2>/dev/null) || ssh_key_ids="[]"

  local body
  body=$(python3 -c "
import json
body = {
    'data': {
        'type': 'servers',
        'attributes': {
            'hostname': '${hostname}',
            'plan': '${plan}',
            'site': '${site}',
            'operating_system': '${os}',
            'project': '${project_id}',
            'ssh_keys': ${ssh_key_ids}
        }
    }
}
print(json.dumps(body))
")

  local create_response
  create_response=$(latitude_api POST "/servers" "$body")

  _save_live_fixture "$fixture_dir" "create_server" "POST /servers" "$create_response" || {
    printf '%b\n' " ${YELLOW}warn${NC} Could not create Latitude server (bare metal — may be unavailable)"
    return 0
  }

  local server_id
  server_id=$(echo "$create_response" | python3 -c "import json,sys; print(json.loads(sys.stdin.read())['data']['id'])" 2>/dev/null) || true

  if [[ -z "${server_id:-}" ]]; then
    printf '%b\n' " ${YELLOW}warn${NC} Could not extract Latitude server ID"
    return 0
  fi

  printf '%b\n' " ${CYAN}live${NC} Latitude server created (ID: ${server_id}). Deleting..."
  sleep 3

  local delete_response
  delete_response=$(latitude_api DELETE "/servers/${server_id}") || true
  if [[ -z "$delete_response" ]]; then
    delete_response='{}'
  fi
  _save_live_fixture "$fixture_dir" "delete_server" "DELETE /servers/{id}" "$delete_response"

  printf '%b\n' " ${CYAN}live${NC} Latitude server ${server_id} deleted"
}

# --- Record one cloud ---
record_cloud() {
  local cloud="$1"

  if ! has_credentials "$cloud"; then
    local env_var
    env_var=$(get_auth_env_var "$cloud")
    if [[ "$PROMPT_FOR_CREDS" == "true" ]]; then
      printf '%b\n' "${CYAN}━━━ ${cloud} ━━━${NC}"
      printf '%b\n' " ${YELLOW}missing${NC} ${env_var}"
      if ! prompt_credentials "$cloud"; then
        printf '%b\n' " ${YELLOW}skip${NC} ${cloud}"
        SKIPPED=$((SKIPPED + 1))
        return 0
      fi
    else
      printf '%b\n' " ${YELLOW}skip${NC} ${cloud} — ${env_var} not set"
      SKIPPED=$((SKIPPED + 1))
      return 0
    fi
  fi

  printf '%b\n' "${CYAN}━━━ Recording ${cloud} ━━━${NC}"

  # Create fixture directory
  local fixture_dir="${FIXTURES_DIR}/${cloud}"
  mkdir -p "$fixture_dir"

  # Source the cloud's lib in a subshell to avoid namespace collisions
  # Capture results via temp files
  local endpoints
  endpoints=$(get_endpoints "$cloud")

  local cloud_recorded=0
  local cloud_errors=0
  local metadata_entries=""

  while IFS=: read -r fixture_name endpoint; do
    [[ -z "$fixture_name" ]] && continue

    local response=""
    local record_ok=false

    # Call API in a subshell that sources the cloud lib
    local tmp_response
    tmp_response=$(mktemp /tmp/spawn-record-XXXXXX)

    (
      # Source cloud lib (this also sources shared/common.sh)
      source "${REPO_ROOT}/${cloud}/lib/common.sh" 2>/dev/null

      # Suppress any interactive prompts by ensuring tokens are loaded
      # The API call itself uses env vars directly
      call_api "$cloud" "$endpoint" 2>/dev/null
    ) > "$tmp_response" 2>/dev/null || true

    response=$(cat "$tmp_response")
    rm -f "$tmp_response"

    if [[ -z "$response" ]]; then
      printf '%b\n' " ${RED}fail${NC} ${fixture_name} — empty response"
      cloud_errors=$((cloud_errors + 1))
      continue
    fi

    if ! echo "$response" | is_valid_json; then
      printf '%b\n' " ${RED}fail${NC} ${fixture_name} — invalid JSON"
      cloud_errors=$((cloud_errors + 1))
      continue
    fi

    if has_api_error "$cloud" "$response"; then
      printf '%b\n' " ${RED}fail${NC} ${fixture_name} — API error response"
      cloud_errors=$((cloud_errors + 1))
      continue
    fi

    # Save pretty-printed fixture
    echo "$response" | pretty_json > "${fixture_dir}/${fixture_name}.json"
    printf '%b\n' " ${GREEN} ok${NC} ${fixture_name} → fixtures/${cloud}/${fixture_name}.json"
    cloud_recorded=$((cloud_recorded + 1))

    # Build metadata entry
    local timestamp
    timestamp=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
    metadata_entries="${metadata_entries} \"${fixture_name}\": {\"endpoint\": \"${endpoint}\", \"recorded_at\": \"${timestamp}\"},
"
  done <<< "$endpoints"

  # --- Live create+delete cycle for write endpoint fixtures ---
  # || true: live cycle failures (e.g. delete timeout) must not abort the whole script
  _record_live_cycle "$cloud" "$fixture_dir" cloud_recorded cloud_errors metadata_entries || true

  # Write metadata
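  # Resulting _metadata.json has this shape (values illustrative):
  #   {
  #     "cloud": "hetzner",
  #     "recorded_at": "2025-01-01T00:00:00Z",
  #     "fixtures": {
  #       "server_types": {"endpoint": "/server_types?per_page=50", "recorded_at": "..."},
  #       "create_server": {"endpoint": "POST /servers", "type": "live", "recorded_at": "..."}
  #     }
  #   }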
  local meta_timestamp
  meta_timestamp=$(date -u +"%Y-%m-%dT%H:%M:%SZ")

  # Remove trailing comma and newline from metadata_entries
  metadata_entries=$(printf '%s' "$metadata_entries" | sed '$ s/,$//')

  cat > "${fixture_dir}/_metadata.json" << METADATA_EOF
{
  "cloud": "${cloud}",
  "recorded_at": "${meta_timestamp}",
  "fixtures": {
${metadata_entries}
  }
}
METADATA_EOF

  RECORDED=$((RECORDED + cloud_recorded))
  ERRORS=$((ERRORS + cloud_errors))

  if [[ "$cloud_errors" -eq 0 ]]; then
    printf '%b\n' " ${GREEN}done${NC} ${cloud_recorded} fixtures recorded"
  else
    printf '%b\n' " ${YELLOW}done${NC} ${cloud_recorded} recorded, ${cloud_errors} failed"
  fi
  printf '\n'
}

# --- List mode ---
list_clouds() {
  printf '%b\n' "${CYAN}Recordable clouds:${NC}"
  printf '\n'
  printf " %-15s %-30s %s\n" "CLOUD" "AUTH ENV VAR" "STATUS"
  printf " %-15s %-30s %s\n" "-----" "------------" "------"

  for cloud in $ALL_RECORDABLE_CLOUDS; do
    local env_var
    env_var=$(get_auth_env_var "$cloud")
    local status

    if has_credentials "$cloud"; then
      status=$(printf '%b' "${GREEN}ready${NC}")
    else
      status=$(printf '%b' "${RED}not set${NC}")
    fi

    # For multi-var clouds, show all required vars
    case "$cloud" in
      upcloud) env_var="UPCLOUD_USERNAME + UPCLOUD_PASSWORD" ;;
      ovh) env_var="OVH_APPLICATION_KEY + 3 more" ;;
      kamatera) env_var="KAMATERA_API_CLIENT_ID + SECRET" ;;
    esac

    printf " %-15s %-30s %b\n" "$cloud" "$env_var" "$status"
  done

  printf '\n'
  local ready_count=0
  for cloud in $ALL_RECORDABLE_CLOUDS; do
    if has_credentials "$cloud"; then
      ready_count=$((ready_count + 1))
    fi
  done

  local total_count
  total_count=$(echo "$ALL_RECORDABLE_CLOUDS" | wc -w | tr -d ' ')
  printf '%b\n' " ${ready_count}/${total_count} clouds have credentials set"
  printf '\n'
  printf " CLI-based clouds (not recordable): sprite, gcp, e2b, modal, fly, daytona, northflank, runpod, vastai, koyeb\n"
}

# --- Main ---
printf '%b\n' "${CYAN}===============================${NC}"
printf '%b\n' "${CYAN} Spawn API Response Recorder${NC}"
printf '%b\n' "${CYAN}===============================${NC}"
printf '\n'

if [[ $# -eq 0 ]]; then
  printf "Usage:\n"
  printf "  bash test/record.sh CLOUD [CLOUD...]   Record fixtures for specified clouds\n"
  printf "  bash test/record.sh all                Record all clouds (prompts for missing keys)\n"
  printf "  bash test/record.sh allsaved           Record clouds that already have keys saved\n"
  printf "  bash test/record.sh --list             Show recordable clouds\n"
  printf '\n'
  exit 0
fi

case "$1" in
  --list|-l)
    list_clouds
    exit 0
    ;;
  --help|-h)
    printf "Usage:\n"
    printf "  bash test/record.sh CLOUD [CLOUD...]   Record fixtures for specified clouds\n"
    printf "  bash test/record.sh all                Record all clouds with credentials\n"
    printf "  bash test/record.sh --list             Show recordable clouds\n"
    printf '\n'
    exit 0
    ;;
esac

# Determine which clouds to record
CLOUDS_TO_RECORD=""
if [[ "$1" == "all" ]]; then
  CLOUDS_TO_RECORD="$ALL_RECORDABLE_CLOUDS"
elif [[ "$1" == "allsaved" ]]; then
  PROMPT_FOR_CREDS=false
  CLOUDS_TO_RECORD="$ALL_RECORDABLE_CLOUDS"
else
  CLOUDS_TO_RECORD="$*"
fi

# Validate cloud names
for cloud in $CLOUDS_TO_RECORD; do
  if ! echo "$ALL_RECORDABLE_CLOUDS" | grep -qw "$cloud"; then
    printf '%b\n' "${RED}Unknown cloud: ${cloud}${NC}"
    printf "Recordable clouds: %s\n" "$ALL_RECORDABLE_CLOUDS"
    exit 1
  fi
done

printf "Fixtures dir: %s\n" "$FIXTURES_DIR"
printf "Clouds: %s\n" "$CLOUDS_TO_RECORD"
printf '\n'

mkdir -p "$FIXTURES_DIR"

for cloud in $CLOUDS_TO_RECORD; do
  record_cloud "$cloud"
done

# --- Summary ---
printf '%b\n' "${CYAN}===============================${NC}"
TOTAL=$((RECORDED + SKIPPED + ERRORS))
printf '%b\n' " Results: ${GREEN}${RECORDED} recorded${NC}, ${YELLOW}${SKIPPED} skipped${NC}, ${RED}${ERRORS} failed${NC}"
printf '%b\n' "${CYAN}===============================${NC}"

if [[ "$ERRORS" -gt 0 ]]; then
  exit 1
fi
exit 0