Mirror of https://github.com/OpenRouterTeam/spawn.git, synced 2026-04-28 03:49:31 +00:00
feat: add --model flag and preferences file for LLM model override (#2543)
Adds a --model / -m CLI flag to override the agent's default LLM model:

    spawn codex gcp --model openai/gpt-5.3-codex

Also supports persistent per-agent model preferences via a config file at
~/.config/spawn/preferences.json:

    { "models": { "codex": "openai/gpt-5.3-codex" } }

Priority: --model flag > preferences file > agent default.
This enables a future web UI to pass model selection via CLI args when
invoking spawn programmatically to provision machines.
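
For illustration, persisting a preference and then overriding it for a single
run (paths and flags as documented above; the second model ID is a placeholder):

    mkdir -p ~/.config/spawn
    echo '{ "models": { "codex": "openai/gpt-5.3-codex" } }' > ~/.config/spawn/preferences.json
    spawn codex gcp                              # uses the model from preferences.json
    spawn codex gcp -m some-provider/some-model  # the flag wins over the preferences file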
Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
Co-authored-by: L <6723574+louisgv@users.noreply.github.com>
This commit is contained in: parent 0d66125fd6, commit d2d71b17ef
6 changed files with 55 additions and 4 deletions
@@ -1,6 +1,6 @@
 {
   "name": "@openrouter/spawn",
-  "version": "0.16.19",
+  "version": "0.17.0",
   "type": "module",
   "bin": {
     "spawn": "cli.js"
@@ -8,6 +8,7 @@ function getHelpUsageSection(): string {
   spawn <agent> <cloud> --dry-run      Preview what would be provisioned (or -n)
   spawn <agent> <cloud> --zone <zone>  Set zone/region (works for all clouds)
   spawn <agent> <cloud> --size <type>  Set instance size/type (works for all clouds)
+  spawn <agent> <cloud> --model <id>   Set the LLM model (e.g. openai/gpt-5.3-codex)
   spawn <agent> <cloud> --custom       Show interactive size/region pickers
   spawn <agent> <cloud> --headless     Provision and exit (no interactive session)
   spawn <agent> <cloud> --output json
@@ -53,6 +54,8 @@ function getHelpExamplesSection(): string {
   spawn claude gcp --zone us-east1-b ${pc.dim("# Use a specific GCP zone")}
   spawn claude gcp --size e2-standard-4
     ${pc.dim("# Use a specific machine type")}
+  spawn codex gcp --model openai/gpt-5.3-codex
+    ${pc.dim("# Override the default LLM model")}
   spawn opencode gcp --dry-run ${pc.dim("# Preview without provisioning")}
   spawn claude hetzner --headless ${pc.dim("# Provision, print connection info, exit")}
   spawn claude hetzner --output json ${pc.dim("# Structured JSON output on stdout")}
@@ -94,6 +97,7 @@ function getHelpTroubleshootingSection(): string {
 function getHelpEnvVarsSection(): string {
   return `${pc.bold("ENVIRONMENT VARIABLES")}
   ${pc.cyan("OPENROUTER_API_KEY")}      OpenRouter API key (all agents require this)
+  ${pc.cyan("MODEL_ID")}                Override agent's default LLM model (or use --model flag)
   ${pc.cyan("SPAWN_NO_UPDATE_CHECK=1")} Skip auto-update check on startup
   ${pc.cyan("SPAWN_NO_UNICODE=1")}      Force ASCII output (no unicode symbols)
   ${pc.cyan("SPAWN_UNICODE=1")}         Force Unicode output (override auto-detection)
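
For reference, the MODEL_ID environment variable documented above is read
directly by the orchestration code (see the model-resolution hunk below), so
this hypothetical invocation is equivalent to passing --model:

    MODEL_ID=openai/gpt-5.3-codex spawn codex gcp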
@@ -31,6 +31,8 @@ export const KNOWN_FLAGS = new Set([
   "--prune",
   "--json",
   "--beta",
+  "--model",
+  "-m",
 ]);

 /** Return the first unknown flag in args, or null if all are known/positional */
@@ -115,6 +115,7 @@ function checkUnknownFlags(args: string[]): void {
   console.error(`  ${pc.cyan("--custom")}               Show interactive size/region pickers`);
   console.error(`  ${pc.cyan("--zone, --region")}       Set zone/region (e.g. us-east1-b, nyc3)`);
   console.error(`  ${pc.cyan("--size, --machine-type")} Set instance size (e.g. e2-standard-4, s-2vcpu-2gb)`);
+  console.error(`  ${pc.cyan("--model, -m")}            Set the LLM model (e.g. openai/gpt-5.3-codex)`);
   console.error(`  ${pc.cyan("--name")}                 Set the spawn/resource name`);
   console.error(`  ${pc.cyan("--reauth")}               Force re-prompting for cloud credentials`);
   console.error(`  ${pc.cyan("--beta tarball")}         Use pre-built tarball for agent install (repeatable)`);
@@ -865,6 +866,21 @@ async function main(): Promise<void> {
     process.env.LIGHTSAIL_BUNDLE = sizeFlag;
   }

+  // Extract --model / -m <model_id> flag (overrides the agent's default model)
+  const [modelFlag, modelFilteredArgs] = extractFlagValue(
+    filteredArgs,
+    [
+      "--model",
+      "-m",
+    ],
+    "model ID",
+    "spawn codex gcp --model openai/gpt-5.3-codex",
+  );
+  filteredArgs.splice(0, filteredArgs.length, ...modelFilteredArgs);
+  if (modelFlag) {
+    process.env.MODEL_ID = modelFlag;
+  }
+
   // --output implies --headless
   const effectiveHeadless = headless || !!outputFormat;
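
Note: extractFlagValue itself is not part of this diff. A minimal sketch of the
contract implied by the call site above (names, error text, and exit behavior
are assumptions, not the project's actual implementation):

    // Sketch only: scan args for any of the given flag names and consume the
    // value that follows; return [value-or-null, args-with-flag-removed].
    function extractFlagValue(
      args: string[],
      flagNames: string[],
      valueLabel: string,
      exampleUsage: string,
    ): [string | null, string[]] {
      const rest: string[] = [];
      let value: string | null = null;
      for (let i = 0; i < args.length; i++) {
        if (flagNames.includes(args[i])) {
          const next = args[i + 1];
          if (next === undefined || next.startsWith("-")) {
            // Assumed error handling: report the missing value and bail out.
            console.error(`Missing ${valueLabel} after ${args[i]} (e.g. ${exampleUsage})`);
            process.exit(1);
          }
          value = next;
          i++; // skip the value we just consumed
        } else {
          rest.push(args[i]);
        }
      }
      return [value, rest];
    }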
@@ -6,12 +6,15 @@ import type { CloudRunner } from "./agent-setup";
 import type { AgentConfig } from "./agents";
 import type { SshTunnelHandle } from "./ssh";

+import { readFileSync } from "node:fs";
+import * as v from "valibot";
 import { generateSpawnId, saveLaunchCmd, saveSpawnRecord } from "../history.js";
 import { offerGithubAuth, wrapSshCall } from "./agent-setup";
 import { tryTarballInstall } from "./agent-tarball";
 import { generateEnvConfig } from "./agents";
 import { getOrPromptApiKey } from "./oauth";
-import { asyncTryCatch, asyncTryCatchIf, isOperationalError } from "./result.js";
+import { getSpawnPreferencesPath } from "./paths";
+import { asyncTryCatch, asyncTryCatchIf, isFileError, isOperationalError, tryCatchIf } from "./result.js";
 import { startSshTunnel } from "./ssh";
 import { ensureSshKeys, getSshKeyOpts } from "./ssh-keys";
 import { getErrorMessage } from "./type-guards";
@@ -78,6 +81,27 @@ export interface OrchestrationOptions {
   getApiKey?: (agentSlug?: string, cloudSlug?: string) => Promise<string>;
 }

+/**
+ * Load a preferred model from ~/.config/spawn/preferences.json.
+ * Format: { "models": { "codex": "openai/gpt-5.3-codex", "openclaw": "anthropic/claude-sonnet-4.6" } }
+ * Returns null if no preference is set or the file doesn't exist.
+ */
+const PreferencesSchema = v.object({
+  models: v.optional(v.record(v.string(), v.string())),
+});
+
+function loadPreferredModel(agentName: string): string | null {
+  const result = tryCatchIf(isFileError, () => {
+    const raw = JSON.parse(readFileSync(getSpawnPreferencesPath(), "utf-8"));
+    const parsed = v.safeParse(PreferencesSchema, raw);
+    if (!parsed.success) {
+      return null;
+    }
+    return parsed.output.models?.[agentName] ?? null;
+  });
+  return result.ok ? result.data : null;
+}
+
 export async function runOrchestration(
   cloud: CloudOrchestrator,
   agent: AgentConfig,
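
Note: tryCatchIf and isFileError come from ./result.js, which is not shown in
this diff. A minimal sketch of the shape this call site assumes (names and
types are assumptions, not the project's actual code):

    // Sketch only: run fn, catching exceptions the predicate accepts, so a
    // missing preferences.json yields { ok: false } instead of crashing.
    type Result<T> = { ok: true; data: T } | { ok: false; error: unknown };

    function tryCatchIf<T>(shouldCatch: (e: unknown) => boolean, fn: () => T): Result<T> {
      try {
        return { ok: true, data: fn() };
      } catch (error) {
        if (shouldCatch(error)) {
          return { ok: false, error };
        }
        throw error; // unexpected errors still propagate
      }
    }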
@@ -115,8 +139,8 @@ export async function runOrchestration(
     }
   }

-  // 4. Model ID (use agent default — no interactive prompt)
-  const rawModelId = agent.modelDefault || process.env.MODEL_ID;
+  // 4. Model ID — priority: --model flag (MODEL_ID env) > preferences file > agent default
+  const rawModelId = process.env.MODEL_ID || loadPreferredModel(agentName) || agent.modelDefault;
   const modelId = rawModelId && validateModelId(rawModelId) ? rawModelId : undefined;
   if (rawModelId && !modelId) {
     logWarn(`Ignoring invalid MODEL_ID: ${rawModelId}`);
@@ -53,6 +53,11 @@ export function getSpawnCloudConfigPath(cloud: string): string {
   return join(getUserHome(), ".config", "spawn", `${cloud}.json`);
 }

+/** Return the path to the spawn preferences file: ~/.config/spawn/preferences.json */
+export function getSpawnPreferencesPath(): string {
+  return join(getUserHome(), ".config", "spawn", "preferences.json");
+}
+
 /** Return the cache directory for spawn, respecting XDG_CACHE_HOME. */
 export function getCacheDir(): string {
   return join(process.env.XDG_CACHE_HOME || join(getUserHome(), ".cache"), "spawn");
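
As a quick illustration, assuming getUserHome() resolves to /home/alice:

    getSpawnPreferencesPath()
    // -> "/home/alice/.config/spawn/preferences.json"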