fix(cli): respect OPENAI_MODEL precedence in CLI model resolution (#3567)
Some checks failed
Qwen Code CI / Lint (push) Waiting to run
Qwen Code CI / Test (push) Blocked by required conditions
Qwen Code CI / Test-1 (push) Blocked by required conditions
Qwen Code CI / Test-2 (push) Blocked by required conditions
Qwen Code CI / Test-3 (push) Blocked by required conditions
Qwen Code CI / Test-4 (push) Blocked by required conditions
Qwen Code CI / Test-5 (push) Blocked by required conditions
Qwen Code CI / Test-6 (push) Blocked by required conditions
Qwen Code CI / Test-7 (push) Blocked by required conditions
Qwen Code CI / Test-8 (push) Blocked by required conditions
Qwen Code CI / Post Coverage Comment (push) Blocked by required conditions
Qwen Code CI / CodeQL (push) Waiting to run
E2E Tests / E2E Test (Linux) - sandbox:none (push) Waiting to run
E2E Tests / E2E Test - macOS (push) Waiting to run
E2E Tests / E2E Test (Linux) - sandbox:docker (push) Waiting to run
SDK Python / SDK Python (3.10) (push) Has been cancelled
SDK Python / SDK Python (3.11) (push) Has been cancelled
SDK Python / SDK Python (3.12) (push) Has been cancelled

* fix(cli): respect OPENAI_MODEL precedence in CLI model resolution

* test(cli): cover env-driven model precedence for OpenAI-compatible auth

* fix(cli): scope model env precedence by auth type

* test(cli): cover QWEN_MODEL fallback precedence
This commit is contained in:
John London 2026-04-24 18:19:27 -05:00 committed by GitHub
parent e384338145
commit 007a109db8
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
2 changed files with 254 additions and 2 deletions

View file

@ -442,6 +442,187 @@ describe('modelConfigUtils', () => {
);
});
it('should find modelProvider from OPENAI_MODEL when argv.model is not provided', () => {
const argv = {};
const modelProvider: ProviderModelConfig = {
id: 'env-openai-model',
name: 'Env OpenAI Model',
generationConfig: {
samplingParams: { temperature: 0.6 },
},
};
const settings = makeMockSettings({
model: { name: 'settings-model' },
modelProviders: {
[AuthType.USE_OPENAI]: [
{ id: 'settings-model', name: 'Settings Model' },
modelProvider,
],
},
});
const selectedAuthType = AuthType.USE_OPENAI;
vi.mocked(resolveModelConfig).mockReturnValue({
config: {
model: 'env-openai-model',
apiKey: '',
baseUrl: '',
},
sources: {},
warnings: [],
});
resolveCliGenerationConfig({
argv,
settings,
selectedAuthType,
env: { OPENAI_MODEL: 'env-openai-model' },
});
expect(vi.mocked(resolveModelConfig)).toHaveBeenCalledWith(
expect.objectContaining({
modelProvider,
}),
);
});
it('should find modelProvider from QWEN_MODEL when OPENAI_MODEL is not provided', () => {
const argv = {};
const modelProvider: ProviderModelConfig = {
id: 'qwen-env-model',
name: 'Qwen Env Model',
generationConfig: {
samplingParams: { temperature: 0.7 },
},
};
const settings = makeMockSettings({
model: { name: 'settings-model' },
modelProviders: {
[AuthType.USE_OPENAI]: [
{ id: 'settings-model', name: 'Settings Model' },
modelProvider,
],
},
});
const selectedAuthType = AuthType.USE_OPENAI;
vi.mocked(resolveModelConfig).mockReturnValue({
config: {
model: 'qwen-env-model',
apiKey: '',
baseUrl: '',
},
sources: {},
warnings: [],
});
resolveCliGenerationConfig({
argv,
settings,
selectedAuthType,
env: { QWEN_MODEL: 'qwen-env-model' },
});
expect(vi.mocked(resolveModelConfig)).toHaveBeenCalledWith(
expect.objectContaining({
modelProvider,
}),
);
});
it('should prefer OPENAI_MODEL over QWEN_MODEL and settings.model.name for USE_OPENAI provider lookup', () => {
const argv = {};
const openAIProvider: ProviderModelConfig = {
id: 'openai-env-model',
name: 'OpenAI Env Model',
};
const qwenProvider: ProviderModelConfig = {
id: 'qwen-env-model',
name: 'Qwen Env Model',
};
const settings = makeMockSettings({
model: { name: 'settings-model' },
modelProviders: {
[AuthType.USE_OPENAI]: [
{ id: 'settings-model', name: 'Settings Model' },
qwenProvider,
openAIProvider,
],
},
});
const selectedAuthType = AuthType.USE_OPENAI;
vi.mocked(resolveModelConfig).mockReturnValue({
config: {
model: 'openai-env-model',
apiKey: '',
baseUrl: '',
},
sources: {},
warnings: [],
});
resolveCliGenerationConfig({
argv,
settings,
selectedAuthType,
env: {
OPENAI_MODEL: 'openai-env-model',
QWEN_MODEL: 'qwen-env-model',
},
});
expect(vi.mocked(resolveModelConfig)).toHaveBeenCalledWith(
expect.objectContaining({
modelProvider: openAIProvider,
}),
);
});
it('should ignore OPENAI_MODEL for non-USE_OPENAI provider lookup', () => {
const argv = {};
const settingsModelProvider: ProviderModelConfig = {
id: 'settings-model',
name: 'Settings Model',
};
const unrelatedOpenAIProvider: ProviderModelConfig = {
id: 'openai-env-model',
name: 'OpenAI Env Model',
};
const settings = makeMockSettings({
model: { name: 'settings-model' },
modelProviders: {
[AuthType.USE_ANTHROPIC]: [
settingsModelProvider,
unrelatedOpenAIProvider,
],
},
});
const selectedAuthType = AuthType.USE_ANTHROPIC;
vi.mocked(resolveModelConfig).mockReturnValue({
config: {
model: 'settings-model',
apiKey: '',
baseUrl: '',
},
sources: {},
warnings: [],
});
resolveCliGenerationConfig({
argv,
settings,
selectedAuthType,
env: { OPENAI_MODEL: 'openai-env-model' },
});
expect(vi.mocked(resolveModelConfig)).toHaveBeenCalledWith(
expect.objectContaining({
modelProvider: settingsModelProvider,
}),
);
});
it('should not find modelProvider when authType is undefined', () => {
const argv = { model: 'test-model' };
const settings = makeMockSettings({
@ -723,5 +904,71 @@ describe('modelConfigUtils', () => {
}),
);
});
it('should respect precedence: argv.model > OPENAI_MODEL > QWEN_MODEL > settings.model.name', () => {
  // One provider per precedence source so each step can only match its own id.
  const mockSettings = makeMockSettings({
    model: { name: 'settings-model' },
    modelProviders: {
      [AuthType.USE_OPENAI]: [
        { id: 'settings-model' } as ProviderModelConfig,
        { id: 'openai-env-model' } as ProviderModelConfig,
        { id: 'qwen-env-model' } as ProviderModelConfig,
        { id: 'cli-model' } as ProviderModelConfig,
      ],
    },
  });
  // Step 1: argv.model outranks everything, including OPENAI_MODEL.
  vi.mocked(resolveModelConfig).mockReturnValue({
    config: { model: 'cli-model', apiKey: '', baseUrl: '' },
    sources: {},
    warnings: [],
  });
  const result1 = resolveCliGenerationConfig({
    argv: { model: 'cli-model' },
    settings: mockSettings,
    selectedAuthType: AuthType.USE_OPENAI,
    env: { OPENAI_MODEL: 'openai-env-model' },
  });
  expect(result1.model).toBe('cli-model');
  // Step 2: with no argv.model, OPENAI_MODEL outranks QWEN_MODEL and settings.
  vi.mocked(resolveModelConfig).mockReturnValue({
    config: { model: 'openai-env-model', apiKey: '', baseUrl: '' },
    sources: {},
    warnings: [],
  });
  const result2 = resolveCliGenerationConfig({
    argv: {},
    settings: mockSettings,
    selectedAuthType: AuthType.USE_OPENAI,
    env: { OPENAI_MODEL: 'openai-env-model', QWEN_MODEL: 'qwen-env-model' },
  });
  expect(result2.model).toBe('openai-env-model');
  // Step 3: with only QWEN_MODEL set, it outranks settings.model.name.
  // (Previously this step repeated the OPENAI_MODEL-only case and the
  // QWEN_MODEL > settings link in the stated precedence chain went untested.)
  vi.mocked(resolveModelConfig).mockReturnValue({
    config: { model: 'qwen-env-model', apiKey: '', baseUrl: '' },
    sources: {},
    warnings: [],
  });
  const result3 = resolveCliGenerationConfig({
    argv: {},
    settings: mockSettings,
    selectedAuthType: AuthType.USE_OPENAI,
    env: { QWEN_MODEL: 'qwen-env-model' },
  });
  expect(result3.model).toBe('qwen-env-model');
  // Step 4: with no argv.model and no env vars, settings.model.name is the fallback.
  vi.mocked(resolveModelConfig).mockReturnValue({
    config: { model: 'settings-model', apiKey: '', baseUrl: '' },
    sources: {},
    warnings: [],
  });
  const result4 = resolveCliGenerationConfig({
    argv: {},
    settings: mockSettings,
    selectedAuthType: AuthType.USE_OPENAI,
    env: {},
  });
  expect(result4.model).toBe('settings-model');
});
});
});

View file

@ -100,8 +100,13 @@ export function resolveCliGenerationConfig(
if (authType && settings.modelProviders) {
const providers = settings.modelProviders[authType];
if (providers && Array.isArray(providers)) {
// Try to find by requested model (from CLI or settings)
const requestedModel = argv.model || settings.model?.name;
const requestedModel =
authType === AuthType.USE_OPENAI
? argv.model ||
env['OPENAI_MODEL'] ||
env['QWEN_MODEL'] ||
settings.model?.name
: argv.model || settings.model?.name;
if (requestedModel) {
modelProvider = providers.find((p) => p.id === requestedModel) as
| ProviderModelConfig