review: stage1

This commit is contained in:
mingholy.lmh 2025-12-30 19:49:15 +08:00
parent 570ec432af
commit aa9cdf2a3c
10 changed files with 206 additions and 19 deletions

View file

@ -32,7 +32,7 @@ describe('validateAuthMethod', () => {
it('should return an error message for USE_OPENAI if OPENAI_API_KEY is not set', () => {
delete process.env['OPENAI_API_KEY'];
expect(validateAuthMethod(AuthType.USE_OPENAI)).toBe(
'OPENAI_API_KEY environment variable not found. You can enter it interactively or add it to your .env file.',
"Missing API key for OpenAI-compatible auth. Set settings.security.auth.apiKey, or set the 'OPENAI_API_KEY' environment variable. If you configured a model in settings.modelProviders with an envKey, set that env var as well.",
);
});

View file

@ -15,7 +15,11 @@ export function validateAuthMethod(authMethod: string): string | null {
const hasApiKey =
process.env['OPENAI_API_KEY'] || settings.merged.security?.auth?.apiKey;
if (!hasApiKey) {
return 'OPENAI_API_KEY environment variable not found. You can enter it interactively or add it to your .env file.';
return (
'Missing API key for OpenAI-compatible auth. ' +
"Set settings.security.auth.apiKey, or set the 'OPENAI_API_KEY' environment variable. " +
'If you configured a model in settings.modelProviders with an envKey, set that env var as well.'
);
}
return null;
}

View file

@ -31,6 +31,10 @@ import {
} from '@qwen-code/qwen-code-core';
import { extensionsCommand } from '../commands/extensions.js';
import type { Settings } from './settings.js';
import {
buildGenerationConfigSources,
getModelProvidersConfigFromSettings,
} from '../utils/modelProviderUtils.js';
import yargs, { type Argv } from 'yargs';
import { hideBin } from 'yargs/helpers';
import * as fs from 'node:fs';
@ -979,6 +983,18 @@ export async function loadCliConfig(
}
}
const modelProvidersConfig = getModelProvidersConfigFromSettings(settings);
const generationConfigSources = buildGenerationConfigSources({
argv: {
model: argv.model,
openaiApiKey: argv.openaiApiKey,
openaiBaseUrl: argv.openaiBaseUrl,
},
settings,
selectedAuthType,
env: process.env as Record<string, string | undefined>,
});
return new Config({
sessionId,
sessionData,
@ -1036,6 +1052,8 @@ export async function loadCliConfig(
inputFormat,
outputFormat,
includePartialMessages,
modelProvidersConfig,
generationConfigSources,
generationConfig: {
...(settings.model?.generationConfig || {}),
model: resolvedModel,

View file

@ -10,6 +10,7 @@ import type {
TelemetrySettings,
AuthType,
ChatCompressionSettings,
ModelProvidersConfig,
} from '@qwen-code/qwen-code-core';
import {
ApprovalMode,
@ -102,6 +103,19 @@ const SETTINGS_SCHEMA = {
mergeStrategy: MergeStrategy.SHALLOW_MERGE,
},
// Model providers configuration grouped by authType
modelProviders: {
type: 'object',
label: 'Model Providers',
category: 'Model',
requiresRestart: false,
default: {} as ModelProvidersConfig,
description:
'Model providers configuration grouped by authType. Each authType contains an array of model configurations.',
showInDialog: false,
mergeStrategy: MergeStrategy.SHALLOW_MERGE,
},
general: {
type: 'object',
label: 'General',

View file

@ -0,0 +1,142 @@
/**
* @license
* Copyright 2025 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
import {
AuthType,
type ContentGeneratorConfig,
type ContentGeneratorConfigSource,
type ContentGeneratorConfigSources,
type ModelProvidersConfig,
type ProviderModelConfig as ModelConfig,
} from '@qwen-code/qwen-code-core';
import type { Settings } from '../config/settings.js';
/**
 * Inputs needed to attribute where each seed generationConfig field came
 * from (CLI flag, environment variable, or merged settings).
 */
export interface GenerationConfigSourceInputs {
  /** Relevant CLI flags as parsed by the CLI entry point. */
  argv: {
    model?: string | undefined;
    openaiApiKey?: string | undefined;
    openaiBaseUrl?: string | undefined;
  };
  /** Merged settings; attribution does not distinguish user vs workspace layers. */
  settings: Settings;
  /** Auth method selected for this session; attribution rules depend on it. */
  selectedAuthType: AuthType | undefined;
  /**
   * Injectable env for testability. Defaults to process.env at callsites.
   */
  env?: Record<string, string | undefined>;
}
/**
 * Read the model-providers configuration (grouped by authType) out of the
 * merged settings. Purely a read — settings files are never mutated.
 *
 * @returns The configured providers map, or an empty object when none is set.
 */
export function getModelProvidersConfigFromSettings(
  settings: Settings,
): ModelProvidersConfig {
  // Guard clause: fall back to an empty config when settings carry no entry.
  if (settings.modelProviders) {
    return settings.modelProviders as ModelProvidersConfig;
  }
  return {};
}
/**
 * Look up the configured models for one specific authType.
 *
 * @returns The model list for that authType, or an empty array when the
 *   authType has no entry in settings.modelProviders.
 */
export function getModelsForAuthType(
  settings: Settings,
  authType: AuthType,
): ModelConfig[] {
  const providers = getModelProvidersConfigFromSettings(settings);
  const modelsForType = providers[authType];
  // The looked-up value is ModelConfig[] | undefined, so ?? covers the only falsy case.
  return modelsForType ?? [];
}
/**
 * Best-effort attribution for the seed generationConfig fields.
 *
 * Walks the same precedence chains the CLI uses when computing the actual
 * values (CLI flag > environment variable > merged settings) and records,
 * per field, which layer supplied it.
 *
 * NOTE:
 * - This does not attempt to distinguish user vs workspace settings; it reflects merged settings.
 * - This should stay consistent with the actual precedence used to compute the corresponding values.
 */
export function buildGenerationConfigSources(
  inputs: GenerationConfigSourceInputs,
): ContentGeneratorConfigSources {
  const { argv, settings, selectedAuthType } = inputs;
  const environment =
    inputs.env ?? (process.env as Record<string, string | undefined>);
  const sources: ContentGeneratorConfigSources = {};

  // Record the source of the first candidate whose `present` value is truthy.
  const recordFirst = (
    path: string,
    candidates: ReadonlyArray<{
      present: unknown;
      source: ContentGeneratorConfigSource;
    }>,
  ): void => {
    const winner = candidates.find((candidate) => candidate.present);
    if (winner) {
      sources[path] = winner.source;
    }
  };

  // Model/apiKey/baseUrl attribution mirrors current CLI precedence:
  // - model: argv.model > (OPENAI_MODEL|QWEN_MODEL|settings.model.name) only for OpenAI auth
  // - apiKey/baseUrl: only meaningful for OpenAI auth in current CLI wiring
  if (selectedAuthType === AuthType.USE_OPENAI) {
    recordFirst('model', [
      { present: argv.model, source: { kind: 'cli', detail: '--model' } },
      {
        present: environment['OPENAI_MODEL'],
        source: { kind: 'env', envKey: 'OPENAI_MODEL' },
      },
      {
        present: environment['QWEN_MODEL'],
        source: { kind: 'env', envKey: 'QWEN_MODEL' },
      },
      {
        present: settings.model?.name,
        source: { kind: 'settings', settingsPath: 'model.name' },
      },
    ]);
    recordFirst('apiKey', [
      {
        present: argv.openaiApiKey,
        source: { kind: 'cli', detail: '--openaiApiKey' },
      },
      {
        present: environment['OPENAI_API_KEY'],
        source: { kind: 'env', envKey: 'OPENAI_API_KEY' },
      },
      {
        present: settings.security?.auth?.apiKey,
        source: { kind: 'settings', settingsPath: 'security.auth.apiKey' },
      },
    ]);
    recordFirst('baseUrl', [
      {
        present: argv.openaiBaseUrl,
        source: { kind: 'cli', detail: '--openaiBaseUrl' },
      },
      {
        present: environment['OPENAI_BASE_URL'],
        source: { kind: 'env', envKey: 'OPENAI_BASE_URL' },
      },
      {
        present: settings.security?.auth?.baseUrl,
        source: { kind: 'settings', settingsPath: 'security.auth.baseUrl' },
      },
    ]);
  } else if (argv.model) {
    // For non-openai auth types, the CLI only wires through an explicit raw model override.
    sources['model'] = { kind: 'cli', detail: '--model' };
  }

  const mergedGenerationConfig = settings.model?.generationConfig as
    | Partial<ContentGeneratorConfig>
    | undefined;
  if (mergedGenerationConfig) {
    sources['generationConfig'] = {
      kind: 'settings',
      settingsPath: 'model.generationConfig',
    };
    // Also attribute the known top-level fields used by core.
    if (mergedGenerationConfig.samplingParams) {
      sources['samplingParams'] = {
        kind: 'settings',
        settingsPath: 'model.generationConfig.samplingParams',
      };
    }
    const passthroughKeys = [
      'timeout',
      'maxRetries',
      'disableCacheControl',
      'schemaCompliance',
    ] as const;
    for (const key of passthroughKeys) {
      if (mergedGenerationConfig[key] !== undefined) {
        sources[key] = {
          kind: 'settings',
          settingsPath: `model.generationConfig.${key}`,
        };
      }
    }
  }
  return sources;
}