Merge pull request #1415 from QwenLM/fix/openai-reasoning-config

fix(core): don’t force reasoning/topP defaults for OpenAI-compatible APIs
This commit is contained in:
tanzhenxin 2026-01-07 16:59:26 +08:00 committed by GitHub
commit 2c8be05029
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
2 changed files with 15 additions and 10 deletions

View file

@@ -317,15 +317,22 @@ export class ContentGenerationPipeline {
} }
private buildReasoningConfig(): Record<string, unknown> { private buildReasoningConfig(): Record<string, unknown> {
const reasoning = this.contentGeneratorConfig.reasoning; // Reasoning configuration for OpenAI-compatible endpoints is highly fragmented.
// For example, across common providers and models:
//
// - deepseek-reasoner — thinking is enabled by default and cannot be disabled
// - glm-4.7 — thinking is enabled by default; can be disabled via `extra_body.thinking.enabled`
// - kimi-k2-thinking — thinking is enabled by default and cannot be disabled
// - gpt-5.x series — thinking is enabled by default; can be disabled via `reasoning.effort`
// - qwen3 series — model-dependent; can be manually disabled via `extra_body.enable_thinking`
//
// Given this inconsistency, we choose not to set any reasoning config here and
// instead rely on each model's default behavior.
if (reasoning === false) { // We plan to introduce provider- and model-specific settings to enable more
return {}; // fine-grained control over reasoning configuration.
}
return { return {};
reasoning_effort: reasoning?.effort ?? 'medium',
};
} }
/** /**

View file

@@ -58,8 +58,6 @@ export class DefaultOpenAICompatibleProvider
} }
getDefaultGenerationConfig(): GenerateContentConfig { getDefaultGenerationConfig(): GenerateContentConfig {
return { return {};
topP: 0.95,
};
} }
} }