fix(core,cli): stop stripping reasoning on switch and resume paths (#3682)

This commit is contained in:
Fu Yuchen 2026-04-28 09:22:17 +08:00 committed by GitHub
parent 4ac9ec07c3
commit d09c19c0c5
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
4 changed files with 7 additions and 10 deletions

View file

@@ -501,7 +501,7 @@ describe('useSlashCommandProcessor', () => {
);
});
-it('should strip thoughts when handling "load_history" action', async () => {
+it('should preserve thoughts when handling "load_history" action', async () => {
const mockClient = {
setHistory: vi.fn(),
stripThoughtsFromHistory: vi.fn(),
@@ -531,7 +531,7 @@
});
expect(mockClient.setHistory).toHaveBeenCalledTimes(1);
-expect(mockClient.stripThoughtsFromHistory).toHaveBeenCalledWith();
+expect(mockClient.stripThoughtsFromHistory).not.toHaveBeenCalled();
});
it('should handle a "quit" action', async () => {

View file

@@ -647,7 +647,6 @@ export const useSlashCommandProcessor = (
}
case 'load_history': {
config?.getGeminiClient()?.setHistory(result.clientHistory);
-config?.getGeminiClient()?.stripThoughtsFromHistory();
fullCommandContext.ui.clear();
result.history.forEach((item, index) => {
fullCommandContext.ui.addItem(item, index);

View file

@@ -531,7 +531,7 @@ describe('Server Config (config.ts)', () => {
expect(vi.mocked(createContentGenerator)).toHaveBeenCalledTimes(1);
});
-it('should strip thoughts from history on model switch (#3304)', async () => {
+it('should preserve thoughts from history on model switch', async () => {
const config = new Config(baseParams);
const mockContentConfig: ContentGeneratorConfig = {
@@ -567,7 +567,7 @@ describe('Server Config (config.ts)', () => {
await config.switchModel(AuthType.QWEN_OAUTH, 'coder-model');
-expect(stripSpy).toHaveBeenCalledTimes(1);
+expect(stripSpy).not.toHaveBeenCalled();
});
});

View file

@@ -1423,11 +1423,9 @@ export class Config {
return;
}
-// Strip thinking blocks from conversation history on model switch.
-// reasoning_content is a non-standard field that causes strict
-// OpenAI-compatible providers to reject requests with 422 errors
-// when thought parts from a previous model leak into the payload (#3304).
-this.geminiClient.stripThoughtsFromHistory();
+// Keep full history (including thought parts) on model switch.
+// Some OpenAI-compatible reasoning models (e.g. DeepSeek) require
+// reasoning_content to be preserved across turns.
// Hot update path: only supported for qwen-oauth.
// For other auth types we always refresh to recreate the ContentGenerator.