mirror of
https://github.com/QwenLM/qwen-code.git
synced 2026-04-28 03:30:40 +00:00
test(core): exercise #3619 merge through realistic orphan-tool-call pipeline
The previous version of this test set up model_1 with reasoning + orphaned tool_call but no visible text. cleanOrphanedToolCalls then dropped that assistant entirely (no validToolCalls + empty content = skip), so the merge under test never actually ran — it was a false positive that would not have caught the regression it claims to cover. Restructure the test so model_1 also carries visible text. That keeps the assistant alive through cleanup (content non-empty), strips the orphan tool_call, and leaves it adjacent to model_2 — which is the real-world #3619 trigger pattern. The merge is now genuinely exercised end-to-end through processContent → cleanOrphanedToolCalls → mergeConsecutiveAssistantMessages. Refs #3619
This commit is contained in:
parent
60d2b5dc9b
commit
a3c6c9a8b7
1 changed file with 49 additions and 0 deletions
|
|
@ -2122,6 +2122,55 @@ describe('OpenAIContentConverter', () => {
|
|||
).toBe('only thought');
|
||||
});
|
||||
|
||||
it('should preserve reasoning_content through the realistic #3619 trigger pattern (orphaned tool_call between reasoning turns)', () => {
|
||||
// Realistic #3619 trigger: model_1 has reasoning + visible text + an
|
||||
// orphaned tool_call (no matching tool response). cleanOrphanedToolCalls
|
||||
// strips the tool_call but keeps the message because content is
|
||||
// non-empty. That leaves model_1 adjacent to model_2, and the merge
|
||||
// must carry both reasoning blocks across so the next request to
|
||||
// DeepSeek thinking mode keeps reasoning_content populated.
|
||||
const request: GenerateContentParameters = {
|
||||
model: 'models/test',
|
||||
contents: [
|
||||
{
|
||||
role: 'model',
|
||||
parts: [
|
||||
{ text: 'plan', thought: true },
|
||||
{ text: 'visible 1' },
|
||||
{
|
||||
functionCall: { id: 'call_orphan', name: 'tool_x', args: {} },
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
role: 'model',
|
||||
parts: [
|
||||
{ text: 'replan', thought: true },
|
||||
{ text: 'visible 2' },
|
||||
],
|
||||
},
|
||||
],
|
||||
};
|
||||
|
||||
const messages = converter.convertGeminiRequestToOpenAI(
|
||||
request,
|
||||
requestContext,
|
||||
);
|
||||
|
||||
expect(messages).toHaveLength(1);
|
||||
const merged = messages[0] as {
|
||||
role: string;
|
||||
content: string | null;
|
||||
reasoning_content?: string;
|
||||
tool_calls?: unknown[];
|
||||
};
|
||||
expect(merged.role).toBe('assistant');
|
||||
expect(merged.content).toBe('visible 1visible 2');
|
||||
expect(merged.reasoning_content).toBe('planreplan');
|
||||
// The orphaned tool_call was stripped by cleanOrphanedToolCalls.
|
||||
expect(merged.tool_calls).toBeUndefined();
|
||||
});
|
||||
|
||||
it('should use empty string instead of null for content when merged result has reasoning but no visible text (issue #3499)', () => {
|
||||
// Two reasoning-only assistant turns merge to content='' (Ollama
|
||||
// compatibility), not null. processContent enforces this for single
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue