mirror of
https://github.com/QwenLM/qwen-code.git
synced 2026-04-28 03:30:40 +00:00
fix(vscode-ide-companion): preserve split stream ordering (#3450)
This commit is contained in:
parent
60a6dfc14c
commit
cfe142e9a3
2 changed files with 111 additions and 4 deletions
|
|
@ -0,0 +1,97 @@
|
|||
/**
|
||||
* @license
|
||||
* Copyright 2025 Qwen Team
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
/** @vitest-environment jsdom */
|
||||
|
||||
import { act } from 'react';
|
||||
import { createRoot, type Root } from 'react-dom/client';
|
||||
import { afterEach, beforeEach, describe, expect, it } from 'vitest';
|
||||
import { useMessageHandling, type TextMessage } from './useMessageHandling.js';
|
||||
|
||||
type MessageHandlingApi = ReturnType<typeof useMessageHandling>;

/**
 * Mounts a throwaway component that invokes the hook under test and
 * captures its latest return value, so tests can drive the hook's API
 * and inspect its state without a real UI.
 */
function renderHookHarness() {
  const mountPoint = document.createElement('div');
  document.body.appendChild(mountPoint);
  const reactRoot = createRoot(mountPoint);

  // Updated on every render of the probe component below.
  let capturedApi: MessageHandlingApi | null = null;

  const HookProbe = () => {
    capturedApi = useMessageHandling();
    return null;
  };

  act(() => {
    reactRoot.render(<HookProbe />);
  });

  return {
    container: mountPoint,
    root: reactRoot,
    // Getter rather than a snapshot: always reflects the most recent render.
    get api(): MessageHandlingApi {
      if (!capturedApi) {
        throw new Error('Hook API is not available');
      }
      return capturedApi;
    },
  };
}
||||
describe('useMessageHandling', () => {
|
||||
let root: Root | null = null;
|
||||
let container: HTMLDivElement | null = null;
|
||||
|
||||
beforeEach(() => {
|
||||
(
|
||||
globalThis as typeof globalThis & { IS_REACT_ACT_ENVIRONMENT?: boolean }
|
||||
).IS_REACT_ACT_ENVIRONMENT = true;
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
if (root) {
|
||||
act(() => {
|
||||
root?.unmount();
|
||||
});
|
||||
root = null;
|
||||
}
|
||||
if (container) {
|
||||
container.remove();
|
||||
container = null;
|
||||
}
|
||||
});
|
||||
|
||||
it('keeps the original stream timestamp when a tool call splits one assistant reply into multiple segments', () => {
|
||||
const rendered = renderHookHarness();
|
||||
root = rendered.root;
|
||||
container = rendered.container;
|
||||
|
||||
act(() => {
|
||||
rendered.api.startStreaming(1_000);
|
||||
});
|
||||
|
||||
act(() => {
|
||||
rendered.api.appendStreamChunk('before tool call');
|
||||
});
|
||||
|
||||
act(() => {
|
||||
rendered.api.breakAssistantSegment();
|
||||
});
|
||||
|
||||
act(() => {
|
||||
rendered.api.appendStreamChunk('after tool call');
|
||||
});
|
||||
|
||||
const assistantMessages = rendered.api.messages.filter(
|
||||
(message): message is TextMessage => message.role === 'assistant',
|
||||
);
|
||||
|
||||
expect(assistantMessages).toHaveLength(2);
|
||||
expect(assistantMessages.map((message) => message.timestamp)).toEqual([
|
||||
1_000, 1_000,
|
||||
]);
|
||||
});
|
||||
});
|
||||
|
|
@ -35,6 +35,8 @@ export const useMessageHandling = () => {
|
|||
const streamingMessageIndexRef = useRef<number | null>(null);
|
||||
// Track the index of the current aggregated thinking message
|
||||
const thinkingMessageIndexRef = useRef<number | null>(null);
|
||||
// Preserve one stable timestamp for all message segments in the same turn.
|
||||
const currentStreamTimestampRef = useRef<number | null>(null);
|
||||
|
||||
/**
|
||||
* Add message
|
||||
|
|
@ -54,6 +56,9 @@ export const useMessageHandling = () => {
|
|||
* Start streaming response
|
||||
*/
|
||||
const startStreaming = useCallback((timestamp?: number) => {
|
||||
const resolvedTimestamp =
|
||||
typeof timestamp === 'number' ? timestamp : Date.now();
|
||||
currentStreamTimestampRef.current = resolvedTimestamp;
|
||||
// Create an assistant placeholder message immediately so tool calls won't jump before it
|
||||
setMessages((prev) => {
|
||||
// Record index of the placeholder to update on chunks
|
||||
|
|
@ -63,8 +68,8 @@ export const useMessageHandling = () => {
|
|||
{
|
||||
role: 'assistant',
|
||||
content: '',
|
||||
// Use provided timestamp (from extension) to keep ordering stable
|
||||
timestamp: typeof timestamp === 'number' ? timestamp : Date.now(),
|
||||
// Use one stable turn timestamp so later split segments sort correctly.
|
||||
timestamp: resolvedTimestamp,
|
||||
},
|
||||
];
|
||||
});
|
||||
|
|
@ -89,7 +94,11 @@ export const useMessageHandling = () => {
|
|||
if (idx === null) {
|
||||
idx = next.length;
|
||||
streamingMessageIndexRef.current = idx;
|
||||
next.push({ role: 'assistant', content: '', timestamp: Date.now() });
|
||||
next.push({
|
||||
role: 'assistant',
|
||||
content: '',
|
||||
timestamp: currentStreamTimestampRef.current ?? Date.now(),
|
||||
});
|
||||
}
|
||||
|
||||
if (idx < 0 || idx >= next.length) {
|
||||
|
|
@ -122,6 +131,7 @@ export const useMessageHandling = () => {
|
|||
setIsStreaming(false);
|
||||
streamingMessageIndexRef.current = null;
|
||||
thinkingMessageIndexRef.current = null;
|
||||
currentStreamTimestampRef.current = null;
|
||||
}, []);
|
||||
|
||||
/**
|
||||
|
|
@ -173,7 +183,7 @@ export const useMessageHandling = () => {
|
|||
assistantIdx >= 0 &&
|
||||
assistantIdx < next.length
|
||||
? next[assistantIdx].timestamp
|
||||
: Date.now();
|
||||
: (currentStreamTimestampRef.current ?? Date.now());
|
||||
next.push({
|
||||
role: 'thinking',
|
||||
content: '',
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue