mirror of
https://github.com/QwenLM/qwen-code.git
synced 2026-04-29 20:20:57 +00:00
- Add onResponseChunk hook for progressive text display during streaming - Add onResponseComplete hook for customizing response delivery - Update mock plugin channel to support streaming chunks This enables channels to display AI responses progressively as they stream, improving user experience with real-time feedback. Co-authored-by: Qwen-Coder <qwen-coder@alibabacloud.com>
31 lines
696 B
TypeScript
31 lines
696 B
TypeScript
/**
 * Shared protocol types for mock channel WebSocket communication.
 */

/** Server → Plugin Channel (WebSocket) */
|
|
export interface InboundMessage {
|
|
type: 'inbound';
|
|
messageId: string;
|
|
senderId: string;
|
|
senderName: string;
|
|
chatId: string;
|
|
text: string;
|
|
}
|
|
|
|
/** Plugin Channel → Server (WebSocket) — streaming chunk */
|
|
export interface ChunkMessage {
|
|
type: 'chunk';
|
|
messageId: string;
|
|
chatId: string;
|
|
text: string;
|
|
}
|
|
|
|
/** Plugin Channel → Server (WebSocket) — final response */
|
|
export interface OutboundMessage {
|
|
type: 'outbound';
|
|
messageId: string;
|
|
chatId: string;
|
|
text: string;
|
|
}
|
|
|
|
export type WsMessage = InboundMessage | ChunkMessage | OutboundMessage;
|