Merge remote-tracking branch 'origin/main' into feat/debug-logging-refactor

This commit is contained in:
tanzhenxin 2026-02-05 20:23:48 +08:00
commit 4abec5c055
331 changed files with 19546 additions and 7771 deletions

View file

@ -0,0 +1,383 @@
/**
* @license
* Copyright 2025 Qwen Team
* SPDX-License-Identifier: Apache-2.0
*/
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import * as fs from 'node:fs/promises';
import { exportCommand } from './exportCommand.js';
import { createMockCommandContext } from '../../test-utils/mockCommandContext.js';
import type { ChatRecord } from '@qwen-code/qwen-code-core';
import type { Part, Content } from '@google/genai';
import {
collectSessionData,
normalizeSessionData,
toMarkdown,
toHtml,
generateExportFilename,
} from '../utils/export/index.js';
// Hoisted so the vi.mock factory below can reference it — vi.mock calls are
// hoisted above imports, so plain module-level state would not yet exist.
const mockSessionServiceMocks = vi.hoisted(() => ({
  loadLastSession: vi.fn(),
}));
// Replace the core SessionService with a stub whose loadLastSession delegates
// to the hoisted mock, letting each test control what the "session" contains.
vi.mock('@qwen-code/qwen-code-core', () => {
  class SessionService {
    constructor(_cwd: string) {}
    async loadLastSession() {
      return mockSessionServiceMocks.loadLastSession();
    }
  }
  return {
    SessionService,
  };
});
// Mock every export helper so the tests assert command wiring, not formatting.
vi.mock('../utils/export/index.js', () => ({
  collectSessionData: vi.fn(),
  normalizeSessionData: vi.fn(),
  toMarkdown: vi.fn(),
  toHtml: vi.fn(),
  generateExportFilename: vi.fn(),
}));
// Stub the filesystem so no real files are written during the suite.
vi.mock('node:fs/promises', () => ({
  writeFile: vi.fn(),
}));
// Unit tests for the /export slash command: command structure plus the
// md and html subcommand actions (happy path and every guard/error branch).
describe('exportCommand', () => {
  // Canonical session payload resolved by the mocked SessionService.
  const mockSessionData = {
    conversation: {
      sessionId: 'test-session-id',
      startTime: '2025-01-01T00:00:00Z',
      messages: [
        {
          type: 'user',
          message: {
            parts: [{ text: 'Hello' }] as Part[],
          } as Content,
        },
      ] as ChatRecord[],
    },
  };
  let mockContext: ReturnType<typeof createMockCommandContext>;
  beforeEach(() => {
    vi.clearAllMocks();
    // Default every mock to the success path; individual tests override.
    mockSessionServiceMocks.loadLastSession.mockResolvedValue(mockSessionData);
    mockContext = createMockCommandContext({
      services: {
        config: {
          getWorkingDir: vi.fn().mockReturnValue('/test/dir'),
          getProjectRoot: vi.fn().mockReturnValue('/test/project'),
        },
      },
    });
    vi.mocked(collectSessionData).mockResolvedValue({
      sessionId: 'test-session-id',
      startTime: '2025-01-01T00:00:00Z',
      messages: [],
    });
    // Pass data through unchanged so assertions can follow the pipeline.
    vi.mocked(normalizeSessionData).mockImplementation((data) => data);
    vi.mocked(toMarkdown).mockReturnValue('# Test Markdown');
    vi.mocked(toHtml).mockReturnValue(
      '<html><script id="chat-data" type="application/json">{"data": "test"}</script></html>',
    );
    // Deterministic filename so tests can match the exported path exactly.
    vi.mocked(generateExportFilename).mockImplementation(
      (ext: string) => `export-2025-01-01T00-00-00-000Z.${ext}`,
    );
  });
  afterEach(() => {
    vi.restoreAllMocks();
  });
  describe('command structure', () => {
    it('should have correct name and description', () => {
      expect(exportCommand.name).toBe('export');
      expect(exportCommand.description).toBe(
        'Export current session message history to a file',
      );
    });
    it('should have html, md, json, and jsonl subcommands', () => {
      expect(exportCommand.subCommands).toHaveLength(4);
      expect(exportCommand.subCommands?.map((c) => c.name)).toEqual([
        'html',
        'md',
        'json',
        'jsonl',
      ]);
    });
  });
  describe('exportMarkdownAction', () => {
    it('should export session to markdown file', async () => {
      const mdCommand = exportCommand.subCommands?.find((c) => c.name === 'md');
      if (!mdCommand?.action) {
        throw new Error('md command not found');
      }
      const result = await mdCommand.action(mockContext, '');
      expect(result).toEqual({
        type: 'message',
        messageType: 'info',
        content: expect.stringContaining('export-2025-01-01T00-00-00-000Z.md'),
      });
      expect(mockSessionServiceMocks.loadLastSession).toHaveBeenCalled();
      expect(collectSessionData).toHaveBeenCalledWith(
        mockSessionData.conversation,
        expect.anything(),
      );
      expect(normalizeSessionData).toHaveBeenCalled();
      expect(toMarkdown).toHaveBeenCalled();
      expect(generateExportFilename).toHaveBeenCalledWith('md');
      expect(fs.writeFile).toHaveBeenCalledWith(
        expect.stringContaining('export-2025-01-01T00-00-00-000Z.md'),
        '# Test Markdown',
        'utf-8',
      );
    });
    it('should return error when config is not available', async () => {
      const contextWithoutConfig = createMockCommandContext({
        services: {
          config: null,
        },
      });
      const mdCommand = exportCommand.subCommands?.find((c) => c.name === 'md');
      if (!mdCommand?.action) {
        throw new Error('md command not found');
      }
      const result = await mdCommand.action(contextWithoutConfig, '');
      expect(result).toEqual({
        type: 'message',
        messageType: 'error',
        content: 'Configuration not available.',
      });
    });
    it('should return error when working directory cannot be determined', async () => {
      const contextWithoutCwd = createMockCommandContext({
        services: {
          config: {
            getWorkingDir: vi.fn().mockReturnValue(null),
            getProjectRoot: vi.fn().mockReturnValue(null),
          },
        },
      });
      const mdCommand = exportCommand.subCommands?.find((c) => c.name === 'md');
      if (!mdCommand || !mdCommand.action) {
        throw new Error('md command not found');
      }
      const result = await mdCommand.action(contextWithoutCwd, '');
      expect(result).toEqual({
        type: 'message',
        messageType: 'error',
        content: 'Could not determine current working directory.',
      });
    });
    it('should return error when no session is found', async () => {
      mockSessionServiceMocks.loadLastSession.mockResolvedValue(undefined);
      const mdCommand = exportCommand.subCommands?.find((c) => c.name === 'md');
      if (!mdCommand?.action) {
        throw new Error('md command not found');
      }
      const result = await mdCommand.action(mockContext, '');
      expect(result).toEqual({
        type: 'message',
        messageType: 'error',
        content: 'No active session found to export.',
      });
    });
    it('should handle errors during export', async () => {
      const error = new Error('File write failed');
      vi.mocked(fs.writeFile).mockRejectedValue(error);
      const mdCommand = exportCommand.subCommands?.find((c) => c.name === 'md');
      if (!mdCommand?.action) {
        throw new Error('md command not found');
      }
      const result = await mdCommand.action(mockContext, '');
      expect(result).toEqual({
        type: 'message',
        messageType: 'error',
        content: 'Failed to export session: File write failed',
      });
    });
    it('should use project root when working dir is not available', async () => {
      const contextWithProjectRoot = createMockCommandContext({
        services: {
          config: {
            getWorkingDir: vi.fn().mockReturnValue(null),
            getProjectRoot: vi.fn().mockReturnValue('/test/project'),
          },
        },
      });
      const mdCommand = exportCommand.subCommands?.find((c) => c.name === 'md');
      if (!mdCommand?.action) {
        throw new Error('md command not found');
      }
      // No assertion needed: the action throwing would fail the test.
      await mdCommand.action(contextWithProjectRoot, '');
    });
  });
  describe('exportHtmlAction', () => {
    it('should export session to HTML file', async () => {
      const htmlCommand = exportCommand.subCommands?.find(
        (c) => c.name === 'html',
      );
      if (!htmlCommand?.action) {
        throw new Error('html command not found');
      }
      const result = await htmlCommand.action(mockContext, '');
      expect(result).toEqual({
        type: 'message',
        messageType: 'info',
        content: expect.stringContaining(
          'export-2025-01-01T00-00-00-000Z.html',
        ),
      });
      expect(mockSessionServiceMocks.loadLastSession).toHaveBeenCalled();
      expect(collectSessionData).toHaveBeenCalledWith(
        mockSessionData.conversation,
        expect.anything(),
      );
      expect(normalizeSessionData).toHaveBeenCalled();
      expect(toHtml).toHaveBeenCalled();
      expect(generateExportFilename).toHaveBeenCalledWith('html');
      expect(fs.writeFile).toHaveBeenCalledWith(
        expect.stringContaining('export-2025-01-01T00-00-00-000Z.html'),
        expect.stringContaining('{"data": "test"}'),
        'utf-8',
      );
    });
    it('should return error when config is not available', async () => {
      const contextWithoutConfig = createMockCommandContext({
        services: {
          config: null,
        },
      });
      const htmlCommand = exportCommand.subCommands?.find(
        (c) => c.name === 'html',
      );
      if (!htmlCommand?.action) {
        throw new Error('html command not found');
      }
      const result = await htmlCommand.action(contextWithoutConfig, '');
      expect(result).toEqual({
        type: 'message',
        messageType: 'error',
        content: 'Configuration not available.',
      });
    });
    it('should return error when working directory cannot be determined', async () => {
      const contextWithoutCwd = createMockCommandContext({
        services: {
          config: {
            getWorkingDir: vi.fn().mockReturnValue(null),
            getProjectRoot: vi.fn().mockReturnValue(null),
          },
        },
      });
      const htmlCommand = exportCommand.subCommands?.find(
        (c) => c.name === 'html',
      );
      if (!htmlCommand || !htmlCommand.action) {
        throw new Error('html command not found');
      }
      const result = await htmlCommand.action(contextWithoutCwd, '');
      expect(result).toEqual({
        type: 'message',
        messageType: 'error',
        content: 'Could not determine current working directory.',
      });
    });
    it('should return error when no session is found', async () => {
      mockSessionServiceMocks.loadLastSession.mockResolvedValue(undefined);
      const htmlCommand = exportCommand.subCommands?.find(
        (c) => c.name === 'html',
      );
      if (!htmlCommand?.action) {
        throw new Error('html command not found');
      }
      const result = await htmlCommand.action(mockContext, '');
      expect(result).toEqual({
        type: 'message',
        messageType: 'error',
        content: 'No active session found to export.',
      });
    });
    it('should handle errors during HTML generation', async () => {
      const error = new Error('Failed to generate HTML');
      vi.mocked(toHtml).mockImplementation(() => {
        throw error;
      });
      const htmlCommand = exportCommand.subCommands?.find(
        (c) => c.name === 'html',
      );
      if (!htmlCommand?.action) {
        throw new Error('html command not found');
      }
      const result = await htmlCommand.action(mockContext, '');
      expect(result).toEqual({
        type: 'message',
        messageType: 'error',
        content: 'Failed to export session: Failed to generate HTML',
      });
    });
    it('should handle errors during file write', async () => {
      const error = new Error('File write failed');
      vi.mocked(fs.writeFile).mockRejectedValue(error);
      const htmlCommand = exportCommand.subCommands?.find(
        (c) => c.name === 'html',
      );
      if (!htmlCommand?.action) {
        throw new Error('html command not found');
      }
      const result = await htmlCommand.action(mockContext, '');
      expect(result).toEqual({
        type: 'message',
        messageType: 'error',
        content: 'Failed to export session: File write failed',
      });
    });
  });
});

View file

@ -0,0 +1,347 @@
/**
* @license
* Copyright 2025 Qwen Team
* SPDX-License-Identifier: Apache-2.0
*/
import * as fs from 'node:fs/promises';
import path from 'node:path';
import {
type CommandContext,
type SlashCommand,
type MessageActionReturn,
CommandKind,
} from './types.js';
import { SessionService } from '@qwen-code/qwen-code-core';
import {
collectSessionData,
normalizeSessionData,
toMarkdown,
toHtml,
toJson,
toJsonl,
generateExportFilename,
} from '../utils/export/index.js';
/**
 * Action for the 'md' subcommand - exports the last session to a markdown
 * file written into the current working directory.
 *
 * @param context - Slash-command context providing access to services.
 * @returns An info message containing the written file path on success, or
 *   an error message when config/cwd/session are unavailable or writing fails.
 */
async function exportMarkdownAction(
  context: CommandContext,
): Promise<MessageActionReturn> {
  const { services } = context;
  const { config } = services;
  if (!config) {
    return {
      type: 'message',
      messageType: 'error',
      content: 'Configuration not available.',
    };
  }
  // Fall back to the project root when no working directory is configured.
  const cwd = config.getWorkingDir() || config.getProjectRoot();
  if (!cwd) {
    return {
      type: 'message',
      messageType: 'error',
      content: 'Could not determine current working directory.',
    };
  }
  try {
    // Load the current session
    const sessionService = new SessionService(cwd);
    const sessionData = await sessionService.loadLastSession();
    if (!sessionData) {
      return {
        type: 'message',
        messageType: 'error',
        content: 'No active session found to export.',
      };
    }
    const { conversation } = sessionData;
    // Collect and normalize export data (SSOT)
    const exportData = await collectSessionData(conversation, config);
    const normalizedData = normalizeSessionData(
      exportData,
      conversation.messages,
      config,
    );
    // Generate markdown from SSOT
    const markdown = toMarkdown(normalizedData);
    const filename = generateExportFilename('md');
    const filepath = path.join(cwd, filename);
    // Write to file
    await fs.writeFile(filepath, markdown, 'utf-8');
    return {
      type: 'message',
      messageType: 'info',
      // Fix: was the garbled literal `$(unknown)`; interpolate the real path
      // so the user (and the tests) see where the export was written.
      content: `Session exported to markdown: ${filepath}`,
    };
  } catch (error) {
    return {
      type: 'message',
      messageType: 'error',
      content: `Failed to export session: ${error instanceof Error ? error.message : String(error)}`,
    };
  }
}
/**
 * Action for the 'html' subcommand - exports the last session to an HTML
 * file written into the current working directory.
 *
 * @param context - Slash-command context providing access to services.
 * @returns An info message containing the written file path on success, or
 *   an error message when config/cwd/session are unavailable or writing fails.
 */
async function exportHtmlAction(
  context: CommandContext,
): Promise<MessageActionReturn> {
  const { services } = context;
  const { config } = services;
  if (!config) {
    return {
      type: 'message',
      messageType: 'error',
      content: 'Configuration not available.',
    };
  }
  // Fall back to the project root when no working directory is configured.
  const cwd = config.getWorkingDir() || config.getProjectRoot();
  if (!cwd) {
    return {
      type: 'message',
      messageType: 'error',
      content: 'Could not determine current working directory.',
    };
  }
  try {
    // Load the current session
    const sessionService = new SessionService(cwd);
    const sessionData = await sessionService.loadLastSession();
    if (!sessionData) {
      return {
        type: 'message',
        messageType: 'error',
        content: 'No active session found to export.',
      };
    }
    const { conversation } = sessionData;
    // Collect and normalize export data (SSOT)
    const exportData = await collectSessionData(conversation, config);
    const normalizedData = normalizeSessionData(
      exportData,
      conversation.messages,
      config,
    );
    // Generate HTML from SSOT
    const html = toHtml(normalizedData);
    const filename = generateExportFilename('html');
    const filepath = path.join(cwd, filename);
    // Write to file
    await fs.writeFile(filepath, html, 'utf-8');
    return {
      type: 'message',
      messageType: 'info',
      // Fix: was the garbled literal `$(unknown)`; interpolate the real path
      // so the user (and the tests) see where the export was written.
      content: `Session exported to HTML: ${filepath}`,
    };
  } catch (error) {
    return {
      type: 'message',
      messageType: 'error',
      content: `Failed to export session: ${error instanceof Error ? error.message : String(error)}`,
    };
  }
}
/**
 * Action for the 'json' subcommand - exports the last session to a JSON
 * file written into the current working directory.
 *
 * @param context - Slash-command context providing access to services.
 * @returns An info message containing the written file path on success, or
 *   an error message when config/cwd/session are unavailable or writing fails.
 */
async function exportJsonAction(
  context: CommandContext,
): Promise<MessageActionReturn> {
  const { services } = context;
  const { config } = services;
  if (!config) {
    return {
      type: 'message',
      messageType: 'error',
      content: 'Configuration not available.',
    };
  }
  // Fall back to the project root when no working directory is configured.
  const cwd = config.getWorkingDir() || config.getProjectRoot();
  if (!cwd) {
    return {
      type: 'message',
      messageType: 'error',
      content: 'Could not determine current working directory.',
    };
  }
  try {
    // Load the current session
    const sessionService = new SessionService(cwd);
    const sessionData = await sessionService.loadLastSession();
    if (!sessionData) {
      return {
        type: 'message',
        messageType: 'error',
        content: 'No active session found to export.',
      };
    }
    const { conversation } = sessionData;
    // Collect and normalize export data (SSOT)
    const exportData = await collectSessionData(conversation, config);
    const normalizedData = normalizeSessionData(
      exportData,
      conversation.messages,
      config,
    );
    // Generate JSON from SSOT
    const json = toJson(normalizedData);
    const filename = generateExportFilename('json');
    const filepath = path.join(cwd, filename);
    // Write to file
    await fs.writeFile(filepath, json, 'utf-8');
    return {
      type: 'message',
      messageType: 'info',
      // Fix: was the garbled literal `$(unknown)`; interpolate the real path
      // so the user sees where the export was written.
      content: `Session exported to JSON: ${filepath}`,
    };
  } catch (error) {
    return {
      type: 'message',
      messageType: 'error',
      content: `Failed to export session: ${error instanceof Error ? error.message : String(error)}`,
    };
  }
}
/**
 * Action for the 'jsonl' subcommand - exports the last session to a JSONL
 * file (one message per line) written into the current working directory.
 *
 * @param context - Slash-command context providing access to services.
 * @returns An info message containing the written file path on success, or
 *   an error message when config/cwd/session are unavailable or writing fails.
 */
async function exportJsonlAction(
  context: CommandContext,
): Promise<MessageActionReturn> {
  const { services } = context;
  const { config } = services;
  if (!config) {
    return {
      type: 'message',
      messageType: 'error',
      content: 'Configuration not available.',
    };
  }
  // Fall back to the project root when no working directory is configured.
  const cwd = config.getWorkingDir() || config.getProjectRoot();
  if (!cwd) {
    return {
      type: 'message',
      messageType: 'error',
      content: 'Could not determine current working directory.',
    };
  }
  try {
    // Load the current session
    const sessionService = new SessionService(cwd);
    const sessionData = await sessionService.loadLastSession();
    if (!sessionData) {
      return {
        type: 'message',
        messageType: 'error',
        content: 'No active session found to export.',
      };
    }
    const { conversation } = sessionData;
    // Collect and normalize export data (SSOT)
    const exportData = await collectSessionData(conversation, config);
    const normalizedData = normalizeSessionData(
      exportData,
      conversation.messages,
      config,
    );
    // Generate JSONL from SSOT
    const jsonl = toJsonl(normalizedData);
    const filename = generateExportFilename('jsonl');
    const filepath = path.join(cwd, filename);
    // Write to file
    await fs.writeFile(filepath, jsonl, 'utf-8');
    return {
      type: 'message',
      messageType: 'info',
      // Fix: was the garbled literal `$(unknown)`; interpolate the real path
      // so the user sees where the export was written.
      content: `Session exported to JSONL: ${filepath}`,
    };
  } catch (error) {
    return {
      type: 'message',
      messageType: 'error',
      content: `Failed to export session: ${error instanceof Error ? error.message : String(error)}`,
    };
  }
}
/**
 * Main export command with subcommands.
 *
 * The subcommand order (html, md, json, jsonl) is observable via
 * `exportCommand.subCommands` and should be treated as part of the
 * command's contract — do not reorder casually.
 */
export const exportCommand: SlashCommand = {
  name: 'export',
  description: 'Export current session message history to a file',
  kind: CommandKind.BUILT_IN,
  subCommands: [
    {
      name: 'html',
      description: 'Export session to HTML format',
      kind: CommandKind.BUILT_IN,
      action: exportHtmlAction,
    },
    {
      name: 'md',
      description: 'Export session to markdown format',
      kind: CommandKind.BUILT_IN,
      action: exportMarkdownAction,
    },
    {
      name: 'json',
      description: 'Export session to JSON format',
      kind: CommandKind.BUILT_IN,
      action: exportJsonAction,
    },
    {
      name: 'jsonl',
      description: 'Export session to JSONL format (one message per line)',
      kind: CommandKind.BUILT_IN,
      action: exportJsonlAction,
    },
  ],
};

View file

@ -45,14 +45,16 @@ export const Composer = () => {
<Box flexDirection="column" marginTop={1}>
{!uiState.embeddedShellFocused && (
<LoadingIndicator
// Hide loading phrases when enableLoadingPhrases is explicitly false.
// Using === false ensures phrases show by default when undefined.
thought={
uiState.streamingState === StreamingState.WaitingForConfirmation ||
config.getAccessibility()?.disableLoadingPhrases
config.getAccessibility()?.enableLoadingPhrases === false
? undefined
: uiState.thought
}
currentLoadingPhrase={
config.getAccessibility()?.disableLoadingPhrases
config.getAccessibility()?.enableLoadingPhrases === false
? undefined
: uiState.currentLoadingPhrase
}

View file

@ -47,30 +47,35 @@ const renderComponent = (
setValue: vi.fn(),
} as unknown as LoadedSettings;
const mockConfig = contextValue
? ({
// --- Functions used by ModelDialog ---
getModel: vi.fn(() => MAINLINE_CODER),
setModel: vi.fn().mockResolvedValue(undefined),
switchModel: vi.fn().mockResolvedValue(undefined),
getAuthType: vi.fn(() => 'qwen-oauth'),
const mockConfig = {
// --- Functions used by ModelDialog ---
getModel: vi.fn(() => MAINLINE_CODER),
setModel: vi.fn().mockResolvedValue(undefined),
switchModel: vi.fn().mockResolvedValue(undefined),
getAuthType: vi.fn(() => 'qwen-oauth'),
getAllConfiguredModels: vi.fn(() =>
AVAILABLE_MODELS_QWEN.map((m) => ({
id: m.id,
label: m.label,
description: m.description || '',
authType: AuthType.QWEN_OAUTH,
})),
),
// --- Functions used by ClearcutLogger ---
getUsageStatisticsEnabled: vi.fn(() => true),
getSessionId: vi.fn(() => 'mock-session-id'),
getDebugMode: vi.fn(() => false),
getContentGeneratorConfig: vi.fn(() => ({
authType: AuthType.QWEN_OAUTH,
model: MAINLINE_CODER,
})),
getUseSmartEdit: vi.fn(() => false),
getUseModelRouter: vi.fn(() => false),
getProxy: vi.fn(() => undefined),
// --- Functions used by ClearcutLogger ---
getUsageStatisticsEnabled: vi.fn(() => true),
getSessionId: vi.fn(() => 'mock-session-id'),
getDebugMode: vi.fn(() => false),
getContentGeneratorConfig: vi.fn(() => ({
authType: AuthType.QWEN_OAUTH,
model: MAINLINE_CODER,
})),
getUseModelRouter: vi.fn(() => false),
getProxy: vi.fn(() => undefined),
// --- Spread test-specific overrides ---
...contextValue,
} as unknown as Config)
: undefined;
// --- Spread test-specific overrides ---
...(contextValue ?? {}),
} as unknown as Config;
const renderResult = render(
<SettingsContext.Provider value={mockSettings}>
@ -176,10 +181,6 @@ describe('<ModelDialog />', () => {
AuthType.QWEN_OAUTH,
MAINLINE_CODER,
undefined,
{
reason: 'user_manual',
context: 'Model switched via /model dialog',
},
);
expect(mockSettings.setValue).toHaveBeenCalledWith(
SettingScope.User,
@ -236,10 +237,6 @@ describe('<ModelDialog />', () => {
AuthType.QWEN_OAUTH,
MAINLINE_CODER,
{ requireCachedCredentials: true },
{
reason: 'user_manual',
context: 'AuthType+model switched via /model dialog',
},
);
expect(mockSettings.setValue).toHaveBeenCalledWith(
SettingScope.User,
@ -308,6 +305,14 @@ describe('<ModelDialog />', () => {
{
getModel: mockGetModel,
getAuthType: mockGetAuthType,
getAllConfiguredModels: vi.fn(() =>
AVAILABLE_MODELS_QWEN.map((m) => ({
id: m.id,
label: m.label,
description: m.description || '',
authType: AuthType.QWEN_OAUTH,
})),
),
} as unknown as Config
}
>
@ -322,6 +327,14 @@ describe('<ModelDialog />', () => {
const newMockConfig = {
getModel: mockGetModel,
getAuthType: mockGetAuthType,
getAllConfiguredModels: vi.fn(() =>
AVAILABLE_MODELS_QWEN.map((m) => ({
id: m.id,
label: m.label,
description: m.description || '',
authType: AuthType.QWEN_OAUTH,
})),
),
} as unknown as Config;
rerender(

View file

@ -11,6 +11,7 @@ import {
AuthType,
ModelSlashCommandEvent,
logModelSlashCommand,
type AvailableModel as CoreAvailableModel,
type ContentGeneratorConfig,
type ContentGeneratorConfigSource,
type ContentGeneratorConfigSources,
@ -19,12 +20,9 @@ import { useKeypress } from '../hooks/useKeypress.js';
import { theme } from '../semantic-colors.js';
import { DescriptiveRadioButtonSelect } from './shared/DescriptiveRadioButtonSelect.js';
import { ConfigContext } from '../contexts/ConfigContext.js';
import { UIStateContext } from '../contexts/UIStateContext.js';
import { UIStateContext, type UIState } from '../contexts/UIStateContext.js';
import { useSettings } from '../contexts/SettingsContext.js';
import {
getAvailableModelsForAuthType,
MAINLINE_CODER,
} from '../models/availableModels.js';
import { MAINLINE_CODER } from '../models/availableModels.js';
import { getPersistScopeForModelSelection } from '../../config/modelProvidersScope.js';
import { t } from '../../i18n/index.js';
@ -105,6 +103,46 @@ function persistAuthTypeSelection(
settings.setValue(scope, 'security.auth.selectedType', authType);
}
interface HandleModelSwitchSuccessParams {
settings: ReturnType<typeof useSettings>;
uiState: UIState | null;
after: ContentGeneratorConfig | undefined;
effectiveAuthType: AuthType | undefined;
effectiveModelId: string;
isRuntime: boolean;
}
function handleModelSwitchSuccess({
settings,
uiState,
after,
effectiveAuthType,
effectiveModelId,
isRuntime,
}: HandleModelSwitchSuccessParams): void {
persistModelSelection(settings, effectiveModelId);
if (effectiveAuthType) {
persistAuthTypeSelection(settings, effectiveAuthType);
}
const baseUrl = after?.baseUrl ?? t('(default)');
const maskedKey = maskApiKey(after?.apiKey);
uiState?.historyManager.addItem(
{
type: 'info',
text:
`authType: ${effectiveAuthType ?? '(none)'}` +
`\n` +
`Using ${isRuntime ? 'runtime ' : ''}model: ${effectiveModelId}` +
`\n` +
`Base URL: ${baseUrl}` +
`\n` +
`API key: ${maskedKey}`,
},
Date.now(),
);
}
function ConfigRow({
label,
value,
@ -154,13 +192,21 @@ export function ModelDialog({ onClose }: ModelDialogProps): React.JSX.Element {
const sources = readSourcesFromConfig(config);
const availableModelEntries = useMemo(() => {
const allAuthTypes = Object.values(AuthType) as AuthType[];
const modelsByAuthType = allAuthTypes
.map((t) => ({
authType: t,
models: getAvailableModelsForAuthType(t, config ?? undefined),
}))
.filter((x) => x.models.length > 0);
const allModels = config ? config.getAllConfiguredModels() : [];
// Separate runtime models from registry models
const runtimeModels = allModels.filter((m) => m.isRuntimeModel);
const registryModels = allModels.filter((m) => !m.isRuntimeModel);
// Group registry models by authType
const modelsByAuthTypeMap = new Map<AuthType, CoreAvailableModel[]>();
for (const model of registryModels) {
const authType = model.authType;
if (!modelsByAuthTypeMap.has(authType)) {
modelsByAuthTypeMap.set(authType, []);
}
modelsByAuthTypeMap.get(authType)!.push(model);
}
// Fixed order: qwen-oauth first, then others in a stable order
const authTypeOrder: AuthType[] = [
@ -171,44 +217,91 @@ export function ModelDialog({ onClose }: ModelDialogProps): React.JSX.Element {
AuthType.USE_VERTEX_AI,
];
// Filter to only include authTypes that have models
const availableAuthTypes = new Set(modelsByAuthType.map((x) => x.authType));
// Filter to only include authTypes that have registry models and maintain order
const availableAuthTypes = new Set(modelsByAuthTypeMap.keys());
const orderedAuthTypes = authTypeOrder.filter((t) =>
availableAuthTypes.has(t),
);
return orderedAuthTypes.flatMap((t) => {
const models =
modelsByAuthType.find((x) => x.authType === t)?.models ?? [];
return models.map((m) => ({ authType: t, model: m }));
});
// Build ordered list: runtime models first, then registry models grouped by authType
const result: Array<{
authType: AuthType;
model: CoreAvailableModel;
isRuntime?: boolean;
snapshotId?: string;
}> = [];
// Add all runtime models first
for (const runtimeModel of runtimeModels) {
result.push({
authType: runtimeModel.authType,
model: runtimeModel,
isRuntime: true,
snapshotId: runtimeModel.runtimeSnapshotId,
});
}
// Add registry models grouped by authType
for (const t of orderedAuthTypes) {
for (const model of modelsByAuthTypeMap.get(t) ?? []) {
result.push({ authType: t, model, isRuntime: false });
}
}
return result;
}, [config]);
const MODEL_OPTIONS = useMemo(
() =>
availableModelEntries.map(({ authType: t2, model }) => {
const value = `${t2}::${model.id}`;
const title = (
<Text>
<Text bold color={theme.text.accent}>
[{t2}]
availableModelEntries.map(
({ authType: t2, model, isRuntime, snapshotId }) => {
// Runtime models use snapshotId directly (format: $runtime|${authType}|${modelId})
const value =
isRuntime && snapshotId ? snapshotId : `${t2}::${model.id}`;
const title = (
<Text>
<Text
bold
color={isRuntime ? theme.status.warning : theme.text.accent}
>
[{t2}]
</Text>
<Text>{` ${model.label}`}</Text>
{isRuntime && (
<Text color={theme.status.warning}> (Runtime)</Text>
)}
</Text>
<Text>{` ${model.label}`}</Text>
</Text>
);
const description = model.description || '';
return {
value,
title,
description,
key: value,
};
}),
);
// Include runtime indicator in description
let description = model.description || '';
if (isRuntime) {
description = description
? `${description} (Runtime)`
: 'Runtime model';
}
return {
value,
title,
description,
key: value,
};
},
),
[availableModelEntries],
);
const preferredModelId = config?.getModel() || MAINLINE_CODER;
const preferredKey = authType ? `${authType}::${preferredModelId}` : '';
// Check if current model is a runtime model
// Runtime snapshot ID is already in $runtime|${authType}|${modelId} format
const activeRuntimeSnapshot = config?.getActiveRuntimeModelSnapshot?.();
const preferredKey = activeRuntimeSnapshot
? activeRuntimeSnapshot.id
: authType
? `${authType}::${preferredModelId}`
: '';
useKeypress(
(key) => {
@ -228,67 +321,81 @@ export function ModelDialog({ onClose }: ModelDialogProps): React.JSX.Element {
const handleSelect = useCallback(
async (selected: string) => {
// Clear any previous error
setErrorMessage(null);
const sep = '::';
const idx = selected.indexOf(sep);
const selectedAuthType = (
idx >= 0 ? selected.slice(0, idx) : authType
) as AuthType;
const modelId = idx >= 0 ? selected.slice(idx + sep.length) : selected;
let after: ContentGeneratorConfig | undefined;
let effectiveAuthType: AuthType | undefined;
let effectiveModelId = selected;
let isRuntime = false;
if (config) {
try {
await config.switchModel(
selectedAuthType,
modelId,
selectedAuthType !== authType &&
selectedAuthType === AuthType.QWEN_OAUTH
? { requireCachedCredentials: true }
: undefined,
{
reason: 'user_manual',
context:
selectedAuthType === authType
? 'Model switched via /model dialog'
: 'AuthType+model switched via /model dialog',
},
);
} catch (e) {
const baseErrorMessage = e instanceof Error ? e.message : String(e);
setErrorMessage(
`Failed to switch model to '${modelId}'.\n\n${baseErrorMessage}`,
);
return;
if (!config) {
onClose();
return;
}
try {
// Determine if this is a runtime model selection
// Runtime model format: $runtime|${authType}|${modelId}
isRuntime = selected.startsWith('$runtime|');
let selectedAuthType: AuthType;
let modelId: string;
if (isRuntime) {
// For runtime models, extract authType from the snapshot ID
// Format: $runtime|${authType}|${modelId}
const parts = selected.split('|');
if (parts.length >= 2 && parts[0] === '$runtime') {
selectedAuthType = parts[1] as AuthType;
} else {
selectedAuthType = authType as AuthType;
}
modelId = selected; // Pass the full snapshot ID to switchModel
} else {
const sep = '::';
const idx = selected.indexOf(sep);
selectedAuthType = (
idx >= 0 ? selected.slice(0, idx) : authType
) as AuthType;
modelId = idx >= 0 ? selected.slice(idx + sep.length) : selected;
}
const event = new ModelSlashCommandEvent(modelId);
logModelSlashCommand(config, event);
const after = config.getContentGeneratorConfig?.() as
await config.switchModel(
selectedAuthType,
modelId,
selectedAuthType !== authType &&
selectedAuthType === AuthType.QWEN_OAUTH
? { requireCachedCredentials: true }
: undefined,
);
if (!isRuntime) {
const event = new ModelSlashCommandEvent(modelId);
logModelSlashCommand(config, event);
}
after = config.getContentGeneratorConfig?.() as
| ContentGeneratorConfig
| undefined;
const effectiveAuthType =
after?.authType ?? selectedAuthType ?? authType;
const effectiveModelId = after?.model ?? modelId;
persistModelSelection(settings, effectiveModelId);
persistAuthTypeSelection(settings, effectiveAuthType);
const baseUrl = after?.baseUrl ?? t('(default)');
const maskedKey = maskApiKey(after?.apiKey);
uiState?.historyManager.addItem(
{
type: 'info',
text:
`authType: ${effectiveAuthType}\n` +
`Using model: ${effectiveModelId}\n` +
`Base URL: ${baseUrl}\n` +
`API key: ${maskedKey}`,
},
Date.now(),
);
effectiveAuthType = after?.authType ?? selectedAuthType ?? authType;
effectiveModelId = after?.model ?? modelId;
} catch (e) {
const baseErrorMessage = e instanceof Error ? e.message : String(e);
const errorPrefix = isRuntime
? 'Failed to switch to runtime model.'
: `Failed to switch model to '${effectiveModelId ?? selected}'.`;
setErrorMessage(`${errorPrefix}\n\n${baseErrorMessage}`);
return;
}
handleModelSwitchSuccess({
settings,
uiState,
after,
effectiveAuthType,
effectiveModelId,
isRuntime,
});
onClose();
},
[authType, config, onClose, settings, uiState, setErrorMessage],

File diff suppressed because it is too large Load diff

View file

@ -6,30 +6,35 @@
import * as fs from 'node:fs/promises';
import * as path from 'node:path';
import type { PartListUnion, PartUnion } from '@google/genai';
import type { AnyToolInvocation, Config } from '@qwen-code/qwen-code-core';
import type { PartListUnion } from '@google/genai';
import type { Config } from '@qwen-code/qwen-code-core';
import {
createDebugLogger,
getErrorMessage,
isNodeError,
unescapePath,
readManyFiles,
} from '@qwen-code/qwen-code-core';
import type { HistoryItem, IndividualToolCallDisplay } from '../types.js';
import type {
HistoryItemToolGroup,
HistoryItemWithoutId,
IndividualToolCallDisplay,
} from '../types.js';
import { ToolCallStatus } from '../types.js';
import type { UseHistoryManagerReturn } from './useHistoryManager.js';
interface HandleAtCommandParams {
query: string;
config: Config;
addItem: UseHistoryManagerReturn['addItem'];
onDebugMessage: (message: string) => void;
messageId: number;
signal: AbortSignal;
addItem?: (item: HistoryItemWithoutId, baseTimestamp: number) => number;
}
interface HandleAtCommandResult {
processedQuery: PartListUnion | null;
shouldProceed: boolean;
toolDisplays?: IndividualToolCallDisplay[];
filesRead?: string[];
}
interface AtCommandPart {
@ -37,14 +42,6 @@ interface AtCommandPart {
content: string;
}
interface McpResourceAtReference {
atCommand: string; // e.g. "@github:repos/owner/repo/issues"
serverName: string;
uri: string; // e.g. "github://repos/owner/repo/issues"
}
const debugLogger = createDebugLogger('AT_COMMAND_PROCESSOR');
/**
* Parses a query string to find all '@<path>' commands and text segments.
* Handles \ escaped spaces within paths.
@ -119,199 +116,6 @@ function parseAllAtCommands(query: string): AtCommandPart[] {
);
}
/**
 * Collects the set of MCP server names the user has configured.
 *
 * Includes every key from the MCP servers map, plus the implicit "mcp"
 * server when a standalone MCP server command is configured.
 */
function getConfiguredMcpServerNames(config: Config): Set<string> {
  const configured = new Set<string>(Object.keys(config.getMcpServers() ?? {}));
  if (config.getMcpServerCommand()) {
    configured.add('mcp');
  }
  return configured;
}
/**
 * Normalizes a resource reference into a full URI for the given server.
 *
 * A reference that already contains a scheme separator ("://") is returned
 * untouched; otherwise at most one leading slash is stripped and the server
 * name is prepended as the scheme.
 */
function normalizeMcpResourceUri(serverName: string, resource: string): string {
  if (resource.includes('://')) {
    return resource;
  }
  // `^\/` strips a single leading slash, matching the original slice logic.
  return `${serverName}://${resource.replace(/^\//, '')}`;
}
/**
 * Splits the first token off a text fragment.
 *
 * Leading whitespace is skipped. The token ends at whitespace or common
 * punctuation (, ; ! ? ( ) [ ] { }), or at a '.' that is followed by
 * whitespace or end-of-string (so "file.txt" stays whole but a sentence
 * period terminates the token). Backslash escapes the next character,
 * allowing spaces inside a token.
 *
 * @returns The token and the remaining (unconsumed) text, or `null` when no
 *   token can be extracted.
 */
function splitLeadingToken(
  text: string,
): { token: string; rest: string } | null {
  // Advance past leading whitespace.
  let pos = 0;
  while (pos < text.length && /\s/.test(text[pos])) {
    pos += 1;
  }
  if (pos >= text.length) {
    return null;
  }

  const pieces: string[] = [];
  let escaped = false;
  for (; pos < text.length; pos++) {
    const ch = text[pos];
    if (escaped) {
      // Escaped character is taken literally.
      pieces.push(ch);
      escaped = false;
      continue;
    }
    if (ch === '\\') {
      escaped = true;
      continue;
    }
    if (/[,\s;!?()[\]{}]/.test(ch)) {
      break;
    }
    if (ch === '.') {
      // A '.' only terminates the token at a word boundary.
      const following = pos + 1 < text.length ? text[pos + 1] : '';
      if (following === '' || /\s/.test(following)) {
        break;
      }
    }
    pieces.push(ch);
  }

  const token = pieces.join('');
  if (!token) {
    return null;
  }
  return { token, rest: text.slice(pos) };
}
/**
 * Scans parsed @-command parts and pulls out MCP resource references of the
 * form "@server:resource" (or the split "@server:" + "resource" form, where
 * the resource lands in the following text part).
 *
 * Only servers present in the configured MCP server set are treated as
 * resource references; anything else passes through unchanged. Returns the
 * rewritten parts list (with MCP references normalized and empty text parts
 * dropped) alongside the extracted references.
 *
 * NOTE: this function mutates `parts[i + 1]` in place when it consumes a
 * token from a following text part — the loop then processes the shortened
 * text part on the next iteration.
 */
function extractMcpResourceAtReferences(
  parts: AtCommandPart[],
  config: Config,
): { parts: AtCommandPart[]; refs: McpResourceAtReference[] } {
  const configuredServers = getConfiguredMcpServerNames(config);
  const refs: McpResourceAtReference[] = [];
  const merged: AtCommandPart[] = [];
  for (let i = 0; i < parts.length; i++) {
    const part = parts[i];
    if (part.type !== 'atPath') {
      merged.push(part);
      continue;
    }
    const atText = part.content; // e.g. "@github:" or "@github:repos/..."
    const colonIndex = atText.indexOf(':');
    // Require "@<name>:" with at least one character of server name
    // (colonIndex <= 1 rejects "@:" and strings not starting with '@').
    if (!atText.startsWith('@') || colonIndex <= 1) {
      merged.push(part);
      continue;
    }
    const serverName = atText.slice(1, colonIndex);
    if (!configuredServers.has(serverName)) {
      merged.push(part);
      continue;
    }
    let resource = atText.slice(colonIndex + 1);
    // Support the documented "@server: resource" format where the resource is
    // separated into the following text part.
    if (!resource) {
      const next = parts[i + 1];
      if (next?.type === 'text') {
        const tokenInfo = splitLeadingToken(next.content);
        if (tokenInfo) {
          resource = tokenInfo.token;
          const remainingText = tokenInfo.rest;
          // Update the next part in place, and let the next iteration handle it.
          parts[i + 1] = { type: 'text', content: remainingText };
        }
      }
    }
    if (!resource) {
      // Treat "@server:" without a resource as plain text, rather than falling
      // through to file resolution for a path like "server:".
      merged.push({ type: 'text', content: atText });
      continue;
    }
    // Normalize: keep full URIs as-is, otherwise strip one leading slash.
    const normalizedResource = resource.includes('://')
      ? resource
      : resource.startsWith('/')
        ? resource.slice(1)
        : resource;
    const normalizedAtCommand = `@${serverName}:${normalizedResource}`;
    refs.push({
      atCommand: normalizedAtCommand,
      serverName,
      uri: normalizeMcpResourceUri(serverName, normalizedResource),
    });
    merged.push({ type: 'atPath', content: normalizedAtCommand });
  }
  return {
    // Drop text parts that became whitespace-only after token extraction.
    parts: merged.filter(
      (p) => !(p.type === 'text' && p.content.trim() === ''),
    ),
    refs,
  };
}
/**
 * Flattens an MCP `resources/read` response into a single display string.
 *
 * Text items are concatenated (blank-line separated); binary items are
 * replaced by a placeholder noting the mime type and blob string length.
 * The combined output is then truncated first by line count, then by
 * character count, whenever the corresponding limit is finite.
 */
function formatMcpResourceContents(
  raw: unknown,
  limits: { maxCharsPerResource: number; maxLinesPerResource: number },
): string {
  if (!raw || typeof raw !== 'object') {
    return '[Error: Invalid MCP resource response]';
  }
  const contents = (raw as { contents?: unknown }).contents;
  if (!Array.isArray(contents)) {
    return '[Error: Invalid MCP resource response]';
  }

  const collected: string[] = [];
  for (const entry of contents) {
    if (!entry || typeof entry !== 'object') {
      continue;
    }
    const text = (entry as { text?: unknown }).text;
    const blob = (entry as { blob?: unknown }).blob;
    const mimeType = (entry as { mimeType?: unknown }).mimeType;
    if (typeof text === 'string') {
      collected.push(text);
      continue;
    }
    if (typeof blob === 'string') {
      const mimeTypeLabel =
        typeof mimeType === 'string' ? mimeType : 'application/octet-stream';
      collected.push(
        `[Binary MCP resource omitted (mimeType: ${mimeTypeLabel}, bytes: ${blob.length})]`,
      );
    }
  }

  let combined = collected.join('\n\n');

  // Line-count cap applies before the character cap.
  if (Number.isFinite(limits.maxLinesPerResource)) {
    const rows = combined.split('\n');
    if (rows.length > limits.maxLinesPerResource) {
      combined = `${rows.slice(0, limits.maxLinesPerResource).join('\n')}\n[truncated]`;
    }
  }
  if (
    Number.isFinite(limits.maxCharsPerResource) &&
    combined.length > limits.maxCharsPerResource
  ) {
    combined = `${combined.slice(0, limits.maxCharsPerResource)}\n[truncated]`;
  }
  return combined;
}
/**
* Processes user input potentially containing one or more '@<path>' commands.
* If found, it attempts to read the specified files/directories using the
@ -324,22 +128,26 @@ function formatMcpResourceContents(
export async function handleAtCommand({
query,
config,
addItem,
onDebugMessage,
messageId: userMessageTimestamp,
signal,
addItem,
}: HandleAtCommandParams): Promise<HandleAtCommandResult> {
const parsedParts = parseAllAtCommands(query);
const { parts: commandParts, refs: mcpResourceRefs } =
extractMcpResourceAtReferences(parsedParts, config);
const mcpAtCommands = new Set(mcpResourceRefs.map((r) => r.atCommand));
const commandParts = parseAllAtCommands(query);
const atPathCommandParts = commandParts.filter(
(part) => part.type === 'atPath',
);
const fileAtPathCommandParts = atPathCommandParts.filter(
(part) => !mcpAtCommands.has(part.content),
);
const addToolGroup = (result: HandleAtCommandResult): void => {
if (!addItem) return;
if (result.toolDisplays && result.toolDisplays.length > 0) {
const toolGroupItem: HistoryItemToolGroup = {
type: 'tool_group',
tools: result.toolDisplays,
};
addItem(toolGroupItem, userMessageTimestamp);
}
};
if (atPathCommandParts.length === 0) {
return { processedQuery: [{ text: query }], shouldProceed: true };
@ -359,11 +167,7 @@ export async function handleAtCommand({
both: [],
};
const toolRegistry = config.getToolRegistry();
const readManyFilesTool = toolRegistry.getTool('read_many_files');
const globTool = toolRegistry.getTool('glob');
for (const atPathPart of fileAtPathCommandParts) {
for (const atPathPart of atPathCommandParts) {
const originalAtPath = atPathPart.content; // e.g., "@file.txt" or "@"
if (originalAtPath === '@') {
@ -374,23 +178,8 @@ export async function handleAtCommand({
}
const pathName = originalAtPath.substring(1);
if (!pathName) {
// This case should ideally not be hit if parseAllAtCommands ensures content after @
// but as a safeguard:
addItem(
{
type: 'error',
text: `Error: Invalid @ command '${originalAtPath}'. No path specified.`,
},
userMessageTimestamp,
);
// Decide if this is a fatal error for the whole command or just skip this @ part
// For now, let's be strict and fail the command if one @path is malformed.
return { processedQuery: null, shouldProceed: false };
}
// Check if path should be ignored based on filtering options
const workspaceContext = config.getWorkspaceContext();
if (!workspaceContext.isPathWithinWorkspace(pathName)) {
onDebugMessage(
@ -426,79 +215,27 @@ export async function handleAtCommand({
continue;
}
let resolvedSuccessfully = false;
let sawNotFound = false;
for (const dir of config.getWorkspaceContext().getDirectories()) {
let currentPathSpec = pathName;
let resolvedSuccessfully = false;
try {
const absolutePath = path.resolve(dir, pathName);
const stats = await fs.stat(absolutePath);
if (stats.isDirectory()) {
currentPathSpec =
pathName + (pathName.endsWith(path.sep) ? `**` : `/**`);
onDebugMessage(
`Path ${pathName} resolved to directory, using glob: ${currentPathSpec}`,
);
currentPathSpec = pathName;
onDebugMessage(`Path ${pathName} resolved to directory.`);
} else {
onDebugMessage(`Path ${pathName} resolved to file: ${absolutePath}`);
}
resolvedSuccessfully = true;
} catch (error) {
if (isNodeError(error) && error.code === 'ENOENT') {
if (config.getEnableRecursiveFileSearch() && globTool) {
onDebugMessage(
`Path ${pathName} not found directly, attempting glob search.`,
);
try {
const globResult = await globTool.buildAndExecute(
{
pattern: `**/*${pathName}*`,
path: dir,
},
signal,
);
if (
globResult.llmContent &&
typeof globResult.llmContent === 'string' &&
!globResult.llmContent.startsWith('No files found') &&
!globResult.llmContent.startsWith('Error:')
) {
const lines = globResult.llmContent.split('\n');
if (lines.length > 1 && lines[1]) {
const firstMatchAbsolute = lines[1].trim();
currentPathSpec = path.relative(dir, firstMatchAbsolute);
onDebugMessage(
`Glob search for ${pathName} found ${firstMatchAbsolute}, using relative path: ${currentPathSpec}`,
);
resolvedSuccessfully = true;
} else {
onDebugMessage(
`Glob search for '**/*${pathName}*' did not return a usable path. Path ${pathName} will be skipped.`,
);
}
} else {
onDebugMessage(
`Glob search for '**/*${pathName}*' found no files or an error. Path ${pathName} will be skipped.`,
);
}
} catch (globError) {
debugLogger.error(
`Error during glob search for ${pathName}: ${getErrorMessage(globError)}`,
);
onDebugMessage(
`Error during glob search for ${pathName}. Path ${pathName} will be skipped.`,
);
}
} else {
onDebugMessage(
`Glob tool not found. Path ${pathName} will be skipped.`,
);
}
sawNotFound = true;
continue;
} else {
debugLogger.error(
`Error stating path ${pathName}: ${getErrorMessage(error)}`,
);
onDebugMessage(
`Error stating path ${pathName}. Path ${pathName} will be skipped.`,
`Error stating path ${pathName}: ${getErrorMessage(error)}. Path ${pathName} will be skipped.`,
);
}
}
@ -509,6 +246,11 @@ export async function handleAtCommand({
break;
}
}
if (!resolvedSuccessfully && sawNotFound) {
onDebugMessage(
`Path ${pathName} not found. Path ${pathName} will be skipped.`,
);
}
}
// Construct the initial part of the query for the LLM
@ -573,12 +315,11 @@ export async function handleAtCommand({
}
const message = `Ignored ${totalIgnored} files:\n${messages.join('\n')}`;
debugLogger.info(message);
onDebugMessage(message);
}
// Fallback for lone "@" or completely invalid @-commands resulting in empty initialQueryText
if (pathSpecsToRead.length === 0 && mcpResourceRefs.length === 0) {
if (pathSpecsToRead.length === 0) {
onDebugMessage('No valid file paths found in @ commands to read.');
if (initialQueryText === '@' && query.trim() === '@') {
// If the only thing was a lone @, pass original query (which might have spaces)
@ -594,167 +335,89 @@ export async function handleAtCommand({
};
}
const processedQueryParts: PartUnion[] = [{ text: initialQueryText }];
const toolDisplays: IndividualToolCallDisplay[] = [];
if (pathSpecsToRead.length > 0) {
if (!readManyFilesTool) {
addItem(
{ type: 'error', text: 'Error: read_many_files tool not found.' },
userMessageTimestamp,
);
return { processedQuery: null, shouldProceed: false };
}
const toolArgs = {
try {
const result = await readManyFiles(config, {
paths: pathSpecsToRead,
file_filtering_options: {
respect_git_ignore: respectFileIgnore.respectGitIgnore,
respect_qwen_ignore: respectFileIgnore.respectQwenIgnore,
},
// Use configuration setting
};
let invocation: AnyToolInvocation | undefined = undefined;
try {
invocation = readManyFilesTool.build(toolArgs);
const result = await invocation.execute(signal);
toolDisplays.push({
callId: `client-read-${userMessageTimestamp}`,
name: readManyFilesTool.displayName,
description: invocation.getDescription(),
status: ToolCallStatus.Success,
resultDisplay:
result.returnDisplay ||
`Successfully read: ${contentLabelsForDisplay.join(', ')}`,
confirmationDetails: undefined,
});
if (Array.isArray(result.llmContent)) {
const fileContentRegex = /^--- (.*?) ---\n\n([\s\S]*?)\n\n$/;
processedQueryParts.push({
text: '\n--- Content from referenced files ---',
});
for (const part of result.llmContent) {
if (typeof part === 'string') {
const match = fileContentRegex.exec(part);
if (match) {
const filePathSpecInContent = match[1]; // This is a resolved pathSpec
const fileActualContent = match[2].trim();
processedQueryParts.push({
text: `\nContent from @${filePathSpecInContent}:\n`,
});
processedQueryParts.push({ text: fileActualContent });
} else {
processedQueryParts.push({ text: part });
}
} else {
// part is a Part object.
processedQueryParts.push(part);
}
}
} else {
onDebugMessage(
'read_many_files tool returned no content or empty content.',
);
}
} catch (error: unknown) {
toolDisplays.push({
callId: `client-read-${userMessageTimestamp}`,
name: readManyFilesTool.displayName,
description:
invocation?.getDescription() ??
'Error attempting to execute tool to read files',
status: ToolCallStatus.Error,
resultDisplay: `Error reading files (${contentLabelsForDisplay.join(', ')}): ${getErrorMessage(error)}`,
confirmationDetails: undefined,
});
addItem(
{ type: 'tool_group', tools: toolDisplays } as Omit<HistoryItem, 'id'>,
userMessageTimestamp,
);
return { processedQuery: null, shouldProceed: false };
}
}
if (mcpResourceRefs.length > 0) {
const totalCharLimit = config.getTruncateToolOutputThreshold();
const totalLineLimit = config.getTruncateToolOutputLines();
const maxCharsPerResource = Number.isFinite(totalCharLimit)
? Math.floor(totalCharLimit / Math.max(1, mcpResourceRefs.length))
: Number.POSITIVE_INFINITY;
const maxLinesPerResource = Number.isFinite(totalLineLimit)
? Math.floor(totalLineLimit / Math.max(1, mcpResourceRefs.length))
: Number.POSITIVE_INFINITY;
processedQueryParts.push({
text: '\n--- Content from referenced MCP resources ---',
signal,
});
for (let i = 0; i < mcpResourceRefs.length; i++) {
const ref = mcpResourceRefs[i];
let resourceResult: unknown;
try {
if (signal.aborted) {
const error = new Error('MCP resource read aborted');
error.name = 'AbortError';
throw error;
const parts = Array.isArray(result.contentParts)
? result.contentParts
: [result.contentParts];
// Create individual tool call displays for each file read
const toolCallDisplays: IndividualToolCallDisplay[] = result.files.map(
(file, index) => ({
callId: `client-read-${userMessageTimestamp}-${index}`,
name: file.isDirectory ? 'Read Directory' : 'Read File',
description: file.isDirectory
? `Read directory ${path.basename(file.filePath)}`
: `Read file ${path.basename(file.filePath)}`,
status: ToolCallStatus.Success,
resultDisplay: undefined,
confirmationDetails: undefined,
}),
);
const processedQueryParts: PartListUnion = [{ text: initialQueryText }];
if (parts.length > 0 && !result.error) {
// readManyFiles now returns properly formatted parts with headers and prefixes
for (const part of parts) {
if (typeof part === 'string') {
processedQueryParts.push({ text: part });
} else {
// part is a Part object (text, inlineData, or fileData)
processedQueryParts.push(part);
}
resourceResult = await toolRegistry.readMcpResource(
ref.serverName,
ref.uri,
{ signal },
);
toolDisplays.push({
callId: `client-mcp-resource-${userMessageTimestamp}-${i}`,
name: 'McpResourceRead',
description: `Read MCP resource ${ref.uri} (server: ${ref.serverName})`,
status: ToolCallStatus.Success,
resultDisplay: `Read: ${ref.uri}`,
confirmationDetails: undefined,
});
} catch (error: unknown) {
toolDisplays.push({
callId: `client-mcp-resource-${userMessageTimestamp}-${i}`,
name: 'McpResourceRead',
description: `Read MCP resource ${ref.uri} (server: ${ref.serverName})`,
status: ToolCallStatus.Error,
resultDisplay: `Error reading MCP resource (${ref.uri}): ${getErrorMessage(error)}`,
confirmationDetails: undefined,
});
addItem(
{ type: 'tool_group', tools: toolDisplays } as Omit<
HistoryItem,
'id'
>,
userMessageTimestamp,
);
return { processedQuery: null, shouldProceed: false };
}
processedQueryParts.push({
text: `\nContent from ${ref.atCommand}:\n`,
});
processedQueryParts.push({
text: formatMcpResourceContents(resourceResult, {
maxCharsPerResource,
maxLinesPerResource,
}),
});
} else {
onDebugMessage('readManyFiles returned no content or empty content.');
}
processedQueryParts.push({ text: '\n--- End of MCP resource content ---' });
}
const processedResult: HandleAtCommandResult = {
processedQuery: processedQueryParts,
shouldProceed: true,
toolDisplays: toolCallDisplays,
filesRead: contentLabelsForDisplay,
};
if (toolDisplays.length > 0) {
addItem(
{ type: 'tool_group', tools: toolDisplays } as Omit<HistoryItem, 'id'>,
userMessageTimestamp,
);
}
const chatRecorder = config.getChatRecordingService?.();
chatRecorder?.recordAtCommand({
filesRead: contentLabelsForDisplay,
status: 'success',
userText: query,
});
return { processedQuery: processedQueryParts, shouldProceed: true };
addToolGroup(processedResult);
return processedResult;
} catch (error: unknown) {
const errorToolCallDisplay: IndividualToolCallDisplay = {
callId: `client-read-${userMessageTimestamp}`,
name: 'Read File(s)',
description: 'Error attempting to read files',
status: ToolCallStatus.Error,
resultDisplay: `Error reading files (${contentLabelsForDisplay.join(', ')}): ${getErrorMessage(error)}`,
confirmationDetails: undefined,
};
const chatRecorder = config.getChatRecordingService?.();
const errorMessage =
typeof errorToolCallDisplay.resultDisplay === 'string'
? errorToolCallDisplay.resultDisplay
: undefined;
chatRecorder?.recordAtCommand({
filesRead: contentLabelsForDisplay,
status: 'error',
message: errorMessage,
userText: query,
});
const result = {
processedQuery: null,
shouldProceed: false,
toolDisplays: [errorToolCallDisplay],
filesRead: contentLabelsForDisplay,
};
addToolGroup(result);
return result;
}
}

View file

@ -49,7 +49,7 @@ describe('useAtCompletion', () => {
respectQwenIgnore: true,
})),
getEnableRecursiveFileSearch: () => true,
getFileFilteringDisableFuzzySearch: () => false,
getFileFilteringEnableFuzzySearch: () => true,
} as unknown as Config;
vi.clearAllMocks();
});
@ -197,7 +197,7 @@ describe('useAtCompletion', () => {
cache: false,
cacheTtl: 0,
enableRecursiveFileSearch: true,
disableFuzzySearch: false,
enableFuzzySearch: true,
});
await realFileSearch.initialize();
@ -479,7 +479,7 @@ describe('useAtCompletion', () => {
respectGitIgnore: true,
respectQwenIgnore: true,
})),
getFileFilteringDisableFuzzySearch: () => false,
getFileFilteringEnableFuzzySearch: () => true,
} as unknown as Config;
const { result } = renderHook(() =>

View file

@ -166,8 +166,9 @@ export function useAtCompletion(props: UseAtCompletionProps): void {
cacheTtl: 30, // 30 seconds
enableRecursiveFileSearch:
config?.getEnableRecursiveFileSearch() ?? true,
disableFuzzySearch:
config?.getFileFilteringDisableFuzzySearch() ?? false,
// Use enableFuzzySearch with !== false to default to true when undefined.
enableFuzzySearch:
config?.getFileFilteringEnableFuzzySearch() !== false,
});
await searcher.initialize();
fileSearch.current = searcher;

View file

@ -233,7 +233,6 @@ describe('useGeminiStream', () => {
.fn()
.mockReturnValue(contentGeneratorConfig),
getMaxSessionTurns: vi.fn(() => 50),
getUseSmartEdit: () => false,
} as unknown as Config;
mockOnDebugMessage = vi.fn();
mockHandleSlashCommand = vi.fn().mockResolvedValue(false);

View file

@ -385,34 +385,28 @@ export const useGeminiStream = (
return { queryToSend: null, shouldProceed: false };
}
localQueryToSendToGemini = trimmedQuery;
addItem(
{ type: MessageType.USER, text: trimmedQuery },
userMessageTimestamp,
);
// Handle @-commands (which might involve tool calls)
if (isAtCommand(trimmedQuery)) {
const atCommandResult = await handleAtCommand({
query: trimmedQuery,
config,
addItem,
onDebugMessage,
messageId: userMessageTimestamp,
signal: abortSignal,
addItem,
});
// Add user's turn after @ command processing is done.
addItem(
{ type: MessageType.USER, text: trimmedQuery },
userMessageTimestamp,
);
if (!atCommandResult.shouldProceed) {
return { queryToSend: null, shouldProceed: false };
}
localQueryToSendToGemini = atCommandResult.processedQuery;
} else {
// Normal query for Gemini
addItem(
{ type: MessageType.USER, text: trimmedQuery },
userMessageTimestamp,
);
localQueryToSendToGemini = trimmedQuery;
}
} else {
// It's a function response (PartListUnion that isn't a string)
@ -984,6 +978,7 @@ export const useGeminiStream = (
prompt_id!,
options,
);
const processingStatus = await processGeminiStreamEvents(
stream,
userMessageTimestamp,

View file

@ -64,7 +64,6 @@ const mockConfig = {
model: 'test-model',
authType: 'gemini',
}),
getUseSmartEdit: () => false,
getUseModelRouter: () => false,
getGeminiClient: () => null, // No client needed for these tests
getShellExecutionConfig: () => ({ terminalWidth: 80, terminalHeight: 24 }),

View file

@ -0,0 +1,266 @@
/**
* @license
* Copyright 2025 Qwen Team
* SPDX-License-Identifier: Apache-2.0
*/
import { randomUUID } from 'node:crypto';
import type { Config, ChatRecord } from '@qwen-code/qwen-code-core';
import type { SessionContext } from '../../../acp-integration/session/types.js';
import type * as acp from '../../../acp-integration/acp.js';
import { HistoryReplayer } from '../../../acp-integration/session/HistoryReplayer.js';
import type { ExportMessage, ExportSessionData } from './types.js';
/**
 * Export session context that captures session updates into export messages.
 * Implements SessionContext to work with HistoryReplayer.
 *
 * Streaming text chunks are buffered into `currentMessage` and flushed to
 * `messages` whenever the role changes, a tool call / plan update arrives,
 * or `flushMessages()` is called explicitly.
 */
class ExportSessionContext implements SessionContext {
  readonly sessionId: string;
  readonly config: Config;
  // Completed export messages, in arrival order.
  private messages: ExportMessage[] = [];
  // Text chunks being accumulated for the in-progress message, if any.
  private currentMessage: {
    type: 'user' | 'assistant';
    role: 'user' | 'assistant' | 'thinking';
    parts: Array<{ text: string }>;
    timestamp: number;
  } | null = null;
  // Identity/timestamp of the chat record currently being replayed; used so
  // emitted export messages carry the original record's uuid and timestamp.
  private activeRecordId: string | null = null;
  private activeRecordTimestamp: string | null = null;
  // Tool calls keyed by toolCallId so later updates can mutate them in place.
  private toolCallMap: Map<string, ExportMessage['toolCall']> = new Map();

  constructor(sessionId: string, config: Config) {
    this.sessionId = sessionId;
    this.config = config;
  }

  /**
   * Receives a replayed session update and routes it to the matching handler.
   * Tool-call and plan updates force a flush first so message ordering in the
   * export matches the original stream.
   */
  async sendUpdate(update: acp.SessionUpdate): Promise<void> {
    switch (update.sessionUpdate) {
      case 'user_message_chunk':
        this.handleMessageChunk('user', update.content);
        break;
      case 'agent_message_chunk':
        this.handleMessageChunk('assistant', update.content);
        break;
      case 'agent_thought_chunk':
        this.handleMessageChunk('assistant', update.content, 'thinking');
        break;
      case 'tool_call':
        this.flushCurrentMessage();
        this.handleToolCallStart(update);
        break;
      case 'tool_call_update':
        this.handleToolCallUpdate(update);
        break;
      case 'plan':
        this.flushCurrentMessage();
        this.handlePlanUpdate(update);
        break;
      default:
        // Ignore other update types
        break;
    }
  }

  /**
   * Marks which chat record is currently being replayed. Pass `null` to
   * clear; subsequent messages then fall back to fresh uuids/timestamps.
   */
  setActiveRecordId(recordId: string | null, timestamp?: string): void {
    this.activeRecordId = recordId;
    this.activeRecordTimestamp = timestamp ?? null;
  }

  // Timestamp for the next emitted message: the active record's timestamp,
  // or "now" when no record is active.
  private getMessageTimestamp(): string {
    return this.activeRecordTimestamp ?? new Date().toISOString();
  }

  // Uuid for the next emitted message: the active record's id, or a random
  // uuid when no record is active.
  private getMessageUuid(): string {
    return this.activeRecordId ?? randomUUID();
  }

  /**
   * Buffers a streamed text chunk. Consecutive chunks with the same
   * type/role are merged into one message; a role change flushes the
   * previous buffer first. Non-text or empty chunks are ignored.
   */
  private handleMessageChunk(
    role: 'user' | 'assistant',
    content: { type: string; text?: string },
    messageRole: 'user' | 'assistant' | 'thinking' = role,
  ): void {
    if (content.type !== 'text' || !content.text) return;
    // If we're starting a new message type, flush the previous one
    if (
      this.currentMessage &&
      (this.currentMessage.type !== role ||
        this.currentMessage.role !== messageRole)
    ) {
      this.flushCurrentMessage();
    }
    // Add to current message or create new one
    if (
      this.currentMessage &&
      this.currentMessage.type === role &&
      this.currentMessage.role === messageRole
    ) {
      this.currentMessage.parts.push({ text: content.text });
    } else {
      this.currentMessage = {
        type: role,
        role: messageRole,
        parts: [{ text: content.text }],
        timestamp: Date.now(),
      };
    }
  }

  /**
   * Records a new tool call and immediately appends it to the message list
   * (so it keeps its position relative to surrounding text), while also
   * indexing it by toolCallId for later in-place updates.
   */
  private handleToolCallStart(update: acp.ToolCall): void {
    const toolCall: ExportMessage['toolCall'] = {
      toolCallId: update.toolCallId,
      kind: update.kind || 'other',
      // NOTE(review): when title is not a string this falls through to
      // `update.title || ''`, which may keep a non-string value — confirm
      // the acp.ToolCall title shape.
      title:
        typeof update.title === 'string' ? update.title : update.title || '',
      status: update.status || 'pending',
      rawInput: update.rawInput as string | object | undefined,
      locations: update.locations,
      timestamp: Date.now(),
    };
    this.toolCallMap.set(update.toolCallId, toolCall);
    // Immediately add tool call to messages to preserve order
    const uuid = this.getMessageUuid();
    this.messages.push({
      uuid,
      sessionId: this.sessionId,
      timestamp: this.getMessageTimestamp(),
      type: 'tool_call',
      toolCall,
    });
  }

  /**
   * Applies a status/content/title update to a previously recorded tool
   * call. Unknown toolCallIds are ignored. Mutates the stored object, so the
   * already-emitted message reflects the update too.
   */
  private handleToolCallUpdate(update: {
    toolCallId: string;
    status?: 'pending' | 'in_progress' | 'completed' | 'failed' | null;
    title?: string | null;
    content?: Array<{ type: string; [key: string]: unknown }> | null;
    kind?: string | null;
  }): void {
    const toolCall = this.toolCallMap.get(update.toolCallId);
    if (toolCall) {
      // Update the tool call in place
      if (update.status) toolCall.status = update.status;
      if (update.content) toolCall.content = update.content;
      if (update.title)
        toolCall.title = typeof update.title === 'string' ? update.title : '';
    }
  }

  /**
   * Converts a plan update into a synthetic completed "TodoWrite" tool-call
   * message, rendering the plan entries as a markdown checklist.
   */
  private handlePlanUpdate(update: {
    entries: Array<{
      content: string;
      status: 'pending' | 'in_progress' | 'completed';
      priority?: string;
    }>;
  }): void {
    // Create a tool_call message for plan updates (TodoWriteTool)
    // This ensures todos appear at the correct position in the chat
    const uuid = this.getMessageUuid();
    const timestamp = this.getMessageTimestamp();
    // Format entries as markdown checklist text for UpdatedPlanToolCall.parsePlanEntries
    const todoText = update.entries
      .map((entry) => {
        const checkbox =
          entry.status === 'completed'
            ? '[x]'
            : entry.status === 'in_progress'
              ? '[-]'
              : '[ ]';
        return `- ${checkbox} ${entry.content}`;
      })
      .join('\n');
    const todoContent = [
      {
        type: 'content' as const,
        content: {
          type: 'text',
          text: todoText,
        },
      },
    ];
    this.messages.push({
      uuid,
      sessionId: this.sessionId,
      timestamp,
      type: 'tool_call',
      toolCall: {
        toolCallId: uuid, // Use the same uuid as toolCallId for plan updates
        kind: 'todowrite',
        title: 'TodoWrite',
        status: 'completed',
        content: todoContent,
        timestamp: Date.parse(timestamp),
      },
    });
  }

  // Emits the buffered text message (if any) to `messages` and clears the
  // buffer. No-op when nothing is buffered.
  private flushCurrentMessage(): void {
    if (!this.currentMessage) return;
    const uuid = this.getMessageUuid();
    this.messages.push({
      uuid,
      sessionId: this.sessionId,
      timestamp: this.getMessageTimestamp(),
      type: this.currentMessage.type,
      message: {
        role: this.currentMessage.role,
        parts: this.currentMessage.parts,
      },
    });
    this.currentMessage = null;
  }

  /** Flushes any buffered message; call before reading `getMessages()`. */
  flushMessages(): void {
    this.flushCurrentMessage();
  }

  /** Returns the collected export messages (live array, not a copy). */
  getMessages(): ExportMessage[] {
    return this.messages;
  }
}
/**
 * Collects session data from ChatRecord[] using HistoryReplayer.
 * Returns the raw ExportSessionData (SSOT) without normalization.
 *
 * @param conversation Session identity plus the recorded chat messages.
 * @param config Active configuration, forwarded to the export context.
 */
export async function collectSessionData(
  conversation: {
    sessionId: string;
    startTime: string;
    messages: ChatRecord[];
  },
  config: Config,
): Promise<ExportSessionData> {
  // The context captures replayed session updates as export messages.
  const context = new ExportSessionContext(conversation.sessionId, config);
  await new HistoryReplayer(context).replay(conversation.messages);
  // Ensure any partially buffered message is emitted before reading.
  context.flushMessages();
  return {
    sessionId: conversation.sessionId,
    startTime: conversation.startTime,
    messages: context.getMessages(),
  };
}

View file

@ -0,0 +1,83 @@
/**
* @license
* Copyright 2025 Qwen Team
* SPDX-License-Identifier: Apache-2.0
*/
import type { ExportSessionData } from '../types.js';
import { HTML_TEMPLATE } from './htmlTemplate.js';
/**
 * Escapes JSON for safe embedding in HTML.
 *
 * Replaces characters that could terminate a <script> element or break
 * inline parsing (<, >, &, and the JS line separators U+2028/U+2029) with
 * their \uXXXX escapes, which remain valid inside JSON strings.
 */
function escapeJsonForHtml(json: string): string {
  const substitutions: Array<[RegExp, string]> = [
    [/<\/script/gi, '<\\/script'],
    [/&/g, '\\u0026'],
    [/</g, '\\u003c'],
    [/>/g, '\\u003e'],
    [/\u2028/g, '\\u2028'],
    [/\u2029/g, '\\u2029'],
  ];
  // Apply the substitutions in order (the later '<' escape also covers the
  // already-neutralized closing-script sequence).
  return substitutions.reduce(
    (escaped, [pattern, replacement]) => escaped.replace(pattern, replacement),
    json,
  );
}
/**
 * Loads the HTML template built from assets.
 *
 * The template is generated at build time and bundled as the in-memory
 * `HTML_TEMPLATE` string constant, so this involves no file-system access.
 *
 * @returns The complete HTML template text.
 */
export function loadHtmlTemplate(): string {
  return HTML_TEMPLATE;
}
/**
 * Injects JSON data into the HTML template.
 *
 * Locates the <script id="chat-data"> element, replaces its contents with
 * the (escaped, pretty-printed) JSON payload, and re-indents each payload
 * line to match the column of the opening tag. If any of the expected
 * markers is missing, the template is returned unchanged.
 */
export function injectDataIntoHtmlTemplate(
  template: string,
  data: {
    sessionId: string;
    startTime: string;
    messages: unknown[];
  },
): string {
  // Serialize and HTML-escape the payload up front.
  const escapedPayload = escapeJsonForHtml(JSON.stringify(data, null, 2));

  const markerIndex = template.indexOf('id="chat-data"');
  if (markerIndex === -1) {
    return template;
  }
  const scriptOpenStart = template.lastIndexOf('<script', markerIndex);
  if (scriptOpenStart === -1) {
    return template;
  }
  const scriptOpenEnd = template.indexOf('>', markerIndex);
  if (scriptOpenEnd === -1) {
    return template;
  }
  const scriptCloseStart = template.indexOf('</script>', scriptOpenEnd);
  if (scriptCloseStart === -1) {
    return template;
  }

  // Mirror the indentation of the line containing the opening tag.
  const newlineBeforeOpen = template.lastIndexOf('\n', scriptOpenStart);
  const indent =
    newlineBeforeOpen === -1
      ? ''
      : template.slice(newlineBeforeOpen + 1, scriptOpenStart);
  const indentedPayload = escapedPayload
    .split('\n')
    .map((line) => `${indent}${line}`)
    .join('\n');

  const head = template.slice(0, scriptOpenEnd + 1);
  const tail = template.slice(scriptCloseStart);
  return `${head}\n${indentedPayload}\n${tail}`;
}
/**
 * Converts ExportSessionData to HTML format.
 *
 * Renders the session by injecting its JSON into the bundled HTML template.
 */
export function toHtml(sessionData: ExportSessionData): string {
  return injectDataIntoHtmlTemplate(loadHtmlTemplate(), sessionData);
}

File diff suppressed because one or more lines are too long

View file

@ -0,0 +1,15 @@
/**
* @license
* Copyright 2025 Qwen Team
* SPDX-License-Identifier: Apache-2.0
*/
import type { ExportSessionData } from '../types.js';
/**
 * Converts ExportSessionData to JSON format.
 * Outputs a single JSON object containing the entire session,
 * pretty-printed with a two-space indent for readability.
 */
export function toJson(sessionData: ExportSessionData): string {
  const INDENT_WIDTH = 2;
  return JSON.stringify(sessionData, undefined, INDENT_WIDTH);
}

View file

@ -0,0 +1,31 @@
/**
* @license
* Copyright 2025 Qwen Team
* SPDX-License-Identifier: Apache-2.0
*/
import type { ExportSessionData } from '../types.js';
/**
 * Converts ExportSessionData to JSONL (JSON Lines) format.
 *
 * The first line is a `session_metadata` object; every following line is
 * one message serialized as compact JSON.
 */
export function toJsonl(sessionData: ExportSessionData): string {
  const metadataLine = JSON.stringify({
    type: 'session_metadata',
    sessionId: sessionData.sessionId,
    startTime: sessionData.startTime,
  });
  const messageLines = sessionData.messages.map((message) =>
    JSON.stringify(message),
  );
  return [metadataLine, ...messageLines].join('\n');
}

View file

@ -0,0 +1,225 @@
/**
* @license
* Copyright 2025 Qwen Team
* SPDX-License-Identifier: Apache-2.0
*/
import type { ExportSessionData, ExportMessage } from '../types.js';
/**
 * Converts ExportSessionData to markdown format.
 *
 * Emits a metadata header, then renders each message under a role heading:
 * user/assistant text, tool calls via formatToolCall, and system messages
 * as blockquotes. Every message is followed by a blank separator line.
 */
export function toMarkdown(sessionData: ExportSessionData): string {
  const out: string[] = [
    '# Chat Session Export\n',
    `- **Session ID**: \`${sanitizeText(sessionData.sessionId)}\``,
    `- **Start Time**: ${sanitizeText(sessionData.startTime)}`,
    `- **Exported**: ${new Date().toISOString()}`,
    '\n---\n',
  ];

  for (const message of sessionData.messages) {
    switch (message.type) {
      case 'user':
        out.push('## User\n', formatMessageContent(message));
        break;
      case 'assistant':
        out.push('## Assistant\n', formatMessageContent(message));
        break;
      case 'tool_call':
        out.push(formatToolCall(message));
        break;
      case 'system': {
        // System messages are rendered as blockquotes.
        const body = formatMessageContent(message);
        out.push('### System\n', `> ${body.replace(/\n/g, '\n> ')}`);
        break;
      }
      default:
        break;
    }
    // Blank separator after every message, regardless of type.
    out.push('\n');
  }

  return out.join('\n');
}
/**
 * Extracts a message's text and post-processes referenced-file sections.
 *
 * Any "--- Content from referenced files --- ... --- End of content ---"
 * span is rewrapped as a quoted header followed by a fenced code block so
 * raw file dumps do not bleed into the surrounding prose. The marker
 * matching is a simple non-greedy regex and assumes the markers are not
 * nested.
 */
function formatMessageContent(message: ExportMessage): string {
  const raw = extractTextFromMessage(message);
  return raw.replace(
    /--- Content from referenced files ---\n([\s\S]*?)\n--- End of content ---/g,
    (_match, inner) =>
      `\n> **Referenced Files:**\n\n${createCodeBlock(inner)}\n`,
  );
}
/**
 * Renders a tool_call message as a Markdown section: a heading with the
 * tool title, its status, then optional Input, Affected Files, and Output
 * subsections. Returns '' when the message carries no tool call.
 */
function formatToolCall(message: ExportMessage): string {
  const call = message.toolCall;
  if (!call) return '';
  const { title, status, rawInput, content, locations } = call;
  const out: string[] = [];

  // Heading: titles may be structured objects, so serialize those.
  const heading = typeof title === 'string' ? title : JSON.stringify(title);
  out.push(
    `### Tool: ${sanitizeText(heading)}`,
    `**Status**: ${sanitizeText(status)}\n`,
  );

  // Input: shown as a JSON code block (pretty-printed when structured).
  if (rawInput) {
    const serialized =
      typeof rawInput === 'string'
        ? rawInput
        : JSON.stringify(rawInput, null, 2);
    out.push('**Input:**', createCodeBlock(serialized, 'json'), '');
  }

  // Affected files: one bullet per location, with ":line" when known.
  if (locations && locations.length > 0) {
    out.push('**Affected Files:**');
    for (const location of locations) {
      const suffix = location.line ? `:${location.line}` : '';
      out.push(`- \`${sanitizeText(location.path)}${suffix}\``);
    }
    out.push('');
  }

  // Output: text entries become code blocks; diff entries get a caption.
  if (content && content.length > 0) {
    out.push('**Output:**');
    for (const entry of content) {
      if (entry.type === 'content' && entry['content']) {
        const payload = entry['content'] as { type: string; text?: string };
        if (payload.type === 'text' && payload.text) {
          // Infer a fence language only when exactly one file is involved.
          const language =
            locations && locations.length === 1 && locations[0].path
              ? getLanguageFromPath(locations[0].path)
              : '';
          out.push(createCodeBlock(payload.text, language));
        }
      } else if (entry.type === 'diff') {
        const diffPath = entry['path'] as string;
        out.push(`\n*Diff for \`${sanitizeText(diffPath)}\`:*`);
        out.push(createCodeBlock(entry['newText'] as string, 'diff'));
      }
    }
  }
  return out.join('\n');
}
/**
 * Concatenates the text of every text-bearing part in a message,
 * one part per line. Returns '' when the message has no parts.
 */
function extractTextFromMessage(message: ExportMessage): string {
  const parts = message.message?.parts ?? [];
  return parts
    .filter((part) => 'text' in part)
    .map((part) => part.text)
    .join('\n');
}
/**
 * Wraps content in a fenced Markdown code block, sizing the fence so no
 * backtick run inside the content can terminate it early.
 * Content is deliberately NOT HTML-escaped — that would hurt code
 * readability; the fence itself provides containment.
 */
function createCodeBlock(content: string, language: string = ''): string {
  const fence = buildFence(content);
  return [`${fence}${language}`, content, fence].join('\n');
}
/**
 * Escapes the two characters that can open HTML injection (& and <)
 * while leaving Markdown structure (quotes, backticks, >) intact.
 * A single-pass replacement avoids re-escaping produced entities.
 */
function sanitizeText(value: string): string {
  const escapes: Record<string, string> = { '&': '&amp;', '<': '&lt;' };
  return (value ?? '').replace(/[&<]/g, (ch) => escapes[ch]);
}
/**
 * Computes a backtick fence strictly longer than any backtick run inside
 * the content, with a minimum length of three (the standard Markdown fence).
 */
function buildFence(value: string): string {
  let longestRun = 0;
  for (const run of (value ?? '').match(/`+/g) ?? []) {
    longestRun = Math.max(longestRun, run.length);
  }
  return '`'.repeat(Math.max(3, longestRun + 1));
}
/**
 * Guesses a Markdown code-fence language identifier from a file path's
 * extension (case-insensitive). Returns '' when the extension is unknown.
 */
function getLanguageFromPath(path: string): string {
  const languageByExtension: Record<string, string> = {
    ts: 'typescript',
    tsx: 'typescript',
    js: 'javascript',
    jsx: 'javascript',
    mjs: 'javascript',
    cjs: 'javascript',
    py: 'python',
    rb: 'ruby',
    go: 'go',
    rs: 'rust',
    java: 'java',
    c: 'cpp',
    cpp: 'cpp',
    h: 'cpp',
    hpp: 'cpp',
    cs: 'csharp',
    html: 'html',
    css: 'css',
    json: 'json',
    md: 'markdown',
    sh: 'bash',
    bash: 'bash',
    zsh: 'bash',
    yaml: 'yaml',
    yml: 'yaml',
    xml: 'xml',
    sql: 'sql',
  };
  const ext = path.split('.').pop()?.toLowerCase();
  return (ext && languageByExtension[ext]) || '';
}

View file

@ -0,0 +1,18 @@
/**
* @license
* Copyright 2025 Qwen Team
* SPDX-License-Identifier: Apache-2.0
*/
export type { ExportMessage, ExportSessionData } from './types.js';
export { collectSessionData } from './collect.js';
export { normalizeSessionData } from './normalize.js';
export { toMarkdown } from './formatters/markdown.js';
export {
toHtml,
loadHtmlTemplate,
injectDataIntoHtmlTemplate,
} from './formatters/html.js';
export { toJson } from './formatters/json.js';
export { toJsonl } from './formatters/jsonl.js';
export { generateExportFilename } from './utils.js';

View file

@ -0,0 +1,324 @@
/**
* @license
* Copyright 2025 Qwen Team
* SPDX-License-Identifier: Apache-2.0
*/
import type { Part } from '@google/genai';
import { ExitPlanModeTool, ToolNames } from '@qwen-code/qwen-code-core';
import type { ChatRecord, Config, Kind } from '@qwen-code/qwen-code-core';
import type { ExportMessage, ExportSessionData } from './types.js';
/**
 * Merges tool-call metadata from persisted tool_result records into the
 * exported message list, so the single source of truth carries complete
 * tool-call information.
 *
 * Existing tool_call messages are updated in place (gaps filled via
 * mergeToolCallData); results with no matching call are appended as new
 * tool_call messages. The input sessionData is not mutated.
 */
export function normalizeSessionData(
  sessionData: ExportSessionData,
  originalRecords: ChatRecord[],
  config: Config,
): ExportSessionData {
  const messages = [...sessionData.messages];

  // Index existing tool_call messages by their id for O(1) lookups.
  const indexById = new Map<string, number>();
  messages.forEach((msg, i) => {
    if (msg.type === 'tool_call' && msg.toolCall?.toolCallId) {
      indexById.set(msg.toolCall.toolCallId, i);
    }
  });

  for (const record of originalRecords) {
    if (record.type !== 'tool_result') continue;
    const candidate = buildToolCallMessageFromResult(record, config);
    if (!candidate?.toolCall) continue;

    const at = indexById.get(candidate.toolCall.toolCallId);
    if (at === undefined) {
      // No matching call recorded yet — append the reconstructed one.
      indexById.set(candidate.toolCall.toolCallId, messages.length);
      messages.push(candidate);
      continue;
    }
    const existing = messages[at];
    if (existing.type === 'tool_call' && existing.toolCall) {
      mergeToolCallData(existing.toolCall, candidate.toolCall);
    }
  }

  return { ...sessionData, messages };
}
/**
 * Fills gaps in an existing tool call with data from the result record.
 * Existing values win; incoming values are used only where the existing
 * entry is absent, empty, or still in a provisional state.
 */
function mergeToolCallData(
  existing: NonNullable<ExportMessage['toolCall']>,
  incoming: NonNullable<ExportMessage['toolCall']>,
): void {
  if (!existing.content?.length) {
    existing.content = incoming.content;
  }
  // A provisional status is superseded by the result's final status.
  const provisional =
    existing.status === 'pending' || existing.status === 'in_progress';
  if (provisional) {
    existing.status = incoming.status;
  }
  if (incoming.rawInput && !existing.rawInput) {
    existing.rawInput = incoming.rawInput;
  }
  if (existing.kind === 'other' || !existing.kind) {
    existing.kind = incoming.kind;
  }
  if (incoming.title && !existing.title) {
    existing.title = incoming.title;
  }
  if (incoming.locations?.length && !existing.locations?.length) {
    existing.locations = incoming.locations;
  }
}
/**
* Builds a tool call message from a tool_result ChatRecord.
*/
function buildToolCallMessageFromResult(
record: ChatRecord,
config: Config,
): ExportMessage | null {
const toolCallResult = record.toolCallResult;
const toolName = extractToolNameFromRecord(record);
// Skip todo_write tool - it's already handled by plan update in collect.ts
// This prevents duplicate todo messages in the export
if (toolName === ToolNames.TODO_WRITE) {
return null;
}
const toolCallId = toolCallResult?.callId ?? record.uuid;
const functionCallArgs = extractFunctionCallArgs(record);
const { kind, title, locations } = resolveToolMetadata(
config,
toolName,
functionCallArgs ??
(toolCallResult as { args?: Record<string, unknown> } | undefined)?.args,
);
const rawInput = normalizeRawInput(
functionCallArgs ??
(toolCallResult as { args?: unknown } | undefined)?.args,
);
const content =
extractDiffContent(toolCallResult?.resultDisplay) ??
transformPartsToToolCallContent(record.message?.parts ?? []);
return {
uuid: record.uuid,
parentUuid: record.parentUuid,
sessionId: record.sessionId,
timestamp: record.timestamp,
type: 'tool_call',
toolCall: {
toolCallId,
kind,
title,
status: toolCallResult?.error ? 'failed' : 'completed',
rawInput,
content,
locations,
timestamp: Date.parse(record.timestamp),
},
};
}
/**
 * Returns the tool name carried by the record's first named
 * functionResponse part, or '' when none is present.
 */
function extractToolNameFromRecord(record: ChatRecord): string {
  for (const part of record.message?.parts ?? []) {
    if ('functionResponse' in part && part.functionResponse?.name) {
      return part.functionResponse.name;
    }
  }
  return '';
}
/**
 * Returns the arguments of the record's first functionCall part that has
 * args, or undefined when none exists.
 */
function extractFunctionCallArgs(
  record: ChatRecord,
): Record<string, unknown> | undefined {
  const parts = record.message?.parts ?? [];
  const callPart = parts.find(
    (part) => 'functionCall' in part && Boolean(part.functionCall?.args),
  );
  return callPart?.functionCall?.args as Record<string, unknown> | undefined;
}
/**
 * Looks the tool up in the registry to derive a display kind, a human
 * title, and the file locations the invocation touches.
 *
 * When the tool is unknown, args are missing, or build() throws, the
 * name-based defaults are kept (title = displayName or raw tool name,
 * locations undefined).
 */
function resolveToolMetadata(
  config: Config,
  toolName: string,
  args?: Record<string, unknown>,
): {
  kind: string;
  title: string | object;
  locations?: Array<{ path: string; line?: number | null }>;
} {
  const registry = config.getToolRegistry?.();
  const tool = toolName ? registry?.getTool?.(toolName) : undefined;
  const kind = mapToolKind(tool?.kind as Kind | undefined, toolName);

  let title: string | object = tool?.displayName ?? toolName ?? 'tool_call';
  let locations: Array<{ path: string; line?: number | null }> | undefined;

  if (tool && args) {
    try {
      const invocation = tool.build(args);
      title = `${title}: ${invocation.getDescription()}`;
      locations = invocation.toolLocations().map((location) => ({
        path: location.path,
        line: location.line ?? null,
      }));
    } catch {
      // Build failure: keep the name-based defaults above.
    }
  }
  return { kind, title, locations };
}
/**
 * Maps a registry tool kind onto the restricted set of kinds the export
 * format understands. Two tools get dedicated kinds by name; anything
 * whose kind is outside the allowed set falls back to 'other'.
 */
function mapToolKind(kind: Kind | undefined, toolName?: string): string {
  // Name-based special cases take precedence over the registry kind.
  switch (toolName) {
    case ExitPlanModeTool.Name:
      return 'switch_mode';
    case ToolNames.TODO_WRITE:
      return 'todowrite';
    default:
      break;
  }
  const allowed = [
    'read',
    'edit',
    'delete',
    'move',
    'search',
    'execute',
    'think',
    'fetch',
    'other',
  ];
  return kind && allowed.includes(kind) ? kind : 'other';
}
/**
 * If the tool result display describes a file edit (has both fileName and
 * newContent), converts it into a single 'diff' content entry.
 * Returns null for anything else, including non-object displays.
 */
function extractDiffContent(
  resultDisplay: unknown,
): Array<{ type: string; [key: string]: unknown }> | null {
  if (typeof resultDisplay !== 'object' || resultDisplay === null) {
    return null;
  }
  const display = resultDisplay as Record<string, unknown>;
  const isFileEdit = 'fileName' in display && 'newContent' in display;
  if (!isFileEdit) {
    return null;
  }
  return [
    {
      type: 'diff',
      path: display['fileName'] as string,
      // An edit with no recorded original is treated as a creation.
      oldText: (display['originalContent'] as string) ?? '',
      newText: display['newContent'] as string,
    },
  ];
}
/**
 * Keeps raw input only when it is a string or a non-null object;
 * everything else (numbers, booleans, null, undefined) becomes undefined.
 */
function normalizeRawInput(value: unknown): string | object | undefined {
  const keep =
    typeof value === 'string' || (typeof value === 'object' && value !== null);
  return keep ? (value as string | object) : undefined;
}
/**
 * Flattens genai Parts into export content entries.
 *
 * Non-empty text parts become text entries. functionResponse parts
 * contribute their string 'output' field, else their string 'error'
 * field, else the JSON-serialized response object. Other parts are
 * ignored.
 */
function transformPartsToToolCallContent(
  parts: Part[],
): Array<{ type: string; [key: string]: unknown }> {
  const entries: Array<{ type: string; [key: string]: unknown }> = [];
  const pushText = (text: string) => {
    entries.push({ type: 'content', content: { type: 'text', text } });
  };
  for (const part of parts) {
    if ('text' in part && part.text) {
      pushText(part.text);
    } else if ('functionResponse' in part && part.functionResponse) {
      const response = part.functionResponse.response as Record<
        string,
        unknown
      >;
      const output = response?.['output'];
      const error = response?.['error'];
      if (typeof output === 'string') {
        pushText(output);
      } else if (typeof error === 'string') {
        pushText(error);
      } else {
        pushText(JSON.stringify(response));
      }
    }
  }
  return entries;
}

View file

@ -0,0 +1,54 @@
/**
* @license
* Copyright 2025 Qwen Team
* SPDX-License-Identifier: Apache-2.0
*/
/**
 * Universal export message format - SSOT for all export formats.
 * This is format-agnostic and contains all information needed for any export type.
 */
export interface ExportMessage {
  /** Unique identifier of this record. */
  uuid: string;
  /** Identifier of the preceding record, if any. */
  parentUuid?: string | null;
  /** Session this record belongs to. */
  sessionId?: string;
  /** Record timestamp as a date string (parsed with Date.parse by normalize.ts). */
  timestamp: string;
  /** Discriminator selecting which optional payload below applies. */
  type: 'user' | 'assistant' | 'system' | 'tool_call';
  /** For user/assistant messages */
  message?: {
    role?: string;
    parts?: Array<{ text: string }>;
    content?: string;
  };
  /** Model used for assistant messages */
  model?: string;
  /** For tool_call messages */
  toolCall?: {
    /** Correlates this call with its tool_result record. */
    toolCallId: string;
    /** Export-level kind, e.g. 'read', 'edit', 'execute', or 'other'. */
    kind: string;
    /** Human-readable title; may be a structured object. */
    title: string | object;
    status: 'pending' | 'in_progress' | 'completed' | 'failed';
    /** Tool input as given: a raw string or the structured arguments. */
    rawInput?: string | object;
    /** Output entries; 'content' items carry text, 'diff' items carry file edits. */
    content?: Array<{
      type: string;
      [key: string]: unknown;
    }>;
    /** Files touched by the call, with optional line numbers. */
    locations?: Array<{
      path: string;
      line?: number | null;
    }>;
    /** Call time in epoch milliseconds. */
    timestamp?: number;
  };
}
/**
 * Complete export session data - the single source of truth.
 */
export interface ExportSessionData {
  /** Identifier of the exported session. */
  sessionId: string;
  /** Session start time as a date string. */
  startTime: string;
  /** Ordered messages making up the conversation. */
  messages: ExportMessage[];
}

View file

@ -0,0 +1,13 @@
/**
* @license
* Copyright 2025 Qwen Team
* SPDX-License-Identifier: Apache-2.0
*/
/**
 * Builds a timestamped export filename such as
 * `qwen-code-export-2025-01-01T00-00-00-000Z.md`.
 * Colons and dots in the ISO timestamp are replaced with dashes so the
 * name is valid on common filesystems.
 */
export function generateExportFilename(extension: string): string {
  const stamp = new Date().toISOString().replace(/[:.]/g, '-');
  return ['qwen-code-export-', stamp, '.', extension].join('');
}

View file

@ -4,6 +4,7 @@
* SPDX-License-Identifier: Apache-2.0
*/
import * as path from 'node:path';
import type { Part, FunctionCall } from '@google/genai';
import type {
ResumedSessionData,
@ -12,8 +13,13 @@ import type {
AnyDeclarativeTool,
ToolResultDisplay,
SlashCommandRecordPayload,
AtCommandRecordPayload,
} from '@qwen-code/qwen-code-core';
import type { HistoryItem, HistoryItemWithoutId } from '../types.js';
import type {
HistoryItem,
HistoryItemWithoutId,
IndividualToolCallDisplay,
} from '../types.js';
import { ToolCallStatus } from '../types.js';
/**
@ -137,6 +143,8 @@ function convertToHistoryItems(
config: Config,
): HistoryItemWithoutId[] {
const items: HistoryItemWithoutId[] = [];
const pendingAtCommands: AtCommandRecordPayload[] = [];
let atCommandCounter = 0;
// Track pending tool calls for grouping with results
const pendingToolCalls = new Map<
@ -152,6 +160,59 @@ function convertToHistoryItems(
confirmationDetails: undefined;
}> = [];
  // Converts a recorded @-command (file-reference) payload into tool-call
  // display entries for the history list. Increments the enclosing
  // atCommandCounter so each entry gets a unique callId.
  const buildAtCommandDisplays = (
    payload: AtCommandRecordPayload,
  ): IndividualToolCallDisplay[] => {
    // Error case: single "Read File(s)" entry carrying the error message.
    if (payload.status === 'error') {
      atCommandCounter += 1;
      const filesLabel = payload.filesRead?.length
        ? payload.filesRead.join(', ')
        : 'files';
      return [
        {
          callId: `at-command-${atCommandCounter}`,
          name: 'Read File(s)',
          description: 'Error attempting to read files',
          status: ToolCallStatus.Error,
          // Prefer the recorded message; fall back to a generic error line.
          resultDisplay:
            payload.message || `Error reading files (${filesLabel})`,
          confirmationDetails: undefined,
        },
      ];
    }
    // No files recorded: emit a single generic success entry.
    if (!payload.filesRead?.length) {
      atCommandCounter += 1;
      return [
        {
          callId: `at-command-${atCommandCounter}`,
          name: 'Read File',
          description: 'Read File(s)',
          status: ToolCallStatus.Success,
          resultDisplay: undefined,
          confirmationDetails: undefined,
        },
      ];
    }
    // Success case: one entry per file; a trailing '/' marks a directory.
    return payload.filesRead.map((filePath) => {
      atCommandCounter += 1;
      const isDir = filePath.endsWith('/');
      return {
        callId: `at-command-${atCommandCounter}`,
        name: isDir ? 'Read Directory' : 'Read File',
        description: isDir
          ? `Read directory ${path.basename(filePath)}`
          : `Read file ${path.basename(filePath)}`,
        status: ToolCallStatus.Success,
        resultDisplay: undefined,
        confirmationDetails: undefined,
      };
    });
  };
for (const record of conversation.messages) {
if (record.type === 'system') {
if (record.subtype === 'slash_command') {
@ -180,10 +241,44 @@ function convertToHistoryItems(
}
}
}
if (record.subtype === 'at_command') {
const payload = record.systemPayload as
| AtCommandRecordPayload
| undefined;
if (!payload) continue;
pendingAtCommands.push(payload);
}
continue;
}
switch (record.type) {
case 'user': {
if (pendingAtCommands.length > 0) {
// Flush any pending tool group before user message
if (currentToolGroup.length > 0) {
items.push({
type: 'tool_group',
tools: [...currentToolGroup],
});
currentToolGroup = [];
}
const payload = pendingAtCommands.shift()!;
const text =
payload.userText ||
extractTextFromParts(record.message?.parts as Part[]);
if (text) {
items.push({ type: 'user', text });
}
const toolDisplays = buildAtCommandDisplays(payload);
if (toolDisplays.length > 0) {
items.push({
type: 'tool_group',
tools: toolDisplays,
});
}
break;
}
// Flush any pending tool group before user message
if (currentToolGroup.length > 0) {
items.push({
@ -290,6 +385,31 @@ function convertToHistoryItems(
}
}
if (pendingAtCommands.length > 0) {
for (const payload of pendingAtCommands) {
// Flush any pending tool group before standalone @-command
if (currentToolGroup.length > 0) {
items.push({
type: 'tool_group',
tools: [...currentToolGroup],
});
currentToolGroup = [];
}
const text = payload.userText;
if (text) {
items.push({ type: 'user', text });
}
const toolDisplays = buildAtCommandDisplays(payload);
if (toolDisplays.length > 0) {
items.push({
type: 'tool_group',
tools: toolDisplays,
});
}
}
}
// Flush any remaining tool group
if (currentToolGroup.length > 0) {
items.push({