fix(cli): expand MCP @server: resource references

This commit is contained in:
liqoingyu 2026-01-18 18:47:33 +08:00
parent a38a5ba87d
commit c8a148b92e
5 changed files with 499 additions and 85 deletions

View file

@ -26,6 +26,7 @@ import * as path from 'node:path';
describe('handleAtCommand', () => {
let testRootDir: string;
let mockConfig: Config;
let registry: ToolRegistry;
const mockAddItem: Mock<UseHistoryManagerReturn['addItem']> = vi.fn();
const mockOnDebugMessage: Mock<(message: string) => void> = vi.fn();
@ -53,6 +54,7 @@ describe('handleAtCommand', () => {
getToolRegistry,
getTargetDir: () => testRootDir,
isSandboxed: () => false,
isTrustedFolder: () => true,
getFileService: () => new FileDiscoveryService(testRootDir),
getFileFilteringRespectGitIgnore: () => true,
getFileFilteringRespectQwenIgnore: () => true,
@ -84,7 +86,7 @@ describe('handleAtCommand', () => {
getTruncateToolOutputLines: () => 500,
} as unknown as Config;
const registry = new ToolRegistry(mockConfig);
registry = new ToolRegistry(mockConfig);
registry.registerTool(new ReadManyFilesTool(mockConfig));
registry.registerTool(new GlobTool(mockConfig));
getToolRegistry.mockReturnValue(registry);
@ -204,6 +206,57 @@ describe('handleAtCommand', () => {
);
});
// Verifies that the documented "@server: resource" form (with a space after
// the colon) is recognized, normalized, and expanded into the referenced MCP
// resource's content.
it('should expand an MCP resource reference in @server: resource format', async () => {
  // Expose a configured MCP server named "github" so the "@github:" prefix is
  // treated as an MCP resource reference instead of a file path.
  (mockConfig as unknown as { getMcpServers: () => unknown }).getMcpServers =
    () =>
      ({
        github: {},
      }) as unknown;
  // Stub the registry read so the test needs no real MCP transport.
  vi.spyOn(registry, 'readMcpResource').mockResolvedValue({
    contents: [
      {
        uri: 'github://repos/owner/repo/issues',
        mimeType: 'application/json',
        text: '{"ok":true}',
      },
    ],
  } as unknown as Awaited<ReturnType<ToolRegistry['readMcpResource']>>);
  const query = 'Show me the data from @github: repos/owner/repo/issues';
  const result = await handleAtCommand({
    query,
    config: mockConfig,
    addItem: mockAddItem,
    onDebugMessage: mockOnDebugMessage,
    messageId: 1000,
    signal: abortController.signal,
  });
  // The reference is normalized (space after the colon removed) and the
  // resource content is appended inside a delimited MCP-resource section.
  expect(result).toEqual({
    processedQuery: [
      { text: 'Show me the data from @github:repos/owner/repo/issues' },
      { text: '\n--- Content from referenced MCP resources ---' },
      { text: '\nContent from @github:repos/owner/repo/issues:\n' },
      { text: '{"ok":true}' },
      { text: '\n--- End of MCP resource content ---' },
    ],
    shouldProceed: true,
  });
  // The read targets the right server with the fully-qualified URI.
  expect(registry.readMcpResource).toHaveBeenCalledWith(
    'github',
    'github://repos/owner/repo/issues',
  );
  // A successful tool_group entry is recorded in the history.
  expect(mockAddItem).toHaveBeenCalledWith(
    expect.objectContaining({
      type: 'tool_group',
      tools: [expect.objectContaining({ status: ToolCallStatus.Success })],
    }),
    1000,
  );
});
it('should handle query with text before and after @command', async () => {
const fileContent = 'Markdown content.';
const filePath = await createTestFile(

View file

@ -36,6 +36,12 @@ interface AtCommandPart {
content: string;
}
/**
 * A parsed `@server:resource` reference that points at an MCP resource
 * exposed by a configured MCP server.
 */
interface McpResourceAtReference {
  atCommand: string; // e.g. "@github:repos/owner/repo/issues"
  serverName: string;
  uri: string; // e.g. "github://repos/owner/repo/issues"
}
/**
* Parses a query string to find all '@<path>' commands and text segments.
* Handles \ escaped spaces within paths.
@ -110,6 +116,191 @@ function parseAllAtCommands(query: string): AtCommandPart[] {
);
}
/**
 * Collects the names of every MCP server the user has configured.
 *
 * When a standalone MCP server command is configured, the implicit server
 * name "mcp" is included as well.
 */
function getConfiguredMcpServerNames(config: Config): Set<string> {
  const fromConfig = Object.keys(config.getMcpServers() ?? {});
  const allNames = config.getMcpServerCommand()
    ? [...fromConfig, 'mcp']
    : fromConfig;
  return new Set(allNames);
}
/**
 * Builds a fully-qualified MCP resource URI from a server name and resource.
 *
 * Resources that already carry a scheme ("://") pass through unchanged.
 * Otherwise the server name becomes the scheme, with at most one leading "/"
 * stripped from the resource so "@github:/repos/x" and "@github:repos/x"
 * normalize identically.
 */
function normalizeMcpResourceUri(serverName: string, resource: string): string {
  if (resource.includes('://')) {
    return resource;
  }
  return `${serverName}://${resource.replace(/^\//, '')}`;
}
/**
 * Extracts the first token from `text` and returns it with the unconsumed
 * remainder, or null when no token is present.
 *
 * Leading whitespace is skipped. The token ends at the first unescaped
 * delimiter (whitespace or common punctuation: , ; ! ? ( ) [ ] { }), or at a
 * "." that trails the token (followed by whitespace or end of input) so that
 * sentence punctuation is not swallowed. A backslash escapes the following
 * character, allowing e.g. escaped spaces inside the token; the backslash
 * itself is not included in the result.
 */
function splitLeadingToken(
  text: string,
): { token: string; rest: string } | null {
  const delimiter = /[,\s;!?()[\]{}]/;
  let pos = 0;
  while (pos < text.length && /\s/.test(text[pos])) {
    pos += 1;
  }
  if (pos >= text.length) {
    return null;
  }
  const chars: string[] = [];
  let escaped = false;
  for (; pos < text.length; pos++) {
    const ch = text[pos];
    if (escaped) {
      chars.push(ch);
      escaped = false;
      continue;
    }
    if (ch === '\\') {
      escaped = true;
      continue;
    }
    if (delimiter.test(ch)) {
      break;
    }
    // A period only terminates the token when it trails it — i.e. when the
    // following character is whitespace or the end of the string.
    if (ch === '.') {
      const following = pos + 1 < text.length ? text[pos + 1] : '';
      if (following === '' || /\s/.test(following)) {
        break;
      }
    }
    chars.push(ch);
  }
  if (chars.length === 0) {
    return null;
  }
  return { token: chars.join(''), rest: text.slice(pos) };
}
/**
 * Scans parsed '@' command parts for MCP resource references of the form
 * "@server:resource", where `server` matches a configured MCP server name.
 * Also supports the documented split form "@server: resource", where the
 * resource token lands in the following text part.
 *
 * Returns the part list with each matched reference normalized to a single
 * "@server:resource" atPath part (whitespace-only text parts removed), plus
 * the extracted references. Unmatched parts pass through unchanged so the
 * regular file-path handling can process them.
 *
 * Fix: the split-form merge previously assigned into the caller's `parts`
 * array (`parts[i + 1] = …`), mutating the input as a side effect. The scan
 * now works on a local copy so the caller's array is never modified.
 */
function extractMcpResourceAtReferences(
  parts: AtCommandPart[],
  config: Config,
): { parts: AtCommandPart[]; refs: McpResourceAtReference[] } {
  const configuredServers = getConfiguredMcpServerNames(config);
  const refs: McpResourceAtReference[] = [];
  const merged: AtCommandPart[] = [];
  // Shallow copy: the split-form handling below rewrites the element after
  // the current one, and that must not leak into the caller's array.
  const work = [...parts];
  for (let i = 0; i < work.length; i++) {
    const part = work[i];
    if (part.type !== 'atPath') {
      merged.push(part);
      continue;
    }
    const atText = part.content; // e.g. "@github:" or "@github:repos/..."
    const colonIndex = atText.indexOf(':');
    // Require "@<name>:" with a non-empty server name (colonIndex > 1).
    if (!atText.startsWith('@') || colonIndex <= 1) {
      merged.push(part);
      continue;
    }
    const serverName = atText.slice(1, colonIndex);
    if (!configuredServers.has(serverName)) {
      merged.push(part);
      continue;
    }
    let resource = atText.slice(colonIndex + 1);
    // Support the documented "@server: resource" format where the resource is
    // separated into the following text part.
    if (!resource) {
      const next = work[i + 1];
      if (next?.type === 'text') {
        const tokenInfo = splitLeadingToken(next.content);
        if (tokenInfo) {
          resource = tokenInfo.token;
          // Replace the consumed token in the local copy; the next loop
          // iteration processes whatever text remains.
          work[i + 1] = { type: 'text', content: tokenInfo.rest };
        }
      }
    }
    if (!resource) {
      merged.push(part);
      continue;
    }
    const normalizedAtCommand = `@${serverName}:${resource}`;
    refs.push({
      atCommand: normalizedAtCommand,
      serverName,
      uri: normalizeMcpResourceUri(serverName, resource),
    });
    merged.push({ type: 'atPath', content: normalizedAtCommand });
  }
  return {
    // Drop text parts reduced to pure whitespace by the split-form merge.
    parts: merged.filter(
      (p) => !(p.type === 'text' && p.content.trim() === ''),
    ),
    refs,
  };
}
/**
 * Flattens an MCP resource-read response into one display string.
 *
 * Text content items are included verbatim (joined with blank lines); binary
 * (blob) items are replaced with a short placeholder. The combined output is
 * trimmed first to the line budget and then to the character budget, with a
 * "[truncated]" marker appended when either limit is exceeded. Malformed
 * responses produce an error marker instead of throwing.
 */
function formatMcpResourceContents(
  raw: unknown,
  limits: { maxCharsPerResource: number; maxLinesPerResource: number },
): string {
  if (!raw || typeof raw !== 'object') {
    return '[Error: Invalid MCP resource response]';
  }
  const contents = (raw as { contents?: unknown }).contents;
  if (!Array.isArray(contents)) {
    return '[Error: Invalid MCP resource response]';
  }
  const pieces: string[] = [];
  for (const entry of contents) {
    if (!entry || typeof entry !== 'object') {
      continue;
    }
    const { text, blob, mimeType } = entry as {
      text?: unknown;
      blob?: unknown;
      mimeType?: unknown;
    };
    if (typeof text === 'string') {
      pieces.push(text);
    } else if (typeof blob === 'string') {
      // NOTE(review): blob.length here is the length of the (base64) string,
      // not the decoded byte count — confirm whether "bytes" should decode.
      const mimeTypeLabel =
        typeof mimeType === 'string' ? mimeType : 'application/octet-stream';
      pieces.push(
        `[Binary MCP resource omitted (mimeType: ${mimeTypeLabel}, bytes: ${blob.length})]`,
      );
    }
  }
  let combined = pieces.join('\n\n');
  const { maxLinesPerResource, maxCharsPerResource } = limits;
  if (Number.isFinite(maxLinesPerResource)) {
    const lines = combined.split('\n');
    if (lines.length > maxLinesPerResource) {
      combined = `${lines.slice(0, maxLinesPerResource).join('\n')}\n[truncated]`;
    }
  }
  if (
    Number.isFinite(maxCharsPerResource) &&
    combined.length > maxCharsPerResource
  ) {
    combined = `${combined.slice(0, maxCharsPerResource)}\n[truncated]`;
  }
  return combined;
}
/**
* Processes user input potentially containing one or more '@<path>' commands.
* If found, it attempts to read the specified files/directories using the
@ -127,10 +318,17 @@ export async function handleAtCommand({
messageId: userMessageTimestamp,
signal,
}: HandleAtCommandParams): Promise<HandleAtCommandResult> {
const commandParts = parseAllAtCommands(query);
const parsedParts = parseAllAtCommands(query);
const { parts: commandParts, refs: mcpResourceRefs } =
extractMcpResourceAtReferences(parsedParts, config);
const mcpAtCommands = new Set(mcpResourceRefs.map((r) => r.atCommand));
const atPathCommandParts = commandParts.filter(
(part) => part.type === 'atPath',
);
const fileAtPathCommandParts = atPathCommandParts.filter(
(part) => !mcpAtCommands.has(part.content),
);
if (atPathCommandParts.length === 0) {
return { processedQuery: [{ text: query }], shouldProceed: true };
@ -154,15 +352,7 @@ export async function handleAtCommand({
const readManyFilesTool = toolRegistry.getTool('read_many_files');
const globTool = toolRegistry.getTool('glob');
if (!readManyFilesTool) {
addItem(
{ type: 'error', text: 'Error: read_many_files tool not found.' },
userMessageTimestamp,
);
return { processedQuery: null, shouldProceed: false };
}
for (const atPathPart of atPathCommandParts) {
for (const atPathPart of fileAtPathCommandParts) {
const originalAtPath = atPathPart.content; // e.g., "@file.txt" or "@"
if (originalAtPath === '@') {
@ -377,7 +567,7 @@ export async function handleAtCommand({
}
// Fallback for lone "@" or completely invalid @-commands resulting in empty initialQueryText
if (pathSpecsToRead.length === 0) {
if (pathSpecsToRead.length === 0 && mcpResourceRefs.length === 0) {
onDebugMessage('No valid file paths found in @ commands to read.');
if (initialQueryText === '@' && query.trim() === '@') {
// If the only thing was a lone @, pass original query (which might have spaces)
@ -395,86 +585,185 @@ export async function handleAtCommand({
const processedQueryParts: PartUnion[] = [{ text: initialQueryText }];
const toolArgs = {
paths: pathSpecsToRead,
file_filtering_options: {
respect_git_ignore: respectFileIgnore.respectGitIgnore,
respect_qwen_ignore: respectFileIgnore.respectQwenIgnore,
},
// Use configuration setting
};
let toolCallDisplay: IndividualToolCallDisplay;
const toolDisplays: IndividualToolCallDisplay[] = [];
let invocation: AnyToolInvocation | undefined = undefined;
try {
invocation = readManyFilesTool.build(toolArgs);
const result = await invocation.execute(signal);
toolCallDisplay = {
callId: `client-read-${userMessageTimestamp}`,
name: readManyFilesTool.displayName,
description: invocation.getDescription(),
status: ToolCallStatus.Success,
resultDisplay:
result.returnDisplay ||
`Successfully read: ${contentLabelsForDisplay.join(', ')}`,
confirmationDetails: undefined,
};
if (Array.isArray(result.llmContent)) {
const fileContentRegex = /^--- (.*?) ---\n\n([\s\S]*?)\n\n$/;
processedQueryParts.push({
text: '\n--- Content from referenced files ---',
});
for (const part of result.llmContent) {
if (typeof part === 'string') {
const match = fileContentRegex.exec(part);
if (match) {
const filePathSpecInContent = match[1]; // This is a resolved pathSpec
const fileActualContent = match[2].trim();
processedQueryParts.push({
text: `\nContent from @${filePathSpecInContent}:\n`,
});
processedQueryParts.push({ text: fileActualContent });
} else {
processedQueryParts.push({ text: part });
}
} else {
// part is a Part object.
processedQueryParts.push(part);
}
}
} else {
onDebugMessage(
'read_many_files tool returned no content or empty content.',
if (pathSpecsToRead.length > 0) {
if (!readManyFilesTool) {
addItem(
{ type: 'error', text: 'Error: read_many_files tool not found.' },
userMessageTimestamp,
);
return { processedQuery: null, shouldProceed: false };
}
addItem(
{ type: 'tool_group', tools: [toolCallDisplay] } as Omit<
HistoryItem,
'id'
>,
userMessageTimestamp,
);
return { processedQuery: processedQueryParts, shouldProceed: true };
} catch (error: unknown) {
toolCallDisplay = {
callId: `client-read-${userMessageTimestamp}`,
name: readManyFilesTool.displayName,
description:
invocation?.getDescription() ??
'Error attempting to execute tool to read files',
status: ToolCallStatus.Error,
resultDisplay: `Error reading files (${contentLabelsForDisplay.join(', ')}): ${getErrorMessage(error)}`,
confirmationDetails: undefined,
const toolArgs = {
paths: pathSpecsToRead,
file_filtering_options: {
respect_git_ignore: respectFileIgnore.respectGitIgnore,
respect_qwen_ignore: respectFileIgnore.respectQwenIgnore,
},
// Use configuration setting
};
let invocation: AnyToolInvocation | undefined = undefined;
try {
invocation = readManyFilesTool.build(toolArgs);
const result = await invocation.execute(signal);
toolDisplays.push({
callId: `client-read-${userMessageTimestamp}`,
name: readManyFilesTool.displayName,
description: invocation.getDescription(),
status: ToolCallStatus.Success,
resultDisplay:
result.returnDisplay ||
`Successfully read: ${contentLabelsForDisplay.join(', ')}`,
confirmationDetails: undefined,
});
if (Array.isArray(result.llmContent)) {
const fileContentRegex = /^--- (.*?) ---\n\n([\s\S]*?)\n\n$/;
processedQueryParts.push({
text: '\n--- Content from referenced files ---',
});
for (const part of result.llmContent) {
if (typeof part === 'string') {
const match = fileContentRegex.exec(part);
if (match) {
const filePathSpecInContent = match[1]; // This is a resolved pathSpec
const fileActualContent = match[2].trim();
processedQueryParts.push({
text: `\nContent from @${filePathSpecInContent}:\n`,
});
processedQueryParts.push({ text: fileActualContent });
} else {
processedQueryParts.push({ text: part });
}
} else {
// part is a Part object.
processedQueryParts.push(part);
}
}
} else {
onDebugMessage(
'read_many_files tool returned no content or empty content.',
);
}
} catch (error: unknown) {
toolDisplays.push({
callId: `client-read-${userMessageTimestamp}`,
name: readManyFilesTool.displayName,
description:
invocation?.getDescription() ??
'Error attempting to execute tool to read files',
status: ToolCallStatus.Error,
resultDisplay: `Error reading files (${contentLabelsForDisplay.join(', ')}): ${getErrorMessage(error)}`,
confirmationDetails: undefined,
});
addItem(
{ type: 'tool_group', tools: toolDisplays } as Omit<HistoryItem, 'id'>,
userMessageTimestamp,
);
return { processedQuery: null, shouldProceed: false };
}
}
if (mcpResourceRefs.length > 0) {
const totalCharLimit = config.getTruncateToolOutputThreshold();
const totalLineLimit = config.getTruncateToolOutputLines();
const maxCharsPerResource = Number.isFinite(totalCharLimit)
? Math.floor(totalCharLimit / Math.max(1, mcpResourceRefs.length))
: Number.POSITIVE_INFINITY;
const maxLinesPerResource = Number.isFinite(totalLineLimit)
? Math.floor(totalLineLimit / Math.max(1, mcpResourceRefs.length))
: Number.POSITIVE_INFINITY;
processedQueryParts.push({
text: '\n--- Content from referenced MCP resources ---',
});
for (let i = 0; i < mcpResourceRefs.length; i++) {
const ref = mcpResourceRefs[i];
let resourceResult: unknown;
try {
resourceResult = await new Promise((resolve, reject) => {
if (signal.aborted) {
const error = new Error('MCP resource read aborted');
error.name = 'AbortError';
reject(error);
return;
}
const onAbort = () => {
cleanup();
const error = new Error('MCP resource read aborted');
error.name = 'AbortError';
reject(error);
};
const cleanup = () => {
signal.removeEventListener('abort', onAbort);
};
signal.addEventListener('abort', onAbort, { once: true });
toolRegistry
.readMcpResource(ref.serverName, ref.uri)
.then((res) => {
cleanup();
resolve(res);
})
.catch((err) => {
cleanup();
reject(err);
});
});
toolDisplays.push({
callId: `client-mcp-resource-${userMessageTimestamp}-${i}`,
name: 'McpResourceRead',
description: `Read MCP resource ${ref.uri} (server: ${ref.serverName})`,
status: ToolCallStatus.Success,
resultDisplay: `Read: ${ref.uri}`,
confirmationDetails: undefined,
});
} catch (error: unknown) {
toolDisplays.push({
callId: `client-mcp-resource-${userMessageTimestamp}-${i}`,
name: 'McpResourceRead',
description: `Read MCP resource ${ref.uri} (server: ${ref.serverName})`,
status: ToolCallStatus.Error,
resultDisplay: `Error reading MCP resource (${ref.uri}): ${getErrorMessage(error)}`,
confirmationDetails: undefined,
});
addItem(
{ type: 'tool_group', tools: toolDisplays } as Omit<
HistoryItem,
'id'
>,
userMessageTimestamp,
);
return { processedQuery: null, shouldProceed: false };
}
processedQueryParts.push({
text: `\nContent from ${ref.atCommand}:\n`,
});
processedQueryParts.push({
text: formatMcpResourceContents(resourceResult, {
maxCharsPerResource,
maxLinesPerResource,
}),
});
}
processedQueryParts.push({ text: '\n--- End of MCP resource content ---' });
}
if (toolDisplays.length > 0) {
addItem(
{ type: 'tool_group', tools: [toolCallDisplay] } as Omit<
HistoryItem,
'id'
>,
{ type: 'tool_group', tools: toolDisplays } as Omit<HistoryItem, 'id'>,
userMessageTimestamp,
);
return { processedQuery: null, shouldProceed: false };
}
return { processedQuery: processedQueryParts, shouldProceed: true };
}