mirror of
https://github.com/QwenLM/qwen-code.git
synced 2026-04-28 11:41:04 +00:00
Merge pull request #1673 from QwenLM/refactor/read-many-files-util
refactor: remove read_many_files tool, add readManyFiles utility for user @-commands
This commit is contained in:
commit
30027c8ce9
26 changed files with 1128 additions and 2693 deletions
|
|
@ -11,6 +11,7 @@ import * as path from 'node:path';
|
|||
import { Session } from './Session.js';
|
||||
import type { Config, GeminiChat } from '@qwen-code/qwen-code-core';
|
||||
import { ApprovalMode, AuthType } from '@qwen-code/qwen-code-core';
|
||||
import * as core from '@qwen-code/qwen-code-core';
|
||||
import type * as acp from '../acp.js';
|
||||
import type { LoadedSettings } from '../../config/settings.js';
|
||||
import * as nonInteractiveCliCommands from '../../nonInteractiveCliCommands.js';
|
||||
|
|
@ -213,24 +214,14 @@ describe('Session', () => {
|
|||
try {
|
||||
await fs.writeFile(filePath, '# Test\n', 'utf8');
|
||||
|
||||
const readManyFilesTool = {
|
||||
buildAndExecute: vi.fn().mockResolvedValue({
|
||||
llmContent: 'file content',
|
||||
returnDisplay: 'ok',
|
||||
}),
|
||||
};
|
||||
const toolRegistry = {
|
||||
getTool: vi.fn((name: string) =>
|
||||
name === 'read_many_files' ? readManyFilesTool : undefined,
|
||||
),
|
||||
};
|
||||
const fileService = {
|
||||
shouldGitIgnoreFile: vi.fn().mockReturnValue(false),
|
||||
};
|
||||
const readManyFilesSpy = vi
|
||||
.spyOn(core, 'readManyFiles')
|
||||
.mockResolvedValue({
|
||||
contentParts: 'file content',
|
||||
files: [],
|
||||
});
|
||||
|
||||
mockConfig.getTargetDir = vi.fn().mockReturnValue(tempDir);
|
||||
mockConfig.getToolRegistry = vi.fn().mockReturnValue(toolRegistry);
|
||||
mockConfig.getFileService = vi.fn().mockReturnValue(fileService);
|
||||
mockChat.sendMessageStream = vi
|
||||
.fn()
|
||||
.mockResolvedValue((async function* () {})());
|
||||
|
|
@ -249,10 +240,10 @@ describe('Session', () => {
|
|||
|
||||
await session.prompt(promptRequest);
|
||||
|
||||
expect(readManyFilesTool.buildAndExecute).toHaveBeenCalledWith(
|
||||
{ paths: [fileName] },
|
||||
expect.any(AbortSignal),
|
||||
);
|
||||
expect(readManyFilesSpy).toHaveBeenCalledWith(mockConfig, {
|
||||
paths: [fileName],
|
||||
signal: expect.any(AbortSignal),
|
||||
});
|
||||
} finally {
|
||||
await fs.rm(tempDir, { recursive: true, force: true });
|
||||
}
|
||||
|
|
|
|||
|
|
@ -28,20 +28,16 @@ import {
|
|||
logToolCall,
|
||||
logUserPrompt,
|
||||
getErrorStatus,
|
||||
isWithinRoot,
|
||||
isNodeError,
|
||||
TaskTool,
|
||||
UserPromptEvent,
|
||||
TodoWriteTool,
|
||||
ExitPlanModeTool,
|
||||
readManyFiles,
|
||||
} from '@qwen-code/qwen-code-core';
|
||||
|
||||
import * as acp from '../acp.js';
|
||||
import type { LoadedSettings } from '../../config/settings.js';
|
||||
import * as fs from 'node:fs/promises';
|
||||
import * as path from 'node:path';
|
||||
import { z } from 'zod';
|
||||
import { getErrorMessage } from '../../utils/errors.js';
|
||||
import { normalizePartList } from '../../utils/nonInteractiveHelpers.js';
|
||||
import {
|
||||
handleSlashCommand,
|
||||
|
|
@ -850,120 +846,11 @@ export class Session implements SessionContext {
|
|||
return parts;
|
||||
}
|
||||
|
||||
const atPathToResolvedSpecMap = new Map<string, string>();
|
||||
|
||||
// Get centralized file discovery service
|
||||
const fileDiscovery = this.config.getFileService();
|
||||
const respectGitIgnore = this.config.getFileFilteringRespectGitIgnore();
|
||||
|
||||
const pathSpecsToRead: string[] = [];
|
||||
const contentLabelsForDisplay: string[] = [];
|
||||
const ignoredPaths: string[] = [];
|
||||
|
||||
const toolRegistry = this.config.getToolRegistry();
|
||||
const readManyFilesTool = toolRegistry.getTool('read_many_files');
|
||||
const globTool = toolRegistry.getTool('glob');
|
||||
|
||||
if (!readManyFilesTool) {
|
||||
throw new Error('Error: read_many_files tool not found.');
|
||||
}
|
||||
|
||||
for (const atPathPart of atPathCommandParts) {
|
||||
const pathName = atPathPart.fileData!.fileUri;
|
||||
// Check if path should be ignored by git
|
||||
if (fileDiscovery.shouldGitIgnoreFile(pathName)) {
|
||||
ignoredPaths.push(pathName);
|
||||
const reason = respectGitIgnore
|
||||
? 'git-ignored and will be skipped'
|
||||
: 'ignored by custom patterns';
|
||||
console.warn(`Path ${pathName} is ${reason}.`);
|
||||
continue;
|
||||
}
|
||||
let currentPathSpec = pathName;
|
||||
let resolvedSuccessfully = false;
|
||||
try {
|
||||
const absolutePath = path.resolve(this.config.getTargetDir(), pathName);
|
||||
if (isWithinRoot(absolutePath, this.config.getTargetDir())) {
|
||||
const stats = await fs.stat(absolutePath);
|
||||
if (stats.isDirectory()) {
|
||||
currentPathSpec = pathName.endsWith('/')
|
||||
? `${pathName}**`
|
||||
: `${pathName}/**`;
|
||||
this.debug(
|
||||
`Path ${pathName} resolved to directory, using glob: ${currentPathSpec}`,
|
||||
);
|
||||
} else {
|
||||
this.debug(`Path ${pathName} resolved to file: ${currentPathSpec}`);
|
||||
}
|
||||
resolvedSuccessfully = true;
|
||||
} else {
|
||||
this.debug(
|
||||
`Path ${pathName} is outside the project directory. Skipping.`,
|
||||
);
|
||||
}
|
||||
} catch (error) {
|
||||
if (isNodeError(error) && error.code === 'ENOENT') {
|
||||
if (this.config.getEnableRecursiveFileSearch() && globTool) {
|
||||
this.debug(
|
||||
`Path ${pathName} not found directly, attempting glob search.`,
|
||||
);
|
||||
try {
|
||||
const globResult = await globTool.buildAndExecute(
|
||||
{
|
||||
pattern: `**/*${pathName}*`,
|
||||
path: this.config.getTargetDir(),
|
||||
},
|
||||
abortSignal,
|
||||
);
|
||||
if (
|
||||
globResult.llmContent &&
|
||||
typeof globResult.llmContent === 'string' &&
|
||||
!globResult.llmContent.startsWith('No files found') &&
|
||||
!globResult.llmContent.startsWith('Error:')
|
||||
) {
|
||||
const lines = globResult.llmContent.split('\n');
|
||||
if (lines.length > 1 && lines[1]) {
|
||||
const firstMatchAbsolute = lines[1].trim();
|
||||
currentPathSpec = path.relative(
|
||||
this.config.getTargetDir(),
|
||||
firstMatchAbsolute,
|
||||
);
|
||||
this.debug(
|
||||
`Glob search for ${pathName} found ${firstMatchAbsolute}, using relative path: ${currentPathSpec}`,
|
||||
);
|
||||
resolvedSuccessfully = true;
|
||||
} else {
|
||||
this.debug(
|
||||
`Glob search for '**/*${pathName}*' did not return a usable path. Path ${pathName} will be skipped.`,
|
||||
);
|
||||
}
|
||||
} else {
|
||||
this.debug(
|
||||
`Glob search for '**/*${pathName}*' found no files or an error. Path ${pathName} will be skipped.`,
|
||||
);
|
||||
}
|
||||
} catch (globError) {
|
||||
console.error(
|
||||
`Error during glob search for ${pathName}: ${getErrorMessage(globError)}`,
|
||||
);
|
||||
}
|
||||
} else {
|
||||
this.debug(
|
||||
`Glob tool not found. Path ${pathName} will be skipped.`,
|
||||
);
|
||||
}
|
||||
} else {
|
||||
console.error(
|
||||
`Error stating path ${pathName}. Path ${pathName} will be skipped.`,
|
||||
);
|
||||
}
|
||||
}
|
||||
if (resolvedSuccessfully) {
|
||||
pathSpecsToRead.push(currentPathSpec);
|
||||
atPathToResolvedSpecMap.set(pathName, currentPathSpec);
|
||||
contentLabelsForDisplay.push(pathName);
|
||||
}
|
||||
}
|
||||
// Extract paths from @ commands - pass directly to readManyFiles without filtering
|
||||
// since this is user-triggered behavior, not LLM-triggered
|
||||
const pathSpecsToRead: string[] = atPathCommandParts.map(
|
||||
(part) => part.fileData!.fileUri,
|
||||
);
|
||||
|
||||
// Construct the initial part of the query for the LLM
|
||||
let initialQueryText = '';
|
||||
|
|
@ -971,70 +858,49 @@ export class Session implements SessionContext {
|
|||
const chunk = parts[i];
|
||||
if ('text' in chunk) {
|
||||
initialQueryText += chunk.text;
|
||||
} else {
|
||||
// type === 'atPath'
|
||||
const resolvedSpec =
|
||||
chunk.fileData && atPathToResolvedSpecMap.get(chunk.fileData.fileUri);
|
||||
} else if ('fileData' in chunk) {
|
||||
const pathName = chunk.fileData!.fileUri;
|
||||
if (
|
||||
i > 0 &&
|
||||
initialQueryText.length > 0 &&
|
||||
!initialQueryText.endsWith(' ') &&
|
||||
resolvedSpec
|
||||
!initialQueryText.endsWith(' ')
|
||||
) {
|
||||
// Add space if previous part was text and didn't end with space, or if previous was @path
|
||||
const prevPart = parts[i - 1];
|
||||
if (
|
||||
'text' in prevPart ||
|
||||
('fileData' in prevPart &&
|
||||
atPathToResolvedSpecMap.has(prevPart.fileData!.fileUri))
|
||||
) {
|
||||
initialQueryText += ' ';
|
||||
}
|
||||
}
|
||||
// Append the resolved path spec for display purposes
|
||||
if (resolvedSpec) {
|
||||
initialQueryText += `@${resolvedSpec}`;
|
||||
initialQueryText += ' ';
|
||||
}
|
||||
initialQueryText += `@${pathName}`;
|
||||
}
|
||||
}
|
||||
|
||||
// Handle ignored paths message
|
||||
let ignoredPathsMessage = '';
|
||||
if (ignoredPaths.length > 0) {
|
||||
const pathList = ignoredPaths.map((p) => `- ${p}`).join('\n');
|
||||
ignoredPathsMessage = `Note: The following paths were skipped because they are ignored:\n${pathList}\n\n`;
|
||||
}
|
||||
|
||||
const processedQueryParts: Part[] = [];
|
||||
|
||||
// Read files using read_many_files tool
|
||||
// Read files using readManyFiles utility
|
||||
if (pathSpecsToRead.length > 0) {
|
||||
const readResult = await readManyFilesTool.buildAndExecute(
|
||||
{
|
||||
paths: pathSpecsToRead,
|
||||
},
|
||||
abortSignal,
|
||||
);
|
||||
const readResult = await readManyFiles(this.config, {
|
||||
paths: pathSpecsToRead,
|
||||
signal: abortSignal,
|
||||
});
|
||||
|
||||
const contentForLlm =
|
||||
typeof readResult.llmContent === 'string'
|
||||
? readResult.llmContent
|
||||
: JSON.stringify(readResult.llmContent);
|
||||
const contentParts = Array.isArray(readResult.contentParts)
|
||||
? readResult.contentParts
|
||||
: [readResult.contentParts];
|
||||
|
||||
// Combine content label, ignored paths message, file content, and user query
|
||||
const combinedText = `${ignoredPathsMessage}${contentForLlm}`.trim();
|
||||
processedQueryParts.push({ text: combinedText });
|
||||
// Add initial query text first
|
||||
processedQueryParts.push({ text: initialQueryText });
|
||||
|
||||
// Then add content parts (preserving binary files as inlineData)
|
||||
for (const part of contentParts) {
|
||||
if (typeof part === 'string') {
|
||||
processedQueryParts.push({ text: part });
|
||||
} else {
|
||||
processedQueryParts.push(part);
|
||||
}
|
||||
}
|
||||
} else if (embeddedContext.length > 0) {
|
||||
// No @path files to read, but we have embedded context
|
||||
processedQueryParts.push({
|
||||
text: `${ignoredPathsMessage}${initialQueryText}`.trim(),
|
||||
});
|
||||
processedQueryParts.push({ text: initialQueryText.trim() });
|
||||
} else {
|
||||
// No @path files found or resolved
|
||||
processedQueryParts.push({
|
||||
text: `${ignoredPathsMessage}${initialQueryText}`.trim(),
|
||||
});
|
||||
// No @path files found
|
||||
processedQueryParts.push({ text: initialQueryText.trim() });
|
||||
}
|
||||
|
||||
// Process embedded context from resource blocks
|
||||
|
|
|
|||
|
|
@ -103,7 +103,6 @@ export interface CliArgs {
|
|||
debug: boolean | undefined;
|
||||
prompt: string | undefined;
|
||||
promptInteractive: string | undefined;
|
||||
allFiles: boolean | undefined;
|
||||
yolo: boolean | undefined;
|
||||
approvalMode: string | undefined;
|
||||
telemetry: boolean | undefined;
|
||||
|
|
@ -290,12 +289,6 @@ export async function parseArguments(settings: Settings): Promise<CliArgs> {
|
|||
type: 'string',
|
||||
description: 'Sandbox image URI.',
|
||||
})
|
||||
.option('all-files', {
|
||||
alias: ['a'],
|
||||
type: 'boolean',
|
||||
description: 'Include ALL files in context?',
|
||||
default: false,
|
||||
})
|
||||
.option('yolo', {
|
||||
alias: 'y',
|
||||
type: 'boolean',
|
||||
|
|
@ -512,10 +505,6 @@ export async function parseArguments(settings: Settings): Promise<CliArgs> {
|
|||
'checkpointing',
|
||||
'Use the "general.checkpointing.enabled" setting in settings.json instead. This flag will be removed in a future version.',
|
||||
)
|
||||
.deprecateOption(
|
||||
'all-files',
|
||||
'Use @ includes in the application instead. This flag will be removed in a future version.',
|
||||
)
|
||||
.deprecateOption(
|
||||
'prompt',
|
||||
'Use the positional prompt instead. This flag will be removed in a future version.',
|
||||
|
|
@ -950,7 +939,6 @@ export async function loadCliConfig(
|
|||
importFormat: settings.context?.importFormat || 'tree',
|
||||
debugMode,
|
||||
question,
|
||||
fullContext: argv.allFiles || false,
|
||||
coreTools: argv.coreTools || settings.tools?.core || undefined,
|
||||
allowedTools: argv.allowedTools || settings.tools?.allowed || undefined,
|
||||
excludeTools,
|
||||
|
|
|
|||
|
|
@ -449,7 +449,6 @@ describe('gemini.tsx main function kitty protocol', () => {
|
|||
prompt: undefined,
|
||||
promptInteractive: undefined,
|
||||
query: undefined,
|
||||
allFiles: undefined,
|
||||
yolo: undefined,
|
||||
approvalMode: undefined,
|
||||
telemetry: undefined,
|
||||
|
|
|
|||
|
|
@ -224,7 +224,6 @@ export async function runNonInteractive(
|
|||
const { processedQuery, shouldProceed } = await handleAtCommand({
|
||||
query: input,
|
||||
config,
|
||||
addItem: (_item, _timestamp) => 0,
|
||||
onDebugMessage: () => {},
|
||||
messageId: Date.now(),
|
||||
signal: abortController.signal,
|
||||
|
|
|
|||
File diff suppressed because it is too large
Load diff
|
|
@ -6,29 +6,35 @@
|
|||
|
||||
import * as fs from 'node:fs/promises';
|
||||
import * as path from 'node:path';
|
||||
import type { PartListUnion, PartUnion } from '@google/genai';
|
||||
import type { AnyToolInvocation, Config } from '@qwen-code/qwen-code-core';
|
||||
import type { PartListUnion } from '@google/genai';
|
||||
import type { Config } from '@qwen-code/qwen-code-core';
|
||||
import {
|
||||
getErrorMessage,
|
||||
isNodeError,
|
||||
unescapePath,
|
||||
readManyFiles,
|
||||
} from '@qwen-code/qwen-code-core';
|
||||
import type { HistoryItem, IndividualToolCallDisplay } from '../types.js';
|
||||
import type {
|
||||
HistoryItemToolGroup,
|
||||
HistoryItemWithoutId,
|
||||
IndividualToolCallDisplay,
|
||||
} from '../types.js';
|
||||
import { ToolCallStatus } from '../types.js';
|
||||
import type { UseHistoryManagerReturn } from './useHistoryManager.js';
|
||||
|
||||
interface HandleAtCommandParams {
|
||||
query: string;
|
||||
config: Config;
|
||||
addItem: UseHistoryManagerReturn['addItem'];
|
||||
onDebugMessage: (message: string) => void;
|
||||
messageId: number;
|
||||
signal: AbortSignal;
|
||||
addItem?: (item: HistoryItemWithoutId, baseTimestamp: number) => number;
|
||||
}
|
||||
|
||||
interface HandleAtCommandResult {
|
||||
processedQuery: PartListUnion | null;
|
||||
shouldProceed: boolean;
|
||||
toolDisplays?: IndividualToolCallDisplay[];
|
||||
filesRead?: string[];
|
||||
}
|
||||
|
||||
interface AtCommandPart {
|
||||
|
|
@ -36,12 +42,6 @@ interface AtCommandPart {
|
|||
content: string;
|
||||
}
|
||||
|
||||
interface McpResourceAtReference {
|
||||
atCommand: string; // e.g. "@github:repos/owner/repo/issues"
|
||||
serverName: string;
|
||||
uri: string; // e.g. "github://repos/owner/repo/issues"
|
||||
}
|
||||
|
||||
/**
|
||||
* Parses a query string to find all '@<path>' commands and text segments.
|
||||
* Handles \ escaped spaces within paths.
|
||||
|
|
@ -116,199 +116,6 @@ function parseAllAtCommands(query: string): AtCommandPart[] {
|
|||
);
|
||||
}
|
||||
|
||||
function getConfiguredMcpServerNames(config: Config): Set<string> {
|
||||
const names = new Set(Object.keys(config.getMcpServers() ?? {}));
|
||||
if (config.getMcpServerCommand()) {
|
||||
names.add('mcp');
|
||||
}
|
||||
return names;
|
||||
}
|
||||
|
||||
function normalizeMcpResourceUri(serverName: string, resource: string): string {
|
||||
if (resource.includes('://')) {
|
||||
return resource;
|
||||
}
|
||||
|
||||
const cleaned = resource.startsWith('/') ? resource.slice(1) : resource;
|
||||
return `${serverName}://${cleaned}`;
|
||||
}
|
||||
|
||||
function splitLeadingToken(
|
||||
text: string,
|
||||
): { token: string; rest: string } | null {
|
||||
let i = 0;
|
||||
while (i < text.length && /\s/.test(text[i])) {
|
||||
i++;
|
||||
}
|
||||
if (i >= text.length) {
|
||||
return null;
|
||||
}
|
||||
|
||||
let token = '';
|
||||
let inEscape = false;
|
||||
while (i < text.length) {
|
||||
const char = text[i];
|
||||
if (inEscape) {
|
||||
token += char;
|
||||
inEscape = false;
|
||||
i++;
|
||||
continue;
|
||||
}
|
||||
if (char === '\\') {
|
||||
inEscape = true;
|
||||
i++;
|
||||
continue;
|
||||
}
|
||||
if (/[,\s;!?()[\]{}]/.test(char)) {
|
||||
break;
|
||||
}
|
||||
if (char === '.') {
|
||||
const nextChar = i + 1 < text.length ? text[i + 1] : '';
|
||||
if (nextChar === '' || /\s/.test(nextChar)) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
token += char;
|
||||
i++;
|
||||
}
|
||||
|
||||
if (!token) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return { token, rest: text.slice(i) };
|
||||
}
|
||||
|
||||
function extractMcpResourceAtReferences(
|
||||
parts: AtCommandPart[],
|
||||
config: Config,
|
||||
): { parts: AtCommandPart[]; refs: McpResourceAtReference[] } {
|
||||
const configuredServers = getConfiguredMcpServerNames(config);
|
||||
const refs: McpResourceAtReference[] = [];
|
||||
const merged: AtCommandPart[] = [];
|
||||
|
||||
for (let i = 0; i < parts.length; i++) {
|
||||
const part = parts[i];
|
||||
if (part.type !== 'atPath') {
|
||||
merged.push(part);
|
||||
continue;
|
||||
}
|
||||
|
||||
const atText = part.content; // e.g. "@github:" or "@github:repos/..."
|
||||
const colonIndex = atText.indexOf(':');
|
||||
if (!atText.startsWith('@') || colonIndex <= 1) {
|
||||
merged.push(part);
|
||||
continue;
|
||||
}
|
||||
|
||||
const serverName = atText.slice(1, colonIndex);
|
||||
if (!configuredServers.has(serverName)) {
|
||||
merged.push(part);
|
||||
continue;
|
||||
}
|
||||
|
||||
let resource = atText.slice(colonIndex + 1);
|
||||
|
||||
// Support the documented "@server: resource" format where the resource is
|
||||
// separated into the following text part.
|
||||
if (!resource) {
|
||||
const next = parts[i + 1];
|
||||
if (next?.type === 'text') {
|
||||
const tokenInfo = splitLeadingToken(next.content);
|
||||
if (tokenInfo) {
|
||||
resource = tokenInfo.token;
|
||||
const remainingText = tokenInfo.rest;
|
||||
// Update the next part in place, and let the next iteration handle it.
|
||||
parts[i + 1] = { type: 'text', content: remainingText };
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!resource) {
|
||||
// Treat "@server:" without a resource as plain text, rather than falling
|
||||
// through to file resolution for a path like "server:".
|
||||
merged.push({ type: 'text', content: atText });
|
||||
continue;
|
||||
}
|
||||
|
||||
const normalizedResource = resource.includes('://')
|
||||
? resource
|
||||
: resource.startsWith('/')
|
||||
? resource.slice(1)
|
||||
: resource;
|
||||
|
||||
const normalizedAtCommand = `@${serverName}:${normalizedResource}`;
|
||||
refs.push({
|
||||
atCommand: normalizedAtCommand,
|
||||
serverName,
|
||||
uri: normalizeMcpResourceUri(serverName, normalizedResource),
|
||||
});
|
||||
merged.push({ type: 'atPath', content: normalizedAtCommand });
|
||||
}
|
||||
|
||||
return {
|
||||
parts: merged.filter(
|
||||
(p) => !(p.type === 'text' && p.content.trim() === ''),
|
||||
),
|
||||
refs,
|
||||
};
|
||||
}
|
||||
|
||||
function formatMcpResourceContents(
|
||||
raw: unknown,
|
||||
limits: { maxCharsPerResource: number; maxLinesPerResource: number },
|
||||
): string {
|
||||
if (!raw || typeof raw !== 'object') {
|
||||
return '[Error: Invalid MCP resource response]';
|
||||
}
|
||||
|
||||
const contents = (raw as { contents?: unknown }).contents;
|
||||
if (!Array.isArray(contents)) {
|
||||
return '[Error: Invalid MCP resource response]';
|
||||
}
|
||||
|
||||
const parts: string[] = [];
|
||||
for (const item of contents) {
|
||||
if (!item || typeof item !== 'object') {
|
||||
continue;
|
||||
}
|
||||
|
||||
const text = (item as { text?: unknown }).text;
|
||||
const blob = (item as { blob?: unknown }).blob;
|
||||
const mimeType = (item as { mimeType?: unknown }).mimeType;
|
||||
|
||||
if (typeof text === 'string') {
|
||||
parts.push(text);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (typeof blob === 'string') {
|
||||
const mimeTypeLabel =
|
||||
typeof mimeType === 'string' ? mimeType : 'application/octet-stream';
|
||||
parts.push(
|
||||
`[Binary MCP resource omitted (mimeType: ${mimeTypeLabel}, bytes: ${blob.length})]`,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
let combined = parts.join('\n\n');
|
||||
|
||||
const maxLines = limits.maxLinesPerResource;
|
||||
if (Number.isFinite(maxLines)) {
|
||||
const lines = combined.split('\n');
|
||||
if (lines.length > maxLines) {
|
||||
combined = `${lines.slice(0, maxLines).join('\n')}\n[truncated]`;
|
||||
}
|
||||
}
|
||||
|
||||
const maxChars = limits.maxCharsPerResource;
|
||||
if (Number.isFinite(maxChars) && combined.length > maxChars) {
|
||||
combined = `${combined.slice(0, maxChars)}\n[truncated]`;
|
||||
}
|
||||
|
||||
return combined;
|
||||
}
|
||||
|
||||
/**
|
||||
* Processes user input potentially containing one or more '@<path>' commands.
|
||||
* If found, it attempts to read the specified files/directories using the
|
||||
|
|
@ -321,22 +128,26 @@ function formatMcpResourceContents(
|
|||
export async function handleAtCommand({
|
||||
query,
|
||||
config,
|
||||
addItem,
|
||||
onDebugMessage,
|
||||
messageId: userMessageTimestamp,
|
||||
signal,
|
||||
addItem,
|
||||
}: HandleAtCommandParams): Promise<HandleAtCommandResult> {
|
||||
const parsedParts = parseAllAtCommands(query);
|
||||
const { parts: commandParts, refs: mcpResourceRefs } =
|
||||
extractMcpResourceAtReferences(parsedParts, config);
|
||||
|
||||
const mcpAtCommands = new Set(mcpResourceRefs.map((r) => r.atCommand));
|
||||
const commandParts = parseAllAtCommands(query);
|
||||
const atPathCommandParts = commandParts.filter(
|
||||
(part) => part.type === 'atPath',
|
||||
);
|
||||
const fileAtPathCommandParts = atPathCommandParts.filter(
|
||||
(part) => !mcpAtCommands.has(part.content),
|
||||
);
|
||||
|
||||
const addToolGroup = (result: HandleAtCommandResult): void => {
|
||||
if (!addItem) return;
|
||||
if (result.toolDisplays && result.toolDisplays.length > 0) {
|
||||
const toolGroupItem: HistoryItemToolGroup = {
|
||||
type: 'tool_group',
|
||||
tools: result.toolDisplays,
|
||||
};
|
||||
addItem(toolGroupItem, userMessageTimestamp);
|
||||
}
|
||||
};
|
||||
|
||||
if (atPathCommandParts.length === 0) {
|
||||
return { processedQuery: [{ text: query }], shouldProceed: true };
|
||||
|
|
@ -356,11 +167,7 @@ export async function handleAtCommand({
|
|||
both: [],
|
||||
};
|
||||
|
||||
const toolRegistry = config.getToolRegistry();
|
||||
const readManyFilesTool = toolRegistry.getTool('read_many_files');
|
||||
const globTool = toolRegistry.getTool('glob');
|
||||
|
||||
for (const atPathPart of fileAtPathCommandParts) {
|
||||
for (const atPathPart of atPathCommandParts) {
|
||||
const originalAtPath = atPathPart.content; // e.g., "@file.txt" or "@"
|
||||
|
||||
if (originalAtPath === '@') {
|
||||
|
|
@ -371,23 +178,8 @@ export async function handleAtCommand({
|
|||
}
|
||||
|
||||
const pathName = originalAtPath.substring(1);
|
||||
if (!pathName) {
|
||||
// This case should ideally not be hit if parseAllAtCommands ensures content after @
|
||||
// but as a safeguard:
|
||||
addItem(
|
||||
{
|
||||
type: 'error',
|
||||
text: `Error: Invalid @ command '${originalAtPath}'. No path specified.`,
|
||||
},
|
||||
userMessageTimestamp,
|
||||
);
|
||||
// Decide if this is a fatal error for the whole command or just skip this @ part
|
||||
// For now, let's be strict and fail the command if one @path is malformed.
|
||||
return { processedQuery: null, shouldProceed: false };
|
||||
}
|
||||
|
||||
// Check if path should be ignored based on filtering options
|
||||
|
||||
const workspaceContext = config.getWorkspaceContext();
|
||||
if (!workspaceContext.isPathWithinWorkspace(pathName)) {
|
||||
onDebugMessage(
|
||||
|
|
@ -423,73 +215,24 @@ export async function handleAtCommand({
|
|||
continue;
|
||||
}
|
||||
|
||||
let resolvedSuccessfully = false;
|
||||
let sawNotFound = false;
|
||||
for (const dir of config.getWorkspaceContext().getDirectories()) {
|
||||
let currentPathSpec = pathName;
|
||||
let resolvedSuccessfully = false;
|
||||
try {
|
||||
const absolutePath = path.resolve(dir, pathName);
|
||||
const stats = await fs.stat(absolutePath);
|
||||
if (stats.isDirectory()) {
|
||||
currentPathSpec =
|
||||
pathName + (pathName.endsWith(path.sep) ? `**` : `/**`);
|
||||
onDebugMessage(
|
||||
`Path ${pathName} resolved to directory, using glob: ${currentPathSpec}`,
|
||||
);
|
||||
currentPathSpec = pathName;
|
||||
onDebugMessage(`Path ${pathName} resolved to directory.`);
|
||||
} else {
|
||||
onDebugMessage(`Path ${pathName} resolved to file: ${absolutePath}`);
|
||||
}
|
||||
resolvedSuccessfully = true;
|
||||
} catch (error) {
|
||||
if (isNodeError(error) && error.code === 'ENOENT') {
|
||||
if (config.getEnableRecursiveFileSearch() && globTool) {
|
||||
onDebugMessage(
|
||||
`Path ${pathName} not found directly, attempting glob search.`,
|
||||
);
|
||||
try {
|
||||
const globResult = await globTool.buildAndExecute(
|
||||
{
|
||||
pattern: `**/*${pathName}*`,
|
||||
path: dir,
|
||||
},
|
||||
signal,
|
||||
);
|
||||
if (
|
||||
globResult.llmContent &&
|
||||
typeof globResult.llmContent === 'string' &&
|
||||
!globResult.llmContent.startsWith('No files found') &&
|
||||
!globResult.llmContent.startsWith('Error:')
|
||||
) {
|
||||
const lines = globResult.llmContent.split('\n');
|
||||
if (lines.length > 1 && lines[1]) {
|
||||
const firstMatchAbsolute = lines[1].trim();
|
||||
currentPathSpec = path.relative(dir, firstMatchAbsolute);
|
||||
onDebugMessage(
|
||||
`Glob search for ${pathName} found ${firstMatchAbsolute}, using relative path: ${currentPathSpec}`,
|
||||
);
|
||||
resolvedSuccessfully = true;
|
||||
} else {
|
||||
onDebugMessage(
|
||||
`Glob search for '**/*${pathName}*' did not return a usable path. Path ${pathName} will be skipped.`,
|
||||
);
|
||||
}
|
||||
} else {
|
||||
onDebugMessage(
|
||||
`Glob search for '**/*${pathName}*' found no files or an error. Path ${pathName} will be skipped.`,
|
||||
);
|
||||
}
|
||||
} catch (globError) {
|
||||
console.error(
|
||||
`Error during glob search for ${pathName}: ${getErrorMessage(globError)}`,
|
||||
);
|
||||
onDebugMessage(
|
||||
`Error during glob search for ${pathName}. Path ${pathName} will be skipped.`,
|
||||
);
|
||||
}
|
||||
} else {
|
||||
onDebugMessage(
|
||||
`Glob tool not found. Path ${pathName} will be skipped.`,
|
||||
);
|
||||
}
|
||||
sawNotFound = true;
|
||||
continue;
|
||||
} else {
|
||||
console.error(
|
||||
`Error stating path ${pathName}: ${getErrorMessage(error)}`,
|
||||
|
|
@ -506,6 +249,11 @@ export async function handleAtCommand({
|
|||
break;
|
||||
}
|
||||
}
|
||||
if (!resolvedSuccessfully && sawNotFound) {
|
||||
onDebugMessage(
|
||||
`Path ${pathName} not found. Path ${pathName} will be skipped.`,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Construct the initial part of the query for the LLM
|
||||
|
|
@ -575,7 +323,7 @@ export async function handleAtCommand({
|
|||
}
|
||||
|
||||
// Fallback for lone "@" or completely invalid @-commands resulting in empty initialQueryText
|
||||
if (pathSpecsToRead.length === 0 && mcpResourceRefs.length === 0) {
|
||||
if (pathSpecsToRead.length === 0) {
|
||||
onDebugMessage('No valid file paths found in @ commands to read.');
|
||||
if (initialQueryText === '@' && query.trim() === '@') {
|
||||
// If the only thing was a lone @, pass original query (which might have spaces)
|
||||
|
|
@ -591,167 +339,89 @@ export async function handleAtCommand({
|
|||
};
|
||||
}
|
||||
|
||||
const processedQueryParts: PartUnion[] = [{ text: initialQueryText }];
|
||||
|
||||
const toolDisplays: IndividualToolCallDisplay[] = [];
|
||||
|
||||
if (pathSpecsToRead.length > 0) {
|
||||
if (!readManyFilesTool) {
|
||||
addItem(
|
||||
{ type: 'error', text: 'Error: read_many_files tool not found.' },
|
||||
userMessageTimestamp,
|
||||
);
|
||||
return { processedQuery: null, shouldProceed: false };
|
||||
}
|
||||
|
||||
const toolArgs = {
|
||||
try {
|
||||
const result = await readManyFiles(config, {
|
||||
paths: pathSpecsToRead,
|
||||
file_filtering_options: {
|
||||
respect_git_ignore: respectFileIgnore.respectGitIgnore,
|
||||
respect_qwen_ignore: respectFileIgnore.respectQwenIgnore,
|
||||
},
|
||||
// Use configuration setting
|
||||
};
|
||||
|
||||
let invocation: AnyToolInvocation | undefined = undefined;
|
||||
try {
|
||||
invocation = readManyFilesTool.build(toolArgs);
|
||||
const result = await invocation.execute(signal);
|
||||
toolDisplays.push({
|
||||
callId: `client-read-${userMessageTimestamp}`,
|
||||
name: readManyFilesTool.displayName,
|
||||
description: invocation.getDescription(),
|
||||
status: ToolCallStatus.Success,
|
||||
resultDisplay:
|
||||
result.returnDisplay ||
|
||||
`Successfully read: ${contentLabelsForDisplay.join(', ')}`,
|
||||
confirmationDetails: undefined,
|
||||
});
|
||||
|
||||
if (Array.isArray(result.llmContent)) {
|
||||
const fileContentRegex = /^--- (.*?) ---\n\n([\s\S]*?)\n\n$/;
|
||||
processedQueryParts.push({
|
||||
text: '\n--- Content from referenced files ---',
|
||||
});
|
||||
for (const part of result.llmContent) {
|
||||
if (typeof part === 'string') {
|
||||
const match = fileContentRegex.exec(part);
|
||||
if (match) {
|
||||
const filePathSpecInContent = match[1]; // This is a resolved pathSpec
|
||||
const fileActualContent = match[2].trim();
|
||||
processedQueryParts.push({
|
||||
text: `\nContent from @${filePathSpecInContent}:\n`,
|
||||
});
|
||||
processedQueryParts.push({ text: fileActualContent });
|
||||
} else {
|
||||
processedQueryParts.push({ text: part });
|
||||
}
|
||||
} else {
|
||||
// part is a Part object.
|
||||
processedQueryParts.push(part);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
onDebugMessage(
|
||||
'read_many_files tool returned no content or empty content.',
|
||||
);
|
||||
}
|
||||
} catch (error: unknown) {
|
||||
toolDisplays.push({
|
||||
callId: `client-read-${userMessageTimestamp}`,
|
||||
name: readManyFilesTool.displayName,
|
||||
description:
|
||||
invocation?.getDescription() ??
|
||||
'Error attempting to execute tool to read files',
|
||||
status: ToolCallStatus.Error,
|
||||
resultDisplay: `Error reading files (${contentLabelsForDisplay.join(', ')}): ${getErrorMessage(error)}`,
|
||||
confirmationDetails: undefined,
|
||||
});
|
||||
addItem(
|
||||
{ type: 'tool_group', tools: toolDisplays } as Omit<HistoryItem, 'id'>,
|
||||
userMessageTimestamp,
|
||||
);
|
||||
return { processedQuery: null, shouldProceed: false };
|
||||
}
|
||||
}
|
||||
|
||||
if (mcpResourceRefs.length > 0) {
|
||||
const totalCharLimit = config.getTruncateToolOutputThreshold();
|
||||
const totalLineLimit = config.getTruncateToolOutputLines();
|
||||
const maxCharsPerResource = Number.isFinite(totalCharLimit)
|
||||
? Math.floor(totalCharLimit / Math.max(1, mcpResourceRefs.length))
|
||||
: Number.POSITIVE_INFINITY;
|
||||
const maxLinesPerResource = Number.isFinite(totalLineLimit)
|
||||
? Math.floor(totalLineLimit / Math.max(1, mcpResourceRefs.length))
|
||||
: Number.POSITIVE_INFINITY;
|
||||
|
||||
processedQueryParts.push({
|
||||
text: '\n--- Content from referenced MCP resources ---',
|
||||
signal,
|
||||
});
|
||||
|
||||
for (let i = 0; i < mcpResourceRefs.length; i++) {
|
||||
const ref = mcpResourceRefs[i];
|
||||
let resourceResult: unknown;
|
||||
try {
|
||||
if (signal.aborted) {
|
||||
const error = new Error('MCP resource read aborted');
|
||||
error.name = 'AbortError';
|
||||
throw error;
|
||||
const parts = Array.isArray(result.contentParts)
|
||||
? result.contentParts
|
||||
: [result.contentParts];
|
||||
|
||||
// Create individual tool call displays for each file read
|
||||
const toolCallDisplays: IndividualToolCallDisplay[] = result.files.map(
|
||||
(file, index) => ({
|
||||
callId: `client-read-${userMessageTimestamp}-${index}`,
|
||||
name: file.isDirectory ? 'Read Directory' : 'Read File',
|
||||
description: file.isDirectory
|
||||
? `Read directory ${path.basename(file.filePath)}`
|
||||
: `Read file ${path.basename(file.filePath)}`,
|
||||
status: ToolCallStatus.Success,
|
||||
resultDisplay: undefined,
|
||||
confirmationDetails: undefined,
|
||||
}),
|
||||
);
|
||||
|
||||
const processedQueryParts: PartListUnion = [{ text: initialQueryText }];
|
||||
|
||||
if (parts.length > 0 && !result.error) {
|
||||
// readManyFiles now returns properly formatted parts with headers and prefixes
|
||||
for (const part of parts) {
|
||||
if (typeof part === 'string') {
|
||||
processedQueryParts.push({ text: part });
|
||||
} else {
|
||||
// part is a Part object (text, inlineData, or fileData)
|
||||
processedQueryParts.push(part);
|
||||
}
|
||||
|
||||
resourceResult = await toolRegistry.readMcpResource(
|
||||
ref.serverName,
|
||||
ref.uri,
|
||||
{ signal },
|
||||
);
|
||||
|
||||
toolDisplays.push({
|
||||
callId: `client-mcp-resource-${userMessageTimestamp}-${i}`,
|
||||
name: 'McpResourceRead',
|
||||
description: `Read MCP resource ${ref.uri} (server: ${ref.serverName})`,
|
||||
status: ToolCallStatus.Success,
|
||||
resultDisplay: `Read: ${ref.uri}`,
|
||||
confirmationDetails: undefined,
|
||||
});
|
||||
} catch (error: unknown) {
|
||||
toolDisplays.push({
|
||||
callId: `client-mcp-resource-${userMessageTimestamp}-${i}`,
|
||||
name: 'McpResourceRead',
|
||||
description: `Read MCP resource ${ref.uri} (server: ${ref.serverName})`,
|
||||
status: ToolCallStatus.Error,
|
||||
resultDisplay: `Error reading MCP resource (${ref.uri}): ${getErrorMessage(error)}`,
|
||||
confirmationDetails: undefined,
|
||||
});
|
||||
addItem(
|
||||
{ type: 'tool_group', tools: toolDisplays } as Omit<
|
||||
HistoryItem,
|
||||
'id'
|
||||
>,
|
||||
userMessageTimestamp,
|
||||
);
|
||||
return { processedQuery: null, shouldProceed: false };
|
||||
}
|
||||
|
||||
processedQueryParts.push({
|
||||
text: `\nContent from ${ref.atCommand}:\n`,
|
||||
});
|
||||
processedQueryParts.push({
|
||||
text: formatMcpResourceContents(resourceResult, {
|
||||
maxCharsPerResource,
|
||||
maxLinesPerResource,
|
||||
}),
|
||||
});
|
||||
} else {
|
||||
onDebugMessage('readManyFiles returned no content or empty content.');
|
||||
}
|
||||
|
||||
processedQueryParts.push({ text: '\n--- End of MCP resource content ---' });
|
||||
}
|
||||
const processedResult: HandleAtCommandResult = {
|
||||
processedQuery: processedQueryParts,
|
||||
shouldProceed: true,
|
||||
toolDisplays: toolCallDisplays,
|
||||
filesRead: contentLabelsForDisplay,
|
||||
};
|
||||
|
||||
if (toolDisplays.length > 0) {
|
||||
addItem(
|
||||
{ type: 'tool_group', tools: toolDisplays } as Omit<HistoryItem, 'id'>,
|
||||
userMessageTimestamp,
|
||||
);
|
||||
}
|
||||
const chatRecorder = config.getChatRecordingService?.();
|
||||
chatRecorder?.recordAtCommand({
|
||||
filesRead: contentLabelsForDisplay,
|
||||
status: 'success',
|
||||
userText: query,
|
||||
});
|
||||
|
||||
return { processedQuery: processedQueryParts, shouldProceed: true };
|
||||
addToolGroup(processedResult);
|
||||
return processedResult;
|
||||
} catch (error: unknown) {
|
||||
const errorToolCallDisplay: IndividualToolCallDisplay = {
|
||||
callId: `client-read-${userMessageTimestamp}`,
|
||||
name: 'Read File(s)',
|
||||
description: 'Error attempting to read files',
|
||||
status: ToolCallStatus.Error,
|
||||
resultDisplay: `Error reading files (${contentLabelsForDisplay.join(', ')}): ${getErrorMessage(error)}`,
|
||||
confirmationDetails: undefined,
|
||||
};
|
||||
const chatRecorder = config.getChatRecordingService?.();
|
||||
const errorMessage =
|
||||
typeof errorToolCallDisplay.resultDisplay === 'string'
|
||||
? errorToolCallDisplay.resultDisplay
|
||||
: undefined;
|
||||
chatRecorder?.recordAtCommand({
|
||||
filesRead: contentLabelsForDisplay,
|
||||
status: 'error',
|
||||
message: errorMessage,
|
||||
userText: query,
|
||||
});
|
||||
const result = {
|
||||
processedQuery: null,
|
||||
shouldProceed: false,
|
||||
toolDisplays: [errorToolCallDisplay],
|
||||
filesRead: contentLabelsForDisplay,
|
||||
};
|
||||
addToolGroup(result);
|
||||
return result;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -382,34 +382,28 @@ export const useGeminiStream = (
|
|||
return { queryToSend: null, shouldProceed: false };
|
||||
}
|
||||
|
||||
localQueryToSendToGemini = trimmedQuery;
|
||||
|
||||
addItem(
|
||||
{ type: MessageType.USER, text: trimmedQuery },
|
||||
userMessageTimestamp,
|
||||
);
|
||||
|
||||
// Handle @-commands (which might involve tool calls)
|
||||
if (isAtCommand(trimmedQuery)) {
|
||||
const atCommandResult = await handleAtCommand({
|
||||
query: trimmedQuery,
|
||||
config,
|
||||
addItem,
|
||||
onDebugMessage,
|
||||
messageId: userMessageTimestamp,
|
||||
signal: abortSignal,
|
||||
addItem,
|
||||
});
|
||||
|
||||
// Add user's turn after @ command processing is done.
|
||||
addItem(
|
||||
{ type: MessageType.USER, text: trimmedQuery },
|
||||
userMessageTimestamp,
|
||||
);
|
||||
|
||||
if (!atCommandResult.shouldProceed) {
|
||||
return { queryToSend: null, shouldProceed: false };
|
||||
}
|
||||
localQueryToSendToGemini = atCommandResult.processedQuery;
|
||||
} else {
|
||||
// Normal query for Gemini
|
||||
addItem(
|
||||
{ type: MessageType.USER, text: trimmedQuery },
|
||||
userMessageTimestamp,
|
||||
);
|
||||
localQueryToSendToGemini = trimmedQuery;
|
||||
}
|
||||
} else {
|
||||
// It's a function response (PartListUnion that isn't a string)
|
||||
|
|
@ -981,6 +975,7 @@ export const useGeminiStream = (
|
|||
prompt_id!,
|
||||
options,
|
||||
);
|
||||
|
||||
const processingStatus = await processGeminiStreamEvents(
|
||||
stream,
|
||||
userMessageTimestamp,
|
||||
|
|
|
|||
|
|
@ -4,6 +4,7 @@
|
|||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import * as path from 'node:path';
|
||||
import type { Part, FunctionCall } from '@google/genai';
|
||||
import type {
|
||||
ResumedSessionData,
|
||||
|
|
@ -12,8 +13,13 @@ import type {
|
|||
AnyDeclarativeTool,
|
||||
ToolResultDisplay,
|
||||
SlashCommandRecordPayload,
|
||||
AtCommandRecordPayload,
|
||||
} from '@qwen-code/qwen-code-core';
|
||||
import type { HistoryItem, HistoryItemWithoutId } from '../types.js';
|
||||
import type {
|
||||
HistoryItem,
|
||||
HistoryItemWithoutId,
|
||||
IndividualToolCallDisplay,
|
||||
} from '../types.js';
|
||||
import { ToolCallStatus } from '../types.js';
|
||||
|
||||
/**
|
||||
|
|
@ -137,6 +143,8 @@ function convertToHistoryItems(
|
|||
config: Config,
|
||||
): HistoryItemWithoutId[] {
|
||||
const items: HistoryItemWithoutId[] = [];
|
||||
const pendingAtCommands: AtCommandRecordPayload[] = [];
|
||||
let atCommandCounter = 0;
|
||||
|
||||
// Track pending tool calls for grouping with results
|
||||
const pendingToolCalls = new Map<
|
||||
|
|
@ -152,6 +160,59 @@ function convertToHistoryItems(
|
|||
confirmationDetails: undefined;
|
||||
}> = [];
|
||||
|
||||
const buildAtCommandDisplays = (
|
||||
payload: AtCommandRecordPayload,
|
||||
): IndividualToolCallDisplay[] => {
|
||||
// Error case: single "Read File(s)" with error message
|
||||
if (payload.status === 'error') {
|
||||
atCommandCounter += 1;
|
||||
const filesLabel = payload.filesRead?.length
|
||||
? payload.filesRead.join(', ')
|
||||
: 'files';
|
||||
return [
|
||||
{
|
||||
callId: `at-command-${atCommandCounter}`,
|
||||
name: 'Read File(s)',
|
||||
description: 'Error attempting to read files',
|
||||
status: ToolCallStatus.Error,
|
||||
resultDisplay:
|
||||
payload.message || `Error reading files (${filesLabel})`,
|
||||
confirmationDetails: undefined,
|
||||
},
|
||||
];
|
||||
}
|
||||
|
||||
// Success case: individual tool calls for each file
|
||||
if (!payload.filesRead?.length) {
|
||||
atCommandCounter += 1;
|
||||
return [
|
||||
{
|
||||
callId: `at-command-${atCommandCounter}`,
|
||||
name: 'Read File',
|
||||
description: 'Read File(s)',
|
||||
status: ToolCallStatus.Success,
|
||||
resultDisplay: undefined,
|
||||
confirmationDetails: undefined,
|
||||
},
|
||||
];
|
||||
}
|
||||
|
||||
return payload.filesRead.map((filePath) => {
|
||||
atCommandCounter += 1;
|
||||
const isDir = filePath.endsWith('/');
|
||||
return {
|
||||
callId: `at-command-${atCommandCounter}`,
|
||||
name: isDir ? 'Read Directory' : 'Read File',
|
||||
description: isDir
|
||||
? `Read directory ${path.basename(filePath)}`
|
||||
: `Read file ${path.basename(filePath)}`,
|
||||
status: ToolCallStatus.Success,
|
||||
resultDisplay: undefined,
|
||||
confirmationDetails: undefined,
|
||||
};
|
||||
});
|
||||
};
|
||||
|
||||
for (const record of conversation.messages) {
|
||||
if (record.type === 'system') {
|
||||
if (record.subtype === 'slash_command') {
|
||||
|
|
@ -180,10 +241,44 @@ function convertToHistoryItems(
|
|||
}
|
||||
}
|
||||
}
|
||||
if (record.subtype === 'at_command') {
|
||||
const payload = record.systemPayload as
|
||||
| AtCommandRecordPayload
|
||||
| undefined;
|
||||
if (!payload) continue;
|
||||
pendingAtCommands.push(payload);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
switch (record.type) {
|
||||
case 'user': {
|
||||
if (pendingAtCommands.length > 0) {
|
||||
// Flush any pending tool group before user message
|
||||
if (currentToolGroup.length > 0) {
|
||||
items.push({
|
||||
type: 'tool_group',
|
||||
tools: [...currentToolGroup],
|
||||
});
|
||||
currentToolGroup = [];
|
||||
}
|
||||
|
||||
const payload = pendingAtCommands.shift()!;
|
||||
const text =
|
||||
payload.userText ||
|
||||
extractTextFromParts(record.message?.parts as Part[]);
|
||||
if (text) {
|
||||
items.push({ type: 'user', text });
|
||||
}
|
||||
|
||||
const toolDisplays = buildAtCommandDisplays(payload);
|
||||
if (toolDisplays.length > 0) {
|
||||
items.push({
|
||||
type: 'tool_group',
|
||||
tools: toolDisplays,
|
||||
});
|
||||
}
|
||||
break;
|
||||
}
|
||||
// Flush any pending tool group before user message
|
||||
if (currentToolGroup.length > 0) {
|
||||
items.push({
|
||||
|
|
@ -290,6 +385,31 @@ function convertToHistoryItems(
|
|||
}
|
||||
}
|
||||
|
||||
if (pendingAtCommands.length > 0) {
|
||||
for (const payload of pendingAtCommands) {
|
||||
// Flush any pending tool group before standalone @-command
|
||||
if (currentToolGroup.length > 0) {
|
||||
items.push({
|
||||
type: 'tool_group',
|
||||
tools: [...currentToolGroup],
|
||||
});
|
||||
currentToolGroup = [];
|
||||
}
|
||||
|
||||
const text = payload.userText;
|
||||
if (text) {
|
||||
items.push({ type: 'user', text });
|
||||
}
|
||||
const toolDisplays = buildAtCommandDisplays(payload);
|
||||
if (toolDisplays.length > 0) {
|
||||
items.push({
|
||||
type: 'tool_group',
|
||||
tools: toolDisplays,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Flush any remaining tool group
|
||||
if (currentToolGroup.length > 0) {
|
||||
items.push({
|
||||
|
|
|
|||
|
|
@ -206,7 +206,6 @@ describe('Server Config (config.ts)', () => {
|
|||
const TARGET_DIR = '/path/to/target';
|
||||
const DEBUG_MODE = false;
|
||||
const QUESTION = 'test question';
|
||||
const FULL_CONTEXT = false;
|
||||
const USER_MEMORY = 'Test User Memory';
|
||||
const TELEMETRY_SETTINGS = { enabled: false };
|
||||
const EMBEDDING_MODEL = 'gemini-embedding';
|
||||
|
|
@ -217,7 +216,6 @@ describe('Server Config (config.ts)', () => {
|
|||
targetDir: TARGET_DIR,
|
||||
debugMode: DEBUG_MODE,
|
||||
question: QUESTION,
|
||||
fullContext: FULL_CONTEXT,
|
||||
userMemory: USER_MEMORY,
|
||||
telemetry: TELEMETRY_SETTINGS,
|
||||
model: MODEL,
|
||||
|
|
@ -1266,7 +1264,6 @@ describe('BaseLlmClient Lifecycle', () => {
|
|||
const TARGET_DIR = '/path/to/target';
|
||||
const DEBUG_MODE = false;
|
||||
const QUESTION = 'test question';
|
||||
const FULL_CONTEXT = false;
|
||||
const USER_MEMORY = 'Test User Memory';
|
||||
const TELEMETRY_SETTINGS = { enabled: false };
|
||||
const EMBEDDING_MODEL = 'gemini-embedding';
|
||||
|
|
@ -1277,7 +1274,6 @@ describe('BaseLlmClient Lifecycle', () => {
|
|||
targetDir: TARGET_DIR,
|
||||
debugMode: DEBUG_MODE,
|
||||
question: QUESTION,
|
||||
fullContext: FULL_CONTEXT,
|
||||
userMemory: USER_MEMORY,
|
||||
telemetry: TELEMETRY_SETTINGS,
|
||||
model: MODEL,
|
||||
|
|
|
|||
|
|
@ -50,7 +50,6 @@ import { LSTool } from '../tools/ls.js';
|
|||
import type { SendSdkMcpMessage } from '../tools/mcp-client.js';
|
||||
import { MemoryTool, setGeminiMdFilename } from '../tools/memoryTool.js';
|
||||
import { ReadFileTool } from '../tools/read-file.js';
|
||||
import { ReadManyFilesTool } from '../tools/read-many-files.js';
|
||||
import { canUseRipgrep } from '../utils/ripgrepUtils.js';
|
||||
import { RipGrepTool } from '../tools/ripGrep.js';
|
||||
import { ShellTool } from '../tools/shell.js';
|
||||
|
|
@ -285,7 +284,6 @@ export interface ConfigParameters {
|
|||
debugMode: boolean;
|
||||
includePartialMessages?: boolean;
|
||||
question?: string;
|
||||
fullContext?: boolean;
|
||||
coreTools?: string[];
|
||||
allowedTools?: string[];
|
||||
excludeTools?: string[];
|
||||
|
|
@ -432,7 +430,6 @@ export class Config {
|
|||
private readonly outputFormat: OutputFormat;
|
||||
private readonly includePartialMessages: boolean;
|
||||
private readonly question: string | undefined;
|
||||
private readonly fullContext: boolean;
|
||||
private readonly coreTools: string[] | undefined;
|
||||
private readonly allowedTools: string[] | undefined;
|
||||
private readonly excludeTools: string[] | undefined;
|
||||
|
|
@ -537,7 +534,6 @@ export class Config {
|
|||
this.outputFormat = normalizedOutputFormat ?? OutputFormat.TEXT;
|
||||
this.includePartialMessages = params.includePartialMessages ?? false;
|
||||
this.question = params.question;
|
||||
this.fullContext = params.fullContext ?? false;
|
||||
this.coreTools = params.coreTools;
|
||||
this.allowedTools = params.allowedTools;
|
||||
this.excludeTools = params.excludeTools;
|
||||
|
|
@ -1079,10 +1075,6 @@ export class Config {
|
|||
return this.question;
|
||||
}
|
||||
|
||||
getFullContext(): boolean {
|
||||
return this.fullContext;
|
||||
}
|
||||
|
||||
getCoreTools(): string[] | undefined {
|
||||
return this.coreTools;
|
||||
}
|
||||
|
|
@ -1695,7 +1687,6 @@ export class Config {
|
|||
registerCoreTool(GlobTool, this);
|
||||
registerCoreTool(EditTool, this);
|
||||
registerCoreTool(WriteFileTool, this);
|
||||
registerCoreTool(ReadManyFilesTool, this);
|
||||
registerCoreTool(ShellTool, this);
|
||||
registerCoreTool(MemoryTool);
|
||||
registerCoreTool(TodoWriteTool, this);
|
||||
|
|
|
|||
|
|
@ -69,7 +69,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
|
|||
## Software Engineering Tasks
|
||||
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
|
||||
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', and 'read_file' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
|
||||
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
|
||||
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
|
||||
|
|
@ -190,7 +190,7 @@ model:
|
|||
Okay, I can write those tests. First, I'll read someFile.ts to understand its functionality.
|
||||
[tool_call: read_file for path '/path/to/someFile.ts']
|
||||
Now I'll look for existing or related test files to understand current testing conventions and dependencies.
|
||||
[tool_call: read_many_files for paths ['**/*.test.ts', 'src/**/*.spec.ts']]
|
||||
[tool_call: read_file for path '/path/to/existingTest.test.ts']
|
||||
(After reviewing existing tests and the file content)
|
||||
[tool_call: write_file for path '/path/to/someFile.test.ts']
|
||||
I've written the tests. Now I'll run the project's test command to verify them.
|
||||
|
|
@ -211,7 +211,7 @@ To help you check their settings, I can read their contents. Which one would you
|
|||
</example>
|
||||
|
||||
# Final Reminder
|
||||
Your core function is efficient and safe assistance. Balance extreme conciseness with the crucial need for clarity, especially regarding safety and potential system modifications. Always prioritize user control and project conventions. Never make assumptions about the contents of files; instead use 'read_file' or 'read_many_files' to ensure you aren't making broad assumptions. Finally, you are an agent - please keep going until the user's query is completely resolved.
|
||||
Your core function is efficient and safe assistance. Balance extreme conciseness with the crucial need for clarity, especially regarding safety and potential system modifications. Always prioritize user control and project conventions. Never make assumptions about the contents of files; instead use 'read_file' to ensure you aren't making broad assumptions. Finally, you are an agent - please keep going until the user's query is completely resolved.
|
||||
|
||||
---
|
||||
|
||||
|
|
@ -288,7 +288,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
|
|||
## Software Engineering Tasks
|
||||
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
|
||||
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', and 'read_file' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
|
||||
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
|
||||
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
|
||||
|
|
@ -424,7 +424,7 @@ model:
|
|||
Okay, I can write those tests. First, I'll read someFile.ts to understand its functionality.
|
||||
[tool_call: read_file for path '/path/to/someFile.ts']
|
||||
Now I'll look for existing or related test files to understand current testing conventions and dependencies.
|
||||
[tool_call: read_many_files for paths ['**/*.test.ts', 'src/**/*.spec.ts']]
|
||||
[tool_call: read_file for path '/path/to/existingTest.test.ts']
|
||||
(After reviewing existing tests and the file content)
|
||||
[tool_call: write_file for path '/path/to/someFile.test.ts']
|
||||
I've written the tests. Now I'll run the project's test command to verify them.
|
||||
|
|
@ -445,7 +445,7 @@ To help you check their settings, I can read their contents. Which one would you
|
|||
</example>
|
||||
|
||||
# Final Reminder
|
||||
Your core function is efficient and safe assistance. Balance extreme conciseness with the crucial need for clarity, especially regarding safety and potential system modifications. Always prioritize user control and project conventions. Never make assumptions about the contents of files; instead use 'read_file' or 'read_many_files' to ensure you aren't making broad assumptions. Finally, you are an agent - please keep going until the user's query is completely resolved."
|
||||
Your core function is efficient and safe assistance. Balance extreme conciseness with the crucial need for clarity, especially regarding safety and potential system modifications. Always prioritize user control and project conventions. Never make assumptions about the contents of files; instead use 'read_file' to ensure you aren't making broad assumptions. Finally, you are an agent - please keep going until the user's query is completely resolved."
|
||||
`;
|
||||
|
||||
exports[`Core System Prompt (prompts.ts) > should include non-sandbox instructions when SANDBOX env var is not set 1`] = `
|
||||
|
|
@ -517,7 +517,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
|
|||
## Software Engineering Tasks
|
||||
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
|
||||
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', and 'read_file' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
|
||||
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
|
||||
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
|
||||
|
|
@ -638,7 +638,7 @@ model:
|
|||
Okay, I can write those tests. First, I'll read someFile.ts to understand its functionality.
|
||||
[tool_call: read_file for path '/path/to/someFile.ts']
|
||||
Now I'll look for existing or related test files to understand current testing conventions and dependencies.
|
||||
[tool_call: read_many_files for paths ['**/*.test.ts', 'src/**/*.spec.ts']]
|
||||
[tool_call: read_file for path '/path/to/existingTest.test.ts']
|
||||
(After reviewing existing tests and the file content)
|
||||
[tool_call: write_file for path '/path/to/someFile.test.ts']
|
||||
I've written the tests. Now I'll run the project's test command to verify them.
|
||||
|
|
@ -659,7 +659,7 @@ To help you check their settings, I can read their contents. Which one would you
|
|||
</example>
|
||||
|
||||
# Final Reminder
|
||||
Your core function is efficient and safe assistance. Balance extreme conciseness with the crucial need for clarity, especially regarding safety and potential system modifications. Always prioritize user control and project conventions. Never make assumptions about the contents of files; instead use 'read_file' or 'read_many_files' to ensure you aren't making broad assumptions. Finally, you are an agent - please keep going until the user's query is completely resolved."
|
||||
Your core function is efficient and safe assistance. Balance extreme conciseness with the crucial need for clarity, especially regarding safety and potential system modifications. Always prioritize user control and project conventions. Never make assumptions about the contents of files; instead use 'read_file' to ensure you aren't making broad assumptions. Finally, you are an agent - please keep going until the user's query is completely resolved."
|
||||
`;
|
||||
|
||||
exports[`Core System Prompt (prompts.ts) > should include sandbox-specific instructions when SANDBOX env var is set 1`] = `
|
||||
|
|
@ -731,7 +731,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
|
|||
## Software Engineering Tasks
|
||||
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
|
||||
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', and 'read_file' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
|
||||
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
|
||||
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
|
||||
|
|
@ -852,7 +852,7 @@ model:
|
|||
Okay, I can write those tests. First, I'll read someFile.ts to understand its functionality.
|
||||
[tool_call: read_file for path '/path/to/someFile.ts']
|
||||
Now I'll look for existing or related test files to understand current testing conventions and dependencies.
|
||||
[tool_call: read_many_files for paths ['**/*.test.ts', 'src/**/*.spec.ts']]
|
||||
[tool_call: read_file for path '/path/to/existingTest.test.ts']
|
||||
(After reviewing existing tests and the file content)
|
||||
[tool_call: write_file for path '/path/to/someFile.test.ts']
|
||||
I've written the tests. Now I'll run the project's test command to verify them.
|
||||
|
|
@ -873,7 +873,7 @@ To help you check their settings, I can read their contents. Which one would you
|
|||
</example>
|
||||
|
||||
# Final Reminder
|
||||
Your core function is efficient and safe assistance. Balance extreme conciseness with the crucial need for clarity, especially regarding safety and potential system modifications. Always prioritize user control and project conventions. Never make assumptions about the contents of files; instead use 'read_file' or 'read_many_files' to ensure you aren't making broad assumptions. Finally, you are an agent - please keep going until the user's query is completely resolved."
|
||||
Your core function is efficient and safe assistance. Balance extreme conciseness with the crucial need for clarity, especially regarding safety and potential system modifications. Always prioritize user control and project conventions. Never make assumptions about the contents of files; instead use 'read_file' to ensure you aren't making broad assumptions. Finally, you are an agent - please keep going until the user's query is completely resolved."
|
||||
`;
|
||||
|
||||
exports[`Core System Prompt (prompts.ts) > should include seatbelt-specific instructions when SANDBOX env var is "sandbox-exec" 1`] = `
|
||||
|
|
@ -945,7 +945,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
|
|||
## Software Engineering Tasks
|
||||
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
|
||||
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', and 'read_file' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
|
||||
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
|
||||
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
|
||||
|
|
@ -1066,7 +1066,7 @@ model:
|
|||
Okay, I can write those tests. First, I'll read someFile.ts to understand its functionality.
|
||||
[tool_call: read_file for path '/path/to/someFile.ts']
|
||||
Now I'll look for existing or related test files to understand current testing conventions and dependencies.
|
||||
[tool_call: read_many_files for paths ['**/*.test.ts', 'src/**/*.spec.ts']]
|
||||
[tool_call: read_file for path '/path/to/existingTest.test.ts']
|
||||
(After reviewing existing tests and the file content)
|
||||
[tool_call: write_file for path '/path/to/someFile.test.ts']
|
||||
I've written the tests. Now I'll run the project's test command to verify them.
|
||||
|
|
@ -1087,7 +1087,7 @@ To help you check their settings, I can read their contents. Which one would you
|
|||
</example>
|
||||
|
||||
# Final Reminder
|
||||
Your core function is efficient and safe assistance. Balance extreme conciseness with the crucial need for clarity, especially regarding safety and potential system modifications. Always prioritize user control and project conventions. Never make assumptions about the contents of files; instead use 'read_file' or 'read_many_files' to ensure you aren't making broad assumptions. Finally, you are an agent - please keep going until the user's query is completely resolved."
|
||||
Your core function is efficient and safe assistance. Balance extreme conciseness with the crucial need for clarity, especially regarding safety and potential system modifications. Always prioritize user control and project conventions. Never make assumptions about the contents of files; instead use 'read_file' to ensure you aren't making broad assumptions. Finally, you are an agent - please keep going until the user's query is completely resolved."
|
||||
`;
|
||||
|
||||
exports[`Core System Prompt (prompts.ts) > should not include git instructions when not in a git repo 1`] = `
|
||||
|
|
@ -1159,7 +1159,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
|
|||
## Software Engineering Tasks
|
||||
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
|
||||
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', and 'read_file' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
|
||||
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
|
||||
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
|
||||
|
|
@ -1280,7 +1280,7 @@ model:
|
|||
Okay, I can write those tests. First, I'll read someFile.ts to understand its functionality.
|
||||
[tool_call: read_file for path '/path/to/someFile.ts']
|
||||
Now I'll look for existing or related test files to understand current testing conventions and dependencies.
|
||||
[tool_call: read_many_files for paths ['**/*.test.ts', 'src/**/*.spec.ts']]
|
||||
[tool_call: read_file for path '/path/to/existingTest.test.ts']
|
||||
(After reviewing existing tests and the file content)
|
||||
[tool_call: write_file for path '/path/to/someFile.test.ts']
|
||||
I've written the tests. Now I'll run the project's test command to verify them.
|
||||
|
|
@ -1301,7 +1301,7 @@ To help you check their settings, I can read their contents. Which one would you
|
|||
</example>
|
||||
|
||||
# Final Reminder
|
||||
Your core function is efficient and safe assistance. Balance extreme conciseness with the crucial need for clarity, especially regarding safety and potential system modifications. Always prioritize user control and project conventions. Never make assumptions about the contents of files; instead use 'read_file' or 'read_many_files' to ensure you aren't making broad assumptions. Finally, you are an agent - please keep going until the user's query is completely resolved."
|
||||
Your core function is efficient and safe assistance. Balance extreme conciseness with the crucial need for clarity, especially regarding safety and potential system modifications. Always prioritize user control and project conventions. Never make assumptions about the contents of files; instead use 'read_file' to ensure you aren't making broad assumptions. Finally, you are an agent - please keep going until the user's query is completely resolved."
|
||||
`;
|
||||
|
||||
exports[`Core System Prompt (prompts.ts) > should return the base prompt when no userMemory is provided 1`] = `
|
||||
|
|
@ -1373,7 +1373,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
|
|||
## Software Engineering Tasks
|
||||
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
|
||||
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', and 'read_file' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
|
||||
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
|
||||
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
|
||||
|
|
@ -1494,7 +1494,7 @@ model:
|
|||
Okay, I can write those tests. First, I'll read someFile.ts to understand its functionality.
|
||||
[tool_call: read_file for path '/path/to/someFile.ts']
|
||||
Now I'll look for existing or related test files to understand current testing conventions and dependencies.
|
||||
[tool_call: read_many_files for paths ['**/*.test.ts', 'src/**/*.spec.ts']]
|
||||
[tool_call: read_file for path '/path/to/existingTest.test.ts']
|
||||
(After reviewing existing tests and the file content)
|
||||
[tool_call: write_file for path '/path/to/someFile.test.ts']
|
||||
I've written the tests. Now I'll run the project's test command to verify them.
|
||||
|
|
@ -1515,7 +1515,7 @@ To help you check their settings, I can read their contents. Which one would you
|
|||
</example>
|
||||
|
||||
# Final Reminder
|
||||
Your core function is efficient and safe assistance. Balance extreme conciseness with the crucial need for clarity, especially regarding safety and potential system modifications. Always prioritize user control and project conventions. Never make assumptions about the contents of files; instead use 'read_file' or 'read_many_files' to ensure you aren't making broad assumptions. Finally, you are an agent - please keep going until the user's query is completely resolved."
|
||||
Your core function is efficient and safe assistance. Balance extreme conciseness with the crucial need for clarity, especially regarding safety and potential system modifications. Always prioritize user control and project conventions. Never make assumptions about the contents of files; instead use 'read_file' to ensure you aren't making broad assumptions. Finally, you are an agent - please keep going until the user's query is completely resolved."
|
||||
`;
|
||||
|
||||
exports[`Core System Prompt (prompts.ts) > should return the base prompt when userMemory is empty string 1`] = `
|
||||
|
|
@ -1587,7 +1587,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
|
|||
## Software Engineering Tasks
|
||||
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
|
||||
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', and 'read_file' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
|
||||
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
|
||||
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
|
||||
|
|
@ -1708,7 +1708,7 @@ model:
|
|||
Okay, I can write those tests. First, I'll read someFile.ts to understand its functionality.
|
||||
[tool_call: read_file for path '/path/to/someFile.ts']
|
||||
Now I'll look for existing or related test files to understand current testing conventions and dependencies.
|
||||
[tool_call: read_many_files for paths ['**/*.test.ts', 'src/**/*.spec.ts']]
|
||||
[tool_call: read_file for path '/path/to/existingTest.test.ts']
|
||||
(After reviewing existing tests and the file content)
|
||||
[tool_call: write_file for path '/path/to/someFile.test.ts']
|
||||
I've written the tests. Now I'll run the project's test command to verify them.
|
||||
|
|
@ -1729,7 +1729,7 @@ To help you check their settings, I can read their contents. Which one would you
|
|||
</example>
|
||||
|
||||
# Final Reminder
|
||||
Your core function is efficient and safe assistance. Balance extreme conciseness with the crucial need for clarity, especially regarding safety and potential system modifications. Always prioritize user control and project conventions. Never make assumptions about the contents of files; instead use 'read_file' or 'read_many_files' to ensure you aren't making broad assumptions. Finally, you are an agent - please keep going until the user's query is completely resolved."
|
||||
Your core function is efficient and safe assistance. Balance extreme conciseness with the crucial need for clarity, especially regarding safety and potential system modifications. Always prioritize user control and project conventions. Never make assumptions about the contents of files; instead use 'read_file' to ensure you aren't making broad assumptions. Finally, you are an agent - please keep going until the user's query is completely resolved."
|
||||
`;
|
||||
|
||||
exports[`Core System Prompt (prompts.ts) > should return the base prompt when userMemory is whitespace only 1`] = `
|
||||
|
|
@ -1801,7 +1801,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
|
|||
## Software Engineering Tasks
|
||||
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
|
||||
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', and 'read_file' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
|
||||
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
|
||||
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
|
||||
|
|
@ -1922,7 +1922,7 @@ model:
|
|||
Okay, I can write those tests. First, I'll read someFile.ts to understand its functionality.
|
||||
[tool_call: read_file for path '/path/to/someFile.ts']
|
||||
Now I'll look for existing or related test files to understand current testing conventions and dependencies.
|
||||
[tool_call: read_many_files for paths ['**/*.test.ts', 'src/**/*.spec.ts']]
|
||||
[tool_call: read_file for path '/path/to/existingTest.test.ts']
|
||||
(After reviewing existing tests and the file content)
|
||||
[tool_call: write_file for path '/path/to/someFile.test.ts']
|
||||
I've written the tests. Now I'll run the project's test command to verify them.
|
||||
|
|
@ -1943,7 +1943,7 @@ To help you check their settings, I can read their contents. Which one would you
|
|||
</example>
|
||||
|
||||
# Final Reminder
|
||||
Your core function is efficient and safe assistance. Balance extreme conciseness with the crucial need for clarity, especially regarding safety and potential system modifications. Always prioritize user control and project conventions. Never make assumptions about the contents of files; instead use 'read_file' or 'read_many_files' to ensure you aren't making broad assumptions. Finally, you are an agent - please keep going until the user's query is completely resolved."
|
||||
Your core function is efficient and safe assistance. Balance extreme conciseness with the crucial need for clarity, especially regarding safety and potential system modifications. Always prioritize user control and project conventions. Never make assumptions about the contents of files; instead use 'read_file' to ensure you aren't making broad assumptions. Finally, you are an agent - please keep going until the user's query is completely resolved."
|
||||
`;
|
||||
|
||||
exports[`Model-specific tool call formats > should preserve model-specific formats with sandbox environment 1`] = `
|
||||
|
|
@ -2015,7 +2015,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
|
|||
## Software Engineering Tasks
|
||||
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
|
||||
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', and 'read_file' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
|
||||
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
|
||||
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
|
||||
|
|
@ -2152,7 +2152,7 @@ Okay, I can write those tests. First, I'll read someFile.ts to understand its fu
|
|||
</tool_call>
|
||||
Now I'll look for existing or related test files to understand current testing conventions and dependencies.
|
||||
<tool_call>
|
||||
{"name": "read_many_files", "arguments": {"paths": ["**/*.test.ts", "src/**/*.spec.ts"]}}
|
||||
{"name": "read_file", "arguments": {"path": "/path/to/existingTest.test.ts"}}
|
||||
</tool_call>
|
||||
(After reviewing existing tests and the file content)
|
||||
<tool_call>
|
||||
|
|
@ -2180,7 +2180,7 @@ To help you check their settings, I can read their contents. Which one would you
|
|||
</example>
|
||||
|
||||
# Final Reminder
|
||||
Your core function is efficient and safe assistance. Balance extreme conciseness with the crucial need for clarity, especially regarding safety and potential system modifications. Always prioritize user control and project conventions. Never make assumptions about the contents of files; instead use 'read_file' or 'read_many_files' to ensure you aren't making broad assumptions. Finally, you are an agent - please keep going until the user's query is completely resolved."
|
||||
Your core function is efficient and safe assistance. Balance extreme conciseness with the crucial need for clarity, especially regarding safety and potential system modifications. Always prioritize user control and project conventions. Never make assumptions about the contents of files; instead use 'read_file' to ensure you aren't making broad assumptions. Finally, you are an agent - please keep going until the user's query is completely resolved."
|
||||
`;
|
||||
|
||||
exports[`Model-specific tool call formats > should preserve model-specific formats with user memory 1`] = `
|
||||
|
|
@ -2252,7 +2252,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
|
|||
## Software Engineering Tasks
|
||||
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
|
||||
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', and 'read_file' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
|
||||
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
|
||||
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
|
||||
|
|
@ -2432,9 +2432,9 @@ Okay, I can write those tests. First, I'll read someFile.ts to understand its fu
|
|||
</tool_call>
|
||||
Now I'll look for existing or related test files to understand current testing conventions and dependencies.
|
||||
<tool_call>
|
||||
<function=read_many_files>
|
||||
<parameter=paths>
|
||||
['**/*.test.ts', 'src/**/*.spec.ts']
|
||||
<function=read_file>
|
||||
<parameter=path>
|
||||
/path/to/existingTest.test.ts
|
||||
</parameter>
|
||||
</function>
|
||||
</tool_call>
|
||||
|
|
@ -2476,7 +2476,7 @@ To help you check their settings, I can read their contents. Which one would you
|
|||
</example>
|
||||
|
||||
# Final Reminder
|
||||
Your core function is efficient and safe assistance. Balance extreme conciseness with the crucial need for clarity, especially regarding safety and potential system modifications. Always prioritize user control and project conventions. Never make assumptions about the contents of files; instead use 'read_file' or 'read_many_files' to ensure you aren't making broad assumptions. Finally, you are an agent - please keep going until the user's query is completely resolved.
|
||||
Your core function is efficient and safe assistance. Balance extreme conciseness with the crucial need for clarity, especially regarding safety and potential system modifications. Always prioritize user control and project conventions. Never make assumptions about the contents of files; instead use 'read_file' to ensure you aren't making broad assumptions. Finally, you are an agent - please keep going until the user's query is completely resolved.
|
||||
|
||||
---
|
||||
|
||||
|
|
@ -2552,7 +2552,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
|
|||
## Software Engineering Tasks
|
||||
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
|
||||
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', and 'read_file' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
|
||||
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
|
||||
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
|
||||
|
|
@ -2689,7 +2689,7 @@ Okay, I can write those tests. First, I'll read someFile.ts to understand its fu
|
|||
</tool_call>
|
||||
Now I'll look for existing or related test files to understand current testing conventions and dependencies.
|
||||
<tool_call>
|
||||
{"name": "read_many_files", "arguments": {"paths": ["**/*.test.ts", "src/**/*.spec.ts"]}}
|
||||
{"name": "read_file", "arguments": {"path": "/path/to/existingTest.test.ts"}}
|
||||
</tool_call>
|
||||
(After reviewing existing tests and the file content)
|
||||
<tool_call>
|
||||
|
|
@ -2717,7 +2717,7 @@ To help you check their settings, I can read their contents. Which one would you
|
|||
</example>
|
||||
|
||||
# Final Reminder
|
||||
Your core function is efficient and safe assistance. Balance extreme conciseness with the crucial need for clarity, especially regarding safety and potential system modifications. Always prioritize user control and project conventions. Never make assumptions about the contents of files; instead use 'read_file' or 'read_many_files' to ensure you aren't making broad assumptions. Finally, you are an agent - please keep going until the user's query is completely resolved."
|
||||
Your core function is efficient and safe assistance. Balance extreme conciseness with the crucial need for clarity, especially regarding safety and potential system modifications. Always prioritize user control and project conventions. Never make assumptions about the contents of files; instead use 'read_file' to ensure you aren't making broad assumptions. Finally, you are an agent - please keep going until the user's query is completely resolved."
|
||||
`;
|
||||
|
||||
exports[`Model-specific tool call formats > should use XML format for qwen3-coder model 1`] = `
|
||||
|
|
@ -2789,7 +2789,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
|
|||
## Software Engineering Tasks
|
||||
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
|
||||
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', and 'read_file' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
|
||||
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
|
||||
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
|
||||
|
|
@ -2969,9 +2969,9 @@ Okay, I can write those tests. First, I'll read someFile.ts to understand its fu
|
|||
</tool_call>
|
||||
Now I'll look for existing or related test files to understand current testing conventions and dependencies.
|
||||
<tool_call>
|
||||
<function=read_many_files>
|
||||
<parameter=paths>
|
||||
['**/*.test.ts', 'src/**/*.spec.ts']
|
||||
<function=read_file>
|
||||
<parameter=path>
|
||||
/path/to/existingTest.test.ts
|
||||
</parameter>
|
||||
</function>
|
||||
</tool_call>
|
||||
|
|
@ -3013,7 +3013,7 @@ To help you check their settings, I can read their contents. Which one would you
|
|||
</example>
|
||||
|
||||
# Final Reminder
|
||||
Your core function is efficient and safe assistance. Balance extreme conciseness with the crucial need for clarity, especially regarding safety and potential system modifications. Always prioritize user control and project conventions. Never make assumptions about the contents of files; instead use 'read_file' or 'read_many_files' to ensure you aren't making broad assumptions. Finally, you are an agent - please keep going until the user's query is completely resolved."
|
||||
Your core function is efficient and safe assistance. Balance extreme conciseness with the crucial need for clarity, especially regarding safety and potential system modifications. Always prioritize user control and project conventions. Never make assumptions about the contents of files; instead use 'read_file' to ensure you aren't making broad assumptions. Finally, you are an agent - please keep going until the user's query is completely resolved."
|
||||
`;
|
||||
|
||||
exports[`Model-specific tool call formats > should use bracket format for generic models 1`] = `
|
||||
|
|
@ -3085,7 +3085,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
|
|||
## Software Engineering Tasks
|
||||
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
|
||||
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', and 'read_file' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
|
||||
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
|
||||
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
|
||||
|
|
@ -3206,7 +3206,7 @@ model:
|
|||
Okay, I can write those tests. First, I'll read someFile.ts to understand its functionality.
|
||||
[tool_call: read_file for path '/path/to/someFile.ts']
|
||||
Now I'll look for existing or related test files to understand current testing conventions and dependencies.
|
||||
[tool_call: read_many_files for paths ['**/*.test.ts', 'src/**/*.spec.ts']]
|
||||
[tool_call: read_file for path '/path/to/existingTest.test.ts']
|
||||
(After reviewing existing tests and the file content)
|
||||
[tool_call: write_file for path '/path/to/someFile.test.ts']
|
||||
I've written the tests. Now I'll run the project's test command to verify them.
|
||||
|
|
@ -3227,7 +3227,7 @@ To help you check their settings, I can read their contents. Which one would you
|
|||
</example>
|
||||
|
||||
# Final Reminder
|
||||
Your core function is efficient and safe assistance. Balance extreme conciseness with the crucial need for clarity, especially regarding safety and potential system modifications. Always prioritize user control and project conventions. Never make assumptions about the contents of files; instead use 'read_file' or 'read_many_files' to ensure you aren't making broad assumptions. Finally, you are an agent - please keep going until the user's query is completely resolved."
|
||||
Your core function is efficient and safe assistance. Balance extreme conciseness with the crucial need for clarity, especially regarding safety and potential system modifications. Always prioritize user control and project conventions. Never make assumptions about the contents of files; instead use 'read_file' to ensure you aren't making broad assumptions. Finally, you are an agent - please keep going until the user's query is completely resolved."
|
||||
`;
|
||||
|
||||
exports[`Model-specific tool call formats > should use bracket format when no model is specified 1`] = `
|
||||
|
|
@ -3299,7 +3299,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
|
|||
## Software Engineering Tasks
|
||||
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
|
||||
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the 'todo_write' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', 'read_file', and 'read_many_files' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use 'grep_search', 'glob', and 'read_file' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., 'edit', 'write_file' 'run_shell_command' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
|
||||
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
|
||||
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
|
||||
|
|
@ -3420,7 +3420,7 @@ model:
|
|||
Okay, I can write those tests. First, I'll read someFile.ts to understand its functionality.
|
||||
[tool_call: read_file for path '/path/to/someFile.ts']
|
||||
Now I'll look for existing or related test files to understand current testing conventions and dependencies.
|
||||
[tool_call: read_many_files for paths ['**/*.test.ts', 'src/**/*.spec.ts']]
|
||||
[tool_call: read_file for path '/path/to/existingTest.test.ts']
|
||||
(After reviewing existing tests and the file content)
|
||||
[tool_call: write_file for path '/path/to/someFile.test.ts']
|
||||
I've written the tests. Now I'll run the project's test command to verify them.
|
||||
|
|
@ -3441,5 +3441,5 @@ To help you check their settings, I can read their contents. Which one would you
|
|||
</example>
|
||||
|
||||
# Final Reminder
|
||||
Your core function is efficient and safe assistance. Balance extreme conciseness with the crucial need for clarity, especially regarding safety and potential system modifications. Always prioritize user control and project conventions. Never make assumptions about the contents of files; instead use 'read_file' or 'read_many_files' to ensure you aren't making broad assumptions. Finally, you are an agent - please keep going until the user's query is completely resolved."
|
||||
Your core function is efficient and safe assistance. Balance extreme conciseness with the crucial need for clarity, especially regarding safety and potential system modifications. Always prioritize user control and project conventions. Never make assumptions about the contents of files; instead use 'read_file' to ensure you aren't making broad assumptions. Finally, you are an agent - please keep going until the user's query is completely resolved."
|
||||
`;
|
||||
|
|
|
|||
|
|
@ -203,7 +203,7 @@ I've found some existing telemetry code. Let me mark the first todo as in_progre
|
|||
## Software Engineering Tasks
|
||||
When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this iterative approach:
|
||||
- **Plan:** After understanding the user's request, create an initial plan based on your existing knowledge and any immediately obvious context. Use the '${ToolNames.TODO_WRITE}' tool to capture this rough plan for complex or multi-step work. Don't wait for complete understanding - start with what you know.
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use '${ToolNames.GREP}', '${ToolNames.GLOB}', '${ToolNames.READ_FILE}', and '${ToolNames.READ_MANY_FILES}' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., '${ToolNames.EDIT}', '${ToolNames.WRITE_FILE}' '${ToolNames.SHELL}' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Implement:** Begin implementing the plan while gathering additional context as needed. Use '${ToolNames.GREP}', '${ToolNames.GLOB}', and '${ToolNames.READ_FILE}' tools strategically when you encounter specific unknowns during implementation. Use the available tools (e.g., '${ToolNames.EDIT}', '${ToolNames.WRITE_FILE}' '${ToolNames.SHELL}' ...) to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates').
|
||||
- **Adapt:** As you discover new information or encounter obstacles, update your plan and todos accordingly. Mark todos as in_progress when starting and completed when finishing each task. Add new todos if the scope expands. Refine your approach based on what you learn.
|
||||
- **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'package.json'), or existing test execution patterns. NEVER assume standard test commands.
|
||||
- **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (e.g., 'tsc', 'npm run lint', 'ruff check .') that you have identified for this project (or obtained from the user). This ensures code quality and adherence to standards. If unsure about these commands, you can ask the user if they'd like you to run them and if so how to.
|
||||
|
|
@ -311,7 +311,7 @@ ${(function () {
|
|||
${getToolCallExamples(model || '')}
|
||||
|
||||
# Final Reminder
|
||||
Your core function is efficient and safe assistance. Balance extreme conciseness with the crucial need for clarity, especially regarding safety and potential system modifications. Always prioritize user control and project conventions. Never make assumptions about the contents of files; instead use '${ToolNames.READ_FILE}' or '${ToolNames.READ_MANY_FILES}' to ensure you aren't making broad assumptions. Finally, you are an agent - please keep going until the user's query is completely resolved.
|
||||
Your core function is efficient and safe assistance. Balance extreme conciseness with the crucial need for clarity, especially regarding safety and potential system modifications. Always prioritize user control and project conventions. Never make assumptions about the contents of files; instead use '${ToolNames.READ_FILE}' to ensure you aren't making broad assumptions. Finally, you are an agent - please keep going until the user's query is completely resolved.
|
||||
`.trim();
|
||||
|
||||
// if QWEN_WRITE_SYSTEM_MD is set (and not 0|false), write base system prompt to file
|
||||
|
|
@ -488,7 +488,7 @@ model:
|
|||
Okay, I can write those tests. First, I'll read someFile.ts to understand its functionality.
|
||||
[tool_call: ${ToolNames.READ_FILE} for path '/path/to/someFile.ts']
|
||||
Now I'll look for existing or related test files to understand current testing conventions and dependencies.
|
||||
[tool_call: ${ToolNames.READ_MANY_FILES} for paths ['**/*.test.ts', 'src/**/*.spec.ts']]
|
||||
[tool_call: ${ToolNames.READ_FILE} for path '/path/to/existingTest.test.ts']
|
||||
(After reviewing existing tests and the file content)
|
||||
[tool_call: ${ToolNames.WRITE_FILE} for path '/path/to/someFile.test.ts']
|
||||
I've written the tests. Now I'll run the project's test command to verify them.
|
||||
|
|
@ -623,9 +623,9 @@ Okay, I can write those tests. First, I'll read someFile.ts to understand its fu
|
|||
</tool_call>
|
||||
Now I'll look for existing or related test files to understand current testing conventions and dependencies.
|
||||
<tool_call>
|
||||
<function=${ToolNames.READ_MANY_FILES}>
|
||||
<parameter=paths>
|
||||
['**/*.test.ts', 'src/**/*.spec.ts']
|
||||
<function=${ToolNames.READ_FILE}>
|
||||
<parameter=path>
|
||||
/path/to/existingTest.test.ts
|
||||
</parameter>
|
||||
</function>
|
||||
</tool_call>
|
||||
|
|
@ -737,7 +737,7 @@ Okay, I can write those tests. First, I'll read someFile.ts to understand its fu
|
|||
</tool_call>
|
||||
Now I'll look for existing or related test files to understand current testing conventions and dependencies.
|
||||
<tool_call>
|
||||
{"name": "${ToolNames.READ_MANY_FILES}", "arguments": {"paths": ["**/*.test.ts", "src/**/*.spec.ts"]}}
|
||||
{"name": "${ToolNames.READ_FILE}", "arguments": {"path": "/path/to/existingTest.test.ts"}}
|
||||
</tool_call>
|
||||
(After reviewing existing tests and the file content)
|
||||
<tool_call>
|
||||
|
|
|
|||
|
|
@ -80,7 +80,6 @@ export * from './tools/mcp-client.js';
|
|||
export * from './tools/mcp-client-manager.js';
|
||||
export * from './tools/mcp-tool.js';
|
||||
export * from './tools/read-file.js';
|
||||
export * from './tools/read-many-files.js';
|
||||
export * from './tools/ripGrep.js';
|
||||
export * from './tools/sdk-control-client-transport.js';
|
||||
export * from './tools/shell.js';
|
||||
|
|
@ -207,6 +206,7 @@ export * from './utils/paths.js';
|
|||
export * from './utils/promptIdContext.js';
|
||||
export * from './utils/projectSummary.js';
|
||||
export * from './utils/quotaErrorDetection.js';
|
||||
export * from './utils/readManyFiles.js';
|
||||
export * from './utils/request-tokenizer/supportedImageFormats.js';
|
||||
export * from './utils/retry.js';
|
||||
export * from './utils/ripgrepUtils.js';
|
||||
|
|
|
|||
|
|
@ -13,6 +13,7 @@ import type { Config } from '../config/config.js';
|
|||
import {
|
||||
ChatRecordingService,
|
||||
type ChatRecord,
|
||||
type AtCommandRecordPayload,
|
||||
} from './chatRecordingService.js';
|
||||
import * as jsonl from '../utils/jsonl-utils.js';
|
||||
import type { Part } from '@google/genai';
|
||||
|
|
@ -131,6 +132,33 @@ describe('ChatRecordingService', () => {
|
|||
});
|
||||
});
|
||||
|
||||
describe('recordAtCommand', () => {
|
||||
it('should record @-command metadata as a system payload', () => {
|
||||
const userParts: Part[] = [{ text: 'Hello, world!' }];
|
||||
const payload: AtCommandRecordPayload = {
|
||||
filesRead: ['foo.txt'],
|
||||
status: 'success',
|
||||
message: 'Success',
|
||||
userText: '@foo.txt',
|
||||
};
|
||||
|
||||
chatRecordingService.recordUserMessage(userParts);
|
||||
chatRecordingService.recordAtCommand(payload);
|
||||
|
||||
expect(jsonl.writeLineSync).toHaveBeenCalledTimes(2);
|
||||
const userRecord = vi.mocked(jsonl.writeLineSync).mock
|
||||
.calls[0][1] as ChatRecord;
|
||||
const systemRecord = vi.mocked(jsonl.writeLineSync).mock
|
||||
.calls[1][1] as ChatRecord;
|
||||
|
||||
expect(userRecord.type).toBe('user');
|
||||
expect(systemRecord.type).toBe('system');
|
||||
expect(systemRecord.subtype).toBe('at_command');
|
||||
expect(systemRecord.systemPayload).toEqual(payload);
|
||||
expect(systemRecord.parentUuid).toBe(userRecord.uuid);
|
||||
});
|
||||
});
|
||||
|
||||
describe('recordAssistantTurn', () => {
|
||||
it('should record assistant turn with content only', () => {
|
||||
const parts: Part[] = [{ text: 'Hello!' }];
|
||||
|
|
|
|||
|
|
@ -50,7 +50,11 @@ export interface ChatRecord {
|
|||
*/
|
||||
type: 'user' | 'assistant' | 'tool_result' | 'system';
|
||||
/** Optional system subtype for distinguishing system behaviors */
|
||||
subtype?: 'chat_compression' | 'slash_command' | 'ui_telemetry';
|
||||
subtype?:
|
||||
| 'chat_compression'
|
||||
| 'slash_command'
|
||||
| 'ui_telemetry'
|
||||
| 'at_command';
|
||||
/** Working directory at time of message */
|
||||
cwd: string;
|
||||
/** CLI version for compatibility tracking */
|
||||
|
|
@ -87,7 +91,8 @@ export interface ChatRecord {
|
|||
systemPayload?:
|
||||
| ChatCompressionRecordPayload
|
||||
| SlashCommandRecordPayload
|
||||
| UiTelemetryRecordPayload;
|
||||
| UiTelemetryRecordPayload
|
||||
| AtCommandRecordPayload;
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@ -117,6 +122,20 @@ export interface SlashCommandRecordPayload {
|
|||
outputHistoryItems?: Array<Record<string, unknown>>;
|
||||
}
|
||||
|
||||
/**
|
||||
* Stored payload for @-command replay.
|
||||
*/
|
||||
export interface AtCommandRecordPayload {
|
||||
/** Files that were read for this @-command. */
|
||||
filesRead: string[];
|
||||
/** Status for UI reconstruction. */
|
||||
status: 'success' | 'error';
|
||||
/** Optional result message for UI reconstruction. */
|
||||
message?: string;
|
||||
/** Raw user-entered @-command query (optional for legacy records). */
|
||||
userText?: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* Stored payload for UI telemetry replay.
|
||||
*/
|
||||
|
|
@ -405,4 +424,22 @@ export class ChatRecordingService {
|
|||
console.error('Error saving ui telemetry record:', error);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Records @-command metadata as a system record for UI reconstruction.
|
||||
*/
|
||||
recordAtCommand(payload: AtCommandRecordPayload): void {
|
||||
try {
|
||||
const record: ChatRecord = {
|
||||
...this.createBaseRecord('system'),
|
||||
type: 'system',
|
||||
subtype: 'at_command',
|
||||
systemPayload: payload,
|
||||
};
|
||||
|
||||
this.appendRecord(record);
|
||||
} catch (error) {
|
||||
console.error('Error saving @-command record:', error);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,759 +0,0 @@
|
|||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import { vi, describe, it, expect, beforeEach, afterEach } from 'vitest';
|
||||
import type { Mock } from 'vitest';
|
||||
import { mockControl } from '../__mocks__/fs/promises.js';
|
||||
import { ReadManyFilesTool } from './read-many-files.js';
|
||||
import { FileDiscoveryService } from '../services/fileDiscoveryService.js';
|
||||
import path from 'node:path';
|
||||
import fs from 'node:fs'; // Actual fs for setup
|
||||
import os from 'node:os';
|
||||
import type { Config } from '../config/config.js';
|
||||
import { WorkspaceContext } from '../utils/workspaceContext.js';
|
||||
import { StandardFileSystemService } from '../services/fileSystemService.js';
|
||||
import { ToolErrorType } from './tool-error.js';
|
||||
import {
|
||||
COMMON_IGNORE_PATTERNS,
|
||||
DEFAULT_FILE_EXCLUDES,
|
||||
} from '../utils/ignorePatterns.js';
|
||||
import * as glob from 'glob';
|
||||
|
||||
vi.mock('glob', { spy: true });
|
||||
|
||||
vi.mock('mime', () => {
|
||||
const getType = (filename: string) => {
|
||||
if (filename.endsWith('.ts') || filename.endsWith('.js')) {
|
||||
return 'text/plain';
|
||||
}
|
||||
if (filename.endsWith('.png')) {
|
||||
return 'image/png';
|
||||
}
|
||||
if (filename.endsWith('.pdf')) {
|
||||
return 'application/pdf';
|
||||
}
|
||||
if (filename.endsWith('.mp3') || filename.endsWith('.wav')) {
|
||||
return 'audio/mpeg';
|
||||
}
|
||||
if (filename.endsWith('.mp4') || filename.endsWith('.mov')) {
|
||||
return 'video/mp4';
|
||||
}
|
||||
return false;
|
||||
};
|
||||
return {
|
||||
default: {
|
||||
getType,
|
||||
},
|
||||
getType,
|
||||
};
|
||||
});
|
||||
|
||||
vi.mock('../telemetry/loggers.js', () => ({
|
||||
logFileOperation: vi.fn(),
|
||||
}));
|
||||
|
||||
describe('ReadManyFilesTool', () => {
|
||||
let tool: ReadManyFilesTool;
|
||||
let tempRootDir: string;
|
||||
let tempDirOutsideRoot: string;
|
||||
let mockReadFileFn: Mock;
|
||||
|
||||
beforeEach(async () => {
|
||||
tempRootDir = fs.realpathSync(
|
||||
fs.mkdtempSync(path.join(os.tmpdir(), 'read-many-files-root-')),
|
||||
);
|
||||
tempDirOutsideRoot = fs.realpathSync(
|
||||
fs.mkdtempSync(path.join(os.tmpdir(), 'read-many-files-external-')),
|
||||
);
|
||||
fs.writeFileSync(path.join(tempRootDir, '.qwenignore'), 'foo.*');
|
||||
const fileService = new FileDiscoveryService(tempRootDir);
|
||||
const mockConfig = {
|
||||
getFileService: () => fileService,
|
||||
getFileSystemService: () => new StandardFileSystemService(),
|
||||
|
||||
getFileFilteringOptions: () => ({
|
||||
respectGitIgnore: true,
|
||||
respectQwenIgnore: true,
|
||||
}),
|
||||
getTargetDir: () => tempRootDir,
|
||||
getWorkspaceDirs: () => [tempRootDir],
|
||||
getWorkspaceContext: () => new WorkspaceContext(tempRootDir),
|
||||
getFileExclusions: () => ({
|
||||
getCoreIgnorePatterns: () => COMMON_IGNORE_PATTERNS,
|
||||
getDefaultExcludePatterns: () => DEFAULT_FILE_EXCLUDES,
|
||||
getGlobExcludes: () => COMMON_IGNORE_PATTERNS,
|
||||
buildExcludePatterns: () => DEFAULT_FILE_EXCLUDES,
|
||||
getReadManyFilesExcludes: () => DEFAULT_FILE_EXCLUDES,
|
||||
}),
|
||||
getTruncateToolOutputThreshold: () => 2500,
|
||||
getTruncateToolOutputLines: () => 500,
|
||||
} as Partial<Config> as Config;
|
||||
tool = new ReadManyFilesTool(mockConfig);
|
||||
|
||||
mockReadFileFn = mockControl.mockReadFile;
|
||||
mockReadFileFn.mockReset();
|
||||
|
||||
mockReadFileFn.mockImplementation(
|
||||
async (filePath: fs.PathLike, options?: Record<string, unknown>) => {
|
||||
const fp =
|
||||
typeof filePath === 'string'
|
||||
? filePath
|
||||
: (filePath as Buffer).toString();
|
||||
|
||||
if (fs.existsSync(fp)) {
|
||||
const originalFs = await vi.importActual<typeof fs>('fs');
|
||||
return originalFs.promises.readFile(fp, options);
|
||||
}
|
||||
|
||||
if (fp.endsWith('nonexistent-file.txt')) {
|
||||
const err = new Error(
|
||||
`ENOENT: no such file or directory, open '${fp}'`,
|
||||
);
|
||||
(err as NodeJS.ErrnoException).code = 'ENOENT';
|
||||
throw err;
|
||||
}
|
||||
if (fp.endsWith('unreadable.txt')) {
|
||||
const err = new Error(`EACCES: permission denied, open '${fp}'`);
|
||||
(err as NodeJS.ErrnoException).code = 'EACCES';
|
||||
throw err;
|
||||
}
|
||||
if (fp.endsWith('.png'))
|
||||
return Buffer.from([0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a]); // PNG header
|
||||
if (fp.endsWith('.pdf')) return Buffer.from('%PDF-1.4...'); // PDF start
|
||||
if (fp.endsWith('binary.bin'))
|
||||
return Buffer.from([0x00, 0x01, 0x02, 0x00, 0x03]);
|
||||
|
||||
const err = new Error(
|
||||
`ENOENT: no such file or directory, open '${fp}' (unmocked path)`,
|
||||
);
|
||||
(err as NodeJS.ErrnoException).code = 'ENOENT';
|
||||
throw err;
|
||||
},
|
||||
);
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
if (fs.existsSync(tempRootDir)) {
|
||||
fs.rmSync(tempRootDir, { recursive: true, force: true });
|
||||
}
|
||||
if (fs.existsSync(tempDirOutsideRoot)) {
|
||||
fs.rmSync(tempDirOutsideRoot, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
describe('build', () => {
|
||||
it('should return an invocation for valid relative paths within root', () => {
|
||||
const params = { paths: ['file1.txt', 'subdir/file2.txt'] };
|
||||
const invocation = tool.build(params);
|
||||
expect(invocation).toBeDefined();
|
||||
});
|
||||
|
||||
it('should return an invocation for valid glob patterns within root', () => {
|
||||
const params = { paths: ['*.txt', 'subdir/**/*.js'] };
|
||||
const invocation = tool.build(params);
|
||||
expect(invocation).toBeDefined();
|
||||
});
|
||||
|
||||
it('should return an invocation for paths trying to escape the root (e.g., ../) as execute handles this', () => {
|
||||
const params = { paths: ['../outside.txt'] };
|
||||
const invocation = tool.build(params);
|
||||
expect(invocation).toBeDefined();
|
||||
});
|
||||
|
||||
it('should return an invocation for absolute paths as execute handles this', () => {
|
||||
const params = { paths: [path.join(tempDirOutsideRoot, 'absolute.txt')] };
|
||||
const invocation = tool.build(params);
|
||||
expect(invocation).toBeDefined();
|
||||
});
|
||||
|
||||
it('should throw error if paths array is empty', () => {
|
||||
const params = { paths: [] };
|
||||
expect(() => tool.build(params)).toThrow(
|
||||
'params/paths must NOT have fewer than 1 items',
|
||||
);
|
||||
});
|
||||
|
||||
it('should return an invocation for valid exclude and include patterns', () => {
|
||||
const params = {
|
||||
paths: ['src/**/*.ts'],
|
||||
exclude: ['**/*.test.ts'],
|
||||
include: ['src/utils/*.ts'],
|
||||
};
|
||||
const invocation = tool.build(params);
|
||||
expect(invocation).toBeDefined();
|
||||
});
|
||||
|
||||
it('should throw error if paths array contains an empty string', () => {
|
||||
const params = { paths: ['file1.txt', ''] };
|
||||
expect(() => tool.build(params)).toThrow(
|
||||
'params/paths/1 must NOT have fewer than 1 characters',
|
||||
);
|
||||
});
|
||||
|
||||
it('should coerce non-string elements in include array', () => {
|
||||
const params = {
|
||||
paths: ['file1.txt'],
|
||||
include: ['*.ts', 123] as string[],
|
||||
};
|
||||
expect(() => tool.build(params)).toBeDefined();
|
||||
});
|
||||
|
||||
it('should throw error if exclude array contains non-string elements', () => {
|
||||
const params = {
|
||||
paths: ['file1.txt'],
|
||||
exclude: ['*.log', {}] as string[],
|
||||
};
|
||||
expect(() => tool.build(params)).toThrow(
|
||||
'params/exclude/1 must be string',
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe('execute', () => {
|
||||
const createFile = (filePath: string, content = '') => {
|
||||
const fullPath = path.join(tempRootDir, filePath);
|
||||
fs.mkdirSync(path.dirname(fullPath), { recursive: true });
|
||||
fs.writeFileSync(fullPath, content);
|
||||
};
|
||||
const createBinaryFile = (filePath: string, data: Uint8Array) => {
|
||||
const fullPath = path.join(tempRootDir, filePath);
|
||||
fs.mkdirSync(path.dirname(fullPath), { recursive: true });
|
||||
fs.writeFileSync(fullPath, data);
|
||||
};
|
||||
|
||||
it('should read a single specified file', async () => {
|
||||
createFile('file1.txt', 'Content of file1');
|
||||
const params = { paths: ['file1.txt'] };
|
||||
const invocation = tool.build(params);
|
||||
const result = await invocation.execute(new AbortController().signal);
|
||||
const expectedPath = path.join(tempRootDir, 'file1.txt');
|
||||
expect(result.llmContent).toEqual([
|
||||
`--- ${expectedPath} ---\n\nContent of file1\n\n`,
|
||||
`\n--- End of content ---`,
|
||||
]);
|
||||
expect(result.returnDisplay).toContain(
|
||||
'Successfully read and concatenated content from **1 file(s)**',
|
||||
);
|
||||
});
|
||||
|
||||
it('should read multiple specified files', async () => {
|
||||
createFile('file1.txt', 'Content1');
|
||||
createFile('subdir/file2.js', 'Content2');
|
||||
const params = { paths: ['file1.txt', 'subdir/file2.js'] };
|
||||
const invocation = tool.build(params);
|
||||
const result = await invocation.execute(new AbortController().signal);
|
||||
const content = result.llmContent as string[];
|
||||
const expectedPath1 = path.join(tempRootDir, 'file1.txt');
|
||||
const expectedPath2 = path.join(tempRootDir, 'subdir/file2.js');
|
||||
expect(
|
||||
content.some((c) =>
|
||||
c.includes(`--- ${expectedPath1} ---\n\nContent1\n\n`),
|
||||
),
|
||||
).toBe(true);
|
||||
expect(
|
||||
content.some((c) =>
|
||||
c.includes(`--- ${expectedPath2} ---\n\nContent2\n\n`),
|
||||
),
|
||||
).toBe(true);
|
||||
expect(result.returnDisplay).toContain(
|
||||
'Successfully read and concatenated content from **2 file(s)**',
|
||||
);
|
||||
});
|
||||
|
||||
it('should handle glob patterns', async () => {
|
||||
createFile('file.txt', 'Text file');
|
||||
createFile('another.txt', 'Another text');
|
||||
createFile('sub/data.json', '{}');
|
||||
const params = { paths: ['*.txt'] };
|
||||
const invocation = tool.build(params);
|
||||
const result = await invocation.execute(new AbortController().signal);
|
||||
const content = result.llmContent as string[];
|
||||
const expectedPath1 = path.join(tempRootDir, 'file.txt');
|
||||
const expectedPath2 = path.join(tempRootDir, 'another.txt');
|
||||
expect(
|
||||
content.some((c) =>
|
||||
c.includes(`--- ${expectedPath1} ---\n\nText file\n\n`),
|
||||
),
|
||||
).toBe(true);
|
||||
expect(
|
||||
content.some((c) =>
|
||||
c.includes(`--- ${expectedPath2} ---\n\nAnother text\n\n`),
|
||||
),
|
||||
).toBe(true);
|
||||
expect(content.find((c) => c.includes('sub/data.json'))).toBeUndefined();
|
||||
expect(result.returnDisplay).toContain(
|
||||
'Successfully read and concatenated content from **2 file(s)**',
|
||||
);
|
||||
});
|
||||
|
||||
it('should respect exclude patterns', async () => {
|
||||
createFile('src/main.ts', 'Main content');
|
||||
createFile('src/main.test.ts', 'Test content');
|
||||
const params = { paths: ['src/**/*.ts'], exclude: ['**/*.test.ts'] };
|
||||
const invocation = tool.build(params);
|
||||
const result = await invocation.execute(new AbortController().signal);
|
||||
const content = result.llmContent as string[];
|
||||
const expectedPath = path.join(tempRootDir, 'src/main.ts');
|
||||
expect(content).toEqual([
|
||||
`--- ${expectedPath} ---\n\nMain content\n\n`,
|
||||
`\n--- End of content ---`,
|
||||
]);
|
||||
expect(
|
||||
content.find((c) => c.includes('src/main.test.ts')),
|
||||
).toBeUndefined();
|
||||
expect(result.returnDisplay).toContain(
|
||||
'Successfully read and concatenated content from **1 file(s)**',
|
||||
);
|
||||
});
|
||||
|
||||
it('should handle nonexistent specific files gracefully', async () => {
|
||||
const params = { paths: ['nonexistent-file.txt'] };
|
||||
const invocation = tool.build(params);
|
||||
const result = await invocation.execute(new AbortController().signal);
|
||||
expect(result.llmContent).toEqual([
|
||||
'No files matching the criteria were found or all were skipped.',
|
||||
]);
|
||||
expect(result.returnDisplay).toContain(
|
||||
'No files were read and concatenated based on the criteria.',
|
||||
);
|
||||
});
|
||||
|
||||
it('should use default excludes', async () => {
|
||||
createFile('node_modules/some-lib/index.js', 'lib code');
|
||||
createFile('src/app.js', 'app code');
|
||||
const params = { paths: ['**/*.js'] };
|
||||
const invocation = tool.build(params);
|
||||
const result = await invocation.execute(new AbortController().signal);
|
||||
const content = result.llmContent as string[];
|
||||
const expectedPath = path.join(tempRootDir, 'src/app.js');
|
||||
expect(content).toEqual([
|
||||
`--- ${expectedPath} ---\n\napp code\n\n`,
|
||||
`\n--- End of content ---`,
|
||||
]);
|
||||
expect(
|
||||
content.find((c) => c.includes('node_modules/some-lib/index.js')),
|
||||
).toBeUndefined();
|
||||
expect(result.returnDisplay).toContain(
|
||||
'Successfully read and concatenated content from **1 file(s)**',
|
||||
);
|
||||
});
|
||||
|
||||
it('should NOT use default excludes if useDefaultExcludes is false', async () => {
|
||||
createFile('node_modules/some-lib/index.js', 'lib code');
|
||||
createFile('src/app.js', 'app code');
|
||||
const params = { paths: ['**/*.js'], useDefaultExcludes: false };
|
||||
const invocation = tool.build(params);
|
||||
const result = await invocation.execute(new AbortController().signal);
|
||||
const content = result.llmContent as string[];
|
||||
const expectedPath1 = path.join(
|
||||
tempRootDir,
|
||||
'node_modules/some-lib/index.js',
|
||||
);
|
||||
const expectedPath2 = path.join(tempRootDir, 'src/app.js');
|
||||
expect(
|
||||
content.some((c) =>
|
||||
c.includes(`--- ${expectedPath1} ---\n\nlib code\n\n`),
|
||||
),
|
||||
).toBe(true);
|
||||
expect(
|
||||
content.some((c) =>
|
||||
c.includes(`--- ${expectedPath2} ---\n\napp code\n\n`),
|
||||
),
|
||||
).toBe(true);
|
||||
expect(result.returnDisplay).toContain(
|
||||
'Successfully read and concatenated content from **2 file(s)**',
|
||||
);
|
||||
});
|
||||
|
||||
it('should include images as inlineData parts if explicitly requested by extension', async () => {
|
||||
createBinaryFile(
|
||||
'image.png',
|
||||
Buffer.from([0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a]),
|
||||
);
|
||||
const params = { paths: ['*.png'] }; // Explicitly requesting .png
|
||||
const invocation = tool.build(params);
|
||||
const result = await invocation.execute(new AbortController().signal);
|
||||
expect(result.llmContent).toEqual([
|
||||
{
|
||||
inlineData: {
|
||||
data: Buffer.from([
|
||||
0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a,
|
||||
]).toString('base64'),
|
||||
mimeType: 'image/png',
|
||||
displayName: 'image.png',
|
||||
},
|
||||
},
|
||||
'\n--- End of content ---',
|
||||
]);
|
||||
expect(result.returnDisplay).toContain(
|
||||
'Successfully read and concatenated content from **1 file(s)**',
|
||||
);
|
||||
});
|
||||
|
||||
it('should include images as inlineData parts if explicitly requested by name', async () => {
|
||||
createBinaryFile(
|
||||
'myExactImage.png',
|
||||
Buffer.from([0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a]),
|
||||
);
|
||||
const params = { paths: ['myExactImage.png'] }; // Explicitly requesting by full name
|
||||
const invocation = tool.build(params);
|
||||
const result = await invocation.execute(new AbortController().signal);
|
||||
expect(result.llmContent).toEqual([
|
||||
{
|
||||
inlineData: {
|
||||
data: Buffer.from([
|
||||
0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a,
|
||||
]).toString('base64'),
|
||||
mimeType: 'image/png',
|
||||
displayName: 'myExactImage.png',
|
||||
},
|
||||
},
|
||||
'\n--- End of content ---',
|
||||
]);
|
||||
});
|
||||
|
||||
it('should skip PDF files if not explicitly requested by extension or name', async () => {
|
||||
createBinaryFile('document.pdf', Buffer.from('%PDF-1.4...'));
|
||||
createFile('notes.txt', 'text notes');
|
||||
const params = { paths: ['*'] }; // Generic glob, not specific to .pdf
|
||||
const invocation = tool.build(params);
|
||||
const result = await invocation.execute(new AbortController().signal);
|
||||
const content = result.llmContent as string[];
|
||||
const expectedPath = path.join(tempRootDir, 'notes.txt');
|
||||
expect(
|
||||
content.some(
|
||||
(c) =>
|
||||
typeof c === 'string' &&
|
||||
c.includes(`--- ${expectedPath} ---\n\ntext notes\n\n`),
|
||||
),
|
||||
).toBe(true);
|
||||
expect(result.returnDisplay).toContain('**Skipped 1 item(s):**');
|
||||
expect(result.returnDisplay).toContain(
|
||||
'- `document.pdf` (Reason: asset file (image/pdf) was not explicitly requested by name or extension)',
|
||||
);
|
||||
});
|
||||
|
||||
it('should include PDF files as inlineData parts if explicitly requested by extension', async () => {
|
||||
createBinaryFile('important.pdf', Buffer.from('%PDF-1.4...'));
|
||||
const params = { paths: ['*.pdf'] }; // Explicitly requesting .pdf files
|
||||
const invocation = tool.build(params);
|
||||
const result = await invocation.execute(new AbortController().signal);
|
||||
expect(result.llmContent).toEqual([
|
||||
{
|
||||
inlineData: {
|
||||
data: Buffer.from('%PDF-1.4...').toString('base64'),
|
||||
mimeType: 'application/pdf',
|
||||
displayName: 'important.pdf',
|
||||
},
|
||||
},
|
||||
'\n--- End of content ---',
|
||||
]);
|
||||
});
|
||||
|
||||
it('should include PDF files as inlineData parts if explicitly requested by name', async () => {
|
||||
createBinaryFile('report-final.pdf', Buffer.from('%PDF-1.4...'));
|
||||
const params = { paths: ['report-final.pdf'] };
|
||||
const invocation = tool.build(params);
|
||||
const result = await invocation.execute(new AbortController().signal);
|
||||
expect(result.llmContent).toEqual([
|
||||
{
|
||||
inlineData: {
|
||||
data: Buffer.from('%PDF-1.4...').toString('base64'),
|
||||
mimeType: 'application/pdf',
|
||||
displayName: 'report-final.pdf',
|
||||
},
|
||||
},
|
||||
'\n--- End of content ---',
|
||||
]);
|
||||
});
|
||||
|
||||
it('should return error if path is ignored by a .qwenignore pattern', async () => {
|
||||
createFile('foo.bar', '');
|
||||
createFile('bar.ts', '');
|
||||
createFile('foo.quux', '');
|
||||
const params = { paths: ['foo.bar', 'bar.ts', 'foo.quux'] };
|
||||
const invocation = tool.build(params);
|
||||
const result = await invocation.execute(new AbortController().signal);
|
||||
expect(result.returnDisplay).not.toContain('foo.bar');
|
||||
expect(result.returnDisplay).not.toContain('foo.quux');
|
||||
expect(result.returnDisplay).toContain('bar.ts');
|
||||
});
|
||||
|
||||
it('should read files from multiple workspace directories', async () => {
|
||||
const tempDir1 = fs.realpathSync(
|
||||
fs.mkdtempSync(path.join(os.tmpdir(), 'multi-dir-1-')),
|
||||
);
|
||||
const tempDir2 = fs.realpathSync(
|
||||
fs.mkdtempSync(path.join(os.tmpdir(), 'multi-dir-2-')),
|
||||
);
|
||||
const fileService = new FileDiscoveryService(tempDir1);
|
||||
const mockConfig = {
|
||||
getFileService: () => fileService,
|
||||
getFileSystemService: () => new StandardFileSystemService(),
|
||||
getFileFilteringOptions: () => ({
|
||||
respectGitIgnore: true,
|
||||
respectQwenIgnore: true,
|
||||
}),
|
||||
getWorkspaceContext: () => new WorkspaceContext(tempDir1, [tempDir2]),
|
||||
getTargetDir: () => tempDir1,
|
||||
getFileExclusions: () => ({
|
||||
getCoreIgnorePatterns: () => COMMON_IGNORE_PATTERNS,
|
||||
getDefaultExcludePatterns: () => [],
|
||||
getGlobExcludes: () => COMMON_IGNORE_PATTERNS,
|
||||
buildExcludePatterns: () => [],
|
||||
getReadManyFilesExcludes: () => [],
|
||||
}),
|
||||
getTruncateToolOutputThreshold: () => 2500,
|
||||
getTruncateToolOutputLines: () => 500,
|
||||
} as Partial<Config> as Config;
|
||||
tool = new ReadManyFilesTool(mockConfig);
|
||||
|
||||
fs.writeFileSync(path.join(tempDir1, 'file1.txt'), 'Content1');
|
||||
fs.writeFileSync(path.join(tempDir2, 'file2.txt'), 'Content2');
|
||||
|
||||
const params = { paths: ['*.txt'] };
|
||||
const invocation = tool.build(params);
|
||||
const result = await invocation.execute(new AbortController().signal);
|
||||
const content = result.llmContent as string[];
|
||||
if (!Array.isArray(content)) {
|
||||
throw new Error(`llmContent is not an array: ${content}`);
|
||||
}
|
||||
const expectedPath1 = path.join(tempDir1, 'file1.txt');
|
||||
const expectedPath2 = path.join(tempDir2, 'file2.txt');
|
||||
|
||||
expect(
|
||||
content.some((c) =>
|
||||
c.includes(`--- ${expectedPath1} ---\n\nContent1\n\n`),
|
||||
),
|
||||
).toBe(true);
|
||||
expect(
|
||||
content.some((c) =>
|
||||
c.includes(`--- ${expectedPath2} ---\n\nContent2\n\n`),
|
||||
),
|
||||
).toBe(true);
|
||||
expect(result.returnDisplay).toContain(
|
||||
'Successfully read and concatenated content from **2 file(s)**',
|
||||
);
|
||||
|
||||
fs.rmSync(tempDir1, { recursive: true, force: true });
|
||||
fs.rmSync(tempDir2, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
it('should add a warning for truncated files', async () => {
|
||||
createFile('file1.txt', 'Content1');
|
||||
// Create a file that will be "truncated" by making it long
|
||||
const longContent = Array.from({ length: 2500 }, (_, i) => `L${i}`).join(
|
||||
'\n',
|
||||
);
|
||||
createFile('large-file.txt', longContent);
|
||||
|
||||
const params = { paths: ['*.txt'] };
|
||||
const invocation = tool.build(params);
|
||||
const result = await invocation.execute(new AbortController().signal);
|
||||
const content = result.llmContent as string[];
|
||||
|
||||
const normalFileContent = content.find((c) => c.includes('file1.txt'));
|
||||
const truncatedFileContent = content.find((c) =>
|
||||
c.includes('large-file.txt'),
|
||||
);
|
||||
|
||||
expect(normalFileContent).not.toContain('Showing lines');
|
||||
expect(truncatedFileContent).toContain(
|
||||
'Showing lines 1-250 of 2500 total lines.',
|
||||
);
|
||||
});
|
||||
|
||||
it('should read files with special characters like [] and () in the path', async () => {
|
||||
const filePath = 'src/app/[test]/(dashboard)/testing/components/code.tsx';
|
||||
createFile(filePath, 'Content of receive-detail');
|
||||
const params = { paths: [filePath] };
|
||||
const invocation = tool.build(params);
|
||||
const result = await invocation.execute(new AbortController().signal);
|
||||
const expectedPath = path.join(tempRootDir, filePath);
|
||||
expect(result.llmContent).toEqual([
|
||||
`--- ${expectedPath} ---
|
||||
|
||||
Content of receive-detail
|
||||
|
||||
`,
|
||||
`\n--- End of content ---`,
|
||||
]);
|
||||
expect(result.returnDisplay).toContain(
|
||||
'Successfully read and concatenated content from **1 file(s)**',
|
||||
);
|
||||
});
|
||||
|
||||
it('should read files with special characters in the name', async () => {
|
||||
createFile('file[1].txt', 'Content of file[1]');
|
||||
const params = { paths: ['file[1].txt'] };
|
||||
const invocation = tool.build(params);
|
||||
const result = await invocation.execute(new AbortController().signal);
|
||||
const expectedPath = path.join(tempRootDir, 'file[1].txt');
|
||||
expect(result.llmContent).toEqual([
|
||||
`--- ${expectedPath} ---
|
||||
|
||||
Content of file[1]
|
||||
|
||||
`,
|
||||
`\n--- End of content ---`,
|
||||
]);
|
||||
expect(result.returnDisplay).toContain(
|
||||
'Successfully read and concatenated content from **1 file(s)**',
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe('Error handling', () => {
|
||||
it('should return an INVALID_TOOL_PARAMS error if no paths are provided', async () => {
|
||||
const params = { paths: [], include: [] };
|
||||
expect(() => {
|
||||
tool.build(params);
|
||||
}).toThrow('params/paths must NOT have fewer than 1 items');
|
||||
});
|
||||
|
||||
it('should return a READ_MANY_FILES_SEARCH_ERROR on glob failure', async () => {
|
||||
vi.mocked(glob.glob).mockRejectedValue(new Error('Glob failed'));
|
||||
const params = { paths: ['*.txt'] };
|
||||
const invocation = tool.build(params);
|
||||
const result = await invocation.execute(new AbortController().signal);
|
||||
expect(result.error?.type).toBe(
|
||||
ToolErrorType.READ_MANY_FILES_SEARCH_ERROR,
|
||||
);
|
||||
expect(result.llmContent).toBe('Error during file search: Glob failed');
|
||||
// Reset glob.
|
||||
vi.mocked(glob.glob).mockReset();
|
||||
});
|
||||
});
|
||||
|
||||
describe('Batch Processing', () => {
|
||||
const createMultipleFiles = (count: number, contentPrefix = 'Content') => {
|
||||
const files: string[] = [];
|
||||
for (let i = 0; i < count; i++) {
|
||||
const fileName = `file${i}.txt`;
|
||||
createFile(fileName, `${contentPrefix} ${i}`);
|
||||
files.push(fileName);
|
||||
}
|
||||
return files;
|
||||
};
|
||||
|
||||
const createFile = (filePath: string, content = '') => {
|
||||
const fullPath = path.join(tempRootDir, filePath);
|
||||
fs.mkdirSync(path.dirname(fullPath), { recursive: true });
|
||||
fs.writeFileSync(fullPath, content);
|
||||
};
|
||||
|
||||
it('should process files in parallel', async () => {
|
||||
// Mock detectFileType to add artificial delay to simulate I/O
|
||||
const detectFileTypeSpy = vi.spyOn(
|
||||
await import('../utils/fileUtils.js'),
|
||||
'detectFileType',
|
||||
);
|
||||
|
||||
// Create files
|
||||
const fileCount = 4;
|
||||
const files = createMultipleFiles(fileCount, 'Batch test');
|
||||
|
||||
// Mock with 10ms delay per file to simulate I/O operations
|
||||
detectFileTypeSpy.mockImplementation(async (_filePath: string) => {
|
||||
await new Promise((resolve) => setTimeout(resolve, 10));
|
||||
return 'text';
|
||||
});
|
||||
|
||||
const params = { paths: files };
|
||||
const invocation = tool.build(params);
|
||||
const result = await invocation.execute(new AbortController().signal);
|
||||
|
||||
// Verify all files were processed. The content should have fileCount
|
||||
// entries + 1 for the output terminator.
|
||||
const content = result.llmContent as string[];
|
||||
expect(content).toHaveLength(fileCount + 1);
|
||||
for (let i = 0; i < fileCount; i++) {
|
||||
expect(content.join('')).toContain(`Batch test ${i}`);
|
||||
}
|
||||
|
||||
// Cleanup mock
|
||||
detectFileTypeSpy.mockRestore();
|
||||
});
|
||||
|
||||
it('should handle batch processing errors gracefully', async () => {
|
||||
// Create mix of valid and problematic files
|
||||
createFile('valid1.txt', 'Valid content 1');
|
||||
createFile('valid2.txt', 'Valid content 2');
|
||||
createFile('valid3.txt', 'Valid content 3');
|
||||
|
||||
const params = {
|
||||
paths: [
|
||||
'valid1.txt',
|
||||
'valid2.txt',
|
||||
'nonexistent-file.txt', // This will fail
|
||||
'valid3.txt',
|
||||
],
|
||||
};
|
||||
|
||||
const invocation = tool.build(params);
|
||||
const result = await invocation.execute(new AbortController().signal);
|
||||
const content = result.llmContent as string[];
|
||||
|
||||
// Should successfully process valid files despite one failure
|
||||
expect(content.length).toBeGreaterThanOrEqual(3);
|
||||
expect(result.returnDisplay).toContain('Successfully read');
|
||||
|
||||
// Verify valid files were processed
|
||||
const expectedPath1 = path.join(tempRootDir, 'valid1.txt');
|
||||
const expectedPath3 = path.join(tempRootDir, 'valid3.txt');
|
||||
expect(content.some((c) => c.includes(expectedPath1))).toBe(true);
|
||||
expect(content.some((c) => c.includes(expectedPath3))).toBe(true);
|
||||
});
|
||||
|
||||
it('should execute file operations concurrently', async () => {
|
||||
// Track execution order to verify concurrency
|
||||
const executionOrder: string[] = [];
|
||||
const detectFileTypeSpy = vi.spyOn(
|
||||
await import('../utils/fileUtils.js'),
|
||||
'detectFileType',
|
||||
);
|
||||
|
||||
const files = ['file1.txt', 'file2.txt', 'file3.txt'];
|
||||
files.forEach((file) => createFile(file, 'test content'));
|
||||
|
||||
// Mock to track concurrent vs sequential execution
|
||||
detectFileTypeSpy.mockImplementation(async (filePath: string) => {
|
||||
const fileName = filePath.split('/').pop() || '';
|
||||
executionOrder.push(`start:${fileName}`);
|
||||
|
||||
// Add delay to make timing differences visible
|
||||
await new Promise((resolve) => setTimeout(resolve, 50));
|
||||
|
||||
executionOrder.push(`end:${fileName}`);
|
||||
return 'text';
|
||||
});
|
||||
|
||||
const invocation = tool.build({ paths: files });
|
||||
await invocation.execute(new AbortController().signal);
|
||||
|
||||
console.log('Execution order:', executionOrder);
|
||||
|
||||
// Verify concurrent execution pattern
|
||||
// In parallel execution: all "start:" events should come before all "end:" events
|
||||
// In sequential execution: "start:file1", "end:file1", "start:file2", "end:file2", etc.
|
||||
|
||||
const startEvents = executionOrder.filter((e) =>
|
||||
e.startsWith('start:'),
|
||||
).length;
|
||||
const firstEndIndex = executionOrder.findIndex((e) =>
|
||||
e.startsWith('end:'),
|
||||
);
|
||||
const startsBeforeFirstEnd = executionOrder
|
||||
.slice(0, firstEndIndex)
|
||||
.filter((e) => e.startsWith('start:')).length;
|
||||
|
||||
// For parallel processing, ALL start events should happen before the first end event
|
||||
expect(startsBeforeFirstEnd).toBe(startEvents); // Should PASS with parallel implementation
|
||||
|
||||
detectFileTypeSpy.mockRestore();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
|
@ -1,578 +0,0 @@
|
|||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import type { ToolInvocation, ToolResult } from './tools.js';
|
||||
import { BaseDeclarativeTool, BaseToolInvocation, Kind } from './tools.js';
|
||||
import { ToolNames, ToolDisplayNames } from './tool-names.js';
|
||||
import { getErrorMessage } from '../utils/errors.js';
|
||||
import * as fs from 'node:fs';
|
||||
import * as path from 'node:path';
|
||||
import { glob, escape } from 'glob';
|
||||
import type { ProcessedFileReadResult } from '../utils/fileUtils.js';
|
||||
import {
|
||||
detectFileType,
|
||||
processSingleFileContent,
|
||||
DEFAULT_ENCODING,
|
||||
getSpecificMimeType,
|
||||
} from '../utils/fileUtils.js';
|
||||
import type { PartListUnion } from '@google/genai';
|
||||
import {
|
||||
type Config,
|
||||
DEFAULT_FILE_FILTERING_OPTIONS,
|
||||
} from '../config/config.js';
|
||||
import { FileOperation } from '../telemetry/metrics.js';
|
||||
import { getProgrammingLanguage } from '../telemetry/telemetry-utils.js';
|
||||
import { logFileOperation } from '../telemetry/loggers.js';
|
||||
import { FileOperationEvent } from '../telemetry/types.js';
|
||||
import { ToolErrorType } from './tool-error.js';
|
||||
|
||||
/**
|
||||
* Parameters for the ReadManyFilesTool.
|
||||
*/
|
||||
export interface ReadManyFilesParams {
|
||||
/**
|
||||
* An array of file paths or directory paths to search within.
|
||||
* Paths are relative to the tool's configured target directory.
|
||||
* Glob patterns can be used directly in these paths.
|
||||
*/
|
||||
paths: string[];
|
||||
|
||||
/**
|
||||
* Optional. Glob patterns for files to include.
|
||||
* These are effectively combined with the `paths`.
|
||||
* Example: ["*.ts", "src/** /*.md"]
|
||||
*/
|
||||
include?: string[];
|
||||
|
||||
/**
|
||||
* Optional. Glob patterns for files/directories to exclude.
|
||||
* Applied as ignore patterns.
|
||||
* Example: ["*.log", "dist/**"]
|
||||
*/
|
||||
exclude?: string[];
|
||||
|
||||
/**
|
||||
* Optional. Search directories recursively.
|
||||
* This is generally controlled by glob patterns (e.g., `**`).
|
||||
* The glob implementation is recursive by default for `**`.
|
||||
* For simplicity, we'll rely on `**` for recursion.
|
||||
*/
|
||||
recursive?: boolean;
|
||||
|
||||
/**
|
||||
* Optional. Apply default exclusion patterns. Defaults to true.
|
||||
*/
|
||||
useDefaultExcludes?: boolean;
|
||||
|
||||
/**
|
||||
* Whether to respect .gitignore and .qwenignore patterns (optional, defaults to true)
|
||||
*/
|
||||
file_filtering_options?: {
|
||||
respect_git_ignore?: boolean;
|
||||
respect_qwen_ignore?: boolean;
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Result type for file processing operations
|
||||
*/
|
||||
type FileProcessingResult =
|
||||
| {
|
||||
success: true;
|
||||
filePath: string;
|
||||
relativePathForDisplay: string;
|
||||
fileReadResult: ProcessedFileReadResult;
|
||||
reason?: undefined;
|
||||
}
|
||||
| {
|
||||
success: false;
|
||||
filePath: string;
|
||||
relativePathForDisplay: string;
|
||||
fileReadResult?: undefined;
|
||||
reason: string;
|
||||
};
|
||||
|
||||
/**
|
||||
* Creates the default exclusion patterns including dynamic patterns.
|
||||
* This combines the shared patterns with dynamic patterns like QWEN.md.
|
||||
* TODO(adh): Consider making this configurable or extendable through a command line argument.
|
||||
*/
|
||||
function getDefaultExcludes(config?: Config): string[] {
|
||||
return config?.getFileExclusions().getReadManyFilesExcludes() ?? [];
|
||||
}
|
||||
|
||||
const DEFAULT_OUTPUT_SEPARATOR_FORMAT = '--- {filePath} ---';
|
||||
const DEFAULT_OUTPUT_TERMINATOR = '\n--- End of content ---';
|
||||
|
||||
class ReadManyFilesToolInvocation extends BaseToolInvocation<
|
||||
ReadManyFilesParams,
|
||||
ToolResult
|
||||
> {
|
||||
constructor(
|
||||
private readonly config: Config,
|
||||
params: ReadManyFilesParams,
|
||||
) {
|
||||
super(params);
|
||||
}
|
||||
|
||||
getDescription(): string {
|
||||
const allPatterns = [...this.params.paths, ...(this.params.include || [])];
|
||||
const pathDesc = `using patterns:
|
||||
${allPatterns.join('`, `')}
|
||||
(within target directory:
|
||||
${this.config.getTargetDir()}
|
||||
) `;
|
||||
|
||||
// Determine the final list of exclusion patterns exactly as in execute method
|
||||
const paramExcludes = this.params.exclude || [];
|
||||
const paramUseDefaultExcludes = this.params.useDefaultExcludes !== false;
|
||||
const qwenIgnorePatterns = this.config
|
||||
.getFileService()
|
||||
.getQwenIgnorePatterns();
|
||||
const finalExclusionPatternsForDescription: string[] =
|
||||
paramUseDefaultExcludes
|
||||
? [
|
||||
...getDefaultExcludes(this.config),
|
||||
...paramExcludes,
|
||||
...qwenIgnorePatterns,
|
||||
]
|
||||
: [...paramExcludes, ...qwenIgnorePatterns];
|
||||
|
||||
let excludeDesc = `Excluding: ${
|
||||
finalExclusionPatternsForDescription.length > 0
|
||||
? `patterns like
|
||||
${finalExclusionPatternsForDescription
|
||||
.slice(0, 2)
|
||||
.join(
|
||||
'`, `',
|
||||
)}${finalExclusionPatternsForDescription.length > 2 ? '...`' : '`'}`
|
||||
: 'none specified'
|
||||
}`;
|
||||
|
||||
// Add a note if .qwenignore patterns contributed to the final list of exclusions
|
||||
if (qwenIgnorePatterns.length > 0) {
|
||||
const geminiPatternsInEffect = qwenIgnorePatterns.filter((p) =>
|
||||
finalExclusionPatternsForDescription.includes(p),
|
||||
).length;
|
||||
if (geminiPatternsInEffect > 0) {
|
||||
excludeDesc += ` (includes ${geminiPatternsInEffect} from .qwenignore)`;
|
||||
}
|
||||
}
|
||||
|
||||
return `Will attempt to read and concatenate files ${pathDesc}. ${excludeDesc}. File encoding: ${DEFAULT_ENCODING}. Separator: "${DEFAULT_OUTPUT_SEPARATOR_FORMAT.replace(
|
||||
'{filePath}',
|
||||
'path/to/file.ext',
|
||||
)}".`;
|
||||
}
|
||||
|
||||
async execute(signal: AbortSignal): Promise<ToolResult> {
|
||||
const {
|
||||
paths: inputPatterns,
|
||||
include = [],
|
||||
exclude = [],
|
||||
useDefaultExcludes = true,
|
||||
} = this.params;
|
||||
|
||||
const filesToConsider = new Set<string>();
|
||||
const skippedFiles: Array<{ path: string; reason: string }> = [];
|
||||
const processedFilesRelativePaths: string[] = [];
|
||||
const contentParts: PartListUnion = [];
|
||||
|
||||
const effectiveExcludes = useDefaultExcludes
|
||||
? [...getDefaultExcludes(this.config), ...exclude]
|
||||
: [...exclude];
|
||||
|
||||
const searchPatterns = [...inputPatterns, ...include];
|
||||
try {
|
||||
const allEntries = new Set<string>();
|
||||
const workspaceDirs = this.config.getWorkspaceContext().getDirectories();
|
||||
|
||||
for (const dir of workspaceDirs) {
|
||||
const processedPatterns = [];
|
||||
for (const p of searchPatterns) {
|
||||
const normalizedP = p.replace(/\\/g, '/');
|
||||
const fullPath = path.join(dir, normalizedP);
|
||||
if (fs.existsSync(fullPath)) {
|
||||
processedPatterns.push(escape(normalizedP));
|
||||
} else {
|
||||
// The path does not exist or is not a file, so we treat it as a glob pattern.
|
||||
processedPatterns.push(normalizedP);
|
||||
}
|
||||
}
|
||||
|
||||
const entriesInDir = await glob(processedPatterns, {
|
||||
cwd: dir,
|
||||
ignore: effectiveExcludes,
|
||||
nodir: true,
|
||||
dot: true,
|
||||
absolute: true,
|
||||
nocase: true,
|
||||
signal,
|
||||
});
|
||||
for (const entry of entriesInDir) {
|
||||
allEntries.add(entry);
|
||||
}
|
||||
}
|
||||
const relativeEntries = Array.from(allEntries).map((p) =>
|
||||
path.relative(this.config.getTargetDir(), p),
|
||||
);
|
||||
|
||||
const fileDiscovery = this.config.getFileService();
|
||||
const { filteredPaths, gitIgnoredCount, qwenIgnoredCount } =
|
||||
fileDiscovery.filterFilesWithReport(relativeEntries, {
|
||||
respectGitIgnore:
|
||||
this.params.file_filtering_options?.respect_git_ignore ??
|
||||
this.config.getFileFilteringOptions().respectGitIgnore ??
|
||||
DEFAULT_FILE_FILTERING_OPTIONS.respectGitIgnore,
|
||||
respectQwenIgnore:
|
||||
this.params.file_filtering_options?.respect_qwen_ignore ??
|
||||
this.config.getFileFilteringOptions().respectQwenIgnore ??
|
||||
DEFAULT_FILE_FILTERING_OPTIONS.respectQwenIgnore,
|
||||
});
|
||||
|
||||
for (const relativePath of filteredPaths) {
|
||||
// Security check: ensure the glob library didn't return something outside the workspace.
|
||||
|
||||
const fullPath = path.resolve(this.config.getTargetDir(), relativePath);
|
||||
if (
|
||||
!this.config.getWorkspaceContext().isPathWithinWorkspace(fullPath)
|
||||
) {
|
||||
skippedFiles.push({
|
||||
path: fullPath,
|
||||
reason: `Security: Glob library returned path outside workspace. Path: ${fullPath}`,
|
||||
});
|
||||
continue;
|
||||
}
|
||||
filesToConsider.add(fullPath);
|
||||
}
|
||||
|
||||
// Add info about git-ignored files if any were filtered
|
||||
if (gitIgnoredCount > 0) {
|
||||
skippedFiles.push({
|
||||
path: `${gitIgnoredCount} file(s)`,
|
||||
reason: 'git ignored',
|
||||
});
|
||||
}
|
||||
|
||||
// Add info about qwen-ignored files if any were filtered
|
||||
if (qwenIgnoredCount > 0) {
|
||||
skippedFiles.push({
|
||||
path: `${qwenIgnoredCount} file(s)`,
|
||||
reason: 'qwen ignored',
|
||||
});
|
||||
}
|
||||
} catch (error) {
|
||||
const errorMessage = `Error during file search: ${getErrorMessage(error)}`;
|
||||
return {
|
||||
llmContent: errorMessage,
|
||||
returnDisplay: `## File Search Error\n\nAn error occurred while searching for files:\n\`\`\`\n${getErrorMessage(error)}\n\`\`\``,
|
||||
error: {
|
||||
message: errorMessage,
|
||||
type: ToolErrorType.READ_MANY_FILES_SEARCH_ERROR,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
const sortedFiles = Array.from(filesToConsider).sort();
|
||||
const truncateToolOutputLines = this.config.getTruncateToolOutputLines();
|
||||
const file_line_limit = Number.isFinite(truncateToolOutputLines)
|
||||
? Math.floor(truncateToolOutputLines / Math.max(1, sortedFiles.length))
|
||||
: undefined;
|
||||
|
||||
const fileProcessingPromises = sortedFiles.map(
|
||||
async (filePath): Promise<FileProcessingResult> => {
|
||||
try {
|
||||
const relativePathForDisplay = path
|
||||
.relative(this.config.getTargetDir(), filePath)
|
||||
.replace(/\\/g, '/');
|
||||
|
||||
const fileType = await detectFileType(filePath);
|
||||
|
||||
if (fileType === 'image' || fileType === 'pdf') {
|
||||
const fileExtension = path.extname(filePath).toLowerCase();
|
||||
const fileNameWithoutExtension = path.basename(
|
||||
filePath,
|
||||
fileExtension,
|
||||
);
|
||||
const requestedExplicitly = inputPatterns.some(
|
||||
(pattern: string) =>
|
||||
pattern.toLowerCase().includes(fileExtension) ||
|
||||
pattern.includes(fileNameWithoutExtension),
|
||||
);
|
||||
|
||||
if (!requestedExplicitly) {
|
||||
return {
|
||||
success: false,
|
||||
filePath,
|
||||
relativePathForDisplay,
|
||||
reason:
|
||||
'asset file (image/pdf) was not explicitly requested by name or extension',
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
// Use processSingleFileContent for all file types now
|
||||
const fileReadResult = await processSingleFileContent(
|
||||
filePath,
|
||||
this.config,
|
||||
0,
|
||||
file_line_limit,
|
||||
);
|
||||
|
||||
if (fileReadResult.error) {
|
||||
return {
|
||||
success: false,
|
||||
filePath,
|
||||
relativePathForDisplay,
|
||||
reason: `Read error: ${fileReadResult.error}`,
|
||||
};
|
||||
}
|
||||
|
||||
return {
|
||||
success: true,
|
||||
filePath,
|
||||
relativePathForDisplay,
|
||||
fileReadResult,
|
||||
};
|
||||
} catch (error) {
|
||||
const relativePathForDisplay = path
|
||||
.relative(this.config.getTargetDir(), filePath)
|
||||
.replace(/\\/g, '/');
|
||||
|
||||
return {
|
||||
success: false,
|
||||
filePath,
|
||||
relativePathForDisplay,
|
||||
reason: `Unexpected error: ${error instanceof Error ? error.message : String(error)}`,
|
||||
};
|
||||
}
|
||||
},
|
||||
);
|
||||
|
||||
const results = await Promise.allSettled(fileProcessingPromises);
|
||||
|
||||
for (const result of results) {
|
||||
if (result.status === 'fulfilled') {
|
||||
const fileResult = result.value;
|
||||
|
||||
if (!fileResult.success) {
|
||||
// Handle skipped files (images/PDFs not requested or read errors)
|
||||
skippedFiles.push({
|
||||
path: fileResult.relativePathForDisplay,
|
||||
reason: fileResult.reason,
|
||||
});
|
||||
} else {
|
||||
// Handle successfully processed files
|
||||
const { filePath, relativePathForDisplay, fileReadResult } =
|
||||
fileResult;
|
||||
|
||||
if (typeof fileReadResult.llmContent === 'string') {
|
||||
const separator = DEFAULT_OUTPUT_SEPARATOR_FORMAT.replace(
|
||||
'{filePath}',
|
||||
filePath,
|
||||
);
|
||||
let fileContentForLlm = '';
|
||||
if (fileReadResult.isTruncated) {
|
||||
const [start, end] = fileReadResult.linesShown!;
|
||||
const total = fileReadResult.originalLineCount!;
|
||||
fileContentForLlm = `Showing lines ${start}-${end} of ${total} total lines.\n---\n${fileReadResult.llmContent}`;
|
||||
} else {
|
||||
fileContentForLlm = fileReadResult.llmContent;
|
||||
}
|
||||
contentParts.push(`${separator}\n\n${fileContentForLlm}\n\n`);
|
||||
} else {
|
||||
// This is a Part for image/pdf, which we don't add the separator to.
|
||||
contentParts.push(fileReadResult.llmContent);
|
||||
}
|
||||
|
||||
processedFilesRelativePaths.push(relativePathForDisplay);
|
||||
|
||||
const lines =
|
||||
typeof fileReadResult.llmContent === 'string'
|
||||
? fileReadResult.llmContent.split('\n').length
|
||||
: undefined;
|
||||
const mimetype = getSpecificMimeType(filePath);
|
||||
const programming_language = getProgrammingLanguage({
|
||||
absolute_path: filePath,
|
||||
});
|
||||
logFileOperation(
|
||||
this.config,
|
||||
new FileOperationEvent(
|
||||
ReadManyFilesTool.Name,
|
||||
FileOperation.READ,
|
||||
lines,
|
||||
mimetype,
|
||||
path.extname(filePath),
|
||||
programming_language,
|
||||
),
|
||||
);
|
||||
}
|
||||
} else {
|
||||
// Handle Promise rejection (unexpected errors)
|
||||
skippedFiles.push({
|
||||
path: 'unknown',
|
||||
reason: `Unexpected error: ${result.reason}`,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
let displayMessage = `### ReadManyFiles Result (Target Dir: \`${this.config.getTargetDir()}\`)\n\n`;
|
||||
if (processedFilesRelativePaths.length > 0) {
|
||||
displayMessage += `Successfully read and concatenated content from **${processedFilesRelativePaths.length} file(s)**.\n`;
|
||||
if (processedFilesRelativePaths.length <= 10) {
|
||||
displayMessage += `\n**Processed Files:**\n`;
|
||||
processedFilesRelativePaths.forEach(
|
||||
(p) => (displayMessage += `- \`${p}\`\n`),
|
||||
);
|
||||
} else {
|
||||
displayMessage += `\n**Processed Files (first 10 shown):**\n`;
|
||||
processedFilesRelativePaths
|
||||
.slice(0, 10)
|
||||
.forEach((p) => (displayMessage += `- \`${p}\`\n`));
|
||||
displayMessage += `- ...and ${processedFilesRelativePaths.length - 10} more.\n`;
|
||||
}
|
||||
}
|
||||
|
||||
if (skippedFiles.length > 0) {
|
||||
if (processedFilesRelativePaths.length === 0) {
|
||||
displayMessage += `No files were read and concatenated based on the criteria.\n`;
|
||||
}
|
||||
if (skippedFiles.length <= 5) {
|
||||
displayMessage += `\n**Skipped ${skippedFiles.length} item(s):**\n`;
|
||||
} else {
|
||||
displayMessage += `\n**Skipped ${skippedFiles.length} item(s) (first 5 shown):**\n`;
|
||||
}
|
||||
skippedFiles
|
||||
.slice(0, 5)
|
||||
.forEach(
|
||||
(f) => (displayMessage += `- \`${f.path}\` (Reason: ${f.reason})\n`),
|
||||
);
|
||||
if (skippedFiles.length > 5) {
|
||||
displayMessage += `- ...and ${skippedFiles.length - 5} more.\n`;
|
||||
}
|
||||
} else if (
|
||||
processedFilesRelativePaths.length === 0 &&
|
||||
skippedFiles.length === 0
|
||||
) {
|
||||
displayMessage += `No files were read and concatenated based on the criteria.\n`;
|
||||
}
|
||||
|
||||
if (contentParts.length > 0) {
|
||||
contentParts.push(DEFAULT_OUTPUT_TERMINATOR);
|
||||
} else {
|
||||
contentParts.push(
|
||||
'No files matching the criteria were found or all were skipped.',
|
||||
);
|
||||
}
|
||||
return {
|
||||
llmContent: contentParts,
|
||||
returnDisplay: displayMessage.trim(),
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Tool implementation for finding and reading multiple text files from the local filesystem
|
||||
* within a specified target directory. The content is concatenated.
|
||||
* It is intended to run in an environment with access to the local file system (e.g., a Node.js backend).
|
||||
*/
|
||||
export class ReadManyFilesTool extends BaseDeclarativeTool<
|
||||
ReadManyFilesParams,
|
||||
ToolResult
|
||||
> {
|
||||
static readonly Name: string = ToolNames.READ_MANY_FILES;
|
||||
|
||||
constructor(private config: Config) {
|
||||
const parameterSchema = {
|
||||
type: 'object',
|
||||
properties: {
|
||||
paths: {
|
||||
type: 'array',
|
||||
items: {
|
||||
type: 'string',
|
||||
minLength: 1,
|
||||
},
|
||||
minItems: 1,
|
||||
description:
|
||||
"Required. An array of glob patterns or paths relative to the tool's target directory. Examples: ['src/**/*.ts'], ['README.md', 'docs/']",
|
||||
},
|
||||
include: {
|
||||
type: 'array',
|
||||
items: {
|
||||
type: 'string',
|
||||
minLength: 1,
|
||||
},
|
||||
description:
|
||||
'Optional. Additional glob patterns to include. These are merged with `paths`. Example: "*.test.ts" to specifically add test files if they were broadly excluded.',
|
||||
default: [],
|
||||
},
|
||||
exclude: {
|
||||
type: 'array',
|
||||
items: {
|
||||
type: 'string',
|
||||
minLength: 1,
|
||||
},
|
||||
description:
|
||||
'Optional. Glob patterns for files/directories to exclude. Added to default excludes if useDefaultExcludes is true. Example: "**/*.log", "temp/"',
|
||||
default: [],
|
||||
},
|
||||
recursive: {
|
||||
type: 'boolean',
|
||||
description:
|
||||
'Optional. Whether to search recursively (primarily controlled by `**` in glob patterns). Defaults to true.',
|
||||
default: true,
|
||||
},
|
||||
useDefaultExcludes: {
|
||||
type: 'boolean',
|
||||
description:
|
||||
'Optional. Whether to apply a list of default exclusion patterns (e.g., node_modules, .git, binary files). Defaults to true.',
|
||||
default: true,
|
||||
},
|
||||
file_filtering_options: {
|
||||
description:
|
||||
'Whether to respect ignore patterns from .gitignore or .qwenignore',
|
||||
type: 'object',
|
||||
properties: {
|
||||
respect_git_ignore: {
|
||||
description:
|
||||
'Optional: Whether to respect .gitignore patterns when listing files. Only available in git repositories. Defaults to true.',
|
||||
type: 'boolean',
|
||||
},
|
||||
respect_qwen_ignore: {
|
||||
description:
|
||||
'Optional: Whether to respect .qwenignore patterns when listing files. Defaults to true.',
|
||||
type: 'boolean',
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
required: ['paths'],
|
||||
};
|
||||
|
||||
super(
|
||||
ReadManyFilesTool.Name,
|
||||
ToolDisplayNames.READ_MANY_FILES,
|
||||
`Reads content from multiple files specified by paths or glob patterns within a configured target directory. For text files, it concatenates their content into a single string. It is primarily designed for text-based files. However, it can also process image (e.g., .png, .jpg) and PDF (.pdf) files if their file names or extensions are explicitly included in the 'paths' argument. For these explicitly requested non-text files, their data is read and included in a format suitable for model consumption (e.g., base64 encoded).
|
||||
|
||||
This tool is useful when you need to understand or analyze a collection of files, such as:
|
||||
- Getting an overview of a codebase or parts of it (e.g., all TypeScript files in the 'src' directory).
|
||||
- Finding where specific functionality is implemented if the user asks broad questions about code.
|
||||
- Reviewing documentation files (e.g., all Markdown files in the 'docs' directory).
|
||||
- Gathering context from multiple configuration files.
|
||||
- When the user asks to "read all files in X directory" or "show me the content of all Y files".
|
||||
|
||||
Use this tool when the user's query implies needing the content of several files simultaneously for context, analysis, or summarization. For text files, it uses default UTF-8 encoding and a '--- {filePath} ---' separator between file contents. The tool inserts a '--- End of content ---' after the last file. Ensure paths are relative to the target directory. Glob patterns like 'src/**/*.js' are supported. Avoid using for single files if a more specific single-file reading tool is available, unless the user specifically requests to process a list containing just one file via this tool. Other binary files (not explicitly requested as image/PDF) are generally skipped. Default excludes apply to common non-text files (except for explicitly requested images/PDFs) and large dependency directories unless 'useDefaultExcludes' is false.`,
|
||||
Kind.Read,
|
||||
parameterSchema,
|
||||
);
|
||||
}
|
||||
|
||||
protected createInvocation(
|
||||
params: ReadManyFilesParams,
|
||||
): ToolInvocation<ReadManyFilesParams, ToolResult> {
|
||||
return new ReadManyFilesToolInvocation(this.config, params);
|
||||
}
|
||||
}
|
||||
|
|
@ -53,9 +53,6 @@ export enum ToolErrorType {
|
|||
// Memory-specific Errors
|
||||
MEMORY_TOOL_EXECUTION_ERROR = 'memory_tool_execution_error',
|
||||
|
||||
// ReadManyFiles-specific Errors
|
||||
READ_MANY_FILES_SEARCH_ERROR = 'read_many_files_search_error',
|
||||
|
||||
// Shell errors
|
||||
SHELL_EXECUTE_ERROR = 'shell_execute_error',
|
||||
|
||||
|
|
|
|||
|
|
@ -13,7 +13,6 @@ export const ToolNames = {
|
|||
EDIT: 'edit',
|
||||
WRITE_FILE: 'write_file',
|
||||
READ_FILE: 'read_file',
|
||||
READ_MANY_FILES: 'read_many_files',
|
||||
GREP: 'grep_search',
|
||||
GLOB: 'glob',
|
||||
SHELL: 'run_shell_command',
|
||||
|
|
@ -37,7 +36,6 @@ export const ToolDisplayNames = {
|
|||
EDIT: 'Edit',
|
||||
WRITE_FILE: 'WriteFile',
|
||||
READ_FILE: 'ReadFile',
|
||||
READ_MANY_FILES: 'ReadManyFiles',
|
||||
GREP: 'Grep',
|
||||
GLOB: 'Glob',
|
||||
SHELL: 'Shell',
|
||||
|
|
|
|||
|
|
@ -75,7 +75,6 @@ describe('getDirectoryContextString', () => {
|
|||
|
||||
describe('getEnvironmentContext', () => {
|
||||
let mockConfig: Partial<Config>;
|
||||
let mockToolRegistry: { getTool: Mock };
|
||||
|
||||
beforeEach(() => {
|
||||
vi.useFakeTimers();
|
||||
|
|
@ -89,17 +88,11 @@ describe('getEnvironmentContext', () => {
|
|||
})),
|
||||
});
|
||||
|
||||
mockToolRegistry = {
|
||||
getTool: vi.fn(),
|
||||
};
|
||||
|
||||
mockConfig = {
|
||||
getWorkspaceContext: vi.fn().mockReturnValue({
|
||||
getDirectories: vi.fn().mockReturnValue(['/test/dir']),
|
||||
}),
|
||||
getFileService: vi.fn(),
|
||||
getFullContext: vi.fn().mockReturnValue(false),
|
||||
getToolRegistry: vi.fn().mockReturnValue(mockToolRegistry),
|
||||
};
|
||||
|
||||
vi.mocked(getFolderStructure).mockResolvedValue('Mock Folder Structure');
|
||||
|
|
@ -152,68 +145,6 @@ describe('getEnvironmentContext', () => {
|
|||
);
|
||||
expect(getFolderStructure).toHaveBeenCalledTimes(2);
|
||||
});
|
||||
|
||||
it('should include full file context when getFullContext is true', async () => {
|
||||
mockConfig.getFullContext = vi.fn().mockReturnValue(true);
|
||||
const mockReadManyFilesTool = {
|
||||
build: vi.fn().mockReturnValue({
|
||||
execute: vi
|
||||
.fn()
|
||||
.mockResolvedValue({ llmContent: 'Full file content here' }),
|
||||
}),
|
||||
};
|
||||
mockToolRegistry.getTool.mockReturnValue(mockReadManyFilesTool);
|
||||
|
||||
const parts = await getEnvironmentContext(mockConfig as Config);
|
||||
|
||||
expect(parts.length).toBe(2);
|
||||
expect(parts[1].text).toBe(
|
||||
'\n--- Full File Context ---\nFull file content here',
|
||||
);
|
||||
expect(mockToolRegistry.getTool).toHaveBeenCalledWith('read_many_files');
|
||||
expect(mockReadManyFilesTool.build).toHaveBeenCalledWith({
|
||||
paths: ['**/*'],
|
||||
useDefaultExcludes: true,
|
||||
});
|
||||
});
|
||||
|
||||
it('should handle read_many_files returning no content', async () => {
|
||||
mockConfig.getFullContext = vi.fn().mockReturnValue(true);
|
||||
const mockReadManyFilesTool = {
|
||||
build: vi.fn().mockReturnValue({
|
||||
execute: vi.fn().mockResolvedValue({ llmContent: '' }),
|
||||
}),
|
||||
};
|
||||
mockToolRegistry.getTool.mockReturnValue(mockReadManyFilesTool);
|
||||
|
||||
const parts = await getEnvironmentContext(mockConfig as Config);
|
||||
|
||||
expect(parts.length).toBe(1); // No extra part added
|
||||
});
|
||||
|
||||
it('should handle read_many_files tool not being found', async () => {
|
||||
mockConfig.getFullContext = vi.fn().mockReturnValue(true);
|
||||
mockToolRegistry.getTool.mockReturnValue(null);
|
||||
|
||||
const parts = await getEnvironmentContext(mockConfig as Config);
|
||||
|
||||
expect(parts.length).toBe(1); // No extra part added
|
||||
});
|
||||
|
||||
it('should handle errors when reading full file context', async () => {
|
||||
mockConfig.getFullContext = vi.fn().mockReturnValue(true);
|
||||
const mockReadManyFilesTool = {
|
||||
build: vi.fn().mockReturnValue({
|
||||
execute: vi.fn().mockRejectedValue(new Error('Read error')),
|
||||
}),
|
||||
};
|
||||
mockToolRegistry.getTool.mockReturnValue(mockReadManyFilesTool);
|
||||
|
||||
const parts = await getEnvironmentContext(mockConfig as Config);
|
||||
|
||||
expect(parts.length).toBe(2);
|
||||
expect(parts[1].text).toBe('\n--- Error reading full file context ---');
|
||||
});
|
||||
});
|
||||
|
||||
describe('getInitialChatHistory', () => {
|
||||
|
|
@ -227,8 +158,6 @@ describe('getInitialChatHistory', () => {
|
|||
getDirectories: vi.fn().mockReturnValue(['/test/dir']),
|
||||
}),
|
||||
getFileService: vi.fn(),
|
||||
getFullContext: vi.fn().mockReturnValue(false),
|
||||
getToolRegistry: vi.fn().mockReturnValue({ getTool: vi.fn() }),
|
||||
};
|
||||
});
|
||||
|
||||
|
|
@ -267,16 +196,6 @@ describe('getInitialChatHistory', () => {
|
|||
'getWorkspaceContext should not be called when skipping startup context',
|
||||
);
|
||||
});
|
||||
mockConfig.getFullContext = vi.fn(() => {
|
||||
throw new Error(
|
||||
'getFullContext should not be called when skipping startup context',
|
||||
);
|
||||
});
|
||||
mockConfig.getToolRegistry = vi.fn(() => {
|
||||
throw new Error(
|
||||
'getToolRegistry should not be called when skipping startup context',
|
||||
);
|
||||
});
|
||||
const extraHistory: Content[] = [
|
||||
{ role: 'user', parts: [{ text: 'custom context' }] },
|
||||
];
|
||||
|
|
@ -298,16 +217,6 @@ describe('getInitialChatHistory', () => {
|
|||
'getWorkspaceContext should not be called when skipping startup context',
|
||||
);
|
||||
});
|
||||
mockConfig.getFullContext = vi.fn(() => {
|
||||
throw new Error(
|
||||
'getFullContext should not be called when skipping startup context',
|
||||
);
|
||||
});
|
||||
mockConfig.getToolRegistry = vi.fn(() => {
|
||||
throw new Error(
|
||||
'getToolRegistry should not be called when skipping startup context',
|
||||
);
|
||||
});
|
||||
|
||||
const history = await getInitialChatHistory(mockConfig as Config);
|
||||
|
||||
|
|
|
|||
|
|
@ -46,7 +46,6 @@ ${folderStructure}`;
|
|||
/**
|
||||
* Retrieves environment-related information to be included in the chat context.
|
||||
* This includes the current working directory, date, operating system, and folder structure.
|
||||
* Optionally, it can also include the full file context if enabled.
|
||||
* @param {Config} config - The runtime configuration and services.
|
||||
* @returns A promise that resolves to an array of `Part` objects containing environment information.
|
||||
*/
|
||||
|
|
@ -67,45 +66,7 @@ My operating system is: ${platform}
|
|||
${directoryContext}
|
||||
`.trim();
|
||||
|
||||
const initialParts: Part[] = [{ text: context }];
|
||||
const toolRegistry = config.getToolRegistry();
|
||||
|
||||
// Add full file context if the flag is set
|
||||
if (config.getFullContext()) {
|
||||
try {
|
||||
const readManyFilesTool = toolRegistry.getTool('read_many_files');
|
||||
if (readManyFilesTool) {
|
||||
const invocation = readManyFilesTool.build({
|
||||
paths: ['**/*'], // Read everything recursively
|
||||
useDefaultExcludes: true, // Use default excludes
|
||||
});
|
||||
|
||||
// Read all files in the target directory
|
||||
const result = await invocation.execute(AbortSignal.timeout(30000));
|
||||
if (result.llmContent) {
|
||||
initialParts.push({
|
||||
text: `\n--- Full File Context ---\n${result.llmContent}`,
|
||||
});
|
||||
} else {
|
||||
console.warn(
|
||||
'Full context requested, but read_many_files returned no content.',
|
||||
);
|
||||
}
|
||||
} else {
|
||||
console.warn(
|
||||
'Full context requested, but read_many_files tool not found.',
|
||||
);
|
||||
}
|
||||
} catch (error) {
|
||||
// Not using reportError here as it's a startup/config phase, not a chat/generation phase error.
|
||||
console.error('Error reading full file context:', error);
|
||||
initialParts.push({
|
||||
text: '\n--- Error reading full file context ---',
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
return initialParts;
|
||||
return [{ text: context }];
|
||||
}
|
||||
|
||||
export async function getInitialChatHistory(
|
||||
|
|
|
|||
|
|
@ -45,7 +45,7 @@ describe('getFolderStructure', () => {
|
|||
const structure = await getFolderStructure(testRootDir);
|
||||
expect(structure.trim()).toBe(
|
||||
`
|
||||
Showing up to 20 items (files + folders).
|
||||
Showing up to 20 items:
|
||||
|
||||
${testRootDir}${path.sep}
|
||||
├───fileA1.ts
|
||||
|
|
@ -60,7 +60,7 @@ ${testRootDir}${path.sep}
|
|||
const structure = await getFolderStructure(testRootDir);
|
||||
expect(structure.trim()).toBe(
|
||||
`
|
||||
Showing up to 20 items (files + folders).
|
||||
Showing up to 20 items:
|
||||
|
||||
${testRootDir}${path.sep}
|
||||
`
|
||||
|
|
@ -81,7 +81,7 @@ ${testRootDir}${path.sep}
|
|||
const structure = await getFolderStructure(testRootDir);
|
||||
expect(structure.trim()).toBe(
|
||||
`
|
||||
Showing up to 20 items (files + folders). Folders or files indicated with ... contain more items not shown, were ignored, or the display limit (20 items) was reached.
|
||||
Showing up to 20 items:
|
||||
|
||||
${testRootDir}${path.sep}
|
||||
├───.hiddenfile
|
||||
|
|
@ -108,7 +108,7 @@ ${testRootDir}${path.sep}
|
|||
ignoredFolders: new Set(['subfolderA', 'node_modules']),
|
||||
});
|
||||
const expected = `
|
||||
Showing up to 20 items (files + folders). Folders or files indicated with ... contain more items not shown, were ignored, or the display limit (20 items) was reached.
|
||||
Showing up to 20 items:
|
||||
|
||||
${testRootDir}${path.sep}
|
||||
├───.hiddenfile
|
||||
|
|
@ -129,7 +129,7 @@ ${testRootDir}${path.sep}
|
|||
fileIncludePattern: /\.ts$/,
|
||||
});
|
||||
const expected = `
|
||||
Showing up to 20 items (files + folders).
|
||||
Showing up to 20 items:
|
||||
|
||||
${testRootDir}${path.sep}
|
||||
├───fileA1.ts
|
||||
|
|
@ -147,7 +147,7 @@ ${testRootDir}${path.sep}
|
|||
maxItems: 3,
|
||||
});
|
||||
const expected = `
|
||||
Showing up to 3 items (files + folders).
|
||||
Showing up to 3 items:
|
||||
|
||||
${testRootDir}${path.sep}
|
||||
├───fileA1.ts
|
||||
|
|
@ -166,7 +166,7 @@ ${testRootDir}${path.sep}
|
|||
maxItems: 4,
|
||||
});
|
||||
const expectedRevised = `
|
||||
Showing up to 4 items (files + folders). Folders or files indicated with ... contain more items not shown, were ignored, or the display limit (4 items) was reached.
|
||||
Showing up to 4 items:
|
||||
|
||||
${testRootDir}${path.sep}
|
||||
├───folder-0${path.sep}
|
||||
|
|
@ -187,7 +187,7 @@ ${testRootDir}${path.sep}
|
|||
maxItems: 1,
|
||||
});
|
||||
const expected = `
|
||||
Showing up to 1 items (files + folders). Folders or files indicated with ... contain more items not shown, were ignored, or the display limit (1 items) was reached.
|
||||
Showing up to 1 items:
|
||||
|
||||
${testRootDir}${path.sep}
|
||||
├───fileA1.ts
|
||||
|
|
@ -212,7 +212,7 @@ ${testRootDir}${path.sep}
|
|||
maxItems: 10,
|
||||
});
|
||||
const expected = `
|
||||
Showing up to 10 items (files + folders).
|
||||
Showing up to 10 items:
|
||||
|
||||
${testRootDir}${path.sep}
|
||||
└───level1${path.sep}
|
||||
|
|
@ -230,7 +230,7 @@ ${testRootDir}${path.sep}
|
|||
maxItems: 3,
|
||||
});
|
||||
const expected = `
|
||||
Showing up to 3 items (files + folders).
|
||||
Showing up to 3 items:
|
||||
|
||||
${testRootDir}${path.sep}
|
||||
└───level1${path.sep}
|
||||
|
|
|
|||
|
|
@ -322,25 +322,7 @@ export async function getFolderStructure(
|
|||
formatStructure(structureRoot, '', true, true, structureLines);
|
||||
|
||||
// 3. Build the final output string
|
||||
function isTruncated(node: FullFolderInfo): boolean {
|
||||
if (node.hasMoreFiles || node.hasMoreSubfolders || node.isIgnored) {
|
||||
return true;
|
||||
}
|
||||
for (const sub of node.subFolders) {
|
||||
if (isTruncated(sub)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
let summary = `Showing up to ${mergedOptions.maxItems} items (files + folders).`;
|
||||
|
||||
if (isTruncated(structureRoot)) {
|
||||
summary += ` Folders or files indicated with ${TRUNCATION_INDICATOR} contain more items not shown, were ignored, or the display limit (${mergedOptions.maxItems} items) was reached.`;
|
||||
}
|
||||
|
||||
return `${summary}\n\n${resolvedPath}${path.sep}\n${structureLines.join('\n')}`;
|
||||
return `Showing up to ${mergedOptions.maxItems} items:\n\n${resolvedPath}${path.sep}\n${structureLines.join('\n')}`;
|
||||
} catch (error: unknown) {
|
||||
console.error(`Error getting folder structure for ${resolvedPath}:`, error);
|
||||
return `Error processing directory "${resolvedPath}": ${getErrorMessage(error)}`;
|
||||
|
|
|
|||
298
packages/core/src/utils/readManyFiles.test.ts
Normal file
298
packages/core/src/utils/readManyFiles.test.ts
Normal file
|
|
@ -0,0 +1,298 @@
|
|||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
|
||||
import fs from 'node:fs/promises';
|
||||
import * as nodeFs from 'node:fs';
|
||||
import path from 'node:path';
|
||||
import os from 'node:os';
|
||||
import type { PartListUnion } from '@google/genai';
|
||||
import { readManyFiles } from './readManyFiles.js';
|
||||
import { FileDiscoveryService } from '../services/fileDiscoveryService.js';
|
||||
import type { Config } from '../config/config.js';
|
||||
import { createMockWorkspaceContext } from '../test-utils/mockWorkspaceContext.js';
|
||||
|
||||
/** Helper to convert PartListUnion to string for test assertions */
|
||||
function contentToString(parts: PartListUnion): string {
|
||||
if (typeof parts === 'string') {
|
||||
return parts;
|
||||
}
|
||||
if (Array.isArray(parts)) {
|
||||
return parts
|
||||
.map((p) => (typeof p === 'string' ? p : JSON.stringify(p)))
|
||||
.join('');
|
||||
}
|
||||
return JSON.stringify(parts);
|
||||
}
|
||||
|
||||
describe('readManyFiles', () => {
|
||||
let tempRootDir: string;
|
||||
|
||||
// Helper to create mock config
|
||||
const createMockConfig = (rootDir: string): Config =>
|
||||
({
|
||||
getFileService: () => new FileDiscoveryService(rootDir),
|
||||
getFileFilteringOptions: () => ({
|
||||
respectGitIgnore: true,
|
||||
respectQwenIgnore: true,
|
||||
}),
|
||||
getTargetDir: () => rootDir,
|
||||
getProjectRoot: () => rootDir,
|
||||
getWorkspaceContext: () => createMockWorkspaceContext(rootDir),
|
||||
getTruncateToolOutputLines: () => 1000,
|
||||
getTruncateToolOutputThreshold: () => 2500,
|
||||
}) as unknown as Config;
|
||||
|
||||
async function createTestFile(
|
||||
...pathSegments: string[]
|
||||
): Promise<{ relativePath: string; absolutePath: string }> {
|
||||
const relativePath = path.join(...pathSegments);
|
||||
const absolutePath = path.join(tempRootDir, relativePath);
|
||||
await fs.mkdir(path.dirname(absolutePath), { recursive: true });
|
||||
await fs.writeFile(absolutePath, `Content of ${pathSegments.at(-1)}`);
|
||||
return { relativePath, absolutePath };
|
||||
}
|
||||
|
||||
async function createTestDir(...pathSegments: string[]): Promise<string> {
|
||||
const absolutePath = path.join(tempRootDir, ...pathSegments);
|
||||
await fs.mkdir(absolutePath, { recursive: true });
|
||||
return absolutePath;
|
||||
}
|
||||
|
||||
beforeEach(async () => {
|
||||
tempRootDir = nodeFs.realpathSync(
|
||||
await fs.mkdtemp(path.join(os.tmpdir(), 'read-many-files-test-')),
|
||||
);
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
await fs.rm(tempRootDir, { recursive: true, force: true });
|
||||
});
|
||||
|
||||
describe('file reading', () => {
|
||||
it('should read a single file', async () => {
|
||||
await createTestFile('file1.txt');
|
||||
const mockConfig = createMockConfig(tempRootDir);
|
||||
|
||||
const result = await readManyFiles(mockConfig, { paths: ['file1.txt'] });
|
||||
|
||||
const content = contentToString(result.contentParts);
|
||||
expect(content).toContain('--- Content from referenced files ---');
|
||||
expect(content).toContain('Content from');
|
||||
expect(content).toContain('file1.txt');
|
||||
expect(content).toContain('Content of file1.txt');
|
||||
expect(content).toContain('--- End of content ---');
|
||||
});
|
||||
|
||||
it('should read multiple files', async () => {
|
||||
await createTestFile('file1.txt');
|
||||
await createTestFile('file2.txt');
|
||||
const mockConfig = createMockConfig(tempRootDir);
|
||||
|
||||
const result = await readManyFiles(mockConfig, {
|
||||
paths: ['file1.txt', 'file2.txt'],
|
||||
});
|
||||
|
||||
const content = contentToString(result.contentParts);
|
||||
expect(content).toContain('--- Content from referenced files ---');
|
||||
expect(content).toContain('Content of file1.txt');
|
||||
expect(content).toContain('Content of file2.txt');
|
||||
expect(content).toContain('--- End of content ---');
|
||||
});
|
||||
|
||||
it('should return message when no files found', async () => {
|
||||
const mockConfig = createMockConfig(tempRootDir);
|
||||
|
||||
const result = await readManyFiles(mockConfig, {
|
||||
paths: ['nonexistent.txt'],
|
||||
});
|
||||
|
||||
expect(contentToString(result.contentParts)).toContain(
|
||||
'No files matching the criteria were found',
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe('directory handling', () => {
|
||||
it('should return directory structure when path is a directory', async () => {
|
||||
await createTestFile('mydir', 'file1.txt');
|
||||
await createTestFile('mydir', 'file2.txt');
|
||||
const mockConfig = createMockConfig(tempRootDir);
|
||||
|
||||
const result = await readManyFiles(mockConfig, { paths: ['mydir'] });
|
||||
|
||||
const content = contentToString(result.contentParts);
|
||||
expect(content).toContain('--- Content from referenced files ---');
|
||||
expect(content).toContain('Content from');
|
||||
expect(content).toContain('mydir');
|
||||
expect(content).toContain('file1.txt');
|
||||
expect(content).toContain('file2.txt');
|
||||
// Should NOT contain the file contents, just the structure
|
||||
expect(content).not.toContain('Content of file1.txt');
|
||||
});
|
||||
|
||||
it('should handle directory with trailing slash', async () => {
|
||||
await createTestFile('mydir', 'file1.txt');
|
||||
const mockConfig = createMockConfig(tempRootDir);
|
||||
|
||||
const result = await readManyFiles(mockConfig, { paths: ['mydir/'] });
|
||||
|
||||
const content = contentToString(result.contentParts);
|
||||
expect(content).toContain('Content from');
|
||||
expect(content).toContain('mydir');
|
||||
});
|
||||
|
||||
it('should handle empty directory', async () => {
|
||||
await createTestDir('emptydir');
|
||||
const mockConfig = createMockConfig(tempRootDir);
|
||||
|
||||
const result = await readManyFiles(mockConfig, { paths: ['emptydir'] });
|
||||
|
||||
const content = contentToString(result.contentParts);
|
||||
expect(content).toContain('Content from');
|
||||
expect(content).toContain('emptydir');
|
||||
});
|
||||
});
|
||||
|
||||
describe('mixed files and directories', () => {
|
||||
it('should handle mix of files and directories', async () => {
|
||||
await createTestFile('file.txt');
|
||||
await createTestFile('mydir', 'nested.txt');
|
||||
const mockConfig = createMockConfig(tempRootDir);
|
||||
|
||||
const result = await readManyFiles(mockConfig, {
|
||||
paths: ['file.txt', 'mydir'],
|
||||
});
|
||||
|
||||
const content = contentToString(result.contentParts);
|
||||
expect(content).toContain('--- Content from referenced files ---');
|
||||
// File content should be present
|
||||
expect(content).toContain('Content of file.txt');
|
||||
// Directory structure should be present
|
||||
expect(content).toContain('Content from');
|
||||
expect(content).toContain('mydir');
|
||||
expect(content).toContain('nested.txt');
|
||||
});
|
||||
});
|
||||
|
||||
describe('edge cases', () => {
|
||||
it('should handle paths with special characters', async () => {
|
||||
await createTestFile('dir-with-dash', 'file.txt');
|
||||
const mockConfig = createMockConfig(tempRootDir);
|
||||
|
||||
const result = await readManyFiles(mockConfig, {
|
||||
paths: ['dir-with-dash'],
|
||||
});
|
||||
|
||||
const content = contentToString(result.contentParts);
|
||||
expect(content).toContain('Content from');
|
||||
expect(content).toContain('dir-with-dash');
|
||||
});
|
||||
|
||||
it('should allow directories outside project root', async () => {
|
||||
// Create a directory outside the workspace
|
||||
const outsideDir = await fs.mkdtemp(
|
||||
path.join(os.tmpdir(), 'outside-workspace-'),
|
||||
);
|
||||
await fs.writeFile(path.join(outsideDir, 'secret.txt'), 'secret');
|
||||
|
||||
const mockConfig = createMockConfig(tempRootDir);
|
||||
|
||||
const result = await readManyFiles(mockConfig, { paths: [outsideDir] });
|
||||
|
||||
// Should include the outside directory listing
|
||||
expect(contentToString(result.contentParts)).toContain('secret.txt');
|
||||
|
||||
// Cleanup
|
||||
await fs.rm(outsideDir, { recursive: true, force: true });
|
||||
});
|
||||
});
|
||||
|
||||
  describe('files array', () => {
    // Each entry in result.files records the absolute path, content, and
    // whether the entry was a directory listing rather than file content.
    it('should populate files array for single file', async () => {
      const { absolutePath } = await createTestFile('file1.txt');
      const mockConfig = createMockConfig(tempRootDir);

      const result = await readManyFiles(mockConfig, { paths: ['file1.txt'] });

      expect(result.files).toHaveLength(1);
      expect(result.files[0].filePath).toBe(absolutePath);
      expect(result.files[0].isDirectory).toBe(false);
      expect(result.files[0].content).toContain('Content of file1.txt');
    });

    it('should populate files array for multiple files', async () => {
      const file1 = await createTestFile('file1.txt');
      const file2 = await createTestFile('file2.txt');
      const mockConfig = createMockConfig(tempRootDir);

      const result = await readManyFiles(mockConfig, {
        paths: ['file1.txt', 'file2.txt'],
      });

      // Order of entries is not asserted, only membership.
      expect(result.files).toHaveLength(2);
      const filePaths = result.files.map((f) => f.filePath);
      expect(filePaths).toContain(file1.absolutePath);
      expect(filePaths).toContain(file2.absolutePath);
    });

    it('should mark directories in files array', async () => {
      await createTestFile('mydir', 'nested.txt');
      const mockConfig = createMockConfig(tempRootDir);

      const result = await readManyFiles(mockConfig, { paths: ['mydir'] });

      // A directory produces a single entry flagged isDirectory: true.
      expect(result.files).toHaveLength(1);
      expect(result.files[0].isDirectory).toBe(true);
      expect(result.files[0].filePath).toContain('mydir');
    });

    it('should include both files and directories in files array', async () => {
      const file = await createTestFile('file.txt');
      await createTestFile('mydir', 'nested.txt');
      const mockConfig = createMockConfig(tempRootDir);

      const result = await readManyFiles(mockConfig, {
        paths: ['file.txt', 'mydir'],
      });

      expect(result.files).toHaveLength(2);

      // Partition entries by kind; exactly one of each is expected here.
      const fileEntry = result.files.find((f) => !f.isDirectory);
      const dirEntry = result.files.find((f) => f.isDirectory);

      expect(fileEntry).toBeDefined();
      expect(fileEntry!.filePath).toBe(file.absolutePath);

      expect(dirEntry).toBeDefined();
      expect(dirEntry!.filePath).toContain('mydir');
    });

    it('should return empty files array when no files found', async () => {
      const mockConfig = createMockConfig(tempRootDir);

      // A path that matches nothing yields no entries (and no throw).
      const result = await readManyFiles(mockConfig, {
        paths: ['nonexistent.txt'],
      });

      expect(result.files).toHaveLength(0);
    });

    it('should return empty files array on error', async () => {
      // Force a failure inside readManyFiles by making getProjectRoot throw;
      // the utility reports the error in result.error instead of throwing.
      const mockConfig = {
        ...createMockConfig(tempRootDir),
        getProjectRoot: () => {
          throw new Error('Test error');
        },
      } as unknown as Config;

      const result = await readManyFiles(mockConfig, { paths: ['file.txt'] });

      expect(result.files).toHaveLength(0);
      expect(result.error).toBeDefined();
    });
  });
|
||||
});
|
||||
210
packages/core/src/utils/readManyFiles.ts
Normal file
210
packages/core/src/utils/readManyFiles.ts
Normal file
|
|
@ -0,0 +1,210 @@
|
|||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import * as fs from 'node:fs';
|
||||
import * as path from 'node:path';
|
||||
import type { Part, PartListUnion } from '@google/genai';
|
||||
import type { Config } from '../config/config.js';
|
||||
import { getErrorMessage } from './errors.js';
|
||||
import { processSingleFileContent } from './fileUtils.js';
|
||||
import { getFolderStructure } from './getFolderStructure.js';
|
||||
|
||||
/**
 * Options for reading multiple files.
 */
export interface ReadManyFilesOptions {
  /**
   * An array of file or directory paths to read.
   * Paths are relative to the project root; absolute paths are also
   * accepted and used as-is.
   */
  paths: string[];

  /**
   * Optional AbortSignal for cancellation support.
   */
  signal?: AbortSignal;
}
|
||||
|
||||
/**
 * Information about a single file that was read.
 */
export interface FileReadInfo {
  /** Absolute path to the file */
  filePath: string;
  /** Content of the file (string for text, Part for images/PDFs) */
  content: PartListUnion;
  /** Whether this is a directory listing rather than file content */
  isDirectory: boolean;
}
|
||||
|
||||
/**
 * Result from reading multiple files.
 */
export interface ReadManyFilesResult {
  /**
   * Content parts ready for LLM consumption.
   * For text files, content is concatenated with separators.
   * For images/PDFs, includes inline data parts.
   */
  contentParts: PartListUnion;

  /**
   * Individual file results with paths and content.
   * Used for recording each file read as a separate tool result.
   */
  files: FileReadInfo[];

  /**
   * Error message if an error occurred during file search.
   * When set, `files` is empty and `contentParts` carries the message.
   */
  error?: string;
}
|
||||
|
||||
// Markers that bracket the concatenated file content in the LLM output.
const DEFAULT_OUTPUT_HEADER = '\n--- Content from referenced files ---';
const DEFAULT_OUTPUT_TERMINATOR = '\n--- End of content ---';
|
||||
|
||||
/**
|
||||
* Reads content from multiple files and directories specified by paths.
|
||||
*
|
||||
* For directories, returns the folder structure.
|
||||
* For text files, concatenates their content into a single string with separators.
|
||||
* For image and PDF files, returns base64-encoded data.
|
||||
*
|
||||
* @param config - The runtime configuration
|
||||
* @param options - Options for file reading (paths, filters, signal)
|
||||
* @returns Result containing content parts and processed files
|
||||
*
|
||||
* NOTE: This utility is invoked only by explicit user-triggered file reads.
|
||||
* Do not apply workspace filters or path restrictions here.
|
||||
*/
|
||||
export async function readManyFiles(
|
||||
config: Config,
|
||||
options: ReadManyFilesOptions,
|
||||
): Promise<ReadManyFilesResult> {
|
||||
const { paths: inputPatterns } = options;
|
||||
|
||||
const seenFiles = new Set<string>();
|
||||
const contentParts: Part[] = [];
|
||||
const files: FileReadInfo[] = [];
|
||||
|
||||
try {
|
||||
const projectRoot = config.getProjectRoot();
|
||||
|
||||
for (const rawPattern of inputPatterns) {
|
||||
const normalizedPattern = rawPattern.replace(/\\/g, '/');
|
||||
const fullPath = path.resolve(projectRoot, normalizedPattern);
|
||||
const stats = fs.existsSync(fullPath) ? fs.statSync(fullPath) : null;
|
||||
|
||||
if (stats?.isDirectory()) {
|
||||
const { contentParts: dirParts, info } = await readDirectory(
|
||||
config,
|
||||
fullPath,
|
||||
);
|
||||
contentParts.push(...dirParts);
|
||||
files.push(info);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (stats?.isFile() && !seenFiles.has(fullPath)) {
|
||||
seenFiles.add(fullPath);
|
||||
const readResult = await readFileContent(config, fullPath);
|
||||
if (readResult) {
|
||||
contentParts.push(...readResult.contentParts);
|
||||
files.push(readResult.info);
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
const errorMessage = `Error during file search: ${getErrorMessage(error)}`;
|
||||
return {
|
||||
contentParts: [errorMessage],
|
||||
files: [],
|
||||
error: errorMessage,
|
||||
};
|
||||
}
|
||||
|
||||
if (contentParts.length > 0) {
|
||||
contentParts.unshift({ text: DEFAULT_OUTPUT_HEADER });
|
||||
contentParts.push({ text: DEFAULT_OUTPUT_TERMINATOR });
|
||||
} else {
|
||||
contentParts.push({
|
||||
text: 'No files matching the criteria were found or all were skipped.',
|
||||
});
|
||||
}
|
||||
|
||||
return { contentParts: contentParts as PartListUnion, files };
|
||||
}
|
||||
|
||||
async function readDirectory(
|
||||
config: Config,
|
||||
directoryPath: string,
|
||||
): Promise<{ contentParts: Part[]; info: FileReadInfo }> {
|
||||
const structure = await getFolderStructure(directoryPath, {
|
||||
fileService: config.getFileService(),
|
||||
fileFilteringOptions: config.getFileFilteringOptions(),
|
||||
});
|
||||
|
||||
const contentParts: Part[] = [
|
||||
{ text: `\nContent from ${directoryPath}:\n` },
|
||||
{ text: structure },
|
||||
];
|
||||
|
||||
return {
|
||||
contentParts,
|
||||
info: {
|
||||
filePath: directoryPath,
|
||||
content: structure,
|
||||
isDirectory: true,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
async function readFileContent(
|
||||
config: Config,
|
||||
filePath: string,
|
||||
): Promise<{ contentParts: Part[]; info: FileReadInfo } | null> {
|
||||
try {
|
||||
const fileReadResult = await processSingleFileContent(filePath, config);
|
||||
if (fileReadResult.error) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const prefixText: Part = { text: `\nContent from ${filePath}:\n` };
|
||||
|
||||
if (typeof fileReadResult.llmContent === 'string') {
|
||||
let fileContentForLlm = '';
|
||||
if (fileReadResult.isTruncated) {
|
||||
const [start, end] = fileReadResult.linesShown!;
|
||||
const total = fileReadResult.originalLineCount!;
|
||||
fileContentForLlm = `Showing lines ${start}-${end} of ${total} total lines.\n---\n${fileReadResult.llmContent}`;
|
||||
} else {
|
||||
fileContentForLlm = fileReadResult.llmContent;
|
||||
}
|
||||
const contentParts: Part[] = [prefixText, { text: fileContentForLlm }];
|
||||
return {
|
||||
contentParts,
|
||||
info: {
|
||||
filePath,
|
||||
content: fileContentForLlm,
|
||||
isDirectory: false,
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
// For binary files (images, PDFs), add prefix text before the inlineData/fileData part
|
||||
const contentParts: Part[] = [prefixText, fileReadResult.llmContent];
|
||||
return {
|
||||
contentParts,
|
||||
info: {
|
||||
filePath,
|
||||
content: fileReadResult.llmContent,
|
||||
isDirectory: false,
|
||||
},
|
||||
};
|
||||
} catch {
|
||||
return null;
|
||||
}
|
||||
}
|
||||
Loading…
Add table
Add a link
Reference in a new issue