diff --git a/packages/cli/src/config/config.ts b/packages/cli/src/config/config.ts
index 48961cdca..e66bed5cc 100755
--- a/packages/cli/src/config/config.ts
+++ b/packages/cli/src/config/config.ts
@@ -1011,7 +1011,7 @@ export async function loadCliConfig(
     useBuiltinRipgrep: settings.tools?.useBuiltinRipgrep,
     shouldUseNodePtyShell: settings.tools?.shell?.enableInteractiveShell,
     skipNextSpeakerCheck: settings.model?.skipNextSpeakerCheck,
-    skipLoopDetection: settings.model?.skipLoopDetection ?? false,
+    skipLoopDetection: settings.model?.skipLoopDetection ?? true,
     skipStartupContext: settings.model?.skipStartupContext ?? false,
     truncateToolOutputThreshold: settings.tools?.truncateToolOutputThreshold,
     truncateToolOutputLines: settings.tools?.truncateToolOutputLines,
diff --git a/packages/cli/src/config/settingsSchema.ts b/packages/cli/src/config/settingsSchema.ts
index fd6c3e85b..f569598a4 100644
--- a/packages/cli/src/config/settingsSchema.ts
+++ b/packages/cli/src/config/settingsSchema.ts
@@ -589,7 +589,7 @@ const SETTINGS_SCHEMA = {
       label: 'Skip Loop Detection',
       category: 'Model',
       requiresRestart: false,
-      default: false,
+      default: true,
       description: 'Disable all loop detection checks (streaming and LLM).',
       showInDialog: false,
     },
diff --git a/packages/core/src/core/client.test.ts b/packages/core/src/core/client.test.ts
index b5234045e..215c012a1 100644
--- a/packages/core/src/core/client.test.ts
+++ b/packages/core/src/core/client.test.ts
@@ -2270,7 +2270,6 @@ Other open files:
 
       // Replace loop detector with spies
       const ldMock = {
-        turnStarted: vi.fn().mockResolvedValue(false),
         addAndCheck: vi.fn().mockReturnValue(false),
         reset: vi.fn(),
       };
@@ -2301,7 +2300,6 @@
       }
 
       // Assert - loop detection methods should not be called when skipLoopDetection is true
-      expect(ldMock.turnStarted).not.toHaveBeenCalled();
       expect(ldMock.addAndCheck).not.toHaveBeenCalled();
     });
   });
diff --git a/packages/core/src/core/client.ts b/packages/core/src/core/client.ts
index 9f3625c38..19d0a9c49 100644
--- a/packages/core/src/core/client.ts
+++ b/packages/core/src/core/client.ts
@@ -486,14 +486,6 @@ export class GeminiClient {
 
     const turn = new Turn(this.getChat(), prompt_id);
 
-    if (!this.config.getSkipLoopDetection()) {
-      const loopDetected = await this.loopDetector.turnStarted(signal);
-      if (loopDetected) {
-        yield { type: GeminiEventType.LoopDetected };
-        return turn;
-      }
-    }
-
     // append system reminders to the request
     let requestToSent = await flatMapTextParts(request, async (text) => [text]);
     if (!options?.isContinuation) {
diff --git a/packages/core/src/services/loopDetectionService.test.ts b/packages/core/src/services/loopDetectionService.test.ts
index c7629e134..31a8699dc 100644
--- a/packages/core/src/services/loopDetectionService.test.ts
+++ b/packages/core/src/services/loopDetectionService.test.ts
@@ -4,10 +4,8 @@
  * SPDX-License-Identifier: Apache-2.0
  */
 
-import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
+import { beforeEach, describe, expect, it, vi } from 'vitest';
 import type { Config } from '../config/config.js';
-import type { GeminiClient } from '../core/client.js';
-import type { BaseLlmClient } from '../core/baseLlmClient.js';
 import type {
   ServerGeminiContentEvent,
   ServerGeminiStreamEvent,
@@ -15,7 +13,6 @@ import type {
 } from '../core/turn.js';
 import { GeminiEventType } from '../core/turn.js';
 import * as loggers from '../telemetry/loggers.js';
-import { LoopType } from '../telemetry/types.js';
 import { LoopDetectionService } from './loopDetectionService.js';
 
 vi.mock('../telemetry/loggers.js', () => ({
@@ -623,142 +620,3 @@ describe('LoopDetectionService', () => {
     });
   });
 });
-
-describe('LoopDetectionService LLM Checks', () => {
-  let service: LoopDetectionService;
-  let mockConfig: Config;
-  let mockGeminiClient: GeminiClient;
-  let mockBaseLlmClient: BaseLlmClient;
-  let abortController: AbortController;
-
-  beforeEach(() => {
-    mockGeminiClient = {
-      getHistory: vi.fn().mockReturnValue([]),
-    } as unknown as GeminiClient;
-
-    mockBaseLlmClient = {
-      generateJson: vi.fn(),
-    } as unknown as BaseLlmClient;
-
-    mockConfig = {
-      getGeminiClient: () => mockGeminiClient,
-      getBaseLlmClient: () => mockBaseLlmClient,
-      getDebugMode: () => false,
-      getDebugLogger: () => ({
-        debug: () => {},
-        info: () => {},
-        warn: () => {},
-        error: () => {},
-      }),
-      getTelemetryEnabled: () => true,
-      getModel: () => 'test-model',
-    } as unknown as Config;
-
-    service = new LoopDetectionService(mockConfig);
-    abortController = new AbortController();
-    vi.clearAllMocks();
-  });
-
-  afterEach(() => {
-    vi.restoreAllMocks();
-  });
-
-  const advanceTurns = async (count: number) => {
-    for (let i = 0; i < count; i++) {
-      await service.turnStarted(abortController.signal);
-    }
-  };
-
-  it('should not trigger LLM check before LLM_CHECK_AFTER_TURNS', async () => {
-    await advanceTurns(29);
-    expect(mockBaseLlmClient.generateJson).not.toHaveBeenCalled();
-  });
-
-  it('should trigger LLM check on the 30th turn', async () => {
-    mockBaseLlmClient.generateJson = vi
-      .fn()
-      .mockResolvedValue({ confidence: 0.1 });
-    await advanceTurns(30);
-    expect(mockBaseLlmClient.generateJson).toHaveBeenCalledTimes(1);
-    expect(mockBaseLlmClient.generateJson).toHaveBeenCalledWith(
-      expect.objectContaining({
-        systemInstruction: expect.any(String),
-        contents: expect.any(Array),
-        model: expect.any(String),
-        schema: expect.any(Object),
-        promptId: expect.any(String),
-      }),
-    );
-  });
-
-  it('should detect a cognitive loop when confidence is high', async () => {
-    // First check at turn 30
-    mockBaseLlmClient.generateJson = vi
-      .fn()
-      .mockResolvedValue({ confidence: 0.85, reasoning: 'Repetitive actions' });
-    await advanceTurns(30);
-    expect(mockBaseLlmClient.generateJson).toHaveBeenCalledTimes(1);
-
-    // The confidence of 0.85 will result in a low interval.
-    // The interval will be: 5 + (15 - 5) * (1 - 0.85) = 5 + 10 * 0.15 = 6.5 -> rounded to 7
-    await advanceTurns(6); // advance to turn 36
-
-    mockBaseLlmClient.generateJson = vi
-      .fn()
-      .mockResolvedValue({ confidence: 0.95, reasoning: 'Repetitive actions' });
-    const finalResult = await service.turnStarted(abortController.signal); // This is turn 37
-
-    expect(finalResult).toBe(true);
-    expect(loggers.logLoopDetected).toHaveBeenCalledWith(
-      mockConfig,
-      expect.objectContaining({
-        'event.name': 'loop_detected',
-        loop_type: LoopType.LLM_DETECTED_LOOP,
-      }),
-    );
-  });
-
-  it('should not detect a loop when confidence is low', async () => {
-    mockBaseLlmClient.generateJson = vi
-      .fn()
-      .mockResolvedValue({ confidence: 0.5, reasoning: 'Looks okay' });
-    await advanceTurns(30);
-    const result = await service.turnStarted(abortController.signal);
-    expect(result).toBe(false);
-    expect(loggers.logLoopDetected).not.toHaveBeenCalled();
-  });
-
-  it('should adjust the check interval based on confidence', async () => {
-    // Confidence is 0.0, so interval should be MAX_LLM_CHECK_INTERVAL (15)
-    mockBaseLlmClient.generateJson = vi
-      .fn()
-      .mockResolvedValue({ confidence: 0.0 });
-    await advanceTurns(30); // First check at turn 30
-    expect(mockBaseLlmClient.generateJson).toHaveBeenCalledTimes(1);
-
-    await advanceTurns(14); // Advance to turn 44
-    expect(mockBaseLlmClient.generateJson).toHaveBeenCalledTimes(1);
-
-    await service.turnStarted(abortController.signal); // Turn 45
-    expect(mockBaseLlmClient.generateJson).toHaveBeenCalledTimes(2);
-  });
-
-  it('should handle errors from generateJson gracefully', async () => {
-    mockBaseLlmClient.generateJson = vi
-      .fn()
-      .mockRejectedValue(new Error('API error'));
-    await advanceTurns(30);
-    const result = await service.turnStarted(abortController.signal);
-    expect(result).toBe(false);
-    expect(loggers.logLoopDetected).not.toHaveBeenCalled();
-  });
-
-  it('should not trigger LLM check when disabled for session', async () => {
-    service.disableForSession();
-    expect(loggers.logLoopDetectionDisabled).toHaveBeenCalledTimes(1);
-    await advanceTurns(30);
-    const result = await service.turnStarted(abortController.signal);
-    expect(result).toBe(false);
-    expect(mockBaseLlmClient.generateJson).not.toHaveBeenCalled();
-  });
-});
diff --git a/packages/core/src/services/loopDetectionService.ts b/packages/core/src/services/loopDetectionService.ts
index 9117d0120..d14e4223e 100644
--- a/packages/core/src/services/loopDetectionService.ts
+++ b/packages/core/src/services/loopDetectionService.ts
@@ -4,7 +4,6 @@
  * SPDX-License-Identifier: Apache-2.0
  */
 
-import type { Content } from '@google/genai';
 import { createHash } from 'node:crypto';
 import type { ServerGeminiStreamEvent } from '../core/turn.js';
 import { GeminiEventType } from '../core/turn.js';
@@ -18,59 +17,12 @@ import {
   LoopType,
 } from '../telemetry/types.js';
 import type { Config } from '../config/config.js';
-import {
-  isFunctionCall,
-  isFunctionResponse,
-} from '../utils/messageInspectors.js';
-import { DEFAULT_QWEN_MODEL } from '../config/models.js';
-import { createDebugLogger } from '../utils/debugLogger.js';
-
-const debugLogger = createDebugLogger('LOOP_DETECTION');
 
 const TOOL_CALL_LOOP_THRESHOLD = 5;
 const CONTENT_LOOP_THRESHOLD = 10;
 const CONTENT_CHUNK_SIZE = 50;
 const MAX_HISTORY_LENGTH = 1000;
 
-/**
- * The number of recent conversation turns to include in the history when asking the LLM to check for a loop.
- */
-const LLM_LOOP_CHECK_HISTORY_COUNT = 20;
-
-/**
- * The number of turns that must pass in a single prompt before the LLM-based loop check is activated.
- */
-const LLM_CHECK_AFTER_TURNS = 30;
-
-/**
- * The default interval, in number of turns, at which the LLM-based loop check is performed.
- * This value is adjusted dynamically based on the LLM's confidence.
- */
-const DEFAULT_LLM_CHECK_INTERVAL = 3;
-
-/**
- * The minimum interval for LLM-based loop checks.
- * This is used when the confidence of a loop is high, to check more frequently.
- */
-const MIN_LLM_CHECK_INTERVAL = 5;
-
-/**
- * The maximum interval for LLM-based loop checks.
- * This is used when the confidence of a loop is low, to check less frequently.
- */
-const MAX_LLM_CHECK_INTERVAL = 15;
-
-const LOOP_DETECTION_SYSTEM_PROMPT = `You are a sophisticated AI diagnostic agent specializing in identifying when a conversational AI is stuck in an unproductive state. Your task is to analyze the provided conversation history and determine if the assistant has ceased to make meaningful progress.
-
-An unproductive state is characterized by one or more of the following patterns over the last 5 or more assistant turns:
-
-Repetitive Actions: The assistant repeats the same tool calls or conversational responses a decent number of times. This includes simple loops (e.g., tool_A, tool_A, tool_A) and alternating patterns (e.g., tool_A, tool_B, tool_A, tool_B, ...).
-
-Cognitive Loop: The assistant seems unable to determine the next logical step. It might express confusion, repeatedly ask the same questions, or generate responses that don't logically follow from the previous turns, indicating it's stuck and not advancing the task.
-
-Crucially, differentiate between a true unproductive state and legitimate, incremental progress.
-For example, a series of 'tool_A' or 'tool_B' tool calls that make small, distinct changes to the same file (like adding docstrings to functions one by one) is considered forward progress and is NOT a loop. A loop would be repeatedly replacing the same text with the same content, or cycling between a small set of files with no net change.`;
-
 /**
  * Service for detecting and preventing infinite loops in AI responses.
  * Monitors tool call repetitions and content sentence repetitions.
@@ -90,11 +42,6 @@ export class LoopDetectionService {
   private loopDetected = false;
   private inCodeBlock = false;
 
-  // LLM loop track tracking
-  private turnsInCurrentPrompt = 0;
-  private llmCheckInterval = DEFAULT_LLM_CHECK_INTERVAL;
-  private lastCheckTurn = 0;
-
   // Session-level disable flag
   private disabledForSession = false;
 
@@ -145,33 +92,6 @@
     return this.loopDetected;
   }
 
-  /**
-   * Signals the start of a new turn in the conversation.
-   *
-   * This method increments the turn counter and, if specific conditions are met,
-   * triggers an LLM-based check to detect potential conversation loops. The check
-   * is performed periodically based on the `llmCheckInterval`.
-   *
-   * @param signal - An AbortSignal to allow for cancellation of the asynchronous LLM check.
-   * @returns A promise that resolves to `true` if a loop is detected, and `false` otherwise.
-   */
-  async turnStarted(signal: AbortSignal) {
-    if (this.disabledForSession) {
-      return false;
-    }
-    this.turnsInCurrentPrompt++;
-
-    if (
-      this.turnsInCurrentPrompt >= LLM_CHECK_AFTER_TURNS &&
-      this.turnsInCurrentPrompt - this.lastCheckTurn >= this.llmCheckInterval
-    ) {
-      this.lastCheckTurn = this.turnsInCurrentPrompt;
-      return await this.checkForLoopWithLLM(signal);
-    }
-
-    return false;
-  }
-
   private checkToolCallLoop(toolCall: { name: string; args: object }): boolean {
     const key = this.getToolCallKey(toolCall);
     if (this.lastToolCallKey === key) {
@@ -371,94 +291,6 @@
     return originalChunk === currentChunk;
   }
 
-  private trimRecentHistory(recentHistory: Content[]): Content[] {
-    // A function response must be preceded by a function call.
-    // Continuously removes dangling function calls from the end of the history
-    // until the last turn is not a function call.
-    while (
-      recentHistory.length > 0 &&
-      isFunctionCall(recentHistory[recentHistory.length - 1])
-    ) {
-      recentHistory.pop();
-    }
-
-    // A function response should follow a function call.
-    // Continuously removes leading function responses from the beginning of history
-    // until the first turn is not a function response.
-    while (recentHistory.length > 0 && isFunctionResponse(recentHistory[0])) {
-      recentHistory.shift();
-    }
-
-    return recentHistory;
-  }
-
-  private async checkForLoopWithLLM(signal: AbortSignal) {
-    const recentHistory = this.config
-      .getGeminiClient()
-      .getHistory()
-      .slice(-LLM_LOOP_CHECK_HISTORY_COUNT);
-
-    const trimmedHistory = this.trimRecentHistory(recentHistory);
-
-    const taskPrompt = `Please analyze the conversation history to determine the possibility that the conversation is stuck in a repetitive, non-productive state. Provide your response in the requested JSON format.`;
-
-    const contents = [
-      ...trimmedHistory,
-      { role: 'user', parts: [{ text: taskPrompt }] },
-    ];
-    const schema: Record<string, unknown> = {
-      type: 'object',
-      properties: {
-        reasoning: {
-          type: 'string',
-          description:
-            'Your reasoning on if the conversation is looping without forward progress.',
-        },
-        confidence: {
-          type: 'number',
-          description:
-            'A number between 0.0 and 1.0 representing your confidence that the conversation is in an unproductive state.',
-        },
-      },
-      required: ['reasoning', 'confidence'],
-    };
-    let result;
-    try {
-      result = await this.config.getBaseLlmClient().generateJson({
-        contents,
-        schema,
-        model: this.config.getModel() || DEFAULT_QWEN_MODEL,
-        systemInstruction: LOOP_DETECTION_SYSTEM_PROMPT,
-        abortSignal: signal,
-        promptId: this.promptId,
-      });
-    } catch (e) {
-      // Do nothing, treat it as a non-loop.
-      this.config.getDebugLogger().error(e);
-      return false;
-    }
-
-    if (typeof result['confidence'] === 'number') {
-      if (result['confidence'] > 0.9) {
-        if (typeof result['reasoning'] === 'string' && result['reasoning']) {
-          debugLogger.warn(result['reasoning']);
-        }
-        logLoopDetected(
-          this.config,
-          new LoopDetectedEvent(LoopType.LLM_DETECTED_LOOP, this.promptId),
-        );
-        return true;
-      } else {
-        this.llmCheckInterval = Math.round(
-          MIN_LLM_CHECK_INTERVAL +
-            (MAX_LLM_CHECK_INTERVAL - MIN_LLM_CHECK_INTERVAL) *
-              (1 - result['confidence']),
-        );
-      }
-    }
-    return false;
-  }
-
   /**
    * Resets all loop detection state.
   */
@@ -466,7 +298,6 @@
     this.promptId = promptId;
     this.resetToolCallCount();
     this.resetContentTracking();
-    this.resetLlmCheckTracking();
     this.loopDetected = false;
   }
 
@@ -482,10 +313,4 @@
     this.contentStats.clear();
     this.lastContentIndex = 0;
   }
-
-  private resetLlmCheckTracking(): void {
-    this.turnsInCurrentPrompt = 0;
-    this.llmCheckInterval = DEFAULT_LLM_CHECK_INTERVAL;
-    this.lastCheckTurn = 0;
-  }
 }
diff --git a/packages/core/src/telemetry/types.ts b/packages/core/src/telemetry/types.ts
index 98c8d5cac..d9c6b535d 100644
--- a/packages/core/src/telemetry/types.ts
+++ b/packages/core/src/telemetry/types.ts
@@ -362,7 +362,6 @@ export class RipgrepFallbackEvent implements BaseTelemetryEvent {
 export enum LoopType {
   CONSECUTIVE_IDENTICAL_TOOL_CALLS = 'consecutive_identical_tool_calls',
   CHANTING_IDENTICAL_SENTENCES = 'chanting_identical_sentences',
-  LLM_DETECTED_LOOP = 'llm_detected_loop',
 }
 
 export class LoopDetectedEvent implements BaseTelemetryEvent {