Merge pull request #2765 from QwenLM/fix/subagent-token-leak-footer

fix: prevent subagent telemetry from overwriting main agent footer context
This commit is contained in:
tanzhenxin 2026-04-01 15:37:55 +08:00 committed by GitHub
commit 2489044e61
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
2 changed files with 46 additions and 9 deletions

View file

@ -788,6 +788,48 @@ describe('GeminiChat', async () => {
); );
}); });
it('should not update global telemetry when no telemetryService is provided (subagent isolation)', async () => {
  // A subagent's GeminiChat is constructed WITHOUT a telemetryService, so
  // its usage metadata must never leak into the shared UI telemetry state.
  const isolatedChat = new GeminiChat(mockConfig, config, []);

  // Single streamed chunk carrying usage metadata that would normally feed
  // the footer's token counters on the main agent.
  const chunk = {
    candidates: [
      {
        content: {
          parts: [{ text: 'subagent response' }],
          role: 'model',
        },
        finishReason: 'STOP',
        index: 0,
        safetyRatings: [],
      },
    ],
    text: () => 'subagent response',
    usageMetadata: {
      promptTokenCount: 12000,
      candidatesTokenCount: 500,
      totalTokenCount: 12500,
    },
  } as unknown as GenerateContentResponse;

  async function* singleChunkStream() {
    yield chunk;
  }

  vi.mocked(mockContentGenerator.generateContentStream).mockResolvedValue(
    singleChunkStream(),
  );

  const stream = await isolatedChat.sendMessageStream(
    'test-model',
    { message: 'subagent task' },
    'prompt-id-subagent',
  );

  // Drain the stream so the chat processes the usage metadata.
  for await (const _ of stream) {
    // consume stream
  }

  // The global uiTelemetryService must NOT be called by subagent chats
  expect(uiTelemetryService.setLastPromptTokenCount).not.toHaveBeenCalled();
});
it('should keep parts with thoughtSignature when consolidating history', async () => { it('should keep parts with thoughtSignature when consolidating history', async () => {
const stream = (async function* () { const stream = (async function* () {
yield { yield {

View file

@ -35,7 +35,6 @@ import {
ContentRetryFailureEvent, ContentRetryFailureEvent,
} from '../telemetry/types.js'; } from '../telemetry/types.js';
import type { UiTelemetryService } from '../telemetry/uiTelemetry.js'; import type { UiTelemetryService } from '../telemetry/uiTelemetry.js';
import { uiTelemetryService } from '../telemetry/uiTelemetry.js';
const debugLogger = createDebugLogger('QWEN_CODE_CHAT'); const debugLogger = createDebugLogger('QWEN_CODE_CHAT');
@ -659,15 +658,11 @@ export class GeminiChat {
// Some providers omit total_tokens or return 0 in streaming usage chunks. // Some providers omit total_tokens or return 0 in streaming usage chunks.
const lastPromptTokenCount = const lastPromptTokenCount =
usageMetadata.totalTokenCount || usageMetadata.promptTokenCount; usageMetadata.totalTokenCount || usageMetadata.promptTokenCount;
if (lastPromptTokenCount) { if (lastPromptTokenCount && this.telemetryService) {
(this.telemetryService ?? uiTelemetryService).setLastPromptTokenCount( this.telemetryService.setLastPromptTokenCount(lastPromptTokenCount);
lastPromptTokenCount,
);
} }
if (usageMetadata.cachedContentTokenCount) { if (usageMetadata.cachedContentTokenCount && this.telemetryService) {
( this.telemetryService.setLastCachedContentTokenCount(
this.telemetryService ?? uiTelemetryService
).setLastCachedContentTokenCount(
usageMetadata.cachedContentTokenCount, usageMetadata.cachedContentTokenCount,
); );
} }