Fix Antigravity dedup collision and add Codex ChatGPT Plus token estimation (#204)

- Antigravity: use loop index as fallback when responseId is empty to prevent
  all entries in a cascade sharing the same dedup key; bump CACHE_VERSION to
  force re-parse of stale cached data
- Codex: estimate tokens from message text when info is null (ChatGPT Plus/Pro
  subscription sessions), feeding through calculateCost so subscription users
  see API-equivalent spend; add costIsEstimated flag to ParsedProviderCall
- Update LiteLLM pricing snapshot
This commit is contained in:
AgentSeal 2026-05-03 19:47:58 +02:00
parent 95585febf4
commit 8cf68e7a16
4 changed files with 60 additions and 5 deletions

File diff suppressed because one or more lines are too long

View file

@ -9,7 +9,7 @@ import { calculateCost } from '../models.js'
import type { Provider, SessionSource, SessionParser, ParsedProviderCall } from './types.js'
const CONVERSATIONS_DIR = join(homedir(), '.gemini', 'antigravity', 'conversations')
const CACHE_VERSION = 1
const CACHE_VERSION = 2
const RPC_TIMEOUT_MS = 5000
const MAX_RESPONSE_BYTES = 16 * 1024 * 1024
@ -283,7 +283,8 @@ function createParser(source: SessionSource, seenKeys: Set<string>): SessionPars
const results: ParsedProviderCall[] = []
for (const entry of metadata) {
for (let i = 0; i < metadata.length; i++) {
const entry = metadata[i]!
const usage = entry.chatModel?.usage
if (!usage) continue
@ -294,7 +295,7 @@ function createParser(source: SessionSource, seenKeys: Set<string>): SessionPars
if (inputTokens === 0 && outputTokens === 0) continue
const responseId = usage.responseId ?? ''
const responseId = usage.responseId || String(i)
const dedupKey = `antigravity:${cascadeId}:${responseId}`
const model = modelMap[usage.model] ?? usage.model

View file

@ -64,6 +64,8 @@ type CodexTokenUsage = {
total_tokens?: number
}
const CHARS_PER_TOKEN = 4
function getCodexDir(override?: string): string {
return override ?? process.env['CODEX_HOME'] ?? join(homedir(), '.codex')
}
@ -211,6 +213,8 @@ function createParser(source: SessionSource, seenKeys: Set<string>): SessionPars
let prevReasoning = 0
let pendingTools: string[] = []
let pendingUserMessage = ''
let pendingOutputChars = 0
let estCounter = 0
const results: ParsedProviderCall[] = []
for (const line of lines) {
@ -252,9 +256,57 @@ function createParser(source: SessionSource, seenKeys: Set<string>): SessionPars
continue
}
if (entry.type === 'response_item' && entry.payload?.type === 'message' && entry.payload?.role === 'assistant') {
const texts = (entry.payload.content ?? [])
.filter(c => c.type === 'output_text' || c.type === 'text')
.map(c => c.text ?? '')
pendingOutputChars += texts.join('').length
continue
}
if (entry.type === 'event_msg' && entry.payload?.type === 'token_count') {
const info = entry.payload.info
if (!info) continue
if (!info) {
if (pendingOutputChars === 0 && pendingUserMessage.length === 0) continue
const estInput = Math.ceil(pendingUserMessage.length / CHARS_PER_TOKEN)
const estOutput = Math.ceil(pendingOutputChars / CHARS_PER_TOKEN)
if (estInput === 0 && estOutput === 0) continue
const model = sessionModel ?? 'gpt-5'
const timestamp = entry.timestamp ?? ''
const dedupKey = `codex:${sessionId}:${timestamp}:est${estCounter++}`
if (seenKeys.has(dedupKey)) { pendingTools = []; pendingUserMessage = ''; pendingOutputChars = 0; continue }
seenKeys.add(dedupKey)
const costUSD = calculateCost(model, estInput, estOutput, 0, 0, 0)
results.push({
provider: 'codex',
model,
inputTokens: estInput,
outputTokens: estOutput,
cacheCreationInputTokens: 0,
cacheReadInputTokens: 0,
cachedInputTokens: 0,
reasoningTokens: 0,
webSearchRequests: 0,
costUSD,
costIsEstimated: true,
tools: pendingTools,
bashCommands: [],
timestamp,
speed: 'standard',
deduplicationKey: dedupKey,
userMessage: pendingUserMessage,
sessionId,
})
pendingTools = []
pendingUserMessage = ''
pendingOutputChars = 0
continue
}
const cumulativeTotal = info.total_token_usage?.total_tokens ?? 0
if (cumulativeTotal > 0 && cumulativeTotal === prevCumulativeTotal) continue
@ -335,6 +387,7 @@ function createParser(source: SessionSource, seenKeys: Set<string>): SessionPars
pendingTools = []
pendingUserMessage = ''
pendingOutputChars = 0
}
}

View file

@ -19,6 +19,7 @@ export type ParsedProviderCall = {
reasoningTokens: number
webSearchRequests: number
costUSD: number
costIsEstimated?: boolean
tools: string[]
bashCommands: string[]
timestamp: string