Add Gemini CLI provider for session tracking (#168)

Parse ~/.gemini/tmp/<project>/chats/session-*.json files from Gemini
CLI 0.38+. Uses real token counts (input, output, cached, thoughts)
embedded in each message instead of character estimation. Correctly
separates cached tokens from fresh input to avoid double-charging.

- Pricing for gemini-3.1-pro-preview, gemini-3-flash-preview,
  gemini-2.5-pro, gemini-2.5-flash from official Google API rates
- Tool name normalization (ReadFile->Read, SearchText->Grep, etc.)
- Menubar tab with Google Blue color (#4485F4)

Closes #166
This commit is contained in:
Resham Joshi 2026-04-27 19:48:25 -07:00 committed by GitHub
parent f7f64a01ab
commit 6d15ea43a5
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
6 changed files with 255 additions and 3 deletions

View file

@ -226,6 +226,7 @@ enum ProviderFilter: String, CaseIterable, Identifiable {
case codex = "Codex"
case cursor = "Cursor"
case copilot = "Copilot"
case gemini = "Gemini"
case kiro = "Kiro"
case opencode = "OpenCode"
case pi = "Pi"
@ -247,6 +248,7 @@ enum ProviderFilter: String, CaseIterable, Identifiable {
case .codex: "codex"
case .cursor: "cursor"
case .copilot: "copilot"
case .gemini: "gemini"
case .kiro: "kiro"
case .opencode: "opencode"
case .pi: "pi"

View file

@ -92,6 +92,7 @@ extension ProviderFilter {
case .codex: return Theme.categoricalCodex
case .cursor: return Theme.categoricalCursor
case .copilot: return Color(red: 0x6D/255.0, green: 0x8F/255.0, blue: 0xA6/255.0)
case .gemini: return Color(red: 0x44/255.0, green: 0x85/255.0, blue: 0xF4/255.0)
case .kiro: return Color(red: 0x4A/255.0, green: 0x9E/255.0, blue: 0xC4/255.0)
case .opencode: return Color(red: 0x5B/255.0, green: 0x83/255.0, blue: 0x5B/255.0)
case .pi: return Color(red: 0xB2/255.0, green: 0x6B/255.0, blue: 0x3D/255.0)

View file

@ -38,7 +38,10 @@ const FALLBACK_PRICING: Record<string, ModelCosts> = {
'claude-3-5-haiku': { inputCostPerToken: 0.8e-6, outputCostPerToken: 4e-6, cacheWriteCostPerToken: 1e-6, cacheReadCostPerToken: 0.08e-6, webSearchCostPerRequest: WEB_SEARCH_COST, fastMultiplier: 1 },
'gpt-4o': { inputCostPerToken: 2.5e-6, outputCostPerToken: 10e-6, cacheWriteCostPerToken: 2.5e-6, cacheReadCostPerToken: 1.25e-6, webSearchCostPerRequest: WEB_SEARCH_COST, fastMultiplier: 1 },
'gpt-4o-mini': { inputCostPerToken: 0.15e-6, outputCostPerToken: 0.6e-6, cacheWriteCostPerToken: 0.15e-6, cacheReadCostPerToken: 0.075e-6, webSearchCostPerRequest: WEB_SEARCH_COST, fastMultiplier: 1 },
'gemini-2.5-pro': { inputCostPerToken: 1.25e-6, outputCostPerToken: 10e-6, cacheWriteCostPerToken: 1.25e-6, cacheReadCostPerToken: 0.315e-6, webSearchCostPerRequest: WEB_SEARCH_COST, fastMultiplier: 1 },
'gemini-3.1-pro-preview': { inputCostPerToken: 2e-6, outputCostPerToken: 12e-6, cacheWriteCostPerToken: 2e-6, cacheReadCostPerToken: 0.5e-6, webSearchCostPerRequest: WEB_SEARCH_COST, fastMultiplier: 1 },
'gemini-3-flash-preview': { inputCostPerToken: 0.5e-6, outputCostPerToken: 3e-6, cacheWriteCostPerToken: 0.5e-6, cacheReadCostPerToken: 0.125e-6, webSearchCostPerRequest: WEB_SEARCH_COST, fastMultiplier: 1 },
'gemini-2.5-pro': { inputCostPerToken: 1.25e-6, outputCostPerToken: 10e-6, cacheWriteCostPerToken: 1.25e-6, cacheReadCostPerToken: 0.3125e-6, webSearchCostPerRequest: WEB_SEARCH_COST, fastMultiplier: 1 },
'gemini-2.5-flash': { inputCostPerToken: 0.3e-6, outputCostPerToken: 2.5e-6, cacheWriteCostPerToken: 0.3e-6, cacheReadCostPerToken: 0.075e-6, webSearchCostPerRequest: WEB_SEARCH_COST, fastMultiplier: 1 },
'gpt-5.3-codex': { inputCostPerToken: 2.5e-6, outputCostPerToken: 10e-6, cacheWriteCostPerToken: 2.5e-6, cacheReadCostPerToken: 1.25e-6, webSearchCostPerRequest: WEB_SEARCH_COST, fastMultiplier: 1 },
'gpt-5.4': { inputCostPerToken: 2.5e-6, outputCostPerToken: 10e-6, cacheWriteCostPerToken: 2.5e-6, cacheReadCostPerToken: 1.25e-6, webSearchCostPerRequest: WEB_SEARCH_COST, fastMultiplier: 1 },
'gpt-5.4-mini': { inputCostPerToken: 0.4e-6, outputCostPerToken: 1.6e-6, cacheWriteCostPerToken: 0.4e-6, cacheReadCostPerToken: 0.2e-6, webSearchCostPerRequest: WEB_SEARCH_COST, fastMultiplier: 1 },
@ -241,7 +244,10 @@ export function getShortModelName(model: string): string {
'gpt-5.2': 'GPT-5.2',
'gpt-5-mini': 'GPT-5 Mini',
'gpt-5': 'GPT-5',
'gemini-3.1-pro-preview': 'Gemini 3.1 Pro',
'gemini-3-flash-preview': 'Gemini 3 Flash',
'gemini-2.5-pro': 'Gemini 2.5 Pro',
'gemini-2.5-flash': 'Gemini 2.5 Flash',
'o4-mini': 'o4-mini',
'o3': 'o3',
'MiniMax-M2.7-highspeed': 'MiniMax M2.7 Highspeed',

242
src/providers/gemini.ts Normal file
View file

@ -0,0 +1,242 @@
import { readdir, readFile, stat } from 'fs/promises'
import { join } from 'path'
import { homedir } from 'os'
import { calculateCost } from '../models.js'
import type { Provider, SessionSource, SessionParser, ParsedProviderCall } from './types.js'
// Normalizes raw Gemini CLI tool identifiers to the canonical tool names
// shared across providers. Both naming schemes observed in session files are
// covered: snake_case ids and PascalCase display names.
const toolNameMap: Record<string, string> = {
  // snake_case tool ids
  read_file: 'Read',
  write_file: 'Write',
  edit_file: 'Edit',
  create_file: 'Write',
  delete_file: 'Delete',
  list_dir: 'LS',
  grep_search: 'Grep',
  search_files: 'Grep',
  find_files: 'Glob',
  run_command: 'Bash',
  web_search: 'WebSearch',
  // PascalCase display names
  ReadFile: 'Read',
  WriteFile: 'Write',
  EditFile: 'Edit',
  ListDir: 'LS',
  SearchText: 'Grep',
  Shell: 'Bash',
}
/** Per-message token accounting embedded in Gemini CLI session files. */
type GeminiTokens = {
// Prompt tokens; includes `cached` as a subset (see parseSession's fresh-input math).
input?: number
// Completion tokens (excludes thoughts; those are tallied separately below).
output?: number
// Tokens served from prompt cache — a subset of `input`, not additional.
cached?: number
// Reasoning ("thinking") tokens; billed as output by parseSession.
thoughts?: number
// Tool-related tokens — not used by this parser; meaning TODO confirm upstream.
tool?: number
// Grand total — not used by this parser.
total?: number
}
/** A single tool invocation recorded on an assistant message. */
type GeminiToolCall = {
id: string
// Raw tool id, e.g. 'run_command'; mapped via toolNameMap when present.
name: string
args: Record<string, unknown>
status?: string
// Human-readable name, e.g. 'Shell'; preferred over `name` for mapping.
displayName?: string
}
/** One chat message from a session-*.json file. */
type GeminiMessage = {
id: string
timestamp: string
// 'gemini' messages carry tokens/model; 'user' messages supply the prompt text.
type: 'user' | 'gemini' | 'info'
// Either a plain string or an array of text parts (both handled by parseSession).
content: string | Array<{ text: string }>
tokens?: GeminiTokens
model?: string
toolCalls?: GeminiToolCall[]
thoughts?: unknown[]
}
/** Top-level shape of ~/.gemini/tmp/<project>/chats/session-*.json. */
type GeminiSession = {
sessionId: string
projectHash?: string
// ISO-8601 start time; used as the call's timestamp after validation.
startTime: string
lastUpdated?: string
messages: GeminiMessage[]
kind?: string
}
/**
 * Aggregate one Gemini CLI session into a single ParsedProviderCall.
 *
 * Sums token counts across all assistant messages that carry both `tokens`
 * and `model`, collects normalized tool names and leading Bash commands, and
 * prices the call via calculateCost. Returns an empty array when the session
 * has no billable messages, was already seen (dedup by sessionId), has zero
 * tokens, or carries an invalid start timestamp.
 *
 * @param data     Parsed session-*.json contents.
 * @param seenKeys Cross-file dedup set; mutated to record this session.
 */
function parseSession(data: GeminiSession, seenKeys: Set<string>): ParsedProviderCall[] {
  const results: ParsedProviderCall[] = []
  // Only assistant messages with both token counts and a model are billable.
  const geminiMessages = data.messages.filter(m => m.type === 'gemini' && m.tokens && m.model)
  if (geminiMessages.length === 0) return results
  // One aggregated call per session; skip sessions already processed.
  const dedupKey = `gemini:${data.sessionId}`
  if (seenKeys.has(dedupKey)) return results
  seenKeys.add(dedupKey)
  let totalInput = 0
  let totalOutput = 0
  let totalCached = 0
  let totalThoughts = 0
  const allTools: string[] = []
  const bashCommands: string[] = []
  let model = ''
  for (const msg of geminiMessages) {
    const t = msg.tokens! // guaranteed by the filter above
    totalInput += t.input ?? 0
    // Thought (reasoning) tokens are billed at the output rate, so fold them in.
    totalOutput += (t.output ?? 0) + (t.thoughts ?? 0)
    totalCached += t.cached ?? 0
    totalThoughts += t.thoughts ?? 0
    // First model seen wins; sessions are assumed single-model in practice.
    if (msg.model && !model) model = msg.model
    if (msg.toolCalls) {
      for (const tc of msg.toolCalls) {
        // Prefer the display name for mapping, then the raw id, then pass through.
        const mapped = toolNameMap[tc.displayName ?? ''] ?? toolNameMap[tc.name] ?? tc.displayName ?? tc.name
        allTools.push(mapped)
        if (mapped === 'Bash' && tc.args && typeof tc.args.command === 'string') {
          // Record only the executable (first whitespace-delimited word).
          const cmd = tc.args.command.split(/\s+/)[0] ?? ''
          if (cmd) bashCommands.push(cmd)
        }
      }
    }
  }
  if (totalInput === 0 && totalOutput === 0) return results
  // Gemini's `input` count includes `cached` tokens as a subset, so fresh input
  // must subtract cached to avoid double-charging at both rates. Clamp at zero
  // so a malformed session reporting cached > input cannot produce negative
  // token counts or a negative cost.
  const freshInput = Math.max(0, totalInput - totalCached)
  let userMessage = ''
  const firstUser = data.messages.find(m => m.type === 'user')
  if (firstUser) {
    if (Array.isArray(firstUser.content)) {
      // Keep only well-formed text parts; a missing `text` would otherwise
      // serialize as the literal string "undefined".
      userMessage = firstUser.content
        .map(c => (typeof c?.text === 'string' ? c.text : ''))
        .join(' ')
        .slice(0, 500)
    } else if (typeof firstUser.content === 'string') {
      userMessage = firstUser.content.slice(0, 500)
    }
  }
  const tsDate = new Date(data.startTime)
  // Reject unparseable timestamps and implausibly old ones (< 1e12 ms ~ pre-2001).
  if (isNaN(tsDate.getTime()) || tsDate.getTime() < 1_000_000_000_000) return results
  const costUSD = calculateCost(model, freshInput, totalOutput, 0, totalCached, 0)
  results.push({
    provider: 'gemini',
    model,
    inputTokens: freshInput,
    outputTokens: totalOutput,
    cacheCreationInputTokens: 0,
    cacheReadInputTokens: totalCached,
    cachedInputTokens: totalCached,
    reasoningTokens: totalThoughts,
    webSearchRequests: 0,
    costUSD,
    tools: [...new Set(allTools)],
    bashCommands: [...new Set(bashCommands)],
    timestamp: tsDate.toISOString(),
    speed: 'standard',
    deduplicationKey: dedupKey,
    userMessage,
    sessionId: data.sessionId,
  })
  return results
}
/**
 * Build a SessionParser that lazily reads and parses one Gemini CLI session
 * file, yielding at most one aggregated call (parseSession dedups by session).
 * Unreadable or malformed files yield nothing rather than throwing.
 */
function createParser(source: SessionSource, seenKeys: Set<string>): SessionParser {
  return {
    async *parse(): AsyncGenerator<ParsedProviderCall> {
      let data: GeminiSession
      try {
        // Read and decode in one step; any I/O or JSON error aborts silently.
        data = JSON.parse(await readFile(source.path, 'utf-8'))
      } catch {
        return
      }
      // Minimal structural sanity check before handing off to the parser.
      if (!data.messages || !data.sessionId) return
      yield* parseSession(data, seenKeys)
    },
  }
}
/** Root directory where the Gemini CLI stores per-project session data. */
function getGeminiTmpDir(): string {
  const home = homedir()
  return join(home, '.gemini', 'tmp')
}
/**
 * Scan ~/.gemini/tmp/<project>/chats/ for session files.
 *
 * Accepts files named session-*.json or session-*.jsonl and verifies each is
 * a regular file before including it. Missing directories (Gemini CLI never
 * run, or a project without chats) are treated as "no sessions", not errors.
 *
 * Project directories are scanned concurrently for speed; output ordering is
 * kept deterministic by mapping projects in directory order and flattening.
 *
 * @returns One SessionSource per discovered session file.
 */
async function discoverSessions(): Promise<SessionSource[]> {
  const tmpDir = getGeminiTmpDir()
  let projectDirs: string[]
  try {
    const entries = await readdir(tmpDir, { withFileTypes: true })
    projectDirs = entries.filter(e => e.isDirectory()).map(e => e.name)
  } catch {
    // ~/.gemini/tmp does not exist: nothing to report.
    return []
  }
  const perProject = await Promise.all(
    projectDirs.map(async (project): Promise<SessionSource[]> => {
      const chatsDir = join(tmpDir, project, 'chats')
      let files: string[]
      try {
        const entries = await readdir(chatsDir)
        files = entries.filter(f => f.startsWith('session-') && (f.endsWith('.json') || f.endsWith('.jsonl')))
      } catch {
        // Project has no chats directory.
        return []
      }
      // Stat all candidates in parallel; results stay index-aligned with files.
      const stats = await Promise.all(files.map(f => stat(join(chatsDir, f)).catch(() => null)))
      const sources: SessionSource[] = []
      files.forEach((file, i) => {
        if (stats[i]?.isFile()) {
          sources.push({ path: join(chatsDir, file), project, provider: 'gemini' })
        }
      })
      return sources
    }),
  )
  return perProject.flat()
}
export function createGeminiProvider(): Provider {
return {
name: 'gemini',
displayName: 'Gemini',
modelDisplayName(model: string): string {
if (model === 'gemini-auto') return 'Gemini (auto)'
const display: Record<string, string> = {
'gemini-3-flash-preview': 'Gemini 3 Flash',
'gemini-3.1-pro-preview': 'Gemini 3.1 Pro',
'gemini-2.5-pro': 'Gemini 2.5 Pro',
'gemini-2.5-flash': 'Gemini 2.5 Flash',
'gemini-2.0-flash': 'Gemini 2.0 Flash',
}
return display[model] ?? model
},
toolDisplayName(rawTool: string): string {
return toolNameMap[rawTool] ?? rawTool
},
async discoverSessions(): Promise<SessionSource[]> {
return discoverSessions()
},
createSessionParser(source: SessionSource, seenKeys: Set<string>): SessionParser {
return createParser(source, seenKeys)
},
}
}
export const gemini = createGeminiProvider()

View file

@ -1,6 +1,7 @@
import { claude } from './claude.js'
import { codex } from './codex.js'
import { copilot } from './copilot.js'
import { gemini } from './gemini.js'
import { kiro } from './kiro.js'
import { pi, omp } from './pi.js'
import type { Provider, SessionSource } from './types.js'
@ -50,7 +51,7 @@ async function loadCursorAgent(): Promise<Provider | null> {
}
}
const coreProviders: Provider[] = [claude, codex, copilot, kiro, pi, omp]
const coreProviders: Provider[] = [claude, codex, copilot, gemini, kiro, pi, omp]
export async function getAllProviders(): Promise<Provider[]> {
const [cursor, opencode, cursorAgent] = await Promise.all([loadCursor(), loadOpenCode(), loadCursorAgent()])

View file

@ -3,7 +3,7 @@ import { providers, getAllProviders } from '../src/providers/index.js'
describe('provider registry', () => {
it('has core providers registered synchronously', () => {
expect(providers.map(p => p.name)).toEqual(['claude', 'codex', 'copilot', 'kiro', 'pi', 'omp'])
expect(providers.map(p => p.name)).toEqual(['claude', 'codex', 'copilot', 'gemini', 'kiro', 'pi', 'omp'])
})
it('includes sqlite providers after async load', async () => {