Add Qwen provider and replace hardcoded pricing with LiteLLM snapshot

- Add Qwen CLI provider (discovers sessions from ~/.qwen/projects/)
- Replace FALLBACK_PRICING (40 hand-maintained entries) with auto-generated
  LiteLLM snapshot (3595 models including Azure, OpenRouter pricing)
- Build script fetches and bundles LiteLLM data before tsup
- Provider-prefixed lookups (azure/, openrouter/) resolve to correct pricing
- Add display names for all GPT-5.x model variants
- Add Qwen to menubar provider filter and tab strip
This commit is contained in:
AgentSeal 2026-04-28 19:49:14 +02:00
parent ec2de6a642
commit d043795855
9 changed files with 305 additions and 48 deletions

View file

@ -236,6 +236,7 @@ enum ProviderFilter: String, CaseIterable, Identifiable {
case openclaw = "OpenClaw"
case opencode = "OpenCode"
case pi = "Pi"
case qwen = "Qwen"
case omp = "OMP"
case rooCode = "Roo Code"
@ -264,6 +265,7 @@ enum ProviderFilter: String, CaseIterable, Identifiable {
case .openclaw: "openclaw"
case .opencode: "opencode"
case .pi: "pi"
case .qwen: "qwen"
case .omp: "omp"
case .rooCode: "roo-code"
}

View file

@ -98,6 +98,7 @@ extension ProviderFilter {
case .openclaw: return Color(red: 0xDA/255.0, green: 0x70/255.0, blue: 0x56/255.0)
case .opencode: return Color(red: 0x5B/255.0, green: 0x83/255.0, blue: 0x5B/255.0)
case .pi: return Color(red: 0xB2/255.0, green: 0x6B/255.0, blue: 0x3D/255.0)
case .qwen: return Color(red: 0x61/255.0, green: 0x5E/255.0, blue: 0xEB/255.0)
case .omp: return Color(red: 0x8B/255.0, green: 0x5C/255.0, blue: 0xB0/255.0)
case .rooCode: return Color(red: 0x4C/255.0, green: 0xAF/255.0, blue: 0x50/255.0)
}

View file

@ -11,7 +11,8 @@
"dist"
],
"scripts": {
"build": "tsup",
"bundle-litellm": "node scripts/bundle-litellm.mjs",
"build": "node scripts/bundle-litellm.mjs && tsup",
"dev": "tsx src/cli.ts",
"test": "vitest",
"prepublishOnly": "npm run build"

View file

@ -0,0 +1,49 @@
import { writeFileSync, mkdirSync } from 'fs'
import { dirname, join } from 'path'
import { fileURLToPath } from 'url'
// Upstream LiteLLM pricing database (per-token costs + context windows).
const LITELLM_URL = 'https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json'
const __dirname = dirname(fileURLToPath(import.meta.url))
// The snapshot is bundled into the package so pricing works offline at runtime.
const outPath = join(__dirname, '..', 'src', 'data', 'litellm-snapshot.json')
// Models missing from LiteLLM, as [input, output, cacheWrite, cacheRead]
// USD-per-token tuples; merged last, so they override same-named snapshot entries.
const MANUAL_ENTRIES = {
  'MiniMax-M2.7': [0.3e-6, 1.2e-6, 0.375e-6, 0.06e-6],
  'MiniMax-M2.7-highspeed': [0.6e-6, 2.4e-6, 0.375e-6, 0.06e-6],
}
// Fail the build loudly when the fetch does not succeed (top-level await).
const res = await fetch(LITELLM_URL)
if (!res.ok) throw new Error(`HTTP ${res.status}`)
const data = await res.json()
const snapshot = {}
// 'sample_spec' is LiteLLM's schema-documentation entry, not a real model.
const entries = Object.entries(data).filter(([k]) => k !== 'sample_spec')
// Convert one raw LiteLLM entry into the compact snapshot tuple
// [input, output, cacheWrite, cacheRead] (USD per token). Returns null
// when the entry lacks usable per-token input/output pricing.
function toVal(entry) {
  const { input_cost_per_token: input, output_cost_per_token: output } = entry
  if (input == null || output == null) return null
  const cacheWrite = entry.cache_creation_input_token_cost ?? null
  const cacheRead = entry.cache_read_input_token_cost ?? null
  return [input, output, cacheWrite, cacheRead]
}
// Pass 1: un-prefixed model names are canonical and take priority.
for (const [name, entry] of entries) {
  if (name.includes('/')) continue
  const tuple = toVal(entry)
  if (tuple !== null) snapshot[name] = tuple
}
// Pass 2: provider-prefixed names (azure/..., openrouter/...) are stored under
// both the full key and the prefix-stripped key. Existing keys are never
// overwritten, so pass-1 entries win and the first provider seen wins a tie.
for (const [name, entry] of entries) {
  if (!name.includes('/')) continue
  const tuple = toVal(entry)
  if (tuple === null) continue
  if (snapshot[name] === undefined) snapshot[name] = tuple
  const stripped = name.replace(/^[^/]+\//, '')
  if (stripped !== name && snapshot[stripped] === undefined) snapshot[stripped] = tuple
}
// Hand-maintained entries always override fetched data.
Object.assign(snapshot, MANUAL_ENTRIES)
mkdirSync(dirname(outPath), { recursive: true })
writeFileSync(outPath, JSON.stringify(snapshot))
console.log(`Bundled ${Object.keys(snapshot).length} models → src/data/litellm-snapshot.json`)

File diff suppressed because one or more lines are too long

View file

@ -1,6 +1,7 @@
import { readFile, writeFile, mkdir } from 'fs/promises'
import { join } from 'path'
import { homedir } from 'os'
import snapshotData from './data/litellm-snapshot.json'
export type ModelCosts = {
inputCostPerToken: number
@ -19,44 +20,34 @@ type LiteLLMEntry = {
provider_specific_entry?: { fast?: number }
}
type SnapshotEntry = [number, number, number | null, number | null]
const LITELLM_URL = 'https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json'
const CACHE_TTL_MS = 24 * 60 * 60 * 1000
const WEB_SEARCH_COST = 0.01
const FALLBACK_PRICING: Record<string, ModelCosts> = {
'claude-opus-4-7': { inputCostPerToken: 5e-6, outputCostPerToken: 25e-6, cacheWriteCostPerToken: 6.25e-6, cacheReadCostPerToken: 0.5e-6, webSearchCostPerRequest: WEB_SEARCH_COST, fastMultiplier: 6 },
'claude-opus-4-6': { inputCostPerToken: 5e-6, outputCostPerToken: 25e-6, cacheWriteCostPerToken: 6.25e-6, cacheReadCostPerToken: 0.5e-6, webSearchCostPerRequest: WEB_SEARCH_COST, fastMultiplier: 6 },
'claude-opus-4-5': { inputCostPerToken: 5e-6, outputCostPerToken: 25e-6, cacheWriteCostPerToken: 6.25e-6, cacheReadCostPerToken: 0.5e-6, webSearchCostPerRequest: WEB_SEARCH_COST, fastMultiplier: 1 },
'claude-opus-4-1': { inputCostPerToken: 15e-6, outputCostPerToken: 75e-6, cacheWriteCostPerToken: 18.75e-6, cacheReadCostPerToken: 1.5e-6, webSearchCostPerRequest: WEB_SEARCH_COST, fastMultiplier: 1 },
'claude-opus-4': { inputCostPerToken: 15e-6, outputCostPerToken: 75e-6, cacheWriteCostPerToken: 18.75e-6, cacheReadCostPerToken: 1.5e-6, webSearchCostPerRequest: WEB_SEARCH_COST, fastMultiplier: 1 },
'claude-sonnet-4-6': { inputCostPerToken: 3e-6, outputCostPerToken: 15e-6, cacheWriteCostPerToken: 3.75e-6, cacheReadCostPerToken: 0.3e-6, webSearchCostPerRequest: WEB_SEARCH_COST, fastMultiplier: 1 },
'claude-sonnet-4-5': { inputCostPerToken: 3e-6, outputCostPerToken: 15e-6, cacheWriteCostPerToken: 3.75e-6, cacheReadCostPerToken: 0.3e-6, webSearchCostPerRequest: WEB_SEARCH_COST, fastMultiplier: 1 },
'claude-sonnet-4': { inputCostPerToken: 3e-6, outputCostPerToken: 15e-6, cacheWriteCostPerToken: 3.75e-6, cacheReadCostPerToken: 0.3e-6, webSearchCostPerRequest: WEB_SEARCH_COST, fastMultiplier: 1 },
'claude-3-7-sonnet': { inputCostPerToken: 3e-6, outputCostPerToken: 15e-6, cacheWriteCostPerToken: 3.75e-6, cacheReadCostPerToken: 0.3e-6, webSearchCostPerRequest: WEB_SEARCH_COST, fastMultiplier: 1 },
'claude-3-5-sonnet': { inputCostPerToken: 3e-6, outputCostPerToken: 15e-6, cacheWriteCostPerToken: 3.75e-6, cacheReadCostPerToken: 0.3e-6, webSearchCostPerRequest: WEB_SEARCH_COST, fastMultiplier: 1 },
'claude-haiku-4-5': { inputCostPerToken: 1e-6, outputCostPerToken: 5e-6, cacheWriteCostPerToken: 1.25e-6, cacheReadCostPerToken: 0.1e-6, webSearchCostPerRequest: WEB_SEARCH_COST, fastMultiplier: 1 },
'claude-3-5-haiku': { inputCostPerToken: 0.8e-6, outputCostPerToken: 4e-6, cacheWriteCostPerToken: 1e-6, cacheReadCostPerToken: 0.08e-6, webSearchCostPerRequest: WEB_SEARCH_COST, fastMultiplier: 1 },
'gpt-4o': { inputCostPerToken: 2.5e-6, outputCostPerToken: 10e-6, cacheWriteCostPerToken: 2.5e-6, cacheReadCostPerToken: 1.25e-6, webSearchCostPerRequest: WEB_SEARCH_COST, fastMultiplier: 1 },
'gpt-4o-mini': { inputCostPerToken: 0.15e-6, outputCostPerToken: 0.6e-6, cacheWriteCostPerToken: 0.15e-6, cacheReadCostPerToken: 0.075e-6, webSearchCostPerRequest: WEB_SEARCH_COST, fastMultiplier: 1 },
'gemini-3.1-pro-preview': { inputCostPerToken: 2e-6, outputCostPerToken: 12e-6, cacheWriteCostPerToken: 2e-6, cacheReadCostPerToken: 0.5e-6, webSearchCostPerRequest: WEB_SEARCH_COST, fastMultiplier: 1 },
'gemini-3-flash-preview': { inputCostPerToken: 0.5e-6, outputCostPerToken: 3e-6, cacheWriteCostPerToken: 0.5e-6, cacheReadCostPerToken: 0.125e-6, webSearchCostPerRequest: WEB_SEARCH_COST, fastMultiplier: 1 },
'gemini-2.5-pro': { inputCostPerToken: 1.25e-6, outputCostPerToken: 10e-6, cacheWriteCostPerToken: 1.25e-6, cacheReadCostPerToken: 0.3125e-6, webSearchCostPerRequest: WEB_SEARCH_COST, fastMultiplier: 1 },
'gemini-2.5-flash': { inputCostPerToken: 0.3e-6, outputCostPerToken: 2.5e-6, cacheWriteCostPerToken: 0.3e-6, cacheReadCostPerToken: 0.075e-6, webSearchCostPerRequest: WEB_SEARCH_COST, fastMultiplier: 1 },
'gpt-5.3-codex': { inputCostPerToken: 2.5e-6, outputCostPerToken: 10e-6, cacheWriteCostPerToken: 2.5e-6, cacheReadCostPerToken: 1.25e-6, webSearchCostPerRequest: WEB_SEARCH_COST, fastMultiplier: 1 },
'gpt-5.4': { inputCostPerToken: 2.5e-6, outputCostPerToken: 10e-6, cacheWriteCostPerToken: 2.5e-6, cacheReadCostPerToken: 1.25e-6, webSearchCostPerRequest: WEB_SEARCH_COST, fastMultiplier: 1 },
'gpt-5.4-mini': { inputCostPerToken: 0.4e-6, outputCostPerToken: 1.6e-6, cacheWriteCostPerToken: 0.4e-6, cacheReadCostPerToken: 0.2e-6, webSearchCostPerRequest: WEB_SEARCH_COST, fastMultiplier: 1 },
'gpt-5': { inputCostPerToken: 2.5e-6, outputCostPerToken: 10e-6, cacheWriteCostPerToken: 2.5e-6, cacheReadCostPerToken: 1.25e-6, webSearchCostPerRequest: WEB_SEARCH_COST, fastMultiplier: 1 },
'gpt-5-mini': { inputCostPerToken: 0.4e-6, outputCostPerToken: 1.6e-6, cacheWriteCostPerToken: 0.4e-6, cacheReadCostPerToken: 0.2e-6, webSearchCostPerRequest: WEB_SEARCH_COST, fastMultiplier: 1 },
'gpt-4.1': { inputCostPerToken: 2e-6, outputCostPerToken: 8e-6, cacheWriteCostPerToken: 2e-6, cacheReadCostPerToken: 0.5e-6, webSearchCostPerRequest: WEB_SEARCH_COST, fastMultiplier: 1 },
'gpt-4.1-mini': { inputCostPerToken: 0.4e-6, outputCostPerToken: 1.6e-6, cacheWriteCostPerToken: 0.4e-6, cacheReadCostPerToken: 0.1e-6, webSearchCostPerRequest: WEB_SEARCH_COST, fastMultiplier: 1 },
'gpt-4.1-nano': { inputCostPerToken: 0.1e-6, outputCostPerToken: 0.4e-6, cacheWriteCostPerToken: 0.1e-6, cacheReadCostPerToken: 0.025e-6, webSearchCostPerRequest: WEB_SEARCH_COST, fastMultiplier: 1 },
'o3': { inputCostPerToken: 10e-6, outputCostPerToken: 40e-6, cacheWriteCostPerToken: 10e-6, cacheReadCostPerToken: 2.5e-6, webSearchCostPerRequest: WEB_SEARCH_COST, fastMultiplier: 1 },
'o4-mini': { inputCostPerToken: 1.1e-6, outputCostPerToken: 4.4e-6, cacheWriteCostPerToken: 1.1e-6, cacheReadCostPerToken: 0.275e-6, webSearchCostPerRequest: WEB_SEARCH_COST, fastMultiplier: 1 },
'MiniMax-M2.7-highspeed': { inputCostPerToken: 0.6e-6, outputCostPerToken: 2.4e-6, cacheWriteCostPerToken: 0.375e-6, cacheReadCostPerToken: 0.06e-6, webSearchCostPerRequest: WEB_SEARCH_COST, fastMultiplier: 1 },
'MiniMax-M2.7': { inputCostPerToken: 0.3e-6, outputCostPerToken: 1.2e-6, cacheWriteCostPerToken: 0.375e-6, cacheReadCostPerToken: 0.06e-6, webSearchCostPerRequest: WEB_SEARCH_COST, fastMultiplier: 1 },
// Per-model `fastMultiplier` values layered onto snapshot pricing when the
// pricing map is built (see loadSnapshot); models not listed default to 1.
const FAST_MULTIPLIERS: Record<string, number> = {
  'claude-opus-4-7': 6,
  'claude-opus-4-6': 6,
}
let pricingCache: Map<string, ModelCosts> | null = null
/**
 * Materialize the bundled LiteLLM snapshot into the runtime pricing map.
 * Snapshot values are compact [input, output, cacheWrite, cacheRead] tuples;
 * missing cache costs fall back to 1.25x (write) / 0.1x (read) of the input
 * price, and fast-mode multipliers come from FAST_MULTIPLIERS.
 */
function loadSnapshot(): Map<string, ModelCosts> {
  const pricing = new Map<string, ModelCosts>()
  const rawEntries = snapshotData as unknown as Record<string, SnapshotEntry>
  for (const [name, [input, output, cacheWrite, cacheRead]] of Object.entries(rawEntries)) {
    const costs: ModelCosts = {
      inputCostPerToken: input,
      outputCostPerToken: output,
      cacheWriteCostPerToken: cacheWrite ?? input * 1.25,
      cacheReadCostPerToken: cacheRead ?? input * 0.1,
      webSearchCostPerRequest: WEB_SEARCH_COST,
      fastMultiplier: FAST_MULTIPLIERS[name] ?? 1,
    }
    pricing.set(name, costs)
  }
  return pricing
}
let pricingCache: Map<string, ModelCosts> = loadSnapshot()
function getCacheDir(): string {
return join(homedir(), '.cache', 'codeburn')
@ -125,7 +116,7 @@ export async function loadPricing(): Promise<void> {
try {
pricingCache = await fetchAndCachePricing()
} catch {
pricingCache = new Map(Object.entries(FALLBACK_PRICING))
// snapshot already loaded at init; nothing more to do
}
}
@ -145,6 +136,7 @@ const BUILTIN_ALIASES: Record<string, string> = {
'kiro-auto': 'claude-sonnet-4-5',
'cline-auto': 'claude-sonnet-4-5',
'openclaw-auto': 'claude-sonnet-4-5',
'qwen-auto': 'claude-sonnet-4-5',
// Cursor emits dot-version tier-last names
'claude-4.6-sonnet': 'claude-sonnet-4-6',
'claude-4.5-sonnet-thinking': 'claude-sonnet-4-5',
@ -177,20 +169,15 @@ function getCanonicalName(model: string): string {
}
export function getModelCosts(model: string): ModelCosts | null {
// Try with provider prefix preserved (azure/gpt-5.4, openrouter/anthropic/claude-opus-4.6)
const withPrefix = model.replace(/@.*$/, '').replace(/-\d{8}$/, '')
if (pricingCache.has(withPrefix)) return pricingCache.get(withPrefix)!
const canonical = resolveAlias(getCanonicalName(model))
if (pricingCache.has(canonical)) return pricingCache.get(canonical)!
if (pricingCache?.has(canonical)) return pricingCache.get(canonical)!
for (const [key, costs] of Object.entries(FALLBACK_PRICING)) {
if (canonical === key || canonical.startsWith(key + '-')) return costs
}
for (const [key, costs] of pricingCache ?? new Map()) {
if (canonical.startsWith(key)) return costs
}
for (const [key, costs] of Object.entries(FALLBACK_PRICING)) {
if (canonical.startsWith(key)) return costs
for (const [key, costs] of pricingCache) {
if (canonical.startsWith(key + '-') || canonical.startsWith(key)) return costs
}
return null
@ -226,6 +213,7 @@ const autoModelNames: Record<string, string> = {
'kiro-auto': 'Kiro (auto)',
'cline-auto': 'Cline (auto)',
'openclaw-auto': 'OpenClaw (auto)',
'qwen-auto': 'Qwen (auto)',
}
export function getShortModelName(model: string): string {
@ -250,11 +238,21 @@ export function getShortModelName(model: string): string {
'gpt-4.1-mini': 'GPT-4.1 Mini',
'gpt-4.1': 'GPT-4.1',
'codex-auto-review': 'Codex Auto Review',
'gpt-5.5-pro': 'GPT-5.5 Pro',
'gpt-5.5': 'GPT-5.5',
'gpt-5.4-pro': 'GPT-5.4 Pro',
'gpt-5.4-nano': 'GPT-5.4 Nano',
'gpt-5.4-mini': 'GPT-5.4 Mini',
'gpt-5.4': 'GPT-5.4',
'gpt-5.3-codex': 'GPT-5.3 Codex',
'gpt-5.2-pro': 'GPT-5.2 Pro',
'gpt-5.2-low': 'GPT-5.2 Low',
'gpt-5.2': 'GPT-5.2',
'gpt-5.1-codex-mini': 'GPT-5.1 Codex Mini',
'gpt-5.1-codex': 'GPT-5.1 Codex',
'gpt-5.1': 'GPT-5.1',
'gpt-5-pro': 'GPT-5 Pro',
'gpt-5-nano': 'GPT-5 Nano',
'gpt-5-mini': 'GPT-5 Mini',
'gpt-5': 'GPT-5',
'gemini-3.1-pro-preview': 'Gemini 3.1 Pro',

View file

@ -6,6 +6,7 @@ import { kiloCode } from './kilo-code.js'
import { kiro } from './kiro.js'
import { openclaw } from './openclaw.js'
import { pi, omp } from './pi.js'
import { qwen } from './qwen.js'
import { rooCode } from './roo-code.js'
import type { Provider, SessionSource } from './types.js'
@ -54,7 +55,7 @@ async function loadCursorAgent(): Promise<Provider | null> {
}
}
const coreProviders: Provider[] = [claude, codex, copilot, gemini, kiloCode, kiro, openclaw, pi, omp, rooCode]
const coreProviders: Provider[] = [claude, codex, copilot, gemini, kiloCode, kiro, openclaw, pi, omp, qwen, rooCode]
export async function getAllProviders(): Promise<Provider[]> {
const [cursor, opencode, cursorAgent] = await Promise.all([loadCursor(), loadOpenCode(), loadCursorAgent()])

204
src/providers/qwen.ts Normal file
View file

@ -0,0 +1,204 @@
import { readdir, stat } from 'fs/promises'
import { basename, join } from 'path'
import { homedir } from 'os'
import { readSessionFile } from '../fs-utils.js'
import { calculateCost } from '../models.js'
import type { Provider, SessionSource, SessionParser, ParsedProviderCall } from './types.js'
// Map Qwen CLI native tool names onto the normalized tool names used by the
// other providers (Claude-style), so cross-provider tool stats aggregate.
const toolNameMap: Record<string, string> = {
  read_file: 'Read',
  write_to_file: 'Write',
  edit_file: 'Edit',
  execute_command: 'Bash',
  search_files: 'Grep',
  list_files: 'LS',
  list_directory: 'LS',
  browser_action: 'WebFetch',
  web_search: 'WebSearch',
  ask_followup_question: 'AskUser',
  attempt_completion: 'Complete',
}
// One content part within a Qwen message.
// NOTE(review): shapes inferred from usage below; presumably mirrors the
// Gemini-style session part schema — confirm against real Qwen CLI output.
type QwenPart = {
  text?: string
  // When true the text is model reasoning, not user-visible content
  // (such parts are excluded when collecting the user message).
  thought?: boolean
  functionCall?: { name?: string; args?: Record<string, unknown> }
  functionResponse?: unknown
}

// One JSONL line of a Qwen session chat file.
type QwenEntry = {
  uuid: string
  sessionId: string
  timestamp: string
  // Only 'user' and 'assistant' entries are parsed; others are skipped.
  type: string
  subtype?: string
  cwd?: string
  model?: string
  message?: {
    role: string
    parts: QwenPart[]
  }
  // Token accounting; only assistant entries carrying this are billed.
  usageMetadata?: {
    promptTokenCount: number
    candidatesTokenCount: number
    thoughtsTokenCount: number
    totalTokenCount: number
    cachedContentTokenCount: number
  }
}
// Root directory holding Qwen CLI project session data.
// QWEN_DATA_DIR overrides the default ~/.qwen/projects location.
function getQwenProjectsDir(): string {
  const override = process.env['QWEN_DATA_DIR']
  return override ?? join(homedir(), '.qwen', 'projects')
}
// Derive a short project name from a Qwen project directory name, which
// encodes a filesystem path with separators replaced by dashes
// (e.g. "-Users-me-myapp" → "myapp"). Falls back to the raw directory name.
function projectNameFromDirName(dirName: string): string {
  const segments = dirName.replace(/^-/, '').split('-')
  return segments.at(-1) || dirName
}
/**
 * Extract normalized tool names and first-word bash commands from the
 * functionCall parts of an assistant message.
 *
 * Fix: the command string is trimmed before splitting — previously a command
 * with leading whitespace ("  git status") split to a leading '' and the
 * command was silently dropped from bashCommands.
 */
function extractTools(parts: QwenPart[]): { tools: string[]; bashCommands: string[] } {
  const tools: string[] = []
  const bashCommands: string[] = []
  for (const part of parts) {
    if (part.functionCall?.name) {
      // Unknown tool names pass through unmapped.
      const mapped = toolNameMap[part.functionCall.name] ?? part.functionCall.name
      tools.push(mapped)
      if (mapped === 'Bash' && part.functionCall.args && typeof part.functionCall.args['command'] === 'string') {
        // Record only the executable (first whitespace-delimited word).
        const cmd = (part.functionCall.args['command'] as string).trim().split(/\s+/)[0] ?? ''
        if (cmd) bashCommands.push(cmd)
      }
    }
  }
  return { tools, bashCommands }
}
// Build the streaming parser for one Qwen session JSONL file.
// seenKeys deduplicates assistant entries that appear in multiple files.
function createParser(source: SessionSource, seenKeys: Set<string>): SessionParser {
  return {
    async *parse(): AsyncGenerator<ParsedProviderCall> {
      const raw = await readSessionFile(source.path)
      if (raw === null) return
      // One JSON object per non-blank line; malformed lines are skipped below.
      const lines = raw.split('\n').filter(l => l.trim())
      // Most recent user prompt text, attached to the next assistant call.
      let pendingUserMessage = ''
      for (const line of lines) {
        let entry: QwenEntry
        try {
          entry = JSON.parse(line)
        } catch {
          continue
        }
        if (entry.type === 'user' && entry.message) {
          // Collect visible (non-"thought") text parts as the user message,
          // capped at 500 characters.
          const texts = (entry.message.parts ?? [])
            .filter(p => p.text && !p.thought)
            .map(p => p.text!)
          if (texts.length > 0) {
            pendingUserMessage = texts.join(' ').slice(0, 500)
          }
          continue
        }
        // Only assistant entries that carry usage metadata are billable calls;
        // all-zero usage entries are noise and are dropped.
        if (entry.type !== 'assistant' || !entry.usageMetadata) continue
        const usage = entry.usageMetadata
        if (usage.promptTokenCount === 0 && usage.candidatesTokenCount === 0) continue
        const dedupKey = `qwen:${entry.sessionId}:${entry.uuid}`
        if (seenKeys.has(dedupKey)) continue
        seenKeys.add(dedupKey)
        const model = entry.model || 'qwen-auto'
        const { tools, bashCommands } = extractTools(entry.message?.parts ?? [])
        const inputTokens = usage.promptTokenCount
        const outputTokens = usage.candidatesTokenCount
        const reasoningTokens = usage.thoughtsTokenCount ?? 0
        const cachedTokens = usage.cachedContentTokenCount ?? 0
        // Reasoning tokens are priced as output here (folded into the output
        // count). NOTE(review): assumes promptTokenCount does not already
        // include cachedContentTokenCount — confirm against Qwen CLI billing.
        const costUSD = calculateCost(model, inputTokens, outputTokens + reasoningTokens, 0, cachedTokens, 0)
        yield {
          provider: 'qwen',
          model,
          inputTokens,
          outputTokens,
          cacheCreationInputTokens: 0,
          cacheReadInputTokens: cachedTokens,
          cachedInputTokens: cachedTokens,
          reasoningTokens,
          webSearchRequests: 0,
          costUSD,
          tools: [...new Set(tools)],
          bashCommands: [...new Set(bashCommands)],
          timestamp: entry.timestamp || '',
          speed: 'standard',
          deduplicationKey: dedupKey,
          userMessage: pendingUserMessage,
          sessionId: entry.sessionId,
        }
        // Each user prompt is attributed to at most one assistant call.
        pendingUserMessage = ''
      }
    },
  }
}
/**
 * Create the Qwen CLI provider. Session files are JSONL chats stored under
 * <projectsDir>/<project>/chats/. Pass overrideDir (used in tests) to read
 * from a custom location instead of ~/.qwen/projects.
 */
export function createQwenProvider(overrideDir?: string): Provider {
  const projectsDir = overrideDir ?? getQwenProjectsDir()

  // Enumerate every *.jsonl chat file under each project directory.
  // Missing or unreadable directories are treated as "no sessions".
  async function discoverSessions(): Promise<SessionSource[]> {
    const sources: SessionSource[] = []
    const projectDirs = await readdir(projectsDir).catch(() => null)
    if (projectDirs === null) return sources
    for (const projDir of projectDirs) {
      const project = projectNameFromDirName(projDir)
      const chatsDir = join(projectsDir, projDir, 'chats')
      const chatFiles = await readdir(chatsDir).catch(() => null)
      if (chatFiles === null) continue
      for (const file of chatFiles) {
        if (!file.endsWith('.jsonl')) continue
        const filePath = join(chatsDir, file)
        const info = await stat(filePath).catch(() => null)
        if (info?.isFile()) {
          sources.push({ path: filePath, project, provider: 'qwen' })
        }
      }
    }
    return sources
  }

  return {
    name: 'qwen',
    displayName: 'Qwen',
    // Qwen model ids are shown as-is; no display mapping exists yet.
    modelDisplayName: (model: string): string => model,
    toolDisplayName: (rawTool: string): string => toolNameMap[rawTool] ?? rawTool,
    discoverSessions,
    createSessionParser: (source: SessionSource, seenKeys: Set<string>): SessionParser =>
      createParser(source, seenKeys),
  }
}
export const qwen = createQwenProvider()

View file

@ -3,7 +3,7 @@ import { providers, getAllProviders } from '../src/providers/index.js'
describe('provider registry', () => {
it('has core providers registered synchronously', () => {
expect(providers.map(p => p.name)).toEqual(['claude', 'codex', 'copilot', 'gemini', 'kilo-code', 'kiro', 'openclaw', 'pi', 'omp', 'roo-code'])
expect(providers.map(p => p.name)).toEqual(['claude', 'codex', 'copilot', 'gemini', 'kilo-code', 'kiro', 'openclaw', 'pi', 'omp', 'qwen', 'roo-code'])
})
it('includes sqlite providers after async load', async () => {