Mirror of https://github.com/AgentSeal/codeburn.git, synced 2026-05-17 12:20:43 +00:00
Two passes of validators across CLI accuracy, dashboard UX, menubar Swift, performance, security, and end-to-end smoke tests on real session data.

Data-correctness fixes:

- parseLocalDate rejects month/day overflow. JS Date silently rolled Feb 31 to Mar 3, so --from 2026-02-31 --to 2026-03-15 quietly dropped sessions from Feb 28 to Mar 2. It now throws "Invalid date" with a clear reason. The leap-day case is covered (2024-02-29 valid, 2025-02-29 rejected). (Sketched below.)
- CSV/JSON exports use the active currency's natural decimal places. The previous round2 helper produced ¥412.37 in CSV while the dashboard rendered ¥412, so finance teams comparing the two surfaces saw a discrepancy. The new roundForActiveCurrency consults Intl.NumberFormat for the right precision (0 for JPY/KRW/CLP, 2 for USD/EUR, etc.). (Sketched below.)
- Copilot toolRequests is Array.isArray-guarded in both the modern and legacy event branches. Previously a corrupt session with toolRequests=null or a string aborted the whole file's parse loop and silently dropped every legitimate call after it. (Sketched below.)
- Codex token_count dedup uses a null sentinel for prevCumulativeTotal so the first event is never confused with a duplicate. Sessions that emit only last_token_usage (no total_token_usage) report cumulativeTotal=0 on every event; with the previous 0-initialized prev, the first event matched the dedup guard and was dropped. (Sketched below.)
- LiteLLM pricing values are clamped to [0, 1] per token via safePerTokenRate: defense in depth against a tampered upstream JSON shipping negative or absurdly large per-token costs that would otherwise propagate into all cost totals. (Sketched below.)

Performance:

- Cursor SQLite parsing no longer takes minutes on multi-GB DBs. Two changes: the per-conversation user-message buffer uses an index pointer instead of Array.shift() (which was O(n) per call), and a real ROWID cutoff via subquery limits the scan to the most recent 250k bubbles, with a stderr warning so power users get a partial report rather than a stalled CLI. (Buffer sketched below.)
- Spawned codeburn CLI subprocesses are terminated when the calling Task is cancelled. Without this, rapid period/provider tab clicks in the menubar cancelled the Task but left the subprocess running to completion, piling up zombie processes.

UX:

- The dashboard period switch flips to loading and clears projects synchronously before reloadData runs, eliminating the frame where the new period label rendered over the old period's projects.
- The Optimize findings tab paginates three at a time with j/k scroll. With 4 new detectors plus the 7 originals, 8-10 findings at 6 lines each scrolled the StatusBar off the top of the alt buffer.
- Custom --from/--to ranges hide the period tab strip and disable the 1-5 / arrow keys, so a stray period press no longer abandons the user's explicit range. A "Custom range: X to Y" banner replaces the tab strip.
- The OpenCode storage-format warning is per-table-set, rate-limited to once per process, and points the user at OpenCode's migration step or the issue tracker. The previous all-or-nothing check fired the generic "format not recognized" string for any schema mismatch.

Menubar / OAuth:

- Both the Claude and Codex bootstrap (Reconnect button) now honour the usageBlockedUntil 429 backoff that refreshIfBootstrapped respects. Spamming Reconnect during sustained rate-limit windows previously hammered the upstream endpoint on every click.
- The Codex Retry-After HTTP header is parsed (delta-seconds plus an IMF-fixdate fallback) so we don't over-back-off when ChatGPT tells us a shorter window than our 5-minute floor. (Sketched below.)
- Both credential cache files are written via SafeFile.write (O_CREAT | O_EXCL | O_NOFOLLOW with an explicit 0600 mode), so there is no race window where the temp file briefly exists at the default umask, and a symlink at the destination cannot redirect the write. Reads now route through SafeFile.read with a 64 KiB cap, closing the symlink-follow gap in Data(contentsOf:).

CI signal:

- The TypeScript strict typecheck (tsc --noEmit) is now at zero errors. The six errors in src/providers/copilot.ts came from a discriminated-union catch-all branch whose `data: Record<string, unknown>` shape TS picked over the specific event branches when narrowing on `type`. Removed the catch-all; at runtime, unknown event types fall through via the existing if/else chain.

Tests added: 16 new (now 555 total)

- date-range-filter: month/day/year overflow rejection, leap-day correctness
- currency-rounding: convertCost no-rounding contract, roundForActiveCurrency for USD/JPY/KRW/EUR
- providers/copilot: malformed toolRequests does not abort the parse
- providers/cursor-bubble-dedup: re-parse after token mutation does not double-count; a single parse yields one call per bubble
- providers/codex: first event with cumulativeTotal=0 not dropped; consecutive zero-cumulative duplicates still deduped
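A minimal sketch of the parseLocalDate overflow check, assuming it takes a YYYY-MM-DD string (the actual signature and error text may differ). JS Date normalizes out-of-range components rather than failing, so the guard round-trips the parsed components and rejects any input that changed:

    // Hypothetical sketch: reject YYYY-MM-DD inputs whose components don't
    // round-trip through Date, instead of letting Date roll them forward.
    function parseLocalDate(input: string): Date {
      const m = /^(\d{4})-(\d{2})-(\d{2})$/.exec(input)
      if (!m) throw new Error(`Invalid date "${input}": expected YYYY-MM-DD`)
      const y = Number(m[1]), mo = Number(m[2]), d = Number(m[3])
      const date = new Date(y, mo - 1, d)
      // Date silently normalizes overflow (Feb 31 -> Mar 3). If any component
      // changed, the input named a day that does not exist. Leap days fall out
      // for free: 2024-02-29 round-trips, 2025-02-29 rolls over and is rejected.
      if (date.getFullYear() !== y || date.getMonth() !== mo - 1 || date.getDate() !== d) {
        throw new Error(`Invalid date "${input}": day ${d} does not exist in month ${mo}`)
      }
      return date
    }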
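The currency-rounding fix boils down to asking Intl.NumberFormat for the currency's fraction digits instead of hardcoding 2. A sketch; the real roundForActiveCurrency presumably reads the active currency from app state, here it is a parameter:

    // Sketch: derive rounding precision from the currency itself.
    function roundForActiveCurrency(value: number, currency: string): number {
      const digits = new Intl.NumberFormat('en-US', { style: 'currency', currency })
        .resolvedOptions().maximumFractionDigits ?? 2
      const factor = 10 ** digits
      return Math.round(value * factor) / factor
    }

    roundForActiveCurrency(412.37, 'JPY')  // 412    (0 fraction digits)
    roundForActiveCurrency(412.375, 'USD') // 412.38 (2 fraction digits)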
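The toolRequests guard is a one-line shape check before the .map that used to throw. A sketch of the coercion (safeToolNames is an illustrative helper name, not from the codebase):

    // Sketch: coerce any non-array toolRequests (string, null, missing) to []
    // so a corrupt event yields a call with no tools instead of a thrown .map.
    function safeToolNames(data: { toolRequests?: unknown }): string[] {
      const reqs = data.toolRequests
      if (!Array.isArray(reqs)) return []
      return reqs
        .map(r => (r as { name?: unknown } | null)?.name)
        .filter((n): n is string => typeof n === 'string')
    }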
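A sketch of the Codex null-sentinel dedup; the names follow the commit message, and the surrounding event loop is omitted:

    // Sketch: null means "no previous event yet", which 0 cannot express when
    // a session legitimately reports cumulativeTotal = 0 on every event.
    let prevCumulativeTotal: number | null = null

    function isDuplicateTokenCount(cumulativeTotal: number): boolean {
      const duplicate = prevCumulativeTotal !== null && cumulativeTotal === prevCumulativeTotal
      prevCumulativeTotal = cumulativeTotal
      return duplicate
    }

    isDuplicateTokenCount(0) // false: first event, prev is null, kept
    isDuplicateTokenCount(0) // true: genuine consecutive duplicate, dropped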
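The pricing clamp, sketched under the assumption that non-numeric and non-finite values should also collapse to 0:

    // Sketch: clamp per-token rates into [0, 1]. No real model costs a dollar
    // per token, so anything outside the window is treated as corrupt input.
    function safePerTokenRate(raw: unknown): number {
      const n = typeof raw === 'number' && Number.isFinite(raw) ? raw : 0
      return Math.min(1, Math.max(0, n))
    }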
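A sketch of the index-pointer buffer that replaces Array.shift(). shift() re-indexes the whole array on every call, so draining an n-message buffer cost O(n^2) overall; a head pointer makes each take O(1). The class shape here is illustrative:

    // Sketch: O(1) drain via a head index instead of Array.shift().
    class MessageBuffer<T> {
      private items: T[] = []
      private head = 0
      push(item: T) { this.items.push(item) }
      take(): T | undefined {
        return this.head < this.items.length ? this.items[this.head++] : undefined
      }
    }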
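RFC 9110 allows Retry-After as either delta-seconds ("120") or an HTTP-date in IMF-fixdate form ("Wed, 21 Oct 2026 07:28:00 GMT"). A sketch of the parse, returning null when the header is unusable so the caller keeps its own backoff floor:

    // Sketch: handle both Retry-After forms; null means "fall back to floor".
    function parseRetryAfter(header: string, now = Date.now()): number | null {
      const trimmed = header.trim()
      if (/^\d+$/.test(trimmed)) return Number(trimmed) // delta-seconds form
      const date = Date.parse(trimmed) // IMF-fixdate fallback
      if (!Number.isNaN(date)) return Math.max(0, Math.round((date - now) / 1000))
      return null
    }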
401 lines
16 KiB
TypeScript
import { describe, it, expect, beforeEach, afterEach } from 'vitest'
import { mkdtemp, mkdir, writeFile, rm } from 'fs/promises'
import { join } from 'path'
import { tmpdir } from 'os'

import { copilot, createCopilotProvider } from '../../src/providers/copilot.js'
import type { ParsedProviderCall } from '../../src/providers/types.js'

let tmpDir: string

async function createSessionDir(sessionId: string, lines: string[], cwd = '/home/user/myproject') {
  const sessionDir = join(tmpDir, sessionId)
  await mkdir(sessionDir, { recursive: true })
  await writeFile(join(sessionDir, 'workspace.yaml'), `id: ${sessionId}\ncwd: ${cwd}\n`)
  await writeFile(join(sessionDir, 'events.jsonl'), lines.join('\n') + '\n')
  return join(sessionDir, 'events.jsonl')
}

function modelChange(newModel: string, previousModel?: string) {
  return JSON.stringify({ type: 'session.model_change', timestamp: '2026-04-15T10:00:01Z', data: { newModel, previousModel } })
}

function userMessage(content: string) {
  return JSON.stringify({ type: 'user.message', timestamp: '2026-04-15T10:00:10Z', data: { content, interactionId: 'int-1' } })
}

function assistantMessage(opts: { messageId: string; outputTokens: number; tools?: string[]; timestamp?: string }) {
  return JSON.stringify({
    type: 'assistant.message',
    timestamp: opts.timestamp ?? '2026-04-15T10:00:15Z',
    data: {
      messageId: opts.messageId,
      outputTokens: opts.outputTokens,
      interactionId: 'int-1',
      toolRequests: (opts.tools ?? []).map(name => ({ name, toolCallId: `call-${name}`, type: 'function' })),
    },
  })
}

function transcriptSessionStart(sessionId: string) {
  return JSON.stringify({ type: 'session.start', data: { sessionId, producer: 'copilot-agent' } })
}

function transcriptUserMessage(content: string) {
  return JSON.stringify({ type: 'user.message', data: { content, attachments: [] } })
}

function transcriptAssistantMessage(opts: { messageId: string; content?: string; reasoningText?: string; toolCallIds?: string[] }) {
  return JSON.stringify({
    type: 'assistant.message',
    data: {
      messageId: opts.messageId,
      content: opts.content ?? '',
      reasoningText: opts.reasoningText ?? '',
      toolRequests: (opts.toolCallIds ?? []).map((id, i) => ({
        toolCallId: id,
        name: i === 0 ? 'read_file' : 'run_in_terminal',
        type: 'function',
      })),
    },
  })
}

describe('copilot provider - JSONL parsing', () => {
  beforeEach(async () => {
    tmpDir = await mkdtemp(join(tmpdir(), 'copilot-test-'))
  })

  afterEach(async () => {
    await rm(tmpDir, { recursive: true, force: true })
  })

  it('parses a basic assistant message', async () => {
    const eventsPath = await createSessionDir('sess-001', [
      modelChange('gpt-4.1'),
      userMessage('write a function'),
      assistantMessage({ messageId: 'msg-1', outputTokens: 150 }),
    ])

    const source = { path: eventsPath, project: 'myproject', provider: 'copilot' }
    const calls: ParsedProviderCall[] = []
    for await (const call of copilot.createSessionParser(source, new Set()).parse()) calls.push(call)

    expect(calls).toHaveLength(1)
    const call = calls[0]!
    expect(call.provider).toBe('copilot')
    expect(call.model).toBe('gpt-4.1')
    expect(call.outputTokens).toBe(150)
    expect(call.inputTokens).toBe(0)
    expect(call.userMessage).toBe('write a function')
    expect(call.sessionId).toBe('sess-001')
    expect(call.bashCommands).toEqual([])
    expect(call.costUSD).toBeGreaterThan(0)
  })

  it('tracks model changes mid-session', async () => {
    const eventsPath = await createSessionDir('sess-002', [
      modelChange('gpt-5-mini'),
      userMessage('first'),
      assistantMessage({ messageId: 'msg-1', outputTokens: 50, timestamp: '2026-04-15T10:00:10Z' }),
      modelChange('gpt-4.1', 'gpt-5-mini'),
      userMessage('second'),
      assistantMessage({ messageId: 'msg-2', outputTokens: 80, timestamp: '2026-04-15T10:01:00Z' }),
    ])

    const source = { path: eventsPath, project: 'test', provider: 'copilot' }
    const calls: ParsedProviderCall[] = []
    for await (const call of copilot.createSessionParser(source, new Set()).parse()) calls.push(call)

    expect(calls).toHaveLength(2)
    expect(calls[0]!.model).toBe('gpt-5-mini')
    expect(calls[1]!.model).toBe('gpt-4.1')
  })

  it('extracts tool names from toolRequests', async () => {
    const eventsPath = await createSessionDir('sess-003', [
      modelChange('gpt-4.1'),
      userMessage('run tests'),
      assistantMessage({ messageId: 'msg-1', outputTokens: 60, tools: ['bash', 'read_file', 'write_file'] }),
    ])

    const source = { path: eventsPath, project: 'test', provider: 'copilot' }
    const calls: ParsedProviderCall[] = []
    for await (const call of copilot.createSessionParser(source, new Set()).parse()) calls.push(call)

    expect(calls[0]!.tools).toEqual(['Bash', 'Read', 'Edit'])
  })

  it('does not crash on malformed toolRequests (string / null / missing)', async () => {
    // Regression guard: a corrupt session previously aborted the whole file's
    // parse loop because .map was called on a non-array. The fix coerces any
    // non-array shape (string, null, missing) to []. We mix two corrupt events
    // between two healthy events and assert both healthy events still parse.
    const corruptToolRequestsString = JSON.stringify({
      type: 'assistant.message',
      timestamp: '2026-04-15T10:00:15Z',
      data: { messageId: 'corrupt-string', outputTokens: 50, toolRequests: 'not an array' },
    })
    const corruptToolRequestsNull = JSON.stringify({
      type: 'assistant.message',
      timestamp: '2026-04-15T10:00:16Z',
      data: { messageId: 'corrupt-null', outputTokens: 50, toolRequests: null },
    })
    const eventsPath = await createSessionDir('sess-corrupt', [
      modelChange('gpt-4.1'),
      assistantMessage({ messageId: 'msg-before', outputTokens: 100 }),
      corruptToolRequestsString,
      corruptToolRequestsNull,
      assistantMessage({ messageId: 'msg-after', outputTokens: 200 }),
    ])

    const source = { path: eventsPath, project: 'test', provider: 'copilot' }
    const calls: ParsedProviderCall[] = []
    for await (const call of copilot.createSessionParser(source, new Set()).parse()) calls.push(call)

    // The healthy messages BEFORE and AFTER the corrupt events both parse,
    // proving that a corrupt event no longer aborts the per-file parse loop.
    // Pre-fix, .map on a non-array threw and we'd see < 4 calls.
    expect(calls).toHaveLength(4)
    expect(calls.find(c => c.outputTokens === 100)).toBeDefined() // msg-before
    expect(calls.find(c => c.outputTokens === 200)).toBeDefined() // msg-after
    // Corrupt events produce calls with empty tools, not crashes.
    const corruptCalls = calls.filter(c => c.outputTokens === 50)
    expect(corruptCalls.length).toBe(2)
    for (const c of corruptCalls) {
      expect(c.tools).toEqual([])
    }
  })

  it('skips assistant messages with zero outputTokens', async () => {
    const eventsPath = await createSessionDir('sess-004', [
      modelChange('gpt-4.1'),
      assistantMessage({ messageId: 'msg-empty', outputTokens: 0 }),
      assistantMessage({ messageId: 'msg-real', outputTokens: 42 }),
    ])

    const source = { path: eventsPath, project: 'test', provider: 'copilot' }
    const calls: ParsedProviderCall[] = []
    for await (const call of copilot.createSessionParser(source, new Set()).parse()) calls.push(call)

    expect(calls).toHaveLength(1)
    expect(calls[0]!.outputTokens).toBe(42)
  })

  it('deduplicates messages across parser runs', async () => {
    const eventsPath = await createSessionDir('sess-005', [
      modelChange('gpt-4.1'),
      assistantMessage({ messageId: 'msg-dup', outputTokens: 100 }),
    ])

    const source = { path: eventsPath, project: 'test', provider: 'copilot' }
    const seenKeys = new Set<string>()

    const calls1: ParsedProviderCall[] = []
    for await (const call of copilot.createSessionParser(source, seenKeys).parse()) calls1.push(call)

    const calls2: ParsedProviderCall[] = []
    for await (const call of copilot.createSessionParser(source, seenKeys).parse()) calls2.push(call)

    expect(calls1).toHaveLength(1)
    expect(calls2).toHaveLength(0)
  })

  it('returns empty for missing file', async () => {
    const source = { path: '/nonexistent/events.jsonl', project: 'test', provider: 'copilot' }
    const calls: ParsedProviderCall[] = []
    for await (const call of copilot.createSessionParser(source, new Set()).parse()) calls.push(call)
    expect(calls).toHaveLength(0)
  })

  it('skips assistant messages before the first model_change event', async () => {
    const eventsPath = await createSessionDir('sess-no-model', [
      assistantMessage({ messageId: 'msg-early', outputTokens: 50 }),
      modelChange('gpt-4.1'),
      assistantMessage({ messageId: 'msg-after', outputTokens: 80 }),
    ])

    const source = { path: eventsPath, project: 'test', provider: 'copilot' }
    const calls: ParsedProviderCall[] = []
    for await (const call of copilot.createSessionParser(source, new Set()).parse()) calls.push(call)

    expect(calls).toHaveLength(1)
    expect(calls[0]!.outputTokens).toBe(80)
    expect(calls[0]!.model).toBe('gpt-4.1')
  })

  it('infers OpenAI auto bucket for transcript toolCallId prefix call_', async () => {
    const eventsPath = await createSessionDir('sess-tr-call', [
      transcriptSessionStart('sess-tr-call'),
      transcriptUserMessage('check model inference'),
      transcriptAssistantMessage({
        messageId: 'msg-1',
        content: 'done',
        toolCallIds: ['call_abc123'],
      }),
    ])

    const source = { path: eventsPath, project: 'test', provider: 'copilot' }
    const calls: ParsedProviderCall[] = []
    for await (const call of copilot.createSessionParser(source, new Set()).parse()) calls.push(call)

    expect(calls).toHaveLength(1)
    expect(calls[0]!.model).toBe('copilot-openai-auto')
  })

  it('infers Anthropic auto bucket for transcript toolCallId prefixes tooluse_/toolu_vrtx_', async () => {
    const eventsPath = await createSessionDir('sess-tr-claude', [
      transcriptSessionStart('sess-tr-claude'),
      transcriptUserMessage('check model inference'),
      transcriptAssistantMessage({
        messageId: 'msg-1',
        content: 'done',
        toolCallIds: ['tooluse_XY', 'toolu_vrtx_01ABC'],
      }),
    ])

    const source = { path: eventsPath, project: 'test', provider: 'copilot' }
    const calls: ParsedProviderCall[] = []
    for await (const call of copilot.createSessionParser(source, new Set()).parse()) calls.push(call)

    expect(calls).toHaveLength(1)
    expect(calls[0]!.model).toBe('copilot-anthropic-auto')
  })

  it('chooses the dominant inferred transcript model when prefixes are mixed', async () => {
    const eventsPath = await createSessionDir('sess-tr-mixed', [
      transcriptSessionStart('sess-tr-mixed'),
      transcriptUserMessage('mixed'),
      transcriptAssistantMessage({
        messageId: 'msg-1',
        content: 'one',
        toolCallIds: ['toolu_bdrk_123'],
      }),
      transcriptAssistantMessage({
        messageId: 'msg-2',
        content: 'two',
        toolCallIds: ['call_1'],
      }),
      transcriptAssistantMessage({
        messageId: 'msg-3',
        content: 'three',
        toolCallIds: ['call_2'],
      }),
    ])

    const source = { path: eventsPath, project: 'test', provider: 'copilot' }
    const calls: ParsedProviderCall[] = []
    for await (const call of copilot.createSessionParser(source, new Set()).parse()) calls.push(call)

    expect(calls).toHaveLength(3)
    expect(calls.every(c => c.model === 'copilot-openai-auto')).toBe(true)
  })
})

describe('copilot provider - discoverSessions', () => {
  beforeEach(async () => {
    tmpDir = await mkdtemp(join(tmpdir(), 'copilot-test-'))
  })

  afterEach(async () => {
    await rm(tmpDir, { recursive: true, force: true })
  })

  it('discovers sessions from directory', async () => {
    await createSessionDir('sess-disc-001', [modelChange('gpt-4.1')])
    await createSessionDir('sess-disc-002', [modelChange('gpt-4.1')])

    const provider = createCopilotProvider(tmpDir, '/nonexistent/vscode')
    const sessions = await provider.discoverSessions()

    expect(sessions).toHaveLength(2)
    expect(sessions.every(s => s.provider === 'copilot')).toBe(true)
    expect(sessions.every(s => s.path.endsWith('events.jsonl'))).toBe(true)
  })

  it('reads project name from workspace.yaml cwd', async () => {
    await createSessionDir('sess-disc-003', [modelChange('gpt-4.1')], '/home/user/myapp')

    const provider = createCopilotProvider(tmpDir, '/nonexistent/vscode')
    const sessions = await provider.discoverSessions()

    expect(sessions).toHaveLength(1)
    expect(sessions[0]!.project).toBe('myapp')
  })

  it('strips quotes and trailing comments from workspace.yaml cwd', async () => {
    const sessionDir = join(tmpDir, 'sess-quoted')
    await mkdir(sessionDir, { recursive: true })
    await writeFile(join(sessionDir, 'workspace.yaml'), 'cwd: "/home/user/myapp" # project root\n')
    await writeFile(join(sessionDir, 'events.jsonl'), '\n')

    const provider = createCopilotProvider(tmpDir, '/nonexistent/vscode')
    const sessions = await provider.discoverSessions()

    expect(sessions).toHaveLength(1)
    expect(sessions[0]!.project).toBe('myapp')
  })

  it('returns empty when directory does not exist', async () => {
    const provider = createCopilotProvider('/nonexistent/path', '/nonexistent/vscode')
    const sessions = await provider.discoverSessions()
    expect(sessions).toHaveLength(0)
  })

  it('skips entries without events.jsonl', async () => {
    const emptyDir = join(tmpDir, 'empty-session')
    await mkdir(emptyDir, { recursive: true })

    const provider = createCopilotProvider(tmpDir, '/nonexistent/vscode')
    const sessions = await provider.discoverSessions()
    expect(sessions).toHaveLength(0)
  })

  it('discovers VS Code workspace transcripts', async () => {
    const wsDir = join(tmpDir, 'vscode-ws')
    const transcriptsDir = join(wsDir, 'abc123', 'GitHub.copilot-chat', 'transcripts')
    await mkdir(transcriptsDir, { recursive: true })
    await writeFile(join(wsDir, 'abc123', 'workspace.json'), JSON.stringify({ folder: 'file:///home/user/myapp' }))
    await writeFile(join(transcriptsDir, 'session-1.jsonl'), JSON.stringify({ type: 'session.start', data: { sessionId: 's1', producer: 'copilot-agent' } }) + '\n')

    const provider = createCopilotProvider('/nonexistent/legacy', wsDir)
    const sessions = await provider.discoverSessions()

    expect(sessions).toHaveLength(1)
    expect(sessions[0]!.project).toBe('myapp')
    expect(sessions[0]!.path).toContain('session-1.jsonl')
  })
})

describe('copilot provider - metadata', () => {
  it('has correct name and displayName', () => {
    expect(copilot.name).toBe('copilot')
    expect(copilot.displayName).toBe('Copilot')
  })

  it('normalizes tool display names', () => {
    expect(copilot.toolDisplayName('bash')).toBe('Bash')
    expect(copilot.toolDisplayName('read_file')).toBe('Read')
    expect(copilot.toolDisplayName('write_file')).toBe('Edit')
    expect(copilot.toolDisplayName('web_search')).toBe('WebSearch')
    expect(copilot.toolDisplayName('unknown_tool')).toBe('unknown_tool')
  })

  it('normalizes model display names', () => {
    expect(copilot.modelDisplayName('gpt-4.1')).toBe('GPT-4.1')
    expect(copilot.modelDisplayName('gpt-4.1-mini')).toBe('GPT-4.1 Mini')
    expect(copilot.modelDisplayName('gpt-4.1-nano')).toBe('GPT-4.1 Nano')
    expect(copilot.modelDisplayName('gpt-5-mini')).toBe('GPT-5 Mini')
    expect(copilot.modelDisplayName('o3')).toBe('o3')
    expect(copilot.modelDisplayName('o4-mini')).toBe('o4-mini')
    expect(copilot.modelDisplayName('copilot-openai-auto')).toBe('Copilot (OpenAI auto)')
    expect(copilot.modelDisplayName('copilot-anthropic-auto')).toBe('Copilot (Anthropic auto)')
    expect(copilot.modelDisplayName('unknown-model-xyz')).toBe('unknown-model-xyz')
  })

  it('longest-prefix match wins for versioned model IDs', () => {
    // gpt-5-mini-2026-01-01 must match gpt-5-mini, not gpt-5
    expect(copilot.modelDisplayName('gpt-5-mini-2026-01-01')).toBe('GPT-5 Mini')
    expect(copilot.modelDisplayName('gpt-4.1-mini-2026-01-01')).toBe('GPT-4.1 Mini')
  })
})