fix: update stale CI test expectations

This commit is contained in:
Mario Zechner 2026-04-24 20:22:16 +02:00
parent 39a9784d2a
commit 91154d9757
5 changed files with 13 additions and 7 deletions

View file

@@ -4,6 +4,7 @@
### Fixed
- Fixed OpenAI-compatible prompt cache tests to cover proxies that explicitly disable long cache retention.
- Stopped sending `tools: []` on OpenAI-compatible, Anthropic, OpenAI Responses, OpenAI Codex Responses, and Azure OpenAI Responses requests when no tools are active (e.g. `pi --no-tools`). DashScope/Aliyun Qwen (OpenAI-compatible) rejects empty tools arrays with `"[] is too short - 'tools'"` (HTTP 400); the field is now omitted unless the conversation has tool history (the existing LiteLLM/Anthropic-proxy workaround).
- Fixed `supportsXhigh()` to recognize DeepSeek V4 Pro, preserving `xhigh` reasoning requests so they map to DeepSeek's `max` effort ([#3662](https://github.com/badlogic/pi-mono/issues/3662))
- Fixed OpenAI-compatible DeepSeek V4 model replay to include empty `reasoning_content` on assistant messages when needed, preventing OpenRouter DeepSeek V4 sessions from failing after responses without reasoning deltas ([#3668](https://github.com/badlogic/pi-mono/issues/3668))

View file

@@ -132,9 +132,10 @@ describe("openai-completions prompt caching", () => {
expect(payload?.prompt_cache_retention).toBeUndefined();
});
it("omits prompt cache fields for non-OpenAI base URLs", async () => {
it("omits prompt cache fields for non-OpenAI base URLs without compatible long retention", async () => {
const model = createModel({
baseUrl: "https://proxy.example.com/v1",
compat: { supportsLongCacheRetention: false },
});
const { payload } = await captureRequest({ cacheRetention: "long", sessionId: "session-proxy" }, model);

View file

@@ -2,6 +2,10 @@
## [Unreleased]
### Fixed
- Fixed coding-agent test expectations for current default models and missing-auth guidance.
## [0.70.2] - 2026-04-24
### Fixed

View file

@@ -373,20 +373,20 @@ describe("resolveCliModel", () => {
});
describe("default model selection", () => {
test("openai defaults are gpt-5.4", () => {
test("openai defaults track current models", () => {
expect(defaultModelPerProvider.openai).toBe("gpt-5.4");
expect(defaultModelPerProvider["openai-codex"]).toBe("gpt-5.4");
expect(defaultModelPerProvider["openai-codex"]).toBe("gpt-5.5");
});
test("zai, minimax, and cerebras defaults track current models", () => {
expect(defaultModelPerProvider.zai).toBe("glm-5");
expect(defaultModelPerProvider.zai).toBe("glm-5.1");
expect(defaultModelPerProvider.minimax).toBe("MiniMax-M2.7");
expect(defaultModelPerProvider["minimax-cn"]).toBe("MiniMax-M2.7");
expect(defaultModelPerProvider.cerebras).toBe("zai-glm-4.7");
});
test("ai-gateway default is opus 4.6", () => {
expect(defaultModelPerProvider["vercel-ai-gateway"]).toBe("anthropic/claude-opus-4-6");
test("ai-gateway default tracks current model", () => {
expect(defaultModelPerProvider["vercel-ai-gateway"]).toBe("zai/glm-5.1");
});
test("findInitialModel accepts explicit provider custom model ids", async () => {

View file

@@ -218,7 +218,7 @@ describe("RPC prompt response semantics", () => {
command: "prompt",
success: false,
error: expect.stringContaining(
"No API key found for fake-provider.\n\nUse /login or set an API key environment variable. See ",
"No API key found for fake-provider.\n\nUse /login to log into a provider via OAuth or API key. See:",
),
});
});