Add [Unreleased] section for next cycle
Some checks are pending
CI / build-check-test (push) Waiting to run

This commit is contained in:
Mario Zechner 2026-04-22 02:15:04 +02:00
parent a2c183db0f
commit 9f91276a42
7 changed files with 165 additions and 67 deletions

View file

@@ -1,5 +1,7 @@
# Changelog
## [Unreleased]
## [0.68.1] - 2026-04-22
### Fixed

View file

@@ -1,5 +1,7 @@
# Changelog
## [Unreleased]
## [0.68.1] - 2026-04-22
### Added

View file

@@ -5005,6 +5005,24 @@ export const MODELS = {
contextWindow: 262144,
maxTokens: 262144,
} satisfies Model<"openai-completions">,
"moonshotai/Kimi-K2.6": {
id: "moonshotai/Kimi-K2.6",
name: "Kimi-K2.6",
api: "openai-completions",
provider: "huggingface",
baseUrl: "https://router.huggingface.co/v1",
compat: {"supportsDeveloperRole":false},
reasoning: true,
input: ["text", "image"],
cost: {
input: 0.95,
output: 4,
cacheRead: 0.16,
cacheWrite: 0,
},
contextWindow: 262144,
maxTokens: 262144,
} satisfies Model<"openai-completions">,
"zai-org/GLM-4.7": {
id: "zai-org/GLM-4.7",
name: "GLM-4.7",
@@ -5079,6 +5097,23 @@ export const MODELS = {
} satisfies Model<"openai-completions">,
},
"kimi-coding": {
"k2p6": {
id: "k2p6",
name: "Kimi K2.6",
api: "anthropic-messages",
provider: "kimi-coding",
baseUrl: "https://api.kimi.com/coding",
reasoning: true,
input: ["text", "image"],
cost: {
input: 0,
output: 0,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 262144,
maxTokens: 32768,
} satisfies Model<"anthropic-messages">,
"kimi-for-coding": {
id: "kimi-for-coding",
name: "Kimi For Coding",
@@ -6961,6 +6996,40 @@ export const MODELS = {
contextWindow: 262144,
maxTokens: 65536,
} satisfies Model<"openai-completions">,
"kimi-k2.6": {
id: "kimi-k2.6",
name: "Kimi K2.6",
api: "openai-completions",
provider: "opencode",
baseUrl: "https://opencode.ai/zen/v1",
reasoning: true,
input: ["text", "image"],
cost: {
input: 0.95,
output: 4,
cacheRead: 0.16,
cacheWrite: 0,
},
contextWindow: 262144,
maxTokens: 65536,
} satisfies Model<"openai-completions">,
"ling-2.6-flash-free": {
id: "ling-2.6-flash-free",
name: "Ling 2.6 Flash Free",
api: "openai-completions",
provider: "opencode",
baseUrl: "https://opencode.ai/zen/v1",
reasoning: false,
input: ["text"],
cost: {
input: 0,
output: 0,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 262100,
maxTokens: 32800,
} satisfies Model<"openai-completions">,
"minimax-m2.5": {
id: "minimax-m2.5",
name: "MiniMax M2.5",
@@ -6995,6 +7064,23 @@ export const MODELS = {
contextWindow: 204800,
maxTokens: 131072,
} satisfies Model<"anthropic-messages">,
"minimax-m2.7": {
id: "minimax-m2.7",
name: "MiniMax M2.7",
api: "openai-completions",
provider: "opencode",
baseUrl: "https://opencode.ai/zen/v1",
reasoning: true,
input: ["text"],
cost: {
input: 0.3,
output: 1.2,
cacheRead: 0.06,
cacheWrite: 0,
},
contextWindow: 204800,
maxTokens: 131072,
} satisfies Model<"openai-completions">,
"nemotron-3-super-free": {
id: "nemotron-3-super-free",
name: "Nemotron 3 Super Free",
@@ -7103,16 +7189,16 @@ export const MODELS = {
} satisfies Model<"openai-completions">,
"kimi-k2.6": {
id: "kimi-k2.6",
name: "Kimi K2.6",
name: "Kimi K2.6 (3x limits)",
api: "openai-completions",
provider: "opencode-go",
baseUrl: "https://opencode.ai/zen/go/v1",
reasoning: true,
input: ["text", "image"],
cost: {
input: 0.95,
output: 4,
cacheRead: 0.16,
input: 0.32,
output: 1.34,
cacheRead: 0.054,
cacheWrite: 0,
},
contextWindow: 262144,
@@ -8185,13 +8271,13 @@ export const MODELS = {
reasoning: true,
input: ["text", "image"],
cost: {
input: 0.08,
input: 0.07,
output: 0.35,
cacheRead: 0.01,
cacheRead: 0.04,
cacheWrite: 0,
},
contextWindow: 262144,
maxTokens: 4096,
maxTokens: 262144,
} satisfies Model<"openai-completions">,
"google/gemma-4-26b-a4b-it:free": {
id: "google/gemma-4-26b-a4b-it:free",
@@ -8261,6 +8347,23 @@ export const MODELS = {
contextWindow: 128000,
maxTokens: 50000,
} satisfies Model<"openai-completions">,
"inclusionai/ling-2.6-flash:free": {
id: "inclusionai/ling-2.6-flash:free",
name: "inclusionAI: Ling-2.6-flash (free)",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 0,
output: 0,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 262144,
maxTokens: 32768,
} satisfies Model<"openai-completions">,
"kwaipilot/kat-coder-pro-v2": {
id: "kwaipilot/kat-coder-pro-v2",
name: "Kwaipilot: KAT-Coder-Pro V2",
@@ -8338,13 +8441,13 @@ export const MODELS = {
reasoning: false,
input: ["text"],
cost: {
input: 0.12,
output: 0.38,
input: 0.09999999999999999,
output: 0.32,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 131072,
maxTokens: 131072,
maxTokens: 16384,
} satisfies Model<"openai-completions">,
"meta-llama/llama-3.3-70b-instruct:free": {
id: "meta-llama/llama-3.3-70b-instruct:free",
@@ -8933,9 +9036,9 @@ export const MODELS = {
reasoning: true,
input: ["text", "image"],
cost: {
input: 0,
output: 0,
cacheRead: 0,
input: 0.7999999999999999,
output: 3.5,
cacheRead: 0.19999999999999998,
cacheWrite: 0,
},
contextWindow: 262144,
@@ -9451,40 +9554,6 @@ export const MODELS = {
contextWindow: 400000,
maxTokens: 128000,
} satisfies Model<"openai-completions">,
"openai/gpt-5-image": {
id: "openai/gpt-5-image",
name: "OpenAI: GPT-5 Image",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: true,
input: ["text", "image"],
cost: {
input: 10,
output: 10,
cacheRead: 1.25,
cacheWrite: 0,
},
contextWindow: 400000,
maxTokens: 128000,
} satisfies Model<"openai-completions">,
"openai/gpt-5-image-mini": {
id: "openai/gpt-5-image-mini",
name: "OpenAI: GPT-5 Image Mini",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: true,
input: ["text", "image"],
cost: {
input: 2.5,
output: 2,
cacheRead: 0.25,
cacheWrite: 0,
},
contextWindow: 400000,
maxTokens: 128000,
} satisfies Model<"openai-completions">,
"openai/gpt-5-mini": {
id: "openai/gpt-5-mini",
name: "OpenAI: GPT-5 Mini",
@@ -10080,23 +10149,6 @@ export const MODELS = {
contextWindow: 2000000,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"openrouter/elephant-alpha": {
id: "openrouter/elephant-alpha",
name: "Elephant",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: false,
input: ["text"],
cost: {
input: 0,
output: 0,
cacheRead: 0,
cacheWrite: 0,
},
contextWindow: 262144,
maxTokens: 32768,
} satisfies Model<"openai-completions">,
"openrouter/free": {
id: "openrouter/free",
name: "Free Models Router",
@@ -10482,7 +10534,7 @@ export const MODELS = {
cost: {
input: 0.15,
output: 0.7999999999999999,
cacheRead: 0.12,
cacheRead: 0.11,
cacheWrite: 0,
},
contextWindow: 262144,
@@ -11403,8 +11455,8 @@ export const MODELS = {
cacheRead: 0.119,
cacheWrite: 0,
},
contextWindow: 80000,
maxTokens: 131072,
contextWindow: 202752,
maxTokens: 4096,
} satisfies Model<"openai-completions">,
"z-ai/glm-5-turbo": {
id: "z-ai/glm-5-turbo",
@@ -11457,6 +11509,23 @@ export const MODELS = {
contextWindow: 202752,
maxTokens: 131072,
} satisfies Model<"openai-completions">,
"~anthropic/claude-opus-latest": {
id: "~anthropic/claude-opus-latest",
name: "Anthropic: Claude Opus Latest",
api: "openai-completions",
provider: "openrouter",
baseUrl: "https://openrouter.ai/api/v1",
reasoning: true,
input: ["text", "image"],
cost: {
input: 5,
output: 25,
cacheRead: 0.5,
cacheWrite: 6.25,
},
contextWindow: 1000000,
maxTokens: 128000,
} satisfies Model<"openai-completions">,
},
"vercel-ai-gateway": {
"alibaba/qwen-3-14b": {
@@ -11527,6 +11596,23 @@ export const MODELS = {
contextWindow: 128000,
maxTokens: 8192,
} satisfies Model<"anthropic-messages">,
"alibaba/qwen-3.6-max-preview": {
id: "alibaba/qwen-3.6-max-preview",
name: "Qwen 3.6 Max Preview",
api: "anthropic-messages",
provider: "vercel-ai-gateway",
baseUrl: "https://ai-gateway.vercel.sh",
reasoning: true,
input: ["text", "image"],
cost: {
input: 1.3,
output: 7.8,
cacheRead: 0.26,
cacheWrite: 1.625,
},
contextWindow: 240000,
maxTokens: 64000,
} satisfies Model<"anthropic-messages">,
"alibaba/qwen3-235b-a22b-thinking": {
id: "alibaba/qwen3-235b-a22b-thinking",
name: "Qwen3 235B A22B Thinking 2507",

View file

@@ -1,5 +1,7 @@
# Changelog
## [Unreleased]
## [0.68.1] - 2026-04-22
### New Features

View file

@@ -1,5 +1,7 @@
# Changelog
## [Unreleased]
## [0.68.1] - 2026-04-22
## [0.68.0] - 2026-04-20

View file

@@ -1,5 +1,7 @@
# Changelog
## [Unreleased]
## [0.68.1] - 2026-04-22
### Fixed

View file

@@ -1,5 +1,7 @@
# Changelog
## [Unreleased]
## [0.68.1] - 2026-04-22
## [0.68.0] - 2026-04-20