From f74a255ca9a8f5a8a95c31d05254a5148f78a834 Mon Sep 17 00:00:00 2001 From: Frank Date: Mon, 20 Apr 2026 22:21:06 -0400 Subject: [PATCH 01/73] zen: tpm routing --- .../app/src/routes/zen/util/handler.ts | 23 +- .../src/routes/zen/util/modelTpmLimiter.ts | 31 +- .../migration.sql | 5 + .../snapshot.json | 2709 +++++++++++++++++ packages/console/core/src/schema/ip.sql.ts | 7 +- 5 files changed, 2746 insertions(+), 29 deletions(-) create mode 100644 packages/console/core/migrations/20260421020842_bizarre_living_tribunal/migration.sql create mode 100644 packages/console/core/migrations/20260421020842_bizarre_living_tribunal/snapshot.json diff --git a/packages/console/app/src/routes/zen/util/handler.ts b/packages/console/app/src/routes/zen/util/handler.ts index 81c512b99a..635eadebe8 100644 --- a/packages/console/app/src/routes/zen/util/handler.ts +++ b/packages/console/app/src/routes/zen/util/handler.ts @@ -448,31 +448,40 @@ export async function handler( return modelInfo.providers.find((provider) => provider.id === modelInfo.byokProvider) } + // Filter out TPM limited providers + const allProviders = modelInfo.providers.filter((provider) => { + if (!provider.tpmLimit) return true + const usage = modelTpmLimits?.[`${provider.id}/${provider.model}`] ?? 
0 + return usage < provider.tpmLimit * 1_000_000 + }) + // Always use the same provider for the same session if (stickyProvider) { - const provider = modelInfo.providers.find((provider) => provider.id === stickyProvider) + const provider = allProviders.find((provider) => provider.id === stickyProvider) if (provider) return provider } if (trialProviders) { const trialProvider = trialProviders[Math.floor(Math.random() * trialProviders.length)] - const provider = modelInfo.providers.find((provider) => provider.id === trialProvider) + const provider = allProviders.find((provider) => provider.id === trialProvider) if (provider) return provider } if (retry.retryCount !== MAX_FAILOVER_RETRIES) { - const allProviders = modelInfo.providers + let topPriority = Infinity + const providers = allProviders .filter((provider) => !provider.disabled) .filter((provider) => provider.weight !== 0) .filter((provider) => !retry.excludeProviders.includes(provider.id)) .filter((provider) => { if (!provider.tpmLimit) return true const usage = modelTpmLimits?.[`${provider.id}/${provider.model}`] ?? 
0 - return usage < provider.tpmLimit * 1_000_000 + return usage < provider.tpmLimit * 1_000_000 * 0.8 + }) + .map((provider) => { + topPriority = Math.min(topPriority, provider.priority) + return provider }) - - const topPriority = Math.min(...allProviders.map((p) => p.priority)) - const providers = allProviders .filter((p) => p.priority <= topPriority) .flatMap((provider) => Array(provider.weight).fill(provider)) diff --git a/packages/console/app/src/routes/zen/util/modelTpmLimiter.ts b/packages/console/app/src/routes/zen/util/modelTpmLimiter.ts index 53015d51cc..8e3e8cc95e 100644 --- a/packages/console/app/src/routes/zen/util/modelTpmLimiter.ts +++ b/packages/console/app/src/routes/zen/util/modelTpmLimiter.ts @@ -1,28 +1,25 @@ import { and, Database, eq, inArray, sql } from "@opencode-ai/console-core/drizzle/index.js" -import { ModelTpmLimitTable } from "@opencode-ai/console-core/schema/ip.sql.js" +import { ModelTpmRateLimitTable } from "@opencode-ai/console-core/schema/ip.sql.js" import { UsageInfo } from "./provider/provider" export function createModelTpmLimiter(providers: { id: string; model: string; tpmLimit?: number }[]) { const ids = providers.filter((p) => p.tpmLimit).map((p) => `${p.id}/${p.model}`) if (ids.length === 0) return - const yyyyMMddHHmm = new Date(Date.now()) - .toISOString() - .replace(/[^0-9]/g, "") - .substring(0, 12) + const yyyyMMddHHmm = parseInt( + new Date(Date.now()) + .toISOString() + .replace(/[^0-9]/g, "") + .substring(0, 12), + ) return { check: async () => { const data = await Database.use((tx) => tx .select() - .from(ModelTpmLimitTable) - .where( - inArray( - ModelTpmLimitTable.id, - ids.map((id) => formatId(id, yyyyMMddHHmm)), - ), - ), + .from(ModelTpmRateLimitTable) + .where(and(inArray(ModelTpmRateLimitTable.id, ids), eq(ModelTpmRateLimitTable.interval, yyyyMMddHHmm))), ) // convert to map of model to count @@ -41,14 +38,10 @@ export function createModelTpmLimiter(providers: { id: string; model: string; tp if (usage <= 0) 
return await Database.use((tx) => tx - .insert(ModelTpmLimitTable) - .values({ id: formatId(id, yyyyMMddHHmm), count: usage }) - .onDuplicateKeyUpdate({ set: { count: sql`${ModelTpmLimitTable.count} + ${usage}` } }), + .insert(ModelTpmRateLimitTable) + .values({ id, interval: yyyyMMddHHmm, count: usage }) + .onDuplicateKeyUpdate({ set: { count: sql`${ModelTpmRateLimitTable.count} + ${usage}` } }), ) }, } - - function formatId(id: string, yyyyMMddHHmm: string) { - return `${id.substring(0, 200)}/${yyyyMMddHHmm}` - } } diff --git a/packages/console/core/migrations/20260421020842_bizarre_living_tribunal/migration.sql b/packages/console/core/migrations/20260421020842_bizarre_living_tribunal/migration.sql new file mode 100644 index 0000000000..07dc63d984 --- /dev/null +++ b/packages/console/core/migrations/20260421020842_bizarre_living_tribunal/migration.sql @@ -0,0 +1,5 @@ +CREATE TABLE `model_tpm_rate_limit` ( + `id` varchar(255) PRIMARY KEY, + `interval` bigint NOT NULL, + `count` int NOT NULL +); diff --git a/packages/console/core/migrations/20260421020842_bizarre_living_tribunal/snapshot.json b/packages/console/core/migrations/20260421020842_bizarre_living_tribunal/snapshot.json new file mode 100644 index 0000000000..e55204da17 --- /dev/null +++ b/packages/console/core/migrations/20260421020842_bizarre_living_tribunal/snapshot.json @@ -0,0 +1,2709 @@ +{ + "version": "6", + "dialect": "mysql", + "id": "29e20639-1d4f-4125-bed8-70b7adaaa387", + "prevIds": [ + "0af8994a-606c-4ac9-a0a7-ebc991faaa38" + ], + "ddl": [ + { + "name": "account", + "entityType": "tables" + }, + { + "name": "auth", + "entityType": "tables" + }, + { + "name": "benchmark", + "entityType": "tables" + }, + { + "name": "billing", + "entityType": "tables" + }, + { + "name": "coupon", + "entityType": "tables" + }, + { + "name": "lite", + "entityType": "tables" + }, + { + "name": "payment", + "entityType": "tables" + }, + { + "name": "subscription", + "entityType": "tables" + }, + { + "name": "usage", 
+ "entityType": "tables" + }, + { + "name": "ip_rate_limit", + "entityType": "tables" + }, + { + "name": "ip", + "entityType": "tables" + }, + { + "name": "key_rate_limit", + "entityType": "tables" + }, + { + "name": "model_tpm_limit", + "entityType": "tables" + }, + { + "name": "model_tpm_rate_limit", + "entityType": "tables" + }, + { + "name": "key", + "entityType": "tables" + }, + { + "name": "model", + "entityType": "tables" + }, + { + "name": "provider", + "entityType": "tables" + }, + { + "name": "user", + "entityType": "tables" + }, + { + "name": "workspace", + "entityType": "tables" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "id", + "entityType": "columns", + "table": "account" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(now())", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_created", + "entityType": "columns", + "table": "account" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3))", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_updated", + "entityType": "columns", + "table": "account" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_deleted", + "entityType": "columns", + "table": "account" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": 
"id", + "entityType": "columns", + "table": "auth" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(now())", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_created", + "entityType": "columns", + "table": "auth" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3))", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_updated", + "entityType": "columns", + "table": "auth" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_deleted", + "entityType": "columns", + "table": "auth" + }, + { + "type": "enum('email','github','google')", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "provider", + "entityType": "columns", + "table": "auth" + }, + { + "type": "varchar(255)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "subject", + "entityType": "columns", + "table": "auth" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "account_id", + "entityType": "columns", + "table": "auth" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + 
"generated": null, + "name": "id", + "entityType": "columns", + "table": "benchmark" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(now())", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_created", + "entityType": "columns", + "table": "benchmark" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3))", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_updated", + "entityType": "columns", + "table": "benchmark" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_deleted", + "entityType": "columns", + "table": "benchmark" + }, + { + "type": "varchar(64)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "model", + "entityType": "columns", + "table": "benchmark" + }, + { + "type": "varchar(64)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "agent", + "entityType": "columns", + "table": "benchmark" + }, + { + "type": "mediumtext", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "result", + "entityType": "columns", + "table": "benchmark" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": 
null, + "collation": null, + "generated": null, + "name": "id", + "entityType": "columns", + "table": "billing" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "workspace_id", + "entityType": "columns", + "table": "billing" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(now())", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_created", + "entityType": "columns", + "table": "billing" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3))", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_updated", + "entityType": "columns", + "table": "billing" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_deleted", + "entityType": "columns", + "table": "billing" + }, + { + "type": "varchar(255)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "customer_id", + "entityType": "columns", + "table": "billing" + }, + { + "type": "varchar(255)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "payment_method_id", + "entityType": "columns", + "table": "billing" + }, + { + "type": "varchar(32)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": 
false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "payment_method_type", + "entityType": "columns", + "table": "billing" + }, + { + "type": "varchar(4)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "payment_method_last4", + "entityType": "columns", + "table": "billing" + }, + { + "type": "bigint", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "balance", + "entityType": "columns", + "table": "billing" + }, + { + "type": "int", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "monthly_limit", + "entityType": "columns", + "table": "billing" + }, + { + "type": "bigint", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "monthly_usage", + "entityType": "columns", + "table": "billing" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_monthly_usage_updated", + "entityType": "columns", + "table": "billing" + }, + { + "type": "boolean", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "reload", + "entityType": "columns", + "table": "billing" + }, + { + "type": "int", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + 
"onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "reload_trigger", + "entityType": "columns", + "table": "billing" + }, + { + "type": "int", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "reload_amount", + "entityType": "columns", + "table": "billing" + }, + { + "type": "varchar(255)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "reload_error", + "entityType": "columns", + "table": "billing" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_reload_error", + "entityType": "columns", + "table": "billing" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_reload_locked_till", + "entityType": "columns", + "table": "billing" + }, + { + "type": "json", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "subscription", + "entityType": "columns", + "table": "billing" + }, + { + "type": "varchar(28)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "subscription_id", + "entityType": "columns", + "table": "billing" + }, + { + "type": "enum('20','100','200')", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": 
false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "subscription_plan", + "entityType": "columns", + "table": "billing" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_subscription_booked", + "entityType": "columns", + "table": "billing" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_subscription_selected", + "entityType": "columns", + "table": "billing" + }, + { + "type": "varchar(28)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "lite_subscription_id", + "entityType": "columns", + "table": "billing" + }, + { + "type": "json", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "lite", + "entityType": "columns", + "table": "billing" + }, + { + "type": "varchar(255)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "email", + "entityType": "columns", + "table": "coupon" + }, + { + "type": "enum('BUILDATHON','GOFREEMONTH')", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "type", + "entityType": "columns", + "table": "coupon" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": 
null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_redeemed", + "entityType": "columns", + "table": "coupon" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "id", + "entityType": "columns", + "table": "lite" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "workspace_id", + "entityType": "columns", + "table": "lite" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(now())", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_created", + "entityType": "columns", + "table": "lite" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3))", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_updated", + "entityType": "columns", + "table": "lite" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_deleted", + "entityType": "columns", + "table": "lite" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "user_id", + "entityType": "columns", + "table": "lite" + }, + { + "type": "bigint", + "notNull": false, + "autoIncrement": false, + 
"default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "rolling_usage", + "entityType": "columns", + "table": "lite" + }, + { + "type": "bigint", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "weekly_usage", + "entityType": "columns", + "table": "lite" + }, + { + "type": "bigint", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "monthly_usage", + "entityType": "columns", + "table": "lite" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_rolling_updated", + "entityType": "columns", + "table": "lite" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_weekly_updated", + "entityType": "columns", + "table": "lite" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_monthly_updated", + "entityType": "columns", + "table": "lite" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "id", + "entityType": "columns", + "table": "payment" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + 
"onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "workspace_id", + "entityType": "columns", + "table": "payment" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(now())", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_created", + "entityType": "columns", + "table": "payment" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3))", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_updated", + "entityType": "columns", + "table": "payment" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_deleted", + "entityType": "columns", + "table": "payment" + }, + { + "type": "varchar(255)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "customer_id", + "entityType": "columns", + "table": "payment" + }, + { + "type": "varchar(255)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "invoice_id", + "entityType": "columns", + "table": "payment" + }, + { + "type": "varchar(255)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "payment_id", + "entityType": "columns", + "table": "payment" + }, + { + "type": "bigint", + "notNull": true, + 
"autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "amount", + "entityType": "columns", + "table": "payment" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_refunded", + "entityType": "columns", + "table": "payment" + }, + { + "type": "json", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "enrichment", + "entityType": "columns", + "table": "payment" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "id", + "entityType": "columns", + "table": "subscription" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "workspace_id", + "entityType": "columns", + "table": "subscription" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(now())", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_created", + "entityType": "columns", + "table": "subscription" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3))", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_updated", + "entityType": "columns", + "table": "subscription" + }, + { + 
"type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_deleted", + "entityType": "columns", + "table": "subscription" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "user_id", + "entityType": "columns", + "table": "subscription" + }, + { + "type": "bigint", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "rolling_usage", + "entityType": "columns", + "table": "subscription" + }, + { + "type": "bigint", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "fixed_usage", + "entityType": "columns", + "table": "subscription" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_rolling_updated", + "entityType": "columns", + "table": "subscription" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_fixed_updated", + "entityType": "columns", + "table": "subscription" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "id", + "entityType": "columns", + "table": "usage" + }, 
+ { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "workspace_id", + "entityType": "columns", + "table": "usage" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(now())", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_created", + "entityType": "columns", + "table": "usage" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3))", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_updated", + "entityType": "columns", + "table": "usage" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_deleted", + "entityType": "columns", + "table": "usage" + }, + { + "type": "varchar(255)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "model", + "entityType": "columns", + "table": "usage" + }, + { + "type": "varchar(255)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "provider", + "entityType": "columns", + "table": "usage" + }, + { + "type": "int", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "input_tokens", + "entityType": "columns", + "table": 
"usage" + }, + { + "type": "int", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "output_tokens", + "entityType": "columns", + "table": "usage" + }, + { + "type": "int", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "reasoning_tokens", + "entityType": "columns", + "table": "usage" + }, + { + "type": "int", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "cache_read_tokens", + "entityType": "columns", + "table": "usage" + }, + { + "type": "int", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "cache_write_5m_tokens", + "entityType": "columns", + "table": "usage" + }, + { + "type": "int", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "cache_write_1h_tokens", + "entityType": "columns", + "table": "usage" + }, + { + "type": "bigint", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "cost", + "entityType": "columns", + "table": "usage" + }, + { + "type": "varchar(30)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "key_id", + "entityType": "columns", + "table": "usage" + }, + { + "type": "varchar(30)", + "notNull": 
false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "session_id", + "entityType": "columns", + "table": "usage" + }, + { + "type": "json", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "enrichment", + "entityType": "columns", + "table": "usage" + }, + { + "type": "varchar(45)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "ip", + "entityType": "columns", + "table": "ip_rate_limit" + }, + { + "type": "varchar(10)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "interval", + "entityType": "columns", + "table": "ip_rate_limit" + }, + { + "type": "int", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "count", + "entityType": "columns", + "table": "ip_rate_limit" + }, + { + "type": "varchar(45)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "ip", + "entityType": "columns", + "table": "ip" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(now())", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_created", + "entityType": "columns", + "table": "ip" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": 
"(CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3))", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_updated", + "entityType": "columns", + "table": "ip" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_deleted", + "entityType": "columns", + "table": "ip" + }, + { + "type": "int", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "usage", + "entityType": "columns", + "table": "ip" + }, + { + "type": "varchar(255)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "key", + "entityType": "columns", + "table": "key_rate_limit" + }, + { + "type": "varchar(40)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "interval", + "entityType": "columns", + "table": "key_rate_limit" + }, + { + "type": "int", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "count", + "entityType": "columns", + "table": "key_rate_limit" + }, + { + "type": "varchar(255)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "id", + "entityType": "columns", + "table": "model_tpm_limit" + }, + { + "type": "int", + "notNull": true, + "autoIncrement": false, + "default": null, + 
"onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "count", + "entityType": "columns", + "table": "model_tpm_limit" + }, + { + "type": "varchar(255)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "id", + "entityType": "columns", + "table": "model_tpm_rate_limit" + }, + { + "type": "bigint", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "interval", + "entityType": "columns", + "table": "model_tpm_rate_limit" + }, + { + "type": "int", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "count", + "entityType": "columns", + "table": "model_tpm_rate_limit" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "id", + "entityType": "columns", + "table": "key" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "workspace_id", + "entityType": "columns", + "table": "key" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(now())", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_created", + "entityType": "columns", + "table": "key" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(CURRENT_TIMESTAMP(3) ON UPDATE 
CURRENT_TIMESTAMP(3))", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_updated", + "entityType": "columns", + "table": "key" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_deleted", + "entityType": "columns", + "table": "key" + }, + { + "type": "varchar(255)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "name", + "entityType": "columns", + "table": "key" + }, + { + "type": "varchar(255)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "key", + "entityType": "columns", + "table": "key" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "user_id", + "entityType": "columns", + "table": "key" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_used", + "entityType": "columns", + "table": "key" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "id", + "entityType": "columns", + "table": "model" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, 
+ "charSet": null, + "collation": null, + "generated": null, + "name": "workspace_id", + "entityType": "columns", + "table": "model" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(now())", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_created", + "entityType": "columns", + "table": "model" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3))", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_updated", + "entityType": "columns", + "table": "model" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_deleted", + "entityType": "columns", + "table": "model" + }, + { + "type": "varchar(64)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "model", + "entityType": "columns", + "table": "model" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "id", + "entityType": "columns", + "table": "provider" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "workspace_id", + "entityType": "columns", + "table": "provider" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(now())", + "onUpdateNow": false, + 
"onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_created", + "entityType": "columns", + "table": "provider" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3))", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_updated", + "entityType": "columns", + "table": "provider" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_deleted", + "entityType": "columns", + "table": "provider" + }, + { + "type": "varchar(64)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "provider", + "entityType": "columns", + "table": "provider" + }, + { + "type": "text", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "credentials", + "entityType": "columns", + "table": "provider" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "id", + "entityType": "columns", + "table": "user" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "workspace_id", + "entityType": "columns", + "table": "user" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(now())", + 
"onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_created", + "entityType": "columns", + "table": "user" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3))", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_updated", + "entityType": "columns", + "table": "user" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_deleted", + "entityType": "columns", + "table": "user" + }, + { + "type": "varchar(30)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "account_id", + "entityType": "columns", + "table": "user" + }, + { + "type": "varchar(255)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "email", + "entityType": "columns", + "table": "user" + }, + { + "type": "varchar(255)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "name", + "entityType": "columns", + "table": "user" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_seen", + "entityType": "columns", + "table": "user" + }, + { + "type": "int", + "notNull": false, + "autoIncrement": false, + "default": null, + 
"onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "color", + "entityType": "columns", + "table": "user" + }, + { + "type": "enum('admin','member')", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "role", + "entityType": "columns", + "table": "user" + }, + { + "type": "int", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "monthly_limit", + "entityType": "columns", + "table": "user" + }, + { + "type": "bigint", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "monthly_usage", + "entityType": "columns", + "table": "user" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_monthly_usage_updated", + "entityType": "columns", + "table": "user" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "id", + "entityType": "columns", + "table": "workspace" + }, + { + "type": "varchar(255)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "slug", + "entityType": "columns", + "table": "workspace" + }, + { + "type": "varchar(255)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": 
null, + "charSet": null, + "collation": null, + "generated": null, + "name": "name", + "entityType": "columns", + "table": "workspace" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(now())", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_created", + "entityType": "columns", + "table": "workspace" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3))", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_updated", + "entityType": "columns", + "table": "workspace" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_deleted", + "entityType": "columns", + "table": "workspace" + }, + { + "columns": [ + "id" + ], + "name": "PRIMARY", + "table": "account", + "entityType": "pks" + }, + { + "columns": [ + "id" + ], + "name": "PRIMARY", + "table": "auth", + "entityType": "pks" + }, + { + "columns": [ + "id" + ], + "name": "PRIMARY", + "table": "benchmark", + "entityType": "pks" + }, + { + "columns": [ + "workspace_id", + "id" + ], + "name": "PRIMARY", + "table": "billing", + "entityType": "pks" + }, + { + "columns": [ + "email", + "type" + ], + "name": "PRIMARY", + "table": "coupon", + "entityType": "pks" + }, + { + "columns": [ + "workspace_id", + "id" + ], + "name": "PRIMARY", + "table": "lite", + "entityType": "pks" + }, + { + "columns": [ + "workspace_id", + "id" + ], + "name": "PRIMARY", + "table": "payment", + "entityType": "pks" + }, + { + "columns": [ + "workspace_id", + "id" + ], + "name": "PRIMARY", + "table": "subscription", + "entityType": "pks" + }, + { + "columns": [ + "workspace_id", + "id" + ], + 
"name": "PRIMARY", + "table": "usage", + "entityType": "pks" + }, + { + "columns": [ + "ip", + "interval" + ], + "name": "PRIMARY", + "table": "ip_rate_limit", + "entityType": "pks" + }, + { + "columns": [ + "ip" + ], + "name": "PRIMARY", + "table": "ip", + "entityType": "pks" + }, + { + "columns": [ + "key", + "interval" + ], + "name": "PRIMARY", + "table": "key_rate_limit", + "entityType": "pks" + }, + { + "columns": [ + "id" + ], + "name": "PRIMARY", + "table": "model_tpm_limit", + "entityType": "pks" + }, + { + "columns": [ + "id" + ], + "name": "PRIMARY", + "table": "model_tpm_rate_limit", + "entityType": "pks" + }, + { + "columns": [ + "workspace_id", + "id" + ], + "name": "PRIMARY", + "table": "key", + "entityType": "pks" + }, + { + "columns": [ + "workspace_id", + "id" + ], + "name": "PRIMARY", + "table": "model", + "entityType": "pks" + }, + { + "columns": [ + "workspace_id", + "id" + ], + "name": "PRIMARY", + "table": "provider", + "entityType": "pks" + }, + { + "columns": [ + "workspace_id", + "id" + ], + "name": "PRIMARY", + "table": "user", + "entityType": "pks" + }, + { + "columns": [ + "id" + ], + "name": "PRIMARY", + "table": "workspace", + "entityType": "pks" + }, + { + "columns": [ + { + "value": "provider", + "isExpression": false + }, + { + "value": "subject", + "isExpression": false + } + ], + "isUnique": true, + "using": null, + "algorithm": null, + "lock": null, + "nameExplicit": true, + "name": "provider", + "entityType": "indexes", + "table": "auth" + }, + { + "columns": [ + { + "value": "account_id", + "isExpression": false + } + ], + "isUnique": false, + "using": null, + "algorithm": null, + "lock": null, + "nameExplicit": true, + "name": "account_id", + "entityType": "indexes", + "table": "auth" + }, + { + "columns": [ + { + "value": "time_created", + "isExpression": false + } + ], + "isUnique": false, + "using": null, + "algorithm": null, + "lock": null, + "nameExplicit": true, + "name": "time_created", + "entityType": "indexes", + 
"table": "benchmark" + }, + { + "columns": [ + { + "value": "customer_id", + "isExpression": false + } + ], + "isUnique": true, + "using": null, + "algorithm": null, + "lock": null, + "nameExplicit": true, + "name": "global_customer_id", + "entityType": "indexes", + "table": "billing" + }, + { + "columns": [ + { + "value": "subscription_id", + "isExpression": false + } + ], + "isUnique": true, + "using": null, + "algorithm": null, + "lock": null, + "nameExplicit": true, + "name": "global_subscription_id", + "entityType": "indexes", + "table": "billing" + }, + { + "columns": [ + { + "value": "workspace_id", + "isExpression": false + }, + { + "value": "user_id", + "isExpression": false + } + ], + "isUnique": true, + "using": null, + "algorithm": null, + "lock": null, + "nameExplicit": true, + "name": "workspace_user_id", + "entityType": "indexes", + "table": "lite" + }, + { + "columns": [ + { + "value": "workspace_id", + "isExpression": false + }, + { + "value": "user_id", + "isExpression": false + } + ], + "isUnique": true, + "using": null, + "algorithm": null, + "lock": null, + "nameExplicit": true, + "name": "workspace_user_id", + "entityType": "indexes", + "table": "subscription" + }, + { + "columns": [ + { + "value": "workspace_id", + "isExpression": false + }, + { + "value": "time_created", + "isExpression": false + } + ], + "isUnique": false, + "using": null, + "algorithm": null, + "lock": null, + "nameExplicit": true, + "name": "usage_time_created", + "entityType": "indexes", + "table": "usage" + }, + { + "columns": [ + { + "value": "key", + "isExpression": false + } + ], + "isUnique": true, + "using": null, + "algorithm": null, + "lock": null, + "nameExplicit": true, + "name": "global_key", + "entityType": "indexes", + "table": "key" + }, + { + "columns": [ + { + "value": "workspace_id", + "isExpression": false + }, + { + "value": "model", + "isExpression": false + } + ], + "isUnique": true, + "using": null, + "algorithm": null, + "lock": null, + 
"nameExplicit": true, + "name": "model_workspace_model", + "entityType": "indexes", + "table": "model" + }, + { + "columns": [ + { + "value": "workspace_id", + "isExpression": false + }, + { + "value": "provider", + "isExpression": false + } + ], + "isUnique": true, + "using": null, + "algorithm": null, + "lock": null, + "nameExplicit": true, + "name": "workspace_provider", + "entityType": "indexes", + "table": "provider" + }, + { + "columns": [ + { + "value": "workspace_id", + "isExpression": false + }, + { + "value": "account_id", + "isExpression": false + } + ], + "isUnique": true, + "using": null, + "algorithm": null, + "lock": null, + "nameExplicit": true, + "name": "user_account_id", + "entityType": "indexes", + "table": "user" + }, + { + "columns": [ + { + "value": "workspace_id", + "isExpression": false + }, + { + "value": "email", + "isExpression": false + } + ], + "isUnique": true, + "using": null, + "algorithm": null, + "lock": null, + "nameExplicit": true, + "name": "user_email", + "entityType": "indexes", + "table": "user" + }, + { + "columns": [ + { + "value": "account_id", + "isExpression": false + } + ], + "isUnique": false, + "using": null, + "algorithm": null, + "lock": null, + "nameExplicit": true, + "name": "global_account_id", + "entityType": "indexes", + "table": "user" + }, + { + "columns": [ + { + "value": "email", + "isExpression": false + } + ], + "isUnique": false, + "using": null, + "algorithm": null, + "lock": null, + "nameExplicit": true, + "name": "global_email", + "entityType": "indexes", + "table": "user" + }, + { + "columns": [ + { + "value": "slug", + "isExpression": false + } + ], + "isUnique": true, + "using": null, + "algorithm": null, + "lock": null, + "nameExplicit": true, + "name": "slug", + "entityType": "indexes", + "table": "workspace" + } + ], + "renames": [] +} \ No newline at end of file diff --git a/packages/console/core/src/schema/ip.sql.ts b/packages/console/core/src/schema/ip.sql.ts index 5814054460..336ec60dfd 
100644 --- a/packages/console/core/src/schema/ip.sql.ts +++ b/packages/console/core/src/schema/ip.sql.ts @@ -1,4 +1,4 @@ -import { mysqlTable, int, primaryKey, varchar } from "drizzle-orm/mysql-core" +import { mysqlTable, int, primaryKey, varchar, bigint } from "drizzle-orm/mysql-core" import { timestamps } from "../drizzle/types" export const IpTable = mysqlTable( @@ -31,10 +31,11 @@ export const KeyRateLimitTable = mysqlTable( (table) => [primaryKey({ columns: [table.key, table.interval] })], ) -export const ModelTpmLimitTable = mysqlTable( - "model_tpm_limit", +export const ModelTpmRateLimitTable = mysqlTable( + "model_tpm_rate_limit", { id: varchar("id", { length: 255 }).notNull(), + interval: bigint("interval", { mode: "number" }).notNull(), count: int("count").notNull(), }, (table) => [primaryKey({ columns: [table.id] })], From 53b0084ce2ce595f58f1297ebbe3f17e6da1ee5c Mon Sep 17 00:00:00 2001 From: "opencode-agent[bot]" Date: Tue, 21 Apr 2026 02:22:12 +0000 Subject: [PATCH 02/73] chore: generate --- .../snapshot.json | 94 +++++-------------- 1 file changed, 21 insertions(+), 73 deletions(-) diff --git a/packages/console/core/migrations/20260421020842_bizarre_living_tribunal/snapshot.json b/packages/console/core/migrations/20260421020842_bizarre_living_tribunal/snapshot.json index e55204da17..99e6fa52f7 100644 --- a/packages/console/core/migrations/20260421020842_bizarre_living_tribunal/snapshot.json +++ b/packages/console/core/migrations/20260421020842_bizarre_living_tribunal/snapshot.json @@ -2,9 +2,7 @@ "version": "6", "dialect": "mysql", "id": "29e20639-1d4f-4125-bed8-70b7adaaa387", - "prevIds": [ - "0af8994a-606c-4ac9-a0a7-ebc991faaa38" - ], + "prevIds": ["0af8994a-606c-4ac9-a0a7-ebc991faaa38"], "ddl": [ { "name": "account", @@ -2253,165 +2251,115 @@ "table": "workspace" }, { - "columns": [ - "id" - ], + "columns": ["id"], "name": "PRIMARY", "table": "account", "entityType": "pks" }, { - "columns": [ - "id" - ], + "columns": ["id"], "name": "PRIMARY", 
"table": "auth", "entityType": "pks" }, { - "columns": [ - "id" - ], + "columns": ["id"], "name": "PRIMARY", "table": "benchmark", "entityType": "pks" }, { - "columns": [ - "workspace_id", - "id" - ], + "columns": ["workspace_id", "id"], "name": "PRIMARY", "table": "billing", "entityType": "pks" }, { - "columns": [ - "email", - "type" - ], + "columns": ["email", "type"], "name": "PRIMARY", "table": "coupon", "entityType": "pks" }, { - "columns": [ - "workspace_id", - "id" - ], + "columns": ["workspace_id", "id"], "name": "PRIMARY", "table": "lite", "entityType": "pks" }, { - "columns": [ - "workspace_id", - "id" - ], + "columns": ["workspace_id", "id"], "name": "PRIMARY", "table": "payment", "entityType": "pks" }, { - "columns": [ - "workspace_id", - "id" - ], + "columns": ["workspace_id", "id"], "name": "PRIMARY", "table": "subscription", "entityType": "pks" }, { - "columns": [ - "workspace_id", - "id" - ], + "columns": ["workspace_id", "id"], "name": "PRIMARY", "table": "usage", "entityType": "pks" }, { - "columns": [ - "ip", - "interval" - ], + "columns": ["ip", "interval"], "name": "PRIMARY", "table": "ip_rate_limit", "entityType": "pks" }, { - "columns": [ - "ip" - ], + "columns": ["ip"], "name": "PRIMARY", "table": "ip", "entityType": "pks" }, { - "columns": [ - "key", - "interval" - ], + "columns": ["key", "interval"], "name": "PRIMARY", "table": "key_rate_limit", "entityType": "pks" }, { - "columns": [ - "id" - ], + "columns": ["id"], "name": "PRIMARY", "table": "model_tpm_limit", "entityType": "pks" }, { - "columns": [ - "id" - ], + "columns": ["id"], "name": "PRIMARY", "table": "model_tpm_rate_limit", "entityType": "pks" }, { - "columns": [ - "workspace_id", - "id" - ], + "columns": ["workspace_id", "id"], "name": "PRIMARY", "table": "key", "entityType": "pks" }, { - "columns": [ - "workspace_id", - "id" - ], + "columns": ["workspace_id", "id"], "name": "PRIMARY", "table": "model", "entityType": "pks" }, { - "columns": [ - "workspace_id", - "id" - ], + 
"columns": ["workspace_id", "id"], "name": "PRIMARY", "table": "provider", "entityType": "pks" }, { - "columns": [ - "workspace_id", - "id" - ], + "columns": ["workspace_id", "id"], "name": "PRIMARY", "table": "user", "entityType": "pks" }, { - "columns": [ - "id" - ], + "columns": ["id"], "name": "PRIMARY", "table": "workspace", "entityType": "pks" @@ -2706,4 +2654,4 @@ } ], "renames": [] -} \ No newline at end of file +} From 6278ce51cec6afd1afa58f8b1c5a1a5372e8804e Mon Sep 17 00:00:00 2001 From: Frank Date: Mon, 20 Apr 2026 22:41:30 -0400 Subject: [PATCH 03/73] zen: tpm routing --- .../migration.sql | 3 + .../snapshot.json | 2670 +++++++++++++++++ packages/console/core/src/schema/ip.sql.ts | 2 +- 3 files changed, 2674 insertions(+), 1 deletion(-) create mode 100644 packages/console/core/migrations/20260421023950_nebulous_weapon_omega/migration.sql create mode 100644 packages/console/core/migrations/20260421023950_nebulous_weapon_omega/snapshot.json diff --git a/packages/console/core/migrations/20260421023950_nebulous_weapon_omega/migration.sql b/packages/console/core/migrations/20260421023950_nebulous_weapon_omega/migration.sql new file mode 100644 index 0000000000..d7da039b86 --- /dev/null +++ b/packages/console/core/migrations/20260421023950_nebulous_weapon_omega/migration.sql @@ -0,0 +1,3 @@ +DROP TABLE `model_tpm_limit`;--> statement-breakpoint +ALTER TABLE `model_tpm_rate_limit` DROP PRIMARY KEY;--> statement-breakpoint +ALTER TABLE `model_tpm_rate_limit` ADD PRIMARY KEY (`id`,`interval`); \ No newline at end of file diff --git a/packages/console/core/migrations/20260421023950_nebulous_weapon_omega/snapshot.json b/packages/console/core/migrations/20260421023950_nebulous_weapon_omega/snapshot.json new file mode 100644 index 0000000000..cdb198d464 --- /dev/null +++ b/packages/console/core/migrations/20260421023950_nebulous_weapon_omega/snapshot.json @@ -0,0 +1,2670 @@ +{ + "version": "6", + "dialect": "mysql", + "id": "9e2d81ba-88b4-4704-a8b6-4a27dd2bd8d9", + 
"prevIds": [ + "29e20639-1d4f-4125-bed8-70b7adaaa387" + ], + "ddl": [ + { + "name": "account", + "entityType": "tables" + }, + { + "name": "auth", + "entityType": "tables" + }, + { + "name": "benchmark", + "entityType": "tables" + }, + { + "name": "billing", + "entityType": "tables" + }, + { + "name": "coupon", + "entityType": "tables" + }, + { + "name": "lite", + "entityType": "tables" + }, + { + "name": "payment", + "entityType": "tables" + }, + { + "name": "subscription", + "entityType": "tables" + }, + { + "name": "usage", + "entityType": "tables" + }, + { + "name": "ip_rate_limit", + "entityType": "tables" + }, + { + "name": "ip", + "entityType": "tables" + }, + { + "name": "key_rate_limit", + "entityType": "tables" + }, + { + "name": "model_tpm_rate_limit", + "entityType": "tables" + }, + { + "name": "key", + "entityType": "tables" + }, + { + "name": "model", + "entityType": "tables" + }, + { + "name": "provider", + "entityType": "tables" + }, + { + "name": "user", + "entityType": "tables" + }, + { + "name": "workspace", + "entityType": "tables" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "id", + "entityType": "columns", + "table": "account" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(now())", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_created", + "entityType": "columns", + "table": "account" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3))", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_updated", + "entityType": "columns", + "table": "account" + }, + { + "type": "timestamp(3)", 
+ "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_deleted", + "entityType": "columns", + "table": "account" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "id", + "entityType": "columns", + "table": "auth" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(now())", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_created", + "entityType": "columns", + "table": "auth" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3))", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_updated", + "entityType": "columns", + "table": "auth" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_deleted", + "entityType": "columns", + "table": "auth" + }, + { + "type": "enum('email','github','google')", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "provider", + "entityType": "columns", + "table": "auth" + }, + { + "type": "varchar(255)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "subject", + "entityType": "columns", + "table": "auth" + }, + 
{ + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "account_id", + "entityType": "columns", + "table": "auth" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "id", + "entityType": "columns", + "table": "benchmark" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(now())", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_created", + "entityType": "columns", + "table": "benchmark" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3))", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_updated", + "entityType": "columns", + "table": "benchmark" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_deleted", + "entityType": "columns", + "table": "benchmark" + }, + { + "type": "varchar(64)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "model", + "entityType": "columns", + "table": "benchmark" + }, + { + "type": "varchar(64)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "agent", + "entityType": "columns", + 
"table": "benchmark" + }, + { + "type": "mediumtext", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "result", + "entityType": "columns", + "table": "benchmark" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "id", + "entityType": "columns", + "table": "billing" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "workspace_id", + "entityType": "columns", + "table": "billing" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(now())", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_created", + "entityType": "columns", + "table": "billing" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3))", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_updated", + "entityType": "columns", + "table": "billing" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_deleted", + "entityType": "columns", + "table": "billing" + }, + { + "type": "varchar(255)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "customer_id", 
+ "entityType": "columns", + "table": "billing" + }, + { + "type": "varchar(255)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "payment_method_id", + "entityType": "columns", + "table": "billing" + }, + { + "type": "varchar(32)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "payment_method_type", + "entityType": "columns", + "table": "billing" + }, + { + "type": "varchar(4)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "payment_method_last4", + "entityType": "columns", + "table": "billing" + }, + { + "type": "bigint", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "balance", + "entityType": "columns", + "table": "billing" + }, + { + "type": "int", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "monthly_limit", + "entityType": "columns", + "table": "billing" + }, + { + "type": "bigint", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "monthly_usage", + "entityType": "columns", + "table": "billing" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": 
"time_monthly_usage_updated", + "entityType": "columns", + "table": "billing" + }, + { + "type": "boolean", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "reload", + "entityType": "columns", + "table": "billing" + }, + { + "type": "int", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "reload_trigger", + "entityType": "columns", + "table": "billing" + }, + { + "type": "int", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "reload_amount", + "entityType": "columns", + "table": "billing" + }, + { + "type": "varchar(255)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "reload_error", + "entityType": "columns", + "table": "billing" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_reload_error", + "entityType": "columns", + "table": "billing" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_reload_locked_till", + "entityType": "columns", + "table": "billing" + }, + { + "type": "json", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": 
"subscription", + "entityType": "columns", + "table": "billing" + }, + { + "type": "varchar(28)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "subscription_id", + "entityType": "columns", + "table": "billing" + }, + { + "type": "enum('20','100','200')", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "subscription_plan", + "entityType": "columns", + "table": "billing" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_subscription_booked", + "entityType": "columns", + "table": "billing" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_subscription_selected", + "entityType": "columns", + "table": "billing" + }, + { + "type": "varchar(28)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "lite_subscription_id", + "entityType": "columns", + "table": "billing" + }, + { + "type": "json", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "lite", + "entityType": "columns", + "table": "billing" + }, + { + "type": "varchar(255)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + 
"generated": null, + "name": "email", + "entityType": "columns", + "table": "coupon" + }, + { + "type": "enum('BUILDATHON','GOFREEMONTH')", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "type", + "entityType": "columns", + "table": "coupon" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_redeemed", + "entityType": "columns", + "table": "coupon" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "id", + "entityType": "columns", + "table": "lite" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "workspace_id", + "entityType": "columns", + "table": "lite" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(now())", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_created", + "entityType": "columns", + "table": "lite" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3))", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_updated", + "entityType": "columns", + "table": "lite" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": 
null, + "collation": null, + "generated": null, + "name": "time_deleted", + "entityType": "columns", + "table": "lite" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "user_id", + "entityType": "columns", + "table": "lite" + }, + { + "type": "bigint", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "rolling_usage", + "entityType": "columns", + "table": "lite" + }, + { + "type": "bigint", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "weekly_usage", + "entityType": "columns", + "table": "lite" + }, + { + "type": "bigint", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "monthly_usage", + "entityType": "columns", + "table": "lite" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_rolling_updated", + "entityType": "columns", + "table": "lite" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_weekly_updated", + "entityType": "columns", + "table": "lite" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + 
"generated": null, + "name": "time_monthly_updated", + "entityType": "columns", + "table": "lite" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "id", + "entityType": "columns", + "table": "payment" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "workspace_id", + "entityType": "columns", + "table": "payment" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(now())", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_created", + "entityType": "columns", + "table": "payment" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3))", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_updated", + "entityType": "columns", + "table": "payment" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_deleted", + "entityType": "columns", + "table": "payment" + }, + { + "type": "varchar(255)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "customer_id", + "entityType": "columns", + "table": "payment" + }, + { + "type": "varchar(255)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + 
"charSet": null, + "collation": null, + "generated": null, + "name": "invoice_id", + "entityType": "columns", + "table": "payment" + }, + { + "type": "varchar(255)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "payment_id", + "entityType": "columns", + "table": "payment" + }, + { + "type": "bigint", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "amount", + "entityType": "columns", + "table": "payment" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_refunded", + "entityType": "columns", + "table": "payment" + }, + { + "type": "json", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "enrichment", + "entityType": "columns", + "table": "payment" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "id", + "entityType": "columns", + "table": "subscription" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "workspace_id", + "entityType": "columns", + "table": "subscription" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(now())", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + 
"collation": null, + "generated": null, + "name": "time_created", + "entityType": "columns", + "table": "subscription" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3))", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_updated", + "entityType": "columns", + "table": "subscription" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_deleted", + "entityType": "columns", + "table": "subscription" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "user_id", + "entityType": "columns", + "table": "subscription" + }, + { + "type": "bigint", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "rolling_usage", + "entityType": "columns", + "table": "subscription" + }, + { + "type": "bigint", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "fixed_usage", + "entityType": "columns", + "table": "subscription" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_rolling_updated", + "entityType": "columns", + "table": "subscription" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + 
"onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_fixed_updated", + "entityType": "columns", + "table": "subscription" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "id", + "entityType": "columns", + "table": "usage" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "workspace_id", + "entityType": "columns", + "table": "usage" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(now())", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_created", + "entityType": "columns", + "table": "usage" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3))", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_updated", + "entityType": "columns", + "table": "usage" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_deleted", + "entityType": "columns", + "table": "usage" + }, + { + "type": "varchar(255)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "model", + "entityType": "columns", + "table": "usage" + }, + { + "type": "varchar(255)", + "notNull": true, + "autoIncrement": 
false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "provider", + "entityType": "columns", + "table": "usage" + }, + { + "type": "int", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "input_tokens", + "entityType": "columns", + "table": "usage" + }, + { + "type": "int", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "output_tokens", + "entityType": "columns", + "table": "usage" + }, + { + "type": "int", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "reasoning_tokens", + "entityType": "columns", + "table": "usage" + }, + { + "type": "int", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "cache_read_tokens", + "entityType": "columns", + "table": "usage" + }, + { + "type": "int", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "cache_write_5m_tokens", + "entityType": "columns", + "table": "usage" + }, + { + "type": "int", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "cache_write_1h_tokens", + "entityType": "columns", + "table": "usage" + }, + { + "type": "bigint", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + 
"onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "cost", + "entityType": "columns", + "table": "usage" + }, + { + "type": "varchar(30)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "key_id", + "entityType": "columns", + "table": "usage" + }, + { + "type": "varchar(30)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "session_id", + "entityType": "columns", + "table": "usage" + }, + { + "type": "json", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "enrichment", + "entityType": "columns", + "table": "usage" + }, + { + "type": "varchar(45)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "ip", + "entityType": "columns", + "table": "ip_rate_limit" + }, + { + "type": "varchar(10)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "interval", + "entityType": "columns", + "table": "ip_rate_limit" + }, + { + "type": "int", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "count", + "entityType": "columns", + "table": "ip_rate_limit" + }, + { + "type": "varchar(45)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": 
null, + "generated": null, + "name": "ip", + "entityType": "columns", + "table": "ip" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(now())", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_created", + "entityType": "columns", + "table": "ip" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3))", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_updated", + "entityType": "columns", + "table": "ip" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_deleted", + "entityType": "columns", + "table": "ip" + }, + { + "type": "int", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "usage", + "entityType": "columns", + "table": "ip" + }, + { + "type": "varchar(255)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "key", + "entityType": "columns", + "table": "key_rate_limit" + }, + { + "type": "varchar(40)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "interval", + "entityType": "columns", + "table": "key_rate_limit" + }, + { + "type": "int", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + 
"generated": null, + "name": "count", + "entityType": "columns", + "table": "key_rate_limit" + }, + { + "type": "varchar(255)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "id", + "entityType": "columns", + "table": "model_tpm_rate_limit" + }, + { + "type": "bigint", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "interval", + "entityType": "columns", + "table": "model_tpm_rate_limit" + }, + { + "type": "int", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "count", + "entityType": "columns", + "table": "model_tpm_rate_limit" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "id", + "entityType": "columns", + "table": "key" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "workspace_id", + "entityType": "columns", + "table": "key" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(now())", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_created", + "entityType": "columns", + "table": "key" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3))", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, 
+ "collation": null, + "generated": null, + "name": "time_updated", + "entityType": "columns", + "table": "key" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_deleted", + "entityType": "columns", + "table": "key" + }, + { + "type": "varchar(255)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "name", + "entityType": "columns", + "table": "key" + }, + { + "type": "varchar(255)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "key", + "entityType": "columns", + "table": "key" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "user_id", + "entityType": "columns", + "table": "key" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_used", + "entityType": "columns", + "table": "key" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "id", + "entityType": "columns", + "table": "model" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "workspace_id", + 
"entityType": "columns", + "table": "model" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(now())", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_created", + "entityType": "columns", + "table": "model" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3))", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_updated", + "entityType": "columns", + "table": "model" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_deleted", + "entityType": "columns", + "table": "model" + }, + { + "type": "varchar(64)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "model", + "entityType": "columns", + "table": "model" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "id", + "entityType": "columns", + "table": "provider" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "workspace_id", + "entityType": "columns", + "table": "provider" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(now())", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + 
"name": "time_created", + "entityType": "columns", + "table": "provider" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3))", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_updated", + "entityType": "columns", + "table": "provider" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_deleted", + "entityType": "columns", + "table": "provider" + }, + { + "type": "varchar(64)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "provider", + "entityType": "columns", + "table": "provider" + }, + { + "type": "text", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "credentials", + "entityType": "columns", + "table": "provider" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "id", + "entityType": "columns", + "table": "user" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "workspace_id", + "entityType": "columns", + "table": "user" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(now())", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + 
"generated": null, + "name": "time_created", + "entityType": "columns", + "table": "user" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3))", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_updated", + "entityType": "columns", + "table": "user" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_deleted", + "entityType": "columns", + "table": "user" + }, + { + "type": "varchar(30)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "account_id", + "entityType": "columns", + "table": "user" + }, + { + "type": "varchar(255)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "email", + "entityType": "columns", + "table": "user" + }, + { + "type": "varchar(255)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "name", + "entityType": "columns", + "table": "user" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_seen", + "entityType": "columns", + "table": "user" + }, + { + "type": "int", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + 
"generated": null, + "name": "color", + "entityType": "columns", + "table": "user" + }, + { + "type": "enum('admin','member')", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "role", + "entityType": "columns", + "table": "user" + }, + { + "type": "int", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "monthly_limit", + "entityType": "columns", + "table": "user" + }, + { + "type": "bigint", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "monthly_usage", + "entityType": "columns", + "table": "user" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_monthly_usage_updated", + "entityType": "columns", + "table": "user" + }, + { + "type": "varchar(30)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "id", + "entityType": "columns", + "table": "workspace" + }, + { + "type": "varchar(255)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "slug", + "entityType": "columns", + "table": "workspace" + }, + { + "type": "varchar(255)", + "notNull": true, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "name", + 
"entityType": "columns", + "table": "workspace" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(now())", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_created", + "entityType": "columns", + "table": "workspace" + }, + { + "type": "timestamp(3)", + "notNull": true, + "autoIncrement": false, + "default": "(CURRENT_TIMESTAMP(3) ON UPDATE CURRENT_TIMESTAMP(3))", + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_updated", + "entityType": "columns", + "table": "workspace" + }, + { + "type": "timestamp(3)", + "notNull": false, + "autoIncrement": false, + "default": null, + "onUpdateNow": false, + "onUpdateNowFsp": null, + "charSet": null, + "collation": null, + "generated": null, + "name": "time_deleted", + "entityType": "columns", + "table": "workspace" + }, + { + "columns": [ + "id" + ], + "name": "PRIMARY", + "table": "account", + "entityType": "pks" + }, + { + "columns": [ + "id" + ], + "name": "PRIMARY", + "table": "auth", + "entityType": "pks" + }, + { + "columns": [ + "id" + ], + "name": "PRIMARY", + "table": "benchmark", + "entityType": "pks" + }, + { + "columns": [ + "workspace_id", + "id" + ], + "name": "PRIMARY", + "table": "billing", + "entityType": "pks" + }, + { + "columns": [ + "email", + "type" + ], + "name": "PRIMARY", + "table": "coupon", + "entityType": "pks" + }, + { + "columns": [ + "workspace_id", + "id" + ], + "name": "PRIMARY", + "table": "lite", + "entityType": "pks" + }, + { + "columns": [ + "workspace_id", + "id" + ], + "name": "PRIMARY", + "table": "payment", + "entityType": "pks" + }, + { + "columns": [ + "workspace_id", + "id" + ], + "name": "PRIMARY", + "table": "subscription", + "entityType": "pks" + }, + { + "columns": [ + "workspace_id", + "id" + ], + "name": "PRIMARY", + "table": "usage", + "entityType": "pks" + }, + { + "columns": [ + 
"ip", + "interval" + ], + "name": "PRIMARY", + "table": "ip_rate_limit", + "entityType": "pks" + }, + { + "columns": [ + "ip" + ], + "name": "PRIMARY", + "table": "ip", + "entityType": "pks" + }, + { + "columns": [ + "key", + "interval" + ], + "name": "PRIMARY", + "table": "key_rate_limit", + "entityType": "pks" + }, + { + "columns": [ + "id", + "interval" + ], + "name": "PRIMARY", + "table": "model_tpm_rate_limit", + "entityType": "pks" + }, + { + "columns": [ + "workspace_id", + "id" + ], + "name": "PRIMARY", + "table": "key", + "entityType": "pks" + }, + { + "columns": [ + "workspace_id", + "id" + ], + "name": "PRIMARY", + "table": "model", + "entityType": "pks" + }, + { + "columns": [ + "workspace_id", + "id" + ], + "name": "PRIMARY", + "table": "provider", + "entityType": "pks" + }, + { + "columns": [ + "workspace_id", + "id" + ], + "name": "PRIMARY", + "table": "user", + "entityType": "pks" + }, + { + "columns": [ + "id" + ], + "name": "PRIMARY", + "table": "workspace", + "entityType": "pks" + }, + { + "columns": [ + { + "value": "provider", + "isExpression": false + }, + { + "value": "subject", + "isExpression": false + } + ], + "isUnique": true, + "using": null, + "algorithm": null, + "lock": null, + "nameExplicit": true, + "name": "provider", + "entityType": "indexes", + "table": "auth" + }, + { + "columns": [ + { + "value": "account_id", + "isExpression": false + } + ], + "isUnique": false, + "using": null, + "algorithm": null, + "lock": null, + "nameExplicit": true, + "name": "account_id", + "entityType": "indexes", + "table": "auth" + }, + { + "columns": [ + { + "value": "time_created", + "isExpression": false + } + ], + "isUnique": false, + "using": null, + "algorithm": null, + "lock": null, + "nameExplicit": true, + "name": "time_created", + "entityType": "indexes", + "table": "benchmark" + }, + { + "columns": [ + { + "value": "customer_id", + "isExpression": false + } + ], + "isUnique": true, + "using": null, + "algorithm": null, + "lock": null, + 
"nameExplicit": true, + "name": "global_customer_id", + "entityType": "indexes", + "table": "billing" + }, + { + "columns": [ + { + "value": "subscription_id", + "isExpression": false + } + ], + "isUnique": true, + "using": null, + "algorithm": null, + "lock": null, + "nameExplicit": true, + "name": "global_subscription_id", + "entityType": "indexes", + "table": "billing" + }, + { + "columns": [ + { + "value": "workspace_id", + "isExpression": false + }, + { + "value": "user_id", + "isExpression": false + } + ], + "isUnique": true, + "using": null, + "algorithm": null, + "lock": null, + "nameExplicit": true, + "name": "workspace_user_id", + "entityType": "indexes", + "table": "lite" + }, + { + "columns": [ + { + "value": "workspace_id", + "isExpression": false + }, + { + "value": "user_id", + "isExpression": false + } + ], + "isUnique": true, + "using": null, + "algorithm": null, + "lock": null, + "nameExplicit": true, + "name": "workspace_user_id", + "entityType": "indexes", + "table": "subscription" + }, + { + "columns": [ + { + "value": "workspace_id", + "isExpression": false + }, + { + "value": "time_created", + "isExpression": false + } + ], + "isUnique": false, + "using": null, + "algorithm": null, + "lock": null, + "nameExplicit": true, + "name": "usage_time_created", + "entityType": "indexes", + "table": "usage" + }, + { + "columns": [ + { + "value": "key", + "isExpression": false + } + ], + "isUnique": true, + "using": null, + "algorithm": null, + "lock": null, + "nameExplicit": true, + "name": "global_key", + "entityType": "indexes", + "table": "key" + }, + { + "columns": [ + { + "value": "workspace_id", + "isExpression": false + }, + { + "value": "model", + "isExpression": false + } + ], + "isUnique": true, + "using": null, + "algorithm": null, + "lock": null, + "nameExplicit": true, + "name": "model_workspace_model", + "entityType": "indexes", + "table": "model" + }, + { + "columns": [ + { + "value": "workspace_id", + "isExpression": false + }, + { + 
"value": "provider", + "isExpression": false + } + ], + "isUnique": true, + "using": null, + "algorithm": null, + "lock": null, + "nameExplicit": true, + "name": "workspace_provider", + "entityType": "indexes", + "table": "provider" + }, + { + "columns": [ + { + "value": "workspace_id", + "isExpression": false + }, + { + "value": "account_id", + "isExpression": false + } + ], + "isUnique": true, + "using": null, + "algorithm": null, + "lock": null, + "nameExplicit": true, + "name": "user_account_id", + "entityType": "indexes", + "table": "user" + }, + { + "columns": [ + { + "value": "workspace_id", + "isExpression": false + }, + { + "value": "email", + "isExpression": false + } + ], + "isUnique": true, + "using": null, + "algorithm": null, + "lock": null, + "nameExplicit": true, + "name": "user_email", + "entityType": "indexes", + "table": "user" + }, + { + "columns": [ + { + "value": "account_id", + "isExpression": false + } + ], + "isUnique": false, + "using": null, + "algorithm": null, + "lock": null, + "nameExplicit": true, + "name": "global_account_id", + "entityType": "indexes", + "table": "user" + }, + { + "columns": [ + { + "value": "email", + "isExpression": false + } + ], + "isUnique": false, + "using": null, + "algorithm": null, + "lock": null, + "nameExplicit": true, + "name": "global_email", + "entityType": "indexes", + "table": "user" + }, + { + "columns": [ + { + "value": "slug", + "isExpression": false + } + ], + "isUnique": true, + "using": null, + "algorithm": null, + "lock": null, + "nameExplicit": true, + "name": "slug", + "entityType": "indexes", + "table": "workspace" + } + ], + "renames": [] +} \ No newline at end of file diff --git a/packages/console/core/src/schema/ip.sql.ts b/packages/console/core/src/schema/ip.sql.ts index 336ec60dfd..94087abe52 100644 --- a/packages/console/core/src/schema/ip.sql.ts +++ b/packages/console/core/src/schema/ip.sql.ts @@ -38,5 +38,5 @@ export const ModelTpmRateLimitTable = mysqlTable( interval: 
bigint("interval", { mode: "number" }).notNull(), count: int("count").notNull(), }, - (table) => [primaryKey({ columns: [table.id] })], + (table) => [primaryKey({ columns: [table.id, table.interval] })], ) From a38d53fe2f056e55347861b87f349264e7abec48 Mon Sep 17 00:00:00 2001 From: "opencode-agent[bot]" Date: Tue, 21 Apr 2026 02:42:45 +0000 Subject: [PATCH 04/73] chore: generate --- .../snapshot.json | 91 ++++--------------- 1 file changed, 20 insertions(+), 71 deletions(-) diff --git a/packages/console/core/migrations/20260421023950_nebulous_weapon_omega/snapshot.json b/packages/console/core/migrations/20260421023950_nebulous_weapon_omega/snapshot.json index cdb198d464..351c19d000 100644 --- a/packages/console/core/migrations/20260421023950_nebulous_weapon_omega/snapshot.json +++ b/packages/console/core/migrations/20260421023950_nebulous_weapon_omega/snapshot.json @@ -2,9 +2,7 @@ "version": "6", "dialect": "mysql", "id": "9e2d81ba-88b4-4704-a8b6-4a27dd2bd8d9", - "prevIds": [ - "29e20639-1d4f-4125-bed8-70b7adaaa387" - ], + "prevIds": ["29e20639-1d4f-4125-bed8-70b7adaaa387"], "ddl": [ { "name": "account", @@ -2221,158 +2219,109 @@ "table": "workspace" }, { - "columns": [ - "id" - ], + "columns": ["id"], "name": "PRIMARY", "table": "account", "entityType": "pks" }, { - "columns": [ - "id" - ], + "columns": ["id"], "name": "PRIMARY", "table": "auth", "entityType": "pks" }, { - "columns": [ - "id" - ], + "columns": ["id"], "name": "PRIMARY", "table": "benchmark", "entityType": "pks" }, { - "columns": [ - "workspace_id", - "id" - ], + "columns": ["workspace_id", "id"], "name": "PRIMARY", "table": "billing", "entityType": "pks" }, { - "columns": [ - "email", - "type" - ], + "columns": ["email", "type"], "name": "PRIMARY", "table": "coupon", "entityType": "pks" }, { - "columns": [ - "workspace_id", - "id" - ], + "columns": ["workspace_id", "id"], "name": "PRIMARY", "table": "lite", "entityType": "pks" }, { - "columns": [ - "workspace_id", - "id" - ], + "columns": 
["workspace_id", "id"], "name": "PRIMARY", "table": "payment", "entityType": "pks" }, { - "columns": [ - "workspace_id", - "id" - ], + "columns": ["workspace_id", "id"], "name": "PRIMARY", "table": "subscription", "entityType": "pks" }, { - "columns": [ - "workspace_id", - "id" - ], + "columns": ["workspace_id", "id"], "name": "PRIMARY", "table": "usage", "entityType": "pks" }, { - "columns": [ - "ip", - "interval" - ], + "columns": ["ip", "interval"], "name": "PRIMARY", "table": "ip_rate_limit", "entityType": "pks" }, { - "columns": [ - "ip" - ], + "columns": ["ip"], "name": "PRIMARY", "table": "ip", "entityType": "pks" }, { - "columns": [ - "key", - "interval" - ], + "columns": ["key", "interval"], "name": "PRIMARY", "table": "key_rate_limit", "entityType": "pks" }, { - "columns": [ - "id", - "interval" - ], + "columns": ["id", "interval"], "name": "PRIMARY", "table": "model_tpm_rate_limit", "entityType": "pks" }, { - "columns": [ - "workspace_id", - "id" - ], + "columns": ["workspace_id", "id"], "name": "PRIMARY", "table": "key", "entityType": "pks" }, { - "columns": [ - "workspace_id", - "id" - ], + "columns": ["workspace_id", "id"], "name": "PRIMARY", "table": "model", "entityType": "pks" }, { - "columns": [ - "workspace_id", - "id" - ], + "columns": ["workspace_id", "id"], "name": "PRIMARY", "table": "provider", "entityType": "pks" }, { - "columns": [ - "workspace_id", - "id" - ], + "columns": ["workspace_id", "id"], "name": "PRIMARY", "table": "user", "entityType": "pks" }, { - "columns": [ - "id" - ], + "columns": ["id"], "name": "PRIMARY", "table": "workspace", "entityType": "pks" @@ -2667,4 +2616,4 @@ } ], "renames": [] -} \ No newline at end of file +} From e5687d646ce33b5c05bb007bf14cf5362676733b Mon Sep 17 00:00:00 2001 From: Brendan Allan <14191578+Brendonovich@users.noreply.github.com> Date: Tue, 21 Apr 2026 12:36:56 +0800 Subject: [PATCH 05/73] electron: use custom oc:// protocol for renderer windows (#23516) --- 
packages/desktop-electron/src/main/index.ts | 3 +- packages/desktop-electron/src/main/server.ts | 1 + packages/desktop-electron/src/main/windows.ts | 41 +++++++++++++++++-- .../src/renderer/html.test.ts | 6 +-- 4 files changed, 43 insertions(+), 8 deletions(-) diff --git a/packages/desktop-electron/src/main/index.ts b/packages/desktop-electron/src/main/index.ts index 6c4e6d5ca1..8ec39c3c9c 100644 --- a/packages/desktop-electron/src/main/index.ts +++ b/packages/desktop-electron/src/main/index.ts @@ -42,7 +42,7 @@ import { initLogging } from "./logging" import { parseMarkdown } from "./markdown" import { createMenu } from "./menu" import { getDefaultServerUrl, getWslConfig, setDefaultServerUrl, setWslConfig, spawnLocalServer } from "./server" -import { createLoadingWindow, createMainWindow, setBackgroundColor, setDockIcon } from "./windows" +import { createLoadingWindow, createMainWindow, registerRendererProtocol, setBackgroundColor, setDockIcon } from "./windows" import { drizzle } from "drizzle-orm/node-sqlite/driver" import type { Server } from "virtual:opencode-server" @@ -106,6 +106,7 @@ function setupApp() { void app.whenReady().then(async () => { app.setAsDefaultProtocolClient("opencode") + registerRendererProtocol() setDockIcon() setupAutoUpdater() await initialize() diff --git a/packages/desktop-electron/src/main/server.ts b/packages/desktop-electron/src/main/server.ts index 55dfdf6e9b..83d50f7cb6 100644 --- a/packages/desktop-electron/src/main/server.ts +++ b/packages/desktop-electron/src/main/server.ts @@ -39,6 +39,7 @@ export async function spawnLocalServer(hostname: string, port: number, password: hostname, username: "opencode", password, + cors: ["oc://renderer"], }) const wait = (async () => { diff --git a/packages/desktop-electron/src/main/windows.ts b/packages/desktop-electron/src/main/windows.ts index 95f80c1240..892e9d40d1 100644 --- a/packages/desktop-electron/src/main/windows.ts +++ b/packages/desktop-electron/src/main/windows.ts @@ -1,7 +1,7 @@ 
import windowState from "electron-window-state" -import { app, BrowserWindow, nativeImage, nativeTheme } from "electron" -import { dirname, join } from "node:path" -import { fileURLToPath } from "node:url" +import { app, BrowserWindow, net, nativeImage, nativeTheme, protocol } from "electron" +import { dirname, isAbsolute, join, relative, resolve } from "node:path" +import { fileURLToPath, pathToFileURL } from "node:url" import type { TitlebarTheme } from "../preload/types" type Globals = { @@ -10,6 +10,20 @@ type Globals = { } const root = dirname(fileURLToPath(import.meta.url)) +const rendererRoot = join(root, "../renderer") +const rendererProtocol = "oc" +const rendererHost = "renderer" + +protocol.registerSchemesAsPrivileged([ + { + scheme: rendererProtocol, + privileges: { + secure: true, + standard: true, + supportFetchAPI: true, + }, + }, +]) let backgroundColor: string | undefined @@ -131,6 +145,25 @@ export function createLoadingWindow(globals: Globals) { return win } +export function registerRendererProtocol() { + if (protocol.isProtocolHandled(rendererProtocol)) return + + protocol.handle(rendererProtocol, (request) => { + const url = new URL(request.url) + if (url.host !== rendererHost) { + return new Response("Not found", { status: 404 }) + } + + const file = resolve(rendererRoot, `.${decodeURIComponent(url.pathname)}`) + const rel = relative(rendererRoot, file) + if (rel.startsWith("..") || isAbsolute(rel)) { + return new Response("Not found", { status: 404 }) + } + + return net.fetch(pathToFileURL(file).toString()) + }) +} + function loadWindow(win: BrowserWindow, html: string) { const devUrl = process.env.ELECTRON_RENDERER_URL if (devUrl) { @@ -139,7 +172,7 @@ function loadWindow(win: BrowserWindow, html: string) { return } - void win.loadFile(join(root, `../renderer/${html}`)) + void win.loadURL(`${rendererProtocol}://${rendererHost}/${html}`) } function injectGlobals(win: BrowserWindow, globals: Globals) { diff --git 
a/packages/desktop-electron/src/renderer/html.test.ts b/packages/desktop-electron/src/renderer/html.test.ts index bd8281c2fb..1fc5c87178 100644 --- a/packages/desktop-electron/src/renderer/html.test.ts +++ b/packages/desktop-electron/src/renderer/html.test.ts @@ -9,9 +9,9 @@ const root = resolve(dir, "../..") const html = async (name: string) => Bun.file(join(dir, name)).text() /** - * Electron loads renderer HTML via `win.loadFile()` which uses the `file://` - * protocol. Absolute paths like `src="/foo.js"` resolve to the filesystem root - * (e.g. `file:///C:/foo.js` on Windows) instead of relative to the app bundle. + * Packaged Electron windows load renderer HTML via the privileged `oc://` + * protocol. Root-relative asset paths like `src="/foo.js"` would resolve from + * the protocol origin root instead of relative to the current HTML entrypoint. * * All local resource references must use relative paths (`./`). */ From 4964ce480c566a98b2b4ead4a6e163eb773c2b80 Mon Sep 17 00:00:00 2001 From: "opencode-agent[bot]" Date: Tue, 21 Apr 2026 04:37:57 +0000 Subject: [PATCH 06/73] chore: generate --- packages/desktop-electron/src/main/index.ts | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/packages/desktop-electron/src/main/index.ts b/packages/desktop-electron/src/main/index.ts index 8ec39c3c9c..8a826bd273 100644 --- a/packages/desktop-electron/src/main/index.ts +++ b/packages/desktop-electron/src/main/index.ts @@ -42,7 +42,13 @@ import { initLogging } from "./logging" import { parseMarkdown } from "./markdown" import { createMenu } from "./menu" import { getDefaultServerUrl, getWslConfig, setDefaultServerUrl, setWslConfig, spawnLocalServer } from "./server" -import { createLoadingWindow, createMainWindow, registerRendererProtocol, setBackgroundColor, setDockIcon } from "./windows" +import { + createLoadingWindow, + createMainWindow, + registerRendererProtocol, + setBackgroundColor, + setDockIcon, +} from "./windows" import { drizzle } from 
"drizzle-orm/node-sqlite/driver" import type { Server } from "virtual:opencode-server" From eb9906420fa8def2520b1b4950a9175af9116ea2 Mon Sep 17 00:00:00 2001 From: Brendan Allan <14191578+Brendonovich@users.noreply.github.com> Date: Tue, 21 Apr 2026 12:38:59 +0800 Subject: [PATCH 07/73] refactor(desktop-electron): enable contextIsolation and sandbox (#23523) --- .../desktop-electron/electron.vite.config.ts | 4 ++ packages/desktop-electron/src/main/index.ts | 11 ++---- packages/desktop-electron/src/main/ipc.ts | 13 ++++++- packages/desktop-electron/src/main/menu.ts | 2 +- packages/desktop-electron/src/main/windows.ts | 37 +++++-------------- .../desktop-electron/src/preload/index.ts | 2 + .../desktop-electron/src/preload/types.ts | 6 +++ .../desktop-electron/src/renderer/env.d.ts | 2 - .../desktop-electron/src/renderer/index.tsx | 30 ++++++++------- .../desktop-electron/src/renderer/updater.ts | 2 - 10 files changed, 55 insertions(+), 54 deletions(-) diff --git a/packages/desktop-electron/electron.vite.config.ts b/packages/desktop-electron/electron.vite.config.ts index d0e6c42b6c..f28c7b6c18 100644 --- a/packages/desktop-electron/electron.vite.config.ts +++ b/packages/desktop-electron/electron.vite.config.ts @@ -53,6 +53,10 @@ export default defineConfig({ build: { rollupOptions: { input: { index: "src/preload/index.ts" }, + output: { + format: "cjs", + entryFileNames: "[name].js", + }, }, }, }, diff --git a/packages/desktop-electron/src/main/index.ts b/packages/desktop-electron/src/main/index.ts index 8a826bd273..ae9f581186 100644 --- a/packages/desktop-electron/src/main/index.ts +++ b/packages/desktop-electron/src/main/index.ts @@ -195,15 +195,10 @@ async function initialize() { logger.log("loading task finished") })() - const globals = { - updaterEnabled: UPDATER_ENABLED, - deepLinks: pendingDeepLinks, - } - if (needsMigration) { const show = await Promise.race([loadingTask.then(() => false), delay(1_000).then(() => true)]) if (show) { - overlay = 
createLoadingWindow(globals) + overlay = createLoadingWindow() await delay(1_000) } } @@ -215,7 +210,7 @@ async function initialize() { await loadingComplete.promise } - mainWindow = createMainWindow(globals) + mainWindow = createMainWindow() wireMenu() overlay?.close() @@ -252,6 +247,8 @@ registerIpcHandlers({ initEmitter.off("step", listener) } }, + getWindowConfig: () => ({ updaterEnabled: UPDATER_ENABLED }), + consumeInitialDeepLinks: () => pendingDeepLinks.splice(0), getDefaultServerUrl: () => getDefaultServerUrl(), setDefaultServerUrl: (url) => setDefaultServerUrl(url), getWslConfig: () => Promise.resolve(getWslConfig()), diff --git a/packages/desktop-electron/src/main/ipc.ts b/packages/desktop-electron/src/main/ipc.ts index 52d87ed7ee..8dbca8eea1 100644 --- a/packages/desktop-electron/src/main/ipc.ts +++ b/packages/desktop-electron/src/main/ipc.ts @@ -2,7 +2,14 @@ import { execFile } from "node:child_process" import { BrowserWindow, Notification, app, clipboard, dialog, ipcMain, shell } from "electron" import type { IpcMainEvent, IpcMainInvokeEvent } from "electron" -import type { InitStep, ServerReadyData, SqliteMigrationProgress, TitlebarTheme, WslConfig } from "../preload/types" +import type { + InitStep, + ServerReadyData, + SqliteMigrationProgress, + TitlebarTheme, + WindowConfig, + WslConfig, +} from "../preload/types" import { getStore } from "./store" import { setTitlebar } from "./windows" @@ -14,6 +21,8 @@ const pickerFilters = (ext?: string[]) => { type Deps = { killSidecar: () => void awaitInitialization: (sendStep: (step: InitStep) => void) => Promise + getWindowConfig: () => Promise | WindowConfig + consumeInitialDeepLinks: () => Promise | string[] getDefaultServerUrl: () => Promise | string | null setDefaultServerUrl: (url: string | null) => Promise | void getWslConfig: () => Promise @@ -37,6 +46,8 @@ export function registerIpcHandlers(deps: Deps) { const send = (step: InitStep) => event.sender.send("init-step", step) return 
deps.awaitInitialization(send) }) + ipcMain.handle("get-window-config", () => deps.getWindowConfig()) + ipcMain.handle("consume-initial-deep-links", () => deps.consumeInitialDeepLinks()) ipcMain.handle("get-default-server-url", () => deps.getDefaultServerUrl()) ipcMain.handle("set-default-server-url", (_event: IpcMainInvokeEvent, url: string | null) => deps.setDefaultServerUrl(url), diff --git a/packages/desktop-electron/src/main/menu.ts b/packages/desktop-electron/src/main/menu.ts index fcf209fb67..0d9a697fa9 100644 --- a/packages/desktop-electron/src/main/menu.ts +++ b/packages/desktop-electron/src/main/menu.ts @@ -47,7 +47,7 @@ export function createMenu(deps: Deps) { { label: "New Window", accelerator: "Cmd+Shift+N", - click: () => createMainWindow({ updaterEnabled: UPDATER_ENABLED }), + click: () => createMainWindow(), }, { type: "separator" }, { role: "close" }, diff --git a/packages/desktop-electron/src/main/windows.ts b/packages/desktop-electron/src/main/windows.ts index 892e9d40d1..df55e8da2f 100644 --- a/packages/desktop-electron/src/main/windows.ts +++ b/packages/desktop-electron/src/main/windows.ts @@ -4,11 +4,6 @@ import { dirname, isAbsolute, join, relative, resolve } from "node:path" import { fileURLToPath, pathToFileURL } from "node:url" import type { TitlebarTheme } from "../preload/types" -type Globals = { - updaterEnabled: boolean - deepLinks?: string[] -} - const root = dirname(fileURLToPath(import.meta.url)) const rendererRoot = join(root, "../renderer") const rendererProtocol = "oc" @@ -68,7 +63,7 @@ export function setDockIcon() { if (!icon.isEmpty()) app.dock?.setIcon(icon) } -export function createMainWindow(globals: Globals) { +export function createMainWindow() { const state = windowState({ defaultWidth: 1280, defaultHeight: 800, @@ -98,15 +93,16 @@ export function createMainWindow(globals: Globals) { } : {}), webPreferences: { - preload: join(root, "../preload/index.mjs"), - sandbox: false, + preload: join(root, "../preload/index.js"), + 
contextIsolation: true, + nodeIntegration: false, + sandbox: true, }, }) state.manage(win) loadWindow(win, "index.html") wireZoom(win) - injectGlobals(win, globals) win.once("ready-to-show", () => { win.show() @@ -115,7 +111,7 @@ export function createMainWindow(globals: Globals) { return win } -export function createLoadingWindow(globals: Globals) { +export function createLoadingWindow() { const mode = tone() const win = new BrowserWindow({ width: 640, @@ -134,13 +130,14 @@ export function createLoadingWindow(globals: Globals) { } : {}), webPreferences: { - preload: join(root, "../preload/index.mjs"), - sandbox: false, + preload: join(root, "../preload/index.js"), + contextIsolation: true, + nodeIntegration: false, + sandbox: true, }, }) loadWindow(win, "loading.html") - injectGlobals(win, globals) return win } @@ -174,20 +171,6 @@ function loadWindow(win: BrowserWindow, html: string) { void win.loadURL(`${rendererProtocol}://${rendererHost}/${html}`) } - -function injectGlobals(win: BrowserWindow, globals: Globals) { - win.webContents.on("dom-ready", () => { - const deepLinks = globals.deepLinks ?? [] - const data = { - updaterEnabled: globals.updaterEnabled, - deepLinks: Array.isArray(deepLinks) ? deepLinks.splice(0) : deepLinks, - } - void win.webContents.executeJavaScript( - `window.__OPENCODE__ = Object.assign(window.__OPENCODE__ ?? 
{}, ${JSON.stringify(data)})`, - ) - }) -} - function wireZoom(win: BrowserWindow) { win.webContents.setZoomFactor(1) win.webContents.on("zoom-changed", () => { diff --git a/packages/desktop-electron/src/preload/index.ts b/packages/desktop-electron/src/preload/index.ts index 296fcb2f1c..6261419ca5 100644 --- a/packages/desktop-electron/src/preload/index.ts +++ b/packages/desktop-electron/src/preload/index.ts @@ -11,6 +11,8 @@ const api: ElectronAPI = { ipcRenderer.removeListener("init-step", handler) }) }, + getWindowConfig: () => ipcRenderer.invoke("get-window-config"), + consumeInitialDeepLinks: () => ipcRenderer.invoke("consume-initial-deep-links"), getDefaultServerUrl: () => ipcRenderer.invoke("get-default-server-url"), setDefaultServerUrl: (url) => ipcRenderer.invoke("set-default-server-url", url), getWslConfig: () => ipcRenderer.invoke("get-wsl-config"), diff --git a/packages/desktop-electron/src/preload/types.ts b/packages/desktop-electron/src/preload/types.ts index f8e6d52c7d..6e22954d18 100644 --- a/packages/desktop-electron/src/preload/types.ts +++ b/packages/desktop-electron/src/preload/types.ts @@ -15,10 +15,16 @@ export type TitlebarTheme = { mode: "light" | "dark" } +export type WindowConfig = { + updaterEnabled: boolean +} + export type ElectronAPI = { killSidecar: () => Promise installCli: () => Promise awaitInitialization: (onStep: (step: InitStep) => void) => Promise + getWindowConfig: () => Promise + consumeInitialDeepLinks: () => Promise getDefaultServerUrl: () => Promise setDefaultServerUrl: (url: string | null) => Promise getWslConfig: () => Promise diff --git a/packages/desktop-electron/src/renderer/env.d.ts b/packages/desktop-electron/src/renderer/env.d.ts index d1590ff048..6dff3baf1c 100644 --- a/packages/desktop-electron/src/renderer/env.d.ts +++ b/packages/desktop-electron/src/renderer/env.d.ts @@ -4,8 +4,6 @@ declare global { interface Window { api: ElectronAPI __OPENCODE__?: { - updaterEnabled?: boolean - wsl?: boolean deepLinks?: 
string[] } } diff --git a/packages/desktop-electron/src/renderer/index.tsx b/packages/desktop-electron/src/renderer/index.tsx index 44f2e6360c..843863290f 100644 --- a/packages/desktop-electron/src/renderer/index.tsx +++ b/packages/desktop-electron/src/renderer/index.tsx @@ -20,7 +20,6 @@ import { createEffect, createResource, onCleanup, onMount, Show } from "solid-js import { render } from "solid-js/web" import pkg from "../../package.json" import { initI18n, t } from "./i18n" -import { UPDATER_ENABLED } from "./updater" import { webviewZoom } from "./webview-zoom" import "./styles.css" import { useTheme } from "@opencode-ai/ui/theme" @@ -43,8 +42,7 @@ const emitDeepLinks = (urls: string[]) => { } const listenForDeepLinks = () => { - const startUrls = window.__OPENCODE__?.deepLinks ?? [] - if (startUrls.length) emitDeepLinks(startUrls) + void window.api.consumeInitialDeepLinks().then((urls) => emitDeepLinks(urls)) return window.api.onDeepLink((urls) => emitDeepLinks(urls)) } @@ -57,13 +55,18 @@ const createPlatform = (): Platform => { return undefined })() + const isWslEnabled = async () => { + if (os !== "windows") return false + return window.api.getWslConfig().then((config) => config.enabled).catch(() => false) + } + const wslHome = async () => { - if (os !== "windows" || !window.__OPENCODE__?.wsl) return undefined + if (!(await isWslEnabled())) return undefined return window.api.wslPath("~", "windows").catch(() => undefined) } const handleWslPicker = async (result: T | null): Promise => { - if (!result || !window.__OPENCODE__?.wsl) return result + if (!result || !(await isWslEnabled())) return result if (Array.isArray(result)) { return Promise.all(result.map((path) => window.api.wslPath(path, "linux").catch(() => path))) as any } @@ -137,7 +140,7 @@ const createPlatform = (): Platform => { if (os === "windows") { const resolvedApp = app ? 
await window.api.resolveAppPath(app).catch(() => null) : null const resolvedPath = await (async () => { - if (window.__OPENCODE__?.wsl) { + if (await isWslEnabled()) { const converted = await window.api.wslPath(path, "windows").catch(() => null) if (converted) return converted } @@ -159,12 +162,14 @@ const createPlatform = (): Platform => { storage, checkUpdate: async () => { - if (!UPDATER_ENABLED()) return { updateAvailable: false } + const config = await window.api.getWindowConfig().catch(() => ({ updaterEnabled: false })) + if (!config.updaterEnabled) return { updateAvailable: false } return window.api.checkUpdate() }, update: async () => { - if (!UPDATER_ENABLED()) return + const config = await window.api.getWindowConfig().catch(() => ({ updaterEnabled: false })) + if (!config.updaterEnabled) return await window.api.installUpdate() }, @@ -194,11 +199,7 @@ const createPlatform = (): Platform => { return fetch(input, init) }, - getWslEnabled: async () => { - const next = await window.api.getWslConfig().catch(() => null) - if (next) return next.enabled - return window.__OPENCODE__!.wsl ?? false - }, + getWslEnabled: () => isWslEnabled(), setWslEnabled: async (enabled) => { await window.api.setWslConfig({ enabled }) @@ -249,6 +250,7 @@ listenForDeepLinks() render(() => { const platform = createPlatform() + const [windowConfig] = createResource(() => window.api.getWindowConfig().catch(() => ({ updaterEnabled: false }))) const loadLocale = async () => { const current = await platform.storage?.("opencode.global.dat").getItem("language") const legacy = current ? undefined : await platform.storage?.().getItem("language.v1") @@ -325,7 +327,7 @@ render(() => { return ( - + {(_) => { return ( window.__OPENCODE__?.updaterEnabled ?? 
false - export async function runUpdater({ alertOnFail }: { alertOnFail: boolean }) { await initI18n() try { From a08aa21cb35d3c112d93af5abf39237a187265d6 Mon Sep 17 00:00:00 2001 From: "opencode-agent[bot]" Date: Tue, 21 Apr 2026 04:40:02 +0000 Subject: [PATCH 08/73] chore: generate --- packages/desktop-electron/src/renderer/index.tsx | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/packages/desktop-electron/src/renderer/index.tsx b/packages/desktop-electron/src/renderer/index.tsx index 843863290f..56fe9fa513 100644 --- a/packages/desktop-electron/src/renderer/index.tsx +++ b/packages/desktop-electron/src/renderer/index.tsx @@ -57,7 +57,10 @@ const createPlatform = (): Platform => { const isWslEnabled = async () => { if (os !== "windows") return false - return window.api.getWslConfig().then((config) => config.enabled).catch(() => false) + return window.api + .getWslConfig() + .then((config) => config.enabled) + .catch(() => false) } const wslHome = async () => { @@ -327,7 +330,15 @@ render(() => { return ( - + {(_) => { return ( Date: Tue, 21 Apr 2026 01:15:07 -0400 Subject: [PATCH 09/73] zen: m2.7 & k2.6 --- packages/web/src/content/docs/ar/zen.mdx | 15 ++++----------- packages/web/src/content/docs/bs/zen.mdx | 15 ++++----------- packages/web/src/content/docs/da/zen.mdx | 15 ++++----------- packages/web/src/content/docs/de/zen.mdx | 15 ++++----------- packages/web/src/content/docs/es/zen.mdx | 15 ++++----------- packages/web/src/content/docs/fr/zen.mdx | 15 ++++----------- packages/web/src/content/docs/it/zen.mdx | 15 ++++----------- packages/web/src/content/docs/ja/zen.mdx | 15 ++++----------- packages/web/src/content/docs/ko/zen.mdx | 15 ++++----------- packages/web/src/content/docs/nb/zen.mdx | 15 ++++----------- packages/web/src/content/docs/pl/zen.mdx | 15 ++++----------- packages/web/src/content/docs/pt-br/zen.mdx | 15 ++++----------- packages/web/src/content/docs/ru/zen.mdx | 15 ++++----------- 
packages/web/src/content/docs/th/zen.mdx | 15 ++++----------- packages/web/src/content/docs/tr/zen.mdx | 15 ++++----------- packages/web/src/content/docs/zen.mdx | 2 ++ packages/web/src/content/docs/zh-cn/zen.mdx | 15 ++++----------- packages/web/src/content/docs/zh-tw/zen.mdx | 15 ++++----------- 18 files changed, 70 insertions(+), 187 deletions(-) diff --git a/packages/web/src/content/docs/ar/zen.mdx b/packages/web/src/content/docs/ar/zen.mdx index 60225a7eaa..6150c72951 100644 --- a/packages/web/src/content/docs/ar/zen.mdx +++ b/packages/web/src/content/docs/ar/zen.mdx @@ -89,13 +89,12 @@ OpenCode Zen هي بوابة AI تتيح لك الوصول إلى هذه الن | Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/alibaba` | | MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | GLM 5.1 | glm-5.1 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo V2 Pro Free | mimo-v2-pro-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo V2 Omni Free | mimo-v2-omni-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | يستخدم [معرّف 
النموذج](/docs/config/#models) في إعدادات OpenCode الصيغة `opencode/`. على سبيل المثال، بالنسبة إلى GPT 5.3 Codex، ستستخدم `opencode/gpt-5.3-codex` في إعداداتك. @@ -119,18 +118,16 @@ https://opencode.ai/zen/v1/models | النموذج | الإدخال | الإخراج | القراءة المخزنة | الكتابة المخزنة | | --------------------------------- | ------- | ------- | --------------- | --------------- | | Big Pickle | Free | Free | Free | - | -| MiMo V2 Pro Free | Free | Free | Free | - | -| MiMo V2 Omni Free | Free | Free | Free | - | -| Qwen3.6 Plus | $0.50 | $3.00 | $0.05 | $0.625 | -| Qwen3.5 Plus | $0.20 | $1.20 | $0.02 | $0.25 | | Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.5 Free | Free | Free | Free | - | +| MiniMax M2.7 | $0.30 | $1.20 | $0.06 | $0.375 | | MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 | | GLM 5.1 | $1.40 | $4.40 | $0.26 | - | | GLM 5 | $1.00 | $3.20 | $0.20 | - | | Kimi K2.5 | $0.60 | $3.00 | $0.10 | - | | Kimi K2.6 | $0.95 | $4.00 | $0.16 | - | -| Qwen3 Coder 480B | $0.45 | $1.50 | - | - | +| Qwen3.6 Plus | $0.50 | $3.00 | $0.05 | $0.625 | +| Qwen3.5 Plus | $0.20 | $1.20 | $0.02 | $0.25 | | Claude Opus 4.7 | $5.00 | $25.00 | $0.50 | $6.25 | | Claude Opus 4.6 | $5.00 | $25.00 | $0.50 | $6.25 | | Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 | @@ -170,8 +167,6 @@ https://opencode.ai/zen/v1/models النماذج المجانية: - MiniMax M2.5 Free متاح على OpenCode لفترة محدودة. يستخدم الفريق هذه الفترة لجمع الملاحظات وتحسين النموذج. -- MiMo V2 Pro Free متاح على OpenCode لفترة محدودة. يستخدم الفريق هذه الفترة لجمع الملاحظات وتحسين النموذج. -- MiMo V2 Omni Free متاح على OpenCode لفترة محدودة. يستخدم الفريق هذه الفترة لجمع الملاحظات وتحسين النموذج. - Nemotron 3 Super Free متاح على OpenCode لفترة محدودة. يستخدم الفريق هذه الفترة لجمع الملاحظات وتحسين النموذج. - Big Pickle نموذج خفي ومتاح مجانا على OpenCode لفترة محدودة. يستخدم الفريق هذه الفترة لجمع الملاحظات وتحسين النموذج. 
@@ -215,8 +210,6 @@ https://opencode.ai/zen/v1/models - Big Pickle: خلال فترته المجانية، قد تُستخدم البيانات المجمعة لتحسين النموذج. - MiniMax M2.5 Free: خلال فترته المجانية، قد تُستخدم البيانات المجمعة لتحسين النموذج. -- MiMo V2 Pro Free: خلال فترته المجانية، قد تُستخدم البيانات المجمعة لتحسين النموذج. -- MiMo V2 Omni Free: خلال فترته المجانية، قد تُستخدم البيانات المجمعة لتحسين النموذج. - Nemotron 3 Super Free (نقاط نهاية NVIDIA المجانية): يُقدَّم بموجب [شروط خدمة النسخة التجريبية من واجهة NVIDIA API](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf). للاستخدام التجريبي فقط، وليس للإنتاج أو البيانات الحساسة. تقوم NVIDIA بتسجيل المطالبات والمخرجات لتحسين نماذجها وخدماتها. لا ترسل بيانات شخصية أو سرية. - OpenAI APIs: يتم الاحتفاظ بالطلبات لمدة 30 يوما وفقا لـ [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data). - Anthropic APIs: يتم الاحتفاظ بالطلبات لمدة 30 يوما وفقا لـ [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage). diff --git a/packages/web/src/content/docs/bs/zen.mdx b/packages/web/src/content/docs/bs/zen.mdx index 18d6ae9f0b..70ac7641f4 100644 --- a/packages/web/src/content/docs/bs/zen.mdx +++ b/packages/web/src/content/docs/bs/zen.mdx @@ -94,13 +94,12 @@ Našim modelima možete pristupiti i preko sljedećih API endpointa. 
| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/alibaba` | | MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | GLM 5.1 | glm-5.1 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo V2 Pro Free | mimo-v2-pro-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo V2 Omni Free | mimo-v2-omni-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | [model id](/docs/config/#models) u vašoj OpenCode konfiguraciji koristi format @@ -126,18 +125,16 @@ Podržavamo pay-as-you-go model. Ispod su cijene **po 1M tokena**. 
| Model | Input | Output | Cached Read | Cached Write | | --------------------------------- | ------ | ------- | ----------- | ------------ | | Big Pickle | Free | Free | Free | - | -| MiMo V2 Pro Free | Free | Free | Free | - | -| MiMo V2 Omni Free | Free | Free | Free | - | -| Qwen3.6 Plus | $0.50 | $3.00 | $0.05 | $0.625 | -| Qwen3.5 Plus | $0.20 | $1.20 | $0.02 | $0.25 | | Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.5 Free | Free | Free | Free | - | +| MiniMax M2.7 | $0.30 | $1.20 | $0.06 | $0.375 | | MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 | | GLM 5.1 | $1.40 | $4.40 | $0.26 | - | | GLM 5 | $1.00 | $3.20 | $0.20 | - | | Kimi K2.5 | $0.60 | $3.00 | $0.10 | - | | Kimi K2.6 | $0.95 | $4.00 | $0.16 | - | -| Qwen3 Coder 480B | $0.45 | $1.50 | - | - | +| Qwen3.6 Plus | $0.50 | $3.00 | $0.05 | $0.625 | +| Qwen3.5 Plus | $0.20 | $1.20 | $0.02 | $0.25 | | Claude Opus 4.7 | $5.00 | $25.00 | $0.50 | $6.25 | | Claude Opus 4.6 | $5.00 | $25.00 | $0.50 | $6.25 | | Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 | @@ -177,8 +174,6 @@ Naknade za kreditne kartice prosljeđujemo po stvarnom trošku (4.4% + $0.30 po Besplatni modeli: - MiniMax M2.5 Free je dostupan na OpenCode ograničeno vrijeme. Tim koristi ovo vrijeme da prikupi povratne informacije i poboljša model. -- MiMo V2 Pro Free je dostupan na OpenCode ograničeno vrijeme. Tim koristi ovo vrijeme da prikupi povratne informacije i poboljša model. -- MiMo V2 Omni Free je dostupan na OpenCode ograničeno vrijeme. Tim koristi ovo vrijeme da prikupi povratne informacije i poboljša model. - Nemotron 3 Super Free je dostupan na OpenCode ograničeno vrijeme. Tim koristi ovo vrijeme da prikupi povratne informacije i poboljša model. - Big Pickle je stealth model koji je besplatan na OpenCode ograničeno vrijeme. Tim koristi ovo vrijeme da prikupi povratne informacije i poboljša model. 
@@ -227,8 +222,6 @@ i ne koriste vaše podatke za treniranje modela, uz sljedeće izuzetke: - Big Pickle: Tokom besplatnog perioda, prikupljeni podaci mogu se koristiti za poboljšanje modela. - MiniMax M2.5 Free: Tokom besplatnog perioda, prikupljeni podaci mogu se koristiti za poboljšanje modela. -- MiMo V2 Pro Free: Tokom besplatnog perioda, prikupljeni podaci mogu se koristiti za poboljšanje modela. -- MiMo V2 Omni Free: Tokom besplatnog perioda, prikupljeni podaci mogu se koristiti za poboljšanje modela. - Nemotron 3 Super Free (besplatni NVIDIA endpointi): Dostupan je prema [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf). Samo za probnu upotrebu, nije za produkciju niti osjetljive podatke. NVIDIA bilježi promptove i izlaze radi poboljšanja svojih modela i usluga. Nemojte slati lične ili povjerljive podatke. - OpenAI APIs: Requests are retained for 30 days in accordance with [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data). - Anthropic APIs: Requests are retained for 30 days in accordance with [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage). diff --git a/packages/web/src/content/docs/da/zen.mdx b/packages/web/src/content/docs/da/zen.mdx index 71242882cd..c497f35b7b 100644 --- a/packages/web/src/content/docs/da/zen.mdx +++ b/packages/web/src/content/docs/da/zen.mdx @@ -94,13 +94,12 @@ Du kan også få adgang til vores modeller gennem følgende API-endpoints. 
| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/alibaba` | | MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | GLM 5.1 | glm-5.1 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo V2 Pro Free | mimo-v2-pro-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo V2 Omni Free | mimo-v2-omni-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | [model id](/docs/config/#models) i din OpenCode-konfiguration @@ -126,18 +125,16 @@ Vi understøtter en pay-as-you-go-model. Nedenfor er priserne **pr. 1M tokens**. 
| Model | Input | Output | Cached Read | Cached Write | | --------------------------------- | ------ | ------- | ----------- | ------------ | | Big Pickle | Free | Free | Free | - | -| MiMo V2 Pro Free | Free | Free | Free | - | -| MiMo V2 Omni Free | Free | Free | Free | - | -| Qwen3.6 Plus | $0.50 | $3.00 | $0.05 | $0.625 | -| Qwen3.5 Plus | $0.20 | $1.20 | $0.02 | $0.25 | | Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.5 Free | Free | Free | Free | - | +| MiniMax M2.7 | $0.30 | $1.20 | $0.06 | $0.375 | | MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 | | GLM 5.1 | $1.40 | $4.40 | $0.26 | - | | GLM 5 | $1.00 | $3.20 | $0.20 | - | | Kimi K2.5 | $0.60 | $3.00 | $0.10 | - | | Kimi K2.6 | $0.95 | $4.00 | $0.16 | - | -| Qwen3 Coder 480B | $0.45 | $1.50 | - | - | +| Qwen3.6 Plus | $0.50 | $3.00 | $0.05 | $0.625 | +| Qwen3.5 Plus | $0.20 | $1.20 | $0.02 | $0.25 | | Claude Opus 4.7 | $5.00 | $25.00 | $0.50 | $6.25 | | Claude Opus 4.6 | $5.00 | $25.00 | $0.50 | $6.25 | | Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 | @@ -177,8 +174,6 @@ Kreditkortgebyrer videregives til kostpris (4.4% + $0.30 pr. transaktion); vi op De gratis modeller: - MiniMax M2.5 Free er tilgængelig på OpenCode i en begrænset periode. Teamet bruger denne tid til at indsamle feedback og forbedre modellen. -- MiMo V2 Pro Free er tilgængelig på OpenCode i en begrænset periode. Teamet bruger denne tid til at indsamle feedback og forbedre modellen. -- MiMo V2 Omni Free er tilgængelig på OpenCode i en begrænset periode. Teamet bruger denne tid til at indsamle feedback og forbedre modellen. - Nemotron 3 Super Free er tilgængelig på OpenCode i en begrænset periode. Teamet bruger denne tid til at indsamle feedback og forbedre modellen. - Big Pickle er en stealth-model, som er gratis på OpenCode i en begrænset periode. Teamet bruger denne tid til at indsamle feedback og forbedre modellen. @@ -225,8 +220,6 @@ Alle vores modeller hostes i US. 
Vores udbydere følger en nul-opbevaringspoliti - Big Pickle: I den gratis periode kan indsamlede data blive brugt til at forbedre modellen. - MiniMax M2.5 Free: I den gratis periode kan indsamlede data blive brugt til at forbedre modellen. -- MiMo V2 Pro Free: I den gratis periode kan indsamlede data blive brugt til at forbedre modellen. -- MiMo V2 Omni Free: I den gratis periode kan indsamlede data blive brugt til at forbedre modellen. - Nemotron 3 Super Free (gratis NVIDIA-endpoints): Leveres under [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf). Kun til prøvebrug, ikke til produktion eller følsomme data. Prompts og outputs logges af NVIDIA for at forbedre deres modeller og tjenester. Indsend ikke personlige eller fortrolige data. - OpenAI APIs: Anmodninger opbevares i 30 dage i overensstemmelse med [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data). - Anthropic APIs: Anmodninger opbevares i 30 dage i overensstemmelse med [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage). diff --git a/packages/web/src/content/docs/de/zen.mdx b/packages/web/src/content/docs/de/zen.mdx index b5319c1ef1..0dfc728501 100644 --- a/packages/web/src/content/docs/de/zen.mdx +++ b/packages/web/src/content/docs/de/zen.mdx @@ -85,13 +85,12 @@ Du kannst auch über die folgenden API-Endpunkte auf unsere Modelle zugreifen. 
| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/alibaba` | | MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | GLM 5.1 | glm-5.1 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo V2 Pro Free | mimo-v2-pro-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo V2 Omni Free | mimo-v2-omni-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | Die [Model-ID](/docs/config/#models) in deiner OpenCode-Konfiguration verwendet das Format `opencode/`. Für GPT 5.3 Codex würdest du zum Beispiel `opencode/gpt-5.3-codex` in deiner Konfiguration verwenden. @@ -115,18 +114,16 @@ Wir unterstützen ein Pay-as-you-go-Modell. 
Unten findest du die Preise **pro 1M | Model | Input | Output | Cached Read | Cached Write | | --------------------------------- | ------ | ------- | ----------- | ------------ | | Big Pickle | Free | Free | Free | - | -| MiMo V2 Pro Free | Free | Free | Free | - | -| MiMo V2 Omni Free | Free | Free | Free | - | -| Qwen3.6 Plus | $0.50 | $3.00 | $0.05 | $0.625 | -| Qwen3.5 Plus | $0.20 | $1.20 | $0.02 | $0.25 | | Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.5 Free | Free | Free | Free | - | +| MiniMax M2.7 | $0.30 | $1.20 | $0.06 | $0.375 | | MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 | | GLM 5.1 | $1.40 | $4.40 | $0.26 | - | | GLM 5 | $1.00 | $3.20 | $0.20 | - | | Kimi K2.5 | $0.60 | $3.00 | $0.10 | - | | Kimi K2.6 | $0.95 | $4.00 | $0.16 | - | -| Qwen3 Coder 480B | $0.45 | $1.50 | - | - | +| Qwen3.6 Plus | $0.50 | $3.00 | $0.05 | $0.625 | +| Qwen3.5 Plus | $0.20 | $1.20 | $0.02 | $0.25 | | Claude Opus 4.7 | $5.00 | $25.00 | $0.50 | $6.25 | | Claude Opus 4.6 | $5.00 | $25.00 | $0.50 | $6.25 | | Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 | @@ -166,8 +163,6 @@ Kreditkartengebühren werden zum Selbstkostenpreis weitergegeben (4.4% + $0.30 p Die kostenlosen Modelle: - MiniMax M2.5 Free ist für begrenzte Zeit auf OpenCode verfügbar. Das Team nutzt diese Zeit, um Feedback zu sammeln und das Modell zu verbessern. -- MiMo V2 Pro Free ist für begrenzte Zeit auf OpenCode verfügbar. Das Team nutzt diese Zeit, um Feedback zu sammeln und das Modell zu verbessern. -- MiMo V2 Omni Free ist für begrenzte Zeit auf OpenCode verfügbar. Das Team nutzt diese Zeit, um Feedback zu sammeln und das Modell zu verbessern. - Nemotron 3 Super Free ist für begrenzte Zeit auf OpenCode verfügbar. Das Team nutzt diese Zeit, um Feedback zu sammeln und das Modell zu verbessern. - Big Pickle ist ein Stealth-Modell, das für begrenzte Zeit kostenlos auf OpenCode verfügbar ist. Das Team nutzt diese Zeit, um Feedback zu sammeln und das Modell zu verbessern. 
@@ -211,8 +206,6 @@ Alle unsere Modelle werden in den USA gehostet. Unsere Provider folgen einer Zer - Big Pickle: Während des kostenlosen Zeitraums können gesammelte Daten zur Verbesserung des Modells verwendet werden. - MiniMax M2.5 Free: Während des kostenlosen Zeitraums können gesammelte Daten zur Verbesserung des Modells verwendet werden. -- MiMo V2 Pro Free: Während des kostenlosen Zeitraums können gesammelte Daten zur Verbesserung des Modells verwendet werden. -- MiMo V2 Omni Free: Während des kostenlosen Zeitraums können gesammelte Daten zur Verbesserung des Modells verwendet werden. - Nemotron 3 Super Free (kostenlose NVIDIA-Endpunkte): Bereitgestellt gemäß den [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf). Nur für Testzwecke, nicht für Produktion oder sensible Daten. Eingaben und Ausgaben werden von NVIDIA protokolliert, um seine Modelle und Dienste zu verbessern. Übermitteln Sie keine personenbezogenen oder vertraulichen Daten. - OpenAI APIs: Anfragen werden in Übereinstimmung mit [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data) 30 Tage lang gespeichert. - Anthropic APIs: Anfragen werden in Übereinstimmung mit [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage) 30 Tage lang gespeichert. 
diff --git a/packages/web/src/content/docs/es/zen.mdx b/packages/web/src/content/docs/es/zen.mdx index 3ac71297cf..4a2866aa79 100644 --- a/packages/web/src/content/docs/es/zen.mdx +++ b/packages/web/src/content/docs/es/zen.mdx @@ -94,13 +94,12 @@ También puedes acceder a nuestros modelos a través de los siguientes endpoints | Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/alibaba` | | MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | GLM 5.1 | glm-5.1 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo V2 Pro Free | mimo-v2-pro-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo V2 Omni Free | mimo-v2-omni-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | El [identificador del modelo](/docs/config/#models) en tu configuración de OpenCode @@ -126,18 +125,16 @@ Admitimos un modelo de pago por uso. 
A continuación se muestran los precios **p | Modelo | Entrada | Salida | Lectura en caché | Escritura en caché | | --------------------------------- | ------- | ------- | ---------------- | ------------------ | | Big Pickle | Free | Free | Free | - | -| MiMo V2 Pro Free | Free | Free | Free | - | -| MiMo V2 Omni Free | Free | Free | Free | - | -| Qwen3.6 Plus | $0.50 | $3.00 | $0.05 | $0.625 | -| Qwen3.5 Plus | $0.20 | $1.20 | $0.02 | $0.25 | | Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.5 Free | Free | Free | Free | - | +| MiniMax M2.7 | $0.30 | $1.20 | $0.06 | $0.375 | | MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 | | GLM 5.1 | $1.40 | $4.40 | $0.26 | - | | GLM 5 | $1.00 | $3.20 | $0.20 | - | | Kimi K2.5 | $0.60 | $3.00 | $0.10 | - | | Kimi K2.6 | $0.95 | $4.00 | $0.16 | - | -| Qwen3 Coder 480B | $0.45 | $1.50 | - | - | +| Qwen3.6 Plus | $0.50 | $3.00 | $0.05 | $0.625 | +| Qwen3.5 Plus | $0.20 | $1.20 | $0.02 | $0.25 | | Claude Opus 4.7 | $5.00 | $25.00 | $0.50 | $6.25 | | Claude Opus 4.6 | $5.00 | $25.00 | $0.50 | $6.25 | | Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 | @@ -177,8 +174,6 @@ Las comisiones de tarjeta de crédito se trasladan al costo (4.4% + $0.30 por tr Los modelos gratuitos: - MiniMax M2.5 Free está disponible en OpenCode por tiempo limitado. El equipo está usando este tiempo para recopilar comentarios y mejorar el modelo. -- MiMo V2 Pro Free está disponible en OpenCode por tiempo limitado. El equipo está usando este tiempo para recopilar comentarios y mejorar el modelo. -- MiMo V2 Omni Free está disponible en OpenCode por tiempo limitado. El equipo está usando este tiempo para recopilar comentarios y mejorar el modelo. - Nemotron 3 Super Free está disponible en OpenCode por tiempo limitado. El equipo está usando este tiempo para recopilar comentarios y mejorar el modelo. - Big Pickle es un modelo stealth que es gratuito en OpenCode por tiempo limitado. 
El equipo está usando este tiempo para recopilar comentarios y mejorar el modelo. @@ -225,8 +220,6 @@ Todos nuestros modelos están alojados en US. Nuestros proveedores siguen una po - Big Pickle: Durante su período gratuito, los datos recopilados pueden usarse para mejorar el modelo. - MiniMax M2.5 Free: Durante su período gratuito, los datos recopilados pueden usarse para mejorar el modelo. -- MiMo V2 Pro Free: Durante su período gratuito, los datos recopilados pueden usarse para mejorar el modelo. -- MiMo V2 Omni Free: Durante su período gratuito, los datos recopilados pueden usarse para mejorar el modelo. - Nemotron 3 Super Free (endpoints gratuitos de NVIDIA): Se ofrece bajo los [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf). Solo para uso de prueba, no para producción ni datos sensibles. NVIDIA registra los prompts y las salidas para mejorar sus modelos y servicios. No envíes datos personales ni confidenciales. - OpenAI APIs: Las solicitudes se conservan durante 30 días de acuerdo con [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data). - Anthropic APIs: Las solicitudes se conservan durante 30 días de acuerdo con [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage). 
diff --git a/packages/web/src/content/docs/fr/zen.mdx b/packages/web/src/content/docs/fr/zen.mdx index 3ac75d8957..247ae00d2d 100644 --- a/packages/web/src/content/docs/fr/zen.mdx +++ b/packages/web/src/content/docs/fr/zen.mdx @@ -85,13 +85,12 @@ Vous pouvez également accéder à nos modèles via les points de terminaison AP | Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/alibaba` | | MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | GLM 5.1 | glm-5.1 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo V2 Pro Free | mimo-v2-pro-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo V2 Omni Free | mimo-v2-omni-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | Le [model id](/docs/config/#models) dans votre configuration OpenCode utilise le format `opencode/`. Par exemple, pour GPT 5.3 Codex, vous utiliseriez `opencode/gpt-5.3-codex` dans votre configuration. @@ -115,18 +114,16 @@ Nous prenons en charge un modèle de paiement à l'utilisation. 
Vous trouverez c | Modèle | Input | Output | Cached Read | Cached Write | | --------------------------------- | ------ | ------- | ----------- | ------------ | | Big Pickle | Free | Free | Free | - | -| MiMo V2 Pro Free | Free | Free | Free | - | -| MiMo V2 Omni Free | Free | Free | Free | - | -| Qwen3.6 Plus | $0.50 | $3.00 | $0.05 | $0.625 | -| Qwen3.5 Plus | $0.20 | $1.20 | $0.02 | $0.25 | | Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.5 Free | Free | Free | Free | - | +| MiniMax M2.7 | $0.30 | $1.20 | $0.06 | $0.375 | | MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 | | GLM 5.1 | $1.40 | $4.40 | $0.26 | - | | GLM 5 | $1.00 | $3.20 | $0.20 | - | | Kimi K2.5 | $0.60 | $3.00 | $0.10 | - | | Kimi K2.6 | $0.95 | $4.00 | $0.16 | - | -| Qwen3 Coder 480B | $0.45 | $1.50 | - | - | +| Qwen3.6 Plus | $0.50 | $3.00 | $0.05 | $0.625 | +| Qwen3.5 Plus | $0.20 | $1.20 | $0.02 | $0.25 | | Claude Opus 4.7 | $5.00 | $25.00 | $0.50 | $6.25 | | Claude Opus 4.6 | $5.00 | $25.00 | $0.50 | $6.25 | | Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 | @@ -166,8 +163,6 @@ Les frais de carte de crédit sont répercutés au prix coûtant (4.4% + $0.30 p Les modèles gratuits : - MiniMax M2.5 Free est disponible sur OpenCode pour une durée limitée. L'équipe utilise cette période pour recueillir des retours et améliorer le modèle. -- MiMo V2 Pro Free est disponible sur OpenCode pour une durée limitée. L'équipe utilise cette période pour recueillir des retours et améliorer le modèle. -- MiMo V2 Omni Free est disponible sur OpenCode pour une durée limitée. L'équipe utilise cette période pour recueillir des retours et améliorer le modèle. - Nemotron 3 Super Free est disponible sur OpenCode pour une durée limitée. L'équipe utilise cette période pour recueillir des retours et améliorer le modèle. - Big Pickle est un modèle stealth gratuit sur OpenCode pour une durée limitée. L'équipe utilise cette période pour recueillir des retours et améliorer le modèle. 
@@ -211,8 +206,6 @@ Tous nos modèles sont hébergés aux US. Nos fournisseurs suivent une politique - Big Pickle : Pendant sa période gratuite, les données collectées peuvent être utilisées pour améliorer le modèle. - MiniMax M2.5 Free : Pendant sa période gratuite, les données collectées peuvent être utilisées pour améliorer le modèle. -- MiMo V2 Pro Free : Pendant sa période gratuite, les données collectées peuvent être utilisées pour améliorer le modèle. -- MiMo V2 Omni Free : Pendant sa période gratuite, les données collectées peuvent être utilisées pour améliorer le modèle. - Nemotron 3 Super Free (endpoints NVIDIA gratuits) : Fourni dans le cadre des [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf). Réservé à un usage d'essai, pas à la production ni aux données sensibles. Les prompts et les sorties sont journalisés par NVIDIA pour améliorer ses modèles et services. N'envoyez pas de données personnelles ou confidentielles. - OpenAI APIs : Les requêtes sont conservées pendant 30 jours conformément à [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data). - Anthropic APIs : Les requêtes sont conservées pendant 30 jours conformément à [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage). diff --git a/packages/web/src/content/docs/it/zen.mdx b/packages/web/src/content/docs/it/zen.mdx index 927c5b4414..0922c51a5a 100644 --- a/packages/web/src/content/docs/it/zen.mdx +++ b/packages/web/src/content/docs/it/zen.mdx @@ -94,13 +94,12 @@ Puoi anche accedere ai nostri modelli tramite i seguenti endpoint API. 
| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/alibaba` | | MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | GLM 5.1 | glm-5.1 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo V2 Pro Free | mimo-v2-pro-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo V2 Omni Free | mimo-v2-omni-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | Il [model id](/docs/config/#models) nella config di OpenCode @@ -126,18 +125,16 @@ Supportiamo un modello pay-as-you-go. Qui sotto trovi i prezzi **per 1M token**. 
| Modello | Input | Output | Cached Read | Cached Write | | --------------------------------- | ------ | ------- | ----------- | ------------ | | Big Pickle | Free | Free | Free | - | -| MiMo V2 Pro Free | Free | Free | Free | - | -| MiMo V2 Omni Free | Free | Free | Free | - | -| Qwen3.6 Plus | $0.50 | $3.00 | $0.05 | $0.625 | -| Qwen3.5 Plus | $0.20 | $1.20 | $0.02 | $0.25 | | Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.5 Free | Free | Free | Free | - | +| MiniMax M2.7 | $0.30 | $1.20 | $0.06 | $0.375 | | MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 | | GLM 5.1 | $1.40 | $4.40 | $0.26 | - | | GLM 5 | $1.00 | $3.20 | $0.20 | - | | Kimi K2.5 | $0.60 | $3.00 | $0.10 | - | | Kimi K2.6 | $0.95 | $4.00 | $0.16 | - | -| Qwen3 Coder 480B | $0.45 | $1.50 | - | - | +| Qwen3.6 Plus | $0.50 | $3.00 | $0.05 | $0.625 | +| Qwen3.5 Plus | $0.20 | $1.20 | $0.02 | $0.25 | | Claude Opus 4.7 | $5.00 | $25.00 | $0.50 | $6.25 | | Claude Opus 4.6 | $5.00 | $25.00 | $0.50 | $6.25 | | Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 | @@ -177,8 +174,6 @@ Le commissioni della carta di credito vengono trasferite al costo (4.4% + $0.30 I modelli gratuiti: - MiniMax M2.5 Free è disponibile su OpenCode per un periodo limitato. Il team usa questo periodo per raccogliere feedback e migliorare il modello. -- MiMo V2 Pro Free è disponibile su OpenCode per un periodo limitato. Il team usa questo periodo per raccogliere feedback e migliorare il modello. -- MiMo V2 Omni Free è disponibile su OpenCode per un periodo limitato. Il team usa questo periodo per raccogliere feedback e migliorare il modello. - Nemotron 3 Super Free è disponibile su OpenCode per un periodo limitato. Il team usa questo periodo per raccogliere feedback e migliorare il modello. - Big Pickle è un modello stealth che è gratuito su OpenCode per un periodo limitato. Il team usa questo periodo per raccogliere feedback e migliorare il modello. @@ -225,8 +220,6 @@ Tutti i nostri modelli sono ospitati negli US. 
I nostri provider seguono una pol - Big Pickle: durante il periodo gratuito, i dati raccolti possono essere usati per migliorare il modello. - MiniMax M2.5 Free: durante il periodo gratuito, i dati raccolti possono essere usati per migliorare il modello. -- MiMo V2 Pro Free: durante il periodo gratuito, i dati raccolti possono essere usati per migliorare il modello. -- MiMo V2 Omni Free: durante il periodo gratuito, i dati raccolti possono essere usati per migliorare il modello. - Nemotron 3 Super Free (endpoint NVIDIA gratuiti): fornito secondo i [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf). Solo per uso di prova, non per produzione o dati sensibili. NVIDIA registra prompt e output per migliorare i propri modelli e servizi. Non inviare dati personali o riservati. - OpenAI APIs: le richieste vengono conservate per 30 giorni in conformità con [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data). - Anthropic APIs: le richieste vengono conservate per 30 giorni in conformità con [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage). 
diff --git a/packages/web/src/content/docs/ja/zen.mdx b/packages/web/src/content/docs/ja/zen.mdx index 772bb8d434..7419bd4c4b 100644 --- a/packages/web/src/content/docs/ja/zen.mdx +++ b/packages/web/src/content/docs/ja/zen.mdx @@ -85,13 +85,12 @@ OpenCode Zen は、OpenCode のほかのプロバイダーと同じように動 | Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/alibaba` | | MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | GLM 5.1 | glm-5.1 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo V2 Pro Free | mimo-v2-pro-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo V2 Omni Free | mimo-v2-omni-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | OpenCode 設定で使う [model id](/docs/config/#models) は `opencode/` 形式です。たとえば、GPT 5.3 Codex では設定に `opencode/gpt-5.3-codex` を使用します。 @@ -115,18 +114,16 @@ https://opencode.ai/zen/v1/models | Model | Input | Output | Cached Read | Cached Write | | --------------------------------- | ------ | ------- | ----------- | ------------ | | Big Pickle | Free | Free | Free | - | -| MiMo V2 Pro Free | Free | 
Free | Free | - | -| MiMo V2 Omni Free | Free | Free | Free | - | -| Qwen3.6 Plus | $0.50 | $3.00 | $0.05 | $0.625 | -| Qwen3.5 Plus | $0.20 | $1.20 | $0.02 | $0.25 | | Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.5 Free | Free | Free | Free | - | +| MiniMax M2.7 | $0.30 | $1.20 | $0.06 | $0.375 | | MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 | | GLM 5.1 | $1.40 | $4.40 | $0.26 | - | | GLM 5 | $1.00 | $3.20 | $0.20 | - | | Kimi K2.5 | $0.60 | $3.00 | $0.10 | - | | Kimi K2.6 | $0.95 | $4.00 | $0.16 | - | -| Qwen3 Coder 480B | $0.45 | $1.50 | - | - | +| Qwen3.6 Plus | $0.50 | $3.00 | $0.05 | $0.625 | +| Qwen3.5 Plus | $0.20 | $1.20 | $0.02 | $0.25 | | Claude Opus 4.7 | $5.00 | $25.00 | $0.50 | $6.25 | | Claude Opus 4.6 | $5.00 | $25.00 | $0.50 | $6.25 | | Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 | @@ -166,8 +163,6 @@ https://opencode.ai/zen/v1/models 無料モデル: - MiniMax M2.5 Free は期間限定で OpenCode で利用できます。チームはこの期間中にフィードバックを集め、モデルを改善しています。 -- MiMo V2 Pro Free は期間限定で OpenCode で利用できます。チームはこの期間中にフィードバックを集め、モデルを改善しています。 -- MiMo V2 Omni Free は期間限定で OpenCode で利用できます。チームはこの期間中にフィードバックを集め、モデルを改善しています。 - Nemotron 3 Super Free は期間限定で OpenCode で利用できます。チームはこの期間中にフィードバックを集め、モデルを改善しています。 - Big Pickle はステルスモデルで、期間限定で OpenCode で無料提供されています。チームはこの期間中にフィードバックを集め、モデルを改善しています。 @@ -211,8 +206,6 @@ https://opencode.ai/zen/v1/models - Big Pickle: 無料提供期間中、収集されたデータがモデル改善に使われる場合があります。 - MiniMax M2.5 Free: 無料提供期間中、収集されたデータがモデル改善に使われる場合があります。 -- MiMo V2 Pro Free: 無料提供期間中、収集されたデータがモデル改善に使われる場合があります。 -- MiMo V2 Omni Free: 無料提供期間中、収集されたデータがモデル改善に使われる場合があります。 - Nemotron 3 Super Free(NVIDIA の無料エンドポイント): [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf) に基づいて提供されます。試用専用であり、本番環境や機密性の高いデータには使用しないでください。プロンプトと出力は、NVIDIA が自社のモデルとサービスを改善するために記録します。個人情報や機密データは送信しないでください。 - OpenAI APIs: リクエストは [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data) に従って 30 日間保持されます。 - Anthropic 
APIs: リクエストは [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage) に従って 30 日間保持されます。 diff --git a/packages/web/src/content/docs/ko/zen.mdx b/packages/web/src/content/docs/ko/zen.mdx index 143906aea4..3d796ee992 100644 --- a/packages/web/src/content/docs/ko/zen.mdx +++ b/packages/web/src/content/docs/ko/zen.mdx @@ -85,13 +85,12 @@ OpenCode Zen은 OpenCode의 다른 provider와 똑같이 작동합니다. | Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/alibaba` | | MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | GLM 5.1 | glm-5.1 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo V2 Pro Free | mimo-v2-pro-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo V2 Omni Free | mimo-v2-omni-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | OpenCode config에서 사용하는 [모델 ID](/docs/config/#models)는 `opencode/` 형식입니다. 예를 들어 GPT 5.3 Codex를 사용하려면 config에서 `opencode/gpt-5.3-codex`를 사용하면 됩니다. 
@@ -115,18 +114,16 @@ https://opencode.ai/zen/v1/models | 모델 | 입력 | 출력 | Cached Read | Cached Write | | --------------------------------- | ------ | ------- | ----------- | ------------ | | Big Pickle | Free | Free | Free | - | -| MiMo V2 Pro Free | Free | Free | Free | - | -| MiMo V2 Omni Free | Free | Free | Free | - | -| Qwen3.6 Plus | $0.50 | $3.00 | $0.05 | $0.625 | -| Qwen3.5 Plus | $0.20 | $1.20 | $0.02 | $0.25 | | Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.5 Free | Free | Free | Free | - | +| MiniMax M2.7 | $0.30 | $1.20 | $0.06 | $0.375 | | MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 | | GLM 5.1 | $1.40 | $4.40 | $0.26 | - | | GLM 5 | $1.00 | $3.20 | $0.20 | - | | Kimi K2.5 | $0.60 | $3.00 | $0.10 | - | | Kimi K2.6 | $0.95 | $4.00 | $0.16 | - | -| Qwen3 Coder 480B | $0.45 | $1.50 | - | - | +| Qwen3.6 Plus | $0.50 | $3.00 | $0.05 | $0.625 | +| Qwen3.5 Plus | $0.20 | $1.20 | $0.02 | $0.25 | | Claude Opus 4.7 | $5.00 | $25.00 | $0.50 | $6.25 | | Claude Opus 4.6 | $5.00 | $25.00 | $0.50 | $6.25 | | Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 | @@ -166,8 +163,6 @@ https://opencode.ai/zen/v1/models 무료 모델: - MiniMax M2.5 Free는 한정된 기간 동안 OpenCode에서 제공됩니다. 팀은 이 기간에 피드백을 수집하고 모델을 개선합니다. -- MiMo V2 Pro Free는 한정된 기간 동안 OpenCode에서 제공됩니다. 팀은 이 기간에 피드백을 수집하고 모델을 개선합니다. -- MiMo V2 Omni Free는 한정된 기간 동안 OpenCode에서 제공됩니다. 팀은 이 기간에 피드백을 수집하고 모델을 개선합니다. - Nemotron 3 Super Free는 한정된 기간 동안 OpenCode에서 제공됩니다. 팀은 이 기간에 피드백을 수집하고 모델을 개선합니다. - Big Pickle은 한정된 기간 동안 OpenCode에서 무료로 제공되는 stealth model입니다. 팀은 이 기간에 피드백을 수집하고 모델을 개선합니다. @@ -211,8 +206,6 @@ https://opencode.ai/zen/v1/models - Big Pickle: 무료 제공 기간에는 수집된 데이터가 모델 개선에 사용될 수 있습니다. - MiniMax M2.5 Free: 무료 제공 기간에는 수집된 데이터가 모델 개선에 사용될 수 있습니다. -- MiMo V2 Pro Free: 무료 제공 기간에는 수집된 데이터가 모델 개선에 사용될 수 있습니다. -- MiMo V2 Omni Free: 무료 제공 기간에는 수집된 데이터가 모델 개선에 사용될 수 있습니다. 
- Nemotron 3 Super Free(NVIDIA 무료 엔드포인트): [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf)에 따라 제공됩니다. 평가판 전용이며 프로덕션 환경이나 민감한 데이터에는 사용할 수 없습니다. NVIDIA는 자사 모델과 서비스를 개선하기 위해 프롬프트와 출력을 기록합니다. 개인 정보나 기밀 데이터는 제출하지 마세요. - OpenAI APIs: 요청은 [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data)에 따라 30일 동안 보관됩니다. - Anthropic APIs: 요청은 [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage)에 따라 30일 동안 보관됩니다. diff --git a/packages/web/src/content/docs/nb/zen.mdx b/packages/web/src/content/docs/nb/zen.mdx index 2b9ecca151..139d13da8f 100644 --- a/packages/web/src/content/docs/nb/zen.mdx +++ b/packages/web/src/content/docs/nb/zen.mdx @@ -94,13 +94,12 @@ Du kan også få tilgang til modellene våre gjennom følgende API-endepunkter. | Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/alibaba` | | MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | GLM 5.1 | glm-5.1 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo V2 Pro Free | mimo-v2-pro-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo V2 Omni Free | mimo-v2-omni-free | 
`https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | [modell-id](/docs/config/#models) i OpenCode-konfigurasjonen din @@ -126,18 +125,16 @@ Vi støtter en pay-as-you-go-modell. Nedenfor er prisene **per 1M tokens**. | Modell | Inndata | Utdata | Bufret lesing | Bufret skriving | | --------------------------------- | ------- | ------- | ------------- | --------------- | | Big Pickle | Free | Free | Free | - | -| MiMo V2 Pro Free | Free | Free | Free | - | -| MiMo V2 Omni Free | Free | Free | Free | - | -| Qwen3.6 Plus | $0.50 | $3.00 | $0.05 | $0.625 | -| Qwen3.5 Plus | $0.20 | $1.20 | $0.02 | $0.25 | | Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.5 Free | Free | Free | Free | - | +| MiniMax M2.7 | $0.30 | $1.20 | $0.06 | $0.375 | | MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 | | GLM 5.1 | $1.40 | $4.40 | $0.26 | - | | GLM 5 | $1.00 | $3.20 | $0.20 | - | | Kimi K2.5 | $0.60 | $3.00 | $0.10 | - | | Kimi K2.6 | $0.95 | $4.00 | $0.16 | - | -| Qwen3 Coder 480B | $0.45 | $1.50 | - | - | +| Qwen3.6 Plus | $0.50 | $3.00 | $0.05 | $0.625 | +| Qwen3.5 Plus | $0.20 | $1.20 | $0.02 | $0.25 | | Claude Opus 4.7 | $5.00 | $25.00 | $0.50 | $6.25 | | Claude Opus 4.6 | $5.00 | $25.00 | $0.50 | $6.25 | | Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 | @@ -177,8 +174,6 @@ Kredittkortgebyrer videreføres til kostpris (4.4% + $0.30 per transaction); vi Gratis-modellene: - MiniMax M2.5 Free er tilgjengelig på OpenCode i en begrenset periode. Teamet bruker denne tiden til å samle inn tilbakemeldinger og forbedre modellen. -- MiMo V2 Pro Free er tilgjengelig på OpenCode i en begrenset periode. Teamet bruker denne tiden til å samle inn tilbakemeldinger og forbedre modellen. -- MiMo V2 Omni Free er tilgjengelig på OpenCode i en begrenset periode. 
Teamet bruker denne tiden til å samle inn tilbakemeldinger og forbedre modellen. - Nemotron 3 Super Free er tilgjengelig på OpenCode i en begrenset periode. Teamet bruker denne tiden til å samle inn tilbakemeldinger og forbedre modellen. - Big Pickle er en stealth-modell som er gratis på OpenCode i en begrenset periode. Teamet bruker denne tiden til å samle inn tilbakemeldinger og forbedre modellen. @@ -225,8 +220,6 @@ Alle modellene våre hostes i US. Leverandørene våre følger en policy for zer - Big Pickle: I gratisperioden kan innsamlede data brukes til å forbedre modellen. - MiniMax M2.5 Free: I gratisperioden kan innsamlede data brukes til å forbedre modellen. -- MiMo V2 Pro Free: I gratisperioden kan innsamlede data brukes til å forbedre modellen. -- MiMo V2 Omni Free: I gratisperioden kan innsamlede data brukes til å forbedre modellen. - Nemotron 3 Super Free (gratis NVIDIA-endepunkter): Leveres under [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf). Kun for prøvebruk, ikke for produksjon eller sensitive data. Prompter og svar logges av NVIDIA for å forbedre modellene og tjenestene deres. Ikke send inn personopplysninger eller konfidensielle data. - OpenAI APIs: Forespørsler lagres i 30 dager i samsvar med [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data). - Anthropic APIs: Forespørsler lagres i 30 dager i samsvar med [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage). diff --git a/packages/web/src/content/docs/pl/zen.mdx b/packages/web/src/content/docs/pl/zen.mdx index 57cb046b42..42a9bb3d13 100644 --- a/packages/web/src/content/docs/pl/zen.mdx +++ b/packages/web/src/content/docs/pl/zen.mdx @@ -92,6 +92,7 @@ Możesz też uzyskać dostęp do naszych modeli przez poniższe endpointy API. 
| Gemini 3 Flash | gemini-3-flash | `https://opencode.ai/zen/v1/models/gemini-3-flash` | `@ai-sdk/google` | | Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/alibaba` | | Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/alibaba` | +| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | GLM 5.1 | glm-5.1 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | @@ -99,8 +100,6 @@ Możesz też uzyskać dostęp do naszych modeli przez poniższe endpointy API. | Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo V2 Pro Free | mimo-v2-pro-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo V2 Omni Free | mimo-v2-omni-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | [ID modelu](/docs/config/#models) w Twojej konfiguracji OpenCode używa formatu @@ -126,18 +125,16 @@ Obsługujemy model pay-as-you-go. 
Poniżej znajdują się ceny **za 1M tokenów* | Model | Wejście | Wyjście | Odczyt z cache | Zapis do cache | | --------------------------------- | ------- | ------- | -------------- | -------------- | | Big Pickle | Free | Free | Free | - | -| MiMo V2 Pro Free | Free | Free | Free | - | -| MiMo V2 Omni Free | Free | Free | Free | - | -| Qwen3.6 Plus | $0.50 | $3.00 | $0.05 | $0.625 | -| Qwen3.5 Plus | $0.20 | $1.20 | $0.02 | $0.25 | | Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.5 Free | Free | Free | Free | - | +| MiniMax M2.7 | $0.30 | $1.20 | $0.06 | $0.375 | | MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 | | GLM 5.1 | $1.40 | $4.40 | $0.26 | - | | GLM 5 | $1.00 | $3.20 | $0.20 | - | | Kimi K2.5 | $0.60 | $3.00 | $0.10 | - | | Kimi K2.6 | $0.95 | $4.00 | $0.16 | - | -| Qwen3 Coder 480B | $0.45 | $1.50 | - | - | +| Qwen3.6 Plus | $0.50 | $3.00 | $0.05 | $0.625 | +| Qwen3.5 Plus | $0.20 | $1.20 | $0.02 | $0.25 | | Claude Opus 4.7 | $5.00 | $25.00 | $0.50 | $6.25 | | Claude Opus 4.6 | $5.00 | $25.00 | $0.50 | $6.25 | | Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 | @@ -178,8 +175,6 @@ Opłaty za karty kredytowe są przenoszone po kosztach (4.4% + $0.30 per transac Darmowe modele: - MiniMax M2.5 Free jest dostępny w OpenCode przez ograniczony czas. Zespół wykorzystuje ten czas do zbierania opinii i ulepszania modelu. -- MiMo V2 Pro Free jest dostępny w OpenCode przez ograniczony czas. Zespół wykorzystuje ten czas do zbierania opinii i ulepszania modelu. -- MiMo V2 Omni Free jest dostępny w OpenCode przez ograniczony czas. Zespół wykorzystuje ten czas do zbierania opinii i ulepszania modelu. - Nemotron 3 Super Free jest dostępny w OpenCode przez ograniczony czas. Zespół wykorzystuje ten czas do zbierania opinii i ulepszania modelu. - Big Pickle to stealth model, który jest darmowy w OpenCode przez ograniczony czas. Zespół wykorzystuje ten czas do zbierania opinii i ulepszania modelu. @@ -226,8 +221,6 @@ Wszystkie nasze modele są hostowane w US. 
Nasi dostawcy stosują politykę zero - Big Pickle: W czasie darmowego okresu zebrane dane mogą być wykorzystywane do ulepszania modelu. - MiniMax M2.5 Free: W czasie darmowego okresu zebrane dane mogą być wykorzystywane do ulepszania modelu. -- MiMo V2 Pro Free: W czasie darmowego okresu zebrane dane mogą być wykorzystywane do ulepszania modelu. -- MiMo V2 Omni Free: W czasie darmowego okresu zebrane dane mogą być wykorzystywane do ulepszania modelu. - Nemotron 3 Super Free (darmowe endpointy NVIDIA): Udostępniany zgodnie z [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf). Tylko do użytku próbnego, nie do produkcji ani danych wrażliwych. NVIDIA rejestruje prompty i odpowiedzi, aby ulepszać swoje modele i usługi. Nie przesyłaj danych osobowych ani poufnych. - OpenAI APIs: Żądania są przechowywane przez 30 dni zgodnie z [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data). - Anthropic APIs: Żądania są przechowywane przez 30 dni zgodnie z [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage). diff --git a/packages/web/src/content/docs/pt-br/zen.mdx b/packages/web/src/content/docs/pt-br/zen.mdx index ceead9b102..a2bb269ce1 100644 --- a/packages/web/src/content/docs/pt-br/zen.mdx +++ b/packages/web/src/content/docs/pt-br/zen.mdx @@ -83,6 +83,7 @@ Você também pode acessar nossos modelos pelos seguintes endpoints de API. 
| Gemini 3 Flash | gemini-3-flash | `https://opencode.ai/zen/v1/models/gemini-3-flash` | `@ai-sdk/google` | | Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/alibaba` | | Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/alibaba` | +| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | GLM 5.1 | glm-5.1 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | @@ -90,8 +91,6 @@ Você também pode acessar nossos modelos pelos seguintes endpoints de API. | Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo V2 Pro Free | mimo-v2-pro-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo V2 Omni Free | mimo-v2-omni-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | O [model id](/docs/config/#models) na sua configuração do OpenCode usa o formato `opencode/`. Por exemplo, para GPT 5.3 Codex, você usaria `opencode/gpt-5.3-codex` na sua configuração. @@ -115,18 +114,16 @@ Oferecemos um modelo pay-as-you-go. Abaixo estão os preços **por 1M tokens**. 
| Modelo | Entrada | Saída | Leitura em cache | Escrita em cache | | --------------------------------- | ------- | ------- | ---------------- | ---------------- | | Big Pickle | Free | Free | Free | - | -| MiMo V2 Pro Free | Free | Free | Free | - | -| MiMo V2 Omni Free | Free | Free | Free | - | -| Qwen3.6 Plus | $0.50 | $3.00 | $0.05 | $0.625 | -| Qwen3.5 Plus | $0.20 | $1.20 | $0.02 | $0.25 | | Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.5 Free | Free | Free | Free | - | +| MiniMax M2.7 | $0.30 | $1.20 | $0.06 | $0.375 | | MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 | | GLM 5.1 | $1.40 | $4.40 | $0.26 | - | | GLM 5 | $1.00 | $3.20 | $0.20 | - | | Kimi K2.5 | $0.60 | $3.00 | $0.10 | - | | Kimi K2.6 | $0.95 | $4.00 | $0.16 | - | -| Qwen3 Coder 480B | $0.45 | $1.50 | - | - | +| Qwen3.6 Plus | $0.50 | $3.00 | $0.05 | $0.625 | +| Qwen3.5 Plus | $0.20 | $1.20 | $0.02 | $0.25 | | Claude Opus 4.7 | $5.00 | $25.00 | $0.50 | $6.25 | | Claude Opus 4.6 | $5.00 | $25.00 | $0.50 | $6.25 | | Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 | @@ -166,8 +163,6 @@ As taxas de cartão de crédito são repassadas a preço de custo (4.4% + $0.30 Os modelos gratuitos: - MiniMax M2.5 Free está disponível no OpenCode por tempo limitado. A equipe está usando esse período para coletar feedback e melhorar o modelo. -- MiMo V2 Pro Free está disponível no OpenCode por tempo limitado. A equipe está usando esse período para coletar feedback e melhorar o modelo. -- MiMo V2 Omni Free está disponível no OpenCode por tempo limitado. A equipe está usando esse período para coletar feedback e melhorar o modelo. - Nemotron 3 Super Free está disponível no OpenCode por tempo limitado. A equipe está usando esse período para coletar feedback e melhorar o modelo. - Big Pickle é um modelo stealth que está gratuito no OpenCode por tempo limitado. A equipe está usando esse período para coletar feedback e melhorar o modelo. @@ -211,8 +206,6 @@ Todos os nossos modelos são hospedados nos US. 
Nossos provedores seguem uma pol - Big Pickle: Durante seu período gratuito, os dados coletados podem ser usados para melhorar o modelo. - MiniMax M2.5 Free: Durante seu período gratuito, os dados coletados podem ser usados para melhorar o modelo. -- MiMo V2 Pro Free: Durante seu período gratuito, os dados coletados podem ser usados para melhorar o modelo. -- MiMo V2 Omni Free: Durante seu período gratuito, os dados coletados podem ser usados para melhorar o modelo. - Nemotron 3 Super Free (endpoints gratuitos da NVIDIA): Fornecido sob os [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf). Apenas para uso de avaliação, não para produção nem dados sensíveis. A NVIDIA registra prompts e saídas para melhorar seus modelos e serviços. Não envie dados pessoais ou confidenciais. - OpenAI APIs: As solicitações são retidas por 30 dias de acordo com [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data). - Anthropic APIs: As solicitações são retidas por 30 dias de acordo com [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage). 
diff --git a/packages/web/src/content/docs/ru/zen.mdx b/packages/web/src/content/docs/ru/zen.mdx index 76e6877e56..8d1b11a108 100644 --- a/packages/web/src/content/docs/ru/zen.mdx +++ b/packages/web/src/content/docs/ru/zen.mdx @@ -92,6 +92,7 @@ OpenCode Zen работает как любой другой провайдер | Gemini 3 Flash | gemini-3-flash | `https://opencode.ai/zen/v1/models/gemini-3-flash` | `@ai-sdk/google` | | Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/alibaba` | | Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/alibaba` | +| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | GLM 5.1 | glm-5.1 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | @@ -99,8 +100,6 @@ OpenCode Zen работает как любой другой провайдер | Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo V2 Pro Free | mimo-v2-pro-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo V2 Omni Free | mimo-v2-omni-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | [идентификатор модели](/docs/config/#models) в вашей конфигурации OpenCode @@ -126,18 +125,16 @@ https://opencode.ai/zen/v1/models | Модель | Вход | Выход | Cached Read | Cached Write | | 
--------------------------------- | ------ | ------- | ----------- | ------------ | | Big Pickle | Free | Free | Free | - | -| MiMo V2 Pro Free | Free | Free | Free | - | -| MiMo V2 Omni Free | Free | Free | Free | - | -| Qwen3.6 Plus | $0.50 | $3.00 | $0.05 | $0.625 | -| Qwen3.5 Plus | $0.20 | $1.20 | $0.02 | $0.25 | | Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.5 Free | Free | Free | Free | - | +| MiniMax M2.7 | $0.30 | $1.20 | $0.06 | $0.375 | | MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 | | GLM 5.1 | $1.40 | $4.40 | $0.26 | - | | GLM 5 | $1.00 | $3.20 | $0.20 | - | | Kimi K2.5 | $0.60 | $3.00 | $0.10 | - | | Kimi K2.6 | $0.95 | $4.00 | $0.16 | - | -| Qwen3 Coder 480B | $0.45 | $1.50 | - | - | +| Qwen3.6 Plus | $0.50 | $3.00 | $0.05 | $0.625 | +| Qwen3.5 Plus | $0.20 | $1.20 | $0.02 | $0.25 | | Claude Opus 4.7 | $5.00 | $25.00 | $0.50 | $6.25 | | Claude Opus 4.6 | $5.00 | $25.00 | $0.50 | $6.25 | | Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 | @@ -177,8 +174,6 @@ https://opencode.ai/zen/v1/models Бесплатные модели: - MiniMax M2.5 Free доступна в OpenCode ограниченное время. Команда использует это время, чтобы собирать отзывы и улучшать модель. -- MiMo V2 Pro Free доступна в OpenCode ограниченное время. Команда использует это время, чтобы собирать отзывы и улучшать модель. -- MiMo V2 Omni Free доступна в OpenCode ограниченное время. Команда использует это время, чтобы собирать отзывы и улучшать модель. - Nemotron 3 Super Free доступна в OpenCode ограниченное время. Команда использует это время, чтобы собирать отзывы и улучшать модель. - Big Pickle — это скрытая модель, которая доступна бесплатно в OpenCode ограниченное время. Команда использует это время, чтобы собирать отзывы и улучшать модель. @@ -225,8 +220,6 @@ https://opencode.ai/zen/v1/models - Big Pickle: во время бесплатного периода собранные данные могут использоваться для улучшения модели. 
- MiniMax M2.5 Free: во время бесплатного периода собранные данные могут использоваться для улучшения модели. -- MiMo V2 Pro Free: во время бесплатного периода собранные данные могут использоваться для улучшения модели. -- MiMo V2 Omni Free: во время бесплатного периода собранные данные могут использоваться для улучшения модели. - Nemotron 3 Super Free (бесплатные эндпоинты NVIDIA): предоставляется в соответствии с [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf). Только для пробного использования, не для продакшена и не для чувствительных данных. NVIDIA логирует запросы и ответы, чтобы улучшать свои модели и сервисы. Не отправляйте персональные или конфиденциальные данные. - OpenAI APIs: запросы хранятся 30 дней в соответствии с [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data). - Anthropic APIs: запросы хранятся 30 дней в соответствии с [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage). 
diff --git a/packages/web/src/content/docs/th/zen.mdx b/packages/web/src/content/docs/th/zen.mdx index a7e85ac175..c3b298a329 100644 --- a/packages/web/src/content/docs/th/zen.mdx +++ b/packages/web/src/content/docs/th/zen.mdx @@ -87,13 +87,12 @@ OpenCode Zen ทำงานเหมือน provider อื่น ๆ ใน | Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/alibaba` | | MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | GLM 5.1 | glm-5.1 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo V2 Pro Free | mimo-v2-pro-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo V2 Omni Free | mimo-v2-omni-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | [model id](/docs/config/#models) ใน OpenCode config ของคุณใช้รูปแบบ `opencode/` ตัวอย่างเช่น สำหรับ GPT 5.3 Codex คุณจะใช้ `opencode/gpt-5.3-codex` ใน config ของคุณ @@ -117,18 +116,16 @@ https://opencode.ai/zen/v1/models | Model | Input | Output | Cached Read | Cached Write | | --------------------------------- | ------ | ------- | ----------- | ------------ | | Big Pickle | Free | Free 
| Free | - | -| MiMo V2 Pro Free | Free | Free | Free | - | -| MiMo V2 Omni Free | Free | Free | Free | - | -| Qwen3.6 Plus | $0.50 | $3.00 | $0.05 | $0.625 | -| Qwen3.5 Plus | $0.20 | $1.20 | $0.02 | $0.25 | | Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.5 Free | Free | Free | Free | - | +| MiniMax M2.7 | $0.30 | $1.20 | $0.06 | $0.375 | | MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 | | GLM 5.1 | $1.40 | $4.40 | $0.26 | - | | GLM 5 | $1.00 | $3.20 | $0.20 | - | | Kimi K2.5 | $0.60 | $3.00 | $0.10 | - | | Kimi K2.6 | $0.95 | $4.00 | $0.16 | - | -| Qwen3 Coder 480B | $0.45 | $1.50 | - | - | +| Qwen3.6 Plus | $0.50 | $3.00 | $0.05 | $0.625 | +| Qwen3.5 Plus | $0.20 | $1.20 | $0.02 | $0.25 | | Claude Opus 4.7 | $5.00 | $25.00 | $0.50 | $6.25 | | Claude Opus 4.6 | $5.00 | $25.00 | $0.50 | $6.25 | | Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 | @@ -168,8 +165,6 @@ https://opencode.ai/zen/v1/models โมเดลฟรี: - MiniMax M2.5 Free เปิดให้ใช้บน OpenCode ในช่วงเวลาจำกัด ทีมกำลังใช้ช่วงเวลานี้เพื่อเก็บ feedback และปรับปรุงโมเดล -- MiMo V2 Pro Free เปิดให้ใช้บน OpenCode ในช่วงเวลาจำกัด ทีมกำลังใช้ช่วงเวลานี้เพื่อเก็บ feedback และปรับปรุงโมเดล -- MiMo V2 Omni Free เปิดให้ใช้บน OpenCode ในช่วงเวลาจำกัด ทีมกำลังใช้ช่วงเวลานี้เพื่อเก็บ feedback และปรับปรุงโมเดล - Nemotron 3 Super Free เปิดให้ใช้บน OpenCode ในช่วงเวลาจำกัด ทีมกำลังใช้ช่วงเวลานี้เพื่อเก็บ feedback และปรับปรุงโมเดล - Big Pickle เป็น stealth model ที่ใช้งานฟรีบน OpenCode ในช่วงเวลาจำกัด ทีมกำลังใช้ช่วงเวลานี้เพื่อเก็บ feedback และปรับปรุงโมเดล @@ -213,8 +208,6 @@ https://opencode.ai/zen/v1/models - Big Pickle: ระหว่างช่วงที่เปิดให้ใช้ฟรี ข้อมูลที่เก็บรวบรวมอาจถูกนำไปใช้เพื่อปรับปรุงโมเดล - MiniMax M2.5 Free: ระหว่างช่วงที่เปิดให้ใช้ฟรี ข้อมูลที่เก็บรวบรวมอาจถูกนำไปใช้เพื่อปรับปรุงโมเดล -- MiMo V2 Pro Free: ระหว่างช่วงที่เปิดให้ใช้ฟรี ข้อมูลที่เก็บรวบรวมอาจถูกนำไปใช้เพื่อปรับปรุงโมเดล -- MiMo V2 Omni Free: ระหว่างช่วงที่เปิดให้ใช้ฟรี ข้อมูลที่เก็บรวบรวมอาจถูกนำไปใช้เพื่อปรับปรุงโมเดล - Nemotron 3 
Super Free (endpoint ฟรีของ NVIDIA): ให้บริการภายใต้ [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf) ใช้สำหรับการทดลองเท่านั้น ไม่เหมาะสำหรับ production หรือข้อมูลที่อ่อนไหว NVIDIA จะบันทึก prompt และ output เพื่อนำไปปรับปรุงโมเดลและบริการของตน โปรดอย่าส่งข้อมูลส่วนบุคคลหรือข้อมูลลับ. - OpenAI APIs: คำขอจะถูกเก็บไว้เป็นเวลา 30 วันตาม [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data). - Anthropic APIs: คำขอจะถูกเก็บไว้เป็นเวลา 30 วันตาม [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage). diff --git a/packages/web/src/content/docs/tr/zen.mdx b/packages/web/src/content/docs/tr/zen.mdx index 31d7e958a7..7540293054 100644 --- a/packages/web/src/content/docs/tr/zen.mdx +++ b/packages/web/src/content/docs/tr/zen.mdx @@ -85,13 +85,12 @@ Modellerimize aşağıdaki API uç noktaları aracılığıyla da erişebilirsin | Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/alibaba` | | MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | GLM 5.1 | glm-5.1 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | GLM 5 | glm-5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo V2 Pro Free | mimo-v2-pro-free | 
`https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo V2 Omni Free | mimo-v2-omni-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | OpenCode yapılandırmanızdaki [model id](/docs/config/#models) `opencode/` biçimini kullanır. Örneğin, GPT 5.3 Codex için yapılandırmanızda `opencode/gpt-5.3-codex` kullanırsınız. @@ -115,18 +114,16 @@ Kullandıkça öde modelini destekliyoruz. Aşağıda **1M token başına** fiya | Model | Input | Output | Cached Read | Cached Write | | --------------------------------- | ------ | ------- | ----------- | ------------ | | Big Pickle | Free | Free | Free | - | -| MiMo V2 Pro Free | Free | Free | Free | - | -| MiMo V2 Omni Free | Free | Free | Free | - | -| Qwen3.6 Plus | $0.50 | $3.00 | $0.05 | $0.625 | -| Qwen3.5 Plus | $0.20 | $1.20 | $0.02 | $0.25 | | Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.5 Free | Free | Free | Free | - | +| MiniMax M2.7 | $0.30 | $1.20 | $0.06 | $0.375 | | MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 | | GLM 5.1 | $1.40 | $4.40 | $0.26 | - | | GLM 5 | $1.00 | $3.20 | $0.20 | - | | Kimi K2.5 | $0.60 | $3.00 | $0.10 | - | | Kimi K2.6 | $0.95 | $4.00 | $0.16 | - | -| Qwen3 Coder 480B | $0.45 | $1.50 | - | - | +| Qwen3.6 Plus | $0.50 | $3.00 | $0.05 | $0.625 | +| Qwen3.5 Plus | $0.20 | $1.20 | $0.02 | $0.25 | | Claude Opus 4.7 | $5.00 | $25.00 | $0.50 | $6.25 | | Claude Opus 4.6 | $5.00 | $25.00 | $0.50 | $6.25 | | Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 | @@ -166,8 +163,6 @@ Kredi kartı ücretleri maliyet üzerinden yansıtılır (%4.4 + işlem başına Ücretsiz modeller: - MiniMax M2.5 Free, sınırlı bir süre için OpenCode'da ücretsizdir. Ekip bu süreyi geri bildirim toplamak ve modeli iyileştirmek için kullanıyor. -- MiMo V2 Pro Free, sınırlı bir süre için OpenCode'da ücretsizdir. 
Ekip bu süreyi geri bildirim toplamak ve modeli iyileştirmek için kullanıyor. -- MiMo V2 Omni Free, sınırlı bir süre için OpenCode'da ücretsizdir. Ekip bu süreyi geri bildirim toplamak ve modeli iyileştirmek için kullanıyor. - Nemotron 3 Super Free, sınırlı bir süre için OpenCode'da ücretsizdir. Ekip bu süreyi geri bildirim toplamak ve modeli iyileştirmek için kullanıyor. - Big Pickle, sınırlı bir süre için OpenCode'da ücretsiz olan gizli bir modeldir. Ekip bu süreyi geri bildirim toplamak ve modeli iyileştirmek için kullanıyor. @@ -211,8 +206,6 @@ Tüm modellerimiz US'de barındırılıyor. Sağlayıcılarımız zero-retention - Big Pickle: Ücretsiz döneminde toplanan veriler modeli iyileştirmek için kullanılabilir. - MiniMax M2.5 Free: Ücretsiz döneminde toplanan veriler modeli iyileştirmek için kullanılabilir. -- MiMo V2 Pro Free: Ücretsiz döneminde toplanan veriler modeli iyileştirmek için kullanılabilir. -- MiMo V2 Omni Free: Ücretsiz döneminde toplanan veriler modeli iyileştirmek için kullanılabilir. - Nemotron 3 Super Free (ücretsiz NVIDIA uç noktaları): [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf) kapsamında sunulur. Yalnızca deneme amaçlıdır; üretim veya hassas veriler için uygun değildir. NVIDIA, modellerini ve hizmetlerini geliştirmek için promptları ve çıktıları kaydeder. Kişisel veya gizli veri göndermeyin. - OpenAI APIs: İstekler [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data) uyarınca 30 gün boyunca saklanır. - Anthropic APIs: İstekler [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage) uyarınca 30 gün boyunca saklanır. 
diff --git a/packages/web/src/content/docs/zen.mdx b/packages/web/src/content/docs/zen.mdx index 0fd978b22b..ec44a0548a 100644 --- a/packages/web/src/content/docs/zen.mdx +++ b/packages/web/src/content/docs/zen.mdx @@ -92,6 +92,7 @@ You can also access our models through the following API endpoints. | Gemini 3 Flash | gemini-3-flash | `https://opencode.ai/zen/v1/models/gemini-3-flash` | `@ai-sdk/google` | | Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/alibaba` | | Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/alibaba` | +| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | GLM 5.1 | glm-5.1 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | @@ -126,6 +127,7 @@ We support a pay-as-you-go model. Below are the prices **per 1M tokens**. 
| Big Pickle | Free | Free | Free | - | | Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.5 Free | Free | Free | Free | - | +| MiniMax M2.7 | $0.30 | $1.20 | $0.06 | $0.375 | | MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 | | GLM 5.1 | $1.40 | $4.40 | $0.26 | - | | GLM 5 | $1.00 | $3.20 | $0.20 | - | diff --git a/packages/web/src/content/docs/zh-cn/zen.mdx b/packages/web/src/content/docs/zh-cn/zen.mdx index 30ab698d06..8eedcf31cf 100644 --- a/packages/web/src/content/docs/zh-cn/zen.mdx +++ b/packages/web/src/content/docs/zh-cn/zen.mdx @@ -83,6 +83,7 @@ OpenCode Zen 的工作方式与 OpenCode 中的任何其他提供商相同。 | Gemini 3 Flash | gemini-3-flash | `https://opencode.ai/zen/v1/models/gemini-3-flash` | `@ai-sdk/google` | | Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/alibaba` | | Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/alibaba` | +| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | GLM 5.1 | glm-5.1 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | @@ -90,8 +91,6 @@ OpenCode Zen 的工作方式与 OpenCode 中的任何其他提供商相同。 | Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo V2 Pro Free | mimo-v2-pro-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo V2 Omni Free | mimo-v2-omni-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Nemotron 3 Super 
Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | 在你的 OpenCode 配置中,[模型 ID](/docs/config/#models) 使用 `opencode/` 格式。例如,对于 GPT 5.3 Codex,你需要在配置中使用 `opencode/gpt-5.3-codex`。 @@ -115,18 +114,16 @@ https://opencode.ai/zen/v1/models | 模型 | 输入 | 输出 | 缓存读取 | 缓存写入 | | --------------------------------- | ------ | ------- | -------- | -------- | | Big Pickle | Free | Free | Free | - | -| MiMo V2 Pro Free | Free | Free | Free | - | -| MiMo V2 Omni Free | Free | Free | Free | - | -| Qwen3.6 Plus | $0.50 | $3.00 | $0.05 | $0.625 | -| Qwen3.5 Plus | $0.20 | $1.20 | $0.02 | $0.25 | | Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.5 Free | Free | Free | Free | - | +| MiniMax M2.7 | $0.30 | $1.20 | $0.06 | $0.375 | | MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 | | GLM 5.1 | $1.40 | $4.40 | $0.26 | - | | GLM 5 | $1.00 | $3.20 | $0.20 | - | | Kimi K2.5 | $0.60 | $3.00 | $0.10 | - | | Kimi K2.6 | $0.95 | $4.00 | $0.16 | - | -| Qwen3 Coder 480B | $0.45 | $1.50 | - | - | +| Qwen3.6 Plus | $0.50 | $3.00 | $0.05 | $0.625 | +| Qwen3.5 Plus | $0.20 | $1.20 | $0.02 | $0.25 | | Claude Opus 4.7 | $5.00 | $25.00 | $0.50 | $6.25 | | Claude Opus 4.6 | $5.00 | $25.00 | $0.50 | $6.25 | | Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 | @@ -166,8 +163,6 @@ https://opencode.ai/zen/v1/models 免费模型: - MiniMax M2.5 Free 目前在 OpenCode 上限时免费提供。团队正在利用这段时间收集反馈并改进模型。 -- MiMo V2 Pro Free 目前在 OpenCode 上限时免费提供。团队正在利用这段时间收集反馈并改进模型。 -- MiMo V2 Omni Free 目前在 OpenCode 上限时免费提供。团队正在利用这段时间收集反馈并改进模型。 - Nemotron 3 Super Free 目前在 OpenCode 上限时免费提供。团队正在利用这段时间收集反馈并改进模型。 - Big Pickle 是一个隐身模型,目前在 OpenCode 上限时免费提供。团队正在利用这段时间收集反馈并改进模型。 @@ -211,8 +206,6 @@ https://opencode.ai/zen/v1/models - Big Pickle:在免费期间,收集的数据可能会被用于改进模型。 - MiniMax M2.5 Free:在免费期间,收集的数据可能会被用于改进模型。 -- MiMo V2 Pro Free:在免费期间,收集的数据可能会被用于改进模型。 -- MiMo V2 Omni Free:在免费期间,收集的数据可能会被用于改进模型。 - Nemotron 3 Super Free(NVIDIA 免费端点):根据 [NVIDIA API Trial Terms of 
Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf) 提供。仅供试用,不适用于生产环境或敏感数据。NVIDIA 会记录提示词和输出内容,以改进其模型和服务。请勿提交个人或机密数据。 - OpenAI APIs:请求会根据 [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data) 保留 30 天。 - Anthropic APIs:请求会根据 [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage) 保留 30 天。 diff --git a/packages/web/src/content/docs/zh-tw/zen.mdx b/packages/web/src/content/docs/zh-tw/zen.mdx index a464386fd4..5ccc36785b 100644 --- a/packages/web/src/content/docs/zh-tw/zen.mdx +++ b/packages/web/src/content/docs/zh-tw/zen.mdx @@ -87,6 +87,7 @@ OpenCode Zen 的運作方式和 OpenCode 中的其他供應商一樣。 | Gemini 3 Flash | gemini-3-flash | `https://opencode.ai/zen/v1/models/gemini-3-flash` | `@ai-sdk/google` | | Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/alibaba` | | Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/alibaba` | +| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiniMax M2.5 Free | minimax-m2.5-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | GLM 5.1 | glm-5.1 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | @@ -94,8 +95,6 @@ OpenCode Zen 的運作方式和 OpenCode 中的其他供應商一樣。 | Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo V2 Pro Free | mimo-v2-pro-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo V2 Omni Free | mimo-v2-omni-free | 
`https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | OpenCode 設定中的 [模型 ID](/docs/config/#models) 會使用 `opencode/` @@ -120,18 +119,16 @@ https://opencode.ai/zen/v1/models | 模型 | 輸入 | 輸出 | 快取讀取 | 快取寫入 | | --------------------------------- | ------ | ------- | -------- | -------- | | Big Pickle | Free | Free | Free | - | -| MiMo V2 Pro Free | Free | Free | Free | - | -| MiMo V2 Omni Free | Free | Free | Free | - | -| Qwen3.6 Plus | $0.50 | $3.00 | $0.05 | $0.625 | -| Qwen3.5 Plus | $0.20 | $1.20 | $0.02 | $0.25 | | Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.5 Free | Free | Free | Free | - | +| MiniMax M2.7 | $0.30 | $1.20 | $0.06 | $0.375 | | MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 | | GLM 5.1 | $1.40 | $4.40 | $0.26 | - | | GLM 5 | $1.00 | $3.20 | $0.20 | - | | Kimi K2.5 | $0.60 | $3.00 | $0.10 | - | | Kimi K2.6 | $0.95 | $4.00 | $0.16 | - | -| Qwen3 Coder 480B | $0.45 | $1.50 | - | - | +| Qwen3.6 Plus | $0.50 | $3.00 | $0.05 | $0.625 | +| Qwen3.5 Plus | $0.20 | $1.20 | $0.02 | $0.25 | | Claude Opus 4.7 | $5.00 | $25.00 | $0.50 | $6.25 | | Claude Opus 4.6 | $5.00 | $25.00 | $0.50 | $6.25 | | Claude Opus 4.5 | $5.00 | $25.00 | $0.50 | $6.25 | @@ -172,8 +169,6 @@ https://opencode.ai/zen/v1/models 免費模型: - MiniMax M2.5 Free 在 OpenCode 上限時提供。團隊正在利用這段時間收集回饋並改進模型。 -- MiMo V2 Pro Free 在 OpenCode 上限時提供。團隊正在利用這段時間收集回饋並改進模型。 -- MiMo V2 Omni Free 在 OpenCode 上限時提供。團隊正在利用這段時間收集回饋並改進模型。 - Nemotron 3 Super Free 在 OpenCode 上限時提供。團隊正在利用這段時間收集回饋並改進模型。 - Big Pickle 是一個隱身模型,在 OpenCode 上限時免費提供。團隊正在利用這段時間收集回饋並改進模型。 @@ -218,8 +213,6 @@ https://opencode.ai/zen/v1/models - Big Pickle: 在免費期間,收集到的資料可能會用於改進模型。 - MiniMax M2.5 Free: 在免費期間,收集到的資料可能會用於改進模型。 -- MiMo V2 Pro Free: 在免費期間,收集到的資料可能會用於改進模型。 -- MiMo V2 Omni Free: 在免費期間,收集到的資料可能會用於改進模型。 - Nemotron 3 Super Free(NVIDIA 免費端點):依據 [NVIDIA API Trial Terms of 
Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf) 提供。僅供試用,不適用於正式環境或敏感資料。NVIDIA 會記錄提示詞與輸出內容,以改進其模型與服務。請勿提交個人或機密資料。 - OpenAI APIs: 請求會依據 [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data) 保留 30 天。 - Anthropic APIs: 請求會依據 [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage) 保留 30 天。 From 38e2f4cddafcbc4e3ac5f8ebdcdab9d1f468737b Mon Sep 17 00:00:00 2001 From: Brendan Allan <14191578+Brendonovich@users.noreply.github.com> Date: Tue, 21 Apr 2026 13:32:31 +0800 Subject: [PATCH 10/73] fix(desktop-electron): add CORS headers to main window webRequest (#23633) --- packages/desktop-electron/src/main/windows.ts | 27 +++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/packages/desktop-electron/src/main/windows.ts b/packages/desktop-electron/src/main/windows.ts index df55e8da2f..337e1ca0bc 100644 --- a/packages/desktop-electron/src/main/windows.ts +++ b/packages/desktop-electron/src/main/windows.ts @@ -100,6 +100,19 @@ export function createMainWindow() { }, }) + win.webContents.session.webRequest.onBeforeSendHeaders((details, callback) => { + const { requestHeaders } = details + upsertKeyValue(requestHeaders, "Access-Control-Allow-Origin", ["*"]) + callback({ requestHeaders }) + }) + + win.webContents.session.webRequest.onHeadersReceived((details, callback) => { + const { responseHeaders = {} } = details + upsertKeyValue(responseHeaders, "Access-Control-Allow-Origin", ["*"]) + upsertKeyValue(responseHeaders, "Access-Control-Allow-Headers", ["*"]) + callback({ responseHeaders }) + }) + state.manage(win) loadWindow(win, "index.html") wireZoom(win) @@ -177,3 +190,17 @@ function wireZoom(win: BrowserWindow) { win.webContents.setZoomFactor(1) }) } + +function upsertKeyValue(obj: Record, keyToChange: string, value: any) { + const keyToChangeLower = keyToChange.toLowerCase() + for (const key of Object.keys(obj)) { + if (key.toLowerCase() === 
keyToChangeLower) { + // Reassign old key + obj[key] = value + // Done + return + } + } + // Insert at end instead + obj[keyToChange] = value +} From 1e0137f624e5b370ae5e1c21b4a512889e83928d Mon Sep 17 00:00:00 2001 From: Jack Date: Tue, 21 Apr 2026 14:01:52 +0800 Subject: [PATCH 11/73] go: promote kimi k2.6 usage limits (#23634) Co-authored-by: Frank --- packages/console/app/src/i18n/ar.ts | 2 + packages/console/app/src/i18n/br.ts | 2 + packages/console/app/src/i18n/da.ts | 2 + packages/console/app/src/i18n/de.ts | 2 + packages/console/app/src/i18n/en.ts | 2 + packages/console/app/src/i18n/es.ts | 2 + packages/console/app/src/i18n/fr.ts | 2 + packages/console/app/src/i18n/it.ts | 2 + packages/console/app/src/i18n/ja.ts | 2 + packages/console/app/src/i18n/ko.ts | 2 + packages/console/app/src/i18n/no.ts | 2 + packages/console/app/src/i18n/pl.ts | 2 + packages/console/app/src/i18n/ru.ts | 2 + packages/console/app/src/i18n/th.ts | 2 + packages/console/app/src/i18n/tr.ts | 2 + packages/console/app/src/i18n/zh.ts | 2 + packages/console/app/src/i18n/zht.ts | 2 + packages/console/app/src/routes/go/index.css | 45 +++++++++++++++++++- packages/console/app/src/routes/go/index.tsx | 25 +++++++++-- 19 files changed, 99 insertions(+), 5 deletions(-) diff --git a/packages/console/app/src/i18n/ar.ts b/packages/console/app/src/i18n/ar.ts index 2df31a213e..73c07c6775 100644 --- a/packages/console/app/src/i18n/ar.ts +++ b/packages/console/app/src/i18n/ar.ts @@ -261,6 +261,8 @@ export const dict = { "go.cta.promo": "$5 للشهر الأول", "go.pricing.body": "استخدمه مع أي وكيل. $5 للشهر الأول، ثم $10/شهر. قم بزيادة الرصيد إذا لزم الأمر. 
الإلغاء في أي وقت.", + "go.banner.badge": "3x", + "go.banner.text": "Kimi K2.6: حد الاستخدام 3 أضعاف حتى 27 أبريل", "go.graph.free": "مجاني", "go.graph.freePill": "Big Pickle ونماذج مجانية", "go.graph.go": "Go", diff --git a/packages/console/app/src/i18n/br.ts b/packages/console/app/src/i18n/br.ts index 2546443e94..d79b8350ae 100644 --- a/packages/console/app/src/i18n/br.ts +++ b/packages/console/app/src/i18n/br.ts @@ -265,6 +265,8 @@ export const dict = { "go.cta.promo": "$5 no primeiro mês", "go.pricing.body": "Use com qualquer agente. $5 no primeiro mês, depois $10/mês. Recarregue o crédito se necessário. Cancele a qualquer momento.", + "go.banner.badge": "3x", + "go.banner.text": "Kimi K2.6: limite de uso 3x maior até 27 de abril", "go.graph.free": "Grátis", "go.graph.freePill": "Big Pickle e modelos gratuitos", "go.graph.go": "Go", diff --git a/packages/console/app/src/i18n/da.ts b/packages/console/app/src/i18n/da.ts index 6cd974c18f..e806983967 100644 --- a/packages/console/app/src/i18n/da.ts +++ b/packages/console/app/src/i18n/da.ts @@ -263,6 +263,8 @@ export const dict = { "go.cta.promo": "$5 første måned", "go.pricing.body": "Brug med enhver agent. $5 første måned, derefter $10/måned. Tank op med kredit efter behov. Afmeld når som helst.", + "go.banner.badge": "3x", + "go.banner.text": "Kimi K2.6: brugsgrænsen tredoblet til 27. april", "go.graph.free": "Gratis", "go.graph.freePill": "Big Pickle og gratis modeller", "go.graph.go": "Go", diff --git a/packages/console/app/src/i18n/de.ts b/packages/console/app/src/i18n/de.ts index eaf069f5a6..bdd47e77cf 100644 --- a/packages/console/app/src/i18n/de.ts +++ b/packages/console/app/src/i18n/de.ts @@ -265,6 +265,8 @@ export const dict = { "go.cta.promo": "$5 im ersten Monat", "go.pricing.body": "Mit jedem Agenten nutzbar. $5 im ersten Monat, danach $10/Monat. Guthaben bei Bedarf aufladen. Jederzeit kündbar.", + "go.banner.badge": "3x", + "go.banner.text": "Kimi K2.6: Nutzungslimit bis zum 27. 
April verdreifacht", "go.graph.free": "Kostenlos", "go.graph.freePill": "Big Pickle und kostenlose Modelle", "go.graph.go": "Go", diff --git a/packages/console/app/src/i18n/en.ts b/packages/console/app/src/i18n/en.ts index 5cfd46123b..a242ff1010 100644 --- a/packages/console/app/src/i18n/en.ts +++ b/packages/console/app/src/i18n/en.ts @@ -249,6 +249,8 @@ export const dict = { "go.title": "OpenCode Go | Low cost coding models for everyone", "go.meta.description": "Go starts at $5 for your first month, then $10/month, with generous 5-hour request limits for GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5, and MiniMax M2.7.", + "go.banner.badge": "3x", + "go.banner.text": "Kimi K2.6 gets 3× usage limits through April 27", "go.hero.title": "Low cost coding models for everyone", "go.hero.body": "Go brings agentic coding to programmers around the world. Offering generous limits and reliable access to the most capable open-source models, so you can build with powerful agents without worrying about cost or availability.", diff --git a/packages/console/app/src/i18n/es.ts b/packages/console/app/src/i18n/es.ts index 8127883861..ddeff684b0 100644 --- a/packages/console/app/src/i18n/es.ts +++ b/packages/console/app/src/i18n/es.ts @@ -266,6 +266,8 @@ export const dict = { "go.cta.promo": "$5 el primer mes", "go.pricing.body": "Úsalo con cualquier agente. $5 el primer mes, luego 10 $/mes. Recarga crédito si es necesario. 
Cancela en cualquier momento.", + "go.banner.badge": "3x", + "go.banner.text": "Kimi K2.6: límite de uso triplicado hasta el 27 de abril", "go.graph.free": "Gratis", "go.graph.freePill": "Big Pickle y modelos gratuitos", "go.graph.go": "Go", diff --git a/packages/console/app/src/i18n/fr.ts b/packages/console/app/src/i18n/fr.ts index 53f167d7b8..df892c98d3 100644 --- a/packages/console/app/src/i18n/fr.ts +++ b/packages/console/app/src/i18n/fr.ts @@ -267,6 +267,8 @@ export const dict = { "go.cta.promo": "$5 le premier mois", "go.pricing.body": "Utilisez-le avec n'importe quel agent. $5 le premier mois, puis 10 $/mois. Rechargez du crédit si nécessaire. Annulez à tout moment.", + "go.banner.badge": "3x", + "go.banner.text": "Kimi K2.6 : limites d’utilisation triplées jusqu’au 27 avril", "go.graph.free": "Gratuit", "go.graph.freePill": "Big Pickle et modèles gratuits", "go.graph.go": "Go", diff --git a/packages/console/app/src/i18n/it.ts b/packages/console/app/src/i18n/it.ts index 580dad256d..67a73aaeef 100644 --- a/packages/console/app/src/i18n/it.ts +++ b/packages/console/app/src/i18n/it.ts @@ -263,6 +263,8 @@ export const dict = { "go.cta.promo": "$5 il primo mese", "go.pricing.body": "Usalo con qualsiasi agente. $5 il primo mese, poi $10/mese. Ricarica il credito se necessario. 
Annulla in qualsiasi momento.", + "go.banner.badge": "3x", + "go.banner.text": "Kimi K2.6: limite d'uso triplicato fino al 27 aprile", "go.graph.free": "Gratis", "go.graph.freePill": "Big Pickle e modelli gratuiti", "go.graph.go": "Go", diff --git a/packages/console/app/src/i18n/ja.ts b/packages/console/app/src/i18n/ja.ts index b21f6a00ed..541fdd56c1 100644 --- a/packages/console/app/src/i18n/ja.ts +++ b/packages/console/app/src/i18n/ja.ts @@ -262,6 +262,8 @@ export const dict = { "go.cta.promo": "初月 $5", "go.pricing.body": "どのエージェントでも使えます。最初の月$5、その後$10/月。必要に応じてクレジットを追加。いつでもキャンセルできます。", + "go.banner.badge": "3x", + "go.banner.text": "Kimi K2.6、4月27日まで利用上限が3倍に", "go.graph.free": "無料", "go.graph.freePill": "Big Pickleと無料モデル", "go.graph.go": "Go", diff --git a/packages/console/app/src/i18n/ko.ts b/packages/console/app/src/i18n/ko.ts index ce1f076a47..5d459425be 100644 --- a/packages/console/app/src/i18n/ko.ts +++ b/packages/console/app/src/i18n/ko.ts @@ -259,6 +259,8 @@ export const dict = { "go.cta.promo": "첫 달 $5", "go.pricing.body": "어떤 에이전트와도 사용할 수 있습니다. 첫 달 $5, 이후 $10/월. 필요하면 크레딧을 충전하세요. 언제든지 취소할 수 있습니다.", + "go.banner.badge": "3x", + "go.banner.text": "Kimi K2.6, 4월 27일까지 사용 한도 3배 확대", "go.graph.free": "무료", "go.graph.freePill": "Big Pickle 및 무료 모델", "go.graph.go": "Go", diff --git a/packages/console/app/src/i18n/no.ts b/packages/console/app/src/i18n/no.ts index a4af7b8b8e..af2b018b00 100644 --- a/packages/console/app/src/i18n/no.ts +++ b/packages/console/app/src/i18n/no.ts @@ -263,6 +263,8 @@ export const dict = { "go.cta.promo": "$5 første måned", "go.pricing.body": "Bruk med hvilken som helst agent. $5 første måned, deretter $10/måned. Fyll på kreditt ved behov. Avslutt når som helst.", + "go.banner.badge": "3x", + "go.banner.text": "Kimi K2.6: bruksgrensen er tredoblet til 27. 
april", "go.graph.free": "Gratis", "go.graph.freePill": "Big Pickle og gratis modeller", "go.graph.go": "Go", diff --git a/packages/console/app/src/i18n/pl.ts b/packages/console/app/src/i18n/pl.ts index d8abcf70b2..f2219487bc 100644 --- a/packages/console/app/src/i18n/pl.ts +++ b/packages/console/app/src/i18n/pl.ts @@ -264,6 +264,8 @@ export const dict = { "go.cta.promo": "$5 pierwszy miesiąc", "go.pricing.body": "Używaj z dowolnym agentem. $5 za pierwszy miesiąc, potem $10/miesiąc. Doładuj konto w razie potrzeby. Anuluj w dowolnym momencie.", + "go.banner.badge": "3x", + "go.banner.text": "Kimi K2.6: limit użycia zwiększony 3× do 27 kwietnia", "go.graph.free": "Darmowe", "go.graph.freePill": "Big Pickle i darmowe modele", "go.graph.go": "Go", diff --git a/packages/console/app/src/i18n/ru.ts b/packages/console/app/src/i18n/ru.ts index 8d4d3d4c20..8fd76226e4 100644 --- a/packages/console/app/src/i18n/ru.ts +++ b/packages/console/app/src/i18n/ru.ts @@ -267,6 +267,8 @@ export const dict = { "go.cta.promo": "$5 первый месяц", "go.pricing.body": "Используйте с любым агентом. $5 за первый месяц, затем $10/месяц. Пополняйте баланс при необходимости. 
Отменить можно в любое время.", + "go.banner.badge": "3x", + "go.banner.text": "Kimi K2.6: лимит использования увеличен в 3 раза до 27 апреля", "go.graph.free": "Бесплатно", "go.graph.freePill": "Big Pickle и бесплатные модели", "go.graph.go": "Go", diff --git a/packages/console/app/src/i18n/th.ts b/packages/console/app/src/i18n/th.ts index ebec3ab867..efe535094f 100644 --- a/packages/console/app/src/i18n/th.ts +++ b/packages/console/app/src/i18n/th.ts @@ -261,6 +261,8 @@ export const dict = { "go.cta.price": "$10/เดือน", "go.cta.promo": "$5 เดือนแรก", "go.pricing.body": "ใช้กับเอเจนต์ใดก็ได้ $5 ในเดือนแรก จากนั้น $10/เดือน เติมเครดิตหากจำเป็น ยกเลิกได้ตลอดเวลา", + "go.banner.badge": "3x", + "go.banner.text": "Kimi K2.6 โควตาการใช้งานเพิ่มเป็น 3 เท่า ถึง 27 เม.ย.", "go.graph.free": "ฟรี", "go.graph.freePill": "Big Pickle และโมเดลฟรี", "go.graph.go": "Go", diff --git a/packages/console/app/src/i18n/tr.ts b/packages/console/app/src/i18n/tr.ts index ccc6f1d327..114dcbdb0d 100644 --- a/packages/console/app/src/i18n/tr.ts +++ b/packages/console/app/src/i18n/tr.ts @@ -265,6 +265,8 @@ export const dict = { "go.cta.promo": "İlk ay $5", "go.pricing.body": "Herhangi bir ajanla kullanın. İlk ay $5, sonrasında ayda 10$. Gerekirse kredi yükleyin. 
İstediğiniz zaman iptal edin.", + "go.banner.badge": "3x", + "go.banner.text": "Kimi K2.6: kullanım limiti 27 Nisan'a kadar 3 katına çıktı", "go.graph.free": "Ücretsiz", "go.graph.freePill": "Big Pickle ve ücretsiz modeller", "go.graph.go": "Go", diff --git a/packages/console/app/src/i18n/zh.ts b/packages/console/app/src/i18n/zh.ts index f54bb68736..72a2a4570d 100644 --- a/packages/console/app/src/i18n/zh.ts +++ b/packages/console/app/src/i18n/zh.ts @@ -252,6 +252,8 @@ export const dict = { "go.cta.price": "$10/月", "go.cta.promo": "首月 $5", "go.pricing.body": "可配合任何代理使用。首月 $5,之后 $10/月。如有需要可充值。随时取消。", + "go.banner.badge": "3x", + "go.banner.text": "Kimi K2.6 使用额度提升至 3 倍,限时至 4 月 27 日", "go.graph.free": "免费", "go.graph.freePill": "Big Pickle 和免费模型", "go.graph.go": "Go", diff --git a/packages/console/app/src/i18n/zht.ts b/packages/console/app/src/i18n/zht.ts index 4076f18c0d..caea5c74bb 100644 --- a/packages/console/app/src/i18n/zht.ts +++ b/packages/console/app/src/i18n/zht.ts @@ -252,6 +252,8 @@ export const dict = { "go.cta.price": "$10/月", "go.cta.promo": "首月 $5", "go.pricing.body": "可搭配任何代理使用。首月 $5,之後 $10/月。如有需要可儲值。隨時取消。", + "go.banner.badge": "3x", + "go.banner.text": "Kimi K2.6 使用額度提升至 3 倍,限時至 4 月 27 日", "go.graph.free": "免費", "go.graph.freePill": "Big Pickle 與免費模型", "go.graph.go": "Go", diff --git a/packages/console/app/src/routes/go/index.css b/packages/console/app/src/routes/go/index.css index cd4406f253..de8dce4724 100644 --- a/packages/console/app/src/routes/go/index.css +++ b/packages/console/app/src/routes/go/index.css @@ -306,7 +306,7 @@ body { [data-component="hero"] { display: flex; flex-direction: column; - padding: calc(var(--vertical-padding) * 2) var(--padding); + padding: calc(var(--vertical-padding) * 1.5) var(--padding); [data-slot="zen logo dark"] { display: none; @@ -326,6 +326,37 @@ body { } } + [data-component="desktop-app-banner"] { + display: flex; + align-items: center; + gap: 12px; + margin-bottom: 32px; + + [data-slot="badge"] { + 
background: var(--color-background-strong); + color: var(--color-text-inverted); + font-weight: 500; + padding: 4px 8px; + line-height: 1; + flex-shrink: 0; + } + + [data-slot="content"] { + display: flex; + align-items: center; + gap: 1ch; + } + + [data-slot="text"] { + color: var(--color-text-strong); + line-height: 1.4; + + @media (max-width: 30.625rem) { + display: none; + } + } + } + [data-slot="hero-copy"] { img { margin-bottom: 24px; @@ -544,6 +575,14 @@ body { font-weight: 600; white-space: nowrap; } + + [data-bonus] { + color: var(--color-text-weak); + font-size: 12px; + font-weight: 400; + line-height: 1; + white-space: nowrap; + } } [data-slot="plot-labels"] { @@ -623,6 +662,10 @@ body { fill: var(--color-text-strong); } + [data-bar][data-kind="promo"] { + fill: color-mix(in srgb, var(--bar-go) 50%, transparent); + } + [data-val] { fill: var(--color-text-strong); font-size: 13px; diff --git a/packages/console/app/src/routes/go/index.tsx b/packages/console/app/src/routes/go/index.tsx index b66419c5a7..bae5ddd283 100644 --- a/packages/console/app/src/routes/go/index.tsx +++ b/packages/console/app/src/routes/go/index.tsx @@ -59,9 +59,8 @@ function LimitsGraph(props: { href: string }) { const free = 200 const graph = [ { id: "glm-5.1", name: "GLM-5.1", req: 880, d: "100ms" }, - { id: "kimi-k2.6", name: "Kimi K2.6", req: 1150, d: "150ms" }, + { id: "kimi-k2.6", name: "Kimi K2.6 (3x usage)", req: 3450, baseReq: 1150, d: "150ms" }, { id: "mimo-v2-pro", name: "MiMo-V2-Pro", req: 1290, d: "150ms" }, - { id: "kimi-k2.5", name: "Kimi K2.5", req: 1850, d: "240ms" }, { id: "qwen3.6-plus", name: "Qwen3.6 Plus", req: 3300, d: "280ms" }, { id: "minimax-m2.7", name: "MiniMax M2.7", req: 3400, d: "300ms" }, { id: "qwen3.5-plus", name: "Qwen3.5 Plus", req: 10200, d: "360ms" }, @@ -79,7 +78,7 @@ function LimitsGraph(props: { href: string }) { const rmax = Math.max(1, ...graph.map((m) => ratio(m.req))) const log = (n: number) => Math.log10(Math.max(n, 1)) const base = 24 - 
const p = 1.8 + const p = 2.2 const x = (r: number) => left + base + Math.pow(log(r) / log(rmax), p) * (plot - base) const start = (x(1) / w) * 100 @@ -152,12 +151,24 @@ function LimitsGraph(props: { href: string }) { + {m.baseReq && ( + + )} )} @@ -247,6 +258,12 @@ export default function Home() {
+
+ {i18n.t("go.banner.badge")} +
+ {i18n.t("go.banner.text")} +
+
From 22d33c57af94f3bac5022f64ec11c82a06c015c8 Mon Sep 17 00:00:00 2001 From: Brendan Allan <14191578+Brendonovich@users.noreply.github.com> Date: Tue, 21 Apr 2026 14:11:23 +0800 Subject: [PATCH 12/73] fix(app): properly wrap produce calls in setProjects (#23638) --- packages/app/src/context/global-sync.tsx | 11 +++-------- .../src/context/global-sync/event-reducer.ts | 18 +++++++++++------- 2 files changed, 14 insertions(+), 15 deletions(-) diff --git a/packages/app/src/context/global-sync.tsx b/packages/app/src/context/global-sync.tsx index 4ced2b939d..313ff29659 100644 --- a/packages/app/src/context/global-sync.tsx +++ b/packages/app/src/context/global-sync.tsx @@ -10,7 +10,7 @@ import type { import { showToast } from "@opencode-ai/ui/toast" import { getFilename } from "@opencode-ai/shared/util/path" import { batch, createContext, getOwner, onCleanup, onMount, type ParentProps, untrack, useContext } from "solid-js" -import { createStore, produce, reconcile } from "solid-js/store" +import { createStore, produce, reconcile, unwrap } from "solid-js/store" import { useLanguage } from "@/context/language" import { Persist, persisted } from "@/utils/persist" import type { InitError } from "../pages/error" @@ -95,13 +95,8 @@ function createGlobalSync() { ) } - const setProjects = (next: Project[] | ((draft: Project[]) => void)) => { + const setProjects = (next: Project[] | ((draft: Project[]) => Project[])) => { projectWritten = true - if (typeof next === "function") { - setGlobalStore("project", produce(next)) - cacheProjects() - return - } setGlobalStore("project", next) cacheProjects() } @@ -116,7 +111,7 @@ function createGlobalSync() { const set = ((...input: unknown[]) => { if (input[0] === "project" && (Array.isArray(input[1]) || typeof input[1] === "function")) { - setProjects(input[1] as Project[] | ((draft: Project[]) => void)) + setProjects(input[1] as Project[] | ((draft: Project[]) => Project[])) return input[1] } return (setGlobalStore as (...args: 
unknown[]) => unknown)(...input) diff --git a/packages/app/src/context/global-sync/event-reducer.ts b/packages/app/src/context/global-sync/event-reducer.ts index 11a0cf83fd..82408fdfe9 100644 --- a/packages/app/src/context/global-sync/event-reducer.ts +++ b/packages/app/src/context/global-sync/event-reducer.ts @@ -21,7 +21,7 @@ const SKIP_PARTS = new Set(["patch", "step-start", "step-finish"]) export function applyGlobalEvent(input: { event: { type: string; properties?: unknown } project: Project[] - setGlobalProject: (next: Project[] | ((draft: Project[]) => void)) => void + setGlobalProject: (next: Project[] | ((draft: Project[]) => Project[])) => void refresh: () => void }) { if (input.event.type === "global.disposed" || input.event.type === "server.connected") { @@ -33,14 +33,18 @@ export function applyGlobalEvent(input: { const properties = input.event.properties as Project const result = Binary.search(input.project, properties.id, (s) => s.id) if (result.found) { - input.setGlobalProject((draft) => { - draft[result.index] = { ...draft[result.index], ...properties } - }) + input.setGlobalProject( + produce((draft) => { + draft[result.index] = { ...draft[result.index], ...properties } + }), + ) return } - input.setGlobalProject((draft) => { - draft.splice(result.index, 0, properties) - }) + input.setGlobalProject( + produce((draft) => { + draft.splice(result.index, 0, properties) + }), + ) } function cleanupSessionCaches( From 8a7bb7c6a9fe4fb9b1f85561603cf131278ccf54 Mon Sep 17 00:00:00 2001 From: Frank Date: Tue, 21 Apr 2026 02:36:38 -0400 Subject: [PATCH 13/73] zen: tpm routing --- .../console/app/src/routes/zen/util/handler.ts | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/packages/console/app/src/routes/zen/util/handler.ts b/packages/console/app/src/routes/zen/util/handler.ts index 635eadebe8..d9dc450012 100644 --- a/packages/console/app/src/routes/zen/util/handler.ts +++ 
b/packages/console/app/src/routes/zen/util/handler.ts @@ -448,35 +448,28 @@ export async function handler( return modelInfo.providers.find((provider) => provider.id === modelInfo.byokProvider) } - // Filter out TPM limited providers - const allProviders = modelInfo.providers.filter((provider) => { - if (!provider.tpmLimit) return true - const usage = modelTpmLimits?.[`${provider.id}/${provider.model}`] ?? 0 - return usage < provider.tpmLimit * 1_000_000 - }) - // Always use the same provider for the same session if (stickyProvider) { - const provider = allProviders.find((provider) => provider.id === stickyProvider) + const provider = modelInfo.providers.find((provider) => provider.id === stickyProvider) if (provider) return provider } if (trialProviders) { const trialProvider = trialProviders[Math.floor(Math.random() * trialProviders.length)] - const provider = allProviders.find((provider) => provider.id === trialProvider) + const provider = modelInfo.providers.find((provider) => provider.id === trialProvider) if (provider) return provider } if (retry.retryCount !== MAX_FAILOVER_RETRIES) { let topPriority = Infinity - const providers = allProviders + const providers = modelInfo.providers .filter((provider) => !provider.disabled) .filter((provider) => provider.weight !== 0) .filter((provider) => !retry.excludeProviders.includes(provider.id)) .filter((provider) => { if (!provider.tpmLimit) return true const usage = modelTpmLimits?.[`${provider.id}/${provider.model}`] ?? 
0 - return usage < provider.tpmLimit * 1_000_000 * 0.8 + return usage < provider.tpmLimit * 1_000_000 }) .map((provider) => { topPriority = Math.min(topPriority, provider.priority) From 224548d87d9aa9b8fdbcba2a8c1f96d5f2679ffa Mon Sep 17 00:00:00 2001 From: OpeOginni <107570612+OpeOginni@users.noreply.github.com> Date: Tue, 21 Apr 2026 08:38:56 +0200 Subject: [PATCH 14/73] fix(desktop): adjust layout properties in DialogSelectServer component (#23589) --- packages/app/src/components/dialog-select-server.tsx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/app/src/components/dialog-select-server.tsx b/packages/app/src/components/dialog-select-server.tsx index dd92edec3e..0cb5a2d604 100644 --- a/packages/app/src/components/dialog-select-server.tsx +++ b/packages/app/src/components/dialog-select-server.tsx @@ -504,7 +504,7 @@ export function DialogSelectServer() { return ( -
+
{(i) => { const key = ServerConnection.key(i) @@ -619,7 +619,7 @@ export function DialogSelectServer() { -
+
Date: Tue, 21 Apr 2026 17:54:53 +1000 Subject: [PATCH 15/73] fix(core): use file:// URLs for local dynamic import() on Windows+Node (#23639) --- packages/opencode/src/provider/provider.ts | 6 +++++- packages/opencode/src/tool/registry.ts | 6 +++--- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/packages/opencode/src/provider/provider.ts b/packages/opencode/src/provider/provider.ts index 6e116fe41e..d643f25373 100644 --- a/packages/opencode/src/provider/provider.ts +++ b/packages/opencode/src/provider/provider.ts @@ -19,6 +19,7 @@ import { zod } from "@/util/effect-zod" import { iife } from "@/util/iife" import { Global } from "../global" import path from "path" +import { pathToFileURL } from "url" import { Effect, Layer, Context, Schema, Types } from "effect" import { EffectBridge } from "@/effect" import { InstanceState } from "@/effect" @@ -1506,7 +1507,10 @@ const layer: Layer.Layer< installedPath = model.api.npm } - const mod = await import(installedPath) + // `installedPath` is a local entry path or an existing `file://` URL. Normalize + // only path inputs so Node on Windows accepts the dynamic import. + const importSpec = installedPath.startsWith("file://") ? installedPath : pathToFileURL(installedPath).href + const mod = await import(importSpec) const fn = mod[Object.keys(mod).find((key) => key.startsWith("create"))!] const loaded = fn({ diff --git a/packages/opencode/src/tool/registry.ts b/packages/opencode/src/tool/registry.ts index e27593e597..0211e33bcb 100644 --- a/packages/opencode/src/tool/registry.ts +++ b/packages/opencode/src/tool/registry.ts @@ -157,9 +157,9 @@ export const layer: Layer.Layer< if (matches.length) yield* config.waitForDependencies() for (const match of matches) { const namespace = path.basename(match, path.extname(match)) - const mod = yield* Effect.promise( - () => import(process.platform === "win32" ? 
match : pathToFileURL(match).href), - ) + // `match` is an absolute filesystem path from `Glob.scanSync(..., { absolute: true })`. + // Import it as `file://` so Node on Windows accepts the dynamic import. + const mod = yield* Effect.promise(() => import(pathToFileURL(match).href)) for (const [id, def] of Object.entries(mod)) { custom.push(fromPlugin(id === "default" ? namespace : `${namespace}_${id}`, def)) } From febadc5589401a3112ba7788e7fb5837fbf95d3e Mon Sep 17 00:00:00 2001 From: Brendan Allan <14191578+Brendonovich@users.noreply.github.com> Date: Tue, 21 Apr 2026 18:49:04 +0800 Subject: [PATCH 16/73] fix(ui): correct diff render condition logic (#23670) --- packages/ui/src/components/session-review.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/ui/src/components/session-review.tsx b/packages/ui/src/components/session-review.tsx index 6e2b0853ac..94bca6727d 100644 --- a/packages/ui/src/components/session-review.tsx +++ b/packages/ui/src/components/session-review.tsx @@ -388,7 +388,7 @@ export const SessionReview = (props: SessionReviewProps) => { const file = diff.file // binary files have empty diffs that we can't render - const diffCanRender = () => diff.additions !== 0 && diff.deletions !== 0 + const diffCanRender = () => diff.additions !== 0 || diff.deletions !== 0 const expanded = createMemo(() => open().includes(file)) const mounted = createMemo(() => expanded() && (!!store.visible[file] || pinned(file))) From 811a7e9a8bf04f93eff6b9efdec7c87991aead55 Mon Sep 17 00:00:00 2001 From: Brendan Allan <14191578+Brendonovich@users.noreply.github.com> Date: Tue, 21 Apr 2026 19:10:50 +0800 Subject: [PATCH 17/73] feat(app): allow disabling progress bar in settings (#23674) --- packages/app/src/components/settings-general.tsx | 12 ++++++++++++ packages/app/src/context/settings.tsx | 9 +++++++++ packages/app/src/i18n/ar.ts | 3 +++ packages/app/src/i18n/br.ts | 3 +++ packages/app/src/i18n/bs.ts | 3 +++ packages/app/src/i18n/da.ts | 3 
+++ packages/app/src/i18n/de.ts | 3 +++ packages/app/src/i18n/en.ts | 3 +++ packages/app/src/i18n/es.ts | 3 +++ packages/app/src/i18n/fr.ts | 3 +++ packages/app/src/i18n/ja.ts | 3 +++ packages/app/src/i18n/ko.ts | 3 +++ packages/app/src/i18n/no.ts | 3 +++ packages/app/src/i18n/pl.ts | 3 +++ packages/app/src/i18n/ru.ts | 3 +++ packages/app/src/i18n/th.ts | 3 +++ packages/app/src/i18n/tr.ts | 4 ++++ packages/app/src/i18n/zh.ts | 2 ++ packages/app/src/i18n/zht.ts | 2 ++ packages/app/src/pages/session/message-timeline.tsx | 2 +- 20 files changed, 72 insertions(+), 1 deletion(-) diff --git a/packages/app/src/components/settings-general.tsx b/packages/app/src/components/settings-general.tsx index 490bc2e484..13651aac06 100644 --- a/packages/app/src/components/settings-general.tsx +++ b/packages/app/src/components/settings-general.tsx @@ -280,6 +280,18 @@ export const SettingsGeneral: Component = () => { />
+ + +
+ settings.general.setShowSessionProgressBar(checked)} + /> +
+
) diff --git a/packages/app/src/context/settings.tsx b/packages/app/src/context/settings.tsx index 6d4f3d2cda..be2fb49d7e 100644 --- a/packages/app/src/context/settings.tsx +++ b/packages/app/src/context/settings.tsx @@ -31,6 +31,7 @@ export interface Settings { showReasoningSummaries: boolean shellToolPartsExpanded: boolean editToolPartsExpanded: boolean + showSessionProgressBar: boolean } updates: { startup: boolean @@ -115,6 +116,7 @@ const defaultSettings: Settings = { showReasoningSummaries: false, shellToolPartsExpanded: false, editToolPartsExpanded: false, + showSessionProgressBar: true, }, updates: { startup: true, @@ -227,6 +229,13 @@ export const { use: useSettings, provider: SettingsProvider } = createSimpleCont setEditToolPartsExpanded(value: boolean) { setStore("general", "editToolPartsExpanded", value) }, + showSessionProgressBar: withFallback( + () => store.general?.showSessionProgressBar, + defaultSettings.general.showSessionProgressBar, + ), + setShowSessionProgressBar(value: boolean) { + setStore("general", "showSessionProgressBar", value) + }, }, updates: { startup: withFallback(() => store.updates?.startup, defaultSettings.updates.startup), diff --git a/packages/app/src/i18n/ar.ts b/packages/app/src/i18n/ar.ts index 9e9a88c2d0..e3f1209f21 100644 --- a/packages/app/src/i18n/ar.ts +++ b/packages/app/src/i18n/ar.ts @@ -582,6 +582,9 @@ export const dict = { "settings.general.row.editToolPartsExpanded.title": "توسيع أجزاء أداة edit", "settings.general.row.editToolPartsExpanded.description": "إظهار أجزاء أدوات edit و write و patch موسعة بشكل افتراضي في الشريط الزمني", + "settings.general.row.showSessionProgressBar.title": "إظهار شريط تقدم الجلسة", + "settings.general.row.showSessionProgressBar.description": + "عرض شريط التقدم المتحرك أعلى الجلسة أثناء عمل الوكيل", "settings.general.row.wayland.title": "استخدام Wayland الأصلي", "settings.general.row.wayland.description": "تعطيل التراجع إلى X11 على Wayland. 
يتطلب إعادة التشغيل.", "settings.general.row.wayland.tooltip": diff --git a/packages/app/src/i18n/br.ts b/packages/app/src/i18n/br.ts index 5fd1aee763..022d012984 100644 --- a/packages/app/src/i18n/br.ts +++ b/packages/app/src/i18n/br.ts @@ -590,6 +590,9 @@ export const dict = { "settings.general.row.editToolPartsExpanded.title": "Expandir partes da ferramenta de edição", "settings.general.row.editToolPartsExpanded.description": "Mostrar partes das ferramentas de edição, escrita e patch expandidas por padrão na linha do tempo", + "settings.general.row.showSessionProgressBar.title": "Mostrar barra de progresso da sessão", + "settings.general.row.showSessionProgressBar.description": + "Exibir a barra de progresso animada no topo da sessão quando o agente estiver trabalhando", "settings.general.row.wayland.title": "Usar Wayland nativo", "settings.general.row.wayland.description": "Desabilitar fallback X11 no Wayland. Requer reinicialização.", "settings.general.row.wayland.tooltip": diff --git a/packages/app/src/i18n/bs.ts b/packages/app/src/i18n/bs.ts index f872db1f00..15d8376ab6 100644 --- a/packages/app/src/i18n/bs.ts +++ b/packages/app/src/i18n/bs.ts @@ -655,6 +655,9 @@ export const dict = { "settings.general.row.editToolPartsExpanded.title": "Proširi dijelove alata za uređivanje", "settings.general.row.editToolPartsExpanded.description": "Prikaži dijelove alata za uređivanje, pisanje i patch podrazumijevano proširene na vremenskoj traci", + "settings.general.row.showSessionProgressBar.title": "Prikaži traku napretka sesije", + "settings.general.row.showSessionProgressBar.description": + "Prikaži animiranu traku napretka na vrhu sesije kada agent radi", "settings.general.row.wayland.title": "Koristi nativni Wayland", "settings.general.row.wayland.description": "Onemogući X11 fallback na Waylandu. 
Zahtijeva restart.", "settings.general.row.wayland.tooltip": diff --git a/packages/app/src/i18n/da.ts b/packages/app/src/i18n/da.ts index 82f4fe3f63..03cfe2b786 100644 --- a/packages/app/src/i18n/da.ts +++ b/packages/app/src/i18n/da.ts @@ -649,6 +649,9 @@ export const dict = { "settings.general.row.editToolPartsExpanded.title": "Udvid edit-værktøjsdele", "settings.general.row.editToolPartsExpanded.description": "Vis edit-, write- og patch-værktøjsdele udvidet som standard i tidslinjen", + "settings.general.row.showSessionProgressBar.title": "Vis sessionens fremdriftslinje", + "settings.general.row.showSessionProgressBar.description": + "Vis den animerede fremdriftslinje øverst i sessionen, når agenten arbejder", "settings.general.row.wayland.title": "Brug native Wayland", "settings.general.row.wayland.description": "Deaktiver X11-fallback på Wayland. Kræver genstart.", "settings.general.row.wayland.tooltip": diff --git a/packages/app/src/i18n/de.ts b/packages/app/src/i18n/de.ts index d5b95459ac..ccb88e9f41 100644 --- a/packages/app/src/i18n/de.ts +++ b/packages/app/src/i18n/de.ts @@ -601,6 +601,9 @@ export const dict = { "settings.general.row.editToolPartsExpanded.title": "Edit-Tool-Abschnitte ausklappen", "settings.general.row.editToolPartsExpanded.description": "Edit-, Write- und Patch-Tool-Abschnitte standardmäßig in der Timeline ausgeklappt anzeigen", + "settings.general.row.showSessionProgressBar.title": "Sitzungsfortschrittsleiste anzeigen", + "settings.general.row.showSessionProgressBar.description": + "Die animierte Fortschrittsleiste oben in der Sitzung anzeigen, wenn der Agent arbeitet", "settings.general.row.wayland.title": "Natives Wayland verwenden", "settings.general.row.wayland.description": "X11-Fallback unter Wayland deaktivieren. 
Erfordert Neustart.", "settings.general.row.wayland.tooltip": diff --git a/packages/app/src/i18n/en.ts b/packages/app/src/i18n/en.ts index 8a2fbf87f0..ed80b38ce4 100644 --- a/packages/app/src/i18n/en.ts +++ b/packages/app/src/i18n/en.ts @@ -762,6 +762,9 @@ export const dict = { "settings.general.row.editToolPartsExpanded.title": "Expand edit tool parts", "settings.general.row.editToolPartsExpanded.description": "Show edit, write, and patch tool parts expanded by default in the timeline", + "settings.general.row.showSessionProgressBar.title": "Show session progress bar", + "settings.general.row.showSessionProgressBar.description": + "Display the animated progress bar at the top of the session when the agent is working", "settings.general.row.wayland.title": "Use native Wayland", "settings.general.row.wayland.description": "Disable X11 fallback on Wayland. Requires restart.", diff --git a/packages/app/src/i18n/es.ts b/packages/app/src/i18n/es.ts index 12bc45cf38..0b4789c2aa 100644 --- a/packages/app/src/i18n/es.ts +++ b/packages/app/src/i18n/es.ts @@ -659,6 +659,9 @@ export const dict = { "settings.general.row.editToolPartsExpanded.title": "Expandir partes de la herramienta de edición", "settings.general.row.editToolPartsExpanded.description": "Mostrar las partes de las herramientas de edición, escritura y parcheado expandidas por defecto en la línea de tiempo", + "settings.general.row.showSessionProgressBar.title": "Mostrar barra de progreso de la sesión", + "settings.general.row.showSessionProgressBar.description": + "Mostrar la barra de progreso animada en la parte superior de la sesión cuando el agente esté trabajando", "settings.general.row.wayland.title": "Usar Wayland nativo", "settings.general.row.wayland.description": "Deshabilitar fallback a X11 en Wayland. 
Requiere reinicio.", "settings.general.row.wayland.tooltip": diff --git a/packages/app/src/i18n/fr.ts b/packages/app/src/i18n/fr.ts index 6c98b9ca1e..4d73f626b2 100644 --- a/packages/app/src/i18n/fr.ts +++ b/packages/app/src/i18n/fr.ts @@ -598,6 +598,9 @@ export const dict = { "settings.general.row.editToolPartsExpanded.title": "Développer les parties de l'outil edit", "settings.general.row.editToolPartsExpanded.description": "Afficher les parties des outils edit, write et patch développées par défaut dans la chronologie", + "settings.general.row.showSessionProgressBar.title": "Afficher la barre de progression de la session", + "settings.general.row.showSessionProgressBar.description": + "Afficher la barre de progression animée en haut de la session lorsque l'agent travaille", "settings.general.row.wayland.title": "Utiliser Wayland natif", "settings.general.row.wayland.description": "Désactiver le repli X11 sur Wayland. Nécessite un redémarrage.", "settings.general.row.wayland.tooltip": diff --git a/packages/app/src/i18n/ja.ts b/packages/app/src/i18n/ja.ts index 7678334127..493b1f17ff 100644 --- a/packages/app/src/i18n/ja.ts +++ b/packages/app/src/i18n/ja.ts @@ -587,6 +587,9 @@ export const dict = { "settings.general.row.editToolPartsExpanded.title": "edit ツールパーツを展開", "settings.general.row.editToolPartsExpanded.description": "タイムラインで edit、write、patch ツールパーツをデフォルトで展開して表示します", + "settings.general.row.showSessionProgressBar.title": "セッション進行状況バーを表示", + "settings.general.row.showSessionProgressBar.description": + "エージェントの作業中に、セッション上部にアニメーション付きの進行状況バーを表示します", "settings.general.row.wayland.title": "ネイティブWaylandを使用", "settings.general.row.wayland.description": "WaylandでのX11フォールバックを無効にします。再起動が必要です。", "settings.general.row.wayland.tooltip": diff --git a/packages/app/src/i18n/ko.ts b/packages/app/src/i18n/ko.ts index 76bf33df6f..0218cc1a9e 100644 --- a/packages/app/src/i18n/ko.ts +++ b/packages/app/src/i18n/ko.ts @@ -583,6 +583,9 @@ export const dict = { 
"settings.general.row.editToolPartsExpanded.title": "edit 도구 파트 펼치기", "settings.general.row.editToolPartsExpanded.description": "타임라인에서 기본적으로 edit, write, patch 도구 파트를 펼친 상태로 표시합니다", + "settings.general.row.showSessionProgressBar.title": "세션 진행 표시줄 표시", + "settings.general.row.showSessionProgressBar.description": + "에이전트가 작업 중일 때 세션 상단에 애니메이션 진행 표시줄을 표시합니다", "settings.general.row.wayland.title": "네이티브 Wayland 사용", "settings.general.row.wayland.description": "Wayland에서 X11 폴백을 비활성화합니다. 다시 시작해야 합니다.", "settings.general.row.wayland.tooltip": diff --git a/packages/app/src/i18n/no.ts b/packages/app/src/i18n/no.ts index 75e557b16b..43aa844200 100644 --- a/packages/app/src/i18n/no.ts +++ b/packages/app/src/i18n/no.ts @@ -656,6 +656,9 @@ export const dict = { "settings.general.row.editToolPartsExpanded.title": "Utvid edit-verktøydeler", "settings.general.row.editToolPartsExpanded.description": "Vis edit-, write- og patch-verktøydeler utvidet som standard i tidslinjen", + "settings.general.row.showSessionProgressBar.title": "Vis fremdriftslinje for sesjonen", + "settings.general.row.showSessionProgressBar.description": + "Vis den animerte fremdriftslinjen øverst i sesjonen når agenten jobber", "settings.general.row.wayland.title": "Bruk innebygd Wayland", "settings.general.row.wayland.description": "Deaktiver X11-fallback på Wayland. 
Krever omstart.", "settings.general.row.wayland.tooltip": diff --git a/packages/app/src/i18n/pl.ts b/packages/app/src/i18n/pl.ts index 0ab4a6906c..6c6d4dddc1 100644 --- a/packages/app/src/i18n/pl.ts +++ b/packages/app/src/i18n/pl.ts @@ -588,6 +588,9 @@ export const dict = { "settings.general.row.editToolPartsExpanded.title": "Rozwijaj elementy narzędzia edit", "settings.general.row.editToolPartsExpanded.description": "Domyślnie pokazuj rozwinięte elementy narzędzi edit, write i patch na osi czasu", + "settings.general.row.showSessionProgressBar.title": "Pokazuj pasek postępu sesji", + "settings.general.row.showSessionProgressBar.description": + "Wyświetlaj animowany pasek postępu u góry sesji, gdy agent pracuje", "settings.general.row.wayland.title": "Użyj natywnego Wayland", "settings.general.row.wayland.description": "Wyłącz fallback X11 na Wayland. Wymaga restartu.", "settings.general.row.wayland.tooltip": diff --git a/packages/app/src/i18n/ru.ts b/packages/app/src/i18n/ru.ts index 135c8e66c4..e0b094877a 100644 --- a/packages/app/src/i18n/ru.ts +++ b/packages/app/src/i18n/ru.ts @@ -656,6 +656,9 @@ export const dict = { "settings.general.row.editToolPartsExpanded.title": "Разворачивать элементы инструмента edit", "settings.general.row.editToolPartsExpanded.description": "Показывать элементы инструментов edit, write и patch в ленте развернутыми по умолчанию", + "settings.general.row.showSessionProgressBar.title": "Показывать индикатор прогресса сессии", + "settings.general.row.showSessionProgressBar.description": + "Показывать анимированный индикатор прогресса вверху сессии, когда агент работает", "settings.general.row.wayland.title": "Использовать нативный Wayland", "settings.general.row.wayland.description": "Отключить X11 fallback на Wayland. 
Требуется перезапуск.", "settings.general.row.wayland.tooltip": diff --git a/packages/app/src/i18n/th.ts b/packages/app/src/i18n/th.ts index 81674df32d..8a15f29c0b 100644 --- a/packages/app/src/i18n/th.ts +++ b/packages/app/src/i18n/th.ts @@ -647,6 +647,9 @@ export const dict = { "settings.general.row.editToolPartsExpanded.title": "ขยายส่วนเครื่องมือ edit", "settings.general.row.editToolPartsExpanded.description": "แสดงส่วนเครื่องมือ edit, write และ patch แบบขยายตามค่าเริ่มต้นในไทม์ไลน์", + "settings.general.row.showSessionProgressBar.title": "แสดงแถบความคืบหน้าของเซสชัน", + "settings.general.row.showSessionProgressBar.description": + "แสดงแถบความคืบหน้าแบบเคลื่อนไหวที่ด้านบนของเซสชันเมื่อเอเจนต์กำลังทำงาน", "settings.general.row.wayland.title": "ใช้ Wayland แบบเนทีฟ", "settings.general.row.wayland.description": "ปิดใช้งาน X11 fallback บน Wayland ต้องรีสตาร์ท", "settings.general.row.wayland.tooltip": "บน Linux ที่มีจอภาพรีเฟรชเรตแบบผสม Wayland แบบเนทีฟอาจเสถียรกว่า", diff --git a/packages/app/src/i18n/tr.ts b/packages/app/src/i18n/tr.ts index f3cb3ab464..f20c05000d 100644 --- a/packages/app/src/i18n/tr.ts +++ b/packages/app/src/i18n/tr.ts @@ -663,6 +663,10 @@ export const dict = { "settings.general.row.editToolPartsExpanded.description": "Zaman çizelgesinde düzenleme, yazma ve yama araç bileşenlerini varsayılan olarak genişletilmiş göster", + "settings.general.row.showSessionProgressBar.title": "Oturum ilerleme çubuğunu göster", + "settings.general.row.showSessionProgressBar.description": + "Ajan çalışırken oturumun üst kısmında animasyonlu ilerleme çubuğunu göster", + "settings.general.row.wayland.title": "Yerel Wayland kullan", "settings.general.row.wayland.description": "Wayland'da X11 geri dönüşünü devre dışı bırak. 
Yeniden başlatma gerektirir.", diff --git a/packages/app/src/i18n/zh.ts b/packages/app/src/i18n/zh.ts index d95bfd19ba..05310df965 100644 --- a/packages/app/src/i18n/zh.ts +++ b/packages/app/src/i18n/zh.ts @@ -646,6 +646,8 @@ export const dict = { "settings.general.row.shellToolPartsExpanded.description": "默认在时间线中展开 shell 工具部分", "settings.general.row.editToolPartsExpanded.title": "展开编辑工具部分", "settings.general.row.editToolPartsExpanded.description": "默认在时间线中展开 edit、write 和 patch 工具部分", + "settings.general.row.showSessionProgressBar.title": "显示会话进度条", + "settings.general.row.showSessionProgressBar.description": "当智能体正在工作时,在会话顶部显示动画进度条", "settings.general.row.wayland.title": "使用原生 Wayland", "settings.general.row.wayland.description": "在 Wayland 上禁用 X11 回退。需要重启。", "settings.general.row.wayland.tooltip": "在混合刷新率显示器的 Linux 系统上,原生 Wayland 可能更稳定。", diff --git a/packages/app/src/i18n/zht.ts b/packages/app/src/i18n/zht.ts index 4a88ca4fc8..43681c7793 100644 --- a/packages/app/src/i18n/zht.ts +++ b/packages/app/src/i18n/zht.ts @@ -642,6 +642,8 @@ export const dict = { "settings.general.row.shellToolPartsExpanded.description": "在時間軸中預設展開 shell 工具區塊", "settings.general.row.editToolPartsExpanded.title": "展開 edit 工具區塊", "settings.general.row.editToolPartsExpanded.description": "在時間軸中預設展開 edit、write 和 patch 工具區塊", + "settings.general.row.showSessionProgressBar.title": "顯示工作階段進度列", + "settings.general.row.showSessionProgressBar.description": "當代理程式正在運作時,在工作階段頂部顯示動畫進度列", "settings.general.row.wayland.title": "使用原生 Wayland", "settings.general.row.wayland.description": "在 Wayland 上停用 X11 後備模式。需要重新啟動。", "settings.general.row.wayland.tooltip": "在混合更新率螢幕的 Linux 系統上,原生 Wayland 可能更穩定。", diff --git a/packages/app/src/pages/session/message-timeline.tsx b/packages/app/src/pages/session/message-timeline.tsx index 978f188b6b..9e0fed11cc 100644 --- a/packages/app/src/pages/session/message-timeline.tsx +++ b/packages/app/src/pages/session/message-timeline.tsx @@ -721,7 +721,7 @@ export function 
MessageTimeline(props: { "md:max-w-200 md:mx-auto 2xl:max-w-[1000px]": props.centered, }} > - +
Date: Tue, 21 Apr 2026 11:11:54 +0000 Subject: [PATCH 18/73] chore: generate --- packages/app/src/i18n/ar.ts | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/packages/app/src/i18n/ar.ts b/packages/app/src/i18n/ar.ts index e3f1209f21..efb2919a5b 100644 --- a/packages/app/src/i18n/ar.ts +++ b/packages/app/src/i18n/ar.ts @@ -583,8 +583,7 @@ export const dict = { "settings.general.row.editToolPartsExpanded.description": "إظهار أجزاء أدوات edit و write و patch موسعة بشكل افتراضي في الشريط الزمني", "settings.general.row.showSessionProgressBar.title": "إظهار شريط تقدم الجلسة", - "settings.general.row.showSessionProgressBar.description": - "عرض شريط التقدم المتحرك أعلى الجلسة أثناء عمل الوكيل", + "settings.general.row.showSessionProgressBar.description": "عرض شريط التقدم المتحرك أعلى الجلسة أثناء عمل الوكيل", "settings.general.row.wayland.title": "استخدام Wayland الأصلي", "settings.general.row.wayland.description": "تعطيل التراجع إلى X11 على Wayland. يتطلب إعادة التشغيل.", "settings.general.row.wayland.tooltip": From 8cc2c81d57f7c3ca8942d0e2461bc676bd25e8cc Mon Sep 17 00:00:00 2001 From: Brendan Allan <14191578+Brendonovich@users.noreply.github.com> Date: Tue, 21 Apr 2026 19:12:32 +0800 Subject: [PATCH 19/73] fix(app): prevent prompt input animations from rerunning on every render (#23676) --- packages/app/src/components/prompt-input.tsx | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/packages/app/src/components/prompt-input.tsx b/packages/app/src/components/prompt-input.tsx index 1131baa498..06c91c2922 100644 --- a/packages/app/src/components/prompt-input.tsx +++ b/packages/app/src/components/prompt-input.tsx @@ -54,7 +54,7 @@ import { PromptImageAttachments } from "./prompt-input/image-attachments" import { PromptDragOverlay } from "./prompt-input/drag-overlay" import { promptPlaceholder } from "./prompt-input/placeholder" import { ImagePreview } from "@opencode-ai/ui/image-preview" -import { useQueries, useQuery 
} from "@tanstack/solid-query" +import { useQueries } from "@tanstack/solid-query" import { loadAgentsQuery, loadProvidersQuery } from "@/context/global-sync/bootstrap" interface PromptInputProps { @@ -1257,7 +1257,9 @@ export const PromptInput: Component = (props) => { })) const agentsLoading = () => agentsQuery.isLoading + const agentsShouldFadeIn = createMemo((prev) => prev ?? agentsLoading()) const providersLoading = () => agentsLoading() || providersQuery.isLoading || globalProvidersQuery.isLoading + const providersShouldFadeIn = createMemo((prev) => prev ?? providersLoading()) const [promptReady] = createResource( () => prompt.ready().promise, @@ -1460,7 +1462,10 @@ export const PromptInput: Component = (props) => {
-
+
= (props) => { -
+
0} fallback={ @@ -1557,7 +1565,10 @@ export const PromptInput: Component = (props) => {
-
+
Date: Tue, 21 Apr 2026 10:42:54 -0400 Subject: [PATCH 20/73] fix(core): fix permissions routing when using remote workspace (#23593) --- .../opencode/src/cli/cmd/tui/routes/session/permission.tsx | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/packages/opencode/src/cli/cmd/tui/routes/session/permission.tsx b/packages/opencode/src/cli/cmd/tui/routes/session/permission.tsx index 54cc86a40d..e48f348b98 100644 --- a/packages/opencode/src/cli/cmd/tui/routes/session/permission.tsx +++ b/packages/opencode/src/cli/cmd/tui/routes/session/permission.tsx @@ -9,6 +9,7 @@ import { useSDK } from "../../context/sdk" import { SplitBorder } from "../../component/border" import { useSync } from "../../context/sync" import { useTextareaKeybindings } from "../../component/textarea-keybindings" +import { useProject } from "../../context/project" import path from "path" import { LANGUAGE_EXTENSIONS } from "@/lsp/language" import { Keybind } from "@/util" @@ -131,6 +132,7 @@ function TextBody(props: { title: string; description?: string; icon?: string }) export function PermissionPrompt(props: { request: PermissionRequest }) { const sdk = useSDK() + const project = useProject() const sync = useSync() const [store, setStore] = createStore({ stage: "permission" as PermissionStage, @@ -187,6 +189,7 @@ export function PermissionPrompt(props: { request: PermissionRequest }) { void sdk.client.permission.reply({ reply: "always", requestID: props.request.id, + workspace: project.workspace.current(), }) }} /> @@ -198,6 +201,7 @@ export function PermissionPrompt(props: { request: PermissionRequest }) { reply: "reject", requestID: props.request.id, message: message || undefined, + workspace: project.workspace.current(), }) }} onCancel={() => { @@ -450,12 +454,14 @@ export function PermissionPrompt(props: { request: PermissionRequest }) { void sdk.client.permission.reply({ reply: "reject", requestID: props.request.id, + workspace: project.workspace.current(), }) return } void 
sdk.client.permission.reply({ reply: "once", requestID: props.request.id, + workspace: project.workspace.current(), }) }} /> From 2486621ca1b9d35ed15ee6c2ff2a04ba46c8e02a Mon Sep 17 00:00:00 2001 From: Aiden Cline <63023139+rekram1-node@users.noreply.github.com> Date: Tue, 21 Apr 2026 11:31:20 -0400 Subject: [PATCH 21/73] chore: kill unused tool (#23701) --- .../opencode/specs/effect/instance-context.md | 1 - packages/opencode/specs/effect/schema.md | 1 - packages/opencode/specs/effect/tools.md | 2 - packages/opencode/src/config/agent.ts | 2 +- packages/opencode/src/config/config.ts | 2 +- packages/opencode/src/permission/index.ts | 2 +- packages/opencode/src/tool/multiedit.ts | 61 ------------------- packages/opencode/src/tool/multiedit.txt | 41 ------------- packages/opencode/test/agent/agent.test.ts | 2 +- packages/opencode/test/config/config.test.ts | 29 --------- .../opencode/test/permission/next.test.ts | 5 +- .../web/src/content/docs/ar/permissions.mdx | 2 +- packages/web/src/content/docs/ar/tools.mdx | 4 +- .../web/src/content/docs/bs/permissions.mdx | 2 +- packages/web/src/content/docs/bs/tools.mdx | 4 +- .../web/src/content/docs/da/permissions.mdx | 2 +- packages/web/src/content/docs/da/tools.mdx | 4 +- .../web/src/content/docs/de/permissions.mdx | 2 +- packages/web/src/content/docs/de/tools.mdx | 4 +- .../web/src/content/docs/es/permissions.mdx | 2 +- packages/web/src/content/docs/es/tools.mdx | 4 +- .../web/src/content/docs/fr/permissions.mdx | 2 +- packages/web/src/content/docs/fr/tools.mdx | 4 +- .../web/src/content/docs/it/permissions.mdx | 2 +- packages/web/src/content/docs/it/tools.mdx | 4 +- .../web/src/content/docs/ja/permissions.mdx | 2 +- packages/web/src/content/docs/ja/tools.mdx | 4 +- .../web/src/content/docs/ko/permissions.mdx | 2 +- packages/web/src/content/docs/ko/tools.mdx | 4 +- .../web/src/content/docs/nb/permissions.mdx | 2 +- packages/web/src/content/docs/nb/tools.mdx | 4 +- packages/web/src/content/docs/permissions.mdx | 2 +- 
.../web/src/content/docs/pl/permissions.mdx | 2 +- packages/web/src/content/docs/pl/tools.mdx | 4 +- .../src/content/docs/pt-br/permissions.mdx | 2 +- packages/web/src/content/docs/pt-br/tools.mdx | 4 +- .../web/src/content/docs/ru/permissions.mdx | 2 +- packages/web/src/content/docs/ru/tools.mdx | 4 +- .../web/src/content/docs/th/permissions.mdx | 2 +- packages/web/src/content/docs/th/tools.mdx | 4 +- packages/web/src/content/docs/tools.mdx | 4 +- .../web/src/content/docs/tr/permissions.mdx | 2 +- packages/web/src/content/docs/tr/tools.mdx | 4 +- .../src/content/docs/zh-cn/permissions.mdx | 2 +- packages/web/src/content/docs/zh-cn/tools.mdx | 4 +- .../src/content/docs/zh-tw/permissions.mdx | 2 +- packages/web/src/content/docs/zh-tw/tools.mdx | 4 +- 47 files changed, 60 insertions(+), 196 deletions(-) delete mode 100644 packages/opencode/src/tool/multiedit.ts delete mode 100644 packages/opencode/src/tool/multiedit.txt diff --git a/packages/opencode/specs/effect/instance-context.md b/packages/opencode/specs/effect/instance-context.md index 7d0d7eb13c..6d63715030 100644 --- a/packages/opencode/specs/effect/instance-context.md +++ b/packages/opencode/specs/effect/instance-context.md @@ -224,7 +224,6 @@ These tools mostly use direct getters for path resolution and repo-relative disp - `src/tool/bash.ts` - `src/tool/edit.ts` - `src/tool/lsp.ts` -- `src/tool/multiedit.ts` - `src/tool/plan.ts` - `src/tool/read.ts` - `src/tool/write.ts` diff --git a/packages/opencode/specs/effect/schema.md b/packages/opencode/specs/effect/schema.md index 72ee10350d..2fcbfc12be 100644 --- a/packages/opencode/specs/effect/schema.md +++ b/packages/opencode/specs/effect/schema.md @@ -216,7 +216,6 @@ emitted JSON Schema must stay byte-identical. 
- [ ] `src/tool/grep.ts` - [ ] `src/tool/invalid.ts` - [ ] `src/tool/lsp.ts` -- [ ] `src/tool/multiedit.ts` - [ ] `src/tool/plan.ts` - [ ] `src/tool/question.ts` - [ ] `src/tool/read.ts` diff --git a/packages/opencode/specs/effect/tools.md b/packages/opencode/specs/effect/tools.md index 7b47831709..3cc277357b 100644 --- a/packages/opencode/specs/effect/tools.md +++ b/packages/opencode/specs/effect/tools.md @@ -46,7 +46,6 @@ These exported tool definitions currently use `Tool.define(...)` in `src/tool`: - [x] `grep.ts` - [x] `invalid.ts` - [x] `lsp.ts` -- [x] `multiedit.ts` - [x] `plan.ts` - [x] `question.ts` - [x] `read.ts` @@ -82,7 +81,6 @@ Notable items that are already effectively on the target path and do not need se - `write.ts` - `codesearch.ts` - `websearch.ts` -- `multiedit.ts` - `edit.ts` ## Filesystem notes diff --git a/packages/opencode/src/config/agent.ts b/packages/opencode/src/config/agent.ts index 1469522d98..85a214e122 100644 --- a/packages/opencode/src/config/agent.ts +++ b/packages/opencode/src/config/agent.ts @@ -93,7 +93,7 @@ const normalize = (agent: z.infer) => { const permission: ConfigPermission.Info = {} for (const [tool, enabled] of Object.entries(agent.tools ?? {})) { const action = enabled ? "allow" : "deny" - if (tool === "write" || tool === "edit" || tool === "patch" || tool === "multiedit") { + if (tool === "write" || tool === "edit" || tool === "patch") { permission.edit = action continue } diff --git a/packages/opencode/src/config/config.ts b/packages/opencode/src/config/config.ts index 55684fc70d..cbe7cf7a2a 100644 --- a/packages/opencode/src/config/config.ts +++ b/packages/opencode/src/config/config.ts @@ -660,7 +660,7 @@ export const layer = Layer.effect( const perms: Record = {} for (const [tool, enabled] of Object.entries(result.tools)) { const action: ConfigPermission.Action = enabled ? 
"allow" : "deny" - if (tool === "write" || tool === "edit" || tool === "patch" || tool === "multiedit") { + if (tool === "write" || tool === "edit" || tool === "patch") { perms.edit = action continue } diff --git a/packages/opencode/src/permission/index.ts b/packages/opencode/src/permission/index.ts index b9a221155c..caf66cc947 100644 --- a/packages/opencode/src/permission/index.ts +++ b/packages/opencode/src/permission/index.ts @@ -307,7 +307,7 @@ export function merge(...rulesets: Ruleset[]): Ruleset { return rulesets.flat() } -const EDIT_TOOLS = ["edit", "write", "apply_patch", "multiedit"] +const EDIT_TOOLS = ["edit", "write", "apply_patch"] export function disabled(tools: string[], ruleset: Ruleset): Set { const result = new Set() diff --git a/packages/opencode/src/tool/multiedit.ts b/packages/opencode/src/tool/multiedit.ts deleted file mode 100644 index 004d3c870d..0000000000 --- a/packages/opencode/src/tool/multiedit.ts +++ /dev/null @@ -1,61 +0,0 @@ -import z from "zod" -import { Effect } from "effect" -import * as Tool from "./tool" -import { EditTool } from "./edit" -import DESCRIPTION from "./multiedit.txt" -import path from "path" -import { Instance } from "../project/instance" - -export const MultiEditTool = Tool.define( - "multiedit", - Effect.gen(function* () { - const editInfo = yield* EditTool - const edit = yield* editInfo.init() - - return { - description: DESCRIPTION, - parameters: z.object({ - filePath: z.string().describe("The absolute path to the file to modify"), - edits: z - .array( - z.object({ - filePath: z.string().describe("The absolute path to the file to modify"), - oldString: z.string().describe("The text to replace"), - newString: z.string().describe("The text to replace it with (must be different from oldString)"), - replaceAll: z.boolean().optional().describe("Replace all occurrences of oldString (default false)"), - }), - ) - .describe("Array of edit operations to perform sequentially on the file"), - }), - execute: ( - params: { 
- filePath: string - edits: Array<{ filePath: string; oldString: string; newString: string; replaceAll?: boolean }> - }, - ctx: Tool.Context, - ) => - Effect.gen(function* () { - const results = [] - for (const [, entry] of params.edits.entries()) { - const result = yield* edit.execute( - { - filePath: params.filePath, - oldString: entry.oldString, - newString: entry.newString, - replaceAll: entry.replaceAll, - }, - ctx, - ) - results.push(result) - } - return { - title: path.relative(Instance.worktree, params.filePath), - metadata: { - results: results.map((r) => r.metadata), - }, - output: results.at(-1)!.output, - } - }), - } - }), -) diff --git a/packages/opencode/src/tool/multiedit.txt b/packages/opencode/src/tool/multiedit.txt deleted file mode 100644 index bb4815124d..0000000000 --- a/packages/opencode/src/tool/multiedit.txt +++ /dev/null @@ -1,41 +0,0 @@ -This is a tool for making multiple edits to a single file in one operation. It is built on top of the Edit tool and allows you to perform multiple find-and-replace operations efficiently. Prefer this tool over the Edit tool when you need to make multiple edits to the same file. - -Before using this tool: - -1. Use the Read tool to understand the file's contents and context -2. Verify the directory path is correct - -To make multiple file edits, provide the following: -1. file_path: The absolute path to the file to modify (must be absolute, not relative) -2. edits: An array of edit operations to perform, where each edit contains: - - oldString: The text to replace (must match the file contents exactly, including all whitespace and indentation) - - newString: The edited text to replace the oldString - - replaceAll: Replace all occurrences of oldString. This parameter is optional and defaults to false. 
- -IMPORTANT: -- All edits are applied in sequence, in the order they are provided -- Each edit operates on the result of the previous edit -- All edits must be valid for the operation to succeed - if any edit fails, none will be applied -- This tool is ideal when you need to make several changes to different parts of the same file - -CRITICAL REQUIREMENTS: -1. All edits follow the same requirements as the single Edit tool -2. The edits are atomic - either all succeed or none are applied -3. Plan your edits carefully to avoid conflicts between sequential operations - -WARNING: -- The tool will fail if edits.oldString doesn't match the file contents exactly (including whitespace) -- The tool will fail if edits.oldString and edits.newString are the same -- Since edits are applied in sequence, ensure that earlier edits don't affect the text that later edits are trying to find - -When making edits: -- Ensure all edits result in idiomatic, correct code -- Do not leave the code in a broken state -- Always use absolute file paths (starting with /) -- Only use emojis if the user explicitly requests it. Avoid adding emojis to files unless asked. -- Use replaceAll for replacing and renaming strings across the file. This parameter is useful if you want to rename a variable for instance. 
- -If you want to create a new file, use: -- A new file path, including dir name if needed -- First edit: empty oldString and the new file's contents as newString -- Subsequent edits: normal edit operations on the created content diff --git a/packages/opencode/test/agent/agent.test.ts b/packages/opencode/test/agent/agent.test.ts index 7e9a6fe90b..50a3668f98 100644 --- a/packages/opencode/test/agent/agent.test.ts +++ b/packages/opencode/test/agent/agent.test.ts @@ -474,7 +474,7 @@ test("legacy tools config converts to permissions", async () => { }) }) -test("legacy tools config maps write/edit/patch/multiedit to edit permission", async () => { +test("legacy tools config maps write/edit/patch to edit permission", async () => { await using tmp = await tmpdir({ config: { agent: { diff --git a/packages/opencode/test/config/config.test.ts b/packages/opencode/test/config/config.test.ts index 9f2bf9db9a..3fafdadaa6 100644 --- a/packages/opencode/test/config/config.test.ts +++ b/packages/opencode/test/config/config.test.ts @@ -1427,35 +1427,6 @@ test("migrates legacy patch tool to edit permission", async () => { }) }) -test("migrates legacy multiedit tool to edit permission", async () => { - await using tmp = await tmpdir({ - init: async (dir) => { - await Filesystem.write( - path.join(dir, "opencode.json"), - JSON.stringify({ - $schema: "https://opencode.ai/config.json", - agent: { - test: { - tools: { - multiedit: false, - }, - }, - }, - }), - ) - }, - }) - await Instance.provide({ - directory: tmp.path, - fn: async () => { - const config = await load() - expect(config.agent?.["test"]?.permission).toEqual({ - edit: "deny", - }) - }, - }) -}) - test("migrates mixed legacy tools config", async () => { await using tmp = await tmpdir({ init: async (dir) => { diff --git a/packages/opencode/test/permission/next.test.ts b/packages/opencode/test/permission/next.test.ts index d654d4b876..21a9d8400b 100644 --- a/packages/opencode/test/permission/next.test.ts +++ 
b/packages/opencode/test/permission/next.test.ts @@ -422,9 +422,9 @@ test("disabled - disables tool when denied", () => { expect(result.has("read")).toBe(false) }) -test("disabled - disables edit/write/apply_patch/multiedit when edit denied", () => { +test("disabled - disables edit/write/apply_patch when edit denied", () => { const result = Permission.disabled( - ["edit", "write", "apply_patch", "multiedit", "bash"], + ["edit", "write", "apply_patch", "bash"], [ { permission: "*", pattern: "*", action: "allow" }, { permission: "edit", pattern: "*", action: "deny" }, @@ -433,7 +433,6 @@ test("disabled - disables edit/write/apply_patch/multiedit when edit denied", () expect(result.has("edit")).toBe(true) expect(result.has("write")).toBe(true) expect(result.has("apply_patch")).toBe(true) - expect(result.has("multiedit")).toBe(true) expect(result.has("bash")).toBe(false) }) diff --git a/packages/web/src/content/docs/ar/permissions.mdx b/packages/web/src/content/docs/ar/permissions.mdx index bb21d00b24..2f4d1c47c5 100644 --- a/packages/web/src/content/docs/ar/permissions.mdx +++ b/packages/web/src/content/docs/ar/permissions.mdx @@ -130,7 +130,7 @@ description: تحكّم في الإجراءات التي تتطلب موافقة تُعرَّف أذونات OpenCode بأسماء الأدوات، بالإضافة إلى بعض حواجز الأمان: - `read` — قراءة ملف (يطابق مسار الملف) -- `edit` — جميع تعديلات الملفات (يشمل `edit` و`write` و`patch` و`multiedit`) +- `edit` — جميع تعديلات الملفات (يشمل `edit` و`write` و`patch`) - `glob` — مطابقة أسماء الملفات (يطابق نمط الـ glob) - `grep` — البحث في المحتوى (يطابق نمط regex) - `bash` — تشغيل أوامر shell (يطابق الأوامر المُحلَّلة مثل `git status --porcelain`) diff --git a/packages/web/src/content/docs/ar/tools.mdx b/packages/web/src/content/docs/ar/tools.mdx index 3f3c9ee068..0323e8d8ec 100644 --- a/packages/web/src/content/docs/ar/tools.mdx +++ b/packages/web/src/content/docs/ar/tools.mdx @@ -95,7 +95,7 @@ description: إدارة الأدوات التي يمكن لـ LLM استخدام استخدم هذا للسماح لـ LLM بإنشاء ملفات جديدة. 
سيكتب فوق الملفات الموجودة إذا كانت موجودة بالفعل. :::note -تُدار أداة `write` عبر إذن `edit`، والذي يشمل جميع تعديلات الملفات (`edit` و`write` و`patch` و`multiedit`). +تُدار أداة `write` عبر إذن `edit`، والذي يشمل جميع تعديلات الملفات (`edit` و`write` و`patch`). ::: --- @@ -190,7 +190,7 @@ description: إدارة الأدوات التي يمكن لـ LLM استخدام تطبق هذه الأداة ملفات الرقع على قاعدة الشفرة الخاصة بك. وهي مفيدة لتطبيق الفروقات (Diffs) والرقع من مصادر متعددة. :::note -تُدار أداة `patch` عبر إذن `edit`، والذي يشمل جميع تعديلات الملفات (`edit` و`write` و`patch` و`multiedit`). +تُدار أداة `patch` عبر إذن `edit`، والذي يشمل جميع تعديلات الملفات (`edit` و`write` و`patch`). ::: --- diff --git a/packages/web/src/content/docs/bs/permissions.mdx b/packages/web/src/content/docs/bs/permissions.mdx index e27fa130b3..ea8c3f00f3 100644 --- a/packages/web/src/content/docs/bs/permissions.mdx +++ b/packages/web/src/content/docs/bs/permissions.mdx @@ -125,7 +125,7 @@ Držite ovu listu fokusiranom na pouzdane putanje, a dodatna allow/deny pravila Dozvole OpenCode su označene imenom alata, plus nekoliko sigurnosnih mjera: - `read` — čitanje datoteke (odgovara putanji datoteke) -- `edit` — sve izmjene fajlova (pokriva `edit`, `write`, `patch`, `multiedit`) +- `edit` — sve izmjene fajlova (pokriva `edit`, `write`, `patch`) - `glob` — globbiranje fajla (odgovara glob uzorku) - `grep` — pretraga sadržaja (podudara se sa regularnim izrazom) - `bash` — izvođenje komandi ljuske (podudara se s raščlanjenim komandama kao što je `git status --porcelain`) diff --git a/packages/web/src/content/docs/bs/tools.mdx b/packages/web/src/content/docs/bs/tools.mdx index db04295fd2..c2d5aa2dd2 100644 --- a/packages/web/src/content/docs/bs/tools.mdx +++ b/packages/web/src/content/docs/bs/tools.mdx @@ -95,7 +95,7 @@ Kreira nove datoteke ili prepisuje postojece. Koristite ovo da dozvolite LLM-u kreiranje novih datoteka. Ako datoteka vec postoji, bit ce prepisana. 
:::note -`write` alat kontrolise `edit` dozvola, koja pokriva sve izmjene datoteka (`edit`, `write`, `patch`, `multiedit`). +`write` alat kontrolise `edit` dozvola, koja pokriva sve izmjene datoteka (`edit`, `write`, `patch`). ::: --- @@ -190,7 +190,7 @@ Primjenjuje zakrpe na datoteke. Ovaj alat primjenjuje patch datoteke na kod. Koristan je za diffs i patch-eve iz razlicitih izvora. :::note -`patch` alat kontrolise `edit` dozvola, koja pokriva sve izmjene datoteka (`edit`, `write`, `patch`, `multiedit`). +`patch` alat kontrolise `edit` dozvola, koja pokriva sve izmjene datoteka (`edit`, `write`, `patch`). ::: --- diff --git a/packages/web/src/content/docs/da/permissions.mdx b/packages/web/src/content/docs/da/permissions.mdx index 176dd568e1..30de7aa863 100644 --- a/packages/web/src/content/docs/da/permissions.mdx +++ b/packages/web/src/content/docs/da/permissions.mdx @@ -130,7 +130,7 @@ Hold listen fokuseret på betroede stier, og lag ekstra tillad eller afvis regle OpenCode tilladelser indtastes efter værktøjsnavn plus et par sikkerhedsafskærmninger: - `read` — læser en fil (matcher filstien) -- `edit` — alle filændringer (dækker `edit`, `write`, `patch`, `multiedit`) +- `edit` — alle filændringer (dækker `edit`, `write`, `patch`) - `glob` — fil-globing (matcher glob-mønsteret) - `grep` — indholdssøgning (matcher regex-mønsteret) - `bash` — kører shell-kommandoer (matcher parsede kommandoer som `git status --porcelain`) diff --git a/packages/web/src/content/docs/da/tools.mdx b/packages/web/src/content/docs/da/tools.mdx index 6f6f95c9c5..31c980082e 100644 --- a/packages/web/src/content/docs/da/tools.mdx +++ b/packages/web/src/content/docs/da/tools.mdx @@ -95,7 +95,7 @@ Opret nye filer eller overskriv eksisterende. Brug denne for at la LLM lage nye filer. Den vil overskrive eksisterende filer hvis de allerede eksisterer. :::note -`write`-verktøyet kontrolleres av tillatelsen `edit`, som dekker alle filendringer (`edit`, `write`, `patch`, `multiedit`). 
+`write`-verktøyet kontrolleres av tillatelsen `edit`, som dekker alle filendringer (`edit`, `write`, `patch`). ::: --- @@ -190,7 +190,7 @@ Brug patcher på filer. Dette verktøyet bruger opdateringsfiler til kodebasen din. Nyttig for at påføre diff og lapper fra forskjellige kilder. :::note -`patch`-verktøyet kontrolleres av tillatelsen `edit`, som dekker alle filendringer (`edit`, `write`, `patch`, `multiedit`). +`patch`-verktøyet kontrolleres av tillatelsen `edit`, som dekker alle filendringer (`edit`, `write`, `patch`). ::: --- diff --git a/packages/web/src/content/docs/de/permissions.mdx b/packages/web/src/content/docs/de/permissions.mdx index 6b647ca366..6769ae74d3 100644 --- a/packages/web/src/content/docs/de/permissions.mdx +++ b/packages/web/src/content/docs/de/permissions.mdx @@ -130,7 +130,7 @@ Konzentrieren Sie sich in der Liste auf vertrauenswürdige Pfade und fügen Sie OpenCode-Berechtigungen basieren auf Tool-Namen sowie einigen Sicherheitsvorkehrungen: - `read` – eine Datei lesen (entspricht dem Dateipfad) -- `edit` – alle Dateiänderungen (umfasst `edit`, `write`, `patch`, `multiedit`) +- `edit` – alle Dateiänderungen (umfasst `edit`, `write`, `patch`) - `glob` – Datei-Globbing (entspricht dem Glob-Muster) - `grep` – Inhaltssuche (entspricht dem Regex-Muster) - `bash` – Ausführen von Shell-Befehlen (entspricht analysierten Befehlen wie `git status --porcelain`) diff --git a/packages/web/src/content/docs/de/tools.mdx b/packages/web/src/content/docs/de/tools.mdx index 6012148c6a..8f5e7c0715 100644 --- a/packages/web/src/content/docs/de/tools.mdx +++ b/packages/web/src/content/docs/de/tools.mdx @@ -102,7 +102,7 @@ Bestehende Dateien werden dabei ueberschrieben. :::note Das Tool `write` wird ueber die Berechtigung `edit` gesteuert. -`edit` gilt fuer alle Datei-Aenderungen (`edit`, `write`, `patch`, `multiedit`). +`edit` gilt fuer alle Datei-Aenderungen (`edit`, `write`, `patch`). ::: --- @@ -197,7 +197,7 @@ Wendet Patches auf Dateien an. 
Dieses Tool wendet Patch-Dateien auf deine Codebasis an. Nuetzlich fuer Diffs und Patches aus verschiedenen Quellen. :::note -Das Tool `patch` wird ueber die Berechtigung `edit` gesteuert, welche alle Datei-Aenderungen abdeckt (`edit`, `write`, `patch`, `multiedit`). +Das Tool `patch` wird ueber die Berechtigung `edit` gesteuert, welche alle Datei-Aenderungen abdeckt (`edit`, `write`, `patch`). ::: --- diff --git a/packages/web/src/content/docs/es/permissions.mdx b/packages/web/src/content/docs/es/permissions.mdx index 6923368e40..131ab323b4 100644 --- a/packages/web/src/content/docs/es/permissions.mdx +++ b/packages/web/src/content/docs/es/permissions.mdx @@ -130,7 +130,7 @@ Mantenga la lista centrada en rutas confiables y aplique reglas adicionales de p Los permisos OpenCode están codificados por el nombre de la herramienta, además de un par de medidas de seguridad: - `read` — leer un archivo (coincide con la ruta del archivo) -- `edit` — todas las modificaciones de archivos (cubre `edit`, `write`, `patch`, `multiedit`) +- `edit` — todas las modificaciones de archivos (cubre `edit`, `write`, `patch`) - `glob` — globalización de archivos (coincide con el patrón global) - `grep` — búsqueda de contenido (coincide con el patrón de expresiones regulares) - `bash`: ejecuta comandos de shell (coincide con comandos analizados como `git status --porcelain`) diff --git a/packages/web/src/content/docs/es/tools.mdx b/packages/web/src/content/docs/es/tools.mdx index 83d61f5325..5a0cca969b 100644 --- a/packages/web/src/content/docs/es/tools.mdx +++ b/packages/web/src/content/docs/es/tools.mdx @@ -95,7 +95,7 @@ Cree nuevos archivos o sobrescriba los existentes. Utilice esto para permitir que LLM cree nuevos archivos. Sobrescribirá los archivos existentes si ya existen. :::note -La herramienta `write` está controlada por el permiso `edit`, que cubre todas las modificaciones de archivos (`edit`, `write`, `patch`, `multiedit`). 
+La herramienta `write` está controlada por el permiso `edit`, que cubre todas las modificaciones de archivos (`edit`, `write`, `patch`). ::: --- @@ -190,7 +190,7 @@ Aplicar parches a los archivos. Esta herramienta aplica archivos de parche a su código base. Útil para aplicar diferencias y parches de diversas fuentes. :::note -La herramienta `patch` está controlada por el permiso `edit`, que cubre todas las modificaciones de archivos (`edit`, `write`, `patch`, `multiedit`). +La herramienta `patch` está controlada por el permiso `edit`, que cubre todas las modificaciones de archivos (`edit`, `write`, `patch`). ::: --- diff --git a/packages/web/src/content/docs/fr/permissions.mdx b/packages/web/src/content/docs/fr/permissions.mdx index b1c1d6800f..b3b9d7e2d1 100644 --- a/packages/web/src/content/docs/fr/permissions.mdx +++ b/packages/web/src/content/docs/fr/permissions.mdx @@ -130,7 +130,7 @@ Gardez la liste centrée sur les chemins approuvés et superposez des règles d' Les autorisations OpenCode sont classées par nom d'outil, plus quelques garde-fous de sécurité : - `read` — lecture d'un fichier (correspond au chemin du fichier) -- `edit` — toutes les modifications de fichiers (couvre `edit`, `write`, `patch`, `multiedit`) +- `edit` — toutes les modifications de fichiers (couvre `edit`, `write`, `patch`) - `glob` — globalisation de fichiers (correspond au modèle global) - `grep` — recherche de contenu (correspond au modèle regex) - `bash` - exécution de commandes shell (correspond aux commandes analysées comme `git status --porcelain`) diff --git a/packages/web/src/content/docs/fr/tools.mdx b/packages/web/src/content/docs/fr/tools.mdx index 4f3f180469..72edf98191 100644 --- a/packages/web/src/content/docs/fr/tools.mdx +++ b/packages/web/src/content/docs/fr/tools.mdx @@ -95,7 +95,7 @@ Créez de nouveaux fichiers ou écrasez ceux existants. Utilisez-le pour permettre au LLM de créer de nouveaux fichiers. Il écrasera les fichiers existants s'ils existent déjà. 
:::note -L'outil `write` est contrôlé par l'autorisation `edit`, qui couvre toutes les modifications de fichiers (`edit`, `write`, `patch`, `multiedit`). +L'outil `write` est contrôlé par l'autorisation `edit`, qui couvre toutes les modifications de fichiers (`edit`, `write`, `patch`). ::: --- @@ -190,7 +190,7 @@ Appliquez des correctifs aux fichiers. Cet outil applique les fichiers de correctifs à votre base de code. Utile pour appliquer des différences et des correctifs provenant de diverses sources. :::note -L'outil `patch` est contrôlé par l'autorisation `edit`, qui couvre toutes les modifications de fichiers (`edit`, `write`, `patch`, `multiedit`). +L'outil `patch` est contrôlé par l'autorisation `edit`, qui couvre toutes les modifications de fichiers (`edit`, `write`, `patch`). ::: --- diff --git a/packages/web/src/content/docs/it/permissions.mdx b/packages/web/src/content/docs/it/permissions.mdx index 49f0e8e4d3..417bf4a239 100644 --- a/packages/web/src/content/docs/it/permissions.mdx +++ b/packages/web/src/content/docs/it/permissions.mdx @@ -130,7 +130,7 @@ Mantieni l'elenco limitato a percorsi fidati e aggiungi regole extra di allow/de I permessi di OpenCode sono indicizzati per nome dello strumento, piu' un paio di guardrail di sicurezza: - `read` — lettura di un file (corrisponde al percorso del file) -- `edit` — tutte le modifiche ai file (include `edit`, `write`, `patch`, `multiedit`) +- `edit` — tutte le modifiche ai file (include `edit`, `write`, `patch`) - `glob` — ricerca file tramite glob (corrisponde al pattern glob) - `grep` — ricerca nel contenuto (corrisponde al pattern regex) - `bash` — esecuzione comandi di shell (corrisponde a comandi parsati come `git status --porcelain`) diff --git a/packages/web/src/content/docs/it/tools.mdx b/packages/web/src/content/docs/it/tools.mdx index c1e69f8beb..e3f2f23ce9 100644 --- a/packages/web/src/content/docs/it/tools.mdx +++ b/packages/web/src/content/docs/it/tools.mdx @@ -95,7 +95,7 @@ Crea nuovi file o 
sovrascrive quelli esistenti. Usalo per consentire all'LLM di creare nuovi file. Sovrascrivera' i file esistenti se sono gia' presenti. :::note -Lo strumento `write` e' controllato dal permesso `edit`, che copre tutte le modifiche ai file (`edit`, `write`, `patch`, `multiedit`). +Lo strumento `write` e' controllato dal permesso `edit`, che copre tutte le modifiche ai file (`edit`, `write`, `patch`). ::: --- @@ -190,7 +190,7 @@ Applica patch ai file. Questo strumento applica file patch al tuo codebase. Utile per applicare diff e patch da varie fonti. :::note -Lo strumento `patch` e' controllato dal permesso `edit`, che copre tutte le modifiche ai file (`edit`, `write`, `patch`, `multiedit`). +Lo strumento `patch` e' controllato dal permesso `edit`, che copre tutte le modifiche ai file (`edit`, `write`, `patch`). ::: --- diff --git a/packages/web/src/content/docs/ja/permissions.mdx b/packages/web/src/content/docs/ja/permissions.mdx index f2b0978259..55f6f87007 100644 --- a/packages/web/src/content/docs/ja/permissions.mdx +++ b/packages/web/src/content/docs/ja/permissions.mdx @@ -130,7 +130,7 @@ OpenCode は `permission` 設定を使用して、特定のアクションを自 OpenCode の権限は、ツール名に加えて、いくつかの安全対策によってキー化されます。 - `read` — ファイルの読み取り (ファイルパスと一致) -- `edit` — すべてのファイル変更 (`edit`、`write`、`patch`、`multiedit` をカバー) +- `edit` — すべてのファイル変更 (`edit`、`write`、`patch` をカバー) - `glob` — ファイルのグロビング (グロブパターンと一致) - `grep` — コンテンツ検索 (正規表現パターンと一致) - `bash` — シェルコマンドの実行 (`git status --porcelain` などの解析されたコマンドと一致します) diff --git a/packages/web/src/content/docs/ja/tools.mdx b/packages/web/src/content/docs/ja/tools.mdx index 3945063936..1473a0b360 100644 --- a/packages/web/src/content/docs/ja/tools.mdx +++ b/packages/web/src/content/docs/ja/tools.mdx @@ -95,7 +95,7 @@ OpenCode で利用可能なすべての組み込みツールを次に示しま これを使用して、LLM が新しいファイルを作成できるようにします。既存のファイルがすでに存在する場合は上書きされます。 :::note -`write` ツールは、すべてのファイル変更 (`edit`、`edit`、`write`、`patch`) をカバーする `multiedit` 権限によって制御されます。 +`write` ツールは、すべてのファイル変更 (`edit`、`write`、`patch`) をカバーする `edit` 
権限によって制御されます。 ::: --- @@ -190,7 +190,7 @@ OpenCode で利用可能なすべての組み込みツールを次に示しま このツールは、コードベースにパッチファイルを適用します。さまざまなソースからの差分やパッチを適用するのに役立ちます。 :::note -`patch` ツールは、すべてのファイル変更 (`edit`、`edit`、`write`、`patch`) をカバーする `multiedit` 権限によって制御されます。 +`patch` ツールは、すべてのファイル変更 (`edit`、`write`、`patch`) をカバーする `edit` 権限によって制御されます。 ::: --- diff --git a/packages/web/src/content/docs/ko/permissions.mdx b/packages/web/src/content/docs/ko/permissions.mdx index 0742089d6b..e19c3c49c3 100644 --- a/packages/web/src/content/docs/ko/permissions.mdx +++ b/packages/web/src/content/docs/ko/permissions.mdx @@ -130,7 +130,7 @@ Permission 본 사용 간단한 wildcard 일치: opencode 권한은 도구 이름에 의해 키 입력되며, 두 개의 안전 가드 : - `read` - 파일 읽기 (파일 경로의 매칭) -- `edit` - 모든 파일 수정 (covers `edit`, `write`, `patch`, `multiedit`) +- `edit` - 모든 파일 수정 (covers `edit`, `write`, `patch`) - `glob` - 파일 globbing (glob 패턴 매칭) - `grep` - 콘텐츠 검색 ( regex 패턴 매칭) - `bash` - shell 명령 실행 (`git status --porcelain`와 같은 팟 명령) diff --git a/packages/web/src/content/docs/ko/tools.mdx b/packages/web/src/content/docs/ko/tools.mdx index 49bea93cb2..8fe5ab1d0a 100644 --- a/packages/web/src/content/docs/ko/tools.mdx +++ b/packages/web/src/content/docs/ko/tools.mdx @@ -95,7 +95,7 @@ description: LLM이 사용할 수 있는 도구를 관리합니다. LLM을 사용하여 새 파일을 만듭니다. 이미 존재하는 경우 기존 파일을 덮어쓰겠습니다. :::note -`write` 공구는 모든 파일 수정 (`edit`, `write`, `patch`, `multiedit`)를 포함하는 `edit` 허가에 의해 통제됩니다. +`write` 공구는 모든 파일 수정 (`edit`, `write`, `patch`)를 포함하는 `edit` 허가에 의해 통제됩니다. ::: --- @@ -190,7 +190,7 @@ LSP 서버가 프로젝트에 사용할 수 있는 구성하려면 [LSP Servers] 이 도구는 코드베이스에 패치 파일을 적용합니다. 다양한 소스에서 diffs 및 Patch를 적용하는 데 유용합니다. :::note -`patch` 공구는 모든 파일 수정 (`edit`, `write`, `patch`, `multiedit`)를 포함하는 `edit` 허가에 의해 통제됩니다. +`patch` 공구는 모든 파일 수정 (`edit`, `write`, `patch`)를 포함하는 `edit` 허가에 의해 통제됩니다. 
::: --- diff --git a/packages/web/src/content/docs/nb/permissions.mdx b/packages/web/src/content/docs/nb/permissions.mdx index 5c63b251e3..5f54d23aa0 100644 --- a/packages/web/src/content/docs/nb/permissions.mdx +++ b/packages/web/src/content/docs/nb/permissions.mdx @@ -130,7 +130,7 @@ Hold listen fokusert på klarerte baner, og lag ekstra tillat eller avslå regle OpenCode-tillatelser tastes inn etter verktøynavn, pluss et par sikkerhetsvakter: - `read` — lesing av en fil (tilsvarer filbanen) -- `edit` — alle filendringer (dekker `edit`, `write`, `patch`, `multiedit`) +- `edit` — alle filendringer (dekker `edit`, `write`, `patch`) - `glob` — fil-globing (tilsvarer glob-mønsteret) - `grep` — innholdssøk (samsvarer med regex-mønsteret) - `bash` — kjører skallkommandoer (matcher analyserte kommandoer som `git status --porcelain`) diff --git a/packages/web/src/content/docs/nb/tools.mdx b/packages/web/src/content/docs/nb/tools.mdx index 8c871f11c9..2e6f61ba07 100644 --- a/packages/web/src/content/docs/nb/tools.mdx +++ b/packages/web/src/content/docs/nb/tools.mdx @@ -95,7 +95,7 @@ Opprett nye filer eller overskriv eksisterende. Bruk denne for å la LLM lage nye filer. Den vil overskrive eksisterende filer hvis de allerede eksisterer. :::note -`write`-verktøyet kontrolleres av tillatelsen `edit`, som dekker alle filendringer (`edit`, `write`, `patch`, `multiedit`). +`write`-verktøyet kontrolleres av tillatelsen `edit`, som dekker alle filendringer (`edit`, `write`, `patch`). ::: --- @@ -190,7 +190,7 @@ Bruk patcher på filer. Dette verktøyet bruker oppdateringsfiler til kodebasen din. Nyttig for å påføre diffs og patcher fra forskjellige kilder. :::note -`patch`-verktøyet kontrolleres av tillatelsen `edit`, som dekker alle filendringer (`edit`, `write`, `patch`, `multiedit`). +`patch`-verktøyet kontrolleres av tillatelsen `edit`, som dekker alle filendringer (`edit`, `write`, `patch`). 
::: --- diff --git a/packages/web/src/content/docs/permissions.mdx b/packages/web/src/content/docs/permissions.mdx index 6383b2a3f2..f2efe3a49b 100644 --- a/packages/web/src/content/docs/permissions.mdx +++ b/packages/web/src/content/docs/permissions.mdx @@ -130,7 +130,7 @@ Keep the list focused on trusted paths, and layer extra allow or deny rules as n OpenCode permissions are keyed by tool name, plus a couple of safety guards: - `read` — reading a file (matches the file path) -- `edit` — all file modifications (covers `edit`, `write`, `patch`, `multiedit`) +- `edit` — all file modifications (covers `edit`, `write`, `patch`) - `glob` — file globbing (matches the glob pattern) - `grep` — content search (matches the regex pattern) - `bash` — running shell commands (matches parsed commands like `git status --porcelain`) diff --git a/packages/web/src/content/docs/pl/permissions.mdx b/packages/web/src/content/docs/pl/permissions.mdx index a5c05b6dc6..b17b873472 100644 --- a/packages/web/src/content/docs/pl/permissions.mdx +++ b/packages/web/src/content/docs/pl/permissions.mdx @@ -130,7 +130,7 @@ Skoncentruj listę na zaufanych ścieżkach i dodaj dodatkowe zezwolenie lub odm Uprawnienia opencode są określane na podstawie nazwy narzędzia i kilku zabezpieczeń: - `read` — odczyt pliku (odpowiada ścieżce pliku) -- `edit` — wszystkie modyfikacje plików (obejmuje `edit`, `write`, `patch`, `multiedit`) +- `edit` — wszystkie modyfikacje plików (obejmuje `edit`, `write`, `patch`) - `glob` — maglowanie plików (pasuje do wzorców globowania) - `grep` — wyszukiwanie treści (pasuje do wzorca regularnego) - `bash` — uruchamianie poleceń shell (pasuje do poleceń przeanalizowanych, takich jak `git status --porcelain`) diff --git a/packages/web/src/content/docs/pl/tools.mdx b/packages/web/src/content/docs/pl/tools.mdx index 180e043cd5..6b8a5eeb49 100644 --- a/packages/web/src/content/docs/pl/tools.mdx +++ b/packages/web/src/content/docs/pl/tools.mdx @@ -95,7 +95,7 @@ Twórz nowe pliki lub 
nadpisuj istniejące. Użyj tego, aby umożliwić LLM tworzenie nowych plików. Zastąpi istniejące pliki, jeśli już istnieją. :::note -Narzędzie `write` jest kontrolowane przez uprawnienie `edit`, które obejmuje wszystkie modyfikacje plików (`edit`, `write`, `patch`, `multiedit`). +Narzędzie `write` jest kontrolowane przez uprawnienie `edit`, które obejmuje wszystkie modyfikacje plików (`edit`, `write`, `patch`). ::: --- @@ -190,7 +190,7 @@ Stosuj łatki (patches) do plików. To narzędzie stosuje pliki różnicowe (diffs) do bazy kodu. Przydatne do stosowania zmian z różnych źródeł. :::note -Narzędzie `patch` jest kontrolowane przez uprawnienie `edit`, które obejmuje wszystkie modyfikacje plików (`edit`, `write`, `patch`, `multiedit`). +Narzędzie `patch` jest kontrolowane przez uprawnienie `edit`, które obejmuje wszystkie modyfikacje plików (`edit`, `write`, `patch`). ::: --- diff --git a/packages/web/src/content/docs/pt-br/permissions.mdx b/packages/web/src/content/docs/pt-br/permissions.mdx index 4facc9f72b..077524b87f 100644 --- a/packages/web/src/content/docs/pt-br/permissions.mdx +++ b/packages/web/src/content/docs/pt-br/permissions.mdx @@ -130,7 +130,7 @@ Mantenha a lista focada em caminhos confiáveis e adicione regras adicionais de As permissões do opencode são indexadas pelo nome da ferramenta, além de alguns guardas de segurança: - `read` — leitura de um arquivo (corresponde ao caminho do arquivo) -- `edit` — todas as modificações de arquivo (cobre `edit`, `write`, `patch`, `multiedit`) +- `edit` — todas as modificações de arquivo (cobre `edit`, `write`, `patch`) - `glob` — globbing de arquivos (corresponde ao padrão glob) - `grep` — busca de conteúdo (corresponde ao padrão regex) - `bash` — execução de comandos de shell (corresponde a comandos analisados como `git status --porcelain`) diff --git a/packages/web/src/content/docs/pt-br/tools.mdx b/packages/web/src/content/docs/pt-br/tools.mdx index 4c7b371971..86fd714d32 100644 --- 
a/packages/web/src/content/docs/pt-br/tools.mdx +++ b/packages/web/src/content/docs/pt-br/tools.mdx @@ -95,7 +95,7 @@ Crie novos arquivos ou sobrescreva os existentes. Use isso para permitir que o LLM crie novos arquivos. Ele sobrescreverá arquivos existentes se já existirem. :::note -A ferramenta `write` é controlada pela permissão `edit`, que cobre todas as modificações de arquivos (`edit`, `write`, `patch`, `multiedit`). +A ferramenta `write` é controlada pela permissão `edit`, que cobre todas as modificações de arquivos (`edit`, `write`, `patch`). ::: --- @@ -190,7 +190,7 @@ Aplique patches a arquivos. Esta ferramenta aplica arquivos de patch à sua base de código. Útil para aplicar diffs e patches de várias fontes. :::note -A ferramenta `patch` é controlada pela permissão `edit`, que cobre todas as modificações de arquivos (`edit`, `write`, `patch`, `multiedit`). +A ferramenta `patch` é controlada pela permissão `edit`, que cobre todas as modificações de arquivos (`edit`, `write`, `patch`). 
::: --- diff --git a/packages/web/src/content/docs/ru/permissions.mdx b/packages/web/src/content/docs/ru/permissions.mdx index 961a068243..7e027029e6 100644 --- a/packages/web/src/content/docs/ru/permissions.mdx +++ b/packages/web/src/content/docs/ru/permissions.mdx @@ -130,7 +130,7 @@ opencode использует конфигурацию `permission`, чтобы Разрешения opencode привязаны к имени инструмента, а также к нескольким мерам безопасности: - `read` — чтение файла (соответствует пути к файлу) -- `edit` — все модификации файлов (охватывает `edit`, `write`, `patch`, `multiedit`) +- `edit` — все модификации файлов (охватывает `edit`, `write`, `patch`) - `glob` — подстановка файла (соответствует шаблону подстановки) - `grep` — поиск по контенту (соответствует шаблону регулярного выражения) - `bash` — запуск shell-команд (соответствует проанализированным командам, например `git status --porcelain`) diff --git a/packages/web/src/content/docs/ru/tools.mdx b/packages/web/src/content/docs/ru/tools.mdx index 35958e036c..b34d30465b 100644 --- a/packages/web/src/content/docs/ru/tools.mdx +++ b/packages/web/src/content/docs/ru/tools.mdx @@ -95,7 +95,7 @@ description: Управляйте инструментами, которые м Используйте это, чтобы позволить LLM создавать новые файлы. Он перезапишет существующие файлы, если они уже существуют. :::note -Инструмент `write` контролируется разрешением `edit`, которое распространяется на все модификации файлов (`edit`, `write`, `patch`, `multiedit`). +Инструмент `write` контролируется разрешением `edit`, которое распространяется на все модификации файлов (`edit`, `write`, `patch`). ::: --- @@ -190,7 +190,7 @@ description: Управляйте инструментами, которые м Этот инструмент применяет файлы исправлений к вашей кодовой базе. Полезно для применения различий и патчей из различных источников. :::note -Инструмент `patch` контролируется разрешением `edit`, которое распространяется на все модификации файлов (`edit`, `write`, `patch`, `multiedit`). 
+Инструмент `patch` контролируется разрешением `edit`, которое распространяется на все модификации файлов (`edit`, `write`, `patch`). ::: --- diff --git a/packages/web/src/content/docs/th/permissions.mdx b/packages/web/src/content/docs/th/permissions.mdx index 5fed616159..4526b7495d 100644 --- a/packages/web/src/content/docs/th/permissions.mdx +++ b/packages/web/src/content/docs/th/permissions.mdx @@ -130,7 +130,7 @@ OpenCode ใช้การกำหนดค่า `permission` เพื่อ สิทธิ์ของ OpenCode จะกำหนดไว้ตามชื่อเครื่องมือ พร้อมด้วย guardrails อีก 2-3 คน: - `read` — อ่านไฟล์ (ตรงกับเส้นทางของไฟล์) -- `edit` — การแก้ไขไฟล์ทั้งหมด (ครอบคลุมถึง `edit`, `write`, `patch`, `multiedit`) +- `edit` — การแก้ไขไฟล์ทั้งหมด (ครอบคลุมถึง `edit`, `write`, `patch`) - `glob` — ไฟล์ globbing (ตรงกับรูปแบบ glob) - `grep` — การค้นหาเนื้อหา (ตรงกับรูปแบบ regex) - `bash` — การรันคำสั่ง shell (ตรงกับคำสั่งที่แยกวิเคราะห์เช่น `git status --porcelain`) diff --git a/packages/web/src/content/docs/th/tools.mdx b/packages/web/src/content/docs/th/tools.mdx index 0ead638461..5006b41de6 100644 --- a/packages/web/src/content/docs/th/tools.mdx +++ b/packages/web/src/content/docs/th/tools.mdx @@ -95,7 +95,7 @@ description: จัดการเครื่องมือที่ LLM ส ใช้สิ่งนี้เพื่ออนุญาตให้ LLM สร้างไฟล์ใหม่ มันจะเขียนทับไฟล์ที่มีอยู่หากมีอยู่แล้ว :::note -เครื่องมือ `write` ถูกควบคุมโดยสิทธิ์ `edit` ซึ่งครอบคลุมการแก้ไขไฟล์ทั้งหมด (`edit`, `write`, `patch`, `multiedit`) +เครื่องมือ `write` ถูกควบคุมโดยสิทธิ์ `edit` ซึ่งครอบคลุมการแก้ไขไฟล์ทั้งหมด (`edit`, `write`, `patch`) ::: --- @@ -190,7 +190,7 @@ description: จัดการเครื่องมือที่ LLM ส เครื่องมือนี้ใช้ไฟล์แพทช์กับโค้ดเบสของคุณ มีประโยชน์สำหรับการใช้ความแตกต่างและแพตช์จากแหล่งต่างๆ :::note -เครื่องมือ `patch` ถูกควบคุมโดยสิทธิ์ `edit` ซึ่งครอบคลุมการแก้ไขไฟล์ทั้งหมด (`edit`, `write`, `patch`, `multiedit`) +เครื่องมือ `patch` ถูกควบคุมโดยสิทธิ์ `edit` ซึ่งครอบคลุมการแก้ไขไฟล์ทั้งหมด (`edit`, `write`, `patch`) ::: --- diff --git a/packages/web/src/content/docs/tools.mdx 
b/packages/web/src/content/docs/tools.mdx index f05e980b8c..e8d5e0963a 100644 --- a/packages/web/src/content/docs/tools.mdx +++ b/packages/web/src/content/docs/tools.mdx @@ -95,7 +95,7 @@ Create new files or overwrite existing ones. Use this to allow the LLM to create new files. It will overwrite existing files if they already exist. :::note -The `write` tool is controlled by the `edit` permission, which covers all file modifications (`edit`, `write`, `apply_patch`, `multiedit`). +The `write` tool is controlled by the `edit` permission, which covers all file modifications (`edit`, `write`, `apply_patch`). ::: --- @@ -194,7 +194,7 @@ When handling `tool.execute.before` or `tool.execute.after` hooks, check `input. `apply_patch` uses `output.args.patchText` instead of `output.args.filePath`. Paths are embedded in marker lines within `patchText` and are relative to the project root (for example: `*** Add File: src/new-file.ts`, `*** Update File: src/existing.ts`, `*** Move to: src/renamed.ts`, `*** Delete File: src/obsolete.ts`). :::note -The `apply_patch` tool is controlled by the `edit` permission, which covers all file modifications (`edit`, `write`, `apply_patch`, `multiedit`). +The `apply_patch` tool is controlled by the `edit` permission, which covers all file modifications (`edit`, `write`, `apply_patch`). 
::: --- diff --git a/packages/web/src/content/docs/tr/permissions.mdx b/packages/web/src/content/docs/tr/permissions.mdx index 976ee0a7ff..89c0ebb2c5 100644 --- a/packages/web/src/content/docs/tr/permissions.mdx +++ b/packages/web/src/content/docs/tr/permissions.mdx @@ -130,7 +130,7 @@ Listeyi güvenilir yollara odaklı tutun ve diğer araçlar için gereken ekstra opencode izinleri araç adına ve birkaç güvenlik önlemine göre anahtarlanır: - `read` — bir dosyayı okumak (dosya yoluyla eşleşir) -- `edit` — tüm dosya değişiklikleri (`edit`, `write`, `patch`, `multiedit`'yi kapsar) +- `edit` — tüm dosya değişiklikleri (`edit`, `write`, `patch`'i kapsar) - `glob` — dosya genellemesi (glob düzeniyle eşleşir) - `grep` — içerik arama (regex modeliyle eşleşir) - `bash` — kabuk komutlarını çalıştırma (`git status --porcelain` gibi ayrıştırılmış komutlarla eşleşir) diff --git a/packages/web/src/content/docs/tr/tools.mdx b/packages/web/src/content/docs/tr/tools.mdx index 2beb190094..29c1e0054c 100644 --- a/packages/web/src/content/docs/tr/tools.mdx +++ b/packages/web/src/content/docs/tr/tools.mdx @@ -95,7 +95,7 @@ Yeni dosyalar oluşturur veya mevcut dosyaları üzerine yazar. LLM'in yeni dosya oluşturmasına izin vermek için bunu kullanın. Dosya zaten varsa üzerine yazar. :::note -`write` aracı `edit` izniyle kontrol edilir; bu izin tüm dosya değişikliklerini kapsar (`edit`, `write`, `patch`, `multiedit`). +`write` aracı `edit` izniyle kontrol edilir; bu izin tüm dosya değişikliklerini kapsar (`edit`, `write`, `patch`). ::: --- @@ -190,7 +190,7 @@ Dosyalara patch uygular. Bu araç patch dosyalarını kod tabanınıza uygular. Farklı kaynaklardan gelen diff ve patch'leri uygulamak için kullanışlıdır. :::note -`patch` aracı `edit` izniyle kontrol edilir; bu izin tüm dosya değişikliklerini kapsar (`edit`, `write`, `patch`, `multiedit`). +`patch` aracı `edit` izniyle kontrol edilir; bu izin tüm dosya değişikliklerini kapsar (`edit`, `write`, `patch`). 
::: --- diff --git a/packages/web/src/content/docs/zh-cn/permissions.mdx b/packages/web/src/content/docs/zh-cn/permissions.mdx index f928554f2a..a497742c09 100644 --- a/packages/web/src/content/docs/zh-cn/permissions.mdx +++ b/packages/web/src/content/docs/zh-cn/permissions.mdx @@ -130,7 +130,7 @@ OpenCode 使用 `permission` 配置来决定某个操作是否应自动运行、 OpenCode 的权限以工具名称为键,外加几个安全防护项: - `read` — 读取文件(匹配文件路径) -- `edit` — 所有文件修改(涵盖 `edit`、`write`、`patch`、`multiedit`) +- `edit` — 所有文件修改(涵盖 `edit`、`write`、`patch`) - `glob` — 文件通配(匹配通配模式) - `grep` — 内容搜索(匹配正则表达式模式) - `bash` — 运行 shell 命令(匹配解析后的命令,如 `git status --porcelain`) diff --git a/packages/web/src/content/docs/zh-cn/tools.mdx b/packages/web/src/content/docs/zh-cn/tools.mdx index 4c60370590..1a58eece5d 100644 --- a/packages/web/src/content/docs/zh-cn/tools.mdx +++ b/packages/web/src/content/docs/zh-cn/tools.mdx @@ -95,7 +95,7 @@ description: 管理 LLM 可以使用的工具。 使用此工具允许 LLM 创建新文件。如果文件已存在,则会覆盖现有文件。 :::note -`write` 工具由 `edit` 权限控制,该权限涵盖所有文件修改操作(`edit`、`write`、`patch`、`multiedit`)。 +`write` 工具由 `edit` 权限控制,该权限涵盖所有文件修改操作(`edit`、`write`、`patch`)。 ::: --- @@ -190,7 +190,7 @@ description: 管理 LLM 可以使用的工具。 该工具将补丁文件应用到您的代码库中。适用于应用来自各种来源的 diff 和补丁。 :::note -`patch` 工具由 `edit` 权限控制,该权限涵盖所有文件修改操作(`edit`、`write`、`patch`、`multiedit`)。 +`patch` 工具由 `edit` 权限控制,该权限涵盖所有文件修改操作(`edit`、`write`、`patch`)。 ::: --- diff --git a/packages/web/src/content/docs/zh-tw/permissions.mdx b/packages/web/src/content/docs/zh-tw/permissions.mdx index bacd87c1ed..5d98bddfb2 100644 --- a/packages/web/src/content/docs/zh-tw/permissions.mdx +++ b/packages/web/src/content/docs/zh-tw/permissions.mdx @@ -130,7 +130,7 @@ OpenCode 使用 `permission` 設定來決定某個操作是否應自動執行、 OpenCode 的權限以工具名稱為鍵,外加幾個安全防護項: - `read` — 讀取檔案(比對檔案路徑) -- `edit` — 所有檔案修改(涵蓋 `edit`、`write`、`patch`、`multiedit`) +- `edit` — 所有檔案修改(涵蓋 `edit`、`write`、`patch`) - `glob` — 檔案萬用字元比對(比對萬用字元模式) - `grep` — 內容搜尋(比對正規表示式模式) - `bash` — 執行 shell 指令(比對解析後的指令,如 `git status --porcelain`) diff --git 
a/packages/web/src/content/docs/zh-tw/tools.mdx b/packages/web/src/content/docs/zh-tw/tools.mdx index 6ce68d9fb5..763d0ce9a7 100644 --- a/packages/web/src/content/docs/zh-tw/tools.mdx +++ b/packages/web/src/content/docs/zh-tw/tools.mdx @@ -95,7 +95,7 @@ description: 管理 LLM 可以使用的工具。 使用此工具允許 LLM 建立新檔案。如果檔案已存在,則會覆蓋現有檔案。 :::note -`write` 工具由 `edit` 權限控制,該權限涵蓋所有檔案修改操作(`edit`、`write`、`patch`、`multiedit`)。 +`write` 工具由 `edit` 權限控制,該權限涵蓋所有檔案修改操作(`edit`、`write`、`patch`)。 ::: --- @@ -190,7 +190,7 @@ description: 管理 LLM 可以使用的工具。 該工具將補丁檔案套用到您的程式碼庫中。適用於套用來自各種來源的 diff 和補丁。 :::note -`patch` 工具由 `edit` 權限控制,該權限涵蓋所有檔案修改操作(`edit`、`write`、`patch`、`multiedit`)。 +`patch` 工具由 `edit` 權限控制,該權限涵蓋所有檔案修改操作(`edit`、`write`、`patch`)。 ::: --- From 95794292762caff2700128388e2672e4a5f5ab07 Mon Sep 17 00:00:00 2001 From: Kit Langton Date: Tue, 21 Apr 2026 11:54:40 -0400 Subject: [PATCH 22/73] test(opencode): consolidate session prompt tests into Effect style (#23710) --- .../test/session/prompt-effect.test.ts | 1522 ----------- packages/opencode/test/session/prompt.test.ts | 2324 +++++++++++++---- 2 files changed, 1810 insertions(+), 2036 deletions(-) delete mode 100644 packages/opencode/test/session/prompt-effect.test.ts diff --git a/packages/opencode/test/session/prompt-effect.test.ts b/packages/opencode/test/session/prompt-effect.test.ts deleted file mode 100644 index 2f59046840..0000000000 --- a/packages/opencode/test/session/prompt-effect.test.ts +++ /dev/null @@ -1,1522 +0,0 @@ -import { NodeFileSystem } from "@effect/platform-node" -import { FetchHttpClient } from "effect/unstable/http" -import { expect } from "bun:test" -import { Cause, Effect, Exit, Fiber, Layer } from "effect" -import path from "path" -import { Agent as AgentSvc } from "../../src/agent/agent" -import { Bus } from "../../src/bus" -import { Command } from "../../src/command" -import { Config } from "../../src/config" -import { LSP } from "../../src/lsp" -import { MCP } from "../../src/mcp" -import { Permission } from 
"../../src/permission" -import { Plugin } from "../../src/plugin" -import { Provider as ProviderSvc } from "../../src/provider" -import { Env } from "../../src/env" -import { ModelID, ProviderID } from "../../src/provider/schema" -import { Question } from "../../src/question" -import { Todo } from "../../src/session/todo" -import { Session } from "../../src/session" -import { LLM } from "../../src/session/llm" -import { MessageV2 } from "../../src/session/message-v2" -import { AppFileSystem } from "@opencode-ai/shared/filesystem" -import { SessionCompaction } from "../../src/session/compaction" -import { SessionSummary } from "../../src/session/summary" -import { Instruction } from "../../src/session/instruction" -import { SessionProcessor } from "../../src/session/processor" -import { SessionPrompt } from "../../src/session/prompt" -import { SessionRevert } from "../../src/session/revert" -import { SessionRunState } from "../../src/session/run-state" -import { MessageID, PartID, SessionID } from "../../src/session/schema" -import { SessionStatus } from "../../src/session/status" -import { Skill } from "../../src/skill" -import { SystemPrompt } from "../../src/session/system" -import { Shell } from "../../src/shell/shell" -import { Snapshot } from "../../src/snapshot" -import { ToolRegistry } from "../../src/tool" -import { Truncate } from "../../src/tool" -import { Log } from "../../src/util" -import * as CrossSpawnSpawner from "../../src/effect/cross-spawn-spawner" -import { Ripgrep } from "../../src/file/ripgrep" -import { Format } from "../../src/format" -import { provideTmpdirInstance, provideTmpdirServer } from "../fixture/fixture" -import { testEffect } from "../lib/effect" -import { reply, TestLLMServer } from "../lib/llm-server" - -void Log.init({ print: false }) - -const summary = Layer.succeed( - SessionSummary.Service, - SessionSummary.Service.of({ - summarize: () => Effect.void, - diff: () => Effect.succeed([]), - computeDiff: () => Effect.succeed([]), 
- }), -) - -const ref = { - providerID: ProviderID.make("test"), - modelID: ModelID.make("test-model"), -} - -function defer() { - let resolve!: (value: T | PromiseLike) => void - const promise = new Promise((done) => { - resolve = done - }) - return { promise, resolve } -} - -function withSh(fx: () => Effect.Effect) { - return Effect.acquireUseRelease( - Effect.sync(() => { - const prev = process.env.SHELL - process.env.SHELL = "/bin/sh" - Shell.preferred.reset() - return prev - }), - () => fx(), - (prev) => - Effect.sync(() => { - if (prev === undefined) delete process.env.SHELL - else process.env.SHELL = prev - Shell.preferred.reset() - }), - ) -} - -function toolPart(parts: MessageV2.Part[]) { - return parts.find((part): part is MessageV2.ToolPart => part.type === "tool") -} - -type CompletedToolPart = MessageV2.ToolPart & { state: MessageV2.ToolStateCompleted } -type ErrorToolPart = MessageV2.ToolPart & { state: MessageV2.ToolStateError } - -function completedTool(parts: MessageV2.Part[]) { - const part = toolPart(parts) - expect(part?.state.status).toBe("completed") - return part?.state.status === "completed" ? (part as CompletedToolPart) : undefined -} - -function errorTool(parts: MessageV2.Part[]) { - const part = toolPart(parts) - expect(part?.state.status).toBe("error") - return part?.state.status === "error" ? 
(part as ErrorToolPart) : undefined -} - -const mcp = Layer.succeed( - MCP.Service, - MCP.Service.of({ - status: () => Effect.succeed({}), - clients: () => Effect.succeed({}), - tools: () => Effect.succeed({}), - prompts: () => Effect.succeed({}), - resources: () => Effect.succeed({}), - add: () => Effect.succeed({ status: { status: "disabled" as const } }), - connect: () => Effect.void, - disconnect: () => Effect.void, - getPrompt: () => Effect.succeed(undefined), - readResource: () => Effect.succeed(undefined), - startAuth: () => Effect.die("unexpected MCP auth in prompt-effect tests"), - authenticate: () => Effect.die("unexpected MCP auth in prompt-effect tests"), - finishAuth: () => Effect.die("unexpected MCP auth in prompt-effect tests"), - removeAuth: () => Effect.void, - supportsOAuth: () => Effect.succeed(false), - hasStoredTokens: () => Effect.succeed(false), - getAuthStatus: () => Effect.succeed("not_authenticated" as const), - }), -) - -const lsp = Layer.succeed( - LSP.Service, - LSP.Service.of({ - init: () => Effect.void, - status: () => Effect.succeed([]), - hasClients: () => Effect.succeed(false), - touchFile: () => Effect.void, - diagnostics: () => Effect.succeed({}), - hover: () => Effect.succeed(undefined), - definition: () => Effect.succeed([]), - references: () => Effect.succeed([]), - implementation: () => Effect.succeed([]), - documentSymbol: () => Effect.succeed([]), - workspaceSymbol: () => Effect.succeed([]), - prepareCallHierarchy: () => Effect.succeed([]), - incomingCalls: () => Effect.succeed([]), - outgoingCalls: () => Effect.succeed([]), - }), -) - -const status = SessionStatus.layer.pipe(Layer.provideMerge(Bus.layer)) -const run = SessionRunState.layer.pipe(Layer.provide(status)) -const infra = Layer.mergeAll(NodeFileSystem.layer, CrossSpawnSpawner.defaultLayer) -function makeHttp() { - const deps = Layer.mergeAll( - Session.defaultLayer, - Snapshot.defaultLayer, - LLM.defaultLayer, - Env.defaultLayer, - AgentSvc.defaultLayer, - 
Command.defaultLayer, - Permission.defaultLayer, - Plugin.defaultLayer, - Config.defaultLayer, - ProviderSvc.defaultLayer, - lsp, - mcp, - AppFileSystem.defaultLayer, - status, - ).pipe(Layer.provideMerge(infra)) - const question = Question.layer.pipe(Layer.provideMerge(deps)) - const todo = Todo.layer.pipe(Layer.provideMerge(deps)) - const registry = ToolRegistry.layer.pipe( - Layer.provide(Skill.defaultLayer), - Layer.provide(FetchHttpClient.layer), - Layer.provide(CrossSpawnSpawner.defaultLayer), - Layer.provide(Ripgrep.defaultLayer), - Layer.provide(Format.defaultLayer), - Layer.provideMerge(todo), - Layer.provideMerge(question), - Layer.provideMerge(deps), - ) - const trunc = Truncate.layer.pipe(Layer.provideMerge(deps)) - const proc = SessionProcessor.layer.pipe(Layer.provide(summary), Layer.provideMerge(deps)) - const compact = SessionCompaction.layer.pipe(Layer.provideMerge(proc), Layer.provideMerge(deps)) - return Layer.mergeAll( - TestLLMServer.layer, - SessionPrompt.layer.pipe( - Layer.provide(SessionRevert.defaultLayer), - Layer.provide(summary), - Layer.provideMerge(run), - Layer.provideMerge(compact), - Layer.provideMerge(proc), - Layer.provideMerge(registry), - Layer.provideMerge(trunc), - Layer.provide(Instruction.defaultLayer), - Layer.provide(SystemPrompt.defaultLayer), - Layer.provideMerge(deps), - ), - ).pipe(Layer.provide(summary)) -} - -const it = testEffect(makeHttp()) -const unix = process.platform !== "win32" ? it.live : it.live.skip - -// Config that registers a custom "test" provider with a "test-model" model -// so provider model lookup succeeds inside the loop. 
-const cfg = { - provider: { - test: { - name: "Test", - id: "test", - env: [], - npm: "@ai-sdk/openai-compatible", - models: { - "test-model": { - id: "test-model", - name: "Test Model", - attachment: false, - reasoning: false, - temperature: false, - tool_call: true, - release_date: "2025-01-01", - limit: { context: 100000, output: 10000 }, - cost: { input: 0, output: 0 }, - options: {}, - }, - }, - options: { - apiKey: "test-key", - baseURL: "http://localhost:1/v1", - }, - }, - }, -} - -function providerCfg(url: string) { - return { - ...cfg, - provider: { - ...cfg.provider, - test: { - ...cfg.provider.test, - options: { - ...cfg.provider.test.options, - baseURL: url, - }, - }, - }, - } -} - -const user = Effect.fn("test.user")(function* (sessionID: SessionID, text: string) { - const session = yield* Session.Service - const msg = yield* session.updateMessage({ - id: MessageID.ascending(), - role: "user", - sessionID, - agent: "build", - model: ref, - time: { created: Date.now() }, - }) - yield* session.updatePart({ - id: PartID.ascending(), - messageID: msg.id, - sessionID, - type: "text", - text, - }) - return msg -}) - -const seed = Effect.fn("test.seed")(function* (sessionID: SessionID, opts?: { finish?: string }) { - const session = yield* Session.Service - const msg = yield* user(sessionID, "hello") - const assistant: MessageV2.Assistant = { - id: MessageID.ascending(), - role: "assistant", - parentID: msg.id, - sessionID, - mode: "build", - agent: "build", - cost: 0, - path: { cwd: "/tmp", root: "/tmp" }, - tokens: { input: 0, output: 0, reasoning: 0, cache: { read: 0, write: 0 } }, - modelID: ref.modelID, - providerID: ref.providerID, - time: { created: Date.now() }, - ...(opts?.finish ? 
{ finish: opts.finish } : {}), - } - yield* session.updateMessage(assistant) - yield* session.updatePart({ - id: PartID.ascending(), - messageID: assistant.id, - sessionID, - type: "text", - text: "hi there", - }) - return { user: msg, assistant } -}) - -const addSubtask = (sessionID: SessionID, messageID: MessageID, model = ref) => - Effect.gen(function* () { - const session = yield* Session.Service - yield* session.updatePart({ - id: PartID.ascending(), - messageID, - sessionID, - type: "subtask", - prompt: "look into the cache key path", - description: "inspect bug", - agent: "general", - model, - }) - }) - -const boot = Effect.fn("test.boot")(function* (input?: { title?: string }) { - const prompt = yield* SessionPrompt.Service - const run = yield* SessionRunState.Service - const sessions = yield* Session.Service - const chat = yield* sessions.create(input ?? { title: "Pinned" }) - return { prompt, run, sessions, chat } -}) - -// Loop semantics - -it.live("loop exits immediately when last assistant has stop finish", () => - provideTmpdirServer( - Effect.fnUntraced(function* ({ llm }) { - const prompt = yield* SessionPrompt.Service - const sessions = yield* Session.Service - const chat = yield* sessions.create({ title: "Pinned" }) - yield* seed(chat.id, { finish: "stop" }) - - const result = yield* prompt.loop({ sessionID: chat.id }) - expect(result.info.role).toBe("assistant") - if (result.info.role === "assistant") expect(result.info.finish).toBe("stop") - expect(yield* llm.calls).toBe(0) - }), - { git: true, config: providerCfg }, - ), -) - -it.live("loop calls LLM and returns assistant message", () => - provideTmpdirServer( - Effect.fnUntraced(function* ({ llm }) { - const prompt = yield* SessionPrompt.Service - const sessions = yield* Session.Service - const chat = yield* sessions.create({ - title: "Pinned", - permission: [{ permission: "*", pattern: "*", action: "allow" }], - }) - yield* prompt.prompt({ - sessionID: chat.id, - agent: "build", - noReply: 
true, - parts: [{ type: "text", text: "hello" }], - }) - yield* llm.text("world") - - const result = yield* prompt.loop({ sessionID: chat.id }) - expect(result.info.role).toBe("assistant") - const parts = result.parts.filter((p) => p.type === "text") - expect(parts.some((p) => p.type === "text" && p.text === "world")).toBe(true) - expect(yield* llm.hits).toHaveLength(1) - }), - { git: true, config: providerCfg }, - ), -) - -it.live("static loop returns assistant text through local provider", () => - provideTmpdirServer( - Effect.fnUntraced(function* ({ llm }) { - const prompt = yield* SessionPrompt.Service - const sessions = yield* Session.Service - const session = yield* sessions.create({ - title: "Prompt provider", - permission: [{ permission: "*", pattern: "*", action: "allow" }], - }) - - yield* prompt.prompt({ - sessionID: session.id, - agent: "build", - noReply: true, - parts: [{ type: "text", text: "hello" }], - }) - - yield* llm.text("world") - - const result = yield* prompt.loop({ sessionID: session.id }) - expect(result.info.role).toBe("assistant") - expect(result.parts.some((part) => part.type === "text" && part.text === "world")).toBe(true) - expect(yield* llm.hits).toHaveLength(1) - expect(yield* llm.pending).toBe(0) - }), - { git: true, config: providerCfg }, - ), -) - -it.live("static loop consumes queued replies across turns", () => - provideTmpdirServer( - Effect.fnUntraced(function* ({ llm }) { - const prompt = yield* SessionPrompt.Service - const sessions = yield* Session.Service - const session = yield* sessions.create({ - title: "Prompt provider turns", - permission: [{ permission: "*", pattern: "*", action: "allow" }], - }) - - yield* prompt.prompt({ - sessionID: session.id, - agent: "build", - noReply: true, - parts: [{ type: "text", text: "hello one" }], - }) - - yield* llm.text("world one") - - const first = yield* prompt.loop({ sessionID: session.id }) - expect(first.info.role).toBe("assistant") - expect(first.parts.some((part) => 
part.type === "text" && part.text === "world one")).toBe(true) - - yield* prompt.prompt({ - sessionID: session.id, - agent: "build", - noReply: true, - parts: [{ type: "text", text: "hello two" }], - }) - - yield* llm.text("world two") - - const second = yield* prompt.loop({ sessionID: session.id }) - expect(second.info.role).toBe("assistant") - expect(second.parts.some((part) => part.type === "text" && part.text === "world two")).toBe(true) - - expect(yield* llm.hits).toHaveLength(2) - expect(yield* llm.pending).toBe(0) - }), - { git: true, config: providerCfg }, - ), -) - -it.live("loop continues when finish is tool-calls", () => - provideTmpdirServer( - Effect.fnUntraced(function* ({ llm }) { - const prompt = yield* SessionPrompt.Service - const sessions = yield* Session.Service - const session = yield* sessions.create({ - title: "Pinned", - permission: [{ permission: "*", pattern: "*", action: "allow" }], - }) - yield* prompt.prompt({ - sessionID: session.id, - agent: "build", - noReply: true, - parts: [{ type: "text", text: "hello" }], - }) - yield* llm.tool("first", { value: "first" }) - yield* llm.text("second") - - const result = yield* prompt.loop({ sessionID: session.id }) - expect(yield* llm.calls).toBe(2) - expect(result.info.role).toBe("assistant") - if (result.info.role === "assistant") { - expect(result.parts.some((part) => part.type === "text" && part.text === "second")).toBe(true) - expect(result.info.finish).toBe("stop") - } - }), - { git: true, config: providerCfg }, - ), -) - -it.live("glob tool keeps instance context during prompt runs", () => - provideTmpdirServer( - ({ dir, llm }) => - Effect.gen(function* () { - const prompt = yield* SessionPrompt.Service - const sessions = yield* Session.Service - const session = yield* sessions.create({ - title: "Glob context", - permission: [{ permission: "*", pattern: "*", action: "allow" }], - }) - const file = path.join(dir, "probe.txt") - yield* Effect.promise(() => Bun.write(file, "probe")) - - 
yield* prompt.prompt({ - sessionID: session.id, - agent: "build", - noReply: true, - parts: [{ type: "text", text: "find text files" }], - }) - yield* llm.tool("glob", { pattern: "**/*.txt" }) - yield* llm.text("done") - - const result = yield* prompt.loop({ sessionID: session.id }) - expect(result.info.role).toBe("assistant") - - const msgs = yield* MessageV2.filterCompactedEffect(session.id) - const tool = msgs - .flatMap((msg) => msg.parts) - .find( - (part): part is CompletedToolPart => - part.type === "tool" && part.tool === "glob" && part.state.status === "completed", - ) - if (!tool) return - - expect(tool.state.output).toContain(file) - expect(tool.state.output).not.toContain("No context found for instance") - expect(result.parts.some((part) => part.type === "text" && part.text === "done")).toBe(true) - }), - { git: true, config: providerCfg }, - ), -) - -it.live("loop continues when finish is stop but assistant has tool parts", () => - provideTmpdirServer( - Effect.fnUntraced(function* ({ llm }) { - const prompt = yield* SessionPrompt.Service - const sessions = yield* Session.Service - const session = yield* sessions.create({ - title: "Pinned", - permission: [{ permission: "*", pattern: "*", action: "allow" }], - }) - yield* prompt.prompt({ - sessionID: session.id, - agent: "build", - noReply: true, - parts: [{ type: "text", text: "hello" }], - }) - yield* llm.push(reply().tool("first", { value: "first" }).stop()) - yield* llm.text("second") - - const result = yield* prompt.loop({ sessionID: session.id }) - expect(yield* llm.calls).toBe(2) - expect(result.info.role).toBe("assistant") - if (result.info.role === "assistant") { - expect(result.parts.some((part) => part.type === "text" && part.text === "second")).toBe(true) - expect(result.info.finish).toBe("stop") - } - }), - { git: true, config: providerCfg }, - ), -) - -it.live("failed subtask preserves metadata on error tool state", () => - provideTmpdirServer( - Effect.fnUntraced(function* ({ llm }) { - 
const prompt = yield* SessionPrompt.Service - const sessions = yield* Session.Service - const chat = yield* sessions.create({ title: "Pinned" }) - yield* llm.tool("task", { - description: "inspect bug", - prompt: "look into the cache key path", - subagent_type: "general", - }) - yield* llm.text("done") - const msg = yield* user(chat.id, "hello") - yield* addSubtask(chat.id, msg.id) - - const result = yield* prompt.loop({ sessionID: chat.id }) - expect(result.info.role).toBe("assistant") - expect(yield* llm.calls).toBe(2) - - const msgs = yield* MessageV2.filterCompactedEffect(chat.id) - const taskMsg = msgs.find((item) => item.info.role === "assistant" && item.info.agent === "general") - expect(taskMsg?.info.role).toBe("assistant") - if (!taskMsg || taskMsg.info.role !== "assistant") return - - const tool = errorTool(taskMsg.parts) - if (!tool) return - - expect(tool.state.error).toContain("Tool execution failed") - expect(tool.state.metadata).toBeDefined() - expect(tool.state.metadata?.sessionId).toBeDefined() - expect(tool.state.metadata?.model).toEqual({ - providerID: ProviderID.make("test"), - modelID: ModelID.make("missing-model"), - }) - }), - { - git: true, - config: (url) => ({ - ...providerCfg(url), - agent: { - general: { - model: "test/missing-model", - }, - }, - }), - }, - ), -) - -it.live( - "running subtask preserves metadata after tool-call transition", - () => - provideTmpdirServer( - Effect.fnUntraced(function* ({ llm }) { - const prompt = yield* SessionPrompt.Service - const sessions = yield* Session.Service - const chat = yield* sessions.create({ title: "Pinned" }) - yield* llm.hang - const msg = yield* user(chat.id, "hello") - yield* addSubtask(chat.id, msg.id) - - const fiber = yield* prompt.loop({ sessionID: chat.id }).pipe(Effect.forkChild) - - const tool = yield* Effect.promise(async () => { - const end = Date.now() + 5_000 - while (Date.now() < end) { - const msgs = await Effect.runPromise(MessageV2.filterCompactedEffect(chat.id)) - const 
taskMsg = msgs.find((item) => item.info.role === "assistant" && item.info.agent === "general") - const tool = taskMsg?.parts.find((part): part is MessageV2.ToolPart => part.type === "tool") - if (tool?.state.status === "running" && tool.state.metadata?.sessionId) return tool - await new Promise((done) => setTimeout(done, 20)) - } - throw new Error("timed out waiting for running subtask metadata") - }) - - if (tool.state.status !== "running") return - expect(typeof tool.state.metadata?.sessionId).toBe("string") - expect(tool.state.title).toBeDefined() - expect(tool.state.metadata?.model).toBeDefined() - - yield* prompt.cancel(chat.id) - yield* Fiber.await(fiber) - }), - { git: true, config: providerCfg }, - ), - 5_000, -) - -it.live( - "running task tool preserves metadata after tool-call transition", - () => - provideTmpdirServer( - Effect.fnUntraced(function* ({ llm }) { - const prompt = yield* SessionPrompt.Service - const sessions = yield* Session.Service - const chat = yield* sessions.create({ - title: "Pinned", - permission: [{ permission: "*", pattern: "*", action: "allow" }], - }) - yield* llm.tool("task", { - description: "inspect bug", - prompt: "look into the cache key path", - subagent_type: "general", - }) - yield* llm.hang - yield* user(chat.id, "hello") - - const fiber = yield* prompt.loop({ sessionID: chat.id }).pipe(Effect.forkChild) - - const tool = yield* Effect.promise(async () => { - const end = Date.now() + 5_000 - while (Date.now() < end) { - const msgs = await Effect.runPromise(MessageV2.filterCompactedEffect(chat.id)) - const assistant = msgs.findLast((item) => item.info.role === "assistant" && item.info.agent === "build") - const tool = assistant?.parts.find( - (part): part is MessageV2.ToolPart => part.type === "tool" && part.tool === "task", - ) - if (tool?.state.status === "running" && tool.state.metadata?.sessionId) return tool - await new Promise((done) => setTimeout(done, 20)) - } - throw new Error("timed out waiting for running task 
metadata") - }) - - if (tool.state.status !== "running") return - expect(typeof tool.state.metadata?.sessionId).toBe("string") - expect(tool.state.title).toBe("inspect bug") - expect(tool.state.metadata?.model).toBeDefined() - - yield* prompt.cancel(chat.id) - yield* Fiber.await(fiber) - }), - { git: true, config: providerCfg }, - ), - 10_000, -) - -it.live( - "loop sets status to busy then idle", - () => - provideTmpdirServer( - Effect.fnUntraced(function* ({ llm }) { - const prompt = yield* SessionPrompt.Service - const sessions = yield* Session.Service - const status = yield* SessionStatus.Service - - yield* llm.hang - - const chat = yield* sessions.create({}) - yield* user(chat.id, "hi") - - const fiber = yield* prompt.loop({ sessionID: chat.id }).pipe(Effect.forkChild) - yield* llm.wait(1) - expect((yield* status.get(chat.id)).type).toBe("busy") - yield* prompt.cancel(chat.id) - yield* Fiber.await(fiber) - expect((yield* status.get(chat.id)).type).toBe("idle") - }), - { git: true, config: providerCfg }, - ), - 3_000, -) - -// Cancel semantics - -it.live( - "cancel interrupts loop and resolves with an assistant message", - () => - provideTmpdirServer( - Effect.fnUntraced(function* ({ llm }) { - const prompt = yield* SessionPrompt.Service - const sessions = yield* Session.Service - const chat = yield* sessions.create({ title: "Pinned" }) - yield* seed(chat.id) - - yield* llm.hang - - yield* user(chat.id, "more") - - const fiber = yield* prompt.loop({ sessionID: chat.id }).pipe(Effect.forkChild) - yield* llm.wait(1) - yield* prompt.cancel(chat.id) - const exit = yield* Fiber.await(fiber) - expect(Exit.isSuccess(exit)).toBe(true) - if (Exit.isSuccess(exit)) { - expect(exit.value.info.role).toBe("assistant") - } - }), - { git: true, config: providerCfg }, - ), - 3_000, -) - -it.live( - "cancel records MessageAbortedError on interrupted process", - () => - provideTmpdirServer( - Effect.fnUntraced(function* ({ llm }) { - const prompt = yield* SessionPrompt.Service - 
const sessions = yield* Session.Service - const chat = yield* sessions.create({ title: "Pinned" }) - yield* llm.hang - yield* user(chat.id, "hello") - - const fiber = yield* prompt.loop({ sessionID: chat.id }).pipe(Effect.forkChild) - yield* llm.wait(1) - yield* prompt.cancel(chat.id) - const exit = yield* Fiber.await(fiber) - expect(Exit.isSuccess(exit)).toBe(true) - if (Exit.isSuccess(exit)) { - const info = exit.value.info - if (info.role === "assistant") { - expect(info.error?.name).toBe("MessageAbortedError") - } - } - }), - { git: true, config: providerCfg }, - ), - 3_000, -) - -it.live( - "cancel finalizes subtask tool state", - () => - provideTmpdirInstance( - () => - Effect.gen(function* () { - const ready = defer() - const aborted = defer() - const registry = yield* ToolRegistry.Service - const { task } = yield* registry.named() - const original = task.execute - task.execute = (_args, ctx) => - Effect.callback((_resume) => { - ready.resolve() - ctx.abort.addEventListener("abort", () => aborted.resolve(), { once: true }) - return Effect.sync(() => aborted.resolve()) - }) - yield* Effect.addFinalizer(() => Effect.sync(() => void (task.execute = original))) - - const { prompt, chat } = yield* boot() - const msg = yield* user(chat.id, "hello") - yield* addSubtask(chat.id, msg.id) - - const fiber = yield* prompt.loop({ sessionID: chat.id }).pipe(Effect.forkChild) - yield* Effect.promise(() => ready.promise) - yield* prompt.cancel(chat.id) - yield* Effect.promise(() => aborted.promise) - - const exit = yield* Fiber.await(fiber) - expect(Exit.isSuccess(exit)).toBe(true) - - const msgs = yield* MessageV2.filterCompactedEffect(chat.id) - const taskMsg = msgs.find((item) => item.info.role === "assistant" && item.info.agent === "general") - expect(taskMsg?.info.role).toBe("assistant") - if (!taskMsg || taskMsg.info.role !== "assistant") return - - const tool = toolPart(taskMsg.parts) - expect(tool?.type).toBe("tool") - if (!tool) return - - 
expect(tool.state.status).not.toBe("running") - expect(taskMsg.info.time.completed).toBeDefined() - expect(taskMsg.info.finish).toBeDefined() - }), - { git: true, config: cfg }, - ), - 30_000, -) - -it.live( - "cancel with queued callers resolves all cleanly", - () => - provideTmpdirServer( - Effect.fnUntraced(function* ({ llm }) { - const prompt = yield* SessionPrompt.Service - const sessions = yield* Session.Service - const chat = yield* sessions.create({ title: "Pinned" }) - yield* llm.hang - yield* user(chat.id, "hello") - - const a = yield* prompt.loop({ sessionID: chat.id }).pipe(Effect.forkChild) - yield* llm.wait(1) - const b = yield* prompt.loop({ sessionID: chat.id }).pipe(Effect.forkChild) - yield* Effect.sleep(50) - - yield* prompt.cancel(chat.id) - const [exitA, exitB] = yield* Effect.all([Fiber.await(a), Fiber.await(b)]) - expect(Exit.isSuccess(exitA)).toBe(true) - expect(Exit.isSuccess(exitB)).toBe(true) - if (Exit.isSuccess(exitA) && Exit.isSuccess(exitB)) { - expect(exitA.value.info.id).toBe(exitB.value.info.id) - } - }), - { git: true, config: providerCfg }, - ), - 3_000, -) - -// Queue semantics - -it.live("concurrent loop callers get same result", () => - provideTmpdirInstance( - (_dir) => - Effect.gen(function* () { - const { prompt, run, chat } = yield* boot() - yield* seed(chat.id, { finish: "stop" }) - - const [a, b] = yield* Effect.all([prompt.loop({ sessionID: chat.id }), prompt.loop({ sessionID: chat.id })], { - concurrency: "unbounded", - }) - - expect(a.info.id).toBe(b.info.id) - expect(a.info.role).toBe("assistant") - yield* run.assertNotBusy(chat.id) - }), - { git: true }, - ), -) - -it.live( - "concurrent loop callers all receive same error result", - () => - provideTmpdirServer( - Effect.fnUntraced(function* ({ llm }) { - const prompt = yield* SessionPrompt.Service - const sessions = yield* Session.Service - const chat = yield* sessions.create({ title: "Pinned" }) - - yield* llm.fail("boom") - yield* user(chat.id, "hello") - - const 
[a, b] = yield* Effect.all([prompt.loop({ sessionID: chat.id }), prompt.loop({ sessionID: chat.id })], { - concurrency: "unbounded", - }) - expect(a.info.id).toBe(b.info.id) - expect(a.info.role).toBe("assistant") - }), - { git: true, config: providerCfg }, - ), - 3_000, -) - -it.live( - "prompt submitted during an active run is included in the next LLM input", - () => - provideTmpdirServer( - Effect.fnUntraced(function* ({ llm }) { - const gate = defer() - const prompt = yield* SessionPrompt.Service - const sessions = yield* Session.Service - const chat = yield* sessions.create({ title: "Pinned" }) - - yield* llm.hold("first", gate.promise) - yield* llm.text("second") - - const a = yield* prompt - .prompt({ - sessionID: chat.id, - agent: "build", - model: ref, - parts: [{ type: "text", text: "first" }], - }) - .pipe(Effect.forkChild) - - yield* llm.wait(1) - - const id = MessageID.ascending() - const b = yield* prompt - .prompt({ - sessionID: chat.id, - messageID: id, - agent: "build", - model: ref, - parts: [{ type: "text", text: "second" }], - }) - .pipe(Effect.forkChild) - - yield* Effect.promise(async () => { - const end = Date.now() + 5000 - while (Date.now() < end) { - const msgs = await Effect.runPromise(sessions.messages({ sessionID: chat.id })) - if (msgs.some((msg) => msg.info.role === "user" && msg.info.id === id)) return - await new Promise((done) => setTimeout(done, 20)) - } - throw new Error("timed out waiting for second prompt to save") - }) - - gate.resolve() - - const [ea, eb] = yield* Effect.all([Fiber.await(a), Fiber.await(b)]) - expect(Exit.isSuccess(ea)).toBe(true) - expect(Exit.isSuccess(eb)).toBe(true) - expect(yield* llm.calls).toBe(2) - - const msgs = yield* sessions.messages({ sessionID: chat.id }) - const assistants = msgs.filter((msg) => msg.info.role === "assistant") - expect(assistants).toHaveLength(2) - const last = assistants.at(-1) - if (!last || last.info.role !== "assistant") throw new Error("expected second assistant") - 
expect(last.info.parentID).toBe(id) - expect(last.parts.some((part) => part.type === "text" && part.text === "second")).toBe(true) - - const inputs = yield* llm.inputs - expect(inputs).toHaveLength(2) - expect(JSON.stringify(inputs.at(-1)?.messages)).toContain("second") - }), - { git: true, config: providerCfg }, - ), - 3_000, -) - -it.live( - "assertNotBusy throws BusyError when loop running", - () => - provideTmpdirServer( - Effect.fnUntraced(function* ({ llm }) { - const prompt = yield* SessionPrompt.Service - const run = yield* SessionRunState.Service - const sessions = yield* Session.Service - yield* llm.hang - - const chat = yield* sessions.create({}) - yield* user(chat.id, "hi") - - const fiber = yield* prompt.loop({ sessionID: chat.id }).pipe(Effect.forkChild) - yield* llm.wait(1) - - const exit = yield* run.assertNotBusy(chat.id).pipe(Effect.exit) - expect(Exit.isFailure(exit)).toBe(true) - if (Exit.isFailure(exit)) { - expect(Cause.squash(exit.cause)).toBeInstanceOf(Session.BusyError) - } - - yield* prompt.cancel(chat.id) - yield* Fiber.await(fiber) - }), - { git: true, config: providerCfg }, - ), - 3_000, -) - -it.live("assertNotBusy succeeds when idle", () => - provideTmpdirInstance( - (_dir) => - Effect.gen(function* () { - const run = yield* SessionRunState.Service - const sessions = yield* Session.Service - - const chat = yield* sessions.create({}) - const exit = yield* run.assertNotBusy(chat.id).pipe(Effect.exit) - expect(Exit.isSuccess(exit)).toBe(true) - }), - { git: true }, - ), -) - -// Shell semantics - -it.live( - "shell rejects with BusyError when loop running", - () => - provideTmpdirServer( - Effect.fnUntraced(function* ({ llm }) { - const prompt = yield* SessionPrompt.Service - const sessions = yield* Session.Service - const chat = yield* sessions.create({ title: "Pinned" }) - yield* llm.hang - yield* user(chat.id, "hi") - - const fiber = yield* prompt.loop({ sessionID: chat.id }).pipe(Effect.forkChild) - yield* llm.wait(1) - - const exit 
= yield* prompt.shell({ sessionID: chat.id, agent: "build", command: "echo hi" }).pipe(Effect.exit) - expect(Exit.isFailure(exit)).toBe(true) - if (Exit.isFailure(exit)) { - expect(Cause.squash(exit.cause)).toBeInstanceOf(Session.BusyError) - } - - yield* prompt.cancel(chat.id) - yield* Fiber.await(fiber) - }), - { git: true, config: providerCfg }, - ), - 3_000, -) - -unix("shell captures stdout and stderr in completed tool output", () => - provideTmpdirInstance( - (_dir) => - Effect.gen(function* () { - const { prompt, run, chat } = yield* boot() - const result = yield* prompt.shell({ - sessionID: chat.id, - agent: "build", - command: "printf out && printf err >&2", - }) - - expect(result.info.role).toBe("assistant") - const tool = completedTool(result.parts) - if (!tool) return - - expect(tool.state.output).toContain("out") - expect(tool.state.output).toContain("err") - expect(tool.state.metadata.output).toContain("out") - expect(tool.state.metadata.output).toContain("err") - yield* run.assertNotBusy(chat.id) - }), - { git: true, config: cfg }, - ), -) - -unix("shell completes a fast command on the preferred shell", () => - provideTmpdirInstance( - (dir) => - Effect.gen(function* () { - const { prompt, run, chat } = yield* boot() - const result = yield* prompt.shell({ - sessionID: chat.id, - agent: "build", - command: "pwd", - }) - - expect(result.info.role).toBe("assistant") - const tool = completedTool(result.parts) - if (!tool) return - - expect(tool.state.input.command).toBe("pwd") - expect(tool.state.output).toContain(dir) - expect(tool.state.metadata.output).toContain(dir) - yield* run.assertNotBusy(chat.id) - }), - { git: true, config: cfg }, - ), -) - -unix("shell lists files from the project directory", () => - provideTmpdirInstance( - (dir) => - Effect.gen(function* () { - const { prompt, run, chat } = yield* boot() - yield* Effect.promise(() => Bun.write(path.join(dir, "README.md"), "# e2e\n")) - - const result = yield* prompt.shell({ - sessionID: 
chat.id, - agent: "build", - command: "command ls", - }) - - expect(result.info.role).toBe("assistant") - const tool = completedTool(result.parts) - if (!tool) return - - expect(tool.state.input.command).toBe("command ls") - expect(tool.state.output).toContain("README.md") - expect(tool.state.metadata.output).toContain("README.md") - yield* run.assertNotBusy(chat.id) - }), - { git: true, config: cfg }, - ), -) - -unix("shell captures stderr from a failing command", () => - provideTmpdirInstance( - (_dir) => - Effect.gen(function* () { - const { prompt, run, chat } = yield* boot() - const result = yield* prompt.shell({ - sessionID: chat.id, - agent: "build", - command: "command -v __nonexistent_cmd_e2e__ || echo 'not found' >&2; exit 1", - }) - - expect(result.info.role).toBe("assistant") - const tool = completedTool(result.parts) - if (!tool) return - - expect(tool.state.output).toContain("not found") - expect(tool.state.metadata.output).toContain("not found") - yield* run.assertNotBusy(chat.id) - }), - { git: true, config: cfg }, - ), -) - -unix( - "shell updates running metadata before process exit", - () => - withSh(() => - provideTmpdirInstance( - (_dir) => - Effect.gen(function* () { - const { prompt, chat } = yield* boot() - - const fiber = yield* prompt - .shell({ sessionID: chat.id, agent: "build", command: "printf first && sleep 0.2 && printf second" }) - .pipe(Effect.forkChild) - - yield* Effect.promise(async () => { - const start = Date.now() - while (Date.now() - start < 5000) { - const msgs = await MessageV2.filterCompacted(MessageV2.stream(chat.id)) - const taskMsg = msgs.find((item) => item.info.role === "assistant") - const tool = taskMsg ? 
toolPart(taskMsg.parts) : undefined - if (tool?.state.status === "running" && tool.state.metadata?.output.includes("first")) return - await new Promise((done) => setTimeout(done, 20)) - } - throw new Error("timed out waiting for running shell metadata") - }) - - const exit = yield* Fiber.await(fiber) - expect(Exit.isSuccess(exit)).toBe(true) - }), - { git: true, config: cfg }, - ), - ), - 30_000, -) - -it.live( - "loop waits while shell runs and starts after shell exits", - () => - provideTmpdirServer( - Effect.fnUntraced(function* ({ llm }) { - const prompt = yield* SessionPrompt.Service - const sessions = yield* Session.Service - const chat = yield* sessions.create({ - title: "Pinned", - permission: [{ permission: "*", pattern: "*", action: "allow" }], - }) - yield* llm.text("after-shell") - - const sh = yield* prompt - .shell({ sessionID: chat.id, agent: "build", command: "sleep 0.2" }) - .pipe(Effect.forkChild) - yield* Effect.sleep(50) - - const loop = yield* prompt.loop({ sessionID: chat.id }).pipe(Effect.forkChild) - yield* Effect.sleep(50) - - expect(yield* llm.calls).toBe(0) - - yield* Fiber.await(sh) - const exit = yield* Fiber.await(loop) - - expect(Exit.isSuccess(exit)).toBe(true) - if (Exit.isSuccess(exit)) { - expect(exit.value.info.role).toBe("assistant") - expect(exit.value.parts.some((part) => part.type === "text" && part.text === "after-shell")).toBe(true) - } - expect(yield* llm.calls).toBe(1) - }), - { git: true, config: providerCfg }, - ), - 3_000, -) - -it.live( - "shell completion resumes queued loop callers", - () => - provideTmpdirServer( - Effect.fnUntraced(function* ({ llm }) { - const prompt = yield* SessionPrompt.Service - const sessions = yield* Session.Service - const chat = yield* sessions.create({ - title: "Pinned", - permission: [{ permission: "*", pattern: "*", action: "allow" }], - }) - yield* llm.text("done") - - const sh = yield* prompt - .shell({ sessionID: chat.id, agent: "build", command: "sleep 0.2" }) - 
.pipe(Effect.forkChild) - yield* Effect.sleep(50) - - const a = yield* prompt.loop({ sessionID: chat.id }).pipe(Effect.forkChild) - const b = yield* prompt.loop({ sessionID: chat.id }).pipe(Effect.forkChild) - yield* Effect.sleep(50) - - expect(yield* llm.calls).toBe(0) - - yield* Fiber.await(sh) - const [ea, eb] = yield* Effect.all([Fiber.await(a), Fiber.await(b)]) - - expect(Exit.isSuccess(ea)).toBe(true) - expect(Exit.isSuccess(eb)).toBe(true) - if (Exit.isSuccess(ea) && Exit.isSuccess(eb)) { - expect(ea.value.info.id).toBe(eb.value.info.id) - expect(ea.value.info.role).toBe("assistant") - } - expect(yield* llm.calls).toBe(1) - }), - { git: true, config: providerCfg }, - ), - 3_000, -) - -unix( - "cancel interrupts shell and resolves cleanly", - () => - withSh(() => - provideTmpdirInstance( - (_dir) => - Effect.gen(function* () { - const { prompt, run, chat } = yield* boot() - - const sh = yield* prompt - .shell({ sessionID: chat.id, agent: "build", command: "sleep 30" }) - .pipe(Effect.forkChild) - yield* Effect.sleep(50) - - yield* prompt.cancel(chat.id) - - const status = yield* SessionStatus.Service - expect((yield* status.get(chat.id)).type).toBe("idle") - const busy = yield* run.assertNotBusy(chat.id).pipe(Effect.exit) - expect(Exit.isSuccess(busy)).toBe(true) - - const exit = yield* Fiber.await(sh) - expect(Exit.isSuccess(exit)).toBe(true) - if (Exit.isSuccess(exit)) { - expect(exit.value.info.role).toBe("assistant") - const tool = completedTool(exit.value.parts) - if (tool) { - expect(tool.state.output).toContain("User aborted the command") - } - } - }), - { git: true, config: cfg }, - ), - ), - 30_000, -) - -unix( - "cancel persists aborted shell result when shell ignores TERM", - () => - withSh(() => - provideTmpdirInstance( - (_dir) => - Effect.gen(function* () { - const { prompt, chat } = yield* boot() - - const sh = yield* prompt - .shell({ sessionID: chat.id, agent: "build", command: "trap '' TERM; sleep 30" }) - .pipe(Effect.forkChild) - yield* 
Effect.sleep(50) - - yield* prompt.cancel(chat.id) - - const exit = yield* Fiber.await(sh) - expect(Exit.isSuccess(exit)).toBe(true) - if (Exit.isSuccess(exit)) { - expect(exit.value.info.role).toBe("assistant") - const tool = completedTool(exit.value.parts) - if (tool) { - expect(tool.state.output).toContain("User aborted the command") - } - } - }), - { git: true, config: cfg }, - ), - ), - 30_000, -) - -unix( - "cancel finalizes interrupted bash tool output through normal truncation", - () => - provideTmpdirServer( - ({ dir, llm }) => - Effect.gen(function* () { - const prompt = yield* SessionPrompt.Service - const sessions = yield* Session.Service - const chat = yield* sessions.create({ - title: "Interrupted bash truncation", - permission: [{ permission: "*", pattern: "*", action: "allow" }], - }) - - yield* prompt.prompt({ - sessionID: chat.id, - agent: "build", - noReply: true, - parts: [{ type: "text", text: "run bash" }], - }) - - yield* llm.tool("bash", { - command: - 'i=0; while [ "$i" -lt 4000 ]; do printf "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx %05d\\n" "$i"; i=$((i + 1)); done; sleep 30', - description: "Print many lines", - timeout: 30_000, - workdir: path.resolve(dir), - }) - - const run = yield* prompt.loop({ sessionID: chat.id }).pipe(Effect.forkChild) - yield* llm.wait(1) - yield* Effect.sleep(150) - yield* prompt.cancel(chat.id) - - const exit = yield* Fiber.await(run) - expect(Exit.isSuccess(exit)).toBe(true) - if (Exit.isFailure(exit)) return - - const tool = completedTool(exit.value.parts) - if (!tool) return - - expect(tool.state.metadata.truncated).toBe(true) - expect(typeof tool.state.metadata.outputPath).toBe("string") - expect(tool.state.output).toMatch(/\.\.\.output truncated\.\.\./) - expect(tool.state.output).toMatch(/Full output saved to:\s+\S+/) - expect(tool.state.output).not.toContain("Tool execution aborted") - }), - { git: true, config: providerCfg }, - ), - 30_000, -) 
- -unix( - "cancel interrupts loop queued behind shell", - () => - provideTmpdirInstance( - (_dir) => - Effect.gen(function* () { - const { prompt, chat } = yield* boot() - - const sh = yield* prompt - .shell({ sessionID: chat.id, agent: "build", command: "sleep 30" }) - .pipe(Effect.forkChild) - yield* Effect.sleep(50) - - const loop = yield* prompt.loop({ sessionID: chat.id }).pipe(Effect.forkChild) - yield* Effect.sleep(50) - - yield* prompt.cancel(chat.id) - - const exit = yield* Fiber.await(loop) - expect(Exit.isSuccess(exit)).toBe(true) - - yield* Fiber.await(sh) - }), - { git: true, config: cfg }, - ), - 30_000, -) - -unix( - "shell rejects when another shell is already running", - () => - withSh(() => - provideTmpdirInstance( - (_dir) => - Effect.gen(function* () { - const { prompt, chat } = yield* boot() - - const a = yield* prompt - .shell({ sessionID: chat.id, agent: "build", command: "sleep 30" }) - .pipe(Effect.forkChild) - yield* Effect.sleep(50) - - const exit = yield* prompt - .shell({ sessionID: chat.id, agent: "build", command: "echo hi" }) - .pipe(Effect.exit) - expect(Exit.isFailure(exit)).toBe(true) - if (Exit.isFailure(exit)) { - expect(Cause.squash(exit.cause)).toBeInstanceOf(Session.BusyError) - } - - yield* prompt.cancel(chat.id) - yield* Fiber.await(a) - }), - { git: true, config: cfg }, - ), - ), - 30_000, -) - -// Abort signal propagation tests for inline tool execution - -/** Override a tool's execute to hang until aborted. Returns ready/aborted defers and a finalizer. 
*/ -function hangUntilAborted(tool: { execute: (...args: any[]) => any }) { - const ready = defer() - const aborted = defer() - const original = tool.execute - tool.execute = (_args: any, ctx: any) => { - ready.resolve() - ctx.abort.addEventListener("abort", () => aborted.resolve(), { once: true }) - return Effect.callback(() => {}) - } - const restore = Effect.addFinalizer(() => Effect.sync(() => void (tool.execute = original))) - return { ready, aborted, restore } -} - -it.live( - "interrupt propagates abort signal to read tool via file part (text/plain)", - () => - provideTmpdirInstance( - (dir) => - Effect.gen(function* () { - const registry = yield* ToolRegistry.Service - const { read } = yield* registry.named() - const { ready, aborted, restore } = hangUntilAborted(read) - yield* restore - - const prompt = yield* SessionPrompt.Service - const sessions = yield* Session.Service - const chat = yield* sessions.create({ title: "Abort Test" }) - - const testFile = path.join(dir, "test.txt") - yield* Effect.promise(() => Bun.write(testFile, "hello world")) - - const fiber = yield* prompt - .prompt({ - sessionID: chat.id, - agent: "build", - parts: [ - { type: "text", text: "read this" }, - { type: "file", url: `file://${testFile}`, filename: "test.txt", mime: "text/plain" }, - ], - }) - .pipe(Effect.forkChild) - - yield* Effect.promise(() => ready.promise) - yield* Fiber.interrupt(fiber) - - yield* Effect.promise(() => - Promise.race([ - aborted.promise, - new Promise((_, reject) => - setTimeout(() => reject(new Error("abort signal not propagated within 2s")), 2_000), - ), - ]), - ) - }), - { git: true, config: cfg }, - ), - 30_000, -) - -it.live( - "interrupt propagates abort signal to read tool via file part (directory)", - () => - provideTmpdirInstance( - (dir) => - Effect.gen(function* () { - const registry = yield* ToolRegistry.Service - const { read } = yield* registry.named() - const { ready, aborted, restore } = hangUntilAborted(read) - yield* restore - - 
const prompt = yield* SessionPrompt.Service - const sessions = yield* Session.Service - const chat = yield* sessions.create({ title: "Abort Test" }) - - const fiber = yield* prompt - .prompt({ - sessionID: chat.id, - agent: "build", - parts: [ - { type: "text", text: "read this" }, - { type: "file", url: `file://${dir}`, filename: "dir", mime: "application/x-directory" }, - ], - }) - .pipe(Effect.forkChild) - - yield* Effect.promise(() => ready.promise) - yield* Fiber.interrupt(fiber) - - yield* Effect.promise(() => - Promise.race([ - aborted.promise, - new Promise((_, reject) => - setTimeout(() => reject(new Error("abort signal not propagated within 2s")), 2_000), - ), - ]), - ) - }), - { git: true, config: cfg }, - ), - 30_000, -) diff --git a/packages/opencode/test/session/prompt.test.ts b/packages/opencode/test/session/prompt.test.ts index 2b489da9e9..8ffb20f154 100644 --- a/packages/opencode/test/session/prompt.test.ts +++ b/packages/opencode/test/session/prompt.test.ts @@ -1,22 +1,64 @@ +import { NodeFileSystem } from "@effect/platform-node" +import { FetchHttpClient } from "effect/unstable/http" +import { expect } from "bun:test" +import { Cause, Effect, Exit, Fiber, Layer } from "effect" import path from "path" -import { describe, expect, test } from "bun:test" -import { NamedError } from "@opencode-ai/shared/util/error" import { fileURLToPath } from "url" -import { Effect, Layer } from "effect" -import { Instance } from "../../src/project/instance" +import { NamedError } from "@opencode-ai/shared/util/error" +import { Agent as AgentSvc } from "../../src/agent/agent" +import { Bus } from "../../src/bus" +import { Command } from "../../src/command" +import { Config } from "../../src/config" +import { LSP } from "../../src/lsp" +import { MCP } from "../../src/mcp" +import { Permission } from "../../src/permission" +import { Plugin } from "../../src/plugin" +import { Provider as ProviderSvc } from "../../src/provider" +import { Env } from "../../src/env" 
import { ModelID, ProviderID } from "../../src/provider/schema" +import { Question } from "../../src/question" +import { Todo } from "../../src/session/todo" import { Session } from "../../src/session" +import { LLM } from "../../src/session/llm" import { MessageV2 } from "../../src/session/message-v2" +import { AppFileSystem } from "@opencode-ai/shared/filesystem" +import { SessionCompaction } from "../../src/session/compaction" +import { SessionSummary } from "../../src/session/summary" +import { Instruction } from "../../src/session/instruction" +import { SessionProcessor } from "../../src/session/processor" import { SessionPrompt } from "../../src/session/prompt" +import { SessionRevert } from "../../src/session/revert" +import { SessionRunState } from "../../src/session/run-state" +import { MessageID, PartID, SessionID } from "../../src/session/schema" +import { SessionStatus } from "../../src/session/status" +import { Skill } from "../../src/skill" +import { SystemPrompt } from "../../src/session/system" +import { Shell } from "../../src/shell/shell" +import { Snapshot } from "../../src/snapshot" +import { ToolRegistry } from "../../src/tool" +import { Truncate } from "../../src/tool" import { Log } from "../../src/util" -import { tmpdir } from "../fixture/fixture" +import * as CrossSpawnSpawner from "../../src/effect/cross-spawn-spawner" +import { Ripgrep } from "../../src/file/ripgrep" +import { Format } from "../../src/format" +import { provideTmpdirInstance, provideTmpdirServer } from "../fixture/fixture" +import { testEffect } from "../lib/effect" +import { reply, TestLLMServer } from "../lib/llm-server" void Log.init({ print: false }) -function run(fx: Effect.Effect) { - return Effect.runPromise( - fx.pipe(Effect.scoped, Effect.provide(Layer.mergeAll(SessionPrompt.defaultLayer, Session.defaultLayer))), - ) +const summary = Layer.succeed( + SessionSummary.Service, + SessionSummary.Service.of({ + summarize: () => Effect.void, + diff: () => 
Effect.succeed([]), + computeDiff: () => Effect.succeed([]), + }), +) + +const ref = { + providerID: ProviderID.make("test"), + modelID: ModelID.make("test-model"), } function defer() { @@ -27,563 +69,1817 @@ function defer() { return { promise, resolve } } -function chat(text: string) { - const payload = - [ - `data: ${JSON.stringify({ - id: "chatcmpl-1", - object: "chat.completion.chunk", - choices: [{ delta: { role: "assistant" } }], - })}`, - `data: ${JSON.stringify({ - id: "chatcmpl-1", - object: "chat.completion.chunk", - choices: [{ delta: { content: text } }], - })}`, - `data: ${JSON.stringify({ - id: "chatcmpl-1", - object: "chat.completion.chunk", - choices: [{ delta: {}, finish_reason: "stop" }], - })}`, - "data: [DONE]", - ].join("\n\n") + "\n\n" - - const encoder = new TextEncoder() - return new ReadableStream({ - start(ctrl) { - ctrl.enqueue(encoder.encode(payload)) - ctrl.close() - }, - }) +function withSh(fx: () => Effect.Effect) { + return Effect.acquireUseRelease( + Effect.sync(() => { + const prev = process.env.SHELL + process.env.SHELL = "/bin/sh" + Shell.preferred.reset() + return prev + }), + () => fx(), + (prev) => + Effect.sync(() => { + if (prev === undefined) delete process.env.SHELL + else process.env.SHELL = prev + Shell.preferred.reset() + }), + ) } -function hanging(ready: () => void) { - const encoder = new TextEncoder() - let timer: ReturnType | undefined - const first = `data: ${JSON.stringify({ - id: "chatcmpl-1", - object: "chat.completion.chunk", - choices: [{ delta: { role: "assistant" } }], - })}\n\n` - const rest = - [ - `data: ${JSON.stringify({ - id: "chatcmpl-1", - object: "chat.completion.chunk", - choices: [{ delta: { content: "late" } }], - })}`, - `data: ${JSON.stringify({ - id: "chatcmpl-1", - object: "chat.completion.chunk", - choices: [{ delta: {}, finish_reason: "stop" }], - })}`, - "data: [DONE]", - ].join("\n\n") + "\n\n" - - return new ReadableStream({ - start(ctrl) { - ctrl.enqueue(encoder.encode(first)) - 
ready() - timer = setTimeout(() => { - ctrl.enqueue(encoder.encode(rest)) - ctrl.close() - }, 10000) - }, - cancel() { - if (timer) clearTimeout(timer) - }, - }) +function toolPart(parts: MessageV2.Part[]) { + return parts.find((part): part is MessageV2.ToolPart => part.type === "tool") } -describe("session.prompt missing file", () => { - test("does not fail the prompt when a file part is missing", async () => { - await using tmp = await tmpdir({ - git: true, - config: { - agent: { - build: { - model: "openai/gpt-5.2", - }, +type CompletedToolPart = MessageV2.ToolPart & { state: MessageV2.ToolStateCompleted } +type ErrorToolPart = MessageV2.ToolPart & { state: MessageV2.ToolStateError } + +function completedTool(parts: MessageV2.Part[]) { + const part = toolPart(parts) + expect(part?.state.status).toBe("completed") + return part?.state.status === "completed" ? (part as CompletedToolPart) : undefined +} + +function errorTool(parts: MessageV2.Part[]) { + const part = toolPart(parts) + expect(part?.state.status).toBe("error") + return part?.state.status === "error" ? 
(part as ErrorToolPart) : undefined +} + +const mcp = Layer.succeed( + MCP.Service, + MCP.Service.of({ + status: () => Effect.succeed({}), + clients: () => Effect.succeed({}), + tools: () => Effect.succeed({}), + prompts: () => Effect.succeed({}), + resources: () => Effect.succeed({}), + add: () => Effect.succeed({ status: { status: "disabled" as const } }), + connect: () => Effect.void, + disconnect: () => Effect.void, + getPrompt: () => Effect.succeed(undefined), + readResource: () => Effect.succeed(undefined), + startAuth: () => Effect.die("unexpected MCP auth in prompt-effect tests"), + authenticate: () => Effect.die("unexpected MCP auth in prompt-effect tests"), + finishAuth: () => Effect.die("unexpected MCP auth in prompt-effect tests"), + removeAuth: () => Effect.void, + supportsOAuth: () => Effect.succeed(false), + hasStoredTokens: () => Effect.succeed(false), + getAuthStatus: () => Effect.succeed("not_authenticated" as const), + }), +) + +const lsp = Layer.succeed( + LSP.Service, + LSP.Service.of({ + init: () => Effect.void, + status: () => Effect.succeed([]), + hasClients: () => Effect.succeed(false), + touchFile: () => Effect.void, + diagnostics: () => Effect.succeed({}), + hover: () => Effect.succeed(undefined), + definition: () => Effect.succeed([]), + references: () => Effect.succeed([]), + implementation: () => Effect.succeed([]), + documentSymbol: () => Effect.succeed([]), + workspaceSymbol: () => Effect.succeed([]), + prepareCallHierarchy: () => Effect.succeed([]), + incomingCalls: () => Effect.succeed([]), + outgoingCalls: () => Effect.succeed([]), + }), +) + +const status = SessionStatus.layer.pipe(Layer.provideMerge(Bus.layer)) +const run = SessionRunState.layer.pipe(Layer.provide(status)) +const infra = Layer.mergeAll(NodeFileSystem.layer, CrossSpawnSpawner.defaultLayer) +function makeHttp() { + const deps = Layer.mergeAll( + Session.defaultLayer, + Snapshot.defaultLayer, + LLM.defaultLayer, + Env.defaultLayer, + AgentSvc.defaultLayer, + 
Command.defaultLayer, + Permission.defaultLayer, + Plugin.defaultLayer, + Config.defaultLayer, + ProviderSvc.defaultLayer, + lsp, + mcp, + AppFileSystem.defaultLayer, + status, + ).pipe(Layer.provideMerge(infra)) + const question = Question.layer.pipe(Layer.provideMerge(deps)) + const todo = Todo.layer.pipe(Layer.provideMerge(deps)) + const registry = ToolRegistry.layer.pipe( + Layer.provide(Skill.defaultLayer), + Layer.provide(FetchHttpClient.layer), + Layer.provide(CrossSpawnSpawner.defaultLayer), + Layer.provide(Ripgrep.defaultLayer), + Layer.provide(Format.defaultLayer), + Layer.provideMerge(todo), + Layer.provideMerge(question), + Layer.provideMerge(deps), + ) + const trunc = Truncate.layer.pipe(Layer.provideMerge(deps)) + const proc = SessionProcessor.layer.pipe(Layer.provide(summary), Layer.provideMerge(deps)) + const compact = SessionCompaction.layer.pipe(Layer.provideMerge(proc), Layer.provideMerge(deps)) + return Layer.mergeAll( + TestLLMServer.layer, + SessionPrompt.layer.pipe( + Layer.provide(SessionRevert.defaultLayer), + Layer.provide(summary), + Layer.provideMerge(run), + Layer.provideMerge(compact), + Layer.provideMerge(proc), + Layer.provideMerge(registry), + Layer.provideMerge(trunc), + Layer.provide(Instruction.defaultLayer), + Layer.provide(SystemPrompt.defaultLayer), + Layer.provideMerge(deps), + ), + ).pipe(Layer.provide(summary)) +} + +const it = testEffect(makeHttp()) +const unix = process.platform !== "win32" ? it.live : it.live.skip + +// Config that registers a custom "test" provider with a "test-model" model +// so provider model lookup succeeds inside the loop. 
+const cfg = { + provider: { + test: { + name: "Test", + id: "test", + env: [], + npm: "@ai-sdk/openai-compatible", + models: { + "test-model": { + id: "test-model", + name: "Test Model", + attachment: false, + reasoning: false, + temperature: false, + tool_call: true, + release_date: "2025-01-01", + limit: { context: 100000, output: 10000 }, + cost: { input: 0, output: 0 }, + options: {}, }, }, - }) + options: { + apiKey: "test-key", + baseURL: "http://localhost:1/v1", + }, + }, + }, +} - await Instance.provide({ - directory: tmp.path, - fn: () => - run( - Effect.gen(function* () { - const prompt = yield* SessionPrompt.Service - const sessions = yield* Session.Service - const session = yield* sessions.create({}) - - const missing = path.join(tmp.path, "does-not-exist.ts") - const msg = yield* prompt.prompt({ - sessionID: session.id, - agent: "build", - noReply: true, - parts: [ - { type: "text", text: "please review @does-not-exist.ts" }, - { - type: "file", - mime: "text/plain", - url: `file://${missing}`, - filename: "does-not-exist.ts", - }, - ], - }) - - if (msg.info.role !== "user") throw new Error("expected user message") - - const hasFailure = msg.parts.some( - (part) => part.type === "text" && part.synthetic && part.text.includes("Read tool failed to read"), - ) - expect(hasFailure).toBe(true) - - yield* sessions.remove(session.id) - }), - ), - }) - }) - - test("keeps stored part order stable when file resolution is async", async () => { - await using tmp = await tmpdir({ - git: true, - config: { - agent: { - build: { - model: "openai/gpt-5.2", - }, +function providerCfg(url: string) { + return { + ...cfg, + provider: { + ...cfg.provider, + test: { + ...cfg.provider.test, + options: { + ...cfg.provider.test.options, + baseURL: url, }, }, - }) + }, + } +} - await Instance.provide({ - directory: tmp.path, - fn: () => - run( - Effect.gen(function* () { - const prompt = yield* SessionPrompt.Service - const sessions = yield* Session.Service - const session = 
yield* sessions.create({}) - - const missing = path.join(tmp.path, "still-missing.ts") - const msg = yield* prompt.prompt({ - sessionID: session.id, - agent: "build", - noReply: true, - parts: [ - { - type: "file", - mime: "text/plain", - url: `file://${missing}`, - filename: "still-missing.ts", - }, - { type: "text", text: "after-file" }, - ], - }) - - if (msg.info.role !== "user") throw new Error("expected user message") - - const stored = MessageV2.get({ - sessionID: session.id, - messageID: msg.info.id, - }) - const text = stored.parts.filter((part) => part.type === "text").map((part) => part.text) - - expect(text[0]?.startsWith("Called the Read tool with the following input:")).toBe(true) - expect(text[1]?.includes("Read tool failed to read")).toBe(true) - expect(text[2]).toBe("after-file") - - yield* sessions.remove(session.id) - }), - ), - }) +const user = Effect.fn("test.user")(function* (sessionID: SessionID, text: string) { + const session = yield* Session.Service + const msg = yield* session.updateMessage({ + id: MessageID.ascending(), + role: "user", + sessionID, + agent: "build", + model: ref, + time: { created: Date.now() }, }) + yield* session.updatePart({ + id: PartID.ascending(), + messageID: msg.id, + sessionID, + type: "text", + text, + }) + return msg }) -describe("session.prompt special characters", () => { - test("handles filenames with # character", async () => { - await using tmp = await tmpdir({ - git: true, - init: async (dir) => { - await Bun.write(path.join(dir, "file#name.txt"), "special content\n") - }, - }) - - await Instance.provide({ - directory: tmp.path, - fn: () => - run( - Effect.gen(function* () { - const prompt = yield* SessionPrompt.Service - const sessions = yield* Session.Service - const session = yield* sessions.create({}) - const template = "Read @file#name.txt" - const parts = yield* prompt.resolvePromptParts(template) - const fileParts = parts.filter((part) => part.type === "file") - - expect(fileParts.length).toBe(1) - 
expect(fileParts[0].filename).toBe("file#name.txt") - expect(fileParts[0].url).toContain("%23") - - const decodedPath = fileURLToPath(fileParts[0].url) - expect(decodedPath).toBe(path.join(tmp.path, "file#name.txt")) - - const message = yield* prompt.prompt({ - sessionID: session.id, - parts, - noReply: true, - }) - const stored = MessageV2.get({ sessionID: session.id, messageID: message.info.id }) - const textParts = stored.parts.filter((part) => part.type === "text") - const hasContent = textParts.some((part) => part.text.includes("special content")) - expect(hasContent).toBe(true) - - yield* sessions.remove(session.id) - }), - ), - }) +const seed = Effect.fn("test.seed")(function* (sessionID: SessionID, opts?: { finish?: string }) { + const session = yield* Session.Service + const msg = yield* user(sessionID, "hello") + const assistant: MessageV2.Assistant = { + id: MessageID.ascending(), + role: "assistant", + parentID: msg.id, + sessionID, + mode: "build", + agent: "build", + cost: 0, + path: { cwd: "/tmp", root: "/tmp" }, + tokens: { input: 0, output: 0, reasoning: 0, cache: { read: 0, write: 0 } }, + modelID: ref.modelID, + providerID: ref.providerID, + time: { created: Date.now() }, + ...(opts?.finish ? 
{ finish: opts.finish } : {}), + } + yield* session.updateMessage(assistant) + yield* session.updatePart({ + id: PartID.ascending(), + messageID: assistant.id, + sessionID, + type: "text", + text: "hi there", }) + return { user: msg, assistant } }) -describe("session.prompt regression", () => { - test("does not loop empty assistant turns for a simple reply", async () => { - let calls = 0 - const server = Bun.serve({ - port: 0, - fetch(req) { - const url = new URL(req.url) - if (!url.pathname.endsWith("/chat/completions")) { - return new Response("not found", { status: 404 }) - } - calls++ - return new Response(chat("packages/opencode/src/session/processor.ts"), { - status: 200, - headers: { "Content-Type": "text/event-stream" }, +const addSubtask = (sessionID: SessionID, messageID: MessageID, model = ref) => + Effect.gen(function* () { + const session = yield* Session.Service + yield* session.updatePart({ + id: PartID.ascending(), + messageID, + sessionID, + type: "subtask", + prompt: "look into the cache key path", + description: "inspect bug", + agent: "general", + model, + }) + }) + +const boot = Effect.fn("test.boot")(function* (input?: { title?: string }) { + const prompt = yield* SessionPrompt.Service + const run = yield* SessionRunState.Service + const sessions = yield* Session.Service + const chat = yield* sessions.create(input ?? 
{ title: "Pinned" }) + return { prompt, run, sessions, chat } +}) + +// Loop semantics + +it.live("loop exits immediately when last assistant has stop finish", () => + provideTmpdirServer( + Effect.fnUntraced(function* ({ llm }) { + const prompt = yield* SessionPrompt.Service + const sessions = yield* Session.Service + const chat = yield* sessions.create({ title: "Pinned" }) + yield* seed(chat.id, { finish: "stop" }) + + const result = yield* prompt.loop({ sessionID: chat.id }) + expect(result.info.role).toBe("assistant") + if (result.info.role === "assistant") expect(result.info.finish).toBe("stop") + expect(yield* llm.calls).toBe(0) + }), + { git: true, config: providerCfg }, + ), +) + +it.live("loop calls LLM and returns assistant message", () => + provideTmpdirServer( + Effect.fnUntraced(function* ({ llm }) { + const prompt = yield* SessionPrompt.Service + const sessions = yield* Session.Service + const chat = yield* sessions.create({ + title: "Pinned", + permission: [{ permission: "*", pattern: "*", action: "allow" }], + }) + yield* prompt.prompt({ + sessionID: chat.id, + agent: "build", + noReply: true, + parts: [{ type: "text", text: "hello" }], + }) + yield* llm.text("world") + + const result = yield* prompt.loop({ sessionID: chat.id }) + expect(result.info.role).toBe("assistant") + const parts = result.parts.filter((p) => p.type === "text") + expect(parts.some((p) => p.type === "text" && p.text === "world")).toBe(true) + expect(yield* llm.hits).toHaveLength(1) + }), + { git: true, config: providerCfg }, + ), +) + +it.live("static loop returns assistant text through local provider", () => + provideTmpdirServer( + Effect.fnUntraced(function* ({ llm }) { + const prompt = yield* SessionPrompt.Service + const sessions = yield* Session.Service + const session = yield* sessions.create({ + title: "Prompt provider", + permission: [{ permission: "*", pattern: "*", action: "allow" }], + }) + + yield* prompt.prompt({ + sessionID: session.id, + agent: "build", + 
noReply: true, + parts: [{ type: "text", text: "hello" }], + }) + + yield* llm.text("world") + + const result = yield* prompt.loop({ sessionID: session.id }) + expect(result.info.role).toBe("assistant") + expect(result.parts.some((part) => part.type === "text" && part.text === "world")).toBe(true) + expect(yield* llm.hits).toHaveLength(1) + expect(yield* llm.pending).toBe(0) + }), + { git: true, config: providerCfg }, + ), +) + +it.live("static loop consumes queued replies across turns", () => + provideTmpdirServer( + Effect.fnUntraced(function* ({ llm }) { + const prompt = yield* SessionPrompt.Service + const sessions = yield* Session.Service + const session = yield* sessions.create({ + title: "Prompt provider turns", + permission: [{ permission: "*", pattern: "*", action: "allow" }], + }) + + yield* prompt.prompt({ + sessionID: session.id, + agent: "build", + noReply: true, + parts: [{ type: "text", text: "hello one" }], + }) + + yield* llm.text("world one") + + const first = yield* prompt.loop({ sessionID: session.id }) + expect(first.info.role).toBe("assistant") + expect(first.parts.some((part) => part.type === "text" && part.text === "world one")).toBe(true) + + yield* prompt.prompt({ + sessionID: session.id, + agent: "build", + noReply: true, + parts: [{ type: "text", text: "hello two" }], + }) + + yield* llm.text("world two") + + const second = yield* prompt.loop({ sessionID: session.id }) + expect(second.info.role).toBe("assistant") + expect(second.parts.some((part) => part.type === "text" && part.text === "world two")).toBe(true) + + expect(yield* llm.hits).toHaveLength(2) + expect(yield* llm.pending).toBe(0) + }), + { git: true, config: providerCfg }, + ), +) + +it.live("loop continues when finish is tool-calls", () => + provideTmpdirServer( + Effect.fnUntraced(function* ({ llm }) { + const prompt = yield* SessionPrompt.Service + const sessions = yield* Session.Service + const session = yield* sessions.create({ + title: "Pinned", + permission: [{ 
permission: "*", pattern: "*", action: "allow" }], + }) + yield* prompt.prompt({ + sessionID: session.id, + agent: "build", + noReply: true, + parts: [{ type: "text", text: "hello" }], + }) + yield* llm.tool("first", { value: "first" }) + yield* llm.text("second") + + const result = yield* prompt.loop({ sessionID: session.id }) + expect(yield* llm.calls).toBe(2) + expect(result.info.role).toBe("assistant") + if (result.info.role === "assistant") { + expect(result.parts.some((part) => part.type === "text" && part.text === "second")).toBe(true) + expect(result.info.finish).toBe("stop") + } + }), + { git: true, config: providerCfg }, + ), +) + +it.live("glob tool keeps instance context during prompt runs", () => + provideTmpdirServer( + ({ dir, llm }) => + Effect.gen(function* () { + const prompt = yield* SessionPrompt.Service + const sessions = yield* Session.Service + const session = yield* sessions.create({ + title: "Glob context", + permission: [{ permission: "*", pattern: "*", action: "allow" }], }) - }, - }) + const file = path.join(dir, "probe.txt") + yield* Effect.promise(() => Bun.write(file, "probe")) - try { - await using tmp = await tmpdir({ - git: true, - init: async (dir) => { - await Bun.write( - path.join(dir, "opencode.json"), - JSON.stringify({ - $schema: "https://opencode.ai/config.json", - enabled_providers: ["alibaba"], - provider: { - alibaba: { - options: { - apiKey: "test-key", - baseURL: `${server.url.origin}/v1`, - }, - }, - }, - agent: { - build: { - model: "alibaba/qwen-plus", - }, - }, - }), + yield* prompt.prompt({ + sessionID: session.id, + agent: "build", + noReply: true, + parts: [{ type: "text", text: "find text files" }], + }) + yield* llm.tool("glob", { pattern: "**/*.txt" }) + yield* llm.text("done") + + const result = yield* prompt.loop({ sessionID: session.id }) + expect(result.info.role).toBe("assistant") + + const msgs = yield* MessageV2.filterCompactedEffect(session.id) + const tool = msgs + .flatMap((msg) => msg.parts) + 
.find( + (part): part is CompletedToolPart => + part.type === "tool" && part.tool === "glob" && part.state.status === "completed", ) - }, + if (!tool) return + + expect(tool.state.output).toContain(file) + expect(tool.state.output).not.toContain("No context found for instance") + expect(result.parts.some((part) => part.type === "text" && part.text === "done")).toBe(true) + }), + { git: true, config: providerCfg }, + ), +) + +it.live("loop continues when finish is stop but assistant has tool parts", () => + provideTmpdirServer( + Effect.fnUntraced(function* ({ llm }) { + const prompt = yield* SessionPrompt.Service + const sessions = yield* Session.Service + const session = yield* sessions.create({ + title: "Pinned", + permission: [{ permission: "*", pattern: "*", action: "allow" }], }) - - await Instance.provide({ - directory: tmp.path, - fn: () => - run( - Effect.gen(function* () { - const prompt = yield* SessionPrompt.Service - const sessions = yield* Session.Service - const session = yield* sessions.create({ title: "Prompt regression" }) - const result = yield* prompt.prompt({ - sessionID: session.id, - agent: "build", - parts: [{ type: "text", text: "Where is SessionProcessor?" 
}], - }) - - expect(result.info.role).toBe("assistant") - expect(result.parts.some((part) => part.type === "text" && part.text.includes("processor.ts"))).toBe(true) - - const msgs = yield* sessions.messages({ sessionID: session.id }) - expect(msgs.filter((msg) => msg.info.role === "assistant")).toHaveLength(1) - expect(calls).toBe(1) - }), - ), + yield* prompt.prompt({ + sessionID: session.id, + agent: "build", + noReply: true, + parts: [{ type: "text", text: "hello" }], }) - } finally { - void server.stop(true) - } - }) + yield* llm.push(reply().tool("first", { value: "first" }).stop()) + yield* llm.text("second") - test("records aborted errors when prompt is cancelled mid-stream", async () => { - const ready = defer() - const server = Bun.serve({ - port: 0, - fetch(req) { - const url = new URL(req.url) - if (!url.pathname.endsWith("/chat/completions")) { - return new Response("not found", { status: 404 }) - } - return new Response( - hanging(() => ready.resolve()), - { - status: 200, - headers: { "Content-Type": "text/event-stream" }, + const result = yield* prompt.loop({ sessionID: session.id }) + expect(yield* llm.calls).toBe(2) + expect(result.info.role).toBe("assistant") + if (result.info.role === "assistant") { + expect(result.parts.some((part) => part.type === "text" && part.text === "second")).toBe(true) + expect(result.info.finish).toBe("stop") + } + }), + { git: true, config: providerCfg }, + ), +) + +it.live("failed subtask preserves metadata on error tool state", () => + provideTmpdirServer( + Effect.fnUntraced(function* ({ llm }) { + const prompt = yield* SessionPrompt.Service + const sessions = yield* Session.Service + const chat = yield* sessions.create({ title: "Pinned" }) + yield* llm.tool("task", { + description: "inspect bug", + prompt: "look into the cache key path", + subagent_type: "general", + }) + yield* llm.text("done") + const msg = yield* user(chat.id, "hello") + yield* addSubtask(chat.id, msg.id) + + const result = yield* prompt.loop({ 
sessionID: chat.id }) + expect(result.info.role).toBe("assistant") + expect(yield* llm.calls).toBe(2) + + const msgs = yield* MessageV2.filterCompactedEffect(chat.id) + const taskMsg = msgs.find((item) => item.info.role === "assistant" && item.info.agent === "general") + expect(taskMsg?.info.role).toBe("assistant") + if (!taskMsg || taskMsg.info.role !== "assistant") return + + const tool = errorTool(taskMsg.parts) + if (!tool) return + + expect(tool.state.error).toContain("Tool execution failed") + expect(tool.state.metadata).toBeDefined() + expect(tool.state.metadata?.sessionId).toBeDefined() + expect(tool.state.metadata?.model).toEqual({ + providerID: ProviderID.make("test"), + modelID: ModelID.make("missing-model"), + }) + }), + { + git: true, + config: (url) => ({ + ...providerCfg(url), + agent: { + general: { + model: "test/missing-model", }, - ) - }, - }) - - try { - await using tmp = await tmpdir({ - git: true, - init: async (dir) => { - await Bun.write( - path.join(dir, "opencode.json"), - JSON.stringify({ - $schema: "https://opencode.ai/config.json", - enabled_providers: ["alibaba"], - provider: { - alibaba: { - options: { - apiKey: "test-key", - baseURL: `${server.url.origin}/v1`, - }, - }, - }, - agent: { - build: { - model: "alibaba/qwen-plus", - }, - }, - }), - ) }, + }), + }, + ), +) + +it.live( + "running subtask preserves metadata after tool-call transition", + () => + provideTmpdirServer( + Effect.fnUntraced(function* ({ llm }) { + const prompt = yield* SessionPrompt.Service + const sessions = yield* Session.Service + const chat = yield* sessions.create({ title: "Pinned" }) + yield* llm.hang + const msg = yield* user(chat.id, "hello") + yield* addSubtask(chat.id, msg.id) + + const fiber = yield* prompt.loop({ sessionID: chat.id }).pipe(Effect.forkChild) + + const tool = yield* Effect.promise(async () => { + const end = Date.now() + 5_000 + while (Date.now() < end) { + const msgs = await Effect.runPromise(MessageV2.filterCompactedEffect(chat.id)) + 
const taskMsg = msgs.find((item) => item.info.role === "assistant" && item.info.agent === "general") + const tool = taskMsg?.parts.find((part): part is MessageV2.ToolPart => part.type === "tool") + if (tool?.state.status === "running" && tool.state.metadata?.sessionId) return tool + await new Promise((done) => setTimeout(done, 20)) + } + throw new Error("timed out waiting for running subtask metadata") + }) + + if (tool.state.status !== "running") return + expect(typeof tool.state.metadata?.sessionId).toBe("string") + expect(tool.state.title).toBeDefined() + expect(tool.state.metadata?.model).toBeDefined() + + yield* prompt.cancel(chat.id) + yield* Fiber.await(fiber) + }), + { git: true, config: providerCfg }, + ), + 5_000, +) + +it.live( + "running task tool preserves metadata after tool-call transition", + () => + provideTmpdirServer( + Effect.fnUntraced(function* ({ llm }) { + const prompt = yield* SessionPrompt.Service + const sessions = yield* Session.Service + const chat = yield* sessions.create({ + title: "Pinned", + permission: [{ permission: "*", pattern: "*", action: "allow" }], + }) + yield* llm.tool("task", { + description: "inspect bug", + prompt: "look into the cache key path", + subagent_type: "general", + }) + yield* llm.hang + yield* user(chat.id, "hello") + + const fiber = yield* prompt.loop({ sessionID: chat.id }).pipe(Effect.forkChild) + + const tool = yield* Effect.promise(async () => { + const end = Date.now() + 5_000 + while (Date.now() < end) { + const msgs = await Effect.runPromise(MessageV2.filterCompactedEffect(chat.id)) + const assistant = msgs.findLast((item) => item.info.role === "assistant" && item.info.agent === "build") + const tool = assistant?.parts.find( + (part): part is MessageV2.ToolPart => part.type === "tool" && part.tool === "task", + ) + if (tool?.state.status === "running" && tool.state.metadata?.sessionId) return tool + await new Promise((done) => setTimeout(done, 20)) + } + throw new Error("timed out waiting for running 
task metadata") + }) + + if (tool.state.status !== "running") return + expect(typeof tool.state.metadata?.sessionId).toBe("string") + expect(tool.state.title).toBe("inspect bug") + expect(tool.state.metadata?.model).toBeDefined() + + yield* prompt.cancel(chat.id) + yield* Fiber.await(fiber) + }), + { git: true, config: providerCfg }, + ), + 10_000, +) + +it.live( + "loop sets status to busy then idle", + () => + provideTmpdirServer( + Effect.fnUntraced(function* ({ llm }) { + const prompt = yield* SessionPrompt.Service + const sessions = yield* Session.Service + const status = yield* SessionStatus.Service + + yield* llm.hang + + const chat = yield* sessions.create({}) + yield* user(chat.id, "hi") + + const fiber = yield* prompt.loop({ sessionID: chat.id }).pipe(Effect.forkChild) + yield* llm.wait(1) + expect((yield* status.get(chat.id)).type).toBe("busy") + yield* prompt.cancel(chat.id) + yield* Fiber.await(fiber) + expect((yield* status.get(chat.id)).type).toBe("idle") + }), + { git: true, config: providerCfg }, + ), + 3_000, +) + +// Cancel semantics + +it.live( + "cancel interrupts loop and resolves with an assistant message", + () => + provideTmpdirServer( + Effect.fnUntraced(function* ({ llm }) { + const prompt = yield* SessionPrompt.Service + const sessions = yield* Session.Service + const chat = yield* sessions.create({ title: "Pinned" }) + yield* seed(chat.id) + + yield* llm.hang + + yield* user(chat.id, "more") + + const fiber = yield* prompt.loop({ sessionID: chat.id }).pipe(Effect.forkChild) + yield* llm.wait(1) + yield* prompt.cancel(chat.id) + const exit = yield* Fiber.await(fiber) + expect(Exit.isSuccess(exit)).toBe(true) + if (Exit.isSuccess(exit)) { + expect(exit.value.info.role).toBe("assistant") + } + }), + { git: true, config: providerCfg }, + ), + 3_000, +) + +it.live( + "cancel records MessageAbortedError on interrupted process", + () => + provideTmpdirServer( + Effect.fnUntraced(function* ({ llm }) { + const prompt = yield* 
SessionPrompt.Service + const sessions = yield* Session.Service + const chat = yield* sessions.create({ title: "Pinned" }) + yield* llm.hang + yield* user(chat.id, "hello") + + const fiber = yield* prompt.loop({ sessionID: chat.id }).pipe(Effect.forkChild) + yield* llm.wait(1) + yield* prompt.cancel(chat.id) + const exit = yield* Fiber.await(fiber) + expect(Exit.isSuccess(exit)).toBe(true) + if (Exit.isSuccess(exit)) { + const info = exit.value.info + if (info.role === "assistant") { + expect(info.error?.name).toBe("MessageAbortedError") + } + } + }), + { git: true, config: providerCfg }, + ), + 3_000, +) + +it.live( + "cancel finalizes subtask tool state", + () => + provideTmpdirInstance( + () => + Effect.gen(function* () { + const ready = defer() + const aborted = defer() + const registry = yield* ToolRegistry.Service + const { task } = yield* registry.named() + const original = task.execute + task.execute = (_args, ctx) => + Effect.callback((_resume) => { + ready.resolve() + ctx.abort.addEventListener("abort", () => aborted.resolve(), { once: true }) + return Effect.sync(() => aborted.resolve()) + }) + yield* Effect.addFinalizer(() => Effect.sync(() => void (task.execute = original))) + + const { prompt, chat } = yield* boot() + const msg = yield* user(chat.id, "hello") + yield* addSubtask(chat.id, msg.id) + + const fiber = yield* prompt.loop({ sessionID: chat.id }).pipe(Effect.forkChild) + yield* Effect.promise(() => ready.promise) + yield* prompt.cancel(chat.id) + yield* Effect.promise(() => aborted.promise) + + const exit = yield* Fiber.await(fiber) + expect(Exit.isSuccess(exit)).toBe(true) + + const msgs = yield* MessageV2.filterCompactedEffect(chat.id) + const taskMsg = msgs.find((item) => item.info.role === "assistant" && item.info.agent === "general") + expect(taskMsg?.info.role).toBe("assistant") + if (!taskMsg || taskMsg.info.role !== "assistant") return + + const tool = toolPart(taskMsg.parts) + expect(tool?.type).toBe("tool") + if (!tool) return + + 
expect(tool.state.status).not.toBe("running") + expect(taskMsg.info.time.completed).toBeDefined() + expect(taskMsg.info.finish).toBeDefined() + }), + { git: true, config: cfg }, + ), + 30_000, +) + +it.live( + "cancel with queued callers resolves all cleanly", + () => + provideTmpdirServer( + Effect.fnUntraced(function* ({ llm }) { + const prompt = yield* SessionPrompt.Service + const sessions = yield* Session.Service + const chat = yield* sessions.create({ title: "Pinned" }) + yield* llm.hang + yield* user(chat.id, "hello") + + const a = yield* prompt.loop({ sessionID: chat.id }).pipe(Effect.forkChild) + yield* llm.wait(1) + const b = yield* prompt.loop({ sessionID: chat.id }).pipe(Effect.forkChild) + yield* Effect.sleep(50) + + yield* prompt.cancel(chat.id) + const [exitA, exitB] = yield* Effect.all([Fiber.await(a), Fiber.await(b)]) + expect(Exit.isSuccess(exitA)).toBe(true) + expect(Exit.isSuccess(exitB)).toBe(true) + if (Exit.isSuccess(exitA) && Exit.isSuccess(exitB)) { + expect(exitA.value.info.id).toBe(exitB.value.info.id) + } + }), + { git: true, config: providerCfg }, + ), + 3_000, +) + +// Queue semantics + +it.live("concurrent loop callers get same result", () => + provideTmpdirInstance( + (_dir) => + Effect.gen(function* () { + const { prompt, run, chat } = yield* boot() + yield* seed(chat.id, { finish: "stop" }) + + const [a, b] = yield* Effect.all([prompt.loop({ sessionID: chat.id }), prompt.loop({ sessionID: chat.id })], { + concurrency: "unbounded", + }) + + expect(a.info.id).toBe(b.info.id) + expect(a.info.role).toBe("assistant") + yield* run.assertNotBusy(chat.id) + }), + { git: true }, + ), +) + +it.live( + "concurrent loop callers all receive same error result", + () => + provideTmpdirServer( + Effect.fnUntraced(function* ({ llm }) { + const prompt = yield* SessionPrompt.Service + const sessions = yield* Session.Service + const chat = yield* sessions.create({ title: "Pinned" }) + + yield* llm.fail("boom") + yield* user(chat.id, "hello") + + const 
[a, b] = yield* Effect.all([prompt.loop({ sessionID: chat.id }), prompt.loop({ sessionID: chat.id })], { + concurrency: "unbounded", + }) + expect(a.info.id).toBe(b.info.id) + expect(a.info.role).toBe("assistant") + }), + { git: true, config: providerCfg }, + ), + 3_000, +) + +it.live( + "prompt submitted during an active run is included in the next LLM input", + () => + provideTmpdirServer( + Effect.fnUntraced(function* ({ llm }) { + const gate = defer() + const prompt = yield* SessionPrompt.Service + const sessions = yield* Session.Service + const chat = yield* sessions.create({ title: "Pinned" }) + + yield* llm.hold("first", gate.promise) + yield* llm.text("second") + + const a = yield* prompt + .prompt({ + sessionID: chat.id, + agent: "build", + model: ref, + parts: [{ type: "text", text: "first" }], + }) + .pipe(Effect.forkChild) + + yield* llm.wait(1) + + const id = MessageID.ascending() + const b = yield* prompt + .prompt({ + sessionID: chat.id, + messageID: id, + agent: "build", + model: ref, + parts: [{ type: "text", text: "second" }], + }) + .pipe(Effect.forkChild) + + yield* Effect.promise(async () => { + const end = Date.now() + 5000 + while (Date.now() < end) { + const msgs = await Effect.runPromise(sessions.messages({ sessionID: chat.id })) + if (msgs.some((msg) => msg.info.role === "user" && msg.info.id === id)) return + await new Promise((done) => setTimeout(done, 20)) + } + throw new Error("timed out waiting for second prompt to save") + }) + + gate.resolve() + + const [ea, eb] = yield* Effect.all([Fiber.await(a), Fiber.await(b)]) + expect(Exit.isSuccess(ea)).toBe(true) + expect(Exit.isSuccess(eb)).toBe(true) + expect(yield* llm.calls).toBe(2) + + const msgs = yield* sessions.messages({ sessionID: chat.id }) + const assistants = msgs.filter((msg) => msg.info.role === "assistant") + expect(assistants).toHaveLength(2) + const last = assistants.at(-1) + if (!last || last.info.role !== "assistant") throw new Error("expected second assistant") + 
expect(last.info.parentID).toBe(id) + expect(last.parts.some((part) => part.type === "text" && part.text === "second")).toBe(true) + + const inputs = yield* llm.inputs + expect(inputs).toHaveLength(2) + expect(JSON.stringify(inputs.at(-1)?.messages)).toContain("second") + }), + { git: true, config: providerCfg }, + ), + 3_000, +) + +it.live( + "assertNotBusy throws BusyError when loop running", + () => + provideTmpdirServer( + Effect.fnUntraced(function* ({ llm }) { + const prompt = yield* SessionPrompt.Service + const run = yield* SessionRunState.Service + const sessions = yield* Session.Service + yield* llm.hang + + const chat = yield* sessions.create({}) + yield* user(chat.id, "hi") + + const fiber = yield* prompt.loop({ sessionID: chat.id }).pipe(Effect.forkChild) + yield* llm.wait(1) + + const exit = yield* run.assertNotBusy(chat.id).pipe(Effect.exit) + expect(Exit.isFailure(exit)).toBe(true) + if (Exit.isFailure(exit)) { + expect(Cause.squash(exit.cause)).toBeInstanceOf(Session.BusyError) + } + + yield* prompt.cancel(chat.id) + yield* Fiber.await(fiber) + }), + { git: true, config: providerCfg }, + ), + 3_000, +) + +it.live("assertNotBusy succeeds when idle", () => + provideTmpdirInstance( + (_dir) => + Effect.gen(function* () { + const run = yield* SessionRunState.Service + const sessions = yield* Session.Service + + const chat = yield* sessions.create({}) + const exit = yield* run.assertNotBusy(chat.id).pipe(Effect.exit) + expect(Exit.isSuccess(exit)).toBe(true) + }), + { git: true }, + ), +) + +// Shell semantics + +it.live( + "shell rejects with BusyError when loop running", + () => + provideTmpdirServer( + Effect.fnUntraced(function* ({ llm }) { + const prompt = yield* SessionPrompt.Service + const sessions = yield* Session.Service + const chat = yield* sessions.create({ title: "Pinned" }) + yield* llm.hang + yield* user(chat.id, "hi") + + const fiber = yield* prompt.loop({ sessionID: chat.id }).pipe(Effect.forkChild) + yield* llm.wait(1) + + const exit 
= yield* prompt.shell({ sessionID: chat.id, agent: "build", command: "echo hi" }).pipe(Effect.exit) + expect(Exit.isFailure(exit)).toBe(true) + if (Exit.isFailure(exit)) { + expect(Cause.squash(exit.cause)).toBeInstanceOf(Session.BusyError) + } + + yield* prompt.cancel(chat.id) + yield* Fiber.await(fiber) + }), + { git: true, config: providerCfg }, + ), + 3_000, +) + +unix("shell captures stdout and stderr in completed tool output", () => + provideTmpdirInstance( + (_dir) => + Effect.gen(function* () { + const { prompt, run, chat } = yield* boot() + const result = yield* prompt.shell({ + sessionID: chat.id, + agent: "build", + command: "printf out && printf err >&2", + }) + + expect(result.info.role).toBe("assistant") + const tool = completedTool(result.parts) + if (!tool) return + + expect(tool.state.output).toContain("out") + expect(tool.state.output).toContain("err") + expect(tool.state.metadata.output).toContain("out") + expect(tool.state.metadata.output).toContain("err") + yield* run.assertNotBusy(chat.id) + }), + { git: true, config: cfg }, + ), +) + +unix("shell completes a fast command on the preferred shell", () => + provideTmpdirInstance( + (dir) => + Effect.gen(function* () { + const { prompt, run, chat } = yield* boot() + const result = yield* prompt.shell({ + sessionID: chat.id, + agent: "build", + command: "pwd", + }) + + expect(result.info.role).toBe("assistant") + const tool = completedTool(result.parts) + if (!tool) return + + expect(tool.state.input.command).toBe("pwd") + expect(tool.state.output).toContain(dir) + expect(tool.state.metadata.output).toContain(dir) + yield* run.assertNotBusy(chat.id) + }), + { git: true, config: cfg }, + ), +) + +unix("shell lists files from the project directory", () => + provideTmpdirInstance( + (dir) => + Effect.gen(function* () { + const { prompt, run, chat } = yield* boot() + yield* Effect.promise(() => Bun.write(path.join(dir, "README.md"), "# e2e\n")) + + const result = yield* prompt.shell({ + sessionID: 
chat.id, + agent: "build", + command: "command ls", + }) + + expect(result.info.role).toBe("assistant") + const tool = completedTool(result.parts) + if (!tool) return + + expect(tool.state.input.command).toBe("command ls") + expect(tool.state.output).toContain("README.md") + expect(tool.state.metadata.output).toContain("README.md") + yield* run.assertNotBusy(chat.id) + }), + { git: true, config: cfg }, + ), +) + +unix("shell captures stderr from a failing command", () => + provideTmpdirInstance( + (_dir) => + Effect.gen(function* () { + const { prompt, run, chat } = yield* boot() + const result = yield* prompt.shell({ + sessionID: chat.id, + agent: "build", + command: "command -v __nonexistent_cmd_e2e__ || echo 'not found' >&2; exit 1", + }) + + expect(result.info.role).toBe("assistant") + const tool = completedTool(result.parts) + if (!tool) return + + expect(tool.state.output).toContain("not found") + expect(tool.state.metadata.output).toContain("not found") + yield* run.assertNotBusy(chat.id) + }), + { git: true, config: cfg }, + ), +) + +unix( + "shell updates running metadata before process exit", + () => + withSh(() => + provideTmpdirInstance( + (_dir) => + Effect.gen(function* () { + const { prompt, chat } = yield* boot() + + const fiber = yield* prompt + .shell({ sessionID: chat.id, agent: "build", command: "printf first && sleep 0.2 && printf second" }) + .pipe(Effect.forkChild) + + yield* Effect.promise(async () => { + const start = Date.now() + while (Date.now() - start < 5000) { + const msgs = await MessageV2.filterCompacted(MessageV2.stream(chat.id)) + const taskMsg = msgs.find((item) => item.info.role === "assistant") + const tool = taskMsg ? 
toolPart(taskMsg.parts) : undefined + if (tool?.state.status === "running" && tool.state.metadata?.output.includes("first")) return + await new Promise((done) => setTimeout(done, 20)) + } + throw new Error("timed out waiting for running shell metadata") + }) + + const exit = yield* Fiber.await(fiber) + expect(Exit.isSuccess(exit)).toBe(true) + }), + { git: true, config: cfg }, + ), + ), + 30_000, +) + +it.live( + "loop waits while shell runs and starts after shell exits", + () => + provideTmpdirServer( + Effect.fnUntraced(function* ({ llm }) { + const prompt = yield* SessionPrompt.Service + const sessions = yield* Session.Service + const chat = yield* sessions.create({ + title: "Pinned", + permission: [{ permission: "*", pattern: "*", action: "allow" }], + }) + yield* llm.text("after-shell") + + const sh = yield* prompt + .shell({ sessionID: chat.id, agent: "build", command: "sleep 0.2" }) + .pipe(Effect.forkChild) + yield* Effect.sleep(50) + + const loop = yield* prompt.loop({ sessionID: chat.id }).pipe(Effect.forkChild) + yield* Effect.sleep(50) + + expect(yield* llm.calls).toBe(0) + + yield* Fiber.await(sh) + const exit = yield* Fiber.await(loop) + + expect(Exit.isSuccess(exit)).toBe(true) + if (Exit.isSuccess(exit)) { + expect(exit.value.info.role).toBe("assistant") + expect(exit.value.parts.some((part) => part.type === "text" && part.text === "after-shell")).toBe(true) + } + expect(yield* llm.calls).toBe(1) + }), + { git: true, config: providerCfg }, + ), + 3_000, +) + +it.live( + "shell completion resumes queued loop callers", + () => + provideTmpdirServer( + Effect.fnUntraced(function* ({ llm }) { + const prompt = yield* SessionPrompt.Service + const sessions = yield* Session.Service + const chat = yield* sessions.create({ + title: "Pinned", + permission: [{ permission: "*", pattern: "*", action: "allow" }], + }) + yield* llm.text("done") + + const sh = yield* prompt + .shell({ sessionID: chat.id, agent: "build", command: "sleep 0.2" }) + 
.pipe(Effect.forkChild) + yield* Effect.sleep(50) + + const a = yield* prompt.loop({ sessionID: chat.id }).pipe(Effect.forkChild) + const b = yield* prompt.loop({ sessionID: chat.id }).pipe(Effect.forkChild) + yield* Effect.sleep(50) + + expect(yield* llm.calls).toBe(0) + + yield* Fiber.await(sh) + const [ea, eb] = yield* Effect.all([Fiber.await(a), Fiber.await(b)]) + + expect(Exit.isSuccess(ea)).toBe(true) + expect(Exit.isSuccess(eb)).toBe(true) + if (Exit.isSuccess(ea) && Exit.isSuccess(eb)) { + expect(ea.value.info.id).toBe(eb.value.info.id) + expect(ea.value.info.role).toBe("assistant") + } + expect(yield* llm.calls).toBe(1) + }), + { git: true, config: providerCfg }, + ), + 3_000, +) + +unix( + "cancel interrupts shell and resolves cleanly", + () => + withSh(() => + provideTmpdirInstance( + (_dir) => + Effect.gen(function* () { + const { prompt, run, chat } = yield* boot() + + const sh = yield* prompt + .shell({ sessionID: chat.id, agent: "build", command: "sleep 30" }) + .pipe(Effect.forkChild) + yield* Effect.sleep(50) + + yield* prompt.cancel(chat.id) + + const status = yield* SessionStatus.Service + expect((yield* status.get(chat.id)).type).toBe("idle") + const busy = yield* run.assertNotBusy(chat.id).pipe(Effect.exit) + expect(Exit.isSuccess(busy)).toBe(true) + + const exit = yield* Fiber.await(sh) + expect(Exit.isSuccess(exit)).toBe(true) + if (Exit.isSuccess(exit)) { + expect(exit.value.info.role).toBe("assistant") + const tool = completedTool(exit.value.parts) + if (tool) { + expect(tool.state.output).toContain("User aborted the command") + } + } + }), + { git: true, config: cfg }, + ), + ), + 30_000, +) + +unix( + "cancel persists aborted shell result when shell ignores TERM", + () => + withSh(() => + provideTmpdirInstance( + (_dir) => + Effect.gen(function* () { + const { prompt, chat } = yield* boot() + + const sh = yield* prompt + .shell({ sessionID: chat.id, agent: "build", command: "trap '' TERM; sleep 30" }) + .pipe(Effect.forkChild) + yield* 
Effect.sleep(50) + + yield* prompt.cancel(chat.id) + + const exit = yield* Fiber.await(sh) + expect(Exit.isSuccess(exit)).toBe(true) + if (Exit.isSuccess(exit)) { + expect(exit.value.info.role).toBe("assistant") + const tool = completedTool(exit.value.parts) + if (tool) { + expect(tool.state.output).toContain("User aborted the command") + } + } + }), + { git: true, config: cfg }, + ), + ), + 30_000, +) + +unix( + "cancel finalizes interrupted bash tool output through normal truncation", + () => + provideTmpdirServer( + ({ dir, llm }) => + Effect.gen(function* () { + const prompt = yield* SessionPrompt.Service + const sessions = yield* Session.Service + const chat = yield* sessions.create({ + title: "Interrupted bash truncation", + permission: [{ permission: "*", pattern: "*", action: "allow" }], + }) + + yield* prompt.prompt({ + sessionID: chat.id, + agent: "build", + noReply: true, + parts: [{ type: "text", text: "run bash" }], + }) + + yield* llm.tool("bash", { + command: + 'i=0; while [ "$i" -lt 4000 ]; do printf "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx %05d\\n" "$i"; i=$((i + 1)); done; sleep 30', + description: "Print many lines", + timeout: 30_000, + workdir: path.resolve(dir), + }) + + const run = yield* prompt.loop({ sessionID: chat.id }).pipe(Effect.forkChild) + yield* llm.wait(1) + yield* Effect.sleep(150) + yield* prompt.cancel(chat.id) + + const exit = yield* Fiber.await(run) + expect(Exit.isSuccess(exit)).toBe(true) + if (Exit.isFailure(exit)) return + + const tool = completedTool(exit.value.parts) + if (!tool) return + + expect(tool.state.metadata.truncated).toBe(true) + expect(typeof tool.state.metadata.outputPath).toBe("string") + expect(tool.state.output).toMatch(/\.\.\.output truncated\.\.\./) + expect(tool.state.output).toMatch(/Full output saved to:\s+\S+/) + expect(tool.state.output).not.toContain("Tool execution aborted") + }), + { git: true, config: providerCfg }, + ), + 30_000, +) 
+ +unix( + "cancel interrupts loop queued behind shell", + () => + provideTmpdirInstance( + (_dir) => + Effect.gen(function* () { + const { prompt, chat } = yield* boot() + + const sh = yield* prompt + .shell({ sessionID: chat.id, agent: "build", command: "sleep 30" }) + .pipe(Effect.forkChild) + yield* Effect.sleep(50) + + const loop = yield* prompt.loop({ sessionID: chat.id }).pipe(Effect.forkChild) + yield* Effect.sleep(50) + + yield* prompt.cancel(chat.id) + + const exit = yield* Fiber.await(loop) + expect(Exit.isSuccess(exit)).toBe(true) + + yield* Fiber.await(sh) + }), + { git: true, config: cfg }, + ), + 30_000, +) + +unix( + "shell rejects when another shell is already running", + () => + withSh(() => + provideTmpdirInstance( + (_dir) => + Effect.gen(function* () { + const { prompt, chat } = yield* boot() + + const a = yield* prompt + .shell({ sessionID: chat.id, agent: "build", command: "sleep 30" }) + .pipe(Effect.forkChild) + yield* Effect.sleep(50) + + const exit = yield* prompt + .shell({ sessionID: chat.id, agent: "build", command: "echo hi" }) + .pipe(Effect.exit) + expect(Exit.isFailure(exit)).toBe(true) + if (Exit.isFailure(exit)) { + expect(Cause.squash(exit.cause)).toBeInstanceOf(Session.BusyError) + } + + yield* prompt.cancel(chat.id) + yield* Fiber.await(a) + }), + { git: true, config: cfg }, + ), + ), + 30_000, +) + +// Abort signal propagation tests for inline tool execution + +/** Override a tool's execute to hang until aborted. Returns ready/aborted defers and a finalizer. 
*/ +function hangUntilAborted(tool: { execute: (...args: any[]) => any }) { + const ready = defer() + const aborted = defer() + const original = tool.execute + tool.execute = (_args: any, ctx: any) => { + ready.resolve() + ctx.abort.addEventListener("abort", () => aborted.resolve(), { once: true }) + return Effect.callback(() => {}) + } + const restore = Effect.addFinalizer(() => Effect.sync(() => void (tool.execute = original))) + return { ready, aborted, restore } +} + +it.live( + "interrupt propagates abort signal to read tool via file part (text/plain)", + () => + provideTmpdirInstance( + (dir) => + Effect.gen(function* () { + const registry = yield* ToolRegistry.Service + const { read } = yield* registry.named() + const { ready, aborted, restore } = hangUntilAborted(read) + yield* restore + + const prompt = yield* SessionPrompt.Service + const sessions = yield* Session.Service + const chat = yield* sessions.create({ title: "Abort Test" }) + + const testFile = path.join(dir, "test.txt") + yield* Effect.promise(() => Bun.write(testFile, "hello world")) + + const fiber = yield* prompt + .prompt({ + sessionID: chat.id, + agent: "build", + parts: [ + { type: "text", text: "read this" }, + { type: "file", url: `file://${testFile}`, filename: "test.txt", mime: "text/plain" }, + ], + }) + .pipe(Effect.forkChild) + + yield* Effect.promise(() => ready.promise) + yield* Fiber.interrupt(fiber) + + yield* Effect.promise(() => + Promise.race([ + aborted.promise, + new Promise((_, reject) => + setTimeout(() => reject(new Error("abort signal not propagated within 2s")), 2_000), + ), + ]), + ) + }), + { git: true, config: cfg }, + ), + 30_000, +) + +it.live( + "interrupt propagates abort signal to read tool via file part (directory)", + () => + provideTmpdirInstance( + (dir) => + Effect.gen(function* () { + const registry = yield* ToolRegistry.Service + const { read } = yield* registry.named() + const { ready, aborted, restore } = hangUntilAborted(read) + yield* restore + + 
const prompt = yield* SessionPrompt.Service + const sessions = yield* Session.Service + const chat = yield* sessions.create({ title: "Abort Test" }) + + const fiber = yield* prompt + .prompt({ + sessionID: chat.id, + agent: "build", + parts: [ + { type: "text", text: "read this" }, + { type: "file", url: `file://${dir}`, filename: "dir", mime: "application/x-directory" }, + ], + }) + .pipe(Effect.forkChild) + + yield* Effect.promise(() => ready.promise) + yield* Fiber.interrupt(fiber) + + yield* Effect.promise(() => + Promise.race([ + aborted.promise, + new Promise((_, reject) => + setTimeout(() => reject(new Error("abort signal not propagated within 2s")), 2_000), + ), + ]), + ) + }), + { git: true, config: cfg }, + ), + 30_000, +) + +// Missing file handling + +it.live("does not fail the prompt when a file part is missing", () => + provideTmpdirInstance( + (dir) => + Effect.gen(function* () { + const prompt = yield* SessionPrompt.Service + const sessions = yield* Session.Service + const session = yield* sessions.create({}) + + const missing = path.join(dir, "does-not-exist.ts") + const msg = yield* prompt.prompt({ + sessionID: session.id, + agent: "build", + noReply: true, + parts: [ + { type: "text", text: "please review @does-not-exist.ts" }, + { + type: "file", + mime: "text/plain", + url: `file://${missing}`, + filename: "does-not-exist.ts", + }, + ], + }) + + if (msg.info.role !== "user") throw new Error("expected user message") + const hasFailure = msg.parts.some( + (part) => part.type === "text" && part.synthetic && part.text.includes("Read tool failed to read"), + ) + expect(hasFailure).toBe(true) + + yield* sessions.remove(session.id) + }), + { git: true, config: cfg }, + ), +) + +it.live("keeps stored part order stable when file resolution is async", () => + provideTmpdirInstance( + (dir) => + Effect.gen(function* () { + const prompt = yield* SessionPrompt.Service + const sessions = yield* Session.Service + const session = yield* sessions.create({}) + + 
const missing = path.join(dir, "still-missing.ts") + const msg = yield* prompt.prompt({ + sessionID: session.id, + agent: "build", + noReply: true, + parts: [ + { + type: "file", + mime: "text/plain", + url: `file://${missing}`, + filename: "still-missing.ts", + }, + { type: "text", text: "after-file" }, + ], + }) + + if (msg.info.role !== "user") throw new Error("expected user message") + + const stored = MessageV2.get({ + sessionID: session.id, + messageID: msg.info.id, + }) + const text = stored.parts.filter((part) => part.type === "text").map((part) => part.text) + + expect(text[0]?.startsWith("Called the Read tool with the following input:")).toBe(true) + expect(text[1]?.includes("Read tool failed to read")).toBe(true) + expect(text[2]).toBe("after-file") + + yield* sessions.remove(session.id) + }), + { git: true, config: cfg }, + ), +) + +// Special characters in filenames + +it.live("handles filenames with # character", () => + provideTmpdirInstance( + (dir) => + Effect.gen(function* () { + yield* Effect.promise(() => Bun.write(path.join(dir, "file#name.txt"), "special content\n")) + + const prompt = yield* SessionPrompt.Service + const sessions = yield* Session.Service + const session = yield* sessions.create({}) + const parts = yield* prompt.resolvePromptParts("Read @file#name.txt") + const fileParts = parts.filter((part) => part.type === "file") + + expect(fileParts.length).toBe(1) + expect(fileParts[0].filename).toBe("file#name.txt") + expect(fileParts[0].url).toContain("%23") + + const decodedPath = fileURLToPath(fileParts[0].url) + expect(decodedPath).toBe(path.join(dir, "file#name.txt")) + + const message = yield* prompt.prompt({ + sessionID: session.id, + parts, + noReply: true, + }) + const stored = MessageV2.get({ sessionID: session.id, messageID: message.info.id }) + const textParts = stored.parts.filter((part) => part.type === "text") + const hasContent = textParts.some((part) => part.text.includes("special content")) + 
expect(hasContent).toBe(true) + + yield* sessions.remove(session.id) + }), + { git: true, config: cfg }, + ), +) + +// Regression: empty assistant turn loop + +it.live("does not loop empty assistant turns for a simple reply", () => + provideTmpdirServer( + Effect.fnUntraced(function* ({ llm }) { + const prompt = yield* SessionPrompt.Service + const sessions = yield* Session.Service + const session = yield* sessions.create({ title: "Prompt regression" }) + + yield* llm.text("packages/opencode/src/session/processor.ts") + + const result = yield* prompt.prompt({ + sessionID: session.id, + agent: "build", + parts: [{ type: "text", text: "Where is SessionProcessor?" }], }) - await Instance.provide({ - directory: tmp.path, - fn: () => - run( - Effect.gen(function* () { - const prompt = yield* SessionPrompt.Service - const sessions = yield* Session.Service - const session = yield* sessions.create({ title: "Prompt cancel regression" }) - const task = Effect.runPromise( - prompt.prompt({ - sessionID: session.id, - agent: "build", - parts: [{ type: "text", text: "Cancel me" }], - }), - ) + expect(result.info.role).toBe("assistant") + expect(result.parts.some((part) => part.type === "text" && part.text.includes("processor.ts"))).toBe(true) - yield* Effect.promise(() => ready.promise) - yield* prompt.cancel(session.id) + const msgs = yield* sessions.messages({ sessionID: session.id }) + expect(msgs.filter((msg) => msg.info.role === "assistant")).toHaveLength(1) + expect(yield* llm.calls).toBe(1) + }), + { git: true, config: providerCfg }, + ), +) - const result = yield* Effect.promise(() => - Promise.race([ - task, - new Promise((_, reject) => - setTimeout(() => reject(new Error("timed out waiting for cancel")), 1000), - ), - ]), - ) +it.live( + "records aborted errors when prompt is cancelled mid-stream", + () => + provideTmpdirServer( + Effect.fnUntraced(function* ({ llm }) { + const prompt = yield* SessionPrompt.Service + const sessions = yield* Session.Service + const 
session = yield* sessions.create({ title: "Prompt cancel regression" }) - expect(result.info.role).toBe("assistant") - if (result.info.role === "assistant") { - expect(result.info.error?.name).toBe("MessageAbortedError") - } + yield* llm.hang - const msgs = yield* sessions.messages({ sessionID: session.id }) - const last = msgs.findLast((msg) => msg.info.role === "assistant") - expect(last?.info.role).toBe("assistant") - if (last?.info.role === "assistant") { - expect(last.info.error?.name).toBe("MessageAbortedError") - } - }), - ), - }) - } finally { - void server.stop(true) - } - }) -}) + const fiber = yield* prompt + .prompt({ + sessionID: session.id, + agent: "build", + parts: [{ type: "text", text: "Cancel me" }], + }) + .pipe(Effect.forkChild) -describe("session.prompt agent variant", () => { - test("applies agent variant only when using agent model", async () => { - const prev = process.env.OPENAI_API_KEY - process.env.OPENAI_API_KEY = "test-openai-key" + yield* llm.wait(1) + yield* prompt.cancel(session.id) - try { - await using tmp = await tmpdir({ - git: true, - config: { - agent: { - build: { - model: "openai/gpt-5.2", - variant: "xhigh", + const exit = yield* Fiber.await(fiber) + expect(Exit.isSuccess(exit)).toBe(true) + if (Exit.isSuccess(exit)) { + expect(exit.value.info.role).toBe("assistant") + if (exit.value.info.role === "assistant") { + expect(exit.value.info.error?.name).toBe("MessageAbortedError") + } + } + + const msgs = yield* sessions.messages({ sessionID: session.id }) + const last = msgs.findLast((msg) => msg.info.role === "assistant") + expect(last?.info.role).toBe("assistant") + if (last?.info.role === "assistant") { + expect(last.info.error?.name).toBe("MessageAbortedError") + } + }), + { git: true, config: providerCfg }, + ), + 3_000, +) + +// Agent variant + +it.live("applies agent variant only when using agent model", () => + provideTmpdirInstance( + (_dir) => + Effect.gen(function* () { + const prompt = yield* SessionPrompt.Service 
+ const sessions = yield* Session.Service + const session = yield* sessions.create({}) + + const other = yield* prompt.prompt({ + sessionID: session.id, + agent: "build", + model: { providerID: ProviderID.make("opencode"), modelID: ModelID.make("kimi-k2.5-free") }, + noReply: true, + parts: [{ type: "text", text: "hello" }], + }) + if (other.info.role !== "user") throw new Error("expected user message") + expect(other.info.model.variant).toBeUndefined() + + const match = yield* prompt.prompt({ + sessionID: session.id, + agent: "build", + noReply: true, + parts: [{ type: "text", text: "hello again" }], + }) + if (match.info.role !== "user") throw new Error("expected user message") + expect(match.info.model).toEqual({ + providerID: ProviderID.make("test"), + modelID: ModelID.make("test-model"), + variant: "xhigh", + }) + expect(match.info.model.variant).toBe("xhigh") + + const override = yield* prompt.prompt({ + sessionID: session.id, + agent: "build", + noReply: true, + variant: "high", + parts: [{ type: "text", text: "hello third" }], + }) + if (override.info.role !== "user") throw new Error("expected user message") + expect(override.info.model.variant).toBe("high") + + yield* sessions.remove(session.id) + }), + { + git: true, + config: { + ...cfg, + provider: { + ...cfg.provider, + test: { + ...cfg.provider.test, + models: { + "test-model": { + ...cfg.provider.test.models["test-model"], + variants: { xhigh: {}, high: {} }, + }, }, }, }, - }) + agent: { + build: { + model: "test/test-model", + variant: "xhigh", + }, + }, + }, + }, + ), +) - await Instance.provide({ - directory: tmp.path, - fn: () => - run( - Effect.gen(function* () { - const prompt = yield* SessionPrompt.Service - const sessions = yield* Session.Service - const session = yield* sessions.create({}) +// Agent / command resolution errors - const other = yield* prompt.prompt({ - sessionID: session.id, - agent: "build", - model: { providerID: ProviderID.make("opencode"), modelID: 
ModelID.make("kimi-k2.5-free") }, - noReply: true, - parts: [{ type: "text", text: "hello" }], - }) - if (other.info.role !== "user") throw new Error("expected user message") - expect(other.info.model.variant).toBeUndefined() +it.live( + "unknown agent throws typed error", + () => + provideTmpdirInstance( + (_dir) => + Effect.gen(function* () { + const prompt = yield* SessionPrompt.Service + const sessions = yield* Session.Service + const session = yield* sessions.create({}) + const exit = yield* prompt + .prompt({ + sessionID: session.id, + agent: "nonexistent-agent-xyz", + noReply: true, + parts: [{ type: "text", text: "hello" }], + }) + .pipe(Effect.exit) - const match = yield* prompt.prompt({ - sessionID: session.id, - agent: "build", - noReply: true, - parts: [{ type: "text", text: "hello again" }], - }) - if (match.info.role !== "user") throw new Error("expected user message") - expect(match.info.model).toEqual({ - providerID: ProviderID.make("openai"), - modelID: ModelID.make("gpt-5.2"), - variant: "xhigh", - }) - expect(match.info.model.variant).toBe("xhigh") - - const override = yield* prompt.prompt({ - sessionID: session.id, - agent: "build", - noReply: true, - variant: "high", - parts: [{ type: "text", text: "hello third" }], - }) - if (override.info.role !== "user") throw new Error("expected user message") - expect(override.info.model.variant).toBe("high") - - yield* sessions.remove(session.id) - }), - ), - }) - } finally { - if (prev === undefined) delete process.env.OPENAI_API_KEY - else process.env.OPENAI_API_KEY = prev - } - }) -}) - -describe("session.agent-resolution", () => { - test("unknown agent throws typed error", async () => { - await using tmp = await tmpdir({ git: true }) - await Instance.provide({ - directory: tmp.path, - fn: () => - run( - Effect.gen(function* () { - const prompt = yield* SessionPrompt.Service - const sessions = yield* Session.Service - const session = yield* sessions.create({}) - const err = yield* Effect.promise(() => 
- Effect.runPromise( - prompt.prompt({ - sessionID: session.id, - agent: "nonexistent-agent-xyz", - noReply: true, - parts: [{ type: "text", text: "hello" }], - }), - ).then( - () => undefined, - (e) => e, - ), - ) - expect(err).toBeDefined() + expect(Exit.isFailure(exit)).toBe(true) + if (Exit.isFailure(exit)) { + const err = Cause.squash(exit.cause) expect(err).not.toBeInstanceOf(TypeError) expect(NamedError.Unknown.isInstance(err)).toBe(true) if (NamedError.Unknown.isInstance(err)) { expect(err.data.message).toContain('Agent not found: "nonexistent-agent-xyz"') } - }), - ), - }) - }, 30000) + } + }), + { git: true }, + ), + 30_000, +) - test("unknown agent error includes available agent names", async () => { - await using tmp = await tmpdir({ git: true }) - await Instance.provide({ - directory: tmp.path, - fn: () => - run( - Effect.gen(function* () { - const prompt = yield* SessionPrompt.Service - const sessions = yield* Session.Service - const session = yield* sessions.create({}) - const err = yield* Effect.promise(() => - Effect.runPromise( - prompt.prompt({ - sessionID: session.id, - agent: "nonexistent-agent-xyz", - noReply: true, - parts: [{ type: "text", text: "hello" }], - }), - ).then( - () => undefined, - (e) => e, - ), - ) +it.live( + "unknown agent error includes available agent names", + () => + provideTmpdirInstance( + (_dir) => + Effect.gen(function* () { + const prompt = yield* SessionPrompt.Service + const sessions = yield* Session.Service + const session = yield* sessions.create({}) + const exit = yield* prompt + .prompt({ + sessionID: session.id, + agent: "nonexistent-agent-xyz", + noReply: true, + parts: [{ type: "text", text: "hello" }], + }) + .pipe(Effect.exit) + + expect(Exit.isFailure(exit)).toBe(true) + if (Exit.isFailure(exit)) { + const err = Cause.squash(exit.cause) expect(NamedError.Unknown.isInstance(err)).toBe(true) if (NamedError.Unknown.isInstance(err)) { expect(err.data.message).toContain("build") } - }), - ), - }) - }, 30000) + 
} + }), + { git: true }, + ), + 30_000, +) - test("unknown command throws typed error with available names", async () => { - await using tmp = await tmpdir({ git: true }) - await Instance.provide({ - directory: tmp.path, - fn: () => - run( - Effect.gen(function* () { - const prompt = yield* SessionPrompt.Service - const sessions = yield* Session.Service - const session = yield* sessions.create({}) - const err = yield* Effect.promise(() => - Effect.runPromise( - prompt.command({ - sessionID: session.id, - command: "nonexistent-command-xyz", - arguments: "", - }), - ).then( - () => undefined, - (e) => e, - ), - ) - expect(err).toBeDefined() +it.live( + "unknown command throws typed error with available names", + () => + provideTmpdirInstance( + (_dir) => + Effect.gen(function* () { + const prompt = yield* SessionPrompt.Service + const sessions = yield* Session.Service + const session = yield* sessions.create({}) + const exit = yield* prompt + .command({ + sessionID: session.id, + command: "nonexistent-command-xyz", + arguments: "", + }) + .pipe(Effect.exit) + + expect(Exit.isFailure(exit)).toBe(true) + if (Exit.isFailure(exit)) { + const err = Cause.squash(exit.cause) expect(err).not.toBeInstanceOf(TypeError) expect(NamedError.Unknown.isInstance(err)).toBe(true) if (NamedError.Unknown.isInstance(err)) { expect(err.data.message).toContain('Command not found: "nonexistent-command-xyz"') expect(err.data.message).toContain("init") } - }), - ), - }) - }, 30000) -}) + } + }), + { git: true }, + ), + 30_000, +) From 96a534d8c639db794012281b5419e727df538e97 Mon Sep 17 00:00:00 2001 From: Kit Langton Date: Tue, 21 Apr 2026 12:05:23 -0400 Subject: [PATCH 23/73] feat(core): bridge GET /config through experimental HttpApi (#23712) --- packages/opencode/specs/effect/http-api.md | 16 +++++------ packages/opencode/src/config/agent.ts | 3 ++- packages/opencode/src/config/config.ts | 2 +- packages/opencode/src/config/mcp.ts | 27 ++++++++++--------- 
packages/opencode/src/config/provider.ts | 9 ++++--- packages/opencode/src/config/server.ts | 10 ++++--- .../server/routes/instance/httpapi/config.ts | 22 ++++++++++++--- .../src/server/routes/instance/index.ts | 1 + 8 files changed, 56 insertions(+), 34 deletions(-) diff --git a/packages/opencode/specs/effect/http-api.md b/packages/opencode/specs/effect/http-api.md index 93ef81a325..d882857ba1 100644 --- a/packages/opencode/specs/effect/http-api.md +++ b/packages/opencode/specs/effect/http-api.md @@ -224,7 +224,7 @@ When to use each: Promoting a previously-anonymous schema to Schema.Class is acceptable when it is top-level or endpoint-facing, but call it out in the PR — it is an additive SDK change (`export type Foo = ...` newly appears) even if it preserves the JSON shape. -Schemas that are **not** pure objects (enums, unions, records, tuples) cannot use Schema.Class. For those, add `.annotate({ identifier: "FooName" })` to get the same named-ref behavior: +Schemas that are **not** pure objects (enums, unions, records, tuples) cannot use Schema.Class. For those — and for pure-object schemas where handlers populate plain objects rather than class instances — add `.annotate({ identifier: "FooName" })` to get the same named-ref behavior without the `instanceof` requirement: ```ts export const Action = Schema.Literals(["ask", "allow", "deny"]).annotate({ identifier: "PermissionActionConfig" }) @@ -373,9 +373,9 @@ The first slice is successful if: - `Schema.Class` works well for route DTOs such as `Question.Request`, `Question.Info`, and `Question.Reply`. - scalar or collection schemas such as `Question.Answer` should stay as schemas and use helpers like `withStatics(...)` instead of being forced into classes. -- if an `HttpApi` success schema uses `Schema.Class`, the handler or underlying service needs to return real schema instances rather than plain objects. 
+- if an `HttpApi` success schema uses `Schema.Class`, the handler or underlying service needs to return real schema instances rather than plain objects. `Schema.Class`'s Declaration AST enforces `input instanceof self || input.[ClassTypeId]` during encode (see effect-smol `Schema.ts:10479-10484`). Plain objects from zod parse fail with `Expected Foo, got {...}`. This surfaced on `GET /config` where the service returns zod-parsed plain objects and `Config.InfoSchema` referenced `ConfigProvider.Info` (class). The fix was to convert pure-object classes to `Schema.Struct(...).annotate({ identifier: "..." })` — same named SDK `$ref`, no instance requirement. Verified byte-identical `types.gen.ts` vs `dev`. - internal event payloads can stay anonymous when we want to avoid adding extra named OpenAPI component churn for non-route shapes. -- `Schema.Class` emits named `$ref` in OpenAPI — only use it for types that already had `.meta({ ref })` in the old Zod schema. Inner/nested types should stay as `Schema.Struct` to avoid SDK shape changes. +- `Schema.Class` emits named `$ref` in OpenAPI — only use it for types that already had `.meta({ ref })` in the old Zod schema **and** when the handler/service returns real instances. For schemas that need a named `$ref` but are populated from plain objects, use `Schema.Struct(...).annotate({ identifier: "..." })` instead. Inner/nested types should stay as `Schema.Struct` to avoid SDK shape changes. 
### Integration @@ -404,8 +404,7 @@ Current instance route inventory: - `provider` - `bridged` endpoints: `GET /provider`, `GET /provider/auth`, `POST /provider/:providerID/oauth/authorize`, `POST /provider/:providerID/oauth/callback` - `config` - `bridged` (partial) - bridged endpoint: `GET /config/providers` - later endpoint: `GET /config` + bridged endpoints: `GET /config`, `GET /config/providers` defer `PATCH /config` for now - `project` - `bridged` (partial) bridged endpoints: `GET /project`, `GET /project/current` @@ -431,9 +430,8 @@ Current instance route inventory: Recommended near-term sequence: 1. `workspace` read endpoints (`GET /experimental/workspace/adaptor`, `GET /experimental/workspace`, `GET /experimental/workspace/status`) -2. `config` full read endpoint (`GET /config`) -3. `file` JSON read endpoints -4. `mcp` JSON read endpoints +2. `file` JSON read endpoints +3. `mcp` JSON read endpoints ## Checklist @@ -449,8 +447,8 @@ Recommended near-term sequence: - [x] port remaining provider endpoints (`GET /provider`, OAuth mutations) - [x] port `config` providers read endpoint - [x] port `project` read endpoints (`GET /project`, `GET /project/current`) +- [x] port `GET /config` full read endpoint - [ ] port `workspace` read endpoints -- [ ] port `GET /config` full read endpoint - [ ] port `file` JSON read endpoints - [ ] decide when to remove the flag and make Effect routes the default diff --git a/packages/opencode/src/config/agent.ts b/packages/opencode/src/config/agent.ts index 85a214e122..9755c20375 100644 --- a/packages/opencode/src/config/agent.ts +++ b/packages/opencode/src/config/agent.ts @@ -101,7 +101,8 @@ const normalize = (agent: z.infer) => { } globalThis.Object.assign(permission, agent.permission) - return { ...agent, options, permission, steps: agent.steps ?? agent.maxSteps } + const steps = agent.steps ?? agent.maxSteps + return { ...agent, options, permission, ...(steps !== undefined ? 
{ steps } : {}) } } export const Info = zod(AgentSchema).transform(normalize).meta({ ref: "AgentConfig" }) as unknown as z.ZodType< diff --git a/packages/opencode/src/config/config.ts b/packages/opencode/src/config/config.ts index cbe7cf7a2a..7fe337176a 100644 --- a/packages/opencode/src/config/config.ts +++ b/packages/opencode/src/config/config.ts @@ -91,7 +91,7 @@ const LogLevelRef = Schema.Any.annotate({ [ZodOverride]: Log.Level }) const PositiveInt = Schema.Number.check(Schema.isInt()).check(Schema.isGreaterThan(0)) const NonNegativeInt = Schema.Number.check(Schema.isInt()).check(Schema.isGreaterThanOrEqualTo(0)) -const InfoSchema = Schema.Struct({ +export const InfoSchema = Schema.Struct({ $schema: Schema.optional(Schema.String).annotate({ description: "JSON schema reference for configuration validation", }), diff --git a/packages/opencode/src/config/mcp.ts b/packages/opencode/src/config/mcp.ts index 8b77bc4c28..0887fa984a 100644 --- a/packages/opencode/src/config/mcp.ts +++ b/packages/opencode/src/config/mcp.ts @@ -2,7 +2,7 @@ import { Schema } from "effect" import { zod } from "@/util/effect-zod" import { withStatics } from "@/util/schema" -export class Local extends Schema.Class("McpLocalConfig")({ +export const Local = Schema.Struct({ type: Schema.Literal("local").annotate({ description: "Type of MCP server connection" }), command: Schema.mutable(Schema.Array(Schema.String)).annotate({ description: "Command and arguments to run the MCP server", @@ -16,11 +16,12 @@ export class Local extends Schema.Class("McpLocalConfig")({ timeout: Schema.optional(Schema.Number).annotate({ description: "Timeout in ms for MCP server requests. 
Defaults to 5000 (5 seconds) if not specified.", }), -}) { - static readonly zod = zod(this) -} +}) + .annotate({ identifier: "McpLocalConfig" }) + .pipe(withStatics((s) => ({ zod: zod(s) }))) +export type Local = Schema.Schema.Type -export class OAuth extends Schema.Class("McpOAuthConfig")({ +export const OAuth = Schema.Struct({ clientId: Schema.optional(Schema.String).annotate({ description: "OAuth client ID. If not provided, dynamic client registration (RFC 7591) will be attempted.", }), @@ -31,11 +32,12 @@ export class OAuth extends Schema.Class("McpOAuthConfig")({ redirectUri: Schema.optional(Schema.String).annotate({ description: "OAuth redirect URI (default: http://127.0.0.1:19876/mcp/oauth/callback).", }), -}) { - static readonly zod = zod(this) -} +}) + .annotate({ identifier: "McpOAuthConfig" }) + .pipe(withStatics((s) => ({ zod: zod(s) }))) +export type OAuth = Schema.Schema.Type -export class Remote extends Schema.Class("McpRemoteConfig")({ +export const Remote = Schema.Struct({ type: Schema.Literal("remote").annotate({ description: "Type of MCP server connection" }), url: Schema.String.annotate({ description: "URL of the remote MCP server" }), enabled: Schema.optional(Schema.Boolean).annotate({ @@ -50,9 +52,10 @@ export class Remote extends Schema.Class("McpRemoteConfig")({ timeout: Schema.optional(Schema.Number).annotate({ description: "Timeout in ms for MCP server requests. 
Defaults to 5000 (5 seconds) if not specified.", }), -}) { - static readonly zod = zod(this) -} +}) + .annotate({ identifier: "McpRemoteConfig" }) + .pipe(withStatics((s) => ({ zod: zod(s) }))) +export type Remote = Schema.Schema.Type export const Info = Schema.Union([Local, Remote]) .annotate({ discriminator: "type" }) diff --git a/packages/opencode/src/config/provider.ts b/packages/opencode/src/config/provider.ts index 212e716251..bd6ae35996 100644 --- a/packages/opencode/src/config/provider.ts +++ b/packages/opencode/src/config/provider.ts @@ -70,7 +70,7 @@ export const Model = Schema.Struct({ ), }).pipe(withStatics((s) => ({ zod: zod(s) }))) -export class Info extends Schema.Class("ProviderConfig")({ +export const Info = Schema.Struct({ api: Schema.optional(Schema.String), name: Schema.optional(Schema.String), env: Schema.optional(Schema.mutable(Schema.Array(Schema.String))), @@ -107,8 +107,9 @@ export class Info extends Schema.Class("ProviderConfig")({ ), ), models: Schema.optional(Schema.Record(Schema.String, Model)), -}) { - static readonly zod = zod(this) -} +}) + .annotate({ identifier: "ProviderConfig" }) + .pipe(withStatics((s) => ({ zod: zod(s) }))) +export type Info = Schema.Schema.Type export * as ConfigProvider from "./provider" diff --git a/packages/opencode/src/config/server.ts b/packages/opencode/src/config/server.ts index 969a79964b..3ce4fe6262 100644 --- a/packages/opencode/src/config/server.ts +++ b/packages/opencode/src/config/server.ts @@ -1,7 +1,8 @@ import { Schema } from "effect" import { zod } from "@/util/effect-zod" +import { withStatics } from "@/util/schema" -export class Server extends Schema.Class("ServerConfig")({ +export const Server = Schema.Struct({ port: Schema.optional(Schema.Number.check(Schema.isInt()).check(Schema.isGreaterThan(0))).annotate({ description: "Port to listen on", }), @@ -13,8 +14,9 @@ export class Server extends Schema.Class("ServerConfig")({ cors: 
Schema.optional(Schema.mutable(Schema.Array(Schema.String))).annotate({ description: "Additional domains to allow for CORS", }), -}) { - static readonly zod = zod(this) -} +}) + .annotate({ identifier: "ServerConfig" }) + .pipe(withStatics((s) => ({ zod: zod(s) }))) +export type Server = Schema.Schema.Type export * as ConfigServer from "./server" diff --git a/packages/opencode/src/server/routes/instance/httpapi/config.ts b/packages/opencode/src/server/routes/instance/httpapi/config.ts index 14aa94f9fc..678e96e33f 100644 --- a/packages/opencode/src/server/routes/instance/httpapi/config.ts +++ b/packages/opencode/src/server/routes/instance/httpapi/config.ts @@ -9,6 +9,15 @@ export const ConfigApi = HttpApi.make("config") .add( HttpApiGroup.make("config") .add( + HttpApiEndpoint.get("get", root, { + success: Config.InfoSchema, + }).annotateMerge( + OpenApi.annotations({ + identifier: "config.get", + summary: "Get configuration", + description: "Retrieve the current OpenCode configuration settings and preferences.", + }), + ), HttpApiEndpoint.get("providers", `${root}/providers`, { success: Provider.ConfigProvidersResult, }).annotateMerge( @@ -36,16 +45,23 @@ export const ConfigApi = HttpApi.make("config") export const configHandlers = Layer.unwrap( Effect.gen(function* () { - const svc = yield* Provider.Service + const providerSvc = yield* Provider.Service + const configSvc = yield* Config.Service + + const get = Effect.fn("ConfigHttpApi.get")(function* () { + return yield* configSvc.get() + }) const providers = Effect.fn("ConfigHttpApi.providers")(function* () { - const providers = yield* svc.list() + const providers = yield* providerSvc.list() return { providers: Object.values(providers), default: Provider.defaultModelIDs(providers), } }) - return HttpApiBuilder.group(ConfigApi, "config", (handlers) => handlers.handle("providers", providers)) + return HttpApiBuilder.group(ConfigApi, "config", (handlers) => + handlers.handle("get", get).handle("providers", 
providers), + ) }), ).pipe(Layer.provide(Provider.defaultLayer), Layer.provide(Config.defaultLayer)) diff --git a/packages/opencode/src/server/routes/instance/index.ts b/packages/opencode/src/server/routes/instance/index.ts index 5cc51d27ab..0038c59619 100644 --- a/packages/opencode/src/server/routes/instance/index.ts +++ b/packages/opencode/src/server/routes/instance/index.ts @@ -40,6 +40,7 @@ export const InstanceRoutes = (upgrade: UpgradeWebSocket): Hono => { app.post("/question/:requestID/reject", (c) => handler(c.req.raw, context)) app.get("/permission", (c) => handler(c.req.raw, context)) app.post("/permission/:requestID/reply", (c) => handler(c.req.raw, context)) + app.get("/config", (c) => handler(c.req.raw, context)) app.get("/config/providers", (c) => handler(c.req.raw, context)) app.get("/provider", (c) => handler(c.req.raw, context)) app.get("/provider/auth", (c) => handler(c.req.raw, context)) From e95474df05d7054f905dc9294148e5e425f2e656 Mon Sep 17 00:00:00 2001 From: Aiden Cline <63023139+rekram1-node@users.noreply.github.com> Date: Tue, 21 Apr 2026 12:08:12 -0400 Subject: [PATCH 24/73] fix: revert parts of a824064c4 which caused system theme regression (#23714) --- bun.lock | 28 ++++++------- packages/opencode/package.json | 4 +- packages/opencode/src/cli/cmd/tui/app.tsx | 8 +++- .../opencode/src/cli/cmd/tui/util/terminal.ts | 39 +++++++++++++++++++ packages/plugin/package.json | 8 ++-- 5 files changed, 66 insertions(+), 21 deletions(-) diff --git a/bun.lock b/bun.lock index 0ba00b23f8..ff186b6750 100644 --- a/bun.lock +++ b/bun.lock @@ -367,8 +367,8 @@ "@opentelemetry/exporter-trace-otlp-http": "0.214.0", "@opentelemetry/sdk-trace-base": "2.6.1", "@opentelemetry/sdk-trace-node": "2.6.1", - "@opentui/core": "0.1.101", - "@opentui/solid": "0.1.101", + "@opentui/core": "0.1.99", + "@opentui/solid": "0.1.99", "@parcel/watcher": "2.5.1", "@pierre/diffs": "catalog:", "@solid-primitives/event-bus": "1.1.2", @@ -466,16 +466,16 @@ "zod": "catalog:", }, 
"devDependencies": { - "@opentui/core": "0.1.101", - "@opentui/solid": "0.1.101", + "@opentui/core": "0.1.99", + "@opentui/solid": "0.1.99", "@tsconfig/node22": "catalog:", "@types/node": "catalog:", "@typescript/native-preview": "catalog:", "typescript": "catalog:", }, "peerDependencies": { - "@opentui/core": ">=0.1.101", - "@opentui/solid": ">=0.1.101", + "@opentui/core": ">=0.1.99", + "@opentui/solid": ">=0.1.99", }, "optionalPeers": [ "@opentui/core", @@ -1604,21 +1604,21 @@ "@opentelemetry/semantic-conventions": ["@opentelemetry/semantic-conventions@1.40.0", "", {}, "sha512-cifvXDhcqMwwTlTK04GBNeIe7yyo28Mfby85QXFe1Yk8nmi36Ab/5UQwptOx84SsoGNRg+EVSjwzfSZMy6pmlw=="], - "@opentui/core": ["@opentui/core@0.1.101", "", { "dependencies": { "bun-ffi-structs": "0.1.2", "diff": "8.0.2", "jimp": "1.6.0", "marked": "17.0.1", "yoga-layout": "3.2.1" }, "optionalDependencies": { "@dimforge/rapier2d-simd-compat": "^0.17.3", "@opentui/core-darwin-arm64": "0.1.101", "@opentui/core-darwin-x64": "0.1.101", "@opentui/core-linux-arm64": "0.1.101", "@opentui/core-linux-x64": "0.1.101", "@opentui/core-win32-arm64": "0.1.101", "@opentui/core-win32-x64": "0.1.101", "bun-webgpu": "0.1.5", "planck": "^1.4.2", "three": "0.177.0" }, "peerDependencies": { "web-tree-sitter": "0.25.10" } }, "sha512-8jUhNKnwCDO3Y2iiEmagoQLjgX5l1WbddQiwky8B5JU4FW0/WRHairBmU1kRAQBmhdeg57dVinSG4iu2PAtKEA=="], + "@opentui/core": ["@opentui/core@0.1.99", "", { "dependencies": { "bun-ffi-structs": "0.1.2", "diff": "8.0.2", "jimp": "1.6.0", "marked": "17.0.1", "yoga-layout": "3.2.1" }, "optionalDependencies": { "@dimforge/rapier2d-simd-compat": "^0.17.3", "@opentui/core-darwin-arm64": "0.1.99", "@opentui/core-darwin-x64": "0.1.99", "@opentui/core-linux-arm64": "0.1.99", "@opentui/core-linux-x64": "0.1.99", "@opentui/core-win32-arm64": "0.1.99", "@opentui/core-win32-x64": "0.1.99", "bun-webgpu": "0.1.5", "planck": "^1.4.2", "three": "0.177.0" }, "peerDependencies": { "web-tree-sitter": "0.25.10" } }, 
"sha512-I3+AEgGzqNWIpWX9g2WOscSPwtQDNOm4KlBjxBWCZjLxkF07u77heWXF7OiAdhKLtNUW6TFiyt6yznqAZPdG3A=="], - "@opentui/core-darwin-arm64": ["@opentui/core-darwin-arm64@0.1.101", "", { "os": "darwin", "cpu": "arm64" }, "sha512-HtqZh8TIKCH1Nge5J0etBCpzYfPY4fVcq110uJm2As6D/dTTPv8r4J+KkrqoSphkpj/Y2b4t7KpqNHthXA0EVw=="], + "@opentui/core-darwin-arm64": ["@opentui/core-darwin-arm64@0.1.99", "", { "os": "darwin", "cpu": "arm64" }, "sha512-bzVrqeX2vb5iWrc/ftOUOqeUY8XO+qSgoTwj5TXHuwagavgwD3Hpeyjx8+icnTTeM4pao0som1WR9xfye6/X5Q=="], - "@opentui/core-darwin-x64": ["@opentui/core-darwin-x64@0.1.101", "", { "os": "darwin", "cpu": "x64" }, "sha512-o5ClQWnGG1inRE2YZAatPw1jPEAJni00amcoIfKBj8e1WS+fQA+iQTq1xFunNcyNPObLDCVuW1X+NrbK9xmPvQ=="], + "@opentui/core-darwin-x64": ["@opentui/core-darwin-x64@0.1.99", "", { "os": "darwin", "cpu": "x64" }, "sha512-VE4FrXBYpkxnvkqcCV1a8aN9jyyMJMihVW+V2NLCtp+4yQsj0AapG5TiUSN76XnmSZRptxDy5rBmEempeoIZbg=="], - "@opentui/core-linux-arm64": ["@opentui/core-linux-arm64@0.1.101", "", { "os": "linux", "cpu": "arm64" }, "sha512-E/weY7DQpaPWGYDPD0CROHowUotqnVlk7Kb6l9+iZCrxm9s7HPRHkcMDVmcWDqHEqa/J879EJcqaUDzDArqC+w=="], + "@opentui/core-linux-arm64": ["@opentui/core-linux-arm64@0.1.99", "", { "os": "linux", "cpu": "arm64" }, "sha512-viXQsbpS7yHjYkl7+am32JdvG96QU9lvHh1UiZtpOxcNUUqiYmA2ZwZFPD2Bi54jNyj5l2hjH6YkD3DzE2FEWA=="], - "@opentui/core-linux-x64": ["@opentui/core-linux-x64@0.1.101", "", { "os": "linux", "cpu": "x64" }, "sha512-+Bfr8jLbbR1WREUMCCvSZ44G1+WU2lPqJx7x1StTa9iFNEdicxCdd0QQsO6cnKn5yW+2Pr/FdrqHbxSQw3ejbA=="], + "@opentui/core-linux-x64": ["@opentui/core-linux-x64@0.1.99", "", { "os": "linux", "cpu": "x64" }, "sha512-WLoEFINOSp0tZSR9y4LUuGc7n4Y7H1wcpjUPzQ9vChkYDXrfZltEanzoDWbDcQ4kZQW5tHVC7LrZHpAsRLwFZg=="], - "@opentui/core-win32-arm64": ["@opentui/core-win32-arm64@0.1.101", "", { "os": "win32", "cpu": "arm64" }, "sha512-LTMIHJzJrVqS8mgpp+tuyVHuqYlicQTvFi/sTsJ6Xswf1asatsvZYsbQByhBLpFT80j10G7uvDa361S5gjCUDA=="], + "@opentui/core-win32-arm64": 
["@opentui/core-win32-arm64@0.1.99", "", { "os": "win32", "cpu": "arm64" }, "sha512-yWMOLWCEO8HdrctU1dMkgZC8qGkiO4Dwr4/e11tTvVpRmYhDsP/IR89ZjEEtOwnKwFOFuB/MxvflqaEWVQ2g5Q=="], - "@opentui/core-win32-x64": ["@opentui/core-win32-x64@0.1.101", "", { "os": "win32", "cpu": "x64" }, "sha512-VaMs5bg6y0tYKptaEK8Hy5wTp4m//wJRKUdW8uvrS9cFgxyovZGuw0+TfK3NgbdeX+8jWm8LEAiak4jle5BABg=="], + "@opentui/core-win32-x64": ["@opentui/core-win32-x64@0.1.99", "", { "os": "win32", "cpu": "x64" }, "sha512-aYRlsL2w8YRL6vPd7/hrqlNVkXU3QowWb01TOvAcHS8UAsXaGFUr47kSDyjxDi1wg1MzmVduCfsC7T3NoThV1w=="], - "@opentui/solid": ["@opentui/solid@0.1.101", "", { "dependencies": { "@babel/core": "7.28.0", "@babel/preset-typescript": "7.27.1", "@opentui/core": "0.1.101", "babel-plugin-module-resolver": "5.0.2", "babel-preset-solid": "1.9.10", "entities": "7.0.1", "s-js": "^0.4.9" }, "peerDependencies": { "solid-js": "1.9.11" } }, "sha512-STY2FQYtVS2rhUgpslG6mM0EAkgobBDF91+B+SNmvXIkJwP+ydP6UVgcuIo5McIbb9GIbAODx5X2Q48PSR7hgw=="], + "@opentui/solid": ["@opentui/solid@0.1.99", "", { "dependencies": { "@babel/core": "7.28.0", "@babel/preset-typescript": "7.27.1", "@opentui/core": "0.1.99", "babel-plugin-module-resolver": "5.0.2", "babel-preset-solid": "1.9.10", "entities": "7.0.1", "s-js": "^0.4.9" }, "peerDependencies": { "solid-js": "1.9.11" } }, "sha512-DrqqO4h2V88FmeIP2cErYkMU0ZK5MrUsZw3w6IzZpoXyyiL4/9qpWzUq+CXx+r16VP2iGxDJwGKUmtFAzUch2Q=="], "@oslojs/asn1": ["@oslojs/asn1@1.0.0", "", { "dependencies": { "@oslojs/binary": "1.0.0" } }, "sha512-zw/wn0sj0j0QKbIXfIlnEcTviaCzYOY3V5rAyjR6YtOByFtJiT574+8p9Wlach0lZH9fddD4yb9laEAIl4vXQA=="], diff --git a/packages/opencode/package.json b/packages/opencode/package.json index 5d8fd4b540..199f4b2153 100644 --- a/packages/opencode/package.json +++ b/packages/opencode/package.json @@ -123,8 +123,8 @@ "@opentelemetry/exporter-trace-otlp-http": "0.214.0", "@opentelemetry/sdk-trace-base": "2.6.1", "@opentelemetry/sdk-trace-node": "2.6.1", - "@opentui/core": "0.1.101", - 
"@opentui/solid": "0.1.101", + "@opentui/core": "0.1.99", + "@opentui/solid": "0.1.99", "@parcel/watcher": "2.5.1", "@pierre/diffs": "catalog:", "@solid-primitives/event-bus": "1.1.2", diff --git a/packages/opencode/src/cli/cmd/tui/app.tsx b/packages/opencode/src/cli/cmd/tui/app.tsx index 5da2740cce..2b31d078cb 100644 --- a/packages/opencode/src/cli/cmd/tui/app.tsx +++ b/packages/opencode/src/cli/cmd/tui/app.tsx @@ -1,6 +1,7 @@ import { render, TimeToFirstDraw, useKeyboard, useRenderer, useTerminalDimensions } from "@opentui/solid" import * as Clipboard from "@tui/util/clipboard" import * as Selection from "@tui/util/selection" +import * as Terminal from "@tui/util/terminal" import { createCliRenderer, MouseButton, type CliRendererConfig } from "@opentui/core" import { RouteProvider, useRoute } from "@tui/context/route" import { @@ -119,6 +120,12 @@ export function tui(input: { const unguard = win32InstallCtrlCGuard() win32DisableProcessedInput() + const mode = await Terminal.getTerminalBackgroundColor() + + // Re-clear after getTerminalBackgroundColor() because setRawMode(false) + // restores the original console mode, including processed input on Windows. + win32DisableProcessedInput() + const onExit = async () => { unguard?.() resolve() @@ -129,7 +136,6 @@ export function tui(input: { } const renderer = await createCliRenderer(rendererConfig(input.config)) - const mode = (await renderer.waitForThemeMode(1000)) ?? 
"dark" await render(() => { return ( diff --git a/packages/opencode/src/cli/cmd/tui/util/terminal.ts b/packages/opencode/src/cli/cmd/tui/util/terminal.ts index c026b7381c..a61390f2cf 100644 --- a/packages/opencode/src/cli/cmd/tui/util/terminal.ts +++ b/packages/opencode/src/cli/cmd/tui/util/terminal.ts @@ -17,6 +17,12 @@ function parse(color: string): RGBA | null { return null } +function mode(background: RGBA | null): "dark" | "light" { + if (!background) return "dark" + const luminance = (0.299 * background.r + 0.587 * background.g + 0.114 * background.b) / 255 + return luminance > 0.5 ? "light" : "dark" +} + /** * Query terminal colors including background, foreground, and palette (0-15). * Uses OSC escape sequences to retrieve actual terminal color values. @@ -94,3 +100,36 @@ export async function colors(): Promise<{ }, 1000) }) } + +// Keep startup mode detection separate from `colors()`: the TUI boot path only +// needs OSC 11 and should resolve on the first background response instead of +// waiting on the full palette query used by system theme generation. 
+export async function getTerminalBackgroundColor(): Promise<"dark" | "light"> { + if (!process.stdin.isTTY) return "dark" + + return new Promise((resolve) => { + let timeout: NodeJS.Timeout + + const cleanup = () => { + process.stdin.setRawMode(false) + process.stdin.removeListener("data", handler) + clearTimeout(timeout) + } + + const handler = (data: Buffer) => { + const match = data.toString().match(/\x1b]11;([^\x07\x1b]+)/) + if (!match) return + cleanup() + resolve(mode(parse(match[1]))) + } + + process.stdin.setRawMode(true) + process.stdin.on("data", handler) + process.stdout.write("\x1b]11;?\x07") + + timeout = setTimeout(() => { + cleanup() + resolve("dark") + }, 1000) + }) +} diff --git a/packages/plugin/package.json b/packages/plugin/package.json index 110d6a0916..231d16e5e6 100644 --- a/packages/plugin/package.json +++ b/packages/plugin/package.json @@ -22,8 +22,8 @@ "zod": "catalog:" }, "peerDependencies": { - "@opentui/core": ">=0.1.101", - "@opentui/solid": ">=0.1.101" + "@opentui/core": ">=0.1.99", + "@opentui/solid": ">=0.1.99" }, "peerDependenciesMeta": { "@opentui/core": { @@ -34,8 +34,8 @@ } }, "devDependencies": { - "@opentui/core": "0.1.101", - "@opentui/solid": "0.1.101", + "@opentui/core": "0.1.99", + "@opentui/solid": "0.1.99", "@tsconfig/node22": "catalog:", "@types/node": "catalog:", "typescript": "catalog:", From 3205f122eb7a1c97c63ee18f7069ea2248e2b2b4 Mon Sep 17 00:00:00 2001 From: "opencode-agent[bot]" Date: Tue, 21 Apr 2026 16:57:27 +0000 Subject: [PATCH 25/73] chore: update nix node_modules hashes --- nix/hashes.json | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/nix/hashes.json b/nix/hashes.json index e68adae5ba..21279a327d 100644 --- a/nix/hashes.json +++ b/nix/hashes.json @@ -1,8 +1,8 @@ { "nodeModules": { - "x86_64-linux": "sha256-DOGOZdPdkcuyDhVAyWHGsL4rrV28S+YFZj/VORuoQ8Q=", - "aarch64-linux": "sha256-WRnAaEoKvgFFZ+UkbYtD9gBw0HtV1jdUqv7yUE2uTAQ=", - "aarch64-darwin": 
"sha256-LxIj/dsL88M99T3WLaD9FL6Qdu2TV+kr1RMZaZ3i4WM=", - "x86_64-darwin": "sha256-PgIvplw6yz9KN5nBWox3BXZIXDbkJ3ZuDPKKSVF82MU=" + "x86_64-linux": "sha256-NczRp8MPppkqP8PQfWMUWJ/Wofvf2YVy5m4i22Pi3jg=", + "aarch64-linux": "sha256-QIxGOu8Fj+sWgc9hKvm1BLiIErxEtd17SPlwZGac9sQ=", + "aarch64-darwin": "sha256-Rb9qbMM+ARn0iBCaZurwcoUBCplbMXEZwrXVKextp3I=", + "x86_64-darwin": "sha256-KVxOKkaVV7W+K4reEk14MTLgmtoqwCYDqDNXNeS6ync=" } } From ecc06a3d8f7783d3759061c3404341b0cdc537ec Mon Sep 17 00:00:00 2001 From: Kit Langton Date: Tue, 21 Apr 2026 14:06:47 -0400 Subject: [PATCH 26/73] refactor(core): make Config.Info canonical Effect Schema (#23716) --- packages/opencode/script/schema.ts | 2 +- packages/opencode/src/config/config.ts | 35 ++++++++++++------- packages/opencode/src/server/routes/global.ts | 6 ++-- .../src/server/routes/instance/config.ts | 6 ++-- .../server/routes/instance/httpapi/config.ts | 2 +- packages/opencode/test/config/config.test.ts | 10 +++--- .../opencode/test/session/compaction.test.ts | 2 +- 7 files changed, 36 insertions(+), 27 deletions(-) diff --git a/packages/opencode/script/schema.ts b/packages/opencode/script/schema.ts index c0f302f21a..448760ae1a 100755 --- a/packages/opencode/script/schema.ts +++ b/packages/opencode/script/schema.ts @@ -55,7 +55,7 @@ const configFile = process.argv[2] const tuiFile = process.argv[3] console.log(configFile) -await Bun.write(configFile, JSON.stringify(generate(Config.Info), null, 2)) +await Bun.write(configFile, JSON.stringify(generate(Config.Info.zod), null, 2)) if (tuiFile) { console.log(tuiFile) diff --git a/packages/opencode/src/config/config.ts b/packages/opencode/src/config/config.ts index 7fe337176a..b4f4ace67e 100644 --- a/packages/opencode/src/config/config.ts +++ b/packages/opencode/src/config/config.ts @@ -25,6 +25,7 @@ import { Context, Duration, Effect, Exit, Fiber, Layer, Option, Schema } from "e import { EffectFlock } from "@opencode-ai/shared/util/effect-flock" import { InstanceRef } from 
"@/effect/instance-ref" import { zod, ZodOverride } from "@/util/effect-zod" +import { withStatics } from "@/util/schema" import { ConfigAgent } from "./agent" import { ConfigCommand } from "./command" import { ConfigFormatter } from "./formatter" @@ -91,7 +92,15 @@ const LogLevelRef = Schema.Any.annotate({ [ZodOverride]: Log.Level }) const PositiveInt = Schema.Number.check(Schema.isInt()).check(Schema.isGreaterThan(0)) const NonNegativeInt = Schema.Number.check(Schema.isInt()).check(Schema.isGreaterThanOrEqualTo(0)) -export const InfoSchema = Schema.Struct({ +// The Effect Schema is the canonical source of truth. The `.zod` compatibility +// surface is derived so existing Hono validators keep working without a parallel +// Zod definition. +// +// The walker emits `z.object({...})` which is non-strict by default. Config +// historically uses `.strict()` (additionalProperties: false in openapi.json), +// so layer that on after derivation. Re-apply the Config ref afterward +// since `.strict()` strips the walker's meta annotation. +export const Info = Schema.Struct({ $schema: Schema.optional(Schema.String).annotate({ description: "JSON schema reference for configuration validation", }), @@ -235,6 +244,14 @@ export const InfoSchema = Schema.Struct({ }), ), }) + .annotate({ identifier: "Config" }) + .pipe( + withStatics((s) => ({ + zod: (zod(s) as unknown as z.ZodObject) + .strict() + .meta({ ref: "Config" }) as unknown as z.ZodType>>, + })), + ) // Schema.Struct produces readonly types by default, but the service code // below mutates Info objects directly (e.g. `config.mode = ...`). Strip the @@ -256,15 +273,7 @@ type DeepMutable = T extends readonly [unknown, ...unknown[]] ? { -readonly [K in keyof T]: DeepMutable } : T -// The walker emits `z.object({...})` which is non-strict by default. Config -// historically uses `.strict()` (additionalProperties: false in openapi.json), -// so layer that on after derivation. 
Re-apply the Config ref afterward -// since `.strict()` strips the walker's meta annotation. -export const Info = (zod(InfoSchema) as unknown as z.ZodObject) - .strict() - .meta({ ref: "Config" }) as unknown as z.ZodType>> - -export type Info = z.output & { +export type Info = DeepMutable> & { // plugin_origins is derived state, not a persisted config field. It keeps each winning plugin spec together // with the file and scope it came from so later runtime code can make location-sensitive decisions. plugin_origins?: ConfigPlugin.Origin[] @@ -361,7 +370,7 @@ export const layer = Layer.effect( ), ) const parsed = ConfigParse.jsonc(expanded, source) - const data = ConfigParse.schema(Info, normalizeLoadedConfig(parsed, source), source) + const data = ConfigParse.schema(Info.zod, normalizeLoadedConfig(parsed, source), source) if (!("path" in options)) return data yield* Effect.promise(() => resolveLoadedPlugins(data, options.path)) @@ -753,13 +762,13 @@ export const layer = Layer.effect( let next: Info if (!file.endsWith(".jsonc")) { - const existing = ConfigParse.schema(Info, ConfigParse.jsonc(before, file), file) + const existing = ConfigParse.schema(Info.zod, ConfigParse.jsonc(before, file), file) const merged = mergeDeep(writable(existing), writable(config)) yield* fs.writeFileString(file, JSON.stringify(merged, null, 2)).pipe(Effect.orDie) next = merged } else { const updated = patchJsonc(before, writable(config)) - next = ConfigParse.schema(Info, ConfigParse.jsonc(updated, file), file) + next = ConfigParse.schema(Info.zod, ConfigParse.jsonc(updated, file), file) yield* fs.writeFileString(file, updated).pipe(Effect.orDie) } diff --git a/packages/opencode/src/server/routes/global.ts b/packages/opencode/src/server/routes/global.ts index 8208cf9669..54f9972e02 100644 --- a/packages/opencode/src/server/routes/global.ts +++ b/packages/opencode/src/server/routes/global.ts @@ -147,7 +147,7 @@ export const GlobalRoutes = lazy(() => description: "Get global config info", 
content: { "application/json": { - schema: resolver(Config.Info), + schema: resolver(Config.Info.zod), }, }, }, @@ -168,14 +168,14 @@ export const GlobalRoutes = lazy(() => description: "Successfully updated global config", content: { "application/json": { - schema: resolver(Config.Info), + schema: resolver(Config.Info.zod), }, }, }, ...errors(400), }, }), - validator("json", Config.Info), + validator("json", Config.Info.zod), async (c) => { const config = c.req.valid("json") const next = await AppRuntime.runPromise(Config.Service.use((cfg) => cfg.updateGlobal(config))) diff --git a/packages/opencode/src/server/routes/instance/config.ts b/packages/opencode/src/server/routes/instance/config.ts index 7f368cd31c..88e5feef9d 100644 --- a/packages/opencode/src/server/routes/instance/config.ts +++ b/packages/opencode/src/server/routes/instance/config.ts @@ -20,7 +20,7 @@ export const ConfigRoutes = lazy(() => description: "Get config info", content: { "application/json": { - schema: resolver(Config.Info), + schema: resolver(Config.Info.zod), }, }, }, @@ -43,14 +43,14 @@ export const ConfigRoutes = lazy(() => description: "Successfully updated config", content: { "application/json": { - schema: resolver(Config.Info), + schema: resolver(Config.Info.zod), }, }, }, ...errors(400), }, }), - validator("json", Config.Info), + validator("json", Config.Info.zod), async (c) => jsonRequest("ConfigRoutes.update", c, function* () { const config = c.req.valid("json") diff --git a/packages/opencode/src/server/routes/instance/httpapi/config.ts b/packages/opencode/src/server/routes/instance/httpapi/config.ts index 678e96e33f..2dfdec172a 100644 --- a/packages/opencode/src/server/routes/instance/httpapi/config.ts +++ b/packages/opencode/src/server/routes/instance/httpapi/config.ts @@ -10,7 +10,7 @@ export const ConfigApi = HttpApi.make("config") HttpApiGroup.make("config") .add( HttpApiEndpoint.get("get", root, { - success: Config.InfoSchema, + success: Config.Info, }).annotateMerge( 
OpenApi.annotations({ identifier: "config.get", diff --git a/packages/opencode/test/config/config.test.ts b/packages/opencode/test/config/config.test.ts index 3fafdadaa6..e9b0538193 100644 --- a/packages/opencode/test/config/config.test.ts +++ b/packages/opencode/test/config/config.test.ts @@ -2221,7 +2221,7 @@ describe("OPENCODE_CONFIG_CONTENT token substitution", () => { test("parseManagedPlist strips MDM metadata keys", async () => { const config = ConfigParse.schema( - Config.Info, + Config.Info.zod, ConfigParse.jsonc( await ConfigManaged.parseManagedPlist( JSON.stringify({ @@ -2249,7 +2249,7 @@ test("parseManagedPlist strips MDM metadata keys", async () => { test("parseManagedPlist parses server settings", async () => { const config = ConfigParse.schema( - Config.Info, + Config.Info.zod, ConfigParse.jsonc( await ConfigManaged.parseManagedPlist( JSON.stringify({ @@ -2269,7 +2269,7 @@ test("parseManagedPlist parses server settings", async () => { test("parseManagedPlist parses permission rules", async () => { const config = ConfigParse.schema( - Config.Info, + Config.Info.zod, ConfigParse.jsonc( await ConfigManaged.parseManagedPlist( JSON.stringify({ @@ -2299,7 +2299,7 @@ test("parseManagedPlist parses permission rules", async () => { test("parseManagedPlist parses enabled_providers", async () => { const config = ConfigParse.schema( - Config.Info, + Config.Info.zod, ConfigParse.jsonc( await ConfigManaged.parseManagedPlist( JSON.stringify({ @@ -2316,7 +2316,7 @@ test("parseManagedPlist parses enabled_providers", async () => { test("parseManagedPlist handles empty config", async () => { const config = ConfigParse.schema( - Config.Info, + Config.Info.zod, ConfigParse.jsonc( await ConfigManaged.parseManagedPlist(JSON.stringify({ $schema: "https://opencode.ai/config.json" })), "test:mobileconfig", diff --git a/packages/opencode/test/session/compaction.test.ts b/packages/opencode/test/session/compaction.test.ts index 14b47922b4..0e2b179f00 100644 --- 
a/packages/opencode/test/session/compaction.test.ts +++ b/packages/opencode/test/session/compaction.test.ts @@ -168,7 +168,7 @@ function layer(result: "continue" | "compact") { } function cfg(compaction?: Config.Info["compaction"]) { - const base = Config.Info.parse({}) + const base = Config.Info.zod.parse({}) return Layer.mock(Config.Service)({ get: () => Effect.succeed({ ...base, compaction }), }) From 1e1a500603de7891f2f71dcd79b56678288fce20 Mon Sep 17 00:00:00 2001 From: "opencode-agent[bot]" Date: Tue, 21 Apr 2026 18:08:04 +0000 Subject: [PATCH 27/73] chore: generate --- packages/opencode/src/config/config.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/opencode/src/config/config.ts b/packages/opencode/src/config/config.ts index b4f4ace67e..4af079127e 100644 --- a/packages/opencode/src/config/config.ts +++ b/packages/opencode/src/config/config.ts @@ -247,9 +247,9 @@ export const Info = Schema.Struct({ .annotate({ identifier: "Config" }) .pipe( withStatics((s) => ({ - zod: (zod(s) as unknown as z.ZodObject) - .strict() - .meta({ ref: "Config" }) as unknown as z.ZodType>>, + zod: (zod(s) as unknown as z.ZodObject).strict().meta({ ref: "Config" }) as unknown as z.ZodType< + DeepMutable> + >, })), ) From c9fb8d0ce70b632d262bcb594793e5496d16b61a Mon Sep 17 00:00:00 2001 From: opencode Date: Tue, 21 Apr 2026 19:07:36 +0000 Subject: [PATCH 28/73] sync release versions for v1.14.20 --- bun.lock | 32 +++++++++++++------------- packages/app/package.json | 2 +- packages/console/app/package.json | 2 +- packages/console/core/package.json | 2 +- packages/console/function/package.json | 2 +- packages/console/mail/package.json | 2 +- packages/desktop-electron/package.json | 2 +- packages/desktop/package.json | 2 +- packages/enterprise/package.json | 2 +- packages/extensions/zed/extension.toml | 12 +++++----- packages/function/package.json | 2 +- packages/opencode/package.json | 2 +- packages/plugin/package.json | 2 +- 
packages/sdk/js/package.json | 2 +- packages/shared/package.json | 2 +- packages/slack/package.json | 2 +- packages/ui/package.json | 2 +- packages/web/package.json | 2 +- sdks/vscode/package.json | 2 +- 19 files changed, 39 insertions(+), 39 deletions(-) diff --git a/bun.lock b/bun.lock index ff186b6750..77ab24240b 100644 --- a/bun.lock +++ b/bun.lock @@ -29,7 +29,7 @@ }, "packages/app": { "name": "@opencode-ai/app", - "version": "1.14.19", + "version": "1.14.20", "dependencies": { "@kobalte/core": "catalog:", "@opencode-ai/sdk": "workspace:*", @@ -83,7 +83,7 @@ }, "packages/console/app": { "name": "@opencode-ai/console-app", - "version": "1.14.19", + "version": "1.14.20", "dependencies": { "@cloudflare/vite-plugin": "1.15.2", "@ibm/plex": "6.4.1", @@ -117,7 +117,7 @@ }, "packages/console/core": { "name": "@opencode-ai/console-core", - "version": "1.14.19", + "version": "1.14.20", "dependencies": { "@aws-sdk/client-sts": "3.782.0", "@jsx-email/render": "1.1.1", @@ -144,7 +144,7 @@ }, "packages/console/function": { "name": "@opencode-ai/console-function", - "version": "1.14.19", + "version": "1.14.20", "dependencies": { "@ai-sdk/anthropic": "3.0.64", "@ai-sdk/openai": "3.0.48", @@ -168,7 +168,7 @@ }, "packages/console/mail": { "name": "@opencode-ai/console-mail", - "version": "1.14.19", + "version": "1.14.20", "dependencies": { "@jsx-email/all": "2.2.3", "@jsx-email/cli": "1.4.3", @@ -192,7 +192,7 @@ }, "packages/desktop": { "name": "@opencode-ai/desktop", - "version": "1.14.19", + "version": "1.14.20", "dependencies": { "@opencode-ai/app": "workspace:*", "@opencode-ai/ui": "workspace:*", @@ -225,7 +225,7 @@ }, "packages/desktop-electron": { "name": "@opencode-ai/desktop-electron", - "version": "1.14.19", + "version": "1.14.20", "dependencies": { "drizzle-orm": "catalog:", "effect": "catalog:", @@ -269,7 +269,7 @@ }, "packages/enterprise": { "name": "@opencode-ai/enterprise", - "version": "1.14.19", + "version": "1.14.20", "dependencies": { "@opencode-ai/shared": 
"workspace:*", "@opencode-ai/ui": "workspace:*", @@ -298,7 +298,7 @@ }, "packages/function": { "name": "@opencode-ai/function", - "version": "1.14.19", + "version": "1.14.20", "dependencies": { "@octokit/auth-app": "8.0.1", "@octokit/rest": "catalog:", @@ -314,7 +314,7 @@ }, "packages/opencode": { "name": "opencode", - "version": "1.14.19", + "version": "1.14.20", "bin": { "opencode": "./bin/opencode", }, @@ -459,7 +459,7 @@ }, "packages/plugin": { "name": "@opencode-ai/plugin", - "version": "1.14.19", + "version": "1.14.20", "dependencies": { "@opencode-ai/sdk": "workspace:*", "effect": "catalog:", @@ -494,7 +494,7 @@ }, "packages/sdk/js": { "name": "@opencode-ai/sdk", - "version": "1.14.19", + "version": "1.14.20", "dependencies": { "cross-spawn": "catalog:", }, @@ -509,7 +509,7 @@ }, "packages/shared": { "name": "@opencode-ai/shared", - "version": "1.14.19", + "version": "1.14.20", "bin": { "opencode": "./bin/opencode", }, @@ -533,7 +533,7 @@ }, "packages/slack": { "name": "@opencode-ai/slack", - "version": "1.14.19", + "version": "1.14.20", "dependencies": { "@opencode-ai/sdk": "workspace:*", "@slack/bolt": "^3.17.1", @@ -568,7 +568,7 @@ }, "packages/ui": { "name": "@opencode-ai/ui", - "version": "1.14.19", + "version": "1.14.20", "dependencies": { "@kobalte/core": "catalog:", "@opencode-ai/sdk": "workspace:*", @@ -617,7 +617,7 @@ }, "packages/web": { "name": "@opencode-ai/web", - "version": "1.14.19", + "version": "1.14.20", "dependencies": { "@astrojs/cloudflare": "12.6.3", "@astrojs/markdown-remark": "6.3.1", diff --git a/packages/app/package.json b/packages/app/package.json index 73a648cb6f..f461459fcb 100644 --- a/packages/app/package.json +++ b/packages/app/package.json @@ -1,6 +1,6 @@ { "name": "@opencode-ai/app", - "version": "1.14.19", + "version": "1.14.20", "description": "", "type": "module", "exports": { diff --git a/packages/console/app/package.json b/packages/console/app/package.json index 6f63db1526..d8c9434bfa 100644 --- 
a/packages/console/app/package.json +++ b/packages/console/app/package.json @@ -1,6 +1,6 @@ { "name": "@opencode-ai/console-app", - "version": "1.14.19", + "version": "1.14.20", "type": "module", "license": "MIT", "scripts": { diff --git a/packages/console/core/package.json b/packages/console/core/package.json index 3605cfb0ee..090e5c59d9 100644 --- a/packages/console/core/package.json +++ b/packages/console/core/package.json @@ -1,7 +1,7 @@ { "$schema": "https://json.schemastore.org/package.json", "name": "@opencode-ai/console-core", - "version": "1.14.19", + "version": "1.14.20", "private": true, "type": "module", "license": "MIT", diff --git a/packages/console/function/package.json b/packages/console/function/package.json index da73bc61fc..a2e0c5f03a 100644 --- a/packages/console/function/package.json +++ b/packages/console/function/package.json @@ -1,6 +1,6 @@ { "name": "@opencode-ai/console-function", - "version": "1.14.19", + "version": "1.14.20", "$schema": "https://json.schemastore.org/package.json", "private": true, "type": "module", diff --git a/packages/console/mail/package.json b/packages/console/mail/package.json index b66296670f..7aa0cb7574 100644 --- a/packages/console/mail/package.json +++ b/packages/console/mail/package.json @@ -1,6 +1,6 @@ { "name": "@opencode-ai/console-mail", - "version": "1.14.19", + "version": "1.14.20", "dependencies": { "@jsx-email/all": "2.2.3", "@jsx-email/cli": "1.4.3", diff --git a/packages/desktop-electron/package.json b/packages/desktop-electron/package.json index 7105cb50ef..155e43f5e9 100644 --- a/packages/desktop-electron/package.json +++ b/packages/desktop-electron/package.json @@ -1,7 +1,7 @@ { "name": "@opencode-ai/desktop-electron", "private": true, - "version": "1.14.19", + "version": "1.14.20", "type": "module", "license": "MIT", "homepage": "https://opencode.ai", diff --git a/packages/desktop/package.json b/packages/desktop/package.json index 7b60658f43..a3b6eac6b0 100644 --- a/packages/desktop/package.json 
+++ b/packages/desktop/package.json @@ -1,7 +1,7 @@ { "name": "@opencode-ai/desktop", "private": true, - "version": "1.14.19", + "version": "1.14.20", "type": "module", "license": "MIT", "scripts": { diff --git a/packages/enterprise/package.json b/packages/enterprise/package.json index 4783381f1f..0168636506 100644 --- a/packages/enterprise/package.json +++ b/packages/enterprise/package.json @@ -1,6 +1,6 @@ { "name": "@opencode-ai/enterprise", - "version": "1.14.19", + "version": "1.14.20", "private": true, "type": "module", "license": "MIT", diff --git a/packages/extensions/zed/extension.toml b/packages/extensions/zed/extension.toml index cf48deae11..cee69b11af 100644 --- a/packages/extensions/zed/extension.toml +++ b/packages/extensions/zed/extension.toml @@ -1,7 +1,7 @@ id = "opencode" name = "OpenCode" description = "The open source coding agent." -version = "1.14.19" +version = "1.14.20" schema_version = 1 authors = ["Anomaly"] repository = "https://github.com/anomalyco/opencode" @@ -11,26 +11,26 @@ name = "OpenCode" icon = "./icons/opencode.svg" [agent_servers.opencode.targets.darwin-aarch64] -archive = "https://github.com/anomalyco/opencode/releases/download/v1.14.19/opencode-darwin-arm64.zip" +archive = "https://github.com/anomalyco/opencode/releases/download/v1.14.20/opencode-darwin-arm64.zip" cmd = "./opencode" args = ["acp"] [agent_servers.opencode.targets.darwin-x86_64] -archive = "https://github.com/anomalyco/opencode/releases/download/v1.14.19/opencode-darwin-x64.zip" +archive = "https://github.com/anomalyco/opencode/releases/download/v1.14.20/opencode-darwin-x64.zip" cmd = "./opencode" args = ["acp"] [agent_servers.opencode.targets.linux-aarch64] -archive = "https://github.com/anomalyco/opencode/releases/download/v1.14.19/opencode-linux-arm64.tar.gz" +archive = "https://github.com/anomalyco/opencode/releases/download/v1.14.20/opencode-linux-arm64.tar.gz" cmd = "./opencode" args = ["acp"] [agent_servers.opencode.targets.linux-x86_64] -archive = 
"https://github.com/anomalyco/opencode/releases/download/v1.14.19/opencode-linux-x64.tar.gz" +archive = "https://github.com/anomalyco/opencode/releases/download/v1.14.20/opencode-linux-x64.tar.gz" cmd = "./opencode" args = ["acp"] [agent_servers.opencode.targets.windows-x86_64] -archive = "https://github.com/anomalyco/opencode/releases/download/v1.14.19/opencode-windows-x64.zip" +archive = "https://github.com/anomalyco/opencode/releases/download/v1.14.20/opencode-windows-x64.zip" cmd = "./opencode.exe" args = ["acp"] diff --git a/packages/function/package.json b/packages/function/package.json index f01d607e3e..4e920b6889 100644 --- a/packages/function/package.json +++ b/packages/function/package.json @@ -1,6 +1,6 @@ { "name": "@opencode-ai/function", - "version": "1.14.19", + "version": "1.14.20", "$schema": "https://json.schemastore.org/package.json", "private": true, "type": "module", diff --git a/packages/opencode/package.json b/packages/opencode/package.json index 199f4b2153..dcd0a9ad6c 100644 --- a/packages/opencode/package.json +++ b/packages/opencode/package.json @@ -1,6 +1,6 @@ { "$schema": "https://json.schemastore.org/package.json", - "version": "1.14.19", + "version": "1.14.20", "name": "opencode", "type": "module", "license": "MIT", diff --git a/packages/plugin/package.json b/packages/plugin/package.json index 231d16e5e6..64d12eb42d 100644 --- a/packages/plugin/package.json +++ b/packages/plugin/package.json @@ -1,7 +1,7 @@ { "$schema": "https://json.schemastore.org/package.json", "name": "@opencode-ai/plugin", - "version": "1.14.19", + "version": "1.14.20", "type": "module", "license": "MIT", "scripts": { diff --git a/packages/sdk/js/package.json b/packages/sdk/js/package.json index 6769ba391c..a6ba6ee9f4 100644 --- a/packages/sdk/js/package.json +++ b/packages/sdk/js/package.json @@ -1,7 +1,7 @@ { "$schema": "https://json.schemastore.org/package.json", "name": "@opencode-ai/sdk", - "version": "1.14.19", + "version": "1.14.20", "type": "module", 
"license": "MIT", "scripts": { diff --git a/packages/shared/package.json b/packages/shared/package.json index cb2b04ee50..22a8c9c071 100644 --- a/packages/shared/package.json +++ b/packages/shared/package.json @@ -1,6 +1,6 @@ { "$schema": "https://json.schemastore.org/package.json", - "version": "1.14.19", + "version": "1.14.20", "name": "@opencode-ai/shared", "type": "module", "license": "MIT", diff --git a/packages/slack/package.json b/packages/slack/package.json index 67dd7ef352..7a016907ae 100644 --- a/packages/slack/package.json +++ b/packages/slack/package.json @@ -1,6 +1,6 @@ { "name": "@opencode-ai/slack", - "version": "1.14.19", + "version": "1.14.20", "type": "module", "license": "MIT", "scripts": { diff --git a/packages/ui/package.json b/packages/ui/package.json index 9dccd909a8..dcf57345e8 100644 --- a/packages/ui/package.json +++ b/packages/ui/package.json @@ -1,6 +1,6 @@ { "name": "@opencode-ai/ui", - "version": "1.14.19", + "version": "1.14.20", "type": "module", "license": "MIT", "exports": { diff --git a/packages/web/package.json b/packages/web/package.json index d29cc6fc50..8fd33564d7 100644 --- a/packages/web/package.json +++ b/packages/web/package.json @@ -2,7 +2,7 @@ "name": "@opencode-ai/web", "type": "module", "license": "MIT", - "version": "1.14.19", + "version": "1.14.20", "scripts": { "dev": "astro dev", "dev:remote": "VITE_API_URL=https://api.opencode.ai astro dev", diff --git a/sdks/vscode/package.json b/sdks/vscode/package.json index bef2049874..720c70196d 100644 --- a/sdks/vscode/package.json +++ b/sdks/vscode/package.json @@ -2,7 +2,7 @@ "name": "opencode", "displayName": "opencode", "description": "opencode for VS Code", - "version": "1.14.19", + "version": "1.14.20", "publisher": "sst-dev", "repository": { "type": "git", From cd6415f332f53993d25f5371801cc46a123c6ef3 Mon Sep 17 00:00:00 2001 From: Rahul Iyer <4255590+rahuliyer95@users.noreply.github.com> Date: Tue, 21 Apr 2026 15:30:15 -0400 Subject: [PATCH 29/73] fix(tui): don't 
check for version upgrades if it's disabled by the user (#20089) Co-authored-by: Aiden Cline <63023139+rekram1-node@users.noreply.github.com> --- packages/opencode/src/cli/upgrade.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/opencode/src/cli/upgrade.ts b/packages/opencode/src/cli/upgrade.ts index 7c6f08874b..a3e3f3013d 100644 --- a/packages/opencode/src/cli/upgrade.ts +++ b/packages/opencode/src/cli/upgrade.ts @@ -7,6 +7,7 @@ import { InstallationVersion } from "@/installation/version" export async function upgrade() { const config = await AppRuntime.runPromise(Config.Service.use((cfg) => cfg.getGlobal())) + if (config.autoupdate === false || Flag.OPENCODE_DISABLE_AUTOUPDATE) return const method = await AppRuntime.runPromise(Installation.Service.use((svc) => svc.method())) const latest = await AppRuntime.runPromise(Installation.Service.use((svc) => svc.latest(method))).catch(() => {}) if (!latest) return @@ -17,7 +18,6 @@ export async function upgrade() { } if (InstallationVersion === latest) return - if (config.autoupdate === false || Flag.OPENCODE_DISABLE_AUTOUPDATE) return const kind = Installation.getReleaseType(InstallationVersion, latest) From 58232d896eb9f358780e224a2bf1b6540fd8bb5a Mon Sep 17 00:00:00 2001 From: Aiden Cline <63023139+rekram1-node@users.noreply.github.com> Date: Tue, 21 Apr 2026 15:33:35 -0400 Subject: [PATCH 30/73] fix: dont show variants for kimi models that dont support them (#23696) --- packages/opencode/src/provider/transform.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/opencode/src/provider/transform.ts b/packages/opencode/src/provider/transform.ts index 4ed43ce994..863f012731 100644 --- a/packages/opencode/src/provider/transform.ts +++ b/packages/opencode/src/provider/transform.ts @@ -410,7 +410,7 @@ export function variants(model: Provider.Model): Record Date: Tue, 21 Apr 2026 20:17:13 +0000 Subject: [PATCH 31/73] Update VOUCHED list 
https://github.com/anomalyco/opencode/issues/23735#issuecomment-4291498854 --- .github/VOUCHED.td | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/VOUCHED.td b/.github/VOUCHED.td index 8bc1d8e2b8..65bda9804a 100644 --- a/.github/VOUCHED.td +++ b/.github/VOUCHED.td @@ -27,6 +27,7 @@ r44vc0rp rekram1-node -ricardo-m-l -robinmordasiewicz +rubdos shantur simonklee -spider-yamet clawdbot/llm psychosis, spam pinging the team From 1a20703469e46e4cc3682843d549b0f3235946d1 Mon Sep 17 00:00:00 2001 From: Ruben De Smet Date: Tue, 21 Apr 2026 22:45:06 +0200 Subject: [PATCH 32/73] feat: add Mistral Small reasoning variant support (issue #19479) (#23735) --- packages/opencode/src/provider/transform.ts | 10 ++++-- .../opencode/test/provider/transform.test.ts | 35 ++++++++++++++++++- 2 files changed, 42 insertions(+), 3 deletions(-) diff --git a/packages/opencode/src/provider/transform.ts b/packages/opencode/src/provider/transform.ts index 863f012731..1d84c7c931 100644 --- a/packages/opencode/src/provider/transform.ts +++ b/packages/opencode/src/provider/transform.ts @@ -408,7 +408,6 @@ export function variants(model: Provider.Model): Record { expect(result).toEqual({}) }) - test("mistral returns empty object", () => { + test("mistral with reasoning returns variants", () => { + const model = createMockModel({ + id: "mistral/mistral-small-latest", + providerID: "mistral", + api: { + id: "mistral-small-latest", + url: "https://api.mistral.com", + npm: "@ai-sdk/mistral", + }, + capabilities: { reasoning: true }, + }) + const result = ProviderTransform.variants(model) + expect(result).toEqual({ + high: { reasoningEffort: "high" }, + }) + }) + + test("mistral without reasoning returns empty object", () => { const model = createMockModel({ id: "mistral/mistral-large", providerID: "mistral", @@ -2122,6 +2139,22 @@ describe("ProviderTransform.variants", () => { url: "https://api.mistral.com", npm: "@ai-sdk/mistral", }, + capabilities: { reasoning: false }, + }) + const result = 
ProviderTransform.variants(model) + expect(result).toEqual({}) + }) + + test("mistral large with reasoning returns empty object (only small supports reasoning)", () => { + const model = createMockModel({ + id: "mistral/mistral-large", + providerID: "mistral", + api: { + id: "mistral-large-latest", + url: "https://api.mistral.com", + npm: "@ai-sdk/mistral", + }, + capabilities: { reasoning: true }, }) const result = ProviderTransform.variants(model) expect(result).toEqual({}) From caaddf096424193ef3cba4bf2a8f9ae6876915bc Mon Sep 17 00:00:00 2001 From: Frank Date: Tue, 21 Apr 2026 17:06:29 -0400 Subject: [PATCH 33/73] zen: ling 2.6 free --- packages/web/src/content/docs/ar/zen.mdx | 8 ++++++-- packages/web/src/content/docs/bs/zen.mdx | 10 +++++++--- packages/web/src/content/docs/da/zen.mdx | 10 +++++++--- packages/web/src/content/docs/de/zen.mdx | 8 ++++++-- packages/web/src/content/docs/es/zen.mdx | 10 +++++++--- packages/web/src/content/docs/fr/zen.mdx | 8 ++++++-- packages/web/src/content/docs/it/zen.mdx | 10 +++++++--- packages/web/src/content/docs/ja/zen.mdx | 8 ++++++-- packages/web/src/content/docs/ko/zen.mdx | 8 ++++++-- packages/web/src/content/docs/nb/zen.mdx | 10 +++++++--- packages/web/src/content/docs/pl/zen.mdx | 10 +++++++--- packages/web/src/content/docs/pt-br/zen.mdx | 8 ++++++-- packages/web/src/content/docs/ru/zen.mdx | 10 +++++++--- packages/web/src/content/docs/th/zen.mdx | 8 ++++++-- packages/web/src/content/docs/tr/zen.mdx | 8 ++++++-- packages/web/src/content/docs/zen.mdx | 10 +++++++--- packages/web/src/content/docs/zh-cn/zen.mdx | 8 ++++++-- packages/web/src/content/docs/zh-tw/zen.mdx | 8 ++++++-- 18 files changed, 116 insertions(+), 44 deletions(-) diff --git a/packages/web/src/content/docs/ar/zen.mdx b/packages/web/src/content/docs/ar/zen.mdx index 6150c72951..c30cf3287f 100644 --- a/packages/web/src/content/docs/ar/zen.mdx +++ b/packages/web/src/content/docs/ar/zen.mdx @@ -95,9 +95,10 @@ OpenCode Zen هي بوابة AI تتيح لك الوصول إلى هذه الن 
| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Ling 2.6 Flash | ling-2.6-flash | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | -يستخدم [معرّف النموذج](/docs/config/#models) في إعدادات OpenCode الصيغة `opencode/`. على سبيل المثال، بالنسبة إلى GPT 5.3 Codex، ستستخدم `opencode/gpt-5.3-codex` في إعداداتك. +يستخدم [معرّف النموذج](/docs/config/#models) في إعدادات OpenCode الصيغة `opencode/`. على سبيل المثال، بالنسبة إلى GPT 5.4، ستستخدم `opencode/gpt-5.4` في إعداداتك. --- @@ -118,8 +119,9 @@ https://opencode.ai/zen/v1/models | النموذج | الإدخال | الإخراج | القراءة المخزنة | الكتابة المخزنة | | --------------------------------- | ------- | ------- | --------------- | --------------- | | Big Pickle | Free | Free | Free | - | -| Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.5 Free | Free | Free | Free | - | +| Ling 2.6 Flash Free | Free | Free | Free | - | +| Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.7 | $0.30 | $1.20 | $0.06 | $0.375 | | MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 | | GLM 5.1 | $1.40 | $4.40 | $0.26 | - | @@ -167,6 +169,7 @@ https://opencode.ai/zen/v1/models النماذج المجانية: - MiniMax M2.5 Free متاح على OpenCode لفترة محدودة. يستخدم الفريق هذه الفترة لجمع الملاحظات وتحسين النموذج. +- Ling 2.6 Flash Free متاح على OpenCode لفترة محدودة. يستخدم الفريق هذه الفترة لجمع الملاحظات وتحسين النموذج. - Nemotron 3 Super Free متاح على OpenCode لفترة محدودة. يستخدم الفريق هذه الفترة لجمع الملاحظات وتحسين النموذج. - Big Pickle نموذج خفي ومتاح مجانا على OpenCode لفترة محدودة. 
يستخدم الفريق هذه الفترة لجمع الملاحظات وتحسين النموذج. @@ -210,6 +213,7 @@ https://opencode.ai/zen/v1/models - Big Pickle: خلال فترته المجانية، قد تُستخدم البيانات المجمعة لتحسين النموذج. - MiniMax M2.5 Free: خلال فترته المجانية، قد تُستخدم البيانات المجمعة لتحسين النموذج. +- Ling 2.6 Flash Free: خلال فترته المجانية، قد تُستخدم البيانات المجمعة لتحسين النموذج. - Nemotron 3 Super Free (نقاط نهاية NVIDIA المجانية): يُقدَّم بموجب [شروط خدمة النسخة التجريبية من واجهة NVIDIA API](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf). للاستخدام التجريبي فقط، وليس للإنتاج أو البيانات الحساسة. تقوم NVIDIA بتسجيل المطالبات والمخرجات لتحسين نماذجها وخدماتها. لا ترسل بيانات شخصية أو سرية. - OpenAI APIs: يتم الاحتفاظ بالطلبات لمدة 30 يوما وفقا لـ [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data). - Anthropic APIs: يتم الاحتفاظ بالطلبات لمدة 30 يوما وفقا لـ [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage). diff --git a/packages/web/src/content/docs/bs/zen.mdx b/packages/web/src/content/docs/bs/zen.mdx index 70ac7641f4..b6fc90fa03 100644 --- a/packages/web/src/content/docs/bs/zen.mdx +++ b/packages/web/src/content/docs/bs/zen.mdx @@ -100,11 +100,12 @@ Našim modelima možete pristupiti i preko sljedećih API endpointa. | Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Ling 2.6 Flash | ling-2.6-flash | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | [model id](/docs/config/#models) u vašoj OpenCode konfiguraciji koristi format -`opencode/`. 
Na primjer, za GPT 5.3 Codex u konfiguraciji biste -koristili `opencode/gpt-5.3-codex`. +`opencode/`. Na primjer, za GPT 5.4 u konfiguraciji biste +koristili `opencode/gpt-5.4`. --- @@ -125,8 +126,9 @@ Podržavamo pay-as-you-go model. Ispod su cijene **po 1M tokena**. | Model | Input | Output | Cached Read | Cached Write | | --------------------------------- | ------ | ------- | ----------- | ------------ | | Big Pickle | Free | Free | Free | - | -| Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.5 Free | Free | Free | Free | - | +| Ling 2.6 Flash Free | Free | Free | Free | - | +| Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.7 | $0.30 | $1.20 | $0.06 | $0.375 | | MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 | | GLM 5.1 | $1.40 | $4.40 | $0.26 | - | @@ -174,6 +176,7 @@ Naknade za kreditne kartice prosljeđujemo po stvarnom trošku (4.4% + $0.30 po Besplatni modeli: - MiniMax M2.5 Free je dostupan na OpenCode ograničeno vrijeme. Tim koristi ovo vrijeme da prikupi povratne informacije i poboljša model. +- Ling 2.6 Flash Free je dostupan na OpenCode ograničeno vrijeme. Tim koristi ovo vrijeme da prikupi povratne informacije i poboljša model. - Nemotron 3 Super Free je dostupan na OpenCode ograničeno vrijeme. Tim koristi ovo vrijeme da prikupi povratne informacije i poboljša model. - Big Pickle je stealth model koji je besplatan na OpenCode ograničeno vrijeme. Tim koristi ovo vrijeme da prikupi povratne informacije i poboljša model. @@ -222,6 +225,7 @@ i ne koriste vaše podatke za treniranje modela, uz sljedeće izuzetke: - Big Pickle: Tokom besplatnog perioda, prikupljeni podaci mogu se koristiti za poboljšanje modela. - MiniMax M2.5 Free: Tokom besplatnog perioda, prikupljeni podaci mogu se koristiti za poboljšanje modela. +- Ling 2.6 Flash Free: Tokom besplatnog perioda, prikupljeni podaci mogu se koristiti za poboljšanje modela. 
- Nemotron 3 Super Free (besplatni NVIDIA endpointi): Dostupan je prema [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf). Samo za probnu upotrebu, nije za produkciju niti osjetljive podatke. NVIDIA bilježi promptove i izlaze radi poboljšanja svojih modela i usluga. Nemojte slati lične ili povjerljive podatke. - OpenAI APIs: Requests are retained for 30 days in accordance with [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data). - Anthropic APIs: Requests are retained for 30 days in accordance with [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage). diff --git a/packages/web/src/content/docs/da/zen.mdx b/packages/web/src/content/docs/da/zen.mdx index c497f35b7b..ea5189ec21 100644 --- a/packages/web/src/content/docs/da/zen.mdx +++ b/packages/web/src/content/docs/da/zen.mdx @@ -100,11 +100,12 @@ Du kan også få adgang til vores modeller gennem følgende API-endpoints. | Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Ling 2.6 Flash | ling-2.6-flash | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | [model id](/docs/config/#models) i din OpenCode-konfiguration -bruger formatet `opencode/`. For eksempel ville du for GPT 5.3 Codex -bruge `opencode/gpt-5.3-codex` i din konfiguration. +bruger formatet `opencode/`. For eksempel ville du for GPT 5.4 +bruge `opencode/gpt-5.4` i din konfiguration. --- @@ -125,8 +126,9 @@ Vi understøtter en pay-as-you-go-model. Nedenfor er priserne **pr. 1M tokens**. 
| Model | Input | Output | Cached Read | Cached Write | | --------------------------------- | ------ | ------- | ----------- | ------------ | | Big Pickle | Free | Free | Free | - | -| Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.5 Free | Free | Free | Free | - | +| Ling 2.6 Flash Free | Free | Free | Free | - | +| Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.7 | $0.30 | $1.20 | $0.06 | $0.375 | | MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 | | GLM 5.1 | $1.40 | $4.40 | $0.26 | - | @@ -174,6 +176,7 @@ Kreditkortgebyrer videregives til kostpris (4.4% + $0.30 pr. transaktion); vi op De gratis modeller: - MiniMax M2.5 Free er tilgængelig på OpenCode i en begrænset periode. Teamet bruger denne tid til at indsamle feedback og forbedre modellen. +- Ling 2.6 Flash Free er tilgængelig på OpenCode i en begrænset periode. Teamet bruger denne tid til at indsamle feedback og forbedre modellen. - Nemotron 3 Super Free er tilgængelig på OpenCode i en begrænset periode. Teamet bruger denne tid til at indsamle feedback og forbedre modellen. - Big Pickle er en stealth-model, som er gratis på OpenCode i en begrænset periode. Teamet bruger denne tid til at indsamle feedback og forbedre modellen. @@ -220,6 +223,7 @@ Alle vores modeller hostes i US. Vores udbydere følger en nul-opbevaringspoliti - Big Pickle: I den gratis periode kan indsamlede data blive brugt til at forbedre modellen. - MiniMax M2.5 Free: I den gratis periode kan indsamlede data blive brugt til at forbedre modellen. +- Ling 2.6 Flash Free: I den gratis periode kan indsamlede data blive brugt til at forbedre modellen. - Nemotron 3 Super Free (gratis NVIDIA-endpoints): Leveres under [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf). Kun til prøvebrug, ikke til produktion eller følsomme data. Prompts og outputs logges af NVIDIA for at forbedre deres modeller og tjenester. 
Indsend ikke personlige eller fortrolige data. - OpenAI APIs: Anmodninger opbevares i 30 dage i overensstemmelse med [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data). - Anthropic APIs: Anmodninger opbevares i 30 dage i overensstemmelse med [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage). diff --git a/packages/web/src/content/docs/de/zen.mdx b/packages/web/src/content/docs/de/zen.mdx index 0dfc728501..b8a09497c1 100644 --- a/packages/web/src/content/docs/de/zen.mdx +++ b/packages/web/src/content/docs/de/zen.mdx @@ -91,9 +91,10 @@ Du kannst auch über die folgenden API-Endpunkte auf unsere Modelle zugreifen. | Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Ling 2.6 Flash | ling-2.6-flash | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | -Die [Model-ID](/docs/config/#models) in deiner OpenCode-Konfiguration verwendet das Format `opencode/`. Für GPT 5.3 Codex würdest du zum Beispiel `opencode/gpt-5.3-codex` in deiner Konfiguration verwenden. +Die [Model-ID](/docs/config/#models) in deiner OpenCode-Konfiguration verwendet das Format `opencode/`. Für GPT 5.4 würdest du zum Beispiel `opencode/gpt-5.4` in deiner Konfiguration verwenden. --- @@ -114,8 +115,9 @@ Wir unterstützen ein Pay-as-you-go-Modell. 
Unten findest du die Preise **pro 1M | Model | Input | Output | Cached Read | Cached Write | | --------------------------------- | ------ | ------- | ----------- | ------------ | | Big Pickle | Free | Free | Free | - | -| Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.5 Free | Free | Free | Free | - | +| Ling 2.6 Flash Free | Free | Free | Free | - | +| Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.7 | $0.30 | $1.20 | $0.06 | $0.375 | | MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 | | GLM 5.1 | $1.40 | $4.40 | $0.26 | - | @@ -163,6 +165,7 @@ Kreditkartengebühren werden zum Selbstkostenpreis weitergegeben (4.4% + $0.30 p Die kostenlosen Modelle: - MiniMax M2.5 Free ist für begrenzte Zeit auf OpenCode verfügbar. Das Team nutzt diese Zeit, um Feedback zu sammeln und das Modell zu verbessern. +- Ling 2.6 Flash Free ist für begrenzte Zeit auf OpenCode verfügbar. Das Team nutzt diese Zeit, um Feedback zu sammeln und das Modell zu verbessern. - Nemotron 3 Super Free ist für begrenzte Zeit auf OpenCode verfügbar. Das Team nutzt diese Zeit, um Feedback zu sammeln und das Modell zu verbessern. - Big Pickle ist ein Stealth-Modell, das für begrenzte Zeit kostenlos auf OpenCode verfügbar ist. Das Team nutzt diese Zeit, um Feedback zu sammeln und das Modell zu verbessern. @@ -206,6 +209,7 @@ Alle unsere Modelle werden in den USA gehostet. Unsere Provider folgen einer Zer - Big Pickle: Während des kostenlosen Zeitraums können gesammelte Daten zur Verbesserung des Modells verwendet werden. - MiniMax M2.5 Free: Während des kostenlosen Zeitraums können gesammelte Daten zur Verbesserung des Modells verwendet werden. +- Ling 2.6 Flash Free: Während des kostenlosen Zeitraums können gesammelte Daten zur Verbesserung des Modells verwendet werden. 
- Nemotron 3 Super Free (kostenlose NVIDIA-Endpunkte): Bereitgestellt gemäß den [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf). Nur für Testzwecke, nicht für Produktion oder sensible Daten. Eingaben und Ausgaben werden von NVIDIA protokolliert, um seine Modelle und Dienste zu verbessern. Übermitteln Sie keine personenbezogenen oder vertraulichen Daten. - OpenAI APIs: Anfragen werden in Übereinstimmung mit [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data) 30 Tage lang gespeichert. - Anthropic APIs: Anfragen werden in Übereinstimmung mit [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage) 30 Tage lang gespeichert. diff --git a/packages/web/src/content/docs/es/zen.mdx b/packages/web/src/content/docs/es/zen.mdx index 4a2866aa79..b8999a050f 100644 --- a/packages/web/src/content/docs/es/zen.mdx +++ b/packages/web/src/content/docs/es/zen.mdx @@ -100,11 +100,12 @@ También puedes acceder a nuestros modelos a través de los siguientes endpoints | Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Ling 2.6 Flash | ling-2.6-flash | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | El [identificador del modelo](/docs/config/#models) en tu configuración de OpenCode -usa el formato `opencode/`. Por ejemplo, para GPT 5.3 Codex, usarías -`opencode/gpt-5.3-codex` en tu configuración. +usa el formato `opencode/`. Por ejemplo, para GPT 5.4, usarías +`opencode/gpt-5.4` en tu configuración. 
--- @@ -125,8 +126,9 @@ Admitimos un modelo de pago por uso. A continuación se muestran los precios **p | Modelo | Entrada | Salida | Lectura en caché | Escritura en caché | | --------------------------------- | ------- | ------- | ---------------- | ------------------ | | Big Pickle | Free | Free | Free | - | -| Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.5 Free | Free | Free | Free | - | +| Ling 2.6 Flash Free | Free | Free | Free | - | +| Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.7 | $0.30 | $1.20 | $0.06 | $0.375 | | MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 | | GLM 5.1 | $1.40 | $4.40 | $0.26 | - | @@ -174,6 +176,7 @@ Las comisiones de tarjeta de crédito se trasladan al costo (4.4% + $0.30 por tr Los modelos gratuitos: - MiniMax M2.5 Free está disponible en OpenCode por tiempo limitado. El equipo está usando este tiempo para recopilar comentarios y mejorar el modelo. +- Ling 2.6 Flash Free está disponible en OpenCode por tiempo limitado. El equipo está usando este tiempo para recopilar comentarios y mejorar el modelo. - Nemotron 3 Super Free está disponible en OpenCode por tiempo limitado. El equipo está usando este tiempo para recopilar comentarios y mejorar el modelo. - Big Pickle es un modelo stealth que es gratuito en OpenCode por tiempo limitado. El equipo está usando este tiempo para recopilar comentarios y mejorar el modelo. @@ -220,6 +223,7 @@ Todos nuestros modelos están alojados en US. Nuestros proveedores siguen una po - Big Pickle: Durante su período gratuito, los datos recopilados pueden usarse para mejorar el modelo. - MiniMax M2.5 Free: Durante su período gratuito, los datos recopilados pueden usarse para mejorar el modelo. +- Ling 2.6 Flash Free: Durante su período gratuito, los datos recopilados pueden usarse para mejorar el modelo. 
- Nemotron 3 Super Free (endpoints gratuitos de NVIDIA): Se ofrece bajo los [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf). Solo para uso de prueba, no para producción ni datos sensibles. NVIDIA registra los prompts y las salidas para mejorar sus modelos y servicios. No envíes datos personales ni confidenciales. - OpenAI APIs: Las solicitudes se conservan durante 30 días de acuerdo con [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data). - Anthropic APIs: Las solicitudes se conservan durante 30 días de acuerdo con [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage). diff --git a/packages/web/src/content/docs/fr/zen.mdx b/packages/web/src/content/docs/fr/zen.mdx index 247ae00d2d..ab759f053d 100644 --- a/packages/web/src/content/docs/fr/zen.mdx +++ b/packages/web/src/content/docs/fr/zen.mdx @@ -91,9 +91,10 @@ Vous pouvez également accéder à nos modèles via les points de terminaison AP | Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Ling 2.6 Flash | ling-2.6-flash | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | -Le [model id](/docs/config/#models) dans votre configuration OpenCode utilise le format `opencode/`. Par exemple, pour GPT 5.3 Codex, vous utiliseriez `opencode/gpt-5.3-codex` dans votre configuration. +Le [model id](/docs/config/#models) dans votre configuration OpenCode utilise le format `opencode/`. 
Par exemple, pour GPT 5.4, vous utiliseriez `opencode/gpt-5.4` dans votre configuration. --- @@ -114,8 +115,9 @@ Nous prenons en charge un modèle de paiement à l'utilisation. Vous trouverez c | Modèle | Input | Output | Cached Read | Cached Write | | --------------------------------- | ------ | ------- | ----------- | ------------ | | Big Pickle | Free | Free | Free | - | -| Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.5 Free | Free | Free | Free | - | +| Ling 2.6 Flash Free | Free | Free | Free | - | +| Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.7 | $0.30 | $1.20 | $0.06 | $0.375 | | MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 | | GLM 5.1 | $1.40 | $4.40 | $0.26 | - | @@ -163,6 +165,7 @@ Les frais de carte de crédit sont répercutés au prix coûtant (4.4% + $0.30 p Les modèles gratuits : - MiniMax M2.5 Free est disponible sur OpenCode pour une durée limitée. L'équipe utilise cette période pour recueillir des retours et améliorer le modèle. +- Ling 2.6 Flash Free est disponible sur OpenCode pour une durée limitée. L'équipe utilise cette période pour recueillir des retours et améliorer le modèle. - Nemotron 3 Super Free est disponible sur OpenCode pour une durée limitée. L'équipe utilise cette période pour recueillir des retours et améliorer le modèle. - Big Pickle est un modèle stealth gratuit sur OpenCode pour une durée limitée. L'équipe utilise cette période pour recueillir des retours et améliorer le modèle. @@ -206,6 +209,7 @@ Tous nos modèles sont hébergés aux US. Nos fournisseurs suivent une politique - Big Pickle : Pendant sa période gratuite, les données collectées peuvent être utilisées pour améliorer le modèle. - MiniMax M2.5 Free : Pendant sa période gratuite, les données collectées peuvent être utilisées pour améliorer le modèle. +- Ling 2.6 Flash Free : Pendant sa période gratuite, les données collectées peuvent être utilisées pour améliorer le modèle. 
- Nemotron 3 Super Free (endpoints NVIDIA gratuits) : Fourni dans le cadre des [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf). Réservé à un usage d'essai, pas à la production ni aux données sensibles. Les prompts et les sorties sont journalisés par NVIDIA pour améliorer ses modèles et services. N'envoyez pas de données personnelles ou confidentielles. - OpenAI APIs : Les requêtes sont conservées pendant 30 jours conformément à [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data). - Anthropic APIs : Les requêtes sont conservées pendant 30 jours conformément à [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage). diff --git a/packages/web/src/content/docs/it/zen.mdx b/packages/web/src/content/docs/it/zen.mdx index 0922c51a5a..38c69b021e 100644 --- a/packages/web/src/content/docs/it/zen.mdx +++ b/packages/web/src/content/docs/it/zen.mdx @@ -100,11 +100,12 @@ Puoi anche accedere ai nostri modelli tramite i seguenti endpoint API. | Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Ling 2.6 Flash | ling-2.6-flash | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | Il [model id](/docs/config/#models) nella config di OpenCode -usa il formato `opencode/`. Per esempio, per GPT 5.3 Codex, useresti -`opencode/gpt-5.3-codex` nella tua config. +usa il formato `opencode/`. Per esempio, per GPT 5.4, useresti +`opencode/gpt-5.4` nella tua config. --- @@ -125,8 +126,9 @@ Supportiamo un modello pay-as-you-go. 
Qui sotto trovi i prezzi **per 1M token**. | Modello | Input | Output | Cached Read | Cached Write | | --------------------------------- | ------ | ------- | ----------- | ------------ | | Big Pickle | Free | Free | Free | - | -| Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.5 Free | Free | Free | Free | - | +| Ling 2.6 Flash Free | Free | Free | Free | - | +| Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.7 | $0.30 | $1.20 | $0.06 | $0.375 | | MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 | | GLM 5.1 | $1.40 | $4.40 | $0.26 | - | @@ -174,6 +176,7 @@ Le commissioni della carta di credito vengono trasferite al costo (4.4% + $0.30 I modelli gratuiti: - MiniMax M2.5 Free è disponibile su OpenCode per un periodo limitato. Il team usa questo periodo per raccogliere feedback e migliorare il modello. +- Ling 2.6 Flash Free è disponibile su OpenCode per un periodo limitato. Il team usa questo periodo per raccogliere feedback e migliorare il modello. - Nemotron 3 Super Free è disponibile su OpenCode per un periodo limitato. Il team usa questo periodo per raccogliere feedback e migliorare il modello. - Big Pickle è un modello stealth che è gratuito su OpenCode per un periodo limitato. Il team usa questo periodo per raccogliere feedback e migliorare il modello. @@ -220,6 +223,7 @@ Tutti i nostri modelli sono ospitati negli US. I nostri provider seguono una pol - Big Pickle: durante il periodo gratuito, i dati raccolti possono essere usati per migliorare il modello. - MiniMax M2.5 Free: durante il periodo gratuito, i dati raccolti possono essere usati per migliorare il modello. +- Ling 2.6 Flash Free: durante il periodo gratuito, i dati raccolti possono essere usati per migliorare il modello. - Nemotron 3 Super Free (endpoint NVIDIA gratuiti): fornito secondo i [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf). 
Solo per uso di prova, non per produzione o dati sensibili. NVIDIA registra prompt e output per migliorare i propri modelli e servizi. Non inviare dati personali o riservati. - OpenAI APIs: le richieste vengono conservate per 30 giorni in conformità con [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data). - Anthropic APIs: le richieste vengono conservate per 30 giorni in conformità con [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage). diff --git a/packages/web/src/content/docs/ja/zen.mdx b/packages/web/src/content/docs/ja/zen.mdx index 7419bd4c4b..8f95473d6d 100644 --- a/packages/web/src/content/docs/ja/zen.mdx +++ b/packages/web/src/content/docs/ja/zen.mdx @@ -91,9 +91,10 @@ OpenCode Zen は、OpenCode のほかのプロバイダーと同じように動 | Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Ling 2.6 Flash | ling-2.6-flash | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | -OpenCode 設定で使う [model id](/docs/config/#models) は `opencode/` 形式です。たとえば、GPT 5.3 Codex では設定に `opencode/gpt-5.3-codex` を使用します。 +OpenCode 設定で使う [model id](/docs/config/#models) は `opencode/` 形式です。たとえば、GPT 5.4 では設定に `opencode/gpt-5.4` を使用します。 --- @@ -114,8 +115,9 @@ https://opencode.ai/zen/v1/models | Model | Input | Output | Cached Read | Cached Write | | --------------------------------- | ------ | ------- | ----------- | ------------ | | Big Pickle | Free | Free | Free | - | -| Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.5 Free | Free | Free | Free | - | +| Ling 2.6 Flash Free | Free | Free | Free | - | +| Nemotron 3 Super Free 
| Free | Free | Free | - | | MiniMax M2.7 | $0.30 | $1.20 | $0.06 | $0.375 | | MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 | | GLM 5.1 | $1.40 | $4.40 | $0.26 | - | @@ -163,6 +165,7 @@ https://opencode.ai/zen/v1/models 無料モデル: - MiniMax M2.5 Free は期間限定で OpenCode で利用できます。チームはこの期間中にフィードバックを集め、モデルを改善しています。 +- Ling 2.6 Flash Free は期間限定で OpenCode で利用できます。チームはこの期間中にフィードバックを集め、モデルを改善しています。 - Nemotron 3 Super Free は期間限定で OpenCode で利用できます。チームはこの期間中にフィードバックを集め、モデルを改善しています。 - Big Pickle はステルスモデルで、期間限定で OpenCode で無料提供されています。チームはこの期間中にフィードバックを集め、モデルを改善しています。 @@ -206,6 +209,7 @@ https://opencode.ai/zen/v1/models - Big Pickle: 無料提供期間中、収集されたデータがモデル改善に使われる場合があります。 - MiniMax M2.5 Free: 無料提供期間中、収集されたデータがモデル改善に使われる場合があります。 +- Ling 2.6 Flash Free: 無料提供期間中、収集されたデータがモデル改善に使われる場合があります。 - Nemotron 3 Super Free(NVIDIA の無料エンドポイント): [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf) に基づいて提供されます。試用専用であり、本番環境や機密性の高いデータには使用しないでください。プロンプトと出力は、NVIDIA が自社のモデルとサービスを改善するために記録します。個人情報や機密データは送信しないでください。 - OpenAI APIs: リクエストは [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data) に従って 30 日間保持されます。 - Anthropic APIs: リクエストは [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage) に従って 30 日間保持されます。 diff --git a/packages/web/src/content/docs/ko/zen.mdx b/packages/web/src/content/docs/ko/zen.mdx index 3d796ee992..59d7448061 100644 --- a/packages/web/src/content/docs/ko/zen.mdx +++ b/packages/web/src/content/docs/ko/zen.mdx @@ -91,9 +91,10 @@ OpenCode Zen은 OpenCode의 다른 provider와 똑같이 작동합니다. 
| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Ling 2.6 Flash | ling-2.6-flash | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | -OpenCode config에서 사용하는 [모델 ID](/docs/config/#models)는 `opencode/` 형식입니다. 예를 들어 GPT 5.3 Codex를 사용하려면 config에서 `opencode/gpt-5.3-codex`를 사용하면 됩니다. +OpenCode config에서 사용하는 [모델 ID](/docs/config/#models)는 `opencode/` 형식입니다. 예를 들어 GPT 5.4를 사용하려면 config에서 `opencode/gpt-5.4`를 사용하면 됩니다. --- @@ -114,8 +115,9 @@ https://opencode.ai/zen/v1/models | 모델 | 입력 | 출력 | Cached Read | Cached Write | | --------------------------------- | ------ | ------- | ----------- | ------------ | | Big Pickle | Free | Free | Free | - | -| Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.5 Free | Free | Free | Free | - | +| Ling 2.6 Flash Free | Free | Free | Free | - | +| Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.7 | $0.30 | $1.20 | $0.06 | $0.375 | | MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 | | GLM 5.1 | $1.40 | $4.40 | $0.26 | - | @@ -163,6 +165,7 @@ https://opencode.ai/zen/v1/models 무료 모델: - MiniMax M2.5 Free는 한정된 기간 동안 OpenCode에서 제공됩니다. 팀은 이 기간에 피드백을 수집하고 모델을 개선합니다. +- Ling 2.6 Flash Free는 한정된 기간 동안 OpenCode에서 제공됩니다. 팀은 이 기간에 피드백을 수집하고 모델을 개선합니다. - Nemotron 3 Super Free는 한정된 기간 동안 OpenCode에서 제공됩니다. 팀은 이 기간에 피드백을 수집하고 모델을 개선합니다. - Big Pickle은 한정된 기간 동안 OpenCode에서 무료로 제공되는 stealth model입니다. 팀은 이 기간에 피드백을 수집하고 모델을 개선합니다. @@ -206,6 +209,7 @@ https://opencode.ai/zen/v1/models - Big Pickle: 무료 제공 기간에는 수집된 데이터가 모델 개선에 사용될 수 있습니다. - MiniMax M2.5 Free: 무료 제공 기간에는 수집된 데이터가 모델 개선에 사용될 수 있습니다. 
+- Ling 2.6 Flash Free: 무료 제공 기간에는 수집된 데이터가 모델 개선에 사용될 수 있습니다. - Nemotron 3 Super Free(NVIDIA 무료 엔드포인트): [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf)에 따라 제공됩니다. 평가판 전용이며 프로덕션 환경이나 민감한 데이터에는 사용할 수 없습니다. NVIDIA는 자사 모델과 서비스를 개선하기 위해 프롬프트와 출력을 기록합니다. 개인 정보나 기밀 데이터는 제출하지 마세요. - OpenAI APIs: 요청은 [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data)에 따라 30일 동안 보관됩니다. - Anthropic APIs: 요청은 [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage)에 따라 30일 동안 보관됩니다. diff --git a/packages/web/src/content/docs/nb/zen.mdx b/packages/web/src/content/docs/nb/zen.mdx index 139d13da8f..15c942732a 100644 --- a/packages/web/src/content/docs/nb/zen.mdx +++ b/packages/web/src/content/docs/nb/zen.mdx @@ -100,11 +100,12 @@ Du kan også få tilgang til modellene våre gjennom følgende API-endepunkter. | Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Ling 2.6 Flash | ling-2.6-flash | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | [modell-id](/docs/config/#models) i OpenCode-konfigurasjonen din -bruker formatet `opencode/`. For eksempel, for GPT 5.3 Codex, ville du -brukt `opencode/gpt-5.3-codex` i konfigurasjonen din. +bruker formatet `opencode/`. For eksempel, for GPT 5.4, ville du +brukt `opencode/gpt-5.4` i konfigurasjonen din. --- @@ -125,8 +126,9 @@ Vi støtter en pay-as-you-go-modell. Nedenfor er prisene **per 1M tokens**. 
| Modell | Inndata | Utdata | Bufret lesing | Bufret skriving | | --------------------------------- | ------- | ------- | ------------- | --------------- | | Big Pickle | Free | Free | Free | - | -| Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.5 Free | Free | Free | Free | - | +| Ling 2.6 Flash Free | Free | Free | Free | - | +| Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.7 | $0.30 | $1.20 | $0.06 | $0.375 | | MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 | | GLM 5.1 | $1.40 | $4.40 | $0.26 | - | @@ -174,6 +176,7 @@ Kredittkortgebyrer videreføres til kostpris (4.4% + $0.30 per transaction); vi Gratis-modellene: - MiniMax M2.5 Free er tilgjengelig på OpenCode i en begrenset periode. Teamet bruker denne tiden til å samle inn tilbakemeldinger og forbedre modellen. +- Ling 2.6 Flash Free er tilgjengelig på OpenCode i en begrenset periode. Teamet bruker denne tiden til å samle inn tilbakemeldinger og forbedre modellen. - Nemotron 3 Super Free er tilgjengelig på OpenCode i en begrenset periode. Teamet bruker denne tiden til å samle inn tilbakemeldinger og forbedre modellen. - Big Pickle er en stealth-modell som er gratis på OpenCode i en begrenset periode. Teamet bruker denne tiden til å samle inn tilbakemeldinger og forbedre modellen. @@ -220,6 +223,7 @@ Alle modellene våre hostes i US. Leverandørene våre følger en policy for zer - Big Pickle: I gratisperioden kan innsamlede data brukes til å forbedre modellen. - MiniMax M2.5 Free: I gratisperioden kan innsamlede data brukes til å forbedre modellen. +- Ling 2.6 Flash Free: I gratisperioden kan innsamlede data brukes til å forbedre modellen. - Nemotron 3 Super Free (gratis NVIDIA-endepunkter): Leveres under [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf). Kun for prøvebruk, ikke for produksjon eller sensitive data. Prompter og svar logges av NVIDIA for å forbedre modellene og tjenestene deres. 
Ikke send inn personopplysninger eller konfidensielle data. - OpenAI APIs: Forespørsler lagres i 30 dager i samsvar med [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data). - Anthropic APIs: Forespørsler lagres i 30 dager i samsvar med [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage). diff --git a/packages/web/src/content/docs/pl/zen.mdx b/packages/web/src/content/docs/pl/zen.mdx index 42a9bb3d13..483a3cedfd 100644 --- a/packages/web/src/content/docs/pl/zen.mdx +++ b/packages/web/src/content/docs/pl/zen.mdx @@ -100,11 +100,12 @@ Możesz też uzyskać dostęp do naszych modeli przez poniższe endpointy API. | Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Ling 2.6 Flash | ling-2.6-flash | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | [ID modelu](/docs/config/#models) w Twojej konfiguracji OpenCode używa formatu -`opencode/`. Na przykład dla GPT 5.3 Codex użyjesz w konfiguracji -`opencode/gpt-5.3-codex`. +`opencode/`. Na przykład dla GPT 5.4 użyjesz w konfiguracji +`opencode/gpt-5.4`. --- @@ -125,8 +126,9 @@ Obsługujemy model pay-as-you-go. 
Poniżej znajdują się ceny **za 1M tokenów* | Model | Wejście | Wyjście | Odczyt z cache | Zapis do cache | | --------------------------------- | ------- | ------- | -------------- | -------------- | | Big Pickle | Free | Free | Free | - | -| Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.5 Free | Free | Free | Free | - | +| Ling 2.6 Flash Free | Free | Free | Free | - | +| Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.7 | $0.30 | $1.20 | $0.06 | $0.375 | | MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 | | GLM 5.1 | $1.40 | $4.40 | $0.26 | - | @@ -175,6 +177,7 @@ Opłaty za karty kredytowe są przenoszone po kosztach (4.4% + $0.30 per transac Darmowe modele: - MiniMax M2.5 Free jest dostępny w OpenCode przez ograniczony czas. Zespół wykorzystuje ten czas do zbierania opinii i ulepszania modelu. +- Ling 2.6 Flash Free jest dostępny w OpenCode przez ograniczony czas. Zespół wykorzystuje ten czas do zbierania opinii i ulepszania modelu. - Nemotron 3 Super Free jest dostępny w OpenCode przez ograniczony czas. Zespół wykorzystuje ten czas do zbierania opinii i ulepszania modelu. - Big Pickle to stealth model, który jest darmowy w OpenCode przez ograniczony czas. Zespół wykorzystuje ten czas do zbierania opinii i ulepszania modelu. @@ -221,6 +224,7 @@ Wszystkie nasze modele są hostowane w US. Nasi dostawcy stosują politykę zero - Big Pickle: W czasie darmowego okresu zebrane dane mogą być wykorzystywane do ulepszania modelu. - MiniMax M2.5 Free: W czasie darmowego okresu zebrane dane mogą być wykorzystywane do ulepszania modelu. +- Ling 2.6 Flash Free: W czasie darmowego okresu zebrane dane mogą być wykorzystywane do ulepszania modelu. - Nemotron 3 Super Free (darmowe endpointy NVIDIA): Udostępniany zgodnie z [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf). Tylko do użytku próbnego, nie do produkcji ani danych wrażliwych. 
NVIDIA rejestruje prompty i odpowiedzi, aby ulepszać swoje modele i usługi. Nie przesyłaj danych osobowych ani poufnych. - OpenAI APIs: Żądania są przechowywane przez 30 dni zgodnie z [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data). - Anthropic APIs: Żądania są przechowywane przez 30 dni zgodnie z [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage). diff --git a/packages/web/src/content/docs/pt-br/zen.mdx b/packages/web/src/content/docs/pt-br/zen.mdx index a2bb269ce1..1112066e27 100644 --- a/packages/web/src/content/docs/pt-br/zen.mdx +++ b/packages/web/src/content/docs/pt-br/zen.mdx @@ -91,9 +91,10 @@ Você também pode acessar nossos modelos pelos seguintes endpoints de API. | Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Ling 2.6 Flash | ling-2.6-flash | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | -O [model id](/docs/config/#models) na sua configuração do OpenCode usa o formato `opencode/`. Por exemplo, para GPT 5.3 Codex, você usaria `opencode/gpt-5.3-codex` na sua configuração. +O [model id](/docs/config/#models) na sua configuração do OpenCode usa o formato `opencode/`. Por exemplo, para GPT 5.4, você usaria `opencode/gpt-5.4` na sua configuração. --- @@ -114,8 +115,9 @@ Oferecemos um modelo pay-as-you-go. Abaixo estão os preços **por 1M tokens**. 
| Modelo | Entrada | Saída | Leitura em cache | Escrita em cache | | --------------------------------- | ------- | ------- | ---------------- | ---------------- | | Big Pickle | Free | Free | Free | - | -| Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.5 Free | Free | Free | Free | - | +| Ling 2.6 Flash Free | Free | Free | Free | - | +| Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.7 | $0.30 | $1.20 | $0.06 | $0.375 | | MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 | | GLM 5.1 | $1.40 | $4.40 | $0.26 | - | @@ -163,6 +165,7 @@ As taxas de cartão de crédito são repassadas a preço de custo (4.4% + $0.30 Os modelos gratuitos: - MiniMax M2.5 Free está disponível no OpenCode por tempo limitado. A equipe está usando esse período para coletar feedback e melhorar o modelo. +- Ling 2.6 Flash Free está disponível no OpenCode por tempo limitado. A equipe está usando esse período para coletar feedback e melhorar o modelo. - Nemotron 3 Super Free está disponível no OpenCode por tempo limitado. A equipe está usando esse período para coletar feedback e melhorar o modelo. - Big Pickle é um modelo stealth que está gratuito no OpenCode por tempo limitado. A equipe está usando esse período para coletar feedback e melhorar o modelo. @@ -206,6 +209,7 @@ Todos os nossos modelos são hospedados nos US. Nossos provedores seguem uma pol - Big Pickle: Durante seu período gratuito, os dados coletados podem ser usados para melhorar o modelo. - MiniMax M2.5 Free: Durante seu período gratuito, os dados coletados podem ser usados para melhorar o modelo. +- Ling 2.6 Flash Free: Durante seu período gratuito, os dados coletados podem ser usados para melhorar o modelo. - Nemotron 3 Super Free (endpoints gratuitos da NVIDIA): Fornecido sob os [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf). Apenas para uso de avaliação, não para produção nem dados sensíveis. 
A NVIDIA registra prompts e saídas para melhorar seus modelos e serviços. Não envie dados pessoais ou confidenciais. - OpenAI APIs: As solicitações são retidas por 30 dias de acordo com [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data). - Anthropic APIs: As solicitações são retidas por 30 dias de acordo com [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage). diff --git a/packages/web/src/content/docs/ru/zen.mdx b/packages/web/src/content/docs/ru/zen.mdx index 8d1b11a108..27d81354eb 100644 --- a/packages/web/src/content/docs/ru/zen.mdx +++ b/packages/web/src/content/docs/ru/zen.mdx @@ -100,11 +100,12 @@ OpenCode Zen работает как любой другой провайдер | Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Ling 2.6 Flash | ling-2.6-flash | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | [идентификатор модели](/docs/config/#models) в вашей конфигурации OpenCode -использует формат `opencode/`. Например, для GPT 5.3 Codex вам нужно -использовать `opencode/gpt-5.3-codex` в своей конфигурации. +использует формат `opencode/`. Например, для GPT 5.4 вам нужно +использовать `opencode/gpt-5.4` в своей конфигурации. 
--- @@ -125,8 +126,9 @@ https://opencode.ai/zen/v1/models | Модель | Вход | Выход | Cached Read | Cached Write | | --------------------------------- | ------ | ------- | ----------- | ------------ | | Big Pickle | Free | Free | Free | - | -| Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.5 Free | Free | Free | Free | - | +| Ling 2.6 Flash Free | Free | Free | Free | - | +| Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.7 | $0.30 | $1.20 | $0.06 | $0.375 | | MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 | | GLM 5.1 | $1.40 | $4.40 | $0.26 | - | @@ -174,6 +176,7 @@ https://opencode.ai/zen/v1/models Бесплатные модели: - MiniMax M2.5 Free доступна в OpenCode ограниченное время. Команда использует это время, чтобы собирать отзывы и улучшать модель. +- Ling 2.6 Flash Free доступна в OpenCode ограниченное время. Команда использует это время, чтобы собирать отзывы и улучшать модель. - Nemotron 3 Super Free доступна в OpenCode ограниченное время. Команда использует это время, чтобы собирать отзывы и улучшать модель. - Big Pickle — это скрытая модель, которая доступна бесплатно в OpenCode ограниченное время. Команда использует это время, чтобы собирать отзывы и улучшать модель. @@ -220,6 +223,7 @@ https://opencode.ai/zen/v1/models - Big Pickle: во время бесплатного периода собранные данные могут использоваться для улучшения модели. - MiniMax M2.5 Free: во время бесплатного периода собранные данные могут использоваться для улучшения модели. +- Ling 2.6 Flash Free: во время бесплатного периода собранные данные могут использоваться для улучшения модели. - Nemotron 3 Super Free (бесплатные эндпоинты NVIDIA): предоставляется в соответствии с [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf). Только для пробного использования, не для продакшена и не для чувствительных данных. NVIDIA логирует запросы и ответы, чтобы улучшать свои модели и сервисы. 
Не отправляйте персональные или конфиденциальные данные. - OpenAI APIs: запросы хранятся 30 дней в соответствии с [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data). - Anthropic APIs: запросы хранятся 30 дней в соответствии с [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage). diff --git a/packages/web/src/content/docs/th/zen.mdx b/packages/web/src/content/docs/th/zen.mdx index c3b298a329..78de61a9ba 100644 --- a/packages/web/src/content/docs/th/zen.mdx +++ b/packages/web/src/content/docs/th/zen.mdx @@ -93,9 +93,10 @@ OpenCode Zen ทำงานเหมือน provider อื่น ๆ ใน | Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Ling 2.6 Flash | ling-2.6-flash | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | -[model id](/docs/config/#models) ใน OpenCode config ของคุณใช้รูปแบบ `opencode/` ตัวอย่างเช่น สำหรับ GPT 5.3 Codex คุณจะใช้ `opencode/gpt-5.3-codex` ใน config ของคุณ +[model id](/docs/config/#models) ใน OpenCode config ของคุณใช้รูปแบบ `opencode/` ตัวอย่างเช่น สำหรับ GPT 5.4 คุณจะใช้ `opencode/gpt-5.4` ใน config ของคุณ --- @@ -116,8 +117,9 @@ https://opencode.ai/zen/v1/models | Model | Input | Output | Cached Read | Cached Write | | --------------------------------- | ------ | ------- | ----------- | ------------ | | Big Pickle | Free | Free | Free | - | -| Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.5 Free | Free | Free | Free | - | +| Ling 2.6 Flash Free | Free | Free | Free | - | +| Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.7 | $0.30 | $1.20 | $0.06 | $0.375 | | 
MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 | | GLM 5.1 | $1.40 | $4.40 | $0.26 | - | @@ -165,6 +167,7 @@ https://opencode.ai/zen/v1/models โมเดลฟรี: - MiniMax M2.5 Free เปิดให้ใช้บน OpenCode ในช่วงเวลาจำกัด ทีมกำลังใช้ช่วงเวลานี้เพื่อเก็บ feedback และปรับปรุงโมเดล +- Ling 2.6 Flash Free เปิดให้ใช้บน OpenCode ในช่วงเวลาจำกัด ทีมกำลังใช้ช่วงเวลานี้เพื่อเก็บ feedback และปรับปรุงโมเดล - Nemotron 3 Super Free เปิดให้ใช้บน OpenCode ในช่วงเวลาจำกัด ทีมกำลังใช้ช่วงเวลานี้เพื่อเก็บ feedback และปรับปรุงโมเดล - Big Pickle เป็น stealth model ที่ใช้งานฟรีบน OpenCode ในช่วงเวลาจำกัด ทีมกำลังใช้ช่วงเวลานี้เพื่อเก็บ feedback และปรับปรุงโมเดล @@ -208,6 +211,7 @@ https://opencode.ai/zen/v1/models - Big Pickle: ระหว่างช่วงที่เปิดให้ใช้ฟรี ข้อมูลที่เก็บรวบรวมอาจถูกนำไปใช้เพื่อปรับปรุงโมเดล - MiniMax M2.5 Free: ระหว่างช่วงที่เปิดให้ใช้ฟรี ข้อมูลที่เก็บรวบรวมอาจถูกนำไปใช้เพื่อปรับปรุงโมเดล +- Ling 2.6 Flash Free: ระหว่างช่วงที่เปิดให้ใช้ฟรี ข้อมูลที่เก็บรวบรวมอาจถูกนำไปใช้เพื่อปรับปรุงโมเดล - Nemotron 3 Super Free (endpoint ฟรีของ NVIDIA): ให้บริการภายใต้ [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf) ใช้สำหรับการทดลองเท่านั้น ไม่เหมาะสำหรับ production หรือข้อมูลที่อ่อนไหว NVIDIA จะบันทึก prompt และ output เพื่อนำไปปรับปรุงโมเดลและบริการของตน โปรดอย่าส่งข้อมูลส่วนบุคคลหรือข้อมูลลับ. - OpenAI APIs: คำขอจะถูกเก็บไว้เป็นเวลา 30 วันตาม [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data). - Anthropic APIs: คำขอจะถูกเก็บไว้เป็นเวลา 30 วันตาม [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage). 
diff --git a/packages/web/src/content/docs/tr/zen.mdx b/packages/web/src/content/docs/tr/zen.mdx index 7540293054..409c11a218 100644 --- a/packages/web/src/content/docs/tr/zen.mdx +++ b/packages/web/src/content/docs/tr/zen.mdx @@ -91,9 +91,10 @@ Modellerimize aşağıdaki API uç noktaları aracılığıyla da erişebilirsin | Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Ling 2.6 Flash | ling-2.6-flash | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | -OpenCode yapılandırmanızdaki [model id](/docs/config/#models) `opencode/` biçimini kullanır. Örneğin, GPT 5.3 Codex için yapılandırmanızda `opencode/gpt-5.3-codex` kullanırsınız. +OpenCode yapılandırmanızdaki [model id](/docs/config/#models) `opencode/` biçimini kullanır. Örneğin, GPT 5.4 için yapılandırmanızda `opencode/gpt-5.4` kullanırsınız. --- @@ -114,8 +115,9 @@ Kullandıkça öde modelini destekliyoruz. 
Aşağıda **1M token başına** fiya | Model | Input | Output | Cached Read | Cached Write | | --------------------------------- | ------ | ------- | ----------- | ------------ | | Big Pickle | Free | Free | Free | - | -| Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.5 Free | Free | Free | Free | - | +| Ling 2.6 Flash Free | Free | Free | Free | - | +| Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.7 | $0.30 | $1.20 | $0.06 | $0.375 | | MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 | | GLM 5.1 | $1.40 | $4.40 | $0.26 | - | @@ -163,6 +165,7 @@ Kredi kartı ücretleri maliyet üzerinden yansıtılır (%4.4 + işlem başına Ücretsiz modeller: - MiniMax M2.5 Free, sınırlı bir süre için OpenCode'da ücretsizdir. Ekip bu süreyi geri bildirim toplamak ve modeli iyileştirmek için kullanıyor. +- Ling 2.6 Flash Free, sınırlı bir süre için OpenCode'da ücretsizdir. Ekip bu süreyi geri bildirim toplamak ve modeli iyileştirmek için kullanıyor. - Nemotron 3 Super Free, sınırlı bir süre için OpenCode'da ücretsizdir. Ekip bu süreyi geri bildirim toplamak ve modeli iyileştirmek için kullanıyor. - Big Pickle, sınırlı bir süre için OpenCode'da ücretsiz olan gizli bir modeldir. Ekip bu süreyi geri bildirim toplamak ve modeli iyileştirmek için kullanıyor. @@ -206,6 +209,7 @@ Tüm modellerimiz US'de barındırılıyor. Sağlayıcılarımız zero-retention - Big Pickle: Ücretsiz döneminde toplanan veriler modeli iyileştirmek için kullanılabilir. - MiniMax M2.5 Free: Ücretsiz döneminde toplanan veriler modeli iyileştirmek için kullanılabilir. +- Ling 2.6 Flash Free: Ücretsiz döneminde toplanan veriler modeli iyileştirmek için kullanılabilir. - Nemotron 3 Super Free (ücretsiz NVIDIA uç noktaları): [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf) kapsamında sunulur. Yalnızca deneme amaçlıdır; üretim veya hassas veriler için uygun değildir. 
NVIDIA, modellerini ve hizmetlerini geliştirmek için promptları ve çıktıları kaydeder. Kişisel veya gizli veri göndermeyin. - OpenAI APIs: İstekler [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data) uyarınca 30 gün boyunca saklanır. - Anthropic APIs: İstekler [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage) uyarınca 30 gün boyunca saklanır. diff --git a/packages/web/src/content/docs/zen.mdx b/packages/web/src/content/docs/zen.mdx index ec44a0548a..1c59c00f74 100644 --- a/packages/web/src/content/docs/zen.mdx +++ b/packages/web/src/content/docs/zen.mdx @@ -100,11 +100,12 @@ You can also access our models through the following API endpoints. | Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Ling 2.6 Flash | ling-2.6-flash | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | The [model id](/docs/config/#models) in your OpenCode config -uses the format `opencode/`. For example, for GPT 5.3 Codex, you would -use `opencode/gpt-5.3-codex` in your config. +uses the format `opencode/`. For example, for GPT 5.4, you would +use `opencode/gpt-5.4` in your config. --- @@ -125,8 +126,9 @@ We support a pay-as-you-go model. Below are the prices **per 1M tokens**. 
| Model | Input | Output | Cached Read | Cached Write | | --------------------------------- | ------ | ------- | ----------- | ------------ | | Big Pickle | Free | Free | Free | - | -| Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.5 Free | Free | Free | Free | - | +| Ling 2.6 Flash Free | Free | Free | Free | - | +| Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.7 | $0.30 | $1.20 | $0.06 | $0.375 | | MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 | | GLM 5.1 | $1.40 | $4.40 | $0.26 | - | @@ -174,6 +176,7 @@ Credit card fees are passed along at cost (4.4% + $0.30 per transaction); we don The free models: - MiniMax M2.5 Free is available on OpenCode for a limited time. The team is using this time to collect feedback and improve the model. +- Ling 2.6 Flash Free is available on OpenCode for a limited time. The team is using this time to collect feedback and improve the model. - Nemotron 3 Super Free is available on OpenCode for a limited time. The team is using this time to collect feedback and improve the model. - Big Pickle is a stealth model that's free on OpenCode for a limited time. The team is using this time to collect feedback and improve the model. @@ -220,6 +223,7 @@ All our models are hosted in the US. Our providers follow a zero-retention polic - Big Pickle: During its free period, collected data may be used to improve the model. - MiniMax M2.5 Free: During its free period, collected data may be used to improve the model. +- Ling 2.6 Flash Free: During its free period, collected data may be used to improve the model. - Nemotron 3 Super Free (NVIDIA free endpoints): Provided under the [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf). Trial use only — not for production or sensitive data. Prompts and outputs are logged by NVIDIA to improve its models and services. Do not submit personal or confidential data. 
- OpenAI APIs: Requests are retained for 30 days in accordance with [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data). - Anthropic APIs: Requests are retained for 30 days in accordance with [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage). diff --git a/packages/web/src/content/docs/zh-cn/zen.mdx b/packages/web/src/content/docs/zh-cn/zen.mdx index 8eedcf31cf..017c5b6c9c 100644 --- a/packages/web/src/content/docs/zh-cn/zen.mdx +++ b/packages/web/src/content/docs/zh-cn/zen.mdx @@ -91,9 +91,10 @@ OpenCode Zen 的工作方式与 OpenCode 中的任何其他提供商相同。 | Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Ling 2.6 Flash | ling-2.6-flash | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | -在你的 OpenCode 配置中,[模型 ID](/docs/config/#models) 使用 `opencode/` 格式。例如,对于 GPT 5.3 Codex,你需要在配置中使用 `opencode/gpt-5.3-codex`。 +在你的 OpenCode 配置中,[模型 ID](/docs/config/#models) 使用 `opencode/` 格式。例如,对于 GPT 5.4,你需要在配置中使用 `opencode/gpt-5.4`。 --- @@ -114,8 +115,9 @@ https://opencode.ai/zen/v1/models | 模型 | 输入 | 输出 | 缓存读取 | 缓存写入 | | --------------------------------- | ------ | ------- | -------- | -------- | | Big Pickle | Free | Free | Free | - | -| Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.5 Free | Free | Free | Free | - | +| Ling 2.6 Flash Free | Free | Free | Free | - | +| Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.7 | $0.30 | $1.20 | $0.06 | $0.375 | | MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 | | GLM 5.1 | $1.40 | $4.40 | $0.26 | - | @@ -163,6 +165,7 @@ https://opencode.ai/zen/v1/models 
免费模型: - MiniMax M2.5 Free 目前在 OpenCode 上限时免费提供。团队正在利用这段时间收集反馈并改进模型。 +- Ling 2.6 Flash Free 目前在 OpenCode 上限时免费提供。团队正在利用这段时间收集反馈并改进模型。 - Nemotron 3 Super Free 目前在 OpenCode 上限时免费提供。团队正在利用这段时间收集反馈并改进模型。 - Big Pickle 是一个隐身模型,目前在 OpenCode 上限时免费提供。团队正在利用这段时间收集反馈并改进模型。 @@ -206,6 +209,7 @@ https://opencode.ai/zen/v1/models - Big Pickle:在免费期间,收集的数据可能会被用于改进模型。 - MiniMax M2.5 Free:在免费期间,收集的数据可能会被用于改进模型。 +- Ling 2.6 Flash Free:在免费期间,收集的数据可能会被用于改进模型。 - Nemotron 3 Super Free(NVIDIA 免费端点):根据 [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf) 提供。仅供试用,不适用于生产环境或敏感数据。NVIDIA 会记录提示词和输出内容,以改进其模型和服务。请勿提交个人或机密数据。 - OpenAI APIs:请求会根据 [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data) 保留 30 天。 - Anthropic APIs:请求会根据 [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage) 保留 30 天。 diff --git a/packages/web/src/content/docs/zh-tw/zen.mdx b/packages/web/src/content/docs/zh-tw/zen.mdx index 5ccc36785b..0afaf10b66 100644 --- a/packages/web/src/content/docs/zh-tw/zen.mdx +++ b/packages/web/src/content/docs/zh-tw/zen.mdx @@ -95,10 +95,11 @@ OpenCode Zen 的運作方式和 OpenCode 中的其他供應商一樣。 | Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Big Pickle | big-pickle | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Ling 2.6 Flash | ling-2.6-flash | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | | Nemotron 3 Super Free | nemotron-3-super-free | `https://opencode.ai/zen/v1/chat/completions` | `@ai-sdk/openai-compatible` | OpenCode 設定中的 [模型 ID](/docs/config/#models) 會使用 `opencode/` -格式。例如,如果是 GPT 5.3 Codex,你會在設定中使用 `opencode/gpt-5.3-codex`。 +格式。例如,如果是 GPT 5.4,你會在設定中使用 `opencode/gpt-5.4`。 --- @@ -119,8 +120,9 @@ https://opencode.ai/zen/v1/models | 模型 | 
輸入 | 輸出 | 快取讀取 | 快取寫入 | | --------------------------------- | ------ | ------- | -------- | -------- | | Big Pickle | Free | Free | Free | - | -| Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.5 Free | Free | Free | Free | - | +| Ling 2.6 Flash Free | Free | Free | Free | - | +| Nemotron 3 Super Free | Free | Free | Free | - | | MiniMax M2.7 | $0.30 | $1.20 | $0.06 | $0.375 | | MiniMax M2.5 | $0.30 | $1.20 | $0.06 | $0.375 | | GLM 5.1 | $1.40 | $4.40 | $0.26 | - | @@ -169,6 +171,7 @@ https://opencode.ai/zen/v1/models 免費模型: - MiniMax M2.5 Free 在 OpenCode 上限時提供。團隊正在利用這段時間收集回饋並改進模型。 +- Ling 2.6 Flash Free 在 OpenCode 上限時提供。團隊正在利用這段時間收集回饋並改進模型。 - Nemotron 3 Super Free 在 OpenCode 上限時提供。團隊正在利用這段時間收集回饋並改進模型。 - Big Pickle 是一個隱身模型,在 OpenCode 上限時免費提供。團隊正在利用這段時間收集回饋並改進模型。 @@ -213,6 +216,7 @@ https://opencode.ai/zen/v1/models - Big Pickle: 在免費期間,收集到的資料可能會用於改進模型。 - MiniMax M2.5 Free: 在免費期間,收集到的資料可能會用於改進模型。 +- Ling 2.6 Flash Free: 在免費期間,收集到的資料可能會用於改進模型。 - Nemotron 3 Super Free(NVIDIA 免費端點):依據 [NVIDIA API Trial Terms of Service](https://assets.ngc.nvidia.com/products/api-catalog/legal/NVIDIA%20API%20Trial%20Terms%20of%20Service.pdf) 提供。僅供試用,不適用於正式環境或敏感資料。NVIDIA 會記錄提示詞與輸出內容,以改進其模型與服務。請勿提交個人或機密資料。 - OpenAI APIs: 請求會依據 [OpenAI's Data Policies](https://platform.openai.com/docs/guides/your-data) 保留 30 天。 - Anthropic APIs: 請求會依據 [Anthropic's Data Policies](https://docs.anthropic.com/en/docs/claude-code/data-usage) 保留 30 天。 From 79336571356e392c2fea9379c041bfbaa9bcebcd Mon Sep 17 00:00:00 2001 From: Kit Langton Date: Tue, 21 Apr 2026 17:26:50 -0400 Subject: [PATCH 34/73] migrate LSP data schemas to Effect Schema (#23745) --- packages/opencode/src/lsp/lsp.ts | 97 +++++++++---------- .../src/server/routes/instance/file.ts | 2 +- .../src/server/routes/instance/index.ts | 2 +- packages/opencode/src/session/message-v2.ts | 2 +- 4 files changed, 49 insertions(+), 54 deletions(-) diff --git a/packages/opencode/src/lsp/lsp.ts b/packages/opencode/src/lsp/lsp.ts index 
aa519f9f7e..833285e7b5 100644 --- a/packages/opencode/src/lsp/lsp.ts +++ b/packages/opencode/src/lsp/lsp.ts @@ -10,9 +10,11 @@ import { Config } from "../config" import { Flag } from "@/flag/flag" import { Process } from "../util" import { spawn as lspspawn } from "./launch" -import { Effect, Layer, Context } from "effect" +import { Effect, Layer, Context, Schema } from "effect" import { InstanceState } from "@/effect" import { AppFileSystem } from "@opencode-ai/shared/filesystem" +import { withStatics } from "@/util/schema" +import { zod, ZodOverride } from "@/util/effect-zod" const log = Log.create({ service: "lsp" }) @@ -20,60 +22,53 @@ export const Event = { Updated: BusEvent.define("lsp.updated", z.object({})), } -export const Range = z - .object({ - start: z.object({ - line: z.number(), - character: z.number(), - }), - end: z.object({ - line: z.number(), - character: z.number(), - }), - }) - .meta({ - ref: "Range", - }) -export type Range = z.infer +const Position = Schema.Struct({ + line: Schema.Number, + character: Schema.Number, +}) -export const Symbol = z - .object({ - name: z.string(), - kind: z.number(), - location: z.object({ - uri: z.string(), - range: Range, - }), - }) - .meta({ - ref: "Symbol", - }) -export type Symbol = z.infer +export const Range = Schema.Struct({ + start: Position, + end: Position, +}) + .annotate({ identifier: "Range" }) + .pipe(withStatics((s) => ({ zod: zod(s) }))) +export type Range = typeof Range.Type -export const DocumentSymbol = z - .object({ - name: z.string(), - detail: z.string().optional(), - kind: z.number(), +export const Symbol = Schema.Struct({ + name: Schema.String, + kind: Schema.Number, + location: Schema.Struct({ + uri: Schema.String, range: Range, - selectionRange: Range, - }) - .meta({ - ref: "DocumentSymbol", - }) -export type DocumentSymbol = z.infer + }), +}) + .annotate({ identifier: "Symbol" }) + .pipe(withStatics((s) => ({ zod: zod(s) }))) +export type Symbol = typeof Symbol.Type -export const Status 
= z - .object({ - id: z.string(), - name: z.string(), - root: z.string(), - status: z.union([z.literal("connected"), z.literal("error")]), - }) - .meta({ - ref: "LSPStatus", - }) -export type Status = z.infer +export const DocumentSymbol = Schema.Struct({ + name: Schema.String, + detail: Schema.optional(Schema.String), + kind: Schema.Number, + range: Range, + selectionRange: Range, +}) + .annotate({ identifier: "DocumentSymbol" }) + .pipe(withStatics((s) => ({ zod: zod(s) }))) +export type DocumentSymbol = typeof DocumentSymbol.Type + +export const Status = Schema.Struct({ + id: Schema.String, + name: Schema.String, + root: Schema.String, + status: Schema.Literals(["connected", "error"]).annotate({ + [ZodOverride]: z.union([z.literal("connected"), z.literal("error")]), + }), +}) + .annotate({ identifier: "LSPStatus" }) + .pipe(withStatics((s) => ({ zod: zod(s) }))) +export type Status = typeof Status.Type enum SymbolKind { File = 1, diff --git a/packages/opencode/src/server/routes/instance/file.ts b/packages/opencode/src/server/routes/instance/file.ts index bbef679a85..f92fe6e7e5 100644 --- a/packages/opencode/src/server/routes/instance/file.ts +++ b/packages/opencode/src/server/routes/instance/file.ts @@ -90,7 +90,7 @@ export const FileRoutes = lazy(() => description: "Symbols", content: { "application/json": { - schema: resolver(LSP.Symbol.array()), + schema: resolver(LSP.Symbol.zod.array()), }, }, }, diff --git a/packages/opencode/src/server/routes/instance/index.ts b/packages/opencode/src/server/routes/instance/index.ts index 0038c59619..e8a038fabc 100644 --- a/packages/opencode/src/server/routes/instance/index.ts +++ b/packages/opencode/src/server/routes/instance/index.ts @@ -260,7 +260,7 @@ export const InstanceRoutes = (upgrade: UpgradeWebSocket): Hono => { description: "LSP server status", content: { "application/json": { - schema: resolver(LSP.Status.array()), + schema: resolver(LSP.Status.zod.array()), }, }, }, diff --git 
a/packages/opencode/src/session/message-v2.ts b/packages/opencode/src/session/message-v2.ts index 20528763b8..58bee8d1eb 100644 --- a/packages/opencode/src/session/message-v2.ts +++ b/packages/opencode/src/session/message-v2.ts @@ -159,7 +159,7 @@ export const FileSource = FilePartSourceBase.extend({ export const SymbolSource = FilePartSourceBase.extend({ type: z.literal("symbol"), path: z.string(), - range: LSP.Range, + range: LSP.Range.zod, name: z.string(), kind: z.number().int(), }).meta({ From 2ae64f426b7d210f64151124a3b7d729f28af7ca Mon Sep 17 00:00:00 2001 From: Kit Langton Date: Tue, 21 Apr 2026 17:30:08 -0400 Subject: [PATCH 35/73] refactor(core): migrate MessageV2.Format to Effect Schema (#23744) --- packages/opencode/specs/effect/schema.md | 74 +++++++++++++++++++ packages/opencode/src/session/message-v2.ts | 43 +++++------ packages/opencode/src/session/prompt.ts | 2 +- .../test/session/structured-output.test.ts | 12 +-- 4 files changed, 103 insertions(+), 28 deletions(-) diff --git a/packages/opencode/specs/effect/schema.md b/packages/opencode/specs/effect/schema.md index 2fcbfc12be..6deea49655 100644 --- a/packages/opencode/specs/effect/schema.md +++ b/packages/opencode/specs/effect/schema.md @@ -186,6 +186,80 @@ schema module with a clear domain. Major cluster. Message + event types flow through the SSE API and every SDK output, so byte-identical SDK surface is critical. +Suggested order for this cluster, starting from the leaves that `session.ts` +and the SSE/event surface depend on: + +1. `src/session/schema.ts` ✅ already migrated +2. `src/provider/schema.ts` if `message-v2.ts` still relies on zod-first IDs +3. `src/lsp/*` schema leaves needed by `LSP.Range` +4. `src/snapshot/*` leaves used by `Snapshot.FileDiff` +5. `src/session/message-v2.ts` +6. `src/session/message.ts` +7. `src/session/prompt.ts` +8. `src/session/revert.ts` +9. `src/session/summary.ts` +10. `src/session/status.ts` +11. `src/session/todo.ts` +12. `src/session/session.ts` +13. 
`src/session/compaction.ts` + +Dependency sketch: + +```text +session.ts +|- project/schema.ts +|- control-plane/schema.ts +|- permission/schema.ts +|- snapshot/* +|- message-v2.ts +| |- provider/schema.ts +| |- lsp/* +| |- snapshot/* +| |- sync/index.ts +| `- bus/bus-event.ts +|- sync/index.ts +|- bus/bus-event.ts +`- util/update-schema.ts +``` + +Working rule for this cluster: + +- migrate reusable leaf schemas and nested payload objects first +- migrate aggregate DTOs like `Session.Info` after their nested pieces exist as + named Schema values +- leave zod-only event/update helpers in place temporarily when converting + them would force unrelated churn across sync/bus boundaries + +`message-v2.ts` first-pass outline: + +1. Schema-backed imports already available + - `SessionID`, `MessageID`, `PartID` + - `ProviderID`, `ModelID` +2. Local leaf objects to extract and migrate first + - output format payloads + - common part bases like `PartBase` + - timestamp/range helper objects like `time.start/end` + - file/source helper objects + - token/cost/model helper objects +3. Part variants built from those leaves + - `SnapshotPart`, `PatchPart`, `TextPart`, `ReasoningPart` + - `FilePart`, `AgentPart`, `CompactionPart`, `SubtaskPart` + - retry/step/tool related parts +4. Higher-level unions and DTOs + - `FilePartSource` + - part unions + - message unions and assistant/user payloads +5. 
Errors and event payloads last + - `NamedError.create(...)` shapes can stay temporarily if converting them to + `Schema.TaggedErrorClass` would force unrelated churn + - `SyncEvent.define(...)` and `BusEvent.define(...)` payloads can keep using + derived `.zod` until the sync/bus layers are migrated + +Possible later tightening after the Schema-first migration is stable: + +- promote repeated opaque strings and timestamp numbers into branded/newtype + leaf schemas where that adds domain value without changing the wire format + - [ ] `src/session/compaction.ts` - [ ] `src/session/message-v2.ts` - [ ] `src/session/message.ts` diff --git a/packages/opencode/src/session/message-v2.ts b/packages/opencode/src/session/message-v2.ts index 58bee8d1eb..980e5e2c6c 100644 --- a/packages/opencode/src/session/message-v2.ts +++ b/packages/opencode/src/session/message-v2.ts @@ -15,7 +15,8 @@ import { isMedia } from "@/util/media" import type { SystemError } from "bun" import type { Provider } from "@/provider" import { ModelID, ProviderID } from "@/provider/schema" -import { Effect } from "effect" +import { Effect, Schema } from "effect" +import { zod } from "@/util/effect-zod" import { EffectLogger } from "@/effect" /** Error shape thrown by Bun's fetch() when gzip/br decompression fails mid-stream */ @@ -61,28 +62,28 @@ export const ContextOverflowError = NamedError.create( z.object({ message: z.string(), responseBody: z.string().optional() }), ) -export const OutputFormatText = z - .object({ - type: z.literal("text"), - }) - .meta({ - ref: "OutputFormatText", - }) +export class OutputFormatText extends Schema.Class("OutputFormatText")({ + type: Schema.Literal("text"), +}) { + static readonly zod = zod(this) +} -export const OutputFormatJsonSchema = z - .object({ - type: z.literal("json_schema"), - schema: z.record(z.string(), z.any()).meta({ ref: "JSONSchema" }), - retryCount: z.number().int().min(0).default(2), - }) - .meta({ - ref: "OutputFormatJsonSchema", - }) +export class 
OutputFormatJsonSchema extends Schema.Class("OutputFormatJsonSchema")({ + type: Schema.Literal("json_schema"), + schema: Schema.Record(Schema.String, Schema.Any).annotate({ identifier: "JSONSchema" }), + retryCount: Schema.Number.check(Schema.isInt()) + .check(Schema.isGreaterThanOrEqualTo(0)) + .pipe(Schema.optional, Schema.withDecodingDefault(Effect.succeed(2))), +}) { + static readonly zod = zod(this) +} -export const Format = z.discriminatedUnion("type", [OutputFormatText, OutputFormatJsonSchema]).meta({ - ref: "OutputFormat", +const _Format = Schema.Union([OutputFormatText, OutputFormatJsonSchema]).annotate({ + discriminator: "type", + identifier: "OutputFormat", }) -export type OutputFormat = z.infer +export const Format = Object.assign(_Format, { zod: zod(_Format) }) +export type OutputFormat = Schema.Schema.Type const PartBase = z.object({ id: PartID.zod, @@ -360,7 +361,7 @@ export const User = Base.extend({ time: z.object({ created: z.number(), }), - format: Format.optional(), + format: Format.zod.optional(), summary: z .object({ title: z.string().optional(), diff --git a/packages/opencode/src/session/prompt.ts b/packages/opencode/src/session/prompt.ts index 431189d19c..6dcec04592 100644 --- a/packages/opencode/src/session/prompt.ts +++ b/packages/opencode/src/session/prompt.ts @@ -1716,7 +1716,7 @@ export const PromptInput = z.object({ .record(z.string(), z.boolean()) .optional() .describe("@deprecated tools and permissions have been merged, you can set permissions on the session itself now"), - format: MessageV2.Format.optional(), + format: MessageV2.Format.zod.optional(), system: z.string().optional(), variant: z.string().optional(), parts: z.array( diff --git a/packages/opencode/test/session/structured-output.test.ts b/packages/opencode/test/session/structured-output.test.ts index 2debfb76d5..a91446bf42 100644 --- a/packages/opencode/test/session/structured-output.test.ts +++ b/packages/opencode/test/session/structured-output.test.ts @@ -5,7 +5,7 @@ 
import { SessionID, MessageID } from "../../src/session/schema" describe("structured-output.OutputFormat", () => { test("parses text format", () => { - const result = MessageV2.Format.safeParse({ type: "text" }) + const result = MessageV2.Format.zod.safeParse({ type: "text" }) expect(result.success).toBe(true) if (result.success) { expect(result.data.type).toBe("text") @@ -13,7 +13,7 @@ describe("structured-output.OutputFormat", () => { }) test("parses json_schema format with defaults", () => { - const result = MessageV2.Format.safeParse({ + const result = MessageV2.Format.zod.safeParse({ type: "json_schema", schema: { type: "object", properties: { name: { type: "string" } } }, }) @@ -27,7 +27,7 @@ describe("structured-output.OutputFormat", () => { }) test("parses json_schema format with custom retryCount", () => { - const result = MessageV2.Format.safeParse({ + const result = MessageV2.Format.zod.safeParse({ type: "json_schema", schema: { type: "object" }, retryCount: 5, @@ -39,17 +39,17 @@ describe("structured-output.OutputFormat", () => { }) test("rejects invalid type", () => { - const result = MessageV2.Format.safeParse({ type: "invalid" }) + const result = MessageV2.Format.zod.safeParse({ type: "invalid" }) expect(result.success).toBe(false) }) test("rejects json_schema without schema", () => { - const result = MessageV2.Format.safeParse({ type: "json_schema" }) + const result = MessageV2.Format.zod.safeParse({ type: "json_schema" }) expect(result.success).toBe(false) }) test("rejects negative retryCount", () => { - const result = MessageV2.Format.safeParse({ + const result = MessageV2.Format.zod.safeParse({ type: "json_schema", schema: { type: "object" }, retryCount: -1, From b0f565b74a7dd21f1f2aba00718c9130524a4d6a Mon Sep 17 00:00:00 2001 From: Kit Langton Date: Tue, 21 Apr 2026 17:33:13 -0400 Subject: [PATCH 36/73] refactor(core): migrate ConfigPermission.Info to Effect Schema canonical (#23740) --- packages/opencode/src/config/agent.ts | 8 +- 
packages/opencode/src/config/config.ts | 3 +- packages/opencode/src/config/permission.ts | 73 +++++------ packages/opencode/src/permission/index.ts | 12 +- packages/opencode/src/util/effect-zod.ts | 41 +----- packages/opencode/test/config/config.test.ts | 22 +++- .../opencode/test/permission/next.test.ts | 61 +++++++++ .../opencode/test/util/effect-zod.test.ts | 117 +----------------- packages/sdk/js/src/v2/gen/types.gen.ts | 5 +- 9 files changed, 133 insertions(+), 209 deletions(-) diff --git a/packages/opencode/src/config/agent.ts b/packages/opencode/src/config/agent.ts index 9755c20375..5a91a38995 100644 --- a/packages/opencode/src/config/agent.ts +++ b/packages/opencode/src/config/agent.ts @@ -22,12 +22,6 @@ const Color = Schema.Union([ Schema.Literals(["primary", "secondary", "accent", "success", "warning", "error", "info"]), ]) -// ConfigPermission.Info is a zod schema (its `.preprocess(...).transform(...)` -// shape lives outside the Effect Schema type system), so the walker reaches it -// via ZodOverride rather than a pure Schema reference. This preserves the -// `$ref: PermissionConfig` emitted in openapi.json. -const PermissionRef = Schema.Any.annotate({ [ZodOverride]: ConfigPermission.Info }) - const AgentSchema = Schema.StructWithRest( Schema.Struct({ model: Schema.optional(ConfigModelID), @@ -54,7 +48,7 @@ const AgentSchema = Schema.StructWithRest( description: "Maximum number of agentic iterations before forcing text-only response", }), maxSteps: Schema.optional(PositiveInt).annotate({ description: "@deprecated Use 'steps' field instead." 
}), - permission: Schema.optional(PermissionRef), + permission: Schema.optional(ConfigPermission.Info), }), [Schema.Record(Schema.String, Schema.Any)], ) diff --git a/packages/opencode/src/config/config.ts b/packages/opencode/src/config/config.ts index 4af079127e..5423ba3baf 100644 --- a/packages/opencode/src/config/config.ts +++ b/packages/opencode/src/config/config.ts @@ -86,7 +86,6 @@ export type Layout = ConfigLayout.Layout // ZodOverride-annotated Schema.Any. Walker sees the annotation and emits the // exact zod directly, preserving component $refs. const AgentRef = Schema.Any.annotate({ [ZodOverride]: ConfigAgent.Info }) -const PermissionRef = Schema.Any.annotate({ [ZodOverride]: ConfigPermission.Info }) const LogLevelRef = Schema.Any.annotate({ [ZodOverride]: Log.Level }) const PositiveInt = Schema.Number.check(Schema.isInt()).check(Schema.isGreaterThan(0)) @@ -198,7 +197,7 @@ export const Info = Schema.Struct({ description: "Additional instruction files or patterns to include", }), layout: Schema.optional(ConfigLayout.Layout).annotate({ description: "@deprecated Always uses stretch layout." 
}), - permission: Schema.optional(PermissionRef), + permission: Schema.optional(ConfigPermission.Info), tools: Schema.optional(Schema.Record(Schema.String, Schema.Boolean)), enterprise: Schema.optional( Schema.Struct({ diff --git a/packages/opencode/src/config/permission.ts b/packages/opencode/src/config/permission.ts index d4883ed8c1..fdd5746837 100644 --- a/packages/opencode/src/config/permission.ts +++ b/packages/opencode/src/config/permission.ts @@ -1,6 +1,6 @@ export * as ConfigPermission from "./permission" -import { Schema } from "effect" -import { zod, ZodPreprocess } from "@/util/effect-zod" +import { Schema, SchemaGetter } from "effect" +import { zod } from "@/util/effect-zod" import { withStatics } from "@/util/schema" export const Action = Schema.Literals(["ask", "allow", "deny"]) @@ -18,21 +18,19 @@ export const Rule = Schema.Union([Action, Object]) .pipe(withStatics((s) => ({ zod: zod(s) }))) export type Rule = Schema.Schema.Type -// Captures the user's original property insertion order before Schema.Struct -// canonicalises the object. See the `ZodPreprocess` comment in -// `util/effect-zod.ts` for the full rationale — in short: rule precedence is -// encoded in JSON key order (`evaluate.ts` uses `findLast`, so later keys win) -// and `Schema.StructWithRest` would otherwise drop that order. -const permissionPreprocess = (val: unknown) => { - if (typeof val === "object" && val !== null && !Array.isArray(val)) { - return { __originalKeys: globalThis.Object.keys(val), ...val } - } - return val -} - -const ObjectShape = Schema.StructWithRest( +// Known permission keys get explicit types — most are full Rule (either a +// single Action or a per-pattern object), but a handful of tools take no +// sub-target patterns and are Action-only. Unknown keys fall through the +// Record rest signature as Rule. 
+// +// StructWithRest canonicalises key order on decode (known first, then rest), +// which used to require the `__originalKeys` preprocess hack because +// `Permission.fromConfig` depended on the user's insertion order. That +// dependency is gone — `fromConfig` now sorts top-level keys so wildcard +// permissions come before specifics, making the final precedence +// order-independent. +const InputObject = Schema.StructWithRest( Schema.Struct({ - __originalKeys: Schema.optional(Schema.mutable(Schema.Array(Schema.String))), read: Schema.optional(Rule), edit: Schema.optional(Rule), glob: Schema.optional(Rule), @@ -53,24 +51,29 @@ const ObjectShape = Schema.StructWithRest( [Schema.Record(Schema.String, Rule)], ) -const InnerSchema = Schema.Union([ObjectShape, Action]).annotate({ - [ZodPreprocess]: permissionPreprocess, -}) +// Input the user writes in config: either a single Action (shorthand for "*") +// or an object of per-target rules. +const InputSchema = Schema.Union([Action, InputObject]) -// Post-parse: drop the __originalKeys metadata and rebuild the rule map in the -// user's original insertion order. A plain string input (the Action branch of -// the union) becomes `{ "*": action }`. -const transform = (x: unknown): Record => { - if (typeof x === "string") return { "*": x as Action } - const obj = x as { __originalKeys?: string[] } & Record - const { __originalKeys, ...rest } = obj - if (!__originalKeys) return rest as Record - const result: Record = {} - for (const key of __originalKeys) { - if (key in rest) result[key] = rest[key] as Rule - } - return result -} +// Normalise the Action shorthand into `{ "*": action }`. Object inputs pass +// through untouched. +const normalizeInput = (input: Schema.Schema.Type): Schema.Schema.Type => + typeof input === "string" ? 
{ "*": input } : input -export const Info = zod(InnerSchema).transform(transform).meta({ ref: "PermissionConfig" }) -export type Info = Record +export const Info = InputSchema.pipe( + Schema.decodeTo(InputObject, { + decode: SchemaGetter.transform(normalizeInput), + // Not perfectly invertible (we lose whether the user originally typed an + // Action shorthand), but the object form is always a valid representation + // of the same rules. + encode: SchemaGetter.passthrough({ strict: false }), + }), +) + .annotate({ identifier: "PermissionConfig" }) + .pipe( + // Walker already emits the decodeTo transform into the derived zod (see + // `encoded()` in effect-zod.ts), so just expose that directly. + withStatics((s) => ({ zod: zod(s) })), + ) +type _Info = Schema.Schema.Type +export type Info = { -readonly [K in keyof _Info]: _Info[K] } diff --git a/packages/opencode/src/permission/index.ts b/packages/opencode/src/permission/index.ts index caf66cc947..6943b3d93b 100644 --- a/packages/opencode/src/permission/index.ts +++ b/packages/opencode/src/permission/index.ts @@ -290,8 +290,18 @@ function expand(pattern: string): string { } export function fromConfig(permission: ConfigPermission.Info) { + // Sort top-level keys so wildcard permissions (`*`, `mcp_*`) come before + // specific ones. Combined with `findLast` in evaluate(), this gives the + // intuitive semantic "specific tool rules override the `*` fallback" + // regardless of the user's JSON key order. Sub-pattern order inside a + // single permission key is preserved — only top-level keys are sorted. + const entries = Object.entries(permission).sort(([a], [b]) => { + const aWild = a.includes("*") + const bWild = b.includes("*") + return aWild === bWild ? 0 : aWild ? 
-1 : 1 + }) const ruleset: Ruleset = [] - for (const [key, value] of Object.entries(permission)) { + for (const [key, value] of entries) { if (typeof value === "string") { ruleset.push({ permission: key, action: value, pattern: "*" }) continue diff --git a/packages/opencode/src/util/effect-zod.ts b/packages/opencode/src/util/effect-zod.ts index bf1caa035b..f6d2c5e5c0 100644 --- a/packages/opencode/src/util/effect-zod.ts +++ b/packages/opencode/src/util/effect-zod.ts @@ -8,43 +8,6 @@ import z from "zod" */ export const ZodOverride: unique symbol = Symbol.for("effect-zod/override") -/** - * Annotation key for a pre-parse transform that runs on the raw input before - * the derived Zod schema validates it. The walker emits - * `z.preprocess(fn, inner)` when this annotation is present. - * - * Models zod's `z.preprocess(fn, schema)` pattern — useful when the schema - * needs to inspect the user's raw input (e.g. to capture insertion order) - * before `Schema.Struct` canonicalises the object. - * - * TODO: This exists to paper over a missing Effect Schema feature. The - * parser canonicalises open struct output (known fields first in - * declaration order, then catchall fields) before any user-defined - * transform sees the value, and there is no pre-parse hook — so the - * user's original property insertion order is gone by the time - * `Schema.decodeTo` or `middlewareDecoding` runs. - * - * That canonicalisation is a reasonable default, but `config/permission.ts` - * encodes rule precedence in the user's JSON key order (`evaluate.ts` - * uses `findLast`, so later entries win), which the canonicalisation - * silently destroys. - * - * The cleanest upstream fix would be either: - * - * 1. A `preserveInputOrder` option on `Schema.Struct` / - * `Schema.StructWithRest` that keeps the input's insertion order in - * the parsed object (opt-in; canonical order stays default). - * 2. 
A generic pre-parse hook (`Schema.preprocess(schema, fn)` or a - * transformation whose decode receives the raw `unknown`). - * - * Either of those would let us delete `ZodPreprocess` and the - * `__originalKeys` hack. Alternatively, the permission model could move - * to specificity-based precedence (exact keys beat wildcards) or an - * explicit ordered array of rules, which removes the ordering - * dependency at the data-model level. - */ -export const ZodPreprocess: unique symbol = Symbol.for("effect-zod/preprocess") - // AST nodes are immutable and frequently shared across schemas (e.g. a single // Schema.Class embedded in multiple parents). Memoizing by node identity // avoids rebuilding equivalent Zod subtrees and keeps derived children stable @@ -85,11 +48,9 @@ function walkUncached(ast: SchemaAST.AST): z.ZodTypeAny { const hasTransform = hasEncoding && !(SchemaAST.isOptional(ast) && extractDefault(ast) !== undefined) const base = hasTransform ? encoded(ast) : body(ast) const checked = ast.checks?.length ? applyChecks(base, ast.checks, ast) : base - const preprocess = (ast.annotations as { [ZodPreprocess]?: (val: unknown) => unknown } | undefined)?.[ZodPreprocess] - const out = preprocess ? z.preprocess(preprocess, checked) : checked const desc = SchemaAST.resolveDescription(ast) const ref = SchemaAST.resolveIdentifier(ast) - const described = desc ? out.describe(desc) : out + const described = desc ? checked.describe(desc) : checked return ref ? 
described.meta({ ref }) : described } diff --git a/packages/opencode/test/config/config.test.ts b/packages/opencode/test/config/config.test.ts index e9b0538193..73dd46e319 100644 --- a/packages/opencode/test/config/config.test.ts +++ b/packages/opencode/test/config/config.test.ts @@ -1495,7 +1495,16 @@ test("merges legacy tools with existing permission config", async () => { }) }) -test("permission config preserves key order", async () => { +test("permission config canonicalises known keys first, preserves rest-key insertion order", async () => { + // ConfigPermission.Info is a StructWithRest schema — the decoder reorders + // keys into declaration-order for known permission names (edit, read, + // todowrite, external_directory are declared in `config/permission.ts`), + // followed by rest keys in the user's insertion order. + // + // Rule precedence is NOT affected by this reordering: `Permission.fromConfig` + // sorts wildcards before specifics before iterating. See the + // "fromConfig - specific key beats wildcard regardless of JSON key order" + // test in test/permission/next.test.ts for the behavioural guarantee. 
await using tmp = await tmpdir({ init: async (dir) => { await Filesystem.write( @@ -1523,12 +1532,15 @@ test("permission config preserves key order", async () => { fn: async () => { const config = await load() expect(Object.keys(config.permission!)).toEqual([ - "*", - "edit", - "write", - "external_directory", + // known fields that the user provided, in declaration order from + // config/permission.ts (read, edit, ..., external_directory, todowrite) "read", + "edit", + "external_directory", "todowrite", + // rest keys (not in the known list), in user's insertion order + "*", + "write", "thoughts_*", "reasoning_model_*", "tools_*", diff --git a/packages/opencode/test/permission/next.test.ts b/packages/opencode/test/permission/next.test.ts index 21a9d8400b..372e1be7eb 100644 --- a/packages/opencode/test/permission/next.test.ts +++ b/packages/opencode/test/permission/next.test.ts @@ -128,6 +128,67 @@ test("fromConfig - does not expand tilde in middle of path", () => { expect(result).toEqual([{ permission: "external_directory", pattern: "/some/~/path", action: "allow" }]) }) +// Top-level wildcard-vs-specific precedence semantics. +// +// fromConfig sorts top-level keys so wildcard permissions (containing "*") +// come before specific permissions. Combined with `findLast` in evaluate(), +// this gives the intuitive semantic "specific tool rules override the `*` +// fallback", regardless of the order the user wrote the keys in their JSON. +// +// Sub-pattern order inside a single permission key (e.g. `bash: { "*": "allow", "rm": "deny" }`) +// still depends on insertion order — only top-level keys are sorted. 
+ +test("fromConfig - specific key beats wildcard regardless of JSON key order", () => { + const wildcardFirst = Permission.fromConfig({ "*": "deny", bash: "allow" }) + const specificFirst = Permission.fromConfig({ bash: "allow", "*": "deny" }) + + // Both orderings produce the same ruleset + expect(wildcardFirst).toEqual(specificFirst) + + // And both evaluate bash → allow (bash rule wins over * fallback) + expect(Permission.evaluate("bash", "ls", wildcardFirst).action).toBe("allow") + expect(Permission.evaluate("bash", "ls", specificFirst).action).toBe("allow") +}) + +test("fromConfig - wildcard acts as fallback for permissions with no specific rule", () => { + const ruleset = Permission.fromConfig({ bash: "allow", "*": "ask" }) + expect(Permission.evaluate("edit", "foo.ts", ruleset).action).toBe("ask") + expect(Permission.evaluate("bash", "ls", ruleset).action).toBe("allow") +}) + +test("fromConfig - top-level ordering: wildcards first, specifics after", () => { + const ruleset = Permission.fromConfig({ + bash: "allow", + "*": "ask", + edit: "deny", + "mcp_*": "allow", + }) + // wildcards (* and mcp_*) come before specifics (bash, edit) + const permissions = ruleset.map((r) => r.permission) + expect(permissions.slice(0, 2).sort()).toEqual(["*", "mcp_*"]) + expect(permissions.slice(2)).toEqual(["bash", "edit"]) +}) + +test("fromConfig - sub-pattern insertion order inside a tool key is preserved (only top-level sorts)", () => { + // Sub-patterns within a single tool key use the documented "`*` first, + // specific patterns after" convention (findLast picks specifics). The + // top-level sort must not touch sub-pattern ordering. 
+ const ruleset = Permission.fromConfig({ bash: { "*": "deny", "git *": "allow" } }) + expect(ruleset.map((r) => r.pattern)).toEqual(["*", "git *"]) + // * fallback for unknown commands + expect(Permission.evaluate("bash", "rm foo", ruleset).action).toBe("deny") + // specific pattern wins for git commands (it's last, findLast picks it) + expect(Permission.evaluate("bash", "git status", ruleset).action).toBe("allow") +}) + +test("fromConfig - canonical documented example unchanged", () => { + // Regression guard for the example in docs/permissions.mdx + const ruleset = Permission.fromConfig({ "*": "ask", bash: "allow", edit: "deny" }) + expect(Permission.evaluate("bash", "ls", ruleset).action).toBe("allow") + expect(Permission.evaluate("edit", "foo.ts", ruleset).action).toBe("deny") + expect(Permission.evaluate("read", "foo.ts", ruleset).action).toBe("ask") +}) + test("fromConfig - expands exact tilde to home directory", () => { const result = Permission.fromConfig({ external_directory: { "~": "allow" } }) expect(result).toEqual([{ permission: "external_directory", pattern: os.homedir(), action: "allow" }]) diff --git a/packages/opencode/test/util/effect-zod.test.ts b/packages/opencode/test/util/effect-zod.test.ts index 003945b434..70cd8f0e64 100644 --- a/packages/opencode/test/util/effect-zod.test.ts +++ b/packages/opencode/test/util/effect-zod.test.ts @@ -2,7 +2,7 @@ import { describe, expect, test } from "bun:test" import { Effect, Schema, SchemaGetter } from "effect" import z from "zod" -import { zod, ZodOverride, ZodPreprocess } from "../../src/util/effect-zod" +import { zod, ZodOverride } from "../../src/util/effect-zod" function json(schema: z.ZodTypeAny) { const { $schema: _, ...rest } = z.toJSONSchema(schema) @@ -751,119 +751,4 @@ describe("util.effect-zod", () => { expect(schema.parse({ foo: "hi" })).toEqual({ foo: "hi" }) }) }) - - describe("ZodPreprocess annotation", () => { - test("preprocess runs on raw input before the inner schema parses", () => { - 
// Models the permission.ts __originalKeys pattern: capture the original - // insertion order of a user-provided object BEFORE Schema parsing - // canonicalises the keys. - const preprocess = (val: unknown) => { - if (typeof val === "object" && val !== null && !Array.isArray(val)) { - return { __keys: Object.keys(val), ...(val as Record) } - } - return val - } - const Inner = Schema.Struct({ - __keys: Schema.optional(Schema.mutable(Schema.Array(Schema.String))), - a: Schema.optional(Schema.String), - b: Schema.optional(Schema.String), - }).annotate({ [ZodPreprocess]: preprocess }) - - const schema = zod(Inner) - const parsed = schema.parse({ b: "1", a: "2" }) as { - __keys?: string[] - a?: string - b?: string - } - expect(parsed.__keys).toEqual(["b", "a"]) - expect(parsed.a).toBe("2") - expect(parsed.b).toBe("1") - }) - - test("preprocess does not transform already-shaped input", () => { - // When the user passes an object that already has __keys, preprocess - // returns it unchanged because spreading preserves any existing key. - const preprocess = (val: unknown) => { - if (typeof val === "object" && val !== null && !("__keys" in val)) { - return { __keys: Object.keys(val), ...(val as Record) } - } - return val - } - const Inner = Schema.Struct({ - __keys: Schema.optional(Schema.mutable(Schema.Array(Schema.String))), - a: Schema.optional(Schema.String), - }).annotate({ [ZodPreprocess]: preprocess }) - - const schema = zod(Inner) - const parsed = schema.parse({ __keys: ["existing"], a: "hi" }) as { - __keys?: string[] - a?: string - } - expect(parsed.__keys).toEqual(["existing"]) - }) - - test("preprocess composes with a union (either object or string)", () => { - // Mirrors permission.ts exactly: input can be either an object (with - // preprocess injecting metadata) or a plain string action. 
- const Action = Schema.Literals(["ask", "allow", "deny"]) - const Obj = Schema.Struct({ - __keys: Schema.optional(Schema.mutable(Schema.Array(Schema.String))), - read: Schema.optional(Action), - write: Schema.optional(Action), - }) - const preprocess = (val: unknown) => { - if (typeof val === "object" && val !== null && !Array.isArray(val)) { - return { __keys: Object.keys(val), ...(val as Record) } - } - return val - } - const Inner = Schema.Union([Obj, Action]).annotate({ [ZodPreprocess]: preprocess }) - const schema = zod(Inner) - - // String branch — passes through preprocess unchanged - expect(schema.parse("allow")).toBe("allow") - - // Object branch — __keys injected, preserves order - const parsed = schema.parse({ write: "allow", read: "deny" }) as { - __keys?: string[] - read?: string - write?: string - } - expect(parsed.__keys).toEqual(["write", "read"]) - expect(parsed.write).toBe("allow") - expect(parsed.read).toBe("deny") - }) - - test("JSON Schema output comes from the inner schema — preprocess is runtime-only", () => { - const Inner = Schema.Struct({ - a: Schema.optional(Schema.String), - b: Schema.optional(Schema.Number), - }).annotate({ [ZodPreprocess]: (v: unknown) => v }) - const shape = json(zod(Inner)) as any - expect(shape.type).toBe("object") - expect(shape.properties.a.type).toBe("string") - expect(shape.properties.b.type).toBe("number") - }) - - test("identifier + description propagate through the preprocess wrapper", () => { - const Inner = Schema.Struct({ - x: Schema.optional(Schema.String), - }).annotate({ - identifier: "WithPreproc", - description: "A schema with preprocess", - [ZodPreprocess]: (v: unknown) => v, - }) - const schema = zod(Inner) - expect(schema.meta()?.ref).toBe("WithPreproc") - expect(schema.meta()?.description).toBe("A schema with preprocess") - }) - - test("preprocess inside a struct field applies only to that field", () => { - const Inner = Schema.String.annotate({ - [ZodPreprocess]: (v: unknown) => (typeof v === 
"number" ? String(v) : v), - }) - const schema = zod(Schema.Struct({ name: Inner, raw: Schema.Number })) - expect(schema.parse({ name: 42, raw: 7 })).toEqual({ name: "42", raw: 7 }) - }) - }) }) diff --git a/packages/sdk/js/src/v2/gen/types.gen.ts b/packages/sdk/js/src/v2/gen/types.gen.ts index d14fab1919..1fcab2eda6 100644 --- a/packages/sdk/js/src/v2/gen/types.gen.ts +++ b/packages/sdk/js/src/v2/gen/types.gen.ts @@ -1205,8 +1205,8 @@ export type PermissionObjectConfig = { export type PermissionRuleConfig = PermissionActionConfig | PermissionObjectConfig export type PermissionConfig = + | PermissionActionConfig | { - __originalKeys?: Array read?: PermissionRuleConfig edit?: PermissionRuleConfig glob?: PermissionRuleConfig @@ -1223,9 +1223,8 @@ export type PermissionConfig = lsp?: PermissionRuleConfig doom_loop?: PermissionActionConfig skill?: PermissionRuleConfig - [key: string]: PermissionRuleConfig | Array | PermissionActionConfig | undefined + [key: string]: PermissionRuleConfig | PermissionActionConfig | undefined } - | PermissionActionConfig export type AgentConfig = { model?: string From b1c3095edd901a74aa3ed94b5b6bffe6a4217b24 Mon Sep 17 00:00:00 2001 From: "opencode-agent[bot]" Date: Tue, 21 Apr 2026 21:34:17 +0000 Subject: [PATCH 37/73] chore: generate --- packages/sdk/openapi.json | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/packages/sdk/openapi.json b/packages/sdk/openapi.json index dbd85874fc..d9954d915f 100644 --- a/packages/sdk/openapi.json +++ b/packages/sdk/openapi.json @@ -10980,15 +10980,12 @@ }, "PermissionConfig": { "anyOf": [ + { + "$ref": "#/components/schemas/PermissionActionConfig" + }, { "type": "object", "properties": { - "__originalKeys": { - "type": "array", - "items": { - "type": "string" - } - }, "read": { "$ref": "#/components/schemas/PermissionRuleConfig" }, @@ -11041,9 +11038,6 @@ "additionalProperties": { "$ref": "#/components/schemas/PermissionRuleConfig" } - }, - { - "$ref": 
"#/components/schemas/PermissionActionConfig" } ] }, From 0bcf734a67e3b46b26f9c68d1800086365ed67e7 Mon Sep 17 00:00:00 2001 From: Kit Langton Date: Tue, 21 Apr 2026 17:37:27 -0400 Subject: [PATCH 38/73] migrate Snapshot schemas to Effect Schema (#23747) --- .../src/server/routes/instance/session.ts | 2 +- packages/opencode/src/session/message-v2.ts | 2 +- packages/opencode/src/session/session.ts | 4 +-- packages/opencode/src/snapshot/index.ts | 36 +++++++++---------- packages/opencode/src/tool/edit.ts | 14 ++++---- 5 files changed, 30 insertions(+), 28 deletions(-) diff --git a/packages/opencode/src/server/routes/instance/session.ts b/packages/opencode/src/server/routes/instance/session.ts index bf713935b0..a46c2f3bf3 100644 --- a/packages/opencode/src/server/routes/instance/session.ts +++ b/packages/opencode/src/server/routes/instance/session.ts @@ -471,7 +471,7 @@ export const SessionRoutes = lazy(() => description: "Successfully retrieved diff", content: { "application/json": { - schema: resolver(Snapshot.FileDiff.array()), + schema: resolver(Snapshot.FileDiff.zod.array()), }, }, }, diff --git a/packages/opencode/src/session/message-v2.ts b/packages/opencode/src/session/message-v2.ts index 980e5e2c6c..83477d12ba 100644 --- a/packages/opencode/src/session/message-v2.ts +++ b/packages/opencode/src/session/message-v2.ts @@ -366,7 +366,7 @@ export const User = Base.extend({ .object({ title: z.string().optional(), body: z.string().optional(), - diffs: Snapshot.FileDiff.array(), + diffs: Snapshot.FileDiff.zod.array(), }) .optional(), agent: z.string(), diff --git a/packages/opencode/src/session/session.ts b/packages/opencode/src/session/session.ts index ba144da9f0..6e9fb5c5d8 100644 --- a/packages/opencode/src/session/session.ts +++ b/packages/opencode/src/session/session.ts @@ -127,7 +127,7 @@ export const Info = z additions: z.number(), deletions: z.number(), files: z.number(), - diffs: Snapshot.FileDiff.array().optional(), + diffs: 
Snapshot.FileDiff.zod.array().optional(), }) .optional(), share: z @@ -239,7 +239,7 @@ export const Event = { "session.diff", z.object({ sessionID: SessionID.zod, - diff: Snapshot.FileDiff.array(), + diff: Snapshot.FileDiff.zod.array(), }), ), Error: BusEvent.define( diff --git a/packages/opencode/src/snapshot/index.ts b/packages/opencode/src/snapshot/index.ts index d38034e998..ddc4cb29ea 100644 --- a/packages/opencode/src/snapshot/index.ts +++ b/packages/opencode/src/snapshot/index.ts @@ -1,4 +1,4 @@ -import { Cause, Duration, Effect, Layer, Schedule, Semaphore, Context, Stream } from "effect" +import { Cause, Duration, Effect, Layer, Schedule, Schema, Semaphore, Context, Stream } from "effect" import { ChildProcess, ChildProcessSpawner } from "effect/unstable/process" import { formatPatch, structuredPatch } from "diff" import path from "path" @@ -10,25 +10,25 @@ import { Hash } from "@opencode-ai/shared/util/hash" import { Config } from "../config" import { Global } from "../global" import { Log } from "../util" +import { withStatics } from "@/util/schema" +import { zod } from "@/util/effect-zod" -export const Patch = z.object({ - hash: z.string(), - files: z.string().array(), +export const Patch = Schema.Struct({ + hash: Schema.String, + files: Schema.mutable(Schema.Array(Schema.String)), +}).pipe(withStatics((s) => ({ zod: zod(s) }))) +export type Patch = typeof Patch.Type + +export const FileDiff = Schema.Struct({ + file: Schema.String, + patch: Schema.String, + additions: Schema.Number, + deletions: Schema.Number, + status: Schema.optional(Schema.Literals(["added", "deleted", "modified"])), }) -export type Patch = z.infer - -export const FileDiff = z - .object({ - file: z.string(), - patch: z.string(), - additions: z.number(), - deletions: z.number(), - status: z.enum(["added", "deleted", "modified"]).optional(), - }) - .meta({ - ref: "SnapshotFileDiff", - }) -export type FileDiff = z.infer + .annotate({ identifier: "SnapshotFileDiff" }) + 
.pipe(withStatics((s) => ({ zod: zod(s) }))) +export type FileDiff = typeof FileDiff.Type const log = Log.create({ service: "snapshot" }) const prune = "7.days" diff --git a/packages/opencode/src/tool/edit.ts b/packages/opencode/src/tool/edit.ts index 2f53cd1949..2c6c2c1308 100644 --- a/packages/opencode/src/tool/edit.ts +++ b/packages/opencode/src/tool/edit.ts @@ -153,15 +153,17 @@ export const EditTool = Tool.define( }).pipe(Effect.orDie), ) + let additions = 0 + let deletions = 0 + for (const change of diffLines(contentOld, contentNew)) { + if (change.added) additions += change.count || 0 + if (change.removed) deletions += change.count || 0 + } const filediff: Snapshot.FileDiff = { file: filePath, patch: diff, - additions: 0, - deletions: 0, - } - for (const change of diffLines(contentOld, contentNew)) { - if (change.added) filediff.additions += change.count || 0 - if (change.removed) filediff.deletions += change.count || 0 + additions, + deletions, } yield* ctx.metadata({ From d6dea3f3e00598a734d2fb61dbc9d74ccbd1781c Mon Sep 17 00:00:00 2001 From: Kit Langton Date: Tue, 21 Apr 2026 17:40:54 -0400 Subject: [PATCH 39/73] chore(core): clean up after ConfigPermission Effect Schema migration (#23749) --- packages/opencode/specs/effect/schema.md | 16 ++-------------- packages/opencode/src/config/agent.ts | 2 +- 2 files changed, 3 insertions(+), 15 deletions(-) diff --git a/packages/opencode/specs/effect/schema.md b/packages/opencode/specs/effect/schema.md index 6deea49655..df3cc0881d 100644 --- a/packages/opencode/specs/effect/schema.md +++ b/packages/opencode/specs/effect/schema.md @@ -97,7 +97,7 @@ creating a parallel schema source of truth. ## Escape hatches -The walker in `@/util/effect-zod` exposes three explicit escape hatches for +The walker in `@/util/effect-zod` exposes two explicit escape hatches for cases the pure-Schema path cannot express. 
Each one stays in the codebase only as long as its upstream or local dependency requires it — inline comments document when each can be deleted. @@ -109,19 +109,7 @@ Replaces the entire derivation with a hand-crafted zod schema. Used when: - the target carries external `$ref` metadata (e.g. `config/model-id.ts` points at `https://models.dev/...`) - the target is a zod-only schema that cannot yet be expressed as Schema - (e.g. `ConfigAgent.Info`, `ConfigPermission.Info`, `Log.Level`) - -### `ZodPreprocess` annotation - -Wraps the derived zod schema with `z.preprocess(fn, inner)`. Used by -`config/permission.ts` to inject `__originalKeys` before parsing, because -`Schema.StructWithRest` canonicalises output (known fields first, catchall -after) and destroys the user's original property order — which permission -rule precedence depends on. - -Tracked upstream as `effect:core/wlh553`: "Schema: add preserveInputOrder -(or pre-parse hook) for open structs." Once that lands, `ZodPreprocess` and -the `__originalKeys` hack can both be deleted. + (e.g. 
`ConfigAgent.Info`, `Log.Level`) ### Local `DeepMutable` in `config/config.ts` diff --git a/packages/opencode/src/config/agent.ts b/packages/opencode/src/config/agent.ts index 5a91a38995..85021407c7 100644 --- a/packages/opencode/src/config/agent.ts +++ b/packages/opencode/src/config/agent.ts @@ -3,7 +3,7 @@ export * as ConfigAgent from "./agent" import { Schema } from "effect" import z from "zod" import { Bus } from "@/bus" -import { zod, ZodOverride } from "@/util/effect-zod" +import { zod } from "@/util/effect-zod" import { Log } from "../util" import { NamedError } from "@opencode-ai/shared/util/error" import { Glob } from "@opencode-ai/shared/util/glob" From df0c1f649c306bd1c125c0b532bd17b76bf888c0 Mon Sep 17 00:00:00 2001 From: Kit Langton Date: Tue, 21 Apr 2026 17:47:50 -0400 Subject: [PATCH 40/73] refactor(core): migrate MessageV2 tool state schemas to Effect Schema (#23752) --- packages/opencode/src/session/message-v2.ts | 138 ++++++++++---------- 1 file changed, 72 insertions(+), 66 deletions(-) diff --git a/packages/opencode/src/session/message-v2.ts b/packages/opencode/src/session/message-v2.ts index 83477d12ba..04cb15ef8d 100644 --- a/packages/opencode/src/session/message-v2.ts +++ b/packages/opencode/src/session/message-v2.ts @@ -15,8 +15,9 @@ import { isMedia } from "@/util/media" import type { SystemError } from "bun" import type { Provider } from "@/provider" import { ModelID, ProviderID } from "@/provider/schema" -import { Effect, Schema } from "effect" -import { zod } from "@/util/effect-zod" +import { Effect, Schema, Types } from "effect" +import { zod, ZodOverride } from "@/util/effect-zod" +import { withStatics } from "@/util/schema" import { EffectLogger } from "@/effect" /** Error shape thrown by Bun's fetch() when gzip/br decompression fails mid-stream */ @@ -272,79 +273,84 @@ export const StepFinishPart = PartBase.extend({ }) export type StepFinishPart = z.infer -export const ToolStatePending = z - .object({ - status: z.literal("pending"), 
- input: z.record(z.string(), z.any()), - raw: z.string(), - }) - .meta({ - ref: "ToolStatePending", - }) +export const ToolStatePending = Schema.Struct({ + status: Schema.Literal("pending"), + input: Schema.Record(Schema.String, Schema.Any), + raw: Schema.String, +}) + .annotate({ identifier: "ToolStatePending" }) + .pipe(withStatics((s) => ({ zod: zod(s) }))) +export type ToolStatePending = Types.DeepMutable> -export type ToolStatePending = z.infer +export const ToolStateRunning = Schema.Struct({ + status: Schema.Literal("running"), + input: Schema.Record(Schema.String, Schema.Any), + title: Schema.optional(Schema.String), + metadata: Schema.optional(Schema.Record(Schema.String, Schema.Any)), + time: Schema.Struct({ + start: Schema.Number, + }), +}) + .annotate({ identifier: "ToolStateRunning" }) + .pipe(withStatics((s) => ({ zod: zod(s) }))) +export type ToolStateRunning = Types.DeepMutable> -export const ToolStateRunning = z - .object({ - status: z.literal("running"), - input: z.record(z.string(), z.any()), - title: z.string().optional(), - metadata: z.record(z.string(), z.any()).optional(), - time: z.object({ - start: z.number(), - }), - }) - .meta({ - ref: "ToolStateRunning", - }) -export type ToolStateRunning = z.infer +export const ToolStateCompleted = Schema.Struct({ + status: Schema.Literal("completed"), + input: Schema.Record(Schema.String, Schema.Any), + output: Schema.String, + title: Schema.String, + metadata: Schema.Record(Schema.String, Schema.Any), + time: Schema.Struct({ + start: Schema.Number, + end: Schema.Number, + compacted: Schema.optional(Schema.Number), + }), + // FilePart is still Zod-first this slice; bridge via ZodOverride so the + // derived Zod + JSON Schema still emit `$ref: FilePart` array items. 
+ attachments: Schema.optional(Schema.Any.annotate({ [ZodOverride]: FilePart.array() })), +}) + .annotate({ identifier: "ToolStateCompleted" }) + .pipe(withStatics((s) => ({ zod: zod(s) }))) +export type ToolStateCompleted = Omit< + Types.DeepMutable>, + "attachments" +> & { + attachments?: FilePart[] +} -export const ToolStateCompleted = z - .object({ - status: z.literal("completed"), - input: z.record(z.string(), z.any()), - output: z.string(), - title: z.string(), - metadata: z.record(z.string(), z.any()), - time: z.object({ - start: z.number(), - end: z.number(), - compacted: z.number().optional(), - }), - attachments: FilePart.array().optional(), - }) - .meta({ - ref: "ToolStateCompleted", - }) -export type ToolStateCompleted = z.infer +export const ToolStateError = Schema.Struct({ + status: Schema.Literal("error"), + input: Schema.Record(Schema.String, Schema.Any), + error: Schema.String, + metadata: Schema.optional(Schema.Record(Schema.String, Schema.Any)), + time: Schema.Struct({ + start: Schema.Number, + end: Schema.Number, + }), +}) + .annotate({ identifier: "ToolStateError" }) + .pipe(withStatics((s) => ({ zod: zod(s) }))) +export type ToolStateError = Types.DeepMutable> -export const ToolStateError = z - .object({ - status: z.literal("error"), - input: z.record(z.string(), z.any()), - error: z.string(), - metadata: z.record(z.string(), z.any()).optional(), - time: z.object({ - start: z.number(), - end: z.number(), - }), - }) - .meta({ - ref: "ToolStateError", - }) -export type ToolStateError = z.infer - -export const ToolState = z - .discriminatedUnion("status", [ToolStatePending, ToolStateRunning, ToolStateCompleted, ToolStateError]) - .meta({ - ref: "ToolState", - }) +const _ToolState = Schema.Union([ToolStatePending, ToolStateRunning, ToolStateCompleted, ToolStateError]).annotate({ + discriminator: "status", + identifier: "ToolState", +}) +// Cast the derived zod so downstream z.infer sees the same mutable shape that +// our exported TS types expose 
(the pre-migration Zod inferences were mutable). +export const ToolState = Object.assign(_ToolState, { + zod: zod(_ToolState) as unknown as z.ZodType< + ToolStatePending | ToolStateRunning | ToolStateCompleted | ToolStateError + >, +}) +export type ToolState = ToolStatePending | ToolStateRunning | ToolStateCompleted | ToolStateError export const ToolPart = PartBase.extend({ type: z.literal("tool"), callID: z.string(), tool: z.string(), - state: ToolState, + state: ToolState.zod, metadata: z.record(z.string(), z.any()).optional(), }).meta({ ref: "ToolPart", From 2da6d860e0e2c5309a0030a5591c8218fffd6a99 Mon Sep 17 00:00:00 2001 From: Kit Langton Date: Tue, 21 Apr 2026 17:49:24 -0400 Subject: [PATCH 41/73] refactor(core): derive provider schema .zod via effect-zod walker (#23753) --- packages/opencode/specs/effect/schema.md | 2 +- packages/opencode/src/provider/schema.ts | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/packages/opencode/specs/effect/schema.md b/packages/opencode/specs/effect/schema.md index df3cc0881d..3ed0b825d5 100644 --- a/packages/opencode/specs/effect/schema.md +++ b/packages/opencode/specs/effect/schema.md @@ -162,7 +162,7 @@ schema module with a clear domain. 
- [ ] `src/control-plane/schema.ts` - [ ] `src/permission/schema.ts` - [ ] `src/project/schema.ts` -- [ ] `src/provider/schema.ts` +- [x] `src/provider/schema.ts` - [ ] `src/pty/schema.ts` - [ ] `src/question/schema.ts` - [ ] `src/session/schema.ts` diff --git a/packages/opencode/src/provider/schema.ts b/packages/opencode/src/provider/schema.ts index 702616018a..ea3cac3424 100644 --- a/packages/opencode/src/provider/schema.ts +++ b/packages/opencode/src/provider/schema.ts @@ -1,6 +1,6 @@ import { Schema } from "effect" -import z from "zod" +import { zod } from "@/util/effect-zod" import { withStatics } from "@/util/schema" const providerIdSchema = Schema.String.pipe(Schema.brand("ProviderID")) @@ -9,7 +9,7 @@ export type ProviderID = typeof providerIdSchema.Type export const ProviderID = providerIdSchema.pipe( withStatics((schema: typeof providerIdSchema) => ({ - zod: z.string().pipe(z.custom()), + zod: zod(schema), // Well-known providers opencode: schema.make("opencode"), anthropic: schema.make("anthropic"), @@ -30,7 +30,7 @@ const modelIdSchema = Schema.String.pipe(Schema.brand("ModelID")) export type ModelID = typeof modelIdSchema.Type export const ModelID = modelIdSchema.pipe( - withStatics((_schema: typeof modelIdSchema) => ({ - zod: z.string().pipe(z.custom()), + withStatics((schema: typeof modelIdSchema) => ({ + zod: zod(schema), })), ) From 5e9fb3cc9045f9ef9a5c33ff6c295cf0438da39b Mon Sep 17 00:00:00 2001 From: Mathews Bryan <37884221+jmbryan4@users.noreply.github.com> Date: Tue, 21 Apr 2026 17:03:09 -0500 Subject: [PATCH 42/73] feat: replace csharp-ls with roslyn-language-server (#14463) Co-authored-by: Mathews --- packages/opencode/src/lsp/server.ts | 34 +++++++++++++++++++---------- 1 file changed, 22 insertions(+), 12 deletions(-) diff --git a/packages/opencode/src/lsp/server.ts b/packages/opencode/src/lsp/server.ts index 9182368063..a07e336864 100644 --- a/packages/opencode/src/lsp/server.ts +++ b/packages/opencode/src/lsp/server.ts @@ -705,32 +705,42 
@@ export const CSharp: Info = { root: NearestRoot([".slnx", ".sln", ".csproj", "global.json"]), extensions: [".cs"], async spawn(root) { - let bin = which("csharp-ls") + let bin = which("roslyn-language-server") if (!bin) { if (!which("dotnet")) { - log.error(".NET SDK is required to install csharp-ls") + log.error(".NET SDK is required to install roslyn-language-server") return } if (Flag.OPENCODE_DISABLE_LSP_DOWNLOAD) return - log.info("installing csharp-ls via dotnet tool") - const proc = Process.spawn(["dotnet", "tool", "install", "csharp-ls", "--tool-path", Global.Path.bin], { - stdout: "pipe", - stderr: "pipe", - stdin: "pipe", - }) + log.info("installing roslyn-language-server via dotnet tool") + const proc = Process.spawn( + [ + "dotnet", + "tool", + "install", + "--global", + "roslyn-language-server", + "--prerelease", + ], + { + stdout: "pipe", + stderr: "pipe", + stdin: "pipe", + }, + ) const exit = await proc.exited if (exit !== 0) { - log.error("Failed to install csharp-ls") + log.error("Failed to install roslyn-language-server") return } - bin = path.join(Global.Path.bin, "csharp-ls" + (process.platform === "win32" ? ".exe" : "")) - log.info(`installed csharp-ls`, { bin }) + bin = path.join(Global.Path.bin, "roslyn-language-server" + (process.platform === "win32" ? 
".exe" : "")) + log.info(`installed roslyn-language-server`, { bin }) } return { - process: spawn(bin, { + process: spawn(bin, ["--stdio", "--autoLoadProjects"], { cwd: root, }), } From d2181e9273bfcd9727a387527b25aa017ca15410 Mon Sep 17 00:00:00 2001 From: "opencode-agent[bot]" Date: Tue, 21 Apr 2026 22:04:16 +0000 Subject: [PATCH 43/73] chore: generate --- packages/opencode/src/lsp/server.ts | 20 +++++--------------- 1 file changed, 5 insertions(+), 15 deletions(-) diff --git a/packages/opencode/src/lsp/server.ts b/packages/opencode/src/lsp/server.ts index a07e336864..8bb70a5116 100644 --- a/packages/opencode/src/lsp/server.ts +++ b/packages/opencode/src/lsp/server.ts @@ -714,21 +714,11 @@ export const CSharp: Info = { if (Flag.OPENCODE_DISABLE_LSP_DOWNLOAD) return log.info("installing roslyn-language-server via dotnet tool") - const proc = Process.spawn( - [ - "dotnet", - "tool", - "install", - "--global", - "roslyn-language-server", - "--prerelease", - ], - { - stdout: "pipe", - stderr: "pipe", - stdin: "pipe", - }, - ) + const proc = Process.spawn(["dotnet", "tool", "install", "--global", "roslyn-language-server", "--prerelease"], { + stdout: "pipe", + stderr: "pipe", + stdin: "pipe", + }) const exit = await proc.exited if (exit !== 0) { log.error("Failed to install roslyn-language-server") From 8043cfa68dcf97547ede3e26a9325af55583e1e4 Mon Sep 17 00:00:00 2001 From: NN708 Date: Wed, 22 Apr 2026 07:19:04 +0800 Subject: [PATCH 44/73] fix(desktop): update desktop file and MetaInfo file (#14933) --- packages/desktop/src-tauri/release/appstream.metainfo.xml | 5 ++++- packages/desktop/src-tauri/tauri.conf.json | 1 + 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/packages/desktop/src-tauri/release/appstream.metainfo.xml b/packages/desktop/src-tauri/release/appstream.metainfo.xml index 16aa2bfcb2..d7dd49081b 100644 --- a/packages/desktop/src-tauri/release/appstream.metainfo.xml +++ b/packages/desktop/src-tauri/release/appstream.metainfo.xml @@ -28,11 
+28,14 @@ - https://opencode.ai/docs/_astro/screenshot.Bs5D4atL_ZvsvFu.webp + https://raw.githubusercontent.com/anomalyco/opencode/b75d4d1c5ec449585d515c756fc81f080a157a9a/packages/web/src/assets/lander/screenshot.png + + https://github.com/anomalyco/opencode/releases/tag/v1.4.0 + https://github.com/anomalyco/opencode/releases/tag/v1.0.223 diff --git a/packages/desktop/src-tauri/tauri.conf.json b/packages/desktop/src-tauri/tauri.conf.json index 265044625b..c4f125a273 100644 --- a/packages/desktop/src-tauri/tauri.conf.json +++ b/packages/desktop/src-tauri/tauri.conf.json @@ -32,6 +32,7 @@ "icons/dev/icon.ico" ], "active": true, + "category": "DeveloperTool", "targets": ["deb", "rpm", "dmg", "nsis", "app"], "externalBin": ["sidecars/opencode-cli"], "linux": { From ad7ae7353fd5aeb0800120b60667e1b84edd8e98 Mon Sep 17 00:00:00 2001 From: Kit Langton Date: Tue, 21 Apr 2026 22:51:18 -0400 Subject: [PATCH 45/73] refactor(core): derive all schema.ts leaves' .zod via effect-zod walker (#23754) --- packages/opencode/specs/effect/schema.md | 16 ++++++++-------- packages/opencode/src/control-plane/schema.ts | 5 ++--- packages/opencode/src/permission/schema.ts | 5 ++--- packages/opencode/src/project/schema.ts | 4 ++-- packages/opencode/src/pty/schema.ts | 5 ++--- packages/opencode/src/question/schema.ts | 5 ++--- packages/opencode/src/session/schema.ts | 9 ++++----- packages/opencode/src/sync/schema.ts | 5 ++--- packages/opencode/src/tool/schema.ts | 5 ++--- 9 files changed, 26 insertions(+), 33 deletions(-) diff --git a/packages/opencode/specs/effect/schema.md b/packages/opencode/specs/effect/schema.md index 3ed0b825d5..9ff6859cee 100644 --- a/packages/opencode/specs/effect/schema.md +++ b/packages/opencode/specs/effect/schema.md @@ -159,15 +159,15 @@ Schema at source. These are the highest-priority next targets. Each is a small, self-contained schema module with a clear domain. 
-- [ ] `src/control-plane/schema.ts` -- [ ] `src/permission/schema.ts` -- [ ] `src/project/schema.ts` +- [x] `src/control-plane/schema.ts` +- [x] `src/permission/schema.ts` +- [x] `src/project/schema.ts` - [x] `src/provider/schema.ts` -- [ ] `src/pty/schema.ts` -- [ ] `src/question/schema.ts` -- [ ] `src/session/schema.ts` -- [ ] `src/sync/schema.ts` -- [ ] `src/tool/schema.ts` +- [x] `src/pty/schema.ts` +- [x] `src/question/schema.ts` +- [x] `src/session/schema.ts` +- [x] `src/sync/schema.ts` +- [x] `src/tool/schema.ts` ### Session domain diff --git a/packages/opencode/src/control-plane/schema.ts b/packages/opencode/src/control-plane/schema.ts index 4c7ced010d..5a0850a249 100644 --- a/packages/opencode/src/control-plane/schema.ts +++ b/packages/opencode/src/control-plane/schema.ts @@ -1,8 +1,7 @@ import { Schema } from "effect" -import z from "zod" import { Identifier } from "@/id/id" -import { ZodOverride } from "@/util/effect-zod" +import { zod, ZodOverride } from "@/util/effect-zod" import { withStatics } from "@/util/schema" const workspaceIdSchema = Schema.String.annotate({ [ZodOverride]: Identifier.schema("workspace") }).pipe( @@ -14,6 +13,6 @@ export type WorkspaceID = typeof workspaceIdSchema.Type export const WorkspaceID = workspaceIdSchema.pipe( withStatics((schema: typeof workspaceIdSchema) => ({ ascending: (id?: string) => schema.make(Identifier.ascending("workspace", id)), - zod: Identifier.schema("workspace").pipe(z.custom()), + zod: zod(schema), })), ) diff --git a/packages/opencode/src/permission/schema.ts b/packages/opencode/src/permission/schema.ts index 6ac9389a58..4eddc6a47a 100644 --- a/packages/opencode/src/permission/schema.ts +++ b/packages/opencode/src/permission/schema.ts @@ -1,8 +1,7 @@ import { Schema } from "effect" -import z from "zod" import { Identifier } from "@/id/id" -import { ZodOverride } from "@/util/effect-zod" +import { zod, ZodOverride } from "@/util/effect-zod" import { Newtype } from "@/util/schema" export class 
PermissionID extends Newtype()( @@ -13,5 +12,5 @@ export class PermissionID extends Newtype()( return this.make(Identifier.ascending("permission", id)) } - static readonly zod = Identifier.schema("permission") as unknown as z.ZodType + static readonly zod = zod(this) } diff --git a/packages/opencode/src/project/schema.ts b/packages/opencode/src/project/schema.ts index d10c82e2c3..7708b8de1e 100644 --- a/packages/opencode/src/project/schema.ts +++ b/packages/opencode/src/project/schema.ts @@ -1,6 +1,6 @@ import { Schema } from "effect" -import z from "zod" +import { zod } from "@/util/effect-zod" import { withStatics } from "@/util/schema" const projectIdSchema = Schema.String.pipe(Schema.brand("ProjectID")) @@ -10,6 +10,6 @@ export type ProjectID = typeof projectIdSchema.Type export const ProjectID = projectIdSchema.pipe( withStatics((schema: typeof projectIdSchema) => ({ global: schema.make("global"), - zod: z.string().pipe(z.custom()), + zod: zod(schema), })), ) diff --git a/packages/opencode/src/pty/schema.ts b/packages/opencode/src/pty/schema.ts index 0758fe8206..6b4d779f26 100644 --- a/packages/opencode/src/pty/schema.ts +++ b/packages/opencode/src/pty/schema.ts @@ -1,8 +1,7 @@ import { Schema } from "effect" -import z from "zod" import { Identifier } from "@/id/id" -import { ZodOverride } from "@/util/effect-zod" +import { zod, ZodOverride } from "@/util/effect-zod" import { withStatics } from "@/util/schema" const ptyIdSchema = Schema.String.annotate({ [ZodOverride]: Identifier.schema("pty") }).pipe(Schema.brand("PtyID")) @@ -12,6 +11,6 @@ export type PtyID = typeof ptyIdSchema.Type export const PtyID = ptyIdSchema.pipe( withStatics((schema: typeof ptyIdSchema) => ({ ascending: (id?: string) => schema.make(Identifier.ascending("pty", id)), - zod: Identifier.schema("pty").pipe(z.custom()), + zod: zod(schema), })), ) diff --git a/packages/opencode/src/question/schema.ts b/packages/opencode/src/question/schema.ts index 41186161d0..f7a0e096a3 100644 --- 
a/packages/opencode/src/question/schema.ts +++ b/packages/opencode/src/question/schema.ts @@ -1,8 +1,7 @@ import { Schema } from "effect" -import z from "zod" import { Identifier } from "@/id/id" -import { ZodOverride } from "@/util/effect-zod" +import { zod, ZodOverride } from "@/util/effect-zod" import { Newtype } from "@/util/schema" export class QuestionID extends Newtype()( @@ -13,5 +12,5 @@ export class QuestionID extends Newtype()( return this.make(Identifier.ascending("question", id)) } - static readonly zod = Identifier.schema("question") as unknown as z.ZodType + static readonly zod = zod(this) } diff --git a/packages/opencode/src/session/schema.ts b/packages/opencode/src/session/schema.ts index efed280c98..487cbcd34a 100644 --- a/packages/opencode/src/session/schema.ts +++ b/packages/opencode/src/session/schema.ts @@ -1,15 +1,14 @@ import { Schema } from "effect" -import z from "zod" import { Identifier } from "@/id/id" -import { ZodOverride } from "@/util/effect-zod" +import { zod, ZodOverride } from "@/util/effect-zod" import { withStatics } from "@/util/schema" export const SessionID = Schema.String.annotate({ [ZodOverride]: Identifier.schema("session") }).pipe( Schema.brand("SessionID"), withStatics((s) => ({ descending: (id?: string) => s.make(Identifier.descending("session", id)), - zod: Identifier.schema("session").pipe(z.custom>()), + zod: zod(s), })), ) @@ -19,7 +18,7 @@ export const MessageID = Schema.String.annotate({ [ZodOverride]: Identifier.sche Schema.brand("MessageID"), withStatics((s) => ({ ascending: (id?: string) => s.make(Identifier.ascending("message", id)), - zod: Identifier.schema("message").pipe(z.custom>()), + zod: zod(s), })), ) @@ -29,7 +28,7 @@ export const PartID = Schema.String.annotate({ [ZodOverride]: Identifier.schema( Schema.brand("PartID"), withStatics((s) => ({ ascending: (id?: string) => s.make(Identifier.ascending("part", id)), - zod: Identifier.schema("part").pipe(z.custom>()), + zod: zod(s), })), ) diff --git 
a/packages/opencode/src/sync/schema.ts b/packages/opencode/src/sync/schema.ts index 37cdbd718f..e714b86ae0 100644 --- a/packages/opencode/src/sync/schema.ts +++ b/packages/opencode/src/sync/schema.ts @@ -1,14 +1,13 @@ import { Schema } from "effect" -import z from "zod" import { Identifier } from "@/id/id" -import { ZodOverride } from "@/util/effect-zod" +import { zod, ZodOverride } from "@/util/effect-zod" import { withStatics } from "@/util/schema" export const EventID = Schema.String.annotate({ [ZodOverride]: Identifier.schema("event") }).pipe( Schema.brand("EventID"), withStatics((s) => ({ ascending: (id?: string) => s.make(Identifier.ascending("event", id)), - zod: Identifier.schema("event").pipe(z.custom>()), + zod: zod(s), })), ) diff --git a/packages/opencode/src/tool/schema.ts b/packages/opencode/src/tool/schema.ts index ac41fd1606..9ce7bece2b 100644 --- a/packages/opencode/src/tool/schema.ts +++ b/packages/opencode/src/tool/schema.ts @@ -1,8 +1,7 @@ import { Schema } from "effect" -import z from "zod" import { Identifier } from "@/id/id" -import { ZodOverride } from "@/util/effect-zod" +import { zod, ZodOverride } from "@/util/effect-zod" import { withStatics } from "@/util/schema" const toolIdSchema = Schema.String.annotate({ [ZodOverride]: Identifier.schema("tool") }).pipe(Schema.brand("ToolID")) @@ -12,6 +11,6 @@ export type ToolID = typeof toolIdSchema.Type export const ToolID = toolIdSchema.pipe( withStatics((schema: typeof toolIdSchema) => ({ ascending: (id?: string) => schema.make(Identifier.ascending("tool", id)), - zod: Identifier.schema("tool").pipe(z.custom()), + zod: zod(schema), })), ) From 628102ad04f8acfadd93e112ca6592e2f7a3d697 Mon Sep 17 00:00:00 2001 From: Frank Date: Tue, 21 Apr 2026 23:13:42 -0400 Subject: [PATCH 46/73] zen: handle alibaba format --- .../console/app/src/routes/zen/util/provider/anthropic.ts | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git 
a/packages/console/app/src/routes/zen/util/provider/anthropic.ts b/packages/console/app/src/routes/zen/util/provider/anthropic.ts index 0f6f11da78..de49cddc1b 100644 --- a/packages/console/app/src/routes/zen/util/provider/anthropic.ts +++ b/packages/console/app/src/routes/zen/util/provider/anthropic.ts @@ -148,11 +148,13 @@ export const anthropicHelper: ProviderHelper = ({ reqModel, providerModel }) => return { parse: (chunk: string) => { const data = chunk.split("\n")[1] - if (!data.startsWith("data: ")) return + // Claude models start with "data: {" + // Alibaba models start with "data:{" + if (!data.startsWith("data:")) return let json try { - json = JSON.parse(data.slice(6)) + json = JSON.parse(data.replace(/^data:\s*/, "")) } catch { return } From fa623964a262958f1225afef1e4b286b682ea19f Mon Sep 17 00:00:00 2001 From: Kit Langton Date: Tue, 21 Apr 2026 23:17:23 -0400 Subject: [PATCH 47/73] refactor(core): migrate MessageV2 part leaves + ToolPart to Effect Schema (#23756) --- .../src/server/routes/instance/session.ts | 6 +- packages/opencode/src/session/message-v2.ts | 509 +++++++++++------- packages/opencode/src/session/prompt.ts | 72 +-- 3 files changed, 336 insertions(+), 251 deletions(-) diff --git a/packages/opencode/src/server/routes/instance/session.ts b/packages/opencode/src/server/routes/instance/session.ts index a46c2f3bf3..adafb8f360 100644 --- a/packages/opencode/src/server/routes/instance/session.ts +++ b/packages/opencode/src/server/routes/instance/session.ts @@ -882,7 +882,9 @@ export const SessionRoutes = lazy(() => const msg = await runRequest( "SessionRoutes.prompt", c, - SessionPrompt.Service.use((svc) => svc.prompt({ ...body, sessionID })), + SessionPrompt.Service.use((svc) => + svc.prompt({ ...body, sessionID } as unknown as SessionPrompt.PromptInput), + ), ) void stream.write(JSON.stringify(msg)) }) @@ -915,7 +917,7 @@ export const SessionRoutes = lazy(() => void runRequest( "SessionRoutes.prompt_async", c, - 
SessionPrompt.Service.use((svc) => svc.prompt({ ...body, sessionID })), + SessionPrompt.Service.use((svc) => svc.prompt({ ...body, sessionID } as unknown as SessionPrompt.PromptInput)), ).catch((err) => { log.error("prompt_async failed", { sessionID, error: err }) void Bus.publish(Session.Event.Error, { diff --git a/packages/opencode/src/session/message-v2.ts b/packages/opencode/src/session/message-v2.ts index 04cb15ef8d..1a12b51eb8 100644 --- a/packages/opencode/src/session/message-v2.ts +++ b/packages/opencode/src/session/message-v2.ts @@ -86,192 +86,207 @@ const _Format = Schema.Union([OutputFormatText, OutputFormatJsonSchema]).annotat export const Format = Object.assign(_Format, { zod: zod(_Format) }) export type OutputFormat = Schema.Schema.Type -const PartBase = z.object({ - id: PartID.zod, - sessionID: SessionID.zod, - messageID: MessageID.zod, -}) +const partBase = { + id: PartID, + sessionID: SessionID, + messageID: MessageID, +} -export const SnapshotPart = PartBase.extend({ - type: z.literal("snapshot"), - snapshot: z.string(), -}).meta({ - ref: "SnapshotPart", +export const SnapshotPart = Schema.Struct({ + ...partBase, + type: Schema.Literal("snapshot"), + snapshot: Schema.String, }) -export type SnapshotPart = z.infer + .annotate({ identifier: "SnapshotPart" }) + .pipe(withStatics((s) => ({ zod: zod(s) }))) +export type SnapshotPart = Types.DeepMutable> -export const PatchPart = PartBase.extend({ - type: z.literal("patch"), - hash: z.string(), - files: z.string().array(), -}).meta({ - ref: "PatchPart", +export const PatchPart = Schema.Struct({ + ...partBase, + type: Schema.Literal("patch"), + hash: Schema.String, + files: Schema.Array(Schema.String), }) -export type PatchPart = z.infer + .annotate({ identifier: "PatchPart" }) + .pipe(withStatics((s) => ({ zod: zod(s) }))) +export type PatchPart = Types.DeepMutable> -export const TextPart = PartBase.extend({ - type: z.literal("text"), - text: z.string(), - synthetic: z.boolean().optional(), - ignored: 
z.boolean().optional(), - time: z - .object({ - start: z.number(), - end: z.number().optional(), - }) - .optional(), - metadata: z.record(z.string(), z.any()).optional(), -}).meta({ - ref: "TextPart", -}) -export type TextPart = z.infer - -export const ReasoningPart = PartBase.extend({ - type: z.literal("reasoning"), - text: z.string(), - metadata: z.record(z.string(), z.any()).optional(), - time: z.object({ - start: z.number(), - end: z.number().optional(), - }), -}).meta({ - ref: "ReasoningPart", -}) -export type ReasoningPart = z.infer - -const FilePartSourceBase = z.object({ - text: z - .object({ - value: z.string(), - start: z.number().int(), - end: z.number().int(), - }) - .meta({ - ref: "FilePartSourceText", +export const TextPart = Schema.Struct({ + ...partBase, + type: Schema.Literal("text"), + text: Schema.String, + synthetic: Schema.optional(Schema.Boolean), + ignored: Schema.optional(Schema.Boolean), + time: Schema.optional( + Schema.Struct({ + start: Schema.Number, + end: Schema.optional(Schema.Number), }), + ), + metadata: Schema.optional(Schema.Record(Schema.String, Schema.Any)), }) + .annotate({ identifier: "TextPart" }) + .pipe(withStatics((s) => ({ zod: zod(s) }))) +export type TextPart = Types.DeepMutable> -export const FileSource = FilePartSourceBase.extend({ - type: z.literal("file"), - path: z.string(), -}).meta({ - ref: "FileSource", -}) - -export const SymbolSource = FilePartSourceBase.extend({ - type: z.literal("symbol"), - path: z.string(), - range: LSP.Range.zod, - name: z.string(), - kind: z.number().int(), -}).meta({ - ref: "SymbolSource", -}) - -export const ResourceSource = FilePartSourceBase.extend({ - type: z.literal("resource"), - clientName: z.string(), - uri: z.string(), -}).meta({ - ref: "ResourceSource", -}) - -export const FilePartSource = z.discriminatedUnion("type", [FileSource, SymbolSource, ResourceSource]).meta({ - ref: "FilePartSource", -}) - -export const FilePart = PartBase.extend({ - type: z.literal("file"), - mime: 
z.string(), - filename: z.string().optional(), - url: z.string(), - source: FilePartSource.optional(), -}).meta({ - ref: "FilePart", -}) -export type FilePart = z.infer - -export const AgentPart = PartBase.extend({ - type: z.literal("agent"), - name: z.string(), - source: z - .object({ - value: z.string(), - start: z.number().int(), - end: z.number().int(), - }) - .optional(), -}).meta({ - ref: "AgentPart", -}) -export type AgentPart = z.infer - -export const CompactionPart = PartBase.extend({ - type: z.literal("compaction"), - auto: z.boolean(), - overflow: z.boolean().optional(), - tail_start_id: MessageID.zod.optional(), -}).meta({ - ref: "CompactionPart", -}) -export type CompactionPart = z.infer - -export const SubtaskPart = PartBase.extend({ - type: z.literal("subtask"), - prompt: z.string(), - description: z.string(), - agent: z.string(), - model: z - .object({ - providerID: ProviderID.zod, - modelID: ModelID.zod, - }) - .optional(), - command: z.string().optional(), -}).meta({ - ref: "SubtaskPart", -}) -export type SubtaskPart = z.infer - -export const RetryPart = PartBase.extend({ - type: z.literal("retry"), - attempt: z.number(), - error: APIError.Schema, - time: z.object({ - created: z.number(), +export const ReasoningPart = Schema.Struct({ + ...partBase, + type: Schema.Literal("reasoning"), + text: Schema.String, + metadata: Schema.optional(Schema.Record(Schema.String, Schema.Any)), + time: Schema.Struct({ + start: Schema.Number, + end: Schema.optional(Schema.Number), }), -}).meta({ - ref: "RetryPart", }) -export type RetryPart = z.infer + .annotate({ identifier: "ReasoningPart" }) + .pipe(withStatics((s) => ({ zod: zod(s) }))) +export type ReasoningPart = Types.DeepMutable> -export const StepStartPart = PartBase.extend({ - type: z.literal("step-start"), - snapshot: z.string().optional(), -}).meta({ - ref: "StepStartPart", +const filePartSourceBase = { + text: Schema.Struct({ + value: Schema.String, + start: Schema.Number.check(Schema.isInt()), + end: 
Schema.Number.check(Schema.isInt()), + }).annotate({ identifier: "FilePartSourceText" }), +} + +export const FileSource = Schema.Struct({ + ...filePartSourceBase, + type: Schema.Literal("file"), + path: Schema.String, }) -export type StepStartPart = z.infer + .annotate({ identifier: "FileSource" }) + .pipe(withStatics((s) => ({ zod: zod(s) }))) -export const StepFinishPart = PartBase.extend({ - type: z.literal("step-finish"), - reason: z.string(), - snapshot: z.string().optional(), - cost: z.number(), - tokens: z.object({ - total: z.number().optional(), - input: z.number(), - output: z.number(), - reasoning: z.number(), - cache: z.object({ - read: z.number(), - write: z.number(), +export const SymbolSource = Schema.Struct({ + ...filePartSourceBase, + type: Schema.Literal("symbol"), + path: Schema.String, + range: LSP.Range, + name: Schema.String, + kind: Schema.Number.check(Schema.isInt()), +}) + .annotate({ identifier: "SymbolSource" }) + .pipe(withStatics((s) => ({ zod: zod(s) }))) + +export const ResourceSource = Schema.Struct({ + ...filePartSourceBase, + type: Schema.Literal("resource"), + clientName: Schema.String, + uri: Schema.String, +}) + .annotate({ identifier: "ResourceSource" }) + .pipe(withStatics((s) => ({ zod: zod(s) }))) + +const _FilePartSource = Schema.Union([FileSource, SymbolSource, ResourceSource]).annotate({ + discriminator: "type", + identifier: "FilePartSource", +}) +export const FilePartSource = Object.assign(_FilePartSource, { zod: zod(_FilePartSource) }) + +export const FilePart = Schema.Struct({ + ...partBase, + type: Schema.Literal("file"), + mime: Schema.String, + filename: Schema.optional(Schema.String), + url: Schema.String, + source: Schema.optional(_FilePartSource), +}) + .annotate({ identifier: "FilePart" }) + .pipe(withStatics((s) => ({ zod: zod(s) }))) +export type FilePart = Types.DeepMutable> + +export const AgentPart = Schema.Struct({ + ...partBase, + type: Schema.Literal("agent"), + name: Schema.String, + source: 
Schema.optional( + Schema.Struct({ + value: Schema.String, + start: Schema.Number.check(Schema.isInt()), + end: Schema.Number.check(Schema.isInt()), + }), + ), +}) + .annotate({ identifier: "AgentPart" }) + .pipe(withStatics((s) => ({ zod: zod(s) }))) +export type AgentPart = Types.DeepMutable> + +export const CompactionPart = Schema.Struct({ + ...partBase, + type: Schema.Literal("compaction"), + auto: Schema.Boolean, + overflow: Schema.optional(Schema.Boolean), + tail_start_id: Schema.optional(MessageID), +}) + .annotate({ identifier: "CompactionPart" }) + .pipe(withStatics((s) => ({ zod: zod(s) }))) +export type CompactionPart = Types.DeepMutable> + +export const SubtaskPart = Schema.Struct({ + ...partBase, + type: Schema.Literal("subtask"), + prompt: Schema.String, + description: Schema.String, + agent: Schema.String, + model: Schema.optional( + Schema.Struct({ + providerID: ProviderID, + modelID: ModelID, + }), + ), + command: Schema.optional(Schema.String), +}) + .annotate({ identifier: "SubtaskPart" }) + .pipe(withStatics((s) => ({ zod: zod(s) }))) +export type SubtaskPart = Types.DeepMutable> + +export const RetryPart = Schema.Struct({ + ...partBase, + type: Schema.Literal("retry"), + attempt: Schema.Number, + // APIError is still NamedError-based Zod; bridge via ZodOverride until errors migrate. 
+ error: Schema.Any.annotate({ [ZodOverride]: APIError.Schema }), + time: Schema.Struct({ + created: Schema.Number, + }), +}) + .annotate({ identifier: "RetryPart" }) + .pipe(withStatics((s) => ({ zod: zod(s) }))) +export type RetryPart = Omit>, "error"> & { + error: APIError +} + +export const StepStartPart = Schema.Struct({ + ...partBase, + type: Schema.Literal("step-start"), + snapshot: Schema.optional(Schema.String), +}) + .annotate({ identifier: "StepStartPart" }) + .pipe(withStatics((s) => ({ zod: zod(s) }))) +export type StepStartPart = Types.DeepMutable> + +export const StepFinishPart = Schema.Struct({ + ...partBase, + type: Schema.Literal("step-finish"), + reason: Schema.String, + snapshot: Schema.optional(Schema.String), + cost: Schema.Number, + tokens: Schema.Struct({ + total: Schema.optional(Schema.Number), + input: Schema.Number, + output: Schema.Number, + reasoning: Schema.Number, + cache: Schema.Struct({ + read: Schema.Number, + write: Schema.Number, }), }), -}).meta({ - ref: "StepFinishPart", }) -export type StepFinishPart = z.infer + .annotate({ identifier: "StepFinishPart" }) + .pipe(withStatics((s) => ({ zod: zod(s) }))) +export type StepFinishPart = Types.DeepMutable> export const ToolStatePending = Schema.Struct({ status: Schema.Literal("pending"), @@ -306,18 +321,11 @@ export const ToolStateCompleted = Schema.Struct({ end: Schema.Number, compacted: Schema.optional(Schema.Number), }), - // FilePart is still Zod-first this slice; bridge via ZodOverride so the - // derived Zod + JSON Schema still emit `$ref: FilePart` array items. 
- attachments: Schema.optional(Schema.Any.annotate({ [ZodOverride]: FilePart.array() })), + attachments: Schema.optional(Schema.Array(FilePart)), }) .annotate({ identifier: "ToolStateCompleted" }) .pipe(withStatics((s) => ({ zod: zod(s) }))) -export type ToolStateCompleted = Omit< - Types.DeepMutable>, - "attachments" -> & { - attachments?: FilePart[] -} +export type ToolStateCompleted = Types.DeepMutable> export const ToolStateError = Schema.Struct({ status: Schema.Literal("error"), @@ -346,16 +354,19 @@ export const ToolState = Object.assign(_ToolState, { }) export type ToolState = ToolStatePending | ToolStateRunning | ToolStateCompleted | ToolStateError -export const ToolPart = PartBase.extend({ - type: z.literal("tool"), - callID: z.string(), - tool: z.string(), - state: ToolState.zod, - metadata: z.record(z.string(), z.any()).optional(), -}).meta({ - ref: "ToolPart", +export const ToolPart = Schema.Struct({ + ...partBase, + type: Schema.Literal("tool"), + callID: Schema.String, + tool: Schema.String, + state: _ToolState, + metadata: Schema.optional(Schema.Record(Schema.String, Schema.Any)), }) -export type ToolPart = z.infer + .annotate({ identifier: "ToolPart" }) + .pipe(withStatics((s) => ({ zod: zod(s) }))) +export type ToolPart = Omit>, "state"> & { + state: ToolState +} const Base = z.object({ id: MessageID.zod, @@ -388,25 +399,114 @@ export const User = Base.extend({ }) export type User = z.infer +export type Part = + | TextPart + | SubtaskPart + | ReasoningPart + | FilePart + | ToolPart + | StepStartPart + | StepFinishPart + | SnapshotPart + | PatchPart + | AgentPart + | RetryPart + | CompactionPart + +// The derived `.zod` on each leaf is typed as `z.ZodType<...>`, but the walker +// always emits a `z.ZodObject` at runtime. `z.discriminatedUnion` and +// `z.infer` both rely on the ZodObject structural type, so cast here so the +// resulting Part behaves like the pre-migration Zod union. 
export const Part = z .discriminatedUnion("type", [ - TextPart, - SubtaskPart, - ReasoningPart, - FilePart, - ToolPart, - StepStartPart, - StepFinishPart, - SnapshotPart, - PatchPart, - AgentPart, - RetryPart, - CompactionPart, + TextPart.zod as unknown as z.ZodObject, + SubtaskPart.zod as unknown as z.ZodObject, + ReasoningPart.zod as unknown as z.ZodObject, + FilePart.zod as unknown as z.ZodObject, + ToolPart.zod as unknown as z.ZodObject, + StepStartPart.zod as unknown as z.ZodObject, + StepFinishPart.zod as unknown as z.ZodObject, + SnapshotPart.zod as unknown as z.ZodObject, + PatchPart.zod as unknown as z.ZodObject, + AgentPart.zod as unknown as z.ZodObject, + RetryPart.zod as unknown as z.ZodObject, + CompactionPart.zod as unknown as z.ZodObject, ]) .meta({ ref: "Part", - }) -export type Part = z.infer + }) as unknown as z.ZodType + +// ── Prompt input schemas ───────────────────────────────────────────────────── +// +// Consumers of `SessionPrompt.PromptInput.parts` send part drafts without the +// ambient IDs (`messageID`, `sessionID`) that live on stored parts, and may +// omit `id` to let the server allocate one. These Schema-Struct variants +// carry that shape, and `SessionPrompt.PromptInput` just references the +// derived `.zod` (no omit/partial gymnastics needed at the call site). 
+ +export const TextPartInput = Schema.Struct({ + id: Schema.optional(PartID), + type: Schema.Literal("text"), + text: Schema.String, + synthetic: Schema.optional(Schema.Boolean), + ignored: Schema.optional(Schema.Boolean), + time: Schema.optional( + Schema.Struct({ + start: Schema.Number, + end: Schema.optional(Schema.Number), + }), + ), + metadata: Schema.optional(Schema.Record(Schema.String, Schema.Any)), +}) + .annotate({ identifier: "TextPartInput" }) + .pipe(withStatics((s) => ({ zod: zod(s) }))) +export type TextPartInput = Types.DeepMutable> + +export const FilePartInput = Schema.Struct({ + id: Schema.optional(PartID), + type: Schema.Literal("file"), + mime: Schema.String, + filename: Schema.optional(Schema.String), + url: Schema.String, + source: Schema.optional(_FilePartSource), +}) + .annotate({ identifier: "FilePartInput" }) + .pipe(withStatics((s) => ({ zod: zod(s) }))) +export type FilePartInput = Types.DeepMutable> + +export const AgentPartInput = Schema.Struct({ + id: Schema.optional(PartID), + type: Schema.Literal("agent"), + name: Schema.String, + source: Schema.optional( + Schema.Struct({ + value: Schema.String, + start: Schema.Number.check(Schema.isInt()), + end: Schema.Number.check(Schema.isInt()), + }), + ), +}) + .annotate({ identifier: "AgentPartInput" }) + .pipe(withStatics((s) => ({ zod: zod(s) }))) +export type AgentPartInput = Types.DeepMutable> + +export const SubtaskPartInput = Schema.Struct({ + id: Schema.optional(PartID), + type: Schema.Literal("subtask"), + prompt: Schema.String, + description: Schema.String, + agent: Schema.String, + model: Schema.optional( + Schema.Struct({ + providerID: ProviderID, + modelID: ModelID, + }), + ), + command: Schema.optional(Schema.String), +}) + .annotate({ identifier: "SubtaskPartInput" }) + .pipe(withStatics((s) => ({ zod: zod(s) }))) +export type SubtaskPartInput = Types.DeepMutable> export const Assistant = Base.extend({ role: z.literal("assistant"), @@ -517,7 +617,10 @@ export const WithParts 
= z.object({ info: Info, parts: z.array(Part), }) -export type WithParts = z.infer +export type WithParts = { + info: Info + parts: Part[] +} const Cursor = z.object({ id: MessageID.zod, diff --git a/packages/opencode/src/session/prompt.ts b/packages/opencode/src/session/prompt.ts index 6dcec04592..9d50db4afb 100644 --- a/packages/opencode/src/session/prompt.ts +++ b/packages/opencode/src/session/prompt.ts @@ -1721,50 +1721,25 @@ export const PromptInput = z.object({ variant: z.string().optional(), parts: z.array( z.discriminatedUnion("type", [ - MessageV2.TextPart.omit({ - messageID: true, - sessionID: true, - }) - .partial({ - id: true, - }) - .meta({ - ref: "TextPartInput", - }), - MessageV2.FilePart.omit({ - messageID: true, - sessionID: true, - }) - .partial({ - id: true, - }) - .meta({ - ref: "FilePartInput", - }), - MessageV2.AgentPart.omit({ - messageID: true, - sessionID: true, - }) - .partial({ - id: true, - }) - .meta({ - ref: "AgentPartInput", - }), - MessageV2.SubtaskPart.omit({ - messageID: true, - sessionID: true, - }) - .partial({ - id: true, - }) - .meta({ - ref: "SubtaskPartInput", - }), + MessageV2.TextPartInput.zod as unknown as z.ZodObject, + MessageV2.FilePartInput.zod as unknown as z.ZodObject, + MessageV2.AgentPartInput.zod as unknown as z.ZodObject, + MessageV2.SubtaskPartInput.zod as unknown as z.ZodObject, ]), ), }) -export type PromptInput = z.infer +// `z.discriminatedUnion` erases the discriminated members' shapes back to +// `{}` because the derived `.zod` on each input is typed as an opaque +// `z.ZodType`. Restore the precise `parts` type from the exported Schema +// input types so callers see a proper tagged union. 
+type PartInputUnion = + | MessageV2.TextPartInput + | MessageV2.FilePartInput + | MessageV2.AgentPartInput + | MessageV2.SubtaskPartInput +export type PromptInput = Omit, "parts"> & { + parts: PartInputUnion[] +} export const LoopInput = z.object({ sessionID: SessionID.zod, @@ -1792,14 +1767,19 @@ export const CommandInput = z.object({ arguments: z.string(), command: z.string(), variant: z.string().optional(), + // Inlined (no `.meta({ ref })`) to keep the original SDK output — the + // PromptInput call site below references FilePartInput by ref via the + // Schema export in message-v2.ts. parts: z .array( z.discriminatedUnion("type", [ - MessageV2.FilePart.omit({ - messageID: true, - sessionID: true, - }).partial({ - id: true, + z.object({ + id: PartID.zod.optional(), + type: z.literal("file"), + mime: z.string(), + filename: z.string().optional(), + url: z.string(), + source: MessageV2.FilePartSource.zod.optional(), }), ]), ) From 1a76799fd876d856893fdb73197545315bc06b2a Mon Sep 17 00:00:00 2001 From: "opencode-agent[bot]" Date: Wed, 22 Apr 2026 03:18:25 +0000 Subject: [PATCH 48/73] chore: generate --- .../opencode/src/server/routes/instance/session.ts | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/packages/opencode/src/server/routes/instance/session.ts b/packages/opencode/src/server/routes/instance/session.ts index adafb8f360..5d1f869310 100644 --- a/packages/opencode/src/server/routes/instance/session.ts +++ b/packages/opencode/src/server/routes/instance/session.ts @@ -882,9 +882,9 @@ export const SessionRoutes = lazy(() => const msg = await runRequest( "SessionRoutes.prompt", c, - SessionPrompt.Service.use((svc) => - svc.prompt({ ...body, sessionID } as unknown as SessionPrompt.PromptInput), - ), + SessionPrompt.Service.use((svc) => + svc.prompt({ ...body, sessionID } as unknown as SessionPrompt.PromptInput), + ), ) void stream.write(JSON.stringify(msg)) }) @@ -917,7 +917,9 @@ export const SessionRoutes = lazy(() => void 
runRequest( "SessionRoutes.prompt_async", c, - SessionPrompt.Service.use((svc) => svc.prompt({ ...body, sessionID } as unknown as SessionPrompt.PromptInput)), + SessionPrompt.Service.use((svc) => + svc.prompt({ ...body, sessionID } as unknown as SessionPrompt.PromptInput), + ), ).catch((err) => { log.error("prompt_async failed", { sessionID, error: err }) void Bus.publish(Session.Event.Error, { From e89543811ca02067732b9ae6637bc1c1572dc7c1 Mon Sep 17 00:00:00 2001 From: Kit Langton Date: Tue, 21 Apr 2026 23:26:12 -0400 Subject: [PATCH 49/73] refactor(core): migrate MessageV2 message DTOs (User/Assistant/Part/Info/WithParts) to Effect Schema (#23757) --- packages/opencode/src/cli/cmd/import.ts | 4 +- .../src/server/routes/instance/session.ts | 20 +- packages/opencode/src/session/message-v2.ts | 210 ++++++++++-------- packages/opencode/src/session/prompt.ts | 4 +- packages/opencode/src/session/session.ts | 2 +- .../test/session/structured-output.test.ts | 8 +- 6 files changed, 132 insertions(+), 116 deletions(-) diff --git a/packages/opencode/src/cli/cmd/import.ts b/packages/opencode/src/cli/cmd/import.ts index 8da254f159..309ec6d950 100644 --- a/packages/opencode/src/cli/cmd/import.ts +++ b/packages/opencode/src/cli/cmd/import.ts @@ -168,7 +168,7 @@ export const ImportCommand = cmd({ ) for (const msg of exportData.messages) { - const msgInfo = MessageV2.Info.parse(msg.info) + const msgInfo = MessageV2.Info.zod.parse(msg.info) const { id, sessionID: _, ...msgData } = msgInfo Database.use((db) => db @@ -184,7 +184,7 @@ export const ImportCommand = cmd({ ) for (const part of msg.parts) { - const partInfo = MessageV2.Part.parse(part) + const partInfo = MessageV2.Part.zod.parse(part) const { id: partId, sessionID: _s, messageID, ...partData } = partInfo Database.use((db) => db diff --git a/packages/opencode/src/server/routes/instance/session.ts b/packages/opencode/src/server/routes/instance/session.ts index 5d1f869310..8d03024260 100644 --- 
a/packages/opencode/src/server/routes/instance/session.ts +++ b/packages/opencode/src/server/routes/instance/session.ts @@ -611,7 +611,7 @@ export const SessionRoutes = lazy(() => description: "List of messages", content: { "application/json": { - schema: resolver(MessageV2.WithParts.array()), + schema: resolver(MessageV2.WithParts.zod.array()), }, }, }, @@ -701,8 +701,8 @@ export const SessionRoutes = lazy(() => "application/json": { schema: resolver( z.object({ - info: MessageV2.Info, - parts: MessageV2.Part.array(), + info: MessageV2.Info.zod, + parts: MessageV2.Part.zod.array(), }), ), }, @@ -813,7 +813,7 @@ export const SessionRoutes = lazy(() => description: "Successfully updated part", content: { "application/json": { - schema: resolver(MessageV2.Part), + schema: resolver(MessageV2.Part.zod), }, }, }, @@ -828,7 +828,7 @@ export const SessionRoutes = lazy(() => partID: PartID.zod, }), ), - validator("json", MessageV2.Part), + validator("json", MessageV2.Part.zod), async (c) => { const params = c.req.valid("param") const body = c.req.valid("json") @@ -856,8 +856,8 @@ export const SessionRoutes = lazy(() => "application/json": { schema: resolver( z.object({ - info: MessageV2.Assistant, - parts: MessageV2.Part.array(), + info: MessageV2.Assistant.zod, + parts: MessageV2.Part.zod.array(), }), ), }, @@ -944,8 +944,8 @@ export const SessionRoutes = lazy(() => "application/json": { schema: resolver( z.object({ - info: MessageV2.Assistant, - parts: MessageV2.Part.array(), + info: MessageV2.Assistant.zod, + parts: MessageV2.Part.zod.array(), }), ), }, @@ -980,7 +980,7 @@ export const SessionRoutes = lazy(() => description: "Created message", content: { "application/json": { - schema: resolver(MessageV2.WithParts), + schema: resolver(MessageV2.WithParts.zod), }, }, }, diff --git a/packages/opencode/src/session/message-v2.ts b/packages/opencode/src/session/message-v2.ts index 1a12b51eb8..f1cb6db218 100644 --- a/packages/opencode/src/session/message-v2.ts +++ 
b/packages/opencode/src/session/message-v2.ts @@ -368,37 +368,68 @@ export type ToolPart = Omit + .annotate({ identifier: "UserMessage" }) + .pipe(withStatics((s) => ({ zod: zod(s) }))) +export type User = Types.DeepMutable> +const _Part = Schema.Union([ + TextPart, + SubtaskPart, + ReasoningPart, + FilePart, + ToolPart, + StepStartPart, + StepFinishPart, + SnapshotPart, + PatchPart, + AgentPart, + RetryPart, + CompactionPart, +]).annotate({ discriminator: "type", identifier: "Part" }) +export const Part = Object.assign(_Part, { + zod: zod(_Part) as unknown as z.ZodType< + | TextPart + | SubtaskPart + | ReasoningPart + | FilePart + | ToolPart + | StepStartPart + | StepFinishPart + | SnapshotPart + | PatchPart + | AgentPart + | RetryPart + | CompactionPart + >, +}) export type Part = | TextPart | SubtaskPart @@ -413,28 +444,19 @@ export type Part = | RetryPart | CompactionPart -// The derived `.zod` on each leaf is typed as `z.ZodType<...>`, but the walker -// always emits a `z.ZodObject` at runtime. `z.discriminatedUnion` and -// `z.infer` both rely on the ZodObject structural type, so cast here so the -// resulting Part behaves like the pre-migration Zod union. -export const Part = z - .discriminatedUnion("type", [ - TextPart.zod as unknown as z.ZodObject, - SubtaskPart.zod as unknown as z.ZodObject, - ReasoningPart.zod as unknown as z.ZodObject, - FilePart.zod as unknown as z.ZodObject, - ToolPart.zod as unknown as z.ZodObject, - StepStartPart.zod as unknown as z.ZodObject, - StepFinishPart.zod as unknown as z.ZodObject, - SnapshotPart.zod as unknown as z.ZodObject, - PatchPart.zod as unknown as z.ZodObject, - AgentPart.zod as unknown as z.ZodObject, - RetryPart.zod as unknown as z.ZodObject, - CompactionPart.zod as unknown as z.ZodObject, - ]) - .meta({ - ref: "Part", - }) as unknown as z.ZodType +// Errors are still NamedError-based Zod; bridge via ZodOverride so the derived +// Zod + JSON Schema emit the original discriminatedUnion shape. 
Migrating the +// error classes to Schema.TaggedErrorClass is a separate slice. +const AssistantErrorZod = z.discriminatedUnion("name", [ + AuthError.Schema, + NamedError.Unknown.Schema, + OutputLengthError.Schema, + AbortedError.Schema, + StructuredOutputError.Schema, + ContextOverflowError.Schema, + APIError.Schema, +]) +type AssistantError = z.infer // ── Prompt input schemas ───────────────────────────────────────────────────── // @@ -508,59 +530,53 @@ export const SubtaskPartInput = Schema.Struct({ .pipe(withStatics((s) => ({ zod: zod(s) }))) export type SubtaskPartInput = Types.DeepMutable> -export const Assistant = Base.extend({ - role: z.literal("assistant"), - time: z.object({ - created: z.number(), - completed: z.number().optional(), +export const Assistant = Schema.Struct({ + ...messageBase, + role: Schema.Literal("assistant"), + time: Schema.Struct({ + created: Schema.Number, + completed: Schema.optional(Schema.Number), }), - error: z - .discriminatedUnion("name", [ - AuthError.Schema, - NamedError.Unknown.Schema, - OutputLengthError.Schema, - AbortedError.Schema, - StructuredOutputError.Schema, - ContextOverflowError.Schema, - APIError.Schema, - ]) - .optional(), - parentID: MessageID.zod, - modelID: ModelID.zod, - providerID: ProviderID.zod, + error: Schema.optional(Schema.Any.annotate({ [ZodOverride]: AssistantErrorZod })), + parentID: MessageID, + modelID: ModelID, + providerID: ProviderID, /** * @deprecated */ - mode: z.string(), - agent: z.string(), - path: z.object({ - cwd: z.string(), - root: z.string(), + mode: Schema.String, + agent: Schema.String, + path: Schema.Struct({ + cwd: Schema.String, + root: Schema.String, }), - summary: z.boolean().optional(), - cost: z.number(), - tokens: z.object({ - total: z.number().optional(), - input: z.number(), - output: z.number(), - reasoning: z.number(), - cache: z.object({ - read: z.number(), - write: z.number(), + summary: Schema.optional(Schema.Boolean), + cost: Schema.Number, + tokens: Schema.Struct({ 
+ total: Schema.optional(Schema.Number), + input: Schema.Number, + output: Schema.Number, + reasoning: Schema.Number, + cache: Schema.Struct({ + read: Schema.Number, + write: Schema.Number, }), }), - structured: z.any().optional(), - variant: z.string().optional(), - finish: z.string().optional(), -}).meta({ - ref: "AssistantMessage", + structured: Schema.optional(Schema.Any), + variant: Schema.optional(Schema.String), + finish: Schema.optional(Schema.String), }) -export type Assistant = z.infer + .annotate({ identifier: "AssistantMessage" }) + .pipe(withStatics((s) => ({ zod: zod(s) }))) +export type Assistant = Omit>, "error"> & { + error?: AssistantError +} -export const Info = z.discriminatedUnion("role", [User, Assistant]).meta({ - ref: "Message", +const _Info = Schema.Union([User, Assistant]).annotate({ discriminator: "role", identifier: "Message" }) +export const Info = Object.assign(_Info, { + zod: zod(_Info) as unknown as z.ZodType, }) -export type Info = z.infer +export type Info = User | Assistant export const Event = { Updated: SyncEvent.define({ @@ -569,7 +585,7 @@ export const Event = { aggregate: "sessionID", schema: z.object({ sessionID: SessionID.zod, - info: Info, + info: Info.zod, }), }), Removed: SyncEvent.define({ @@ -587,7 +603,7 @@ export const Event = { aggregate: "sessionID", schema: z.object({ sessionID: SessionID.zod, - part: Part, + part: Part.zod, time: z.number(), }), }), @@ -613,10 +629,10 @@ export const Event = { }), } -export const WithParts = z.object({ - info: Info, - parts: z.array(Part), -}) +export const WithParts = Schema.Struct({ + info: _Info, + parts: Schema.Array(_Part), +}).pipe(withStatics((s) => ({ zod: zod(s) }))) export type WithParts = { info: Info parts: Part[] diff --git a/packages/opencode/src/session/prompt.ts b/packages/opencode/src/session/prompt.ts index 9d50db4afb..508c72cc8f 100644 --- a/packages/opencode/src/session/prompt.ts +++ b/packages/opencode/src/session/prompt.ts @@ -1243,7 +1243,7 @@ NOTE: At any 
point in time through this workflow you should feel free to ask the { message: info, parts }, ) - const parsed = MessageV2.Info.safeParse(info) + const parsed = MessageV2.Info.zod.safeParse(info) if (!parsed.success) { log.error("invalid user message before save", { sessionID: input.sessionID, @@ -1254,7 +1254,7 @@ NOTE: At any point in time through this workflow you should feel free to ask the }) } parts.forEach((part, index) => { - const p = MessageV2.Part.safeParse(part) + const p = MessageV2.Part.zod.safeParse(part) if (p.success) return log.error("invalid user part before save", { sessionID: input.sessionID, diff --git a/packages/opencode/src/session/session.ts b/packages/opencode/src/session/session.ts index 6e9fb5c5d8..a7607798ba 100644 --- a/packages/opencode/src/session/session.ts +++ b/packages/opencode/src/session/session.ts @@ -247,7 +247,7 @@ export const Event = { z.object({ sessionID: SessionID.zod.optional(), // z.lazy defers access to break circular dep: session → message-v2 → provider → plugin → session - error: z.lazy(() => MessageV2.Assistant.shape.error), + error: z.lazy(() => (MessageV2.Assistant.zod as unknown as z.ZodObject).shape.error), }), ), } diff --git a/packages/opencode/test/session/structured-output.test.ts b/packages/opencode/test/session/structured-output.test.ts index a91446bf42..c734a182ae 100644 --- a/packages/opencode/test/session/structured-output.test.ts +++ b/packages/opencode/test/session/structured-output.test.ts @@ -95,7 +95,7 @@ describe("structured-output.StructuredOutputError", () => { describe("structured-output.UserMessage", () => { test("user message accepts outputFormat", () => { - const result = MessageV2.User.safeParse({ + const result = MessageV2.User.zod.safeParse({ id: MessageID.ascending(), sessionID: SessionID.descending(), role: "user", @@ -111,7 +111,7 @@ describe("structured-output.UserMessage", () => { }) test("user message works without outputFormat (optional)", () => { - const result = 
MessageV2.User.safeParse({ + const result = MessageV2.User.zod.safeParse({ id: MessageID.ascending(), sessionID: SessionID.descending(), role: "user", @@ -140,7 +140,7 @@ describe("structured-output.AssistantMessage", () => { } test("assistant message accepts structured", () => { - const result = MessageV2.Assistant.safeParse({ + const result = MessageV2.Assistant.zod.safeParse({ ...baseAssistantMessage, structured: { company: "Anthropic", founded: 2021 }, }) @@ -151,7 +151,7 @@ describe("structured-output.AssistantMessage", () => { }) test("assistant message works without structured_output (optional)", () => { - const result = MessageV2.Assistant.safeParse(baseAssistantMessage) + const result = MessageV2.Assistant.zod.safeParse(baseAssistantMessage) expect(result.success).toBe(true) }) }) From 1593c3ed16369001f24252d0091092da8db26bf3 Mon Sep 17 00:00:00 2001 From: Kit Langton Date: Tue, 21 Apr 2026 23:28:33 -0400 Subject: [PATCH 50/73] refactor(core): migrate MessageV2 internal Cursor to Effect Schema (#23763) --- packages/opencode/src/session/message-v2.ts | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/packages/opencode/src/session/message-v2.ts b/packages/opencode/src/session/message-v2.ts index f1cb6db218..aceecd9b8c 100644 --- a/packages/opencode/src/session/message-v2.ts +++ b/packages/opencode/src/session/message-v2.ts @@ -638,18 +638,20 @@ export type WithParts = { parts: Part[] } -const Cursor = z.object({ - id: MessageID.zod, - time: z.number(), +const Cursor = Schema.Struct({ + id: MessageID, + time: Schema.Number, }) -type Cursor = z.infer +type Cursor = typeof Cursor.Type + +const decodeCursor = Schema.decodeUnknownSync(Cursor) export const cursor = { encode(input: Cursor) { return Buffer.from(JSON.stringify(input)).toString("base64url") }, decode(input: string) { - return Cursor.parse(JSON.parse(Buffer.from(input, "base64url").toString("utf8"))) + return decodeCursor(JSON.parse(Buffer.from(input, 
"base64url").toString("utf8"))) }, } From ed802fd121c46dd4efc3b87e7bf4865b630ccb21 Mon Sep 17 00:00:00 2001 From: Kit Langton Date: Tue, 21 Apr 2026 23:40:32 -0400 Subject: [PATCH 51/73] refactor(core): migrate MessageV2 errors to Schema-backed named errors (#23764) --- packages/opencode/src/cli/cmd/github.ts | 6 +- packages/opencode/src/session/message-v2.ts | 54 ++++++++--------- .../opencode/src/util/named-schema-error.ts | 59 +++++++++++++++++++ 3 files changed, 86 insertions(+), 33 deletions(-) create mode 100644 packages/opencode/src/util/named-schema-error.ts diff --git a/packages/opencode/src/cli/cmd/github.ts b/packages/opencode/src/cli/cmd/github.ts index ed1ca2124d..fe8e233dd1 100644 --- a/packages/opencode/src/cli/cmd/github.ts +++ b/packages/opencode/src/cli/cmd/github.ts @@ -985,7 +985,8 @@ export const GithubRunCommand = cmd({ const err = result.info.error console.error("Agent error:", err) if (err.name === "ContextOverflowError") throw new Error(formatPromptTooLargeError(files)) - throw new Error(`${err.name}: ${err.data?.message || ""}`) + const message = "message" in err.data ? err.data.message : "" + throw new Error(`${err.name}: ${message}`) } const text = extractResponseText(result.parts) @@ -1014,7 +1015,8 @@ export const GithubRunCommand = cmd({ const err = summary.info.error console.error("Summary agent error:", err) if (err.name === "ContextOverflowError") throw new Error(formatPromptTooLargeError(files)) - throw new Error(`${err.name}: ${err.data?.message || ""}`) + const message = "message" in err.data ? 
err.data.message : "" + throw new Error(`${err.name}: ${message}`) } const summaryText = extractResponseText(summary.parts) diff --git a/packages/opencode/src/session/message-v2.ts b/packages/opencode/src/session/message-v2.ts index aceecd9b8c..123f7b5401 100644 --- a/packages/opencode/src/session/message-v2.ts +++ b/packages/opencode/src/session/message-v2.ts @@ -18,6 +18,7 @@ import { ModelID, ProviderID } from "@/provider/schema" import { Effect, Schema, Types } from "effect" import { zod, ZodOverride } from "@/util/effect-zod" import { withStatics } from "@/util/schema" +import { namedSchemaError } from "@/util/named-schema-error" import { EffectLogger } from "@/effect" /** Error shape thrown by Bun's fetch() when gzip/br decompression fails mid-stream */ @@ -30,38 +31,29 @@ interface FetchDecompressionError extends Error { export const SYNTHETIC_ATTACHMENT_PROMPT = "Attached image(s) from tool result:" export { isMedia } -export const OutputLengthError = NamedError.create("MessageOutputLengthError", z.object({})) -export const AbortedError = NamedError.create("MessageAbortedError", z.object({ message: z.string() })) -export const StructuredOutputError = NamedError.create( - "StructuredOutputError", - z.object({ - message: z.string(), - retries: z.number(), - }), -) -export const AuthError = NamedError.create( - "ProviderAuthError", - z.object({ - providerID: z.string(), - message: z.string(), - }), -) -export const APIError = NamedError.create( - "APIError", - z.object({ - message: z.string(), - statusCode: z.number().optional(), - isRetryable: z.boolean(), - responseHeaders: z.record(z.string(), z.string()).optional(), - responseBody: z.string().optional(), - metadata: z.record(z.string(), z.string()).optional(), - }), -) +export const OutputLengthError = namedSchemaError("MessageOutputLengthError", {}) +export const AbortedError = namedSchemaError("MessageAbortedError", { message: Schema.String }) +export const StructuredOutputError = 
namedSchemaError("StructuredOutputError", { + message: Schema.String, + retries: Schema.Number, +}) +export const AuthError = namedSchemaError("ProviderAuthError", { + providerID: Schema.String, + message: Schema.String, +}) +export const APIError = namedSchemaError("APIError", { + message: Schema.String, + statusCode: Schema.optional(Schema.Number), + isRetryable: Schema.Boolean, + responseHeaders: Schema.optional(Schema.Record(Schema.String, Schema.String)), + responseBody: Schema.optional(Schema.String), + metadata: Schema.optional(Schema.Record(Schema.String, Schema.String)), +}) export type APIError = z.infer -export const ContextOverflowError = NamedError.create( - "ContextOverflowError", - z.object({ message: z.string(), responseBody: z.string().optional() }), -) +export const ContextOverflowError = namedSchemaError("ContextOverflowError", { + message: Schema.String, + responseBody: Schema.optional(Schema.String), +}) export class OutputFormatText extends Schema.Class("OutputFormatText")({ type: Schema.Literal("text"), diff --git a/packages/opencode/src/util/named-schema-error.ts b/packages/opencode/src/util/named-schema-error.ts new file mode 100644 index 0000000000..5fcc93cba3 --- /dev/null +++ b/packages/opencode/src/util/named-schema-error.ts @@ -0,0 +1,59 @@ +import { Schema } from "effect" +import z from "zod" +import { zod } from "@/util/effect-zod" + +/** + * Create a Schema-backed NamedError-shaped class. + * + * Drop-in replacement for `NamedError.create(tag, zodShape)` but backed by + * `Schema.Struct` under the hood. The wire shape emitted by the derived + * `.Schema` is still `{ name: tag, data: {...fields} }` so the generated + * OpenAPI/SDK output is byte-identical to the original NamedError schema. 
+ * + * Preserves the existing surface: + * - static `Schema` (Zod schema of the wire shape) + * - static `isInstance(x)` + * - instance `toObject()` returning `{ name, data }` + * - `new X({ ...data }, { cause })` + */ +export function namedSchemaError(tag: Tag, fields: Fields) { + // Wire shape matches the original NamedError output so the SDK stays stable. + const dataSchema = Schema.Struct(fields) + const wire = z + .object({ + name: z.literal(tag), + data: zod(dataSchema), + }) + .meta({ ref: tag }) + + type Data = Schema.Schema.Type + + class NamedSchemaError extends Error { + static readonly Schema = wire + static readonly tag = tag + public static isInstance(input: unknown): input is NamedSchemaError { + return ( + typeof input === "object" && + input !== null && + "name" in input && + (input as { name: unknown }).name === tag + ) + } + + public override readonly name: Tag = tag + public readonly data: Data + + constructor(data: Data, options?: ErrorOptions) { + super(tag, options) + this.data = data + } + + toObject(): { name: Tag; data: Data } { + return { name: tag, data: this.data } + } + } + + Object.defineProperty(NamedSchemaError, "name", { value: tag }) + + return NamedSchemaError +} From b0455583aa8c73be47936c42c7fd96839c11e3ba Mon Sep 17 00:00:00 2001 From: "opencode-agent[bot]" Date: Wed, 22 Apr 2026 03:41:42 +0000 Subject: [PATCH 52/73] chore: generate --- packages/opencode/src/util/named-schema-error.ts | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/packages/opencode/src/util/named-schema-error.ts b/packages/opencode/src/util/named-schema-error.ts index 5fcc93cba3..e144f2f906 100644 --- a/packages/opencode/src/util/named-schema-error.ts +++ b/packages/opencode/src/util/named-schema-error.ts @@ -32,12 +32,7 @@ export function namedSchemaError Date: Wed, 22 Apr 2026 00:35:48 -0400 Subject: [PATCH 53/73] feat: update codex plugin to support 5.5 (#23789) --- packages/opencode/src/plugin/codex.ts | 1 + 1 file changed, 1 
insertion(+) diff --git a/packages/opencode/src/plugin/codex.ts b/packages/opencode/src/plugin/codex.ts index c61cb78509..84d314f476 100644 --- a/packages/opencode/src/plugin/codex.ts +++ b/packages/opencode/src/plugin/codex.ts @@ -374,6 +374,7 @@ export async function CodexAuthPlugin(input: PluginInput): Promise { "gpt-5.3-codex", "gpt-5.4", "gpt-5.4-mini", + "gpt-5.5", ]) for (const [modelId, model] of Object.entries(provider.models)) { if (modelId.includes("codex")) continue From 69e2f3b7ba12ce45ba2964ca3df2fe7c5a22a793 Mon Sep 17 00:00:00 2001 From: Luke Parker <10430890+Hona@users.noreply.github.com> Date: Wed, 22 Apr 2026 15:18:51 +1000 Subject: [PATCH 54/73] chore: bump Bun to 1.3.13 (#23791) --- bun.lock | 6 +++--- package.json | 4 ++-- packages/containers/bun-node/Dockerfile | 2 +- packages/ui/src/components/timeline-playground.stories.tsx | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/bun.lock b/bun.lock index 77ab24240b..64b32feac4 100644 --- a/bun.lock +++ b/bun.lock @@ -688,7 +688,7 @@ "@tailwindcss/vite": "4.1.11", "@tsconfig/bun": "1.0.9", "@tsconfig/node22": "22.0.2", - "@types/bun": "1.3.11", + "@types/bun": "1.3.12", "@types/cross-spawn": "6.0.6", "@types/luxon": "3.7.1", "@types/node": "22.13.9", @@ -2302,7 +2302,7 @@ "@types/braces": ["@types/braces@3.0.5", "", {}, "sha512-SQFof9H+LXeWNz8wDe7oN5zu7ket0qwMu5vZubW4GCJ8Kkeh6nBWUz87+KTz/G3Kqsrp0j/W253XJb3KMEeg3w=="], - "@types/bun": ["@types/bun@1.3.11", "", { "dependencies": { "bun-types": "1.3.11" } }, "sha512-5vPne5QvtpjGpsGYXiFyycfpDF2ECyPcTSsFBMa0fraoxiQyMJ3SmuQIGhzPg2WJuWxVBoxWJ2kClYTcw/4fAg=="], + "@types/bun": ["@types/bun@1.3.12", "", { "dependencies": { "bun-types": "1.3.12" } }, "sha512-DBv81elK+/VSwXHDlnH3Qduw+KxkTIWi7TXkAeh24zpi5l0B2kUg9Ga3tb4nJaPcOFswflgi/yAvMVBPrxMB+A=="], "@types/cacache": ["@types/cacache@20.0.1", "", { "dependencies": { "@types/node": "*", "minipass": "*" } }, 
"sha512-QlKW3AFoFr/hvPHwFHMIVUH/ZCYeetBNou3PCmxu5LaNDvrtBlPJtIA6uhmU9JRt9oxj7IYoqoLcpxtzpPiTcw=="], @@ -2720,7 +2720,7 @@ "bun-pty": ["bun-pty@0.4.8", "", {}, "sha512-rO70Mrbr13+jxHHHu2YBkk2pNqrJE5cJn29WE++PUr+GFA0hq/VgtQPZANJ8dJo6d7XImvBk37Innt8GM7O28w=="], - "bun-types": ["bun-types@1.3.11", "", { "dependencies": { "@types/node": "*" } }, "sha512-1KGPpoxQWl9f6wcZh57LvrPIInQMn2TQ7jsgxqpRzg+l0QPOFvJVH7HmvHo/AiPgwXy+/Thf6Ov3EdVn1vOabg=="], + "bun-types": ["bun-types@1.3.12", "", { "dependencies": { "@types/node": "*" } }, "sha512-HqOLj5PoFajAQciOMRiIZGNoKxDJSr6qigAttOX40vJuSp6DN/CxWp9s3C1Xwm4oH7ybueITwiaOcWXoYVoRkA=="], "bun-webgpu": ["bun-webgpu@0.1.5", "", { "dependencies": { "@webgpu/types": "^0.1.60" }, "optionalDependencies": { "bun-webgpu-darwin-arm64": "^0.1.5", "bun-webgpu-darwin-x64": "^0.1.5", "bun-webgpu-linux-x64": "^0.1.5", "bun-webgpu-win32-x64": "^0.1.5" } }, "sha512-91/K6S5whZKX7CWAm9AylhyKrLGRz6BUiiPiM/kXadSnD4rffljCD/q9cNFftm5YXhx4MvLqw33yEilxogJvwA=="], diff --git a/package.json b/package.json index 06bf9c91ae..f918bcd025 100644 --- a/package.json +++ b/package.json @@ -4,7 +4,7 @@ "description": "AI-powered development tool", "private": true, "type": "module", - "packageManager": "bun@1.3.11", + "packageManager": "bun@1.3.13", "scripts": { "dev": "bun run --cwd packages/opencode --conditions=browser src/index.ts", "dev:desktop": "bun --cwd packages/desktop-electron dev", @@ -30,7 +30,7 @@ "@effect/opentelemetry": "4.0.0-beta.48", "@effect/platform-node": "4.0.0-beta.48", "@npmcli/arborist": "9.4.0", - "@types/bun": "1.3.11", + "@types/bun": "1.3.12", "@types/cross-spawn": "6.0.6", "@octokit/rest": "22.0.0", "@hono/zod-validator": "0.4.2", diff --git a/packages/containers/bun-node/Dockerfile b/packages/containers/bun-node/Dockerfile index 485375dd9f..d6f4729bf5 100644 --- a/packages/containers/bun-node/Dockerfile +++ b/packages/containers/bun-node/Dockerfile @@ -4,7 +4,7 @@ FROM ${REGISTRY}/build/base:24.04 SHELL ["/bin/bash", "-lc"] ARG 
NODE_VERSION=24.4.0 -ARG BUN_VERSION=1.3.11 +ARG BUN_VERSION=1.3.13 ENV BUN_INSTALL=/opt/bun ENV PATH=/opt/bun/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin diff --git a/packages/ui/src/components/timeline-playground.stories.tsx b/packages/ui/src/components/timeline-playground.stories.tsx index c071db303b..72f5730612 100644 --- a/packages/ui/src/components/timeline-playground.stories.tsx +++ b/packages/ui/src/components/timeline-playground.stories.tsx @@ -318,7 +318,7 @@ const TOOL_SAMPLES = { tool: "bash", input: { command: "bun test --filter session", description: "Run session tests" }, output: - "bun test v1.3.11\n\n✓ session-turn.test.tsx (3 tests) 45ms\n✓ message-part.test.tsx (7 tests) 120ms\n\nTest Suites: 2 passed, 2 total\nTests: 10 passed, 10 total\nTime: 0.89s", + "bun test v1.3.13\n\n✓ session-turn.test.tsx (3 tests) 45ms\n✓ message-part.test.tsx (7 tests) 120ms\n\nTest Suites: 2 passed, 2 total\nTests: 10 passed, 10 total\nTime: 0.89s", title: "Run session tests", metadata: { command: "bun test --filter session" }, }, From a45d9a9b0aee7f2852bda832313ff7fed5063415 Mon Sep 17 00:00:00 2001 From: Brendan Allan <14191578+Brendonovich@users.noreply.github.com> Date: Wed, 22 Apr 2026 13:36:20 +0800 Subject: [PATCH 55/73] fix(app): improve icon override handling in project edit dialog (#23768) --- .../src/components/dialog-edit-project.tsx | 36 ++++++++++--------- packages/app/src/context/layout.tsx | 2 +- .../app/src/pages/layout/sidebar-items.tsx | 4 ++- 3 files changed, 23 insertions(+), 19 deletions(-) diff --git a/packages/app/src/components/dialog-edit-project.tsx b/packages/app/src/components/dialog-edit-project.tsx index ea5d70065a..621d56646d 100644 --- a/packages/app/src/components/dialog-edit-project.tsx +++ b/packages/app/src/components/dialog-edit-project.tsx @@ -26,8 +26,8 @@ export function DialogEditProject(props: { project: LocalProject }) { const [store, setStore] = createStore({ name: defaultName(), - color: 
props.project.icon?.color || "pink", - iconUrl: props.project.icon?.override || "", + color: props.project.icon?.color, + iconOverride: props.project.icon?.override, startup: props.project.commands?.start ?? "", dragOver: false, iconHover: false, @@ -39,7 +39,7 @@ export function DialogEditProject(props: { project: LocalProject }) { if (!file.type.startsWith("image/")) return const reader = new FileReader() reader.onload = (e) => { - setStore("iconUrl", e.target?.result as string) + setStore("iconOverride", e.target?.result as string) setStore("iconHover", false) } reader.readAsDataURL(file) @@ -68,7 +68,7 @@ export function DialogEditProject(props: { project: LocalProject }) { } function clearIcon() { - setStore("iconUrl", "") + setStore("iconOverride", "") } const saveMutation = useMutation(() => ({ @@ -81,17 +81,17 @@ export function DialogEditProject(props: { project: LocalProject }) { projectID: props.project.id, directory: props.project.worktree, name, - icon: { color: store.color, override: store.iconUrl }, + icon: { color: store.color || "", override: store.iconOverride || "" }, commands: { start }, }) - globalSync.project.icon(props.project.worktree, store.iconUrl || undefined) + globalSync.project.icon(props.project.worktree, store.iconOverride || undefined) dialog.close() return } globalSync.project.meta(props.project.worktree, { name, - icon: { color: store.color, override: store.iconUrl || undefined }, + icon: { color: store.color || undefined, override: store.iconOverride || undefined }, commands: { start: start || undefined }, }) dialog.close() @@ -130,13 +130,13 @@ export function DialogEditProject(props: { project: LocalProject }) { classList={{ "border-text-interactive-base bg-surface-info-base/20": store.dragOver, "border-border-base hover:border-border-strong": !store.dragOver, - "overflow-hidden": !!store.iconUrl, + "overflow-hidden": !!store.iconOverride, }} onDrop={handleDrop} onDragOver={handleDragOver} onDragLeave={handleDragLeave} 
onClick={() => { - if (store.iconUrl && store.iconHover) { + if (store.iconOverride && store.iconHover) { clearIcon() } else { iconInput?.click() @@ -144,7 +144,7 @@ export function DialogEditProject(props: { project: LocalProject }) { }} > {language.t("dialog.project.edit.icon.alt")} @@ -165,8 +165,8 @@ export function DialogEditProject(props: { project: LocalProject }) {
@@ -174,8 +174,8 @@ export function DialogEditProject(props: { project: LocalProject }) {
@@ -198,7 +198,7 @@ export function DialogEditProject(props: { project: LocalProject }) {
- +
@@ -215,7 +215,9 @@ export function DialogEditProject(props: { project: LocalProject }) { "bg-transparent border border-transparent hover:bg-surface-base-hover hover:border-border-weak-base": store.color !== color, }} - onClick={() => setStore("color", color)} + onClick={() => { + setStore("color", store.color === color ? undefined : color) + }} > Date: Wed, 22 Apr 2026 06:13:39 +0000 Subject: [PATCH 56/73] chore: update nix node_modules hashes --- nix/hashes.json | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/nix/hashes.json b/nix/hashes.json index 21279a327d..c096046106 100644 --- a/nix/hashes.json +++ b/nix/hashes.json @@ -1,8 +1,8 @@ { "nodeModules": { - "x86_64-linux": "sha256-NczRp8MPppkqP8PQfWMUWJ/Wofvf2YVy5m4i22Pi3jg=", - "aarch64-linux": "sha256-QIxGOu8Fj+sWgc9hKvm1BLiIErxEtd17SPlwZGac9sQ=", - "aarch64-darwin": "sha256-Rb9qbMM+ARn0iBCaZurwcoUBCplbMXEZwrXVKextp3I=", - "x86_64-darwin": "sha256-KVxOKkaVV7W+K4reEk14MTLgmtoqwCYDqDNXNeS6ync=" + "x86_64-linux": "sha256-AgHhYsiygxbsBo3JN4HqHXKAwh8n1qeuSCe2qqxlxW4=", + "aarch64-linux": "sha256-h2lpWRQ5EDYnjpqZXtUAp1mxKLQxJ4m8MspgSY8Ev78=", + "aarch64-darwin": "sha256-xnd91+WyeAqn06run2ajsekxJvTMiLsnqNPe/rR8VTM=", + "x86_64-darwin": "sha256-rXpz45IOjGEk73xhP9VY86eOj2CZBg2l1vzwzTIOOOQ=" } } From bb696485b645fc323ef7deeefa25685bc14da856 Mon Sep 17 00:00:00 2001 From: Luke Parker <10430890+Hona@users.noreply.github.com> Date: Wed, 22 Apr 2026 18:03:34 +1000 Subject: [PATCH 57/73] fix: preserve BOM in text tool round-trips (#23797) --- packages/opencode/src/format/index.ts | 17 +++-- packages/opencode/src/patch/index.ts | 18 +++--- packages/opencode/src/tool/apply_patch.ts | 32 +++++++--- packages/opencode/src/tool/edit.ts | 29 ++++++--- packages/opencode/src/tool/write.ts | 15 +++-- packages/opencode/src/util/bom.ts | 31 +++++++++ packages/opencode/test/format/format.test.ts | 34 +++++++++- .../opencode/test/tool/apply_patch.test.ts | 28 +++++++++ packages/opencode/test/tool/edit.test.ts | 63 
+++++++++++++++++++ packages/opencode/test/tool/write.test.ts | 48 ++++++++++++++ 10 files changed, 276 insertions(+), 39 deletions(-) create mode 100644 packages/opencode/src/util/bom.ts diff --git a/packages/opencode/src/format/index.ts b/packages/opencode/src/format/index.ts index 85934ce9c9..53a2c10119 100644 --- a/packages/opencode/src/format/index.ts +++ b/packages/opencode/src/format/index.ts @@ -25,7 +25,7 @@ export type Status = z.infer export interface Interface { readonly init: () => Effect.Effect readonly status: () => Effect.Effect - readonly file: (filepath: string) => Effect.Effect + readonly file: (filepath: string) => Effect.Effect } export class Service extends Context.Service()("@opencode/Format") {} @@ -70,16 +70,19 @@ export const layer = Layer.effect( } }), ) - return checks.filter((x) => x.cmd).map((x) => ({ item: x.item, cmd: x.cmd! })) + return checks + .filter((x): x is { item: Formatter.Info; cmd: string[] } => x.cmd !== false) + .map((x) => ({ item: x.item, cmd: x.cmd })) } function formatFile(filepath: string) { return Effect.gen(function* () { log.info("formatting", { file: filepath }) - const ext = path.extname(filepath) + const formatters = yield* Effect.promise(() => getFormatter(path.extname(filepath))) - for (const { item, cmd } of yield* Effect.promise(() => getFormatter(ext))) { - if (cmd === false) continue + if (!formatters.length) return false + + for (const { item, cmd } of formatters) { log.info("running", { command: cmd }) const replaced = cmd.map((x) => x.replace("$FILE", filepath)) const dir = yield* InstanceState.directory @@ -113,6 +116,8 @@ export const layer = Layer.effect( }) } } + + return true }) } @@ -188,7 +193,7 @@ export const layer = Layer.effect( const file = Effect.fn("Format.file")(function* (filepath: string) { const { formatFile } = yield* InstanceState.get(state) - yield* formatFile(filepath) + return yield* formatFile(filepath) }) return Service.of({ init, status, file }) diff --git 
a/packages/opencode/src/patch/index.ts b/packages/opencode/src/patch/index.ts index 19e1d7555b..3662f9e908 100644 --- a/packages/opencode/src/patch/index.ts +++ b/packages/opencode/src/patch/index.ts @@ -3,6 +3,7 @@ import * as path from "path" import * as fs from "fs/promises" import { readFileSync } from "fs" import { Log } from "../util" +import * as Bom from "../util/bom" const log = Log.create({ service: "patch" }) @@ -305,18 +306,19 @@ export function maybeParseApplyPatch( interface ApplyPatchFileUpdate { unified_diff: string content: string + bom: boolean } export function deriveNewContentsFromChunks(filePath: string, chunks: UpdateFileChunk[]): ApplyPatchFileUpdate { // Read original file content - let originalContent: string + let originalContent: ReturnType try { - originalContent = readFileSync(filePath, "utf-8") + originalContent = Bom.split(readFileSync(filePath, "utf-8")) } catch (error) { throw new Error(`Failed to read file ${filePath}: ${error}`, { cause: error }) } - let originalLines = originalContent.split("\n") + let originalLines = originalContent.text.split("\n") // Drop trailing empty element for consistent line counting if (originalLines.length > 0 && originalLines[originalLines.length - 1] === "") { @@ -331,14 +333,16 @@ export function deriveNewContentsFromChunks(filePath: string, chunks: UpdateFile newLines.push("") } - const newContent = newLines.join("\n") + const next = Bom.split(newLines.join("\n")) + const newContent = next.text // Generate unified diff - const unifiedDiff = generateUnifiedDiff(originalContent, newContent) + const unifiedDiff = generateUnifiedDiff(originalContent.text, newContent) return { unified_diff: unifiedDiff, content: newContent, + bom: originalContent.bom || next.bom, } } @@ -553,13 +557,13 @@ export async function applyHunksToFiles(hunks: Hunk[]): Promise { await fs.mkdir(moveDir, { recursive: true }) } - await fs.writeFile(hunk.move_path, fileUpdate.content, "utf-8") + await fs.writeFile(hunk.move_path, 
Bom.join(fileUpdate.content, fileUpdate.bom), "utf-8") await fs.unlink(hunk.path) modified.push(hunk.move_path) log.info(`Moved file: ${hunk.path} -> ${hunk.move_path}`) } else { // Regular update - await fs.writeFile(hunk.path, fileUpdate.content, "utf-8") + await fs.writeFile(hunk.path, Bom.join(fileUpdate.content, fileUpdate.bom), "utf-8") modified.push(hunk.path) log.info(`Updated file: ${hunk.path}`) } diff --git a/packages/opencode/src/tool/apply_patch.ts b/packages/opencode/src/tool/apply_patch.ts index 7da7dd255c..e36d5a65d8 100644 --- a/packages/opencode/src/tool/apply_patch.ts +++ b/packages/opencode/src/tool/apply_patch.ts @@ -14,6 +14,7 @@ import { AppFileSystem } from "@opencode-ai/shared/filesystem" import DESCRIPTION from "./apply_patch.txt" import { File } from "../file" import { Format } from "../format" +import * as Bom from "@/util/bom" const PatchParams = z.object({ patchText: z.string().describe("The full patch text that describes all changes to be made"), @@ -59,6 +60,7 @@ export const ApplyPatchTool = Tool.define( diff: string additions: number deletions: number + bom: boolean }> = [] let totalDiff = "" @@ -72,11 +74,12 @@ export const ApplyPatchTool = Tool.define( const oldContent = "" const newContent = hunk.contents.length === 0 || hunk.contents.endsWith("\n") ? 
hunk.contents : `${hunk.contents}\n` - const diff = trimDiff(createTwoFilesPatch(filePath, filePath, oldContent, newContent)) + const next = Bom.split(newContent) + const diff = trimDiff(createTwoFilesPatch(filePath, filePath, oldContent, next.text)) let additions = 0 let deletions = 0 - for (const change of diffLines(oldContent, newContent)) { + for (const change of diffLines(oldContent, next.text)) { if (change.added) additions += change.count || 0 if (change.removed) deletions += change.count || 0 } @@ -84,11 +87,12 @@ export const ApplyPatchTool = Tool.define( fileChanges.push({ filePath, oldContent, - newContent, + newContent: next.text, type: "add", diff, additions, deletions, + bom: next.bom, }) totalDiff += diff + "\n" @@ -104,13 +108,16 @@ export const ApplyPatchTool = Tool.define( ) } - const oldContent = yield* afs.readFileString(filePath) + const source = yield* Bom.readFile(afs, filePath) + const oldContent = source.text let newContent = oldContent + let bom = source.bom // Apply the update chunks to get new content try { const fileUpdate = Patch.deriveNewContentsFromChunks(filePath, hunk.chunks) newContent = fileUpdate.content + bom = fileUpdate.bom } catch (error) { return yield* Effect.fail(new Error(`apply_patch verification failed: ${error}`)) } @@ -136,6 +143,7 @@ export const ApplyPatchTool = Tool.define( diff, additions, deletions, + bom, }) totalDiff += diff + "\n" @@ -143,8 +151,8 @@ export const ApplyPatchTool = Tool.define( } case "delete": { - const contentToDelete = yield* afs - .readFileString(filePath) + const source = yield* Bom + .readFile(afs, filePath) .pipe( Effect.catch((error) => Effect.fail( @@ -154,6 +162,7 @@ export const ApplyPatchTool = Tool.define( ), ), ) + const contentToDelete = source.text const deleteDiff = trimDiff(createTwoFilesPatch(filePath, filePath, contentToDelete, "")) const deletions = contentToDelete.split("\n").length @@ -166,6 +175,7 @@ export const ApplyPatchTool = Tool.define( diff: deleteDiff, additions: 
0, deletions, + bom: source.bom, }) totalDiff += deleteDiff + "\n" @@ -207,12 +217,12 @@ export const ApplyPatchTool = Tool.define( case "add": // Create parent directories (recursive: true is safe on existing/root dirs) - yield* afs.writeWithDirs(change.filePath, change.newContent) + yield* afs.writeWithDirs(change.filePath, Bom.join(change.newContent, change.bom)) updates.push({ file: change.filePath, event: "add" }) break case "update": - yield* afs.writeWithDirs(change.filePath, change.newContent) + yield* afs.writeWithDirs(change.filePath, Bom.join(change.newContent, change.bom)) updates.push({ file: change.filePath, event: "change" }) break @@ -220,7 +230,7 @@ export const ApplyPatchTool = Tool.define( if (change.movePath) { // Create parent directories (recursive: true is safe on existing/root dirs) - yield* afs.writeWithDirs(change.movePath!, change.newContent) + yield* afs.writeWithDirs(change.movePath!, Bom.join(change.newContent, change.bom)) yield* afs.remove(change.filePath) updates.push({ file: change.filePath, event: "unlink" }) updates.push({ file: change.movePath, event: "add" }) @@ -234,7 +244,9 @@ export const ApplyPatchTool = Tool.define( } if (edited) { - yield* format.file(edited) + if (yield* format.file(edited)) { + yield* Bom.syncFile(afs, edited, change.bom) + } yield* bus.publish(File.Event.Edited, { file: edited }) } } diff --git a/packages/opencode/src/tool/edit.ts b/packages/opencode/src/tool/edit.ts index 2c6c2c1308..858d14e043 100644 --- a/packages/opencode/src/tool/edit.ts +++ b/packages/opencode/src/tool/edit.ts @@ -18,6 +18,7 @@ import { Instance } from "../project/instance" import { Snapshot } from "@/snapshot" import { assertExternalDirectoryEffect } from "./external-directory" import { AppFileSystem } from "@opencode-ai/shared/filesystem" +import * as Bom from "@/util/bom" function normalizeLineEndings(text: string): string { return text.replaceAll("\r\n", "\n") @@ -84,7 +85,11 @@ export const EditTool = Tool.define( 
Effect.gen(function* () { if (params.oldString === "") { const existed = yield* afs.existsSafe(filePath) - contentNew = params.newString + const source = existed ? yield* Bom.readFile(afs, filePath) : { bom: false, text: "" } + const next = Bom.split(params.newString) + const desiredBom = source.bom || next.bom + contentOld = source.text + contentNew = next.text diff = trimDiff(createTwoFilesPatch(filePath, filePath, contentOld, contentNew)) yield* ctx.ask({ permission: "edit", @@ -95,8 +100,10 @@ export const EditTool = Tool.define( diff, }, }) - yield* afs.writeWithDirs(filePath, params.newString) - yield* format.file(filePath) + yield* afs.writeWithDirs(filePath, Bom.join(contentNew, desiredBom)) + if (yield* format.file(filePath)) { + contentNew = yield* Bom.syncFile(afs, filePath, desiredBom) + } yield* bus.publish(File.Event.Edited, { file: filePath }) yield* bus.publish(FileWatcher.Event.Updated, { file: filePath, @@ -108,13 +115,16 @@ export const EditTool = Tool.define( const info = yield* afs.stat(filePath).pipe(Effect.catch(() => Effect.succeed(undefined))) if (!info) throw new Error(`File ${filePath} not found`) if (info.type === "Directory") throw new Error(`Path is a directory, not a file: ${filePath}`) - contentOld = yield* afs.readFileString(filePath) + const source = yield* Bom.readFile(afs, filePath) + contentOld = source.text const ending = detectLineEnding(contentOld) const old = convertToLineEnding(normalizeLineEndings(params.oldString), ending) - const next = convertToLineEnding(normalizeLineEndings(params.newString), ending) + const replacement = convertToLineEnding(normalizeLineEndings(params.newString), ending) - contentNew = replace(contentOld, old, next, params.replaceAll) + const next = Bom.split(replace(contentOld, old, replacement, params.replaceAll)) + const desiredBom = source.bom || next.bom + contentNew = next.text diff = trimDiff( createTwoFilesPatch( @@ -134,14 +144,15 @@ export const EditTool = Tool.define( }, }) - yield* 
afs.writeWithDirs(filePath, contentNew) - yield* format.file(filePath) + yield* afs.writeWithDirs(filePath, Bom.join(contentNew, desiredBom)) + if (yield* format.file(filePath)) { + contentNew = yield* Bom.syncFile(afs, filePath, desiredBom) + } yield* bus.publish(File.Event.Edited, { file: filePath }) yield* bus.publish(FileWatcher.Event.Updated, { file: filePath, event: "change", }) - contentNew = yield* afs.readFileString(filePath) diff = trimDiff( createTwoFilesPatch( filePath, diff --git a/packages/opencode/src/tool/write.ts b/packages/opencode/src/tool/write.ts index 741091b21d..79ed585198 100644 --- a/packages/opencode/src/tool/write.ts +++ b/packages/opencode/src/tool/write.ts @@ -13,6 +13,7 @@ import { AppFileSystem } from "@opencode-ai/shared/filesystem" import { Instance } from "../project/instance" import { trimDiff } from "./edit" import { assertExternalDirectoryEffect } from "./external-directory" +import * as Bom from "@/util/bom" const MAX_PROJECT_DIAGNOSTICS_FILES = 5 @@ -38,9 +39,13 @@ export const WriteTool = Tool.define( yield* assertExternalDirectoryEffect(ctx, filepath) const exists = yield* fs.existsSafe(filepath) - const contentOld = exists ? yield* fs.readFileString(filepath) : "" + const source = exists ? 
yield* Bom.readFile(fs, filepath) : { bom: false, text: "" } + const next = Bom.split(params.content) + const desiredBom = source.bom || next.bom + const contentOld = source.text + const contentNew = next.text - const diff = trimDiff(createTwoFilesPatch(filepath, filepath, contentOld, params.content)) + const diff = trimDiff(createTwoFilesPatch(filepath, filepath, contentOld, contentNew)) yield* ctx.ask({ permission: "edit", patterns: [path.relative(Instance.worktree, filepath)], @@ -51,8 +56,10 @@ export const WriteTool = Tool.define( }, }) - yield* fs.writeWithDirs(filepath, params.content) - yield* format.file(filepath) + yield* fs.writeWithDirs(filepath, Bom.join(contentNew, desiredBom)) + if (yield* format.file(filepath)) { + yield* Bom.syncFile(fs, filepath, desiredBom) + } yield* bus.publish(File.Event.Edited, { file: filepath }) yield* bus.publish(FileWatcher.Event.Updated, { file: filepath, diff --git a/packages/opencode/src/util/bom.ts b/packages/opencode/src/util/bom.ts new file mode 100644 index 0000000000..484228f3d4 --- /dev/null +++ b/packages/opencode/src/util/bom.ts @@ -0,0 +1,31 @@ +import { Effect } from "effect" +import { AppFileSystem } from "@opencode-ai/shared/filesystem" + +const BOM_CODE = 0xfeff +const BOM = String.fromCharCode(BOM_CODE) + +export function split(text: string) { + if (text.charCodeAt(0) !== BOM_CODE) return { bom: false, text } + return { bom: true, text: text.slice(1) } +} + +export function join(text: string, bom: boolean) { + const stripped = split(text).text + if (!bom) return stripped + return BOM + stripped +} + +export const readFile = Effect.fn("Bom.readFile")(function* (fs: AppFileSystem.Interface, filePath: string) { + return split(new TextDecoder("utf-8", { ignoreBOM: true }).decode(yield* fs.readFile(filePath))) +}) + +export const syncFile = Effect.fn("Bom.syncFile")(function* ( + fs: AppFileSystem.Interface, + filePath: string, + bom: boolean, +) { + const current = yield* readFile(fs, filePath) + if 
(current.bom === bom) return current.text + yield* fs.writeWithDirs(filePath, join(current.text, bom)) + return current.text +}) diff --git a/packages/opencode/test/format/format.test.ts b/packages/opencode/test/format/format.test.ts index 5530e195b2..2f6f235aa1 100644 --- a/packages/opencode/test/format/format.test.ts +++ b/packages/opencode/test/format/format.test.ts @@ -126,6 +126,24 @@ describe("Format", () => { it.live("service initializes without error", () => provideTmpdirInstance(() => Format.Service.use(() => Effect.void))) + it.live("file() returns false when no formatter runs", () => + provideTmpdirInstance( + (dir) => + Effect.gen(function* () { + const file = `${dir}/test.txt` + yield* Effect.promise(() => Bun.write(file, "x")) + + const formatted = yield* Format.Service.use((fmt) => fmt.file(file)) + expect(formatted).toBe(false) + }), + { + config: { + formatter: false, + }, + }, + ), + ) + it.live("status() initializes formatter state per directory", () => Effect.gen(function* () { const a = yield* provideTmpdirInstance(() => Format.Service.use((fmt) => fmt.status()), { @@ -219,7 +237,7 @@ describe("Format", () => { yield* Format.Service.use((fmt) => Effect.gen(function* () { yield* fmt.init() - yield* fmt.file(file) + expect(yield* fmt.file(file)).toBe(true) }), ) @@ -229,11 +247,21 @@ describe("Format", () => { config: { formatter: { first: { - command: ["sh", "-c", 'sleep 0.05; v=$(cat "$1"); printf \'%sA\' "$v" > "$1"', "sh", "$FILE"], + command: [ + "node", + "-e", + "const fs = require('fs'); const file = process.argv[1]; fs.writeFileSync(file, fs.readFileSync(file, 'utf8') + 'A')", + "$FILE", + ], extensions: [".seq"], }, second: { - command: ["sh", "-c", 'v=$(cat "$1"); printf \'%sB\' "$v" > "$1"', "sh", "$FILE"], + command: [ + "node", + "-e", + "const fs = require('fs'); const file = process.argv[1]; fs.writeFileSync(file, fs.readFileSync(file, 'utf8') + 'B')", + "$FILE", + ], extensions: [".seq"], }, }, diff --git 
a/packages/opencode/test/tool/apply_patch.test.ts b/packages/opencode/test/tool/apply_patch.test.ts index ebfa9a531e..7ce483726b 100644 --- a/packages/opencode/test/tool/apply_patch.test.ts +++ b/packages/opencode/test/tool/apply_patch.test.ts @@ -195,6 +195,34 @@ describe("tool.apply_patch freeform", () => { }) }) + test("does not invent a first-line diff for BOM files", async () => { + await using fixture = await tmpdir() + const { ctx, calls } = makeCtx() + + await Instance.provide({ + directory: fixture.path, + fn: async () => { + const bom = String.fromCharCode(0xfeff) + const target = path.join(fixture.path, "example.cs") + await fs.writeFile(target, `${bom}using System;\n\nclass Test {}\n`, "utf-8") + + const patchText = "*** Begin Patch\n*** Update File: example.cs\n@@\n class Test {}\n+class Next {}\n*** End Patch" + + await execute({ patchText }, ctx) + + expect(calls.length).toBe(1) + const shown = calls[0].metadata.files[0]?.patch ?? "" + expect(shown).not.toContain(bom) + expect(shown).not.toContain("-using System;") + expect(shown).not.toContain("+using System;") + + const content = await fs.readFile(target, "utf-8") + expect(content.charCodeAt(0)).toBe(0xfeff) + expect(content.slice(1)).toBe("using System;\n\nclass Test {}\nclass Next {}\n") + }, + }) + }) + test("inserts lines with insert-only hunk", async () => { await using fixture = await tmpdir() const { ctx } = makeCtx() diff --git a/packages/opencode/test/tool/edit.test.ts b/packages/opencode/test/tool/edit.test.ts index b5fbc0a67d..82e1b4a7fd 100644 --- a/packages/opencode/test/tool/edit.test.ts +++ b/packages/opencode/test/tool/edit.test.ts @@ -96,6 +96,37 @@ describe("tool.edit", () => { }) }) + test("preserves BOM when oldString is empty on existing files", async () => { + await using tmp = await tmpdir() + const filepath = path.join(tmp.path, "existing.cs") + const bom = String.fromCharCode(0xfeff) + await fs.writeFile(filepath, `${bom}using System;\n`, "utf-8") + + await 
Instance.provide({ + directory: tmp.path, + fn: async () => { + const edit = await resolve() + const result = await Effect.runPromise( + edit.execute( + { + filePath: filepath, + oldString: "", + newString: "using Up;\n", + }, + ctx, + ), + ) + + expect(result.metadata.diff).toContain("-using System;") + expect(result.metadata.diff).toContain("+using Up;") + + const content = await fs.readFile(filepath, "utf-8") + expect(content.charCodeAt(0)).toBe(0xfeff) + expect(content.slice(1)).toBe("using Up;\n") + }, + }) + }) + test("creates new file with nested directories", async () => { await using tmp = await tmpdir() const filepath = path.join(tmp.path, "nested", "dir", "file.txt") @@ -183,6 +214,38 @@ describe("tool.edit", () => { }) }) + test("replaces the first visible line in BOM files", async () => { + await using tmp = await tmpdir() + const filepath = path.join(tmp.path, "existing.cs") + const bom = String.fromCharCode(0xfeff) + await fs.writeFile(filepath, `${bom}using System;\nclass Test {}\n`, "utf-8") + + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const edit = await resolve() + const result = await Effect.runPromise( + edit.execute( + { + filePath: filepath, + oldString: "using System;", + newString: "using Up;", + }, + ctx, + ), + ) + + expect(result.metadata.diff).toContain("-using System;") + expect(result.metadata.diff).toContain("+using Up;") + expect(result.metadata.diff).not.toContain(bom) + + const content = await fs.readFile(filepath, "utf-8") + expect(content.charCodeAt(0)).toBe(0xfeff) + expect(content.slice(1)).toBe("using Up;\nclass Test {}\n") + }, + }) + }) + test("throws error when file does not exist", async () => { await using tmp = await tmpdir() const filepath = path.join(tmp.path, "nonexistent.txt") diff --git a/packages/opencode/test/tool/write.test.ts b/packages/opencode/test/tool/write.test.ts index 50d3b57527..36131f9596 100644 --- a/packages/opencode/test/tool/write.test.ts +++ 
b/packages/opencode/test/tool/write.test.ts @@ -114,6 +114,54 @@ describe("tool.write", () => { ), ) + it.live("preserves BOM when overwriting existing files", () => + provideTmpdirInstance((dir) => + Effect.gen(function* () { + const filepath = path.join(dir, "existing.cs") + const bom = String.fromCharCode(0xfeff) + yield* Effect.promise(() => fs.writeFile(filepath, `${bom}using System;\n`, "utf-8")) + + yield* run({ filePath: filepath, content: "using Up;\n" }) + + const content = yield* Effect.promise(() => fs.readFile(filepath, "utf-8")) + expect(content.charCodeAt(0)).toBe(0xfeff) + expect(content.slice(1)).toBe("using Up;\n") + }), + ), + ) + + it.live("restores BOM after formatter strips it", () => + provideTmpdirInstance( + (dir) => + Effect.gen(function* () { + const filepath = path.join(dir, "formatted.cs") + const bom = String.fromCharCode(0xfeff) + yield* Effect.promise(() => fs.writeFile(filepath, `${bom}using System;\n`, "utf-8")) + + yield* run({ filePath: filepath, content: "using Up;\n" }) + + const content = yield* Effect.promise(() => fs.readFile(filepath, "utf-8")) + expect(content.charCodeAt(0)).toBe(0xfeff) + expect(content.slice(1)).toBe("using Up;\n") + }), + { + config: { + formatter: { + stripbom: { + extensions: [".cs"], + command: [ + "node", + "-e", + "const fs = require('fs'); const file = process.argv[1]; let text = fs.readFileSync(file, 'utf8'); if (text.charCodeAt(0) === 0xfeff) text = text.slice(1); fs.writeFileSync(file, text, 'utf8')", + "$FILE", + ], + }, + }, + }, + }, + ), + ) + it.live("returns diff in metadata for existing files", () => provideTmpdirInstance((dir) => Effect.gen(function* () { From bfb954e7116bd3b9b43a30a35f02fae302062455 Mon Sep 17 00:00:00 2001 From: "opencode-agent[bot]" Date: Wed, 22 Apr 2026 08:06:06 +0000 Subject: [PATCH 58/73] chore: generate --- packages/opencode/src/tool/apply_patch.ts | 16 +++++++--------- packages/opencode/test/tool/apply_patch.test.ts | 3 ++- 2 files changed, 9 insertions(+), 10 
deletions(-) diff --git a/packages/opencode/src/tool/apply_patch.ts b/packages/opencode/src/tool/apply_patch.ts index e36d5a65d8..a4cf1e853f 100644 --- a/packages/opencode/src/tool/apply_patch.ts +++ b/packages/opencode/src/tool/apply_patch.ts @@ -151,17 +151,15 @@ export const ApplyPatchTool = Tool.define( } case "delete": { - const source = yield* Bom - .readFile(afs, filePath) - .pipe( - Effect.catch((error) => - Effect.fail( - new Error( - `apply_patch verification failed: ${error instanceof Error ? error.message : String(error)}`, - ), + const source = yield* Bom.readFile(afs, filePath).pipe( + Effect.catch((error) => + Effect.fail( + new Error( + `apply_patch verification failed: ${error instanceof Error ? error.message : String(error)}`, ), ), - ) + ), + ) const contentToDelete = source.text const deleteDiff = trimDiff(createTwoFilesPatch(filePath, filePath, contentToDelete, "")) diff --git a/packages/opencode/test/tool/apply_patch.test.ts b/packages/opencode/test/tool/apply_patch.test.ts index 7ce483726b..fa88432136 100644 --- a/packages/opencode/test/tool/apply_patch.test.ts +++ b/packages/opencode/test/tool/apply_patch.test.ts @@ -206,7 +206,8 @@ describe("tool.apply_patch freeform", () => { const target = path.join(fixture.path, "example.cs") await fs.writeFile(target, `${bom}using System;\n\nclass Test {}\n`, "utf-8") - const patchText = "*** Begin Patch\n*** Update File: example.cs\n@@\n class Test {}\n+class Next {}\n*** End Patch" + const patchText = + "*** Begin Patch\n*** Update File: example.cs\n@@\n class Test {}\n+class Next {}\n*** End Patch" await execute({ patchText }, ctx) From 0595c289046d7f45d82a563ad0c76b3ccfca050b Mon Sep 17 00:00:00 2001 From: Luke Parker <10430890+Hona@users.noreply.github.com> Date: Wed, 22 Apr 2026 18:17:35 +1000 Subject: [PATCH 59/73] test: fix cross-spawn stderr race on Windows CI (#23808) --- packages/opencode/test/effect/cross-spawn-spawner.test.ts | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff 
--git a/packages/opencode/test/effect/cross-spawn-spawner.test.ts b/packages/opencode/test/effect/cross-spawn-spawner.test.ts index 5990635aa2..201d998667 100644 --- a/packages/opencode/test/effect/cross-spawn-spawner.test.ts +++ b/packages/opencode/test/effect/cross-spawn-spawner.test.ts @@ -169,7 +169,10 @@ describe("cross-spawn spawner", () => { 'process.stderr.write("stderr\\n", done)', ].join("\n"), ) - const [stdout, stderr] = yield* Effect.all([decodeByteStream(handle.stdout), decodeByteStream(handle.stderr)]) + const [stdout, stderr] = yield* Effect.all( + [decodeByteStream(handle.stdout), decodeByteStream(handle.stderr)], + { concurrency: 2 }, + ) expect(stdout).toBe("stdout") expect(stderr).toBe("stderr") }), From 6aa475fcac39cacda4730142314985c64b200bb5 Mon Sep 17 00:00:00 2001 From: "opencode-agent[bot]" Date: Wed, 22 Apr 2026 08:18:44 +0000 Subject: [PATCH 60/73] chore: generate --- packages/opencode/test/effect/cross-spawn-spawner.test.ts | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/packages/opencode/test/effect/cross-spawn-spawner.test.ts b/packages/opencode/test/effect/cross-spawn-spawner.test.ts index 201d998667..b4e52529c1 100644 --- a/packages/opencode/test/effect/cross-spawn-spawner.test.ts +++ b/packages/opencode/test/effect/cross-spawn-spawner.test.ts @@ -169,10 +169,9 @@ describe("cross-spawn spawner", () => { 'process.stderr.write("stderr\\n", done)', ].join("\n"), ) - const [stdout, stderr] = yield* Effect.all( - [decodeByteStream(handle.stdout), decodeByteStream(handle.stderr)], - { concurrency: 2 }, - ) + const [stdout, stderr] = yield* Effect.all([decodeByteStream(handle.stdout), decodeByteStream(handle.stderr)], { + concurrency: 2, + }) expect(stdout).toBe("stdout") expect(stderr).toBe("stderr") }), From 88c5f6bb19ecac5c60e9c42dcb2c497a416d390b Mon Sep 17 00:00:00 2001 From: Brendan Allan <14191578+Brendonovich@users.noreply.github.com> Date: Wed, 22 Apr 2026 17:09:00 +0800 Subject: [PATCH 61/73] fix: 
consolidate project avatar source logic (#23819) --- .../src/components/dialog-edit-project.tsx | 20 +++++++++++++------ .../app/src/pages/layout/sidebar-items.tsx | 16 ++++++++------- 2 files changed, 23 insertions(+), 13 deletions(-) diff --git a/packages/app/src/components/dialog-edit-project.tsx b/packages/app/src/components/dialog-edit-project.tsx index 621d56646d..8eb12daf52 100644 --- a/packages/app/src/components/dialog-edit-project.tsx +++ b/packages/app/src/components/dialog-edit-project.tsx @@ -12,6 +12,7 @@ import { type LocalProject, getAvatarColors } from "@/context/layout" import { getFilename } from "@opencode-ai/shared/util/path" import { Avatar } from "@opencode-ai/ui/avatar" import { useLanguage } from "@/context/language" +import { getProjectAvatarSource } from "@/pages/layout/sidebar-items" const AVATAR_COLOR_KEYS = ["pink", "mint", "orange", "purple", "cyan", "lime"] as const @@ -144,7 +145,11 @@ export function DialogEditProject(props: { project: LocalProject }) { }} > } > - {language.t("dialog.project.edit.icon.alt")} + {(src) => ( + {language.t("dialog.project.edit.icon.alt")} + )}
{ + if (store.color === color && !props.project.icon?.url) return setStore("color", store.color === color ? undefined : color) }} > diff --git a/packages/app/src/pages/layout/sidebar-items.tsx b/packages/app/src/pages/layout/sidebar-items.tsx index 88d50db3ed..5170311a7b 100644 --- a/packages/app/src/pages/layout/sidebar-items.tsx +++ b/packages/app/src/pages/layout/sidebar-items.tsx @@ -19,6 +19,14 @@ import { childSessionOnPath, hasProjectPermissions } from "./helpers" const OPENCODE_PROJECT_ID = "4b0ea68d7af9a6031a7ffda7ad66e0cb83315750" +export function getProjectAvatarSource(id?: string, icon?: { color?: string; url?: string; override?: string }) { + return id === OPENCODE_PROJECT_ID + ? "https://opencode.ai/favicon.svg" + : icon?.color + ? undefined + : icon?.override || icon?.url +} + export const ProjectIcon = (props: { project: LocalProject; class?: string; notify?: boolean }): JSX.Element => { const globalSync = useGlobalSync() const notification = useNotification() @@ -42,13 +50,7 @@ export const ProjectIcon = (props: { project: LocalProject; class?: string; noti
Date: Wed, 22 Apr 2026 16:35:13 +0530 Subject: [PATCH 62/73] fix(tui): fail fast on invalid session startup (#23837) --- packages/opencode/src/cli/cmd/tui/attach.ts | 16 ++++++ .../src/cli/cmd/tui/routes/session/index.tsx | 55 ++++++++++++------- packages/opencode/src/cli/cmd/tui/thread.ts | 14 +++++ .../src/cli/cmd/tui/validate-session.ts | 24 ++++++++ packages/opencode/src/util/error.ts | 9 +++ 5 files changed, 97 insertions(+), 21 deletions(-) create mode 100644 packages/opencode/src/cli/cmd/tui/validate-session.ts diff --git a/packages/opencode/src/cli/cmd/tui/attach.ts b/packages/opencode/src/cli/cmd/tui/attach.ts index 9a93f3f57a..cb6b95a56c 100644 --- a/packages/opencode/src/cli/cmd/tui/attach.ts +++ b/packages/opencode/src/cli/cmd/tui/attach.ts @@ -3,6 +3,8 @@ import { UI } from "@/cli/ui" import { tui } from "./app" import { win32DisableProcessedInput, win32InstallCtrlCGuard } from "./win32" import { TuiConfig } from "@/cli/cmd/tui/config/tui" +import { errorMessage } from "@/util/error" +import { validateSession } from "./validate-session" export const AttachCommand = cmd({ command: "attach ", @@ -65,6 +67,20 @@ export const AttachCommand = cmd({ return { Authorization: auth } })() const config = await TuiConfig.get() + + try { + await validateSession({ + url: args.url, + sessionID: args.session, + directory, + headers, + }) + } catch (error) { + UI.error(errorMessage(error)) + process.exitCode = 1 + return + } + await tui({ url: args.url, config, diff --git a/packages/opencode/src/cli/cmd/tui/routes/session/index.tsx b/packages/opencode/src/cli/cmd/tui/routes/session/index.tsx index 06be5dfbef..2f5da1d231 100644 --- a/packages/opencode/src/cli/cmd/tui/routes/session/index.tsx +++ b/packages/opencode/src/cli/cmd/tui/routes/session/index.tsx @@ -68,6 +68,7 @@ import { Flag } from "@/flag/flag" import { LANGUAGE_EXTENSIONS } from "@/lsp/language" import parsers from "../../../../../../parsers-config.ts" import * as Clipboard from "../../util/clipboard" 
+import { errorMessage } from "@/util/error" import { Toast, useToast } from "../../ui/toast" import { useKV } from "../../context/kv.tsx" import * as Editor from "../../util/editor" @@ -180,31 +181,43 @@ export function Session() { const toast = useToast() const sdk = useSDK() - createEffect(async () => { - const previousWorkspace = project.workspace.current() - const result = await sdk.client.session.get({ sessionID: route.sessionID }, { throwOnError: true }) - if (!result.data) { + createEffect(() => { + const sessionID = route.sessionID + void (async () => { + const previousWorkspace = project.workspace.current() + const result = await sdk.client.session.get({ sessionID }, { throwOnError: true }) + if (!result.data) { + toast.show({ + message: `Session not found: ${sessionID}`, + variant: "error", + duration: 5000, + }) + navigate({ type: "home" }) + return + } + + if (result.data.workspaceID !== previousWorkspace) { + project.workspace.set(result.data.workspaceID) + + // Sync all the data for this workspace. Note that this + // workspace may not exist anymore which is why this is not + // fatal. If it doesn't we still want to show the session + // (which will be non-interactive) + try { + await sync.bootstrap({ fatal: false }) + } catch {} + } + await sync.session.sync(sessionID) + if (route.sessionID === sessionID && scroll) scroll.scrollBy(100_000) + })().catch((error) => { + if (route.sessionID !== sessionID) return toast.show({ - message: `Session not found: ${route.sessionID}`, + message: errorMessage(error), variant: "error", + duration: 5000, }) navigate({ type: "home" }) - return - } - - if (result.data.workspaceID !== previousWorkspace) { - project.workspace.set(result.data.workspaceID) - - // Sync all the data for this workspace. Note that this - // workspace may not exist anymore which is why this is not - // fatal. 
If it doesn't we still want to show the session - // (which will be non-interactive) - try { - await sync.bootstrap({ fatal: false }) - } catch (e) {} - } - await sync.session.sync(route.sessionID) - if (scroll) scroll.scrollBy(100_000) + }) }) let lastSwitch: string | undefined = undefined diff --git a/packages/opencode/src/cli/cmd/tui/thread.ts b/packages/opencode/src/cli/cmd/tui/thread.ts index e3e9eb8117..a2a53ecafa 100644 --- a/packages/opencode/src/cli/cmd/tui/thread.ts +++ b/packages/opencode/src/cli/cmd/tui/thread.ts @@ -16,6 +16,7 @@ import { win32DisableProcessedInput, win32InstallCtrlCGuard } from "./win32" import { writeHeapSnapshot } from "v8" import { TuiConfig } from "./config/tui" import { OPENCODE_PROCESS_ROLE, OPENCODE_RUN_ID, ensureRunID, sanitizedProcessEnv } from "@/util/opencode-process" +import { validateSession } from "./validate-session" declare global { const OPENCODE_WORKER_PATH: string @@ -202,6 +203,19 @@ export const TuiThreadCommand = cmd({ events: createEventSource(client), } + try { + await validateSession({ + url: transport.url, + sessionID: args.session, + directory: cwd, + fetch: transport.fetch, + }) + } catch (error) { + UI.error(errorMessage(error)) + process.exitCode = 1 + return + } + setTimeout(() => { client.call("checkUpgrade", { directory: cwd }).catch(() => {}) }, 1000).unref?.() diff --git a/packages/opencode/src/cli/cmd/tui/validate-session.ts b/packages/opencode/src/cli/cmd/tui/validate-session.ts new file mode 100644 index 0000000000..e2a21d51e1 --- /dev/null +++ b/packages/opencode/src/cli/cmd/tui/validate-session.ts @@ -0,0 +1,24 @@ +import { createOpencodeClient } from "@opencode-ai/sdk/v2" +import { SessionID } from "@/session/schema" + +export async function validateSession(input: { + url: string + sessionID?: string + directory?: string + fetch?: typeof fetch + headers?: RequestInit["headers"] +}) { + if (!input.sessionID) return + + const result = SessionID.zod.safeParse(input.sessionID) + if 
(!result.success) { + throw new Error(`Invalid session ID: ${result.error.issues.at(0)?.message ?? "unknown error"}`) + } + + await createOpencodeClient({ + baseUrl: input.url, + directory: input.directory, + fetch: input.fetch, + headers: input.headers, + }).session.get({ sessionID: result.data }, { throwOnError: true }) +} diff --git a/packages/opencode/src/util/error.ts b/packages/opencode/src/util/error.ts index 75fef9fc9a..76cb9c7cf1 100644 --- a/packages/opencode/src/util/error.ts +++ b/packages/opencode/src/util/error.ts @@ -26,6 +26,15 @@ export function errorMessage(error: unknown): string { return error.message } + if ( + isRecord(error) && + isRecord(error.data) && + typeof error.data.message === "string" && + error.data.message + ) { + return error.data.message + } + const text = String(error) if (text && text !== "[object Object]") return text From 266e965572ccc499b585e4a3558b93e56625e10d Mon Sep 17 00:00:00 2001 From: "opencode-agent[bot]" Date: Wed, 22 Apr 2026 11:07:04 +0000 Subject: [PATCH 63/73] chore: generate --- packages/opencode/src/util/error.ts | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/packages/opencode/src/util/error.ts b/packages/opencode/src/util/error.ts index 76cb9c7cf1..fbda2dc50e 100644 --- a/packages/opencode/src/util/error.ts +++ b/packages/opencode/src/util/error.ts @@ -26,12 +26,7 @@ export function errorMessage(error: unknown): string { return error.message } - if ( - isRecord(error) && - isRecord(error.data) && - typeof error.data.message === "string" && - error.data.message - ) { + if (isRecord(error) && isRecord(error.data) && typeof error.data.message === "string" && error.data.message) { return error.data.message } From 85ec11d5d2ce8721d2ea97f9674a7c49f2002f75 Mon Sep 17 00:00:00 2001 From: Shoubhit Dash Date: Wed, 22 Apr 2026 20:37:32 +0530 Subject: [PATCH 64/73] fix(session): improve session compaction (#23870) --- .../opencode/src/agent/prompt/compaction.txt | 19 +- 
packages/opencode/src/session/compaction.ts | 198 ++++++++++--- packages/opencode/src/session/message-v2.ts | 14 +- .../opencode/test/session/compaction.test.ts | 279 ++++++++++++++++-- .../opencode/test/session/message-v2.test.ts | 70 +++++ .../test/session/messages-pagination.test.ts | 64 ++++ 6 files changed, 561 insertions(+), 83 deletions(-) diff --git a/packages/opencode/src/agent/prompt/compaction.txt b/packages/opencode/src/agent/prompt/compaction.txt index c5831bb30e..c7cb838bba 100644 --- a/packages/opencode/src/agent/prompt/compaction.txt +++ b/packages/opencode/src/agent/prompt/compaction.txt @@ -1,16 +1,9 @@ -You are a helpful AI assistant tasked with summarizing conversations. +You are an anchored context summarization assistant for coding sessions. -When asked to summarize, provide a detailed but concise summary of the older conversation history. -The most recent turns may be preserved verbatim outside your summary, so focus on information that would still be needed to continue the work with that recent context available. -Focus on information that would be helpful for continuing the conversation, including: -- What was done -- What is currently being worked on -- Which files are being modified -- What needs to be done next -- Key user requests, constraints, or preferences that should persist -- Important technical decisions and why they were made +Summarize only the conversation history you are given. The newest turns may be kept verbatim outside your summary, so focus on the older context that still matters for continuing the work. -Your summary should be comprehensive enough to provide context but concise enough to be quickly understood. +If the prompt includes a block, treat it as the current anchored summary. Update it with the new history by preserving still-true details, removing stale details, and merging in new facts. -Do not respond to any questions in the conversation, only output the summary. 
-Respond in the same language the user used in the conversation. +Always follow the exact output structure requested by the user prompt. Keep every section, preserve exact file paths and identifiers when known, and prefer terse bullets over paragraphs. + +Do not answer the conversation itself. Do not mention that you are summarizing, compacting, or merging context. Respond in the same language as the conversation. diff --git a/packages/opencode/src/session/compaction.ts b/packages/opencode/src/session/compaction.ts index 037543064e..defdb870d7 100644 --- a/packages/opencode/src/session/compaction.ts +++ b/packages/opencode/src/session/compaction.ts @@ -32,16 +32,105 @@ export const Event = { export const PRUNE_MINIMUM = 20_000 export const PRUNE_PROTECT = 40_000 +const TOOL_OUTPUT_MAX_CHARS = 2_000 const PRUNE_PROTECTED_TOOLS = ["skill"] const DEFAULT_TAIL_TURNS = 2 const MIN_PRESERVE_RECENT_TOKENS = 2_000 const MAX_PRESERVE_RECENT_TOKENS = 8_000 +const SUMMARY_TEMPLATE = `Output exactly this Markdown structure and keep the section order unchanged: +--- +## Goal +- [single-sentence task summary] + +## Constraints & Preferences +- [user constraints, preferences, specs, or "(none)"] + +## Progress +### Done +- [completed work or "(none)"] + +### In Progress +- [current work or "(none)"] + +### Blocked +- [blockers or "(none)"] + +## Key Decisions +- [decision and why, or "(none)"] + +## Next Steps +- [ordered next actions or "(none)"] + +## Critical Context +- [important technical facts, errors, open questions, or "(none)"] + +## Relevant Files +- [file or directory path: why it matters, or "(none)"] +--- + +Rules: +- Keep every section, even when empty. +- Use terse bullets, not prose paragraphs. +- Preserve exact file paths, commands, error strings, and identifiers when known. 
+- Do not mention the summary process or that context was compacted.` type Turn = { start: number end: number id: MessageID } +type Tail = { + start: number + id: MessageID +} + +type CompletedCompaction = { + userIndex: number + assistantIndex: number + summary: string | undefined +} + +function summaryText(message: MessageV2.WithParts) { + const text = message.parts + .filter((part): part is MessageV2.TextPart => part.type === "text") + .map((part) => part.text.trim()) + .filter(Boolean) + .join("\n\n") + .trim() + return text || undefined +} + +function completedCompactions(messages: MessageV2.WithParts[]) { + const users = new Map() + for (let i = 0; i < messages.length; i++) { + const msg = messages[i] + if (msg.info.role !== "user") continue + if (!msg.parts.some((part) => part.type === "compaction")) continue + users.set(msg.info.id, i) + } + + return messages.flatMap((msg, assistantIndex): CompletedCompaction[] => { + if (msg.info.role !== "assistant") return [] + if (!msg.info.summary || !msg.info.finish || msg.info.error) return [] + const userIndex = users.get(msg.info.parentID) + if (userIndex === undefined) return [] + return [{ userIndex, assistantIndex, summary: summaryText(msg) }] + }) +} + +function buildPrompt(input: { previousSummary?: string; context: string[] }) { + const anchor = input.previousSummary + ? [ + "Update the anchored summary below using the conversation history above.", + "Preserve still-true details, remove stale details, and merge in the new facts.", + "", + input.previousSummary, + "", + ].join("\n") + : "Create a new anchored summary from the conversation history above." + return [anchor, SUMMARY_TEMPLATE, ...input.context].join("\n\n") +} + function preserveRecentBudget(input: { cfg: Config.Info; model: Provider.Model }) { return ( input.cfg.compaction?.preserve_recent_tokens ?? 
@@ -67,6 +156,31 @@ function turns(messages: MessageV2.WithParts[]) { return result } +function splitTurn(input: { + messages: MessageV2.WithParts[] + turn: Turn + model: Provider.Model + budget: number + estimate: (input: { messages: MessageV2.WithParts[]; model: Provider.Model }) => Effect.Effect +}) { + return Effect.gen(function* () { + if (input.budget <= 0) return undefined + if (input.turn.end - input.turn.start <= 1) return undefined + for (let start = input.turn.start + 1; start < input.turn.end; start++) { + const size = yield* input.estimate({ + messages: input.messages.slice(start, input.turn.end), + model: input.model, + }) + if (size > input.budget) continue + return { + start, + id: input.messages[start]!.info.id, + } satisfies Tail + } + return undefined + }) +} + export interface Interface { readonly isOverflow: (input: { tokens: MessageV2.Assistant["tokens"] @@ -147,18 +261,28 @@ export const layer: Layer.Layer< }), { concurrency: 1 }, ) - if (sizes.at(-1)! > budget) { - log.info("tail fallback", { budget, size: sizes.at(-1) }) - return { head: input.messages, tail_start_id: undefined } - } let total = 0 - let keep: Turn | undefined + let keep: Tail | undefined for (let i = recent.length - 1; i >= 0; i--) { + const turn = recent[i]! 
const size = sizes[i] - if (total + size > budget) break - total += size - keep = recent[i] + if (total + size <= budget) { + total += size + keep = { start: turn.start, id: turn.id } + continue + } + const remaining = budget - total + const split = yield* splitTurn({ + messages: input.messages, + turn, + model: input.model, + budget: remaining, + estimate, + }) + if (split) keep = split + else if (!keep) log.info("tail fallback", { budget, size, total }) + break } if (!keep || keep.start === 0) return { head: input.messages, tail_start_id: undefined } @@ -192,17 +316,15 @@ export const layer: Layer.Layer< if (msg.info.role === "assistant" && msg.info.summary) break loop for (let partIndex = msg.parts.length - 1; partIndex >= 0; partIndex--) { const part = msg.parts[partIndex] - if (part.type === "tool") - if (part.state.status === "completed") { - if (PRUNE_PROTECTED_TOOLS.includes(part.tool)) continue - if (part.state.time.compacted) break loop - const estimate = Token.estimate(part.state.output) - total += estimate - if (total > PRUNE_PROTECT) { - pruned += estimate - toPrune.push(part) - } - } + if (part.type !== "tool") continue + if (part.state.status !== "completed") continue + if (PRUNE_PROTECTED_TOOLS.includes(part.tool)) continue + if (part.state.time.compacted) break loop + const estimate = Token.estimate(part.state.output) + total += estimate + if (total <= PRUNE_PROTECT) continue + pruned += estimate + toPrune.push(part) } } @@ -263,8 +385,11 @@ export const layer: Layer.Layer< : yield* provider.getModel(userMessage.model.providerID, userMessage.model.modelID) const cfg = yield* config.get() const history = compactionPart && messages.at(-1)?.info.id === input.parentID ? 
messages.slice(0, -1) : messages + const prior = completedCompactions(history) + const hidden = new Set(prior.flatMap((item) => [item.userIndex, item.assistantIndex])) + const previousSummary = prior.at(-1)?.summary const selected = yield* select({ - messages: history, + messages: history.filter((_, index) => !hidden.has(index)), cfg, model, }) @@ -274,34 +399,13 @@ export const layer: Layer.Layer< { sessionID: input.sessionID }, { context: [], prompt: undefined }, ) - const defaultPrompt = `When constructing the summary, try to stick to this template: ---- -## Goal - -[What goal(s) is the user trying to accomplish?] - -## Instructions - -- [What important instructions did the user give you that are relevant] -- [If there is a plan or spec, include information about it so next agent can continue using it] - -## Discoveries - -[What notable things were learned during this conversation that would be useful for the next agent to know when continuing the work] - -## Accomplished - -[What work has been completed, what work is still in progress, and what work is left?] - -## Relevant files / directories - -[Construct a structured list of relevant files that have been read, edited, or created that pertain to the task at hand. If all the files in a directory are relevant, include the path to the directory.] ----` - - const prompt = compacting.prompt ?? [defaultPrompt, ...compacting.context].join("\n\n") + const nextPrompt = compacting.prompt ?? 
buildPrompt({ previousSummary, context: compacting.context }) const msgs = structuredClone(selected.head) yield* plugin.trigger("experimental.chat.messages.transform", {}, { messages: msgs }) - const modelMessages = yield* MessageV2.toModelMessagesEffect(msgs, model, { stripMedia: true }) + const modelMessages = yield* MessageV2.toModelMessagesEffect(msgs, model, { + stripMedia: true, + toolOutputMaxChars: TOOL_OUTPUT_MAX_CHARS, + }) const ctx = yield* InstanceState.context const msg: MessageV2.Assistant = { id: MessageID.ascending(), @@ -345,7 +449,7 @@ export const layer: Layer.Layer< ...modelMessages, { role: "user", - content: [{ type: "text", text: prompt }], + content: [{ type: "text", text: nextPrompt }], }, ], model, diff --git a/packages/opencode/src/session/message-v2.ts b/packages/opencode/src/session/message-v2.ts index 123f7b5401..980dd4da84 100644 --- a/packages/opencode/src/session/message-v2.ts +++ b/packages/opencode/src/session/message-v2.ts @@ -319,6 +319,12 @@ export const ToolStateCompleted = Schema.Struct({ .pipe(withStatics((s) => ({ zod: zod(s) }))) export type ToolStateCompleted = Types.DeepMutable> +function truncateToolOutput(text: string, maxChars?: number) { + if (!maxChars || text.length <= maxChars) return text + const omitted = text.length - maxChars + return `${text.slice(0, maxChars)}\n[Tool output truncated for compaction: omitted ${omitted} chars]` +} + export const ToolStateError = Schema.Struct({ status: Schema.Literal("error"), input: Schema.Record(Schema.String, Schema.Any), @@ -700,7 +706,7 @@ function providerMeta(metadata: Record | undefined) { export const toModelMessagesEffect = Effect.fnUntraced(function* ( input: WithParts[], model: Provider.Model, - options?: { stripMedia?: boolean }, + options?: { stripMedia?: boolean; toolOutputMaxChars?: number }, ) { const result: UIMessage[] = [] const toolNames = new Set() @@ -839,7 +845,9 @@ export const toModelMessagesEffect = Effect.fnUntraced(function* ( if (part.type === 
"tool") { toolNames.add(part.tool) if (part.state.status === "completed") { - const outputText = part.state.time.compacted ? "[Old tool result content cleared]" : part.state.output + const outputText = part.state.time.compacted + ? "[Old tool result content cleared]" + : truncateToolOutput(part.state.output, options?.toolOutputMaxChars) const attachments = part.state.time.compacted || options?.stripMedia ? [] : (part.state.attachments ?? []) // For providers that don't support media in tool results, extract media files @@ -955,7 +963,7 @@ export const toModelMessagesEffect = Effect.fnUntraced(function* ( export function toModelMessages( input: WithParts[], model: Provider.Model, - options?: { stripMedia?: boolean }, + options?: { stripMedia?: boolean; toolOutputMaxChars?: number }, ): Promise { return Effect.runPromise(toModelMessagesEffect(input, model, options).pipe(Effect.provide(EffectLogger.layer))) } diff --git a/packages/opencode/test/session/compaction.test.ts b/packages/opencode/test/session/compaction.test.ts index 0e2b179f00..2188d8d7cd 100644 --- a/packages/opencode/test/session/compaction.test.ts +++ b/packages/opencode/test/session/compaction.test.ts @@ -143,6 +143,43 @@ async function assistant(sessionID: SessionID, parentID: MessageID, root: string return msg } +async function summaryAssistant(sessionID: SessionID, parentID: MessageID, root: string, text: string) { + const msg: MessageV2.Assistant = { + id: MessageID.ascending(), + role: "assistant", + sessionID, + mode: "compaction", + agent: "compaction", + path: { cwd: root, root }, + cost: 0, + tokens: { + output: 0, + input: 0, + reasoning: 0, + cache: { read: 0, write: 0 }, + }, + modelID: ref.modelID, + providerID: ref.providerID, + parentID, + summary: true, + time: { created: Date.now() }, + finish: "end_turn", + } + await svc.updateMessage(msg) + await svc.updatePart({ + id: PartID.ascending(), + messageID: msg.id, + sessionID, + type: "text", + text, + }) + return msg +} + +async function 
lastCompactionPart(sessionID: SessionID) { + return (await svc.messages({ sessionID })).at(-2)?.parts.find((item): item is MessageV2.CompactionPart => item.type === "compaction") +} + function fake( input: Parameters[0], result: "continue" | "compact", @@ -946,12 +983,9 @@ describe("session.compaction.process", () => { ), ) - const part = (await svc.messages({ sessionID: session.id })) - .at(-2) - ?.parts.find((item) => item.type === "compaction") - + const part = await lastCompactionPart(session.id) expect(part?.type).toBe("compaction") - if (part?.type === "compaction") expect(part.tail_start_id).toBe(keep.id) + expect(part?.tail_start_id).toBe(keep.id) } finally { await rt.dispose() } @@ -991,12 +1025,9 @@ describe("session.compaction.process", () => { ), ) - const part = (await svc.messages({ sessionID: session.id })) - .at(-2) - ?.parts.find((item) => item.type === "compaction") - + const part = await lastCompactionPart(session.id) expect(part?.type).toBe("compaction") - if (part?.type === "compaction") expect(part.tail_start_id).toBe(keep.id) + expect(part?.tail_start_id).toBe(keep.id) } finally { await rt.dispose() } @@ -1042,12 +1073,9 @@ describe("session.compaction.process", () => { ), ) - const part = (await svc.messages({ sessionID: session.id })) - .at(-2) - ?.parts.find((item) => item.type === "compaction") - + const part = await lastCompactionPart(session.id) expect(part?.type).toBe("compaction") - if (part?.type === "compaction") expect(part.tail_start_id).toBeUndefined() + expect(part?.tail_start_id).toBeUndefined() expect(captured).toContain("yyyy") } finally { await rt.dispose() @@ -1103,12 +1131,9 @@ describe("session.compaction.process", () => { ), ) - const part = (await svc.messages({ sessionID: session.id })) - .at(-2) - ?.parts.find((item) => item.type === "compaction") - + const part = await lastCompactionPart(session.id) expect(part?.type).toBe("compaction") - if (part?.type === "compaction") expect(part.tail_start_id).toBeUndefined() + 
expect(part?.tail_start_id).toBeUndefined() expect(captured).toContain("recent image turn") expect(captured).toContain("Attached image/png: big.png") } finally { @@ -1118,6 +1143,76 @@ describe("session.compaction.process", () => { }) }) + test("retains a split turn suffix when a later message fits the preserve token budget", async () => { + await using tmp = await tmpdir({ git: true }) + const stub = llm() + let captured = "" + stub.push( + reply("summary", (input) => { + captured = JSON.stringify(input.messages) + }), + ) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const session = await svc.create({}) + await user(session.id, "older") + const recent = await user(session.id, "recent turn") + const large = await assistant(session.id, recent.id, tmp.path) + await svc.updatePart({ + id: PartID.ascending(), + messageID: large.id, + sessionID: session.id, + type: "text", + text: "z".repeat(2_000), + }) + const keep = await assistant(session.id, recent.id, tmp.path) + await svc.updatePart({ + id: PartID.ascending(), + messageID: keep.id, + sessionID: session.id, + type: "text", + text: "keep tail", + }) + await SessionCompaction.create({ + sessionID: session.id, + agent: "build", + model: ref, + auto: false, + }) + + const rt = liveRuntime(stub.layer, wide(), cfg({ tail_turns: 1, preserve_recent_tokens: 100 })) + try { + const msgs = await svc.messages({ sessionID: session.id }) + const parent = msgs.at(-1)?.info.id + expect(parent).toBeTruthy() + await rt.runPromise( + SessionCompaction.Service.use((svc) => + svc.process({ + parentID: parent!, + messages: msgs, + sessionID: session.id, + auto: false, + }), + ), + ) + + const part = await lastCompactionPart(session.id) + expect(part?.type).toBe("compaction") + expect(part?.tail_start_id).toBe(keep.id) + expect(captured).toContain("zzzz") + expect(captured).not.toContain("keep tail") + + const filtered = MessageV2.filterCompacted(MessageV2.stream(session.id)) + 
expect(filtered[0]?.info.id).toBe(keep.id) + expect(filtered.map((msg) => msg.info.id)).not.toContain(large.id) + } finally { + await rt.dispose() + } + }, + }) + }) + test("allows plugins to disable synthetic continue prompt", async () => { await using tmp = await tmpdir() await Instance.provide({ @@ -1530,6 +1625,80 @@ describe("session.compaction.process", () => { }) }) + test("anchors repeated compactions with the previous summary", async () => { + const stub = llm() + let captured = "" + stub.push(reply("summary one")) + stub.push( + reply("summary two", (input) => { + captured = JSON.stringify(input.messages) + }), + ) + + await using tmp = await tmpdir({ git: true }) + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const session = await svc.create({}) + await user(session.id, "older context") + await user(session.id, "keep this turn") + await SessionCompaction.create({ + sessionID: session.id, + agent: "build", + model: ref, + auto: false, + }) + + const rt = liveRuntime(stub.layer, wide()) + try { + let msgs = await svc.messages({ sessionID: session.id }) + let parent = msgs.at(-1)?.info.id + expect(parent).toBeTruthy() + await rt.runPromise( + SessionCompaction.Service.use((svc) => + svc.process({ + parentID: parent!, + messages: msgs, + sessionID: session.id, + auto: false, + }), + ), + ) + + await user(session.id, "latest turn") + await SessionCompaction.create({ + sessionID: session.id, + agent: "build", + model: ref, + auto: false, + }) + + msgs = MessageV2.filterCompacted(MessageV2.stream(session.id)) + parent = msgs.at(-1)?.info.id + expect(parent).toBeTruthy() + await rt.runPromise( + SessionCompaction.Service.use((svc) => + svc.process({ + parentID: parent!, + messages: msgs, + sessionID: session.id, + auto: false, + }), + ), + ) + + expect(captured).toContain("") + expect(captured).toContain("summary one") + expect(captured.match(/summary one/g)?.length).toBe(1) + expect(captured).toContain("## Constraints & Preferences") + 
expect(captured).toContain("## Progress") + } finally { + await rt.dispose() + } + }, + }) + }) + test("keeps recent pre-compaction turns across repeated compactions", async () => { const stub = llm() stub.push(reply("summary one")) @@ -1604,6 +1773,76 @@ describe("session.compaction.process", () => { }, }) }) + + test("ignores previous summaries when sizing the retained tail", async () => { + await using tmp = await tmpdir() + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const session = await svc.create({}) + await user(session.id, "older") + const keep = await user(session.id, "keep this turn") + const keepReply = await assistant(session.id, keep.id, tmp.path) + await svc.updatePart({ + id: PartID.ascending(), + messageID: keepReply.id, + sessionID: session.id, + type: "text", + text: "keep reply", + }) + + await SessionCompaction.create({ + sessionID: session.id, + agent: "build", + model: ref, + auto: false, + }) + const firstCompaction = (await svc.messages({ sessionID: session.id })).at(-1)?.info.id + expect(firstCompaction).toBeTruthy() + await summaryAssistant(session.id, firstCompaction!, tmp.path, "summary ".repeat(800)) + + const recent = await user(session.id, "recent turn") + const recentReply = await assistant(session.id, recent.id, tmp.path) + await svc.updatePart({ + id: PartID.ascending(), + messageID: recentReply.id, + sessionID: session.id, + type: "text", + text: "recent reply", + }) + + await SessionCompaction.create({ + sessionID: session.id, + agent: "build", + model: ref, + auto: false, + }) + + const rt = runtime("continue", Plugin.defaultLayer, wide(), cfg({ tail_turns: 2, preserve_recent_tokens: 500 })) + try { + const msgs = await svc.messages({ sessionID: session.id }) + const parent = msgs.at(-1)?.info.id + expect(parent).toBeTruthy() + await rt.runPromise( + SessionCompaction.Service.use((svc) => + svc.process({ + parentID: parent!, + messages: msgs, + sessionID: session.id, + auto: false, + }), + ), + ) + + 
const part = await lastCompactionPart(session.id) + expect(part?.type).toBe("compaction") + expect(part?.tail_start_id).toBe(keep.id) + } finally { + await rt.dispose() + } + }, + }) + }) }) describe("util.token.estimate", () => { diff --git a/packages/opencode/test/session/message-v2.test.ts b/packages/opencode/test/session/message-v2.test.ts index 55ae65c560..231d58c21a 100644 --- a/packages/opencode/test/session/message-v2.test.ts +++ b/packages/opencode/test/session/message-v2.test.ts @@ -585,6 +585,76 @@ describe("session.message-v2.toModelMessage", () => { ]) }) + test("truncates tool output when requested", async () => { + const userID = "m-user" + const assistantID = "m-assistant" + + const input: MessageV2.WithParts[] = [ + { + info: userInfo(userID), + parts: [ + { + ...basePart(userID, "u1"), + type: "text", + text: "run tool", + }, + ] as MessageV2.Part[], + }, + { + info: assistantInfo(assistantID, userID), + parts: [ + { + ...basePart(assistantID, "a1"), + type: "tool", + callID: "call-1", + tool: "bash", + state: { + status: "completed", + input: { cmd: "ls" }, + output: "abcdefghij", + title: "Bash", + metadata: {}, + time: { start: 0, end: 1 }, + }, + }, + ] as MessageV2.Part[], + }, + ] + + expect(await MessageV2.toModelMessages(input, model, { toolOutputMaxChars: 4 })).toStrictEqual([ + { + role: "user", + content: [{ type: "text", text: "run tool" }], + }, + { + role: "assistant", + content: [ + { + type: "tool-call", + toolCallId: "call-1", + toolName: "bash", + input: { cmd: "ls" }, + providerExecuted: undefined, + }, + ], + }, + { + role: "tool", + content: [ + { + type: "tool-result", + toolCallId: "call-1", + toolName: "bash", + output: { + type: "text", + value: "abcd\n[Tool output truncated for compaction: omitted 6 chars]", + }, + }, + ], + }, + ]) + }) + test("converts assistant tool error into error-text tool result", async () => { const userID = "m-user" const assistantID = "m-assistant" diff --git 
a/packages/opencode/test/session/messages-pagination.test.ts b/packages/opencode/test/session/messages-pagination.test.ts index d8dcf5e7cb..df2d18b9f1 100644 --- a/packages/opencode/test/session/messages-pagination.test.ts +++ b/packages/opencode/test/session/messages-pagination.test.ts @@ -837,6 +837,70 @@ describe("MessageV2.filterCompacted", () => { }) }) + test("retains an assistant tail when compaction starts inside a turn", async () => { + await Instance.provide({ + directory: root, + fn: async () => { + const session = await svc.create({}) + + const u1 = await addUser(session.id, "first") + const a1 = await addAssistant(session.id, u1, { finish: "end_turn" }) + await svc.updatePart({ + id: PartID.ascending(), + sessionID: session.id, + messageID: a1, + type: "text", + text: "first reply", + }) + + const u2 = await addUser(session.id, "second") + const a2 = await addAssistant(session.id, u2, { finish: "end_turn" }) + await svc.updatePart({ + id: PartID.ascending(), + sessionID: session.id, + messageID: a2, + type: "text", + text: "second reply", + }) + const a3 = await addAssistant(session.id, u2, { finish: "end_turn" }) + await svc.updatePart({ + id: PartID.ascending(), + sessionID: session.id, + messageID: a3, + type: "text", + text: "tail reply", + }) + + const c1 = await addUser(session.id) + await addCompactionPart(session.id, c1, a3) + const s1 = await addAssistant(session.id, c1, { summary: true, finish: "end_turn" }) + await svc.updatePart({ + id: PartID.ascending(), + sessionID: session.id, + messageID: s1, + type: "text", + text: "summary", + }) + + const u3 = await addUser(session.id, "third") + const a4 = await addAssistant(session.id, u3, { finish: "end_turn" }) + await svc.updatePart({ + id: PartID.ascending(), + sessionID: session.id, + messageID: a4, + type: "text", + text: "third reply", + }) + + const result = MessageV2.filterCompacted(MessageV2.stream(session.id)) + + expect(result.map((item) => item.info.id)).toEqual([a3, c1, s1, u3, a4]) 
+ + await svc.remove(session.id) + }, + }) + }) + test("prefers latest compaction boundary when repeated compactions exist", async () => { await Instance.provide({ directory: root, From e300209db978f903f3e355f50005fa1f5a1a6b7a Mon Sep 17 00:00:00 2001 From: "opencode-agent[bot]" Date: Wed, 22 Apr 2026 15:09:51 +0000 Subject: [PATCH 65/73] chore: generate --- packages/opencode/test/session/compaction.test.ts | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/packages/opencode/test/session/compaction.test.ts b/packages/opencode/test/session/compaction.test.ts index 2188d8d7cd..037613d469 100644 --- a/packages/opencode/test/session/compaction.test.ts +++ b/packages/opencode/test/session/compaction.test.ts @@ -177,7 +177,9 @@ async function summaryAssistant(sessionID: SessionID, parentID: MessageID, root: } async function lastCompactionPart(sessionID: SessionID) { - return (await svc.messages({ sessionID })).at(-2)?.parts.find((item): item is MessageV2.CompactionPart => item.type === "compaction") + return (await svc.messages({ sessionID })) + .at(-2) + ?.parts.find((item): item is MessageV2.CompactionPart => item.type === "compaction") } function fake( From a7fafe4c7bbbded211541ae2c2df3b78b46253fc Mon Sep 17 00:00:00 2001 From: "Steven T. 
Cramer" Date: Wed, 22 Apr 2026 23:02:10 +0700 Subject: [PATCH 66/73] fix(project): use git common dir for bare repo project cache (#19054) --- packages/opencode/src/project/project.ts | 12 +-- .../opencode/test/project/project.test.ts | 84 +++++++++++++++++++ 2 files changed, 90 insertions(+), 6 deletions(-) diff --git a/packages/opencode/src/project/project.ts b/packages/opencode/src/project/project.ts index 6a2132274a..d628f87f97 100644 --- a/packages/opencode/src/project/project.ts +++ b/packages/opencode/src/project/project.ts @@ -207,13 +207,13 @@ export const layer: Layer.Layer< vcs: fakeVcs, } } - const worktree = (() => { - const common = resolveGitPath(sandbox, commonDir.text.trim()) - return common === sandbox ? sandbox : pathSvc.dirname(common) - })() + const common = resolveGitPath(sandbox, commonDir.text.trim()) + const bareCheck = yield* git(["config", "--bool", "core.bare"], { cwd: sandbox }) + const isBareRepo = bareCheck.code === 0 && bareCheck.text.trim() === "true" + const worktree = common === sandbox ? sandbox : isBareRepo ? common : pathSvc.dirname(common) if (id == null) { - id = yield* readCachedProjectId(pathSvc.join(worktree, ".git")) + id = yield* readCachedProjectId(common) } if (!id) { @@ -226,7 +226,7 @@ export const layer: Layer.Layer< id = roots[0] ? 
ProjectID.make(roots[0]) : undefined if (id) { - yield* fs.writeFileString(pathSvc.join(worktree, ".git", "opencode"), id).pipe(Effect.ignore) + yield* fs.writeFileString(pathSvc.join(common, "opencode"), id).pipe(Effect.ignore) } } diff --git a/packages/opencode/test/project/project.test.ts b/packages/opencode/test/project/project.test.ts index 4dc9ee5efa..4664b6c258 100644 --- a/packages/opencode/test/project/project.test.ts +++ b/packages/opencode/test/project/project.test.ts @@ -472,3 +472,87 @@ describe("Project.addSandbox and Project.removeSandbox", () => { expect(events.some((e) => e.payload.type === Project.Event.Updated.type)).toBe(true) }) }) + +describe("Project.fromDirectory with bare repos", () => { + test("worktree from bare repo should cache in bare repo, not parent", async () => { + await using tmp = await tmpdir({ git: true }) + + const parentDir = path.dirname(tmp.path) + const barePath = path.join(parentDir, `bare-${Date.now()}.git`) + const worktreePath = path.join(parentDir, `worktree-${Date.now()}`) + + try { + await $`git clone --bare ${tmp.path} ${barePath}`.quiet() + await $`git worktree add ${worktreePath} HEAD`.cwd(barePath).quiet() + + const { project } = await run((svc) => svc.fromDirectory(worktreePath)) + + expect(project.id).not.toBe(ProjectID.global) + expect(project.worktree).toBe(barePath) + + const correctCache = path.join(barePath, "opencode") + const wrongCache = path.join(parentDir, ".git", "opencode") + + expect(await Bun.file(correctCache).exists()).toBe(true) + expect(await Bun.file(wrongCache).exists()).toBe(false) + } finally { + await $`rm -rf ${barePath} ${worktreePath}`.quiet().nothrow() + } + }) + + test("different bare repos under same parent should not share project ID", async () => { + await using tmp1 = await tmpdir({ git: true }) + await using tmp2 = await tmpdir({ git: true }) + + const parentDir = path.dirname(tmp1.path) + const bareA = path.join(parentDir, `bare-a-${Date.now()}.git`) + const bareB = 
path.join(parentDir, `bare-b-${Date.now()}.git`) + const worktreeA = path.join(parentDir, `wt-a-${Date.now()}`) + const worktreeB = path.join(parentDir, `wt-b-${Date.now()}`) + + try { + await $`git clone --bare ${tmp1.path} ${bareA}`.quiet() + await $`git clone --bare ${tmp2.path} ${bareB}`.quiet() + await $`git worktree add ${worktreeA} HEAD`.cwd(bareA).quiet() + await $`git worktree add ${worktreeB} HEAD`.cwd(bareB).quiet() + + const { project: projA } = await run((svc) => svc.fromDirectory(worktreeA)) + const { project: projB } = await run((svc) => svc.fromDirectory(worktreeB)) + + expect(projA.id).not.toBe(projB.id) + + const cacheA = path.join(bareA, "opencode") + const cacheB = path.join(bareB, "opencode") + const wrongCache = path.join(parentDir, ".git", "opencode") + + expect(await Bun.file(cacheA).exists()).toBe(true) + expect(await Bun.file(cacheB).exists()).toBe(true) + expect(await Bun.file(wrongCache).exists()).toBe(false) + } finally { + await $`rm -rf ${bareA} ${bareB} ${worktreeA} ${worktreeB}`.quiet().nothrow() + } + }) + + test("bare repo without .git suffix is still detected via core.bare", async () => { + await using tmp = await tmpdir({ git: true }) + + const parentDir = path.dirname(tmp.path) + const barePath = path.join(parentDir, `bare-no-suffix-${Date.now()}`) + const worktreePath = path.join(parentDir, `worktree-${Date.now()}`) + + try { + await $`git clone --bare ${tmp.path} ${barePath}`.quiet() + await $`git worktree add ${worktreePath} HEAD`.cwd(barePath).quiet() + + const { project } = await run((svc) => svc.fromDirectory(worktreePath)) + + expect(project.id).not.toBe(ProjectID.global) + expect(project.worktree).toBe(barePath) + + const correctCache = path.join(barePath, "opencode") + expect(await Bun.file(correctCache).exists()).toBe(true) + } finally { + await $`rm -rf ${barePath} ${worktreePath}`.quiet().nothrow() + } + }) +}) From d61096b60bb0b8bbc475f80eb14ae8b309f04c90 Mon Sep 17 00:00:00 2001 From: Jack Date: Thu, 23 Apr 2026 
00:28:20 +0800 Subject: [PATCH 67/73] docs: add MiMo V2.5 to Go pages (#23876) --- packages/console/app/src/i18n/ar.ts | 8 ++++---- packages/console/app/src/i18n/br.ts | 8 ++++---- packages/console/app/src/i18n/da.ts | 8 ++++---- packages/console/app/src/i18n/de.ts | 8 ++++---- packages/console/app/src/i18n/en.ts | 8 ++++---- packages/console/app/src/i18n/es.ts | 8 ++++---- packages/console/app/src/i18n/fr.ts | 8 ++++---- packages/console/app/src/i18n/it.ts | 8 ++++---- packages/console/app/src/i18n/ja.ts | 8 ++++---- packages/console/app/src/i18n/ko.ts | 8 ++++---- packages/console/app/src/i18n/no.ts | 8 ++++---- packages/console/app/src/i18n/pl.ts | 8 ++++---- packages/console/app/src/i18n/ru.ts | 8 ++++---- packages/console/app/src/i18n/th.ts | 8 ++++---- packages/console/app/src/i18n/tr.ts | 8 ++++---- packages/console/app/src/i18n/zh.ts | 8 ++++---- packages/console/app/src/i18n/zht.ts | 8 ++++---- packages/console/app/src/routes/go/index.tsx | 4 +++- .../src/routes/workspace/[id]/go/lite-section.tsx | 6 ++++-- packages/web/src/content/docs/ar/go.mdx | 12 ++++++++++-- packages/web/src/content/docs/bs/go.mdx | 12 ++++++++++-- packages/web/src/content/docs/da/go.mdx | 12 ++++++++++-- packages/web/src/content/docs/de/go.mdx | 12 ++++++++++-- packages/web/src/content/docs/es/go.mdx | 12 ++++++++++-- packages/web/src/content/docs/fr/go.mdx | 12 ++++++++++-- packages/web/src/content/docs/go.mdx | 12 ++++++++++-- packages/web/src/content/docs/it/go.mdx | 12 ++++++++++-- packages/web/src/content/docs/ja/go.mdx | 12 ++++++++++-- packages/web/src/content/docs/ko/go.mdx | 12 ++++++++++-- packages/web/src/content/docs/nb/go.mdx | 12 ++++++++++-- packages/web/src/content/docs/pl/go.mdx | 12 ++++++++++-- packages/web/src/content/docs/pt-br/go.mdx | 12 ++++++++++-- packages/web/src/content/docs/ru/go.mdx | 12 ++++++++++-- packages/web/src/content/docs/th/go.mdx | 12 ++++++++++-- packages/web/src/content/docs/tr/go.mdx | 12 ++++++++++-- 
packages/web/src/content/docs/zh-cn/go.mdx | 12 ++++++++++-- packages/web/src/content/docs/zh-tw/go.mdx | 12 ++++++++++-- 37 files changed, 255 insertions(+), 107 deletions(-) diff --git a/packages/console/app/src/i18n/ar.ts b/packages/console/app/src/i18n/ar.ts index 73c07c6775..a7883cfe4c 100644 --- a/packages/console/app/src/i18n/ar.ts +++ b/packages/console/app/src/i18n/ar.ts @@ -249,7 +249,7 @@ export const dict = { "go.title": "OpenCode Go | نماذج برمجة منخفضة التكلفة للجميع", "go.meta.description": - "يبدأ Go من $5 للشهر الأول، ثم $10/شهر، مع حدود طلب سخية لمدة 5 ساعات لـ GLM-5.1 وGLM-5 وKimi K2.5 وKimi K2.6 وMiMo-V2-Pro وMiMo-V2-Omni وQwen3.5 Plus وQwen3.6 Plus وMiniMax M2.5 وMiniMax M2.7.", + "يبدأ Go من $5 للشهر الأول، ثم $10/شهر، مع حدود طلب سخية لمدة 5 ساعات لـ GLM-5.1 وGLM-5 وKimi K2.5 وKimi K2.6 وMiMo-V2-Pro وMiMo-V2-Omni وMiMo-V2.5-Pro وMiMo-V2.5 وQwen3.5 Plus وQwen3.6 Plus وMiniMax M2.5 وMiniMax M2.7.", "go.hero.title": "نماذج برمجة منخفضة التكلفة للجميع", "go.hero.body": "يجلب Go البرمجة الوكيلة للمبرمجين حول العالم. يوفر حدودًا سخية ووصولًا موثوقًا إلى أقوى النماذج مفتوحة المصدر، حتى تتمكن من البناء باستخدام وكلاء أقوياء دون القلق بشأن التكلفة أو التوفر.", @@ -300,7 +300,7 @@ export const dict = { "go.problem.item2": "حدود سخية ووصول موثوق", "go.problem.item3": "مصمم لأكبر عدد ممكن من المبرمجين", "go.problem.item4": - "يتضمن GLM-5.1 وGLM-5 وKimi K2.5 وKimi K2.6 وMiMo-V2-Pro وMiMo-V2-Omni وQwen3.5 Plus وQwen3.6 Plus وMiniMax M2.5 وMiniMax M2.7", + "يتضمن GLM-5.1 وGLM-5 وKimi K2.5 وKimi K2.6 وMiMo-V2-Pro وMiMo-V2-Omni وMiMo-V2.5-Pro وMiMo-V2.5 وQwen3.5 Plus وQwen3.6 Plus وMiniMax M2.5 وMiniMax M2.7", "go.how.title": "كيف يعمل Go", "go.how.body": "يبدأ Go من $5 للشهر الأول، ثم $10/شهر. يمكنك استخدامه مع OpenCode أو أي وكيل.", "go.how.step1.title": "أنشئ حسابًا", @@ -324,7 +324,7 @@ export const dict = { "go.faq.a2": "يتضمن Go النماذج المدرجة أدناه، مع حدود سخية وإتاحة موثوقة.", "go.faq.q3": "هل Go هو نفسه Zen؟", "go.faq.a3": - "لا. 
Zen هو الدفع حسب الاستخدام، بينما يبدأ Go من $5 للشهر الأول، ثم $10/شهر، مع حدود سخية ووصول موثوق إلى نماذج المصدر المفتوح GLM-5.1 وGLM-5 وKimi K2.5 وKimi K2.6 وMiMo-V2-Pro وMiMo-V2-Omni وQwen3.5 Plus وQwen3.6 Plus وMiniMax M2.5 وMiniMax M2.7.", + "لا. Zen هو الدفع حسب الاستخدام، بينما يبدأ Go من $5 للشهر الأول، ثم $10/شهر، مع حدود سخية ووصول موثوق إلى نماذج المصدر المفتوح GLM-5.1 وGLM-5 وKimi K2.5 وKimi K2.6 وMiMo-V2-Pro وMiMo-V2-Omni وMiMo-V2.5-Pro وMiMo-V2.5 وQwen3.5 Plus وQwen3.6 Plus وMiniMax M2.5 وMiniMax M2.7.", "go.faq.q4": "كم تكلفة Go؟", "go.faq.a4.p1.beforePricing": "تكلفة Go", "go.faq.a4.p1.pricingLink": "$5 للشهر الأول", @@ -347,7 +347,7 @@ export const dict = { "go.faq.q9": "ما الفرق بين النماذج المجانية وGo؟", "go.faq.a9": - "تشمل النماذج المجانية Big Pickle بالإضافة إلى النماذج الترويجية المتاحة في ذلك الوقت، مع حصة 200 طلب/يوم. يتضمن Go نماذج GLM-5.1 وGLM-5 وKimi K2.5 وKimi K2.6 وMiMo-V2-Pro وMiMo-V2-Omni وQwen3.5 Plus وQwen3.6 Plus وMiniMax M2.5 وMiniMax M2.7 مع حصص طلبات أعلى مطبقة عبر نوافذ متجددة (5 ساعات، أسبوعيًا، وشهريًا)، تعادل تقريبًا 12 دولارًا كل 5 ساعات، و30 دولارًا في الأسبوع، و60 دولارًا في الشهر (تختلف أعداد الطلبات الفعلية حسب النموذج والاستخدام).", + "تشمل النماذج المجانية Big Pickle بالإضافة إلى النماذج الترويجية المتاحة في ذلك الوقت، مع حصة 200 طلب/يوم. يتضمن Go نماذج GLM-5.1 وGLM-5 وKimi K2.5 وKimi K2.6 وMiMo-V2-Pro وMiMo-V2-Omni وMiMo-V2.5-Pro وMiMo-V2.5 وQwen3.5 Plus وQwen3.6 Plus وMiniMax M2.5 وMiniMax M2.7 مع حصص طلبات أعلى مطبقة عبر نوافذ متجددة (5 ساعات، أسبوعيًا، وشهريًا)، تعادل تقريبًا 12 دولارًا كل 5 ساعات، و30 دولارًا في الأسبوع، و60 دولارًا في الشهر (تختلف أعداد الطلبات الفعلية حسب النموذج والاستخدام).", "zen.api.error.rateLimitExceeded": "تم تجاوز حد الطلبات. 
يرجى المحاولة مرة أخرى لاحقًا.", "zen.api.error.modelNotSupported": "النموذج {{model}} غير مدعوم", diff --git a/packages/console/app/src/i18n/br.ts b/packages/console/app/src/i18n/br.ts index d79b8350ae..cf7b68d259 100644 --- a/packages/console/app/src/i18n/br.ts +++ b/packages/console/app/src/i18n/br.ts @@ -253,7 +253,7 @@ export const dict = { "go.title": "OpenCode Go | Modelos de codificação de baixo custo para todos", "go.meta.description": - "O Go começa em $5 no primeiro mês, depois $10/mês, com limites generosos de solicitação de 5 horas para GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 e MiniMax M2.7.", + "O Go começa em $5 no primeiro mês, depois $10/mês, com limites generosos de solicitação de 5 horas para GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 e MiniMax M2.7.", "go.hero.title": "Modelos de codificação de baixo custo para todos", "go.hero.body": "O Go traz a codificação com agentes para programadores em todo o mundo. Oferecendo limites generosos e acesso confiável aos modelos de código aberto mais capazes, para que você possa construir com agentes poderosos sem se preocupar com custos ou disponibilidade.", @@ -305,7 +305,7 @@ export const dict = { "go.problem.item2": "Limites generosos e acesso confiável", "go.problem.item3": "Feito para o maior número possível de programadores", "go.problem.item4": - "Inclui GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 e MiniMax M2.7", + "Inclui GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 e MiniMax M2.7", "go.how.title": "Como o Go funciona", "go.how.body": "O Go começa em $5 no primeiro mês, depois $10/mês. 
Você pode usá-lo com o OpenCode ou qualquer agente.", @@ -331,7 +331,7 @@ export const dict = { "go.faq.a2": "O Go inclui os modelos listados abaixo, com limites generosos e acesso confiável.", "go.faq.q3": "O Go é o mesmo que o Zen?", "go.faq.a3": - "Não. Zen é pay-as-you-go, enquanto o Go começa em $5 no primeiro mês, depois $10/mês, com limites generosos e acesso confiável aos modelos open source GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 e MiniMax M2.7.", + "Não. Zen é pay-as-you-go, enquanto o Go começa em $5 no primeiro mês, depois $10/mês, com limites generosos e acesso confiável aos modelos open source GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 e MiniMax M2.7.", "go.faq.q4": "Quanto custa o Go?", "go.faq.a4.p1.beforePricing": "O Go custa", "go.faq.a4.p1.pricingLink": "$5 no primeiro mês", @@ -355,7 +355,7 @@ export const dict = { "go.faq.q9": "Qual a diferença entre os modelos gratuitos e o Go?", "go.faq.a9": - "Os modelos gratuitos incluem Big Pickle e modelos promocionais disponíveis no momento, com uma cota de 200 requisições/dia. O Go inclui GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 e MiniMax M2.7 com cotas de requisição mais altas aplicadas em janelas móveis (5 horas, semanal e mensal), aproximadamente equivalentes a $12 por 5 horas, $30 por semana e $60 por mês (as contagens reais de requisições variam de acordo com o modelo e o uso).", + "Os modelos gratuitos incluem Big Pickle e modelos promocionais disponíveis no momento, com uma cota de 200 requisições/dia. 
O Go inclui GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 e MiniMax M2.7 com cotas de requisição mais altas aplicadas em janelas móveis (5 horas, semanal e mensal), aproximadamente equivalentes a $12 por 5 horas, $30 por semana e $60 por mês (as contagens reais de requisições variam de acordo com o modelo e o uso).", "zen.api.error.rateLimitExceeded": "Limite de taxa excedido. Por favor, tente novamente mais tarde.", "zen.api.error.modelNotSupported": "Modelo {{model}} não suportado", diff --git a/packages/console/app/src/i18n/da.ts b/packages/console/app/src/i18n/da.ts index e806983967..90eff469a2 100644 --- a/packages/console/app/src/i18n/da.ts +++ b/packages/console/app/src/i18n/da.ts @@ -251,7 +251,7 @@ export const dict = { "go.title": "OpenCode Go | Kodningsmodeller til lav pris for alle", "go.meta.description": - "Go starter ved $5 for den første måned, derefter $10/måned, med generøse 5-timers anmodningsgrænser for GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 og MiniMax M2.7.", + "Go starter ved $5 for den første måned, derefter $10/måned, med generøse 5-timers anmodningsgrænser for GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 og MiniMax M2.7.", "go.hero.title": "Kodningsmodeller til lav pris for alle", "go.hero.body": "Go bringer agentisk kodning til programmører over hele verden. 
Med generøse grænser og pålidelig adgang til de mest kapable open source-modeller, så du kan bygge med kraftfulde agenter uden at bekymre dig om omkostninger eller tilgængelighed.", @@ -302,7 +302,7 @@ export const dict = { "go.problem.item2": "Generøse grænser og pålidelig adgang", "go.problem.item3": "Bygget til så mange programmører som muligt", "go.problem.item4": - "Inkluderer GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 og MiniMax M2.7", + "Inkluderer GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 og MiniMax M2.7", "go.how.title": "Hvordan Go virker", "go.how.body": "Go starter ved $5 for den første måned, derefter $10/måned. Du kan bruge det med OpenCode eller enhver agent.", @@ -328,7 +328,7 @@ export const dict = { "go.faq.a2": "Go inkluderer modellerne nedenfor med generøse grænser og pålidelig adgang.", "go.faq.q3": "Er Go det samme som Zen?", "go.faq.a3": - "Nej. Zen er pay-as-you-go, mens Go starter ved $5 for den første måned, derefter $10/måned, med generøse grænser og pålidelig adgang til open source-modellerne GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 og MiniMax M2.7.", + "Nej. 
Zen er pay-as-you-go, mens Go starter ved $5 for den første måned, derefter $10/måned, med generøse grænser og pålidelig adgang til open source-modellerne GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 og MiniMax M2.7.", "go.faq.q4": "Hvad koster Go?", "go.faq.a4.p1.beforePricing": "Go koster", "go.faq.a4.p1.pricingLink": "$5 første måned", @@ -351,7 +351,7 @@ export const dict = { "go.faq.q9": "Hvad er forskellen på gratis modeller og Go?", "go.faq.a9": - "Gratis modeller inkluderer Big Pickle plus salgsfremmende modeller tilgængelige på det tidspunkt, med en kvote på 200 forespørgsler/dag. Go inkluderer GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 og MiniMax M2.7 med højere anmodningskvoter håndhævet over rullende vinduer (5-timers, ugentlig og månedlig), nogenlunde svarende til $12 pr. 5 timer, $30 pr. uge og $60 pr. måned (faktiske anmodningstal varierer efter model og brug).", + "Gratis modeller inkluderer Big Pickle plus salgsfremmende modeller tilgængelige på det tidspunkt, med en kvote på 200 forespørgsler/dag. Go inkluderer GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 og MiniMax M2.7 med højere anmodningskvoter håndhævet over rullende vinduer (5-timers, ugentlig og månedlig), nogenlunde svarende til $12 pr. 5 timer, $30 pr. uge og $60 pr. måned (faktiske anmodningstal varierer efter model og brug).", "zen.api.error.rateLimitExceeded": "Hastighedsgrænse overskredet. 
Prøv venligst igen senere.", "zen.api.error.modelNotSupported": "Model {{model}} understøttes ikke", diff --git a/packages/console/app/src/i18n/de.ts b/packages/console/app/src/i18n/de.ts index bdd47e77cf..af339802fa 100644 --- a/packages/console/app/src/i18n/de.ts +++ b/packages/console/app/src/i18n/de.ts @@ -253,7 +253,7 @@ export const dict = { "go.title": "OpenCode Go | Kostengünstige Coding-Modelle für alle", "go.meta.description": - "Go beginnt bei $5 für den ersten Monat, danach $10/Monat, mit großzügigen 5-Stunden-Anfragelimits für GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 und MiniMax M2.7.", + "Go beginnt bei $5 für den ersten Monat, danach $10/Monat, mit großzügigen 5-Stunden-Anfragelimits für GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 und MiniMax M2.7.", "go.hero.title": "Kostengünstige Coding-Modelle für alle", "go.hero.body": "Go bringt Agentic Coding zu Programmierern auf der ganzen Welt. Mit großzügigen Limits und zuverlässigem Zugang zu den leistungsfähigsten Open-Source-Modellen, damit du mit leistungsstarken Agenten entwickeln kannst, ohne dir Gedanken über Kosten oder Verfügbarkeit zu machen.", @@ -304,7 +304,7 @@ export const dict = { "go.problem.item2": "Großzügige Limits und zuverlässiger Zugang", "go.problem.item3": "Für so viele Programmierer wie möglich gebaut", "go.problem.item4": - "Beinhaltet GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 und MiniMax M2.7", + "Beinhaltet GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 und MiniMax M2.7", "go.how.title": "Wie Go funktioniert", "go.how.body": "Go beginnt bei $5 für den ersten Monat, danach $10/Monat. 
Du kannst es mit OpenCode oder jedem Agenten nutzen.", @@ -330,7 +330,7 @@ export const dict = { "go.faq.a2": "Go umfasst die unten aufgeführten Modelle mit großzügigen Limits und zuverlässigem Zugriff.", "go.faq.q3": "Ist Go dasselbe wie Zen?", "go.faq.a3": - "Nein. Zen ist Pay-as-you-go, während Go bei $5 für den ersten Monat beginnt, danach $10/Monat, mit großzügigen Limits und zuverlässigem Zugang zu den Open-Source-Modellen GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 und MiniMax M2.7.", + "Nein. Zen ist Pay-as-you-go, während Go bei $5 für den ersten Monat beginnt, danach $10/Monat, mit großzügigen Limits und zuverlässigem Zugang zu den Open-Source-Modellen GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 und MiniMax M2.7.", "go.faq.q4": "Wie viel kostet Go?", "go.faq.a4.p1.beforePricing": "Go kostet", "go.faq.a4.p1.pricingLink": "$5 im ersten Monat", @@ -354,7 +354,7 @@ export const dict = { "go.faq.q9": "Was ist der Unterschied zwischen kostenlosen Modellen und Go?", "go.faq.a9": - "Kostenlose Modelle beinhalten Big Pickle sowie Werbemodelle, die zum jeweiligen Zeitpunkt verfügbar sind, mit einem Kontingent von 200 Anfragen/Tag. Go beinhaltet GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 und MiniMax M2.7 mit höheren Anfragekontingenten, die über rollierende Zeitfenster (5 Stunden, wöchentlich und monatlich) durchgesetzt werden, grob äquivalent zu $12 pro 5 Stunden, $30 pro Woche und $60 pro Monat (tatsächliche Anfragezahlen variieren je nach Modell und Nutzung).", + "Kostenlose Modelle beinhalten Big Pickle sowie Werbemodelle, die zum jeweiligen Zeitpunkt verfügbar sind, mit einem Kontingent von 200 Anfragen/Tag. 
Go beinhaltet GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 und MiniMax M2.7 mit höheren Anfragekontingenten, die über rollierende Zeitfenster (5 Stunden, wöchentlich und monatlich) durchgesetzt werden, grob äquivalent zu $12 pro 5 Stunden, $30 pro Woche und $60 pro Monat (tatsächliche Anfragezahlen variieren je nach Modell und Nutzung).", "zen.api.error.rateLimitExceeded": "Ratenlimit überschritten. Bitte versuche es später erneut.", "zen.api.error.modelNotSupported": "Modell {{model}} wird nicht unterstützt", diff --git a/packages/console/app/src/i18n/en.ts b/packages/console/app/src/i18n/en.ts index a242ff1010..f5cc954e5e 100644 --- a/packages/console/app/src/i18n/en.ts +++ b/packages/console/app/src/i18n/en.ts @@ -248,7 +248,7 @@ export const dict = { "go.title": "OpenCode Go | Low cost coding models for everyone", "go.meta.description": - "Go starts at $5 for your first month, then $10/month, with generous 5-hour request limits for GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5, and MiniMax M2.7.", + "Go starts at $5 for your first month, then $10/month, with generous 5-hour request limits for GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5, and MiniMax M2.7.", "go.banner.badge": "3x", "go.banner.text": "Kimi K2.6 gets 3× usage limits through April 27", "go.hero.title": "Low cost coding models for everyone", @@ -298,7 +298,7 @@ export const dict = { "go.problem.item2": "Generous limits and reliable access", "go.problem.item3": "Built for as many programmers as possible", "go.problem.item4": - "Includes GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5, and MiniMax M2.7", + "Includes GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 
Plus, Qwen3.6 Plus, MiniMax M2.5, and MiniMax M2.7", "go.how.title": "How Go works", "go.how.body": "Go starts at $5 for your first month, then $10/month. You can use it with OpenCode or any agent.", "go.how.step1.title": "Create an account", @@ -323,7 +323,7 @@ export const dict = { "go.faq.a2": "Go includes the models listed below, with generous limits and reliable access.", "go.faq.q3": "Is Go the same as Zen?", "go.faq.a3": - "No. Zen is pay-as-you-go, while Go starts at $5 for your first month, then $10/month, with generous limits and reliable access to open-source models GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5, and MiniMax M2.7.", + "No. Zen is pay-as-you-go, while Go starts at $5 for your first month, then $10/month, with generous limits and reliable access to open-source models GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5, and MiniMax M2.7.", "go.faq.q4": "How much does Go cost?", "go.faq.a4.p1.beforePricing": "Go costs", "go.faq.a4.p1.pricingLink": "$5 first month", @@ -347,7 +347,7 @@ export const dict = { "go.faq.q9": "What is the difference between free models and Go?", "go.faq.a9": - "Free models include Big Pickle plus promotional models available at the time, with a quota of 200 requests/day. Go includes GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5, and MiniMax M2.7 with higher request quotas enforced across rolling windows (5-hour, weekly, and monthly), roughly equivalent to $12 per 5 hours, $30 per week, and $60 per month (actual request counts vary by model and usage).", + "Free models include Big Pickle plus promotional models available at the time, with a quota of 200 requests/day. 
Go includes GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5, and MiniMax M2.7 with higher request quotas enforced across rolling windows (5-hour, weekly, and monthly), roughly equivalent to $12 per 5 hours, $30 per week, and $60 per month (actual request counts vary by model and usage).", "zen.api.error.rateLimitExceeded": "Rate limit exceeded. Please try again later.", "zen.api.error.modelNotSupported": "Model {{model}} not supported", diff --git a/packages/console/app/src/i18n/es.ts b/packages/console/app/src/i18n/es.ts index ddeff684b0..fb718e0541 100644 --- a/packages/console/app/src/i18n/es.ts +++ b/packages/console/app/src/i18n/es.ts @@ -254,7 +254,7 @@ export const dict = { "go.title": "OpenCode Go | Modelos de programación de bajo coste para todos", "go.meta.description": - "Go comienza en $5 el primer mes, luego 10 $/mes, con generosos límites de solicitudes de 5 horas para GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 y MiniMax M2.7.", + "Go comienza en $5 el primer mes, luego 10 $/mes, con generosos límites de solicitudes de 5 horas para GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 y MiniMax M2.7.", "go.hero.title": "Modelos de programación de bajo coste para todos", "go.hero.body": "Go lleva la programación agéntica a programadores de todo el mundo. 
Ofrece límites generosos y acceso fiable a los modelos de código abierto más capaces, para que puedas crear con agentes potentes sin preocuparte por el coste o la disponibilidad.", @@ -306,7 +306,7 @@ export const dict = { "go.problem.item2": "Límites generosos y acceso fiable", "go.problem.item3": "Creado para tantos programadores como sea posible", "go.problem.item4": - "Incluye GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 y MiniMax M2.7", + "Incluye GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 y MiniMax M2.7", "go.how.title": "Cómo funciona Go", "go.how.body": "Go comienza en $5 el primer mes, luego 10 $/mes. Puedes usarlo con OpenCode o cualquier agente.", "go.how.step1.title": "Crear una cuenta", @@ -331,7 +331,7 @@ export const dict = { "go.faq.a2": "Go incluye los modelos que se indican abajo, con límites generosos y acceso confiable.", "go.faq.q3": "¿Es Go lo mismo que Zen?", "go.faq.a3": - "No. Zen es pago por uso, mientras que Go comienza en $5 el primer mes, luego 10 $/mes, con límites generosos y acceso fiable a los modelos de código abierto GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 y MiniMax M2.7.", + "No. 
Zen es pago por uso, mientras que Go comienza en $5 el primer mes, luego 10 $/mes, con límites generosos y acceso fiable a los modelos de código abierto GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 y MiniMax M2.7.", "go.faq.q4": "¿Cuánto cuesta Go?", "go.faq.a4.p1.beforePricing": "Go cuesta", "go.faq.a4.p1.pricingLink": "$5 el primer mes", @@ -355,7 +355,7 @@ export const dict = { "go.faq.q9": "¿Cuál es la diferencia entre los modelos gratuitos y Go?", "go.faq.a9": - "Los modelos gratuitos incluyen Big Pickle más modelos promocionales disponibles en el momento, con una cuota de 200 solicitudes/día. Go incluye GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 y MiniMax M2.7 con cuotas de solicitud más altas aplicadas a través de ventanas móviles (5 horas, semanal y mensual), aproximadamente equivalente a 12 $ por 5 horas, 30 $ por semana y 60 $ por mes (los recuentos reales de solicitudes varían según el modelo y el uso).", + "Los modelos gratuitos incluyen Big Pickle más modelos promocionales disponibles en el momento, con una cuota de 200 solicitudes/día. Go incluye GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 y MiniMax M2.7 con cuotas de solicitud más altas aplicadas a través de ventanas móviles (5 horas, semanal y mensual), aproximadamente equivalente a 12 $ por 5 horas, 30 $ por semana y 60 $ por mes (los recuentos reales de solicitudes varían según el modelo y el uso).", "zen.api.error.rateLimitExceeded": "Límite de tasa excedido. 
Por favor, inténtalo de nuevo más tarde.", "zen.api.error.modelNotSupported": "Modelo {{model}} no soportado", diff --git a/packages/console/app/src/i18n/fr.ts b/packages/console/app/src/i18n/fr.ts index df892c98d3..976d93a29a 100644 --- a/packages/console/app/src/i18n/fr.ts +++ b/packages/console/app/src/i18n/fr.ts @@ -255,7 +255,7 @@ export const dict = { "go.title": "OpenCode Go | Modèles de code à faible coût pour tous", "go.meta.description": - "Go commence à $5 pour le premier mois, puis 10 $/mois, avec des limites de requêtes généreuses sur 5 heures pour GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 et MiniMax M2.7.", + "Go commence à $5 pour le premier mois, puis 10 $/mois, avec des limites de requêtes généreuses sur 5 heures pour GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 et MiniMax M2.7.", "go.hero.title": "Modèles de code à faible coût pour tous", "go.hero.body": "Go apporte le codage agentique aux programmeurs du monde entier. Offrant des limites généreuses et un accès fiable aux modèles open source les plus capables, pour que vous puissiez construire avec des agents puissants sans vous soucier du coût ou de la disponibilité.", @@ -306,7 +306,7 @@ export const dict = { "go.problem.item2": "Limites généreuses et accès fiable", "go.problem.item3": "Conçu pour autant de programmeurs que possible", "go.problem.item4": - "Inclut GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 et MiniMax M2.7", + "Inclut GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 et MiniMax M2.7", "go.how.title": "Comment fonctionne Go", "go.how.body": "Go commence à $5 pour le premier mois, puis 10 $/mois. 
Vous pouvez l'utiliser avec OpenCode ou n'importe quel agent.", @@ -332,7 +332,7 @@ export const dict = { "go.faq.a2": "Go inclut les modèles ci-dessous, avec des limites généreuses et un accès fiable.", "go.faq.q3": "Est-ce que Go est la même chose que Zen ?", "go.faq.a3": - "Non. Zen est un paiement à l'utilisation, tandis que Go commence à $5 pour le premier mois, puis 10 $/mois, avec des limites généreuses et un accès fiable aux modèles open source GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 et MiniMax M2.7.", + "Non. Zen est un paiement à l'utilisation, tandis que Go commence à $5 pour le premier mois, puis 10 $/mois, avec des limites généreuses et un accès fiable aux modèles open source GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 et MiniMax M2.7.", "go.faq.q4": "Combien coûte Go ?", "go.faq.a4.p1.beforePricing": "Go coûte", "go.faq.a4.p1.pricingLink": "$5 le premier mois", @@ -355,7 +355,7 @@ export const dict = { "Oui, vous pouvez utiliser Go avec n'importe quel agent. Suivez les instructions de configuration dans votre agent de code préféré.", "go.faq.q9": "Quelle est la différence entre les modèles gratuits et Go ?", "go.faq.a9": - "Les modèles gratuits incluent Big Pickle ainsi que des modèles promotionnels disponibles à ce moment-là, avec un quota de 200 requêtes/jour. Go inclut GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 et MiniMax M2.7 avec des quotas de requêtes plus élevés appliqués sur des fenêtres glissantes (5 heures, hebdomadaire et mensuelle), à peu près équivalent à 12 $ par 5 heures, 30 $ par semaine et 60 $ par mois (le nombre réel de requêtes varie selon le modèle et l'utilisation).", + "Les modèles gratuits incluent Big Pickle ainsi que des modèles promotionnels disponibles à ce moment-là, avec un quota de 200 requêtes/jour. 
Go inclut GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 et MiniMax M2.7 avec des quotas de requêtes plus élevés appliqués sur des fenêtres glissantes (5 heures, hebdomadaire et mensuelle), à peu près équivalent à 12 $ par 5 heures, 30 $ par semaine et 60 $ par mois (le nombre réel de requêtes varie selon le modèle et l'utilisation).", "zen.api.error.rateLimitExceeded": "Limite de débit dépassée. Veuillez réessayer plus tard.", "zen.api.error.modelNotSupported": "Modèle {{model}} non pris en charge", diff --git a/packages/console/app/src/i18n/it.ts b/packages/console/app/src/i18n/it.ts index 67a73aaeef..6069ad73ce 100644 --- a/packages/console/app/src/i18n/it.ts +++ b/packages/console/app/src/i18n/it.ts @@ -251,7 +251,7 @@ export const dict = { "go.title": "OpenCode Go | Modelli di coding a basso costo per tutti", "go.meta.description": - "Go inizia a $5 per il primo mese, poi $10/mese, con generosi limiti di richiesta di 5 ore per GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 e MiniMax M2.7.", + "Go inizia a $5 per il primo mese, poi $10/mese, con generosi limiti di richiesta di 5 ore per GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 e MiniMax M2.7.", "go.hero.title": "Modelli di coding a basso costo per tutti", "go.hero.body": "Go porta il coding agentico ai programmatori di tutto il mondo. 
Offrendo limiti generosi e un accesso affidabile ai modelli open source più capaci, in modo da poter costruire con agenti potenti senza preoccuparsi dei costi o della disponibilità.", @@ -302,7 +302,7 @@ export const dict = { "go.problem.item2": "Limiti generosi e accesso affidabile", "go.problem.item3": "Costruito per il maggior numero possibile di programmatori", "go.problem.item4": - "Include GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 e MiniMax M2.7", + "Include GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 e MiniMax M2.7", "go.how.title": "Come funziona Go", "go.how.body": "Go inizia a $5 per il primo mese, poi $10/mese. Puoi usarlo con OpenCode o qualsiasi agente.", "go.how.step1.title": "Crea un account", @@ -327,7 +327,7 @@ export const dict = { "go.faq.a2": "Go include i modelli elencati di seguito, con limiti generosi e accesso affidabile.", "go.faq.q3": "Go è lo stesso di Zen?", "go.faq.a3": - "No. Zen è a consumo, mentre Go inizia a $5 per il primo mese, poi $10/mese, con limiti generosi e accesso affidabile ai modelli open source GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 e MiniMax M2.7.", + "No. Zen è a consumo, mentre Go inizia a $5 per il primo mese, poi $10/mese, con limiti generosi e accesso affidabile ai modelli open source GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 e MiniMax M2.7.", "go.faq.q4": "Quanto costa Go?", "go.faq.a4.p1.beforePricing": "Go costa", "go.faq.a4.p1.pricingLink": "$5 il primo mese", @@ -351,7 +351,7 @@ export const dict = { "go.faq.q9": "Qual è la differenza tra i modelli gratuiti e Go?", "go.faq.a9": - "I modelli gratuiti includono Big Pickle più modelli promozionali disponibili al momento, con una quota di 200 richieste/giorno. 
Go include GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 e MiniMax M2.7 con quote di richiesta più elevate applicate su finestre mobili (5 ore, settimanale e mensile), approssimativamente equivalenti a $12 ogni 5 ore, $30 a settimana e $60 al mese (il conteggio effettivo delle richieste varia in base al modello e all'utilizzo).", + "I modelli gratuiti includono Big Pickle più modelli promozionali disponibili al momento, con una quota di 200 richieste/giorno. Go include GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 e MiniMax M2.7 con quote di richiesta più elevate applicate su finestre mobili (5 ore, settimanale e mensile), approssimativamente equivalenti a $12 ogni 5 ore, $30 a settimana e $60 al mese (il conteggio effettivo delle richieste varia in base al modello e all'utilizzo).", "zen.api.error.rateLimitExceeded": "Limite di richieste superato. 
Riprova più tardi.", "zen.api.error.modelNotSupported": "Modello {{model}} non supportato", diff --git a/packages/console/app/src/i18n/ja.ts b/packages/console/app/src/i18n/ja.ts index 541fdd56c1..dcf2f9b52f 100644 --- a/packages/console/app/src/i18n/ja.ts +++ b/packages/console/app/src/i18n/ja.ts @@ -250,7 +250,7 @@ export const dict = { "go.title": "OpenCode Go | すべての人のための低価格なコーディングモデル", "go.meta.description": - "Goは最初の月$5、その後$10/月で、GLM-5.1、GLM-5、Kimi K2.5、Kimi K2.6、MiMo-V2-Pro、MiMo-V2-Omni、Qwen3.5 Plus、Qwen3.6 Plus、MiniMax M2.5、MiniMax M2.7に対して5時間のゆとりあるリクエスト上限があります。", + "Goは最初の月$5、その後$10/月で、GLM-5.1、GLM-5、Kimi K2.5、Kimi K2.6、MiMo-V2-Pro、MiMo-V2-Omni、MiMo-V2.5-Pro、MiMo-V2.5、Qwen3.5 Plus、Qwen3.6 Plus、MiniMax M2.5、MiniMax M2.7に対して5時間のゆとりあるリクエスト上限があります。", "go.hero.title": "すべての人のための低価格なコーディングモデル", "go.hero.body": "Goは、世界中のプログラマーにエージェント型コーディングをもたらします。最も高性能なオープンソースモデルへの十分な制限と安定したアクセスを提供し、コストや可用性を気にすることなく強力なエージェントで構築できます。", @@ -302,7 +302,7 @@ export const dict = { "go.problem.item2": "十分な制限と安定したアクセス", "go.problem.item3": "できるだけ多くのプログラマーのために構築", "go.problem.item4": - "GLM-5.1、GLM-5、Kimi K2.5、Kimi K2.6、MiMo-V2-Pro、MiMo-V2-Omni、Qwen3.5 Plus、Qwen3.6 Plus、MiniMax M2.5、MiniMax M2.7を含む", + "GLM-5.1、GLM-5、Kimi K2.5、Kimi K2.6、MiMo-V2-Pro、MiMo-V2-Omni、MiMo-V2.5-Pro、MiMo-V2.5、Qwen3.5 Plus、Qwen3.6 Plus、MiniMax M2.5、MiniMax M2.7を含む", "go.how.title": "Goの仕組み", "go.how.body": "Goは最初の月$5、その後$10/月で始まります。OpenCodeまたは任意のエージェントで使えます。", "go.how.step1.title": "アカウントを作成", @@ -327,7 +327,7 @@ export const dict = { "go.faq.a2": "Go には、十分な利用上限と安定したアクセスを備えた、以下のモデルが含まれます。", "go.faq.q3": "GoはZenと同じですか?", "go.faq.a3": - "いいえ。Zenは従量課金制ですが、Goは最初の月$5、その後$10/月で始まり、GLM-5.1、GLM-5、Kimi K2.5、Kimi K2.6、MiMo-V2-Pro、MiMo-V2-Omni、Qwen3.5 Plus、Qwen3.6 Plus、MiniMax M2.5、MiniMax M2.7のオープンソースモデルに対して、ゆとりある上限と信頼できるアクセスを提供します。", + "いいえ。Zenは従量課金制ですが、Goは最初の月$5、その後$10/月で始まり、GLM-5.1、GLM-5、Kimi K2.5、Kimi K2.6、MiMo-V2-Pro、MiMo-V2-Omni、MiMo-V2.5-Pro、MiMo-V2.5、Qwen3.5 Plus、Qwen3.6 Plus、MiniMax M2.5、MiniMax 
M2.7のオープンソースモデルに対して、ゆとりある上限と信頼できるアクセスを提供します。", "go.faq.q4": "Goの料金は?", "go.faq.a4.p1.beforePricing": "Goは", "go.faq.a4.p1.pricingLink": "最初の月$5", @@ -351,7 +351,7 @@ export const dict = { "go.faq.q9": "無料モデルとGoの違いは何ですか?", "go.faq.a9": - "無料モデルにはBig Pickleと、その時点で利用可能なプロモーションモデルが含まれ、1日200リクエストの制限があります。GoにはGLM-5.1、GLM-5、Kimi K2.5、Kimi K2.6、MiMo-V2-Pro、MiMo-V2-Omni、Qwen3.5 Plus、Qwen3.6 Plus、MiniMax M2.5、MiniMax M2.7が含まれ、ローリングウィンドウ(5時間、週間、月間)全体でより高いリクエスト制限が適用されます。これは概算で5時間あたり$12、週間$30、月間$60相当です(実際のリクエスト数はモデルと使用状況により異なります)。", + "無料モデルにはBig Pickleと、その時点で利用可能なプロモーションモデルが含まれ、1日200リクエストの制限があります。GoにはGLM-5.1、GLM-5、Kimi K2.5、Kimi K2.6、MiMo-V2-Pro、MiMo-V2-Omni、MiMo-V2.5-Pro、MiMo-V2.5、Qwen3.5 Plus、Qwen3.6 Plus、MiniMax M2.5、MiniMax M2.7が含まれ、ローリングウィンドウ(5時間、週間、月間)全体でより高いリクエスト制限が適用されます。これは概算で5時間あたり$12、週間$30、月間$60相当です(実際のリクエスト数はモデルと使用状況により異なります)。", "zen.api.error.rateLimitExceeded": "レート制限を超えました。後でもう一度お試しください。", "zen.api.error.modelNotSupported": "モデル {{model}} はサポートされていません", diff --git a/packages/console/app/src/i18n/ko.ts b/packages/console/app/src/i18n/ko.ts index 5d459425be..f2a67fbbae 100644 --- a/packages/console/app/src/i18n/ko.ts +++ b/packages/console/app/src/i18n/ko.ts @@ -247,7 +247,7 @@ export const dict = { "go.title": "OpenCode Go | 모두를 위한 저비용 코딩 모델", "go.meta.description": - "Go는 첫 달 $5, 이후 $10/월로 시작하며, GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5, MiniMax M2.7에 대해 넉넉한 5시간 요청 한도를 제공합니다.", + "Go는 첫 달 $5, 이후 $10/월로 시작하며, GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5, MiniMax M2.7에 대해 넉넉한 5시간 요청 한도를 제공합니다.", "go.hero.title": "모두를 위한 저비용 코딩 모델", "go.hero.body": "Go는 전 세계 프로그래머들에게 에이전트 코딩을 제공합니다. 
가장 유능한 오픈 소스 모델에 대한 넉넉한 한도와 안정적인 액세스를 제공하므로, 비용이나 가용성 걱정 없이 강력한 에이전트로 빌드할 수 있습니다.", @@ -299,7 +299,7 @@ export const dict = { "go.problem.item2": "넉넉한 한도와 안정적인 액세스", "go.problem.item3": "가능한 한 많은 프로그래머를 위해 제작됨", "go.problem.item4": - "GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5, MiniMax M2.7 포함", + "GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5, MiniMax M2.7 포함", "go.how.title": "Go 작동 방식", "go.how.body": "Go는 첫 달 $5, 이후 $10/월로 시작합니다. OpenCode 또는 어떤 에이전트와도 함께 사용할 수 있습니다.", "go.how.step1.title": "계정 생성", @@ -323,7 +323,7 @@ export const dict = { "go.faq.a2": "Go에는 넉넉한 한도와 안정적인 액세스를 제공하는 아래 모델이 포함됩니다.", "go.faq.q3": "Go는 Zen과 같은가요?", "go.faq.a3": - "아니요. Zen은 종량제인 반면, Go는 첫 달 $5, 이후 $10/월로 시작하며, GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5, MiniMax M2.7 오픈 소스 모델에 대한 넉넉한 한도와 안정적인 액세스를 제공합니다.", + "아니요. Zen은 종량제인 반면, Go는 첫 달 $5, 이후 $10/월로 시작하며, GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5, MiniMax M2.7 오픈 소스 모델에 대한 넉넉한 한도와 안정적인 액세스를 제공합니다.", "go.faq.q4": "Go 비용은 얼마인가요?", "go.faq.a4.p1.beforePricing": "Go 비용은", "go.faq.a4.p1.pricingLink": "첫 달 $5", @@ -346,7 +346,7 @@ export const dict = { "go.faq.q9": "무료 모델과 Go의 차이점은 무엇인가요?", "go.faq.a9": - "무료 모델에는 Big Pickle과 당시 사용 가능한 프로모션 모델이 포함되며, 하루 200회 요청 할당량이 적용됩니다. Go는 GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5, MiniMax M2.7를 포함하며, 롤링 윈도우(5시간, 주간, 월간)에 걸쳐 더 높은 요청 할당량을 적용합니다. 이는 대략 5시간당 $12, 주당 $30, 월 $60에 해당합니다(실제 요청 수는 모델 및 사용량에 따라 다름).", + "무료 모델에는 Big Pickle과 당시 사용 가능한 프로모션 모델이 포함되며, 하루 200회 요청 할당량이 적용됩니다. 
Go는 GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5, MiniMax M2.7를 포함하며, 롤링 윈도우(5시간, 주간, 월간)에 걸쳐 더 높은 요청 할당량을 적용합니다. 이는 대략 5시간당 $12, 주당 $30, 월 $60에 해당합니다(실제 요청 수는 모델 및 사용량에 따라 다름).", "zen.api.error.rateLimitExceeded": "속도 제한을 초과했습니다. 나중에 다시 시도해 주세요.", "zen.api.error.modelNotSupported": "{{model}} 모델은 지원되지 않습니다", diff --git a/packages/console/app/src/i18n/no.ts b/packages/console/app/src/i18n/no.ts index af2b018b00..0207a57760 100644 --- a/packages/console/app/src/i18n/no.ts +++ b/packages/console/app/src/i18n/no.ts @@ -251,7 +251,7 @@ export const dict = { "go.title": "OpenCode Go | Rimelige kodemodeller for alle", "go.meta.description": - "Go starter på $5 for den første måneden, deretter $10/måned, med sjenerøse 5-timers forespørselsgrenser for GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 og MiniMax M2.7.", + "Go starter på $5 for den første måneden, deretter $10/måned, med sjenerøse 5-timers forespørselsgrenser for GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 og MiniMax M2.7.", "go.hero.title": "Rimelige kodemodeller for alle", "go.hero.body": "Go bringer agent-koding til programmerere over hele verden. 
Med rause grenser og pålitelig tilgang til de mest kapable åpen kildekode-modellene, kan du bygge med kraftige agenter uten å bekymre deg for kostnader eller tilgjengelighet.", @@ -302,7 +302,7 @@ export const dict = { "go.problem.item2": "Rause grenser og pålitelig tilgang", "go.problem.item3": "Bygget for så mange programmerere som mulig", "go.problem.item4": - "Inkluderer GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 og MiniMax M2.7", + "Inkluderer GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 og MiniMax M2.7", "go.how.title": "Hvordan Go fungerer", "go.how.body": "Go starter på $5 for den første måneden, deretter $10/måned. Du kan bruke det med OpenCode eller hvilken som helst agent.", @@ -328,7 +328,7 @@ export const dict = { "go.faq.a2": "Go inkluderer modellene nedenfor, med høye grenser og pålitelig tilgang.", "go.faq.q3": "Er Go det samme som Zen?", "go.faq.a3": - "Nei. Zen er betaling etter bruk, mens Go starter på $5 for den første måneden, deretter $10/måned, med sjenerøse grenser og pålitelig tilgang til åpen kildekode-modellene GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 og MiniMax M2.7.", + "Nei. 
Zen er betaling etter bruk, mens Go starter på $5 for den første måneden, deretter $10/måned, med sjenerøse grenser og pålitelig tilgang til åpen kildekode-modellene GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 og MiniMax M2.7.", "go.faq.q4": "Hva koster Go?", "go.faq.a4.p1.beforePricing": "Go koster", "go.faq.a4.p1.pricingLink": "$5 første måned", @@ -352,7 +352,7 @@ export const dict = { "go.faq.q9": "Hva er forskjellen mellom gratis modeller og Go?", "go.faq.a9": - "Gratis modeller inkluderer Big Pickle pluss kampanjemodeller tilgjengelig på det tidspunktet, med en kvote på 200 forespørsler/dag. Go inkluderer GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 og MiniMax M2.7 med høyere kvoter håndhevet over rullerende vinduer (5 timer, ukentlig og månedlig), omtrent tilsvarende $12 per 5 timer, $30 per uke og $60 per måned (faktiske forespørselsantall varierer etter modell og bruk).", + "Gratis modeller inkluderer Big Pickle pluss kampanjemodeller tilgjengelig på det tidspunktet, med en kvote på 200 forespørsler/dag. Go inkluderer GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 og MiniMax M2.7 med høyere kvoter håndhevet over rullerende vinduer (5 timer, ukentlig og månedlig), omtrent tilsvarende $12 per 5 timer, $30 per uke og $60 per måned (faktiske forespørselsantall varierer etter modell og bruk).", "zen.api.error.rateLimitExceeded": "Rate limit overskredet. 
Vennligst prøv igjen senere.", "zen.api.error.modelNotSupported": "Modell {{model}} støttes ikke", diff --git a/packages/console/app/src/i18n/pl.ts b/packages/console/app/src/i18n/pl.ts index f2219487bc..554a2d0aab 100644 --- a/packages/console/app/src/i18n/pl.ts +++ b/packages/console/app/src/i18n/pl.ts @@ -252,7 +252,7 @@ export const dict = { "go.title": "OpenCode Go | Niskokosztowe modele do kodowania dla każdego", "go.meta.description": - "Go zaczyna się od $5 za pierwszy miesiąc, potem $10/miesiąc, z hojnymi 5-godzinnymi limitami zapytań dla GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 i MiniMax M2.7.", + "Go zaczyna się od $5 za pierwszy miesiąc, potem $10/miesiąc, z hojnymi 5-godzinnymi limitami zapytań dla GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 i MiniMax M2.7.", "go.hero.title": "Niskokosztowe modele do kodowania dla każdego", "go.hero.body": "Go udostępnia programowanie z agentami programistom na całym świecie. Oferuje hojne limity i niezawodny dostęp do najzdolniejszych modeli open source, dzięki czemu możesz budować za pomocą potężnych agentów, nie martwiąc się o koszty czy dostępność.", @@ -303,7 +303,7 @@ export const dict = { "go.problem.item2": "Hojne limity i niezawodny dostęp", "go.problem.item3": "Stworzony dla jak największej liczby programistów", "go.problem.item4": - "Zawiera GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 i MiniMax M2.7", + "Zawiera GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 i MiniMax M2.7", "go.how.title": "Jak działa Go", "go.how.body": "Go zaczyna się od $5 za pierwszy miesiąc, potem $10/miesiąc. 
Możesz go używać z OpenCode lub dowolnym agentem.", @@ -329,7 +329,7 @@ export const dict = { "go.faq.a2": "Go obejmuje poniższe modele z wysokimi limitami i niezawodnym dostępem.", "go.faq.q3": "Czy Go to to samo co Zen?", "go.faq.a3": - "Nie. Zen to model płatności za użycie, podczas gdy Go zaczyna się od $5 za pierwszy miesiąc, potem $10/miesiąc, z hojnymi limitami i niezawodnym dostępem do modeli open source GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 i MiniMax M2.7.", + "Nie. Zen to model płatności za użycie, podczas gdy Go zaczyna się od $5 za pierwszy miesiąc, potem $10/miesiąc, z hojnymi limitami i niezawodnym dostępem do modeli open source GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 i MiniMax M2.7.", "go.faq.q4": "Ile kosztuje Go?", "go.faq.a4.p1.beforePricing": "Go kosztuje", "go.faq.a4.p1.pricingLink": "$5 za pierwszy miesiąc", @@ -353,7 +353,7 @@ export const dict = { "go.faq.q9": "Jaka jest różnica między darmowymi modelami a Go?", "go.faq.a9": - "Darmowe modele obejmują Big Pickle oraz modele promocyjne dostępne w danym momencie, z limitem 200 zapytań/dzień. Go zawiera GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 i MiniMax M2.7 z wyższymi limitami zapytań egzekwowanymi w oknach kroczących (5-godzinnych, tygodniowych i miesięcznych), w przybliżeniu równoważnymi $12 na 5 godzin, $30 tygodniowo i $60 miesięcznie (rzeczywista liczba zapytań zależy od modelu i użycia).", + "Darmowe modele obejmują Big Pickle oraz modele promocyjne dostępne w danym momencie, z limitem 200 zapytań/dzień. 
Go zawiera GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 i MiniMax M2.7 z wyższymi limitami zapytań egzekwowanymi w oknach kroczących (5-godzinnych, tygodniowych i miesięcznych), w przybliżeniu równoważnymi $12 na 5 godzin, $30 tygodniowo i $60 miesięcznie (rzeczywista liczba zapytań zależy od modelu i użycia).", "zen.api.error.rateLimitExceeded": "Przekroczono limit zapytań. Spróbuj ponownie później.", "zen.api.error.modelNotSupported": "Model {{model}} nie jest obsługiwany", diff --git a/packages/console/app/src/i18n/ru.ts b/packages/console/app/src/i18n/ru.ts index 8fd76226e4..1e50134199 100644 --- a/packages/console/app/src/i18n/ru.ts +++ b/packages/console/app/src/i18n/ru.ts @@ -255,7 +255,7 @@ export const dict = { "go.title": "OpenCode Go | Недорогие модели для кодинга для всех", "go.meta.description": - "Go начинается с $5 за первый месяц, затем $10/месяц, с щедрыми лимитами запросов за 5 часов для GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 и MiniMax M2.7.", + "Go начинается с $5 за первый месяц, затем $10/месяц, с щедрыми лимитами запросов за 5 часов для GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 и MiniMax M2.7.", "go.hero.title": "Недорогие модели для кодинга для всех", "go.hero.body": "Go открывает доступ к агентам-программистам разработчикам по всему миру. 
Предлагая щедрые лимиты и надежный доступ к наиболее способным моделям с открытым исходным кодом, вы можете создавать проекты с мощными агентами, не беспокоясь о затратах или доступности.", @@ -307,7 +307,7 @@ export const dict = { "go.problem.item2": "Щедрые лимиты и надежный доступ", "go.problem.item3": "Создан для максимального числа программистов", "go.problem.item4": - "Включает GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 и MiniMax M2.7", + "Включает GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 и MiniMax M2.7", "go.how.title": "Как работает Go", "go.how.body": "Go начинается с $5 за первый месяц, затем $10/месяц. Вы можете использовать его с OpenCode или любым агентом.", @@ -333,7 +333,7 @@ export const dict = { "go.faq.a2": "Go включает перечисленные ниже модели с щедрыми лимитами и надежным доступом.", "go.faq.q3": "Go — это то же самое, что и Zen?", "go.faq.a3": - "Нет. Zen - это оплата по мере использования, в то время как Go начинается с $5 за первый месяц, затем $10/месяц, с щедрыми лимитами и надежным доступом к моделям с открытым исходным кодом GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 и MiniMax M2.7.", + "Нет. 
Zen - это оплата по мере использования, в то время как Go начинается с $5 за первый месяц, затем $10/месяц, с щедрыми лимитами и надежным доступом к моделям с открытым исходным кодом GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 и MiniMax M2.7.", "go.faq.q4": "Сколько стоит Go?", "go.faq.a4.p1.beforePricing": "Go стоит", "go.faq.a4.p1.pricingLink": "$5 за первый месяц", @@ -357,7 +357,7 @@ export const dict = { "go.faq.q9": "В чем разница между бесплатными моделями и Go?", "go.faq.a9": - "Бесплатные модели включают Big Pickle плюс промо-модели, доступные на данный момент, с квотой 200 запросов/день. Go включает GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 и MiniMax M2.7 с более высокими квотами запросов, применяемыми в скользящих окнах (5 часов, неделя и месяц), что примерно эквивалентно $12 за 5 часов, $30 в неделю и $60 в месяц (фактическое количество запросов зависит от модели и использования).", + "Бесплатные модели включают Big Pickle плюс промо-модели, доступные на данный момент, с квотой 200 запросов/день. Go включает GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 и MiniMax M2.7 с более высокими квотами запросов, применяемыми в скользящих окнах (5 часов, неделя и месяц), что примерно эквивалентно $12 за 5 часов, $30 в неделю и $60 в месяц (фактическое количество запросов зависит от модели и использования).", "zen.api.error.rateLimitExceeded": "Превышен лимит запросов. 
Пожалуйста, попробуйте позже.", "zen.api.error.modelNotSupported": "Модель {{model}} не поддерживается", diff --git a/packages/console/app/src/i18n/th.ts b/packages/console/app/src/i18n/th.ts index efe535094f..3a2dc4ba4c 100644 --- a/packages/console/app/src/i18n/th.ts +++ b/packages/console/app/src/i18n/th.ts @@ -250,7 +250,7 @@ export const dict = { "go.title": "OpenCode Go | โมเดลเขียนโค้ดราคาประหยัดสำหรับทุกคน", "go.meta.description": - "Go เริ่มต้นที่ $5 สำหรับเดือนแรก จากนั้น $10/เดือน พร้อมขีดจำกัดคำขอ 5 ชั่วโมงที่เอื้อเฟื้อสำหรับ GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 และ MiniMax M2.7", + "Go เริ่มต้นที่ $5 สำหรับเดือนแรก จากนั้น $10/เดือน พร้อมขีดจำกัดคำขอ 5 ชั่วโมงที่เอื้อเฟื้อสำหรับ GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 และ MiniMax M2.7", "go.hero.title": "โมเดลเขียนโค้ดราคาประหยัดสำหรับทุกคน", "go.hero.body": "Go นำการเขียนโค้ดแบบเอเจนต์มาสู่นักเขียนโปรแกรมทั่วโลก เสนอขีดจำกัดที่กว้างขวางและการเข้าถึงโมเดลโอเพนซอร์สที่มีความสามารถสูงสุดได้อย่างน่าเชื่อถือ เพื่อให้คุณสามารถสร้างสรรค์ด้วยเอเจนต์ที่ทรงพลังโดยไม่ต้องกังวลเรื่องค่าใช้จ่ายหรือความพร้อมใช้งาน", @@ -300,7 +300,7 @@ export const dict = { "go.problem.item2": "ขีดจำกัดที่กว้างขวางและการเข้าถึงที่เชื่อถือได้", "go.problem.item3": "สร้างขึ้นเพื่อโปรแกรมเมอร์จำนวนมากที่สุดเท่าที่จะเป็นไปได้", "go.problem.item4": - "รวมถึง GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 และ MiniMax M2.7", + "รวมถึง GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 และ MiniMax M2.7", "go.how.title": "Go ทำงานอย่างไร", "go.how.body": "Go เริ่มต้นที่ $5 สำหรับเดือนแรก จากนั้น $10/เดือน คุณสามารถใช้กับ OpenCode หรือเอเจนต์ใดก็ได้", "go.how.step1.title": "สร้างบัญชี", @@ -325,7 +325,7 @@ export const dict = { "go.faq.a2": "Go 
รวมโมเดลด้านล่างนี้ พร้อมขีดจำกัดที่มากและการเข้าถึงที่เชื่อถือได้", "go.faq.q3": "Go เหมือนกับ Zen หรือไม่?", "go.faq.a3": - "ไม่ Zen เป็นแบบจ่ายตามการใช้งาน ในขณะที่ Go เริ่มต้นที่ $5 สำหรับเดือนแรก จากนั้น $10/เดือน พร้อมขีดจำกัดที่เอื้อเฟื้อและการเข้าถึงโมเดลโอเพนซอร์ส GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 และ MiniMax M2.7 อย่างเชื่อถือได้", + "ไม่ Zen เป็นแบบจ่ายตามการใช้งาน ในขณะที่ Go เริ่มต้นที่ $5 สำหรับเดือนแรก จากนั้น $10/เดือน พร้อมขีดจำกัดที่เอื้อเฟื้อและการเข้าถึงโมเดลโอเพนซอร์ส GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 และ MiniMax M2.7 อย่างเชื่อถือได้", "go.faq.q4": "Go ราคาเท่าไหร่?", "go.faq.a4.p1.beforePricing": "Go ราคา", "go.faq.a4.p1.pricingLink": "$5 เดือนแรก", @@ -348,7 +348,7 @@ export const dict = { "go.faq.q9": "ความแตกต่างระหว่างโมเดลฟรีและ Go คืออะไร?", "go.faq.a9": - "โมเดลฟรีรวมถึง Big Pickle บวกกับโมเดลโปรโมชั่นที่มีให้ในขณะนั้น ด้วยโควต้า 200 คำขอ/วัน Go รวมถึง GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 และ MiniMax M2.7 ที่มีโควต้าคำขอสูงกว่า ซึ่งบังคับใช้ผ่านช่วงเวลาหมุนเวียน (5 ชั่วโมง, รายสัปดาห์ และรายเดือน) เทียบเท่าประมาณ $12 ต่อ 5 ชั่วโมง, $30 ต่อสัปดาห์ และ $60 ต่อเดือน (จำนวนคำขอจริงจะแตกต่างกันไปตามโมเดลและการใช้งาน)", + "โมเดลฟรีรวมถึง Big Pickle บวกกับโมเดลโปรโมชั่นที่มีให้ในขณะนั้น ด้วยโควต้า 200 คำขอ/วัน Go รวมถึง GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 และ MiniMax M2.7 ที่มีโควต้าคำขอสูงกว่า ซึ่งบังคับใช้ผ่านช่วงเวลาหมุนเวียน (5 ชั่วโมง, รายสัปดาห์ และรายเดือน) เทียบเท่าประมาณ $12 ต่อ 5 ชั่วโมง, $30 ต่อสัปดาห์ และ $60 ต่อเดือน (จำนวนคำขอจริงจะแตกต่างกันไปตามโมเดลและการใช้งาน)", "zen.api.error.rateLimitExceeded": "เกินขีดจำกัดอัตราการใช้งาน กรุณาลองใหม่ในภายหลัง", "zen.api.error.modelNotSupported": 
"ไม่รองรับโมเดล {{model}}", diff --git a/packages/console/app/src/i18n/tr.ts b/packages/console/app/src/i18n/tr.ts index 114dcbdb0d..e2370f83c2 100644 --- a/packages/console/app/src/i18n/tr.ts +++ b/packages/console/app/src/i18n/tr.ts @@ -253,7 +253,7 @@ export const dict = { "go.title": "OpenCode Go | Herkes için düşük maliyetli kodlama modelleri", "go.meta.description": - "Go ilk ay $5, sonrasında ayda 10$ fiyatıyla başlar; GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 ve MiniMax M2.7 için cömert 5 saatlik istek limitleri sunar.", + "Go ilk ay $5, sonrasında ayda 10$ fiyatıyla başlar; GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 ve MiniMax M2.7 için cömert 5 saatlik istek limitleri sunar.", "go.hero.title": "Herkes için düşük maliyetli kodlama modelleri", "go.hero.body": "Go, dünya çapındaki programcılara ajan tabanlı kodlama getiriyor. En yetenekli açık kaynaklı modellere cömert limitler ve güvenilir erişim sunarak, maliyet veya erişilebilirlik konusunda endişelenmeden güçlü ajanlarla geliştirme yapmanızı sağlar.", @@ -305,7 +305,7 @@ export const dict = { "go.problem.item2": "Cömert limitler ve güvenilir erişim", "go.problem.item3": "Mümkün olduğunca çok programcı için geliştirildi", "go.problem.item4": - "GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 ve MiniMax M2.7 içerir", + "GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 ve MiniMax M2.7 içerir", "go.how.title": "Go nasıl çalışır?", "go.how.body": "Go ilk ay $5, sonrasında ayda 10$ fiyatıyla başlar. 
OpenCode veya herhangi bir ajanla kullanabilirsiniz.", @@ -331,7 +331,7 @@ export const dict = { "go.faq.a2": "Go, aşağıda listelenen modelleri cömert limitler ve güvenilir erişimle sunar.", "go.faq.q3": "Go, Zen ile aynı mı?", "go.faq.a3": - "Hayır. Zen kullandıkça öde modelidir, Go ise ilk ay $5, sonrasında ayda 10$ fiyatıyla başlar; GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 ve MiniMax M2.7 açık kaynak modellerine cömert limitler ve güvenilir erişim sunar.", + "Hayır. Zen kullandıkça öde modelidir, Go ise ilk ay $5, sonrasında ayda 10$ fiyatıyla başlar; GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 ve MiniMax M2.7 açık kaynak modellerine cömert limitler ve güvenilir erişim sunar.", "go.faq.q4": "Go ne kadar?", "go.faq.a4.p1.beforePricing": "Go'nun maliyeti", "go.faq.a4.p1.pricingLink": "İlk ay $5", @@ -355,7 +355,7 @@ export const dict = { "go.faq.q9": "Ücretsiz modeller ve Go arasındaki fark nedir?", "go.faq.a9": - "Ücretsiz modeller, günlük 200 istek kotası ile Big Pickle ve o sırada mevcut olan promosyonel modelleri içerir. Go ise GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 ve MiniMax M2.7 modellerini; yuvarlanan pencereler (5 saatlik, haftalık ve aylık) üzerinden uygulanan daha yüksek istek kotalarıyla içerir. Bu kotalar kabaca her 5 saatte 12$, haftada 30$ ve ayda 60$ değerine eşdeğerdir (gerçek istek sayıları modele ve kullanıma göre değişir).", + "Ücretsiz modeller, günlük 200 istek kotası ile Big Pickle ve o sırada mevcut olan promosyonel modelleri içerir. Go ise GLM-5.1, GLM-5, Kimi K2.5, Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 ve MiniMax M2.7 modellerini; yuvarlanan pencereler (5 saatlik, haftalık ve aylık) üzerinden uygulanan daha yüksek istek kotalarıyla içerir. 
Bu kotalar kabaca her 5 saatte 12$, haftada 30$ ve ayda 60$ değerine eşdeğerdir (gerçek istek sayıları modele ve kullanıma göre değişir).", "zen.api.error.rateLimitExceeded": "İstek limiti aşıldı. Lütfen daha sonra tekrar deneyin.", "zen.api.error.modelNotSupported": "{{model}} modeli desteklenmiyor", diff --git a/packages/console/app/src/i18n/zh.ts b/packages/console/app/src/i18n/zh.ts index 72a2a4570d..649384c23b 100644 --- a/packages/console/app/src/i18n/zh.ts +++ b/packages/console/app/src/i18n/zh.ts @@ -241,7 +241,7 @@ export const dict = { "go.title": "OpenCode Go | 人人可用的低成本编程模型", "go.meta.description": - "Go 首月 $5,之后 $10/月,提供对 GLM-5.1、GLM-5、Kimi K2.5、Kimi K2.6、MiMo-V2-Pro、MiMo-V2-Omni、Qwen3.5 Plus、Qwen3.6 Plus、MiniMax M2.5 和 MiniMax M2.7 的 5 小时充裕请求额度。", + "Go 首月 $5,之后 $10/月,提供对 GLM-5.1、GLM-5、Kimi K2.5、Kimi K2.6、MiMo-V2-Pro、MiMo-V2-Omni、MiMo-V2.5-Pro、MiMo-V2.5、Qwen3.5 Plus、Qwen3.6 Plus、MiniMax M2.5 和 MiniMax M2.7 的 5 小时充裕请求额度。", "go.hero.title": "人人可用的低成本编程模型", "go.hero.body": "Go 将代理编程带给全世界的程序员。提供充裕的限额和对最强大的开源模型的可靠访问,让您可以利用强大的代理进行构建,而无需担心成本或可用性。", @@ -291,7 +291,7 @@ export const dict = { "go.problem.item2": "充裕的限额和可靠的访问", "go.problem.item3": "为尽可能多的程序员打造", "go.problem.item4": - "包含 GLM-5.1, GLM-5, Kimi K2.5、Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 和 MiniMax M2.7", + "包含 GLM-5.1, GLM-5, Kimi K2.5、Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 和 MiniMax M2.7", "go.how.title": "Go 如何工作", "go.how.body": "Go 起价为首月 $5,之后 $10/月。您可以将其与 OpenCode 或任何代理搭配使用。", "go.how.step1.title": "创建账户", @@ -313,7 +313,7 @@ export const dict = { "go.faq.a2": "Go 包含下方列出的模型,提供充足的限额和可靠的访问。", "go.faq.q3": "Go 和 Zen 一样吗?", "go.faq.a3": - "不。Zen 是按量付费,而 Go 首月 $5,之后 $10/月,提供充裕的额度,并可可靠地访问 GLM-5.1、GLM-5、Kimi K2.5、Kimi K2.6、MiMo-V2-Pro、MiMo-V2-Omni、Qwen3.5 Plus、Qwen3.6 Plus、MiniMax M2.5 和 MiniMax M2.7 等开源模型。", + "不。Zen 是按量付费,而 Go 首月 $5,之后 $10/月,提供充裕的额度,并可可靠地访问 GLM-5.1、GLM-5、Kimi K2.5、Kimi 
K2.6、MiMo-V2-Pro、MiMo-V2-Omni、MiMo-V2.5-Pro、MiMo-V2.5、Qwen3.5 Plus、Qwen3.6 Plus、MiniMax M2.5 和 MiniMax M2.7 等开源模型。", "go.faq.q4": "Go 多少钱?", "go.faq.a4.p1.beforePricing": "Go 费用为", "go.faq.a4.p1.pricingLink": "首月 $5", @@ -335,7 +335,7 @@ export const dict = { "go.faq.q9": "免费模型和 Go 之间的区别是什么?", "go.faq.a9": - "免费模型包含 Big Pickle 加上当时可用的促销模型,每天有 200 次请求的配额。Go 包含 GLM-5.1, GLM-5, Kimi K2.5、Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 和 MiniMax M2.7,并在滚动窗口(5 小时、每周和每月)内执行更高的请求配额,大致相当于每 5 小时 $12、每周 $30 和每月 $60(实际请求计数因模型和使用情况而异)。", + "免费模型包含 Big Pickle 加上当时可用的促销模型,每天有 200 次请求的配额。Go 包含 GLM-5.1, GLM-5, Kimi K2.5、Kimi K2.6, MiMo-V2-Pro, MiMo-V2-Omni, MiMo-V2.5-Pro, MiMo-V2.5, Qwen3.5 Plus, Qwen3.6 Plus, MiniMax M2.5 和 MiniMax M2.7,并在滚动窗口(5 小时、每周和每月)内执行更高的请求配额,大致相当于每 5 小时 $12、每周 $30 和每月 $60(实际请求计数因模型和使用情况而异)。", "zen.api.error.rateLimitExceeded": "超出速率限制。请稍后重试。", "zen.api.error.modelNotSupported": "不支持模型 {{model}}", diff --git a/packages/console/app/src/i18n/zht.ts b/packages/console/app/src/i18n/zht.ts index caea5c74bb..8e93f989e8 100644 --- a/packages/console/app/src/i18n/zht.ts +++ b/packages/console/app/src/i18n/zht.ts @@ -241,7 +241,7 @@ export const dict = { "go.title": "OpenCode Go | 低成本全民編碼模型", "go.meta.description": - "Go 首月 $5,之後 $10/月,提供對 GLM-5.1、GLM-5、Kimi K2.5、Kimi K2.6、MiMo-V2-Pro、MiMo-V2-Omni、Qwen3.5 Plus、Qwen3.6 Plus、MiniMax M2.5 和 MiniMax M2.7 的 5 小時充裕請求額度。", + "Go 首月 $5,之後 $10/月,提供對 GLM-5.1、GLM-5、Kimi K2.5、Kimi K2.6、MiMo-V2-Pro、MiMo-V2-Omni、MiMo-V2.5-Pro、MiMo-V2.5、Qwen3.5 Plus、Qwen3.6 Plus、MiniMax M2.5 和 MiniMax M2.7 的 5 小時充裕請求額度。", "go.hero.title": "低成本全民編碼模型", "go.hero.body": "Go 將代理編碼帶給全世界的程式設計師。提供寬裕的限額以及對最強大開源模型的穩定存取,讓你可以使用強大的代理進行構建,而無需擔心成本或可用性。", @@ -291,7 +291,7 @@ export const dict = { "go.problem.item2": "寬裕的限額與穩定存取", "go.problem.item3": "專為盡可能多的程式設計師打造", "go.problem.item4": - "包含 GLM-5.1、GLM-5、Kimi K2.5、Kimi K2.6、MiMo-V2-Pro、MiMo-V2-Omni、Qwen3.5 Plus、Qwen3.6 Plus、MiniMax M2.5 與 MiniMax M2.7", + "包含 GLM-5.1、GLM-5、Kimi 
K2.5、Kimi K2.6、MiMo-V2-Pro、MiMo-V2-Omni、MiMo-V2.5-Pro、MiMo-V2.5、Qwen3.5 Plus、Qwen3.6 Plus、MiniMax M2.5 與 MiniMax M2.7", "go.how.title": "Go 如何運作", "go.how.body": "Go 起價為首月 $5,之後 $10/月。您可以將其與 OpenCode 或任何代理搭配使用。", "go.how.step1.title": "建立帳號", @@ -313,7 +313,7 @@ export const dict = { "go.faq.a2": "Go 包含下方列出的模型,提供充足的額度與穩定的存取。", "go.faq.q3": "Go 與 Zen 一樣嗎?", "go.faq.a3": - "不。Zen 是按量付費,而 Go 首月 $5,之後 $10/月,提供充裕的額度,並可可靠地存取 GLM-5.1、GLM-5、Kimi K2.5、Kimi K2.6、MiMo-V2-Pro、MiMo-V2-Omni、Qwen3.5 Plus、Qwen3.6 Plus、MiniMax M2.5 和 MiniMax M2.7 等開源模型。", + "不。Zen 是按量付費,而 Go 首月 $5,之後 $10/月,提供充裕的額度,並可可靠地存取 GLM-5.1、GLM-5、Kimi K2.5、Kimi K2.6、MiMo-V2-Pro、MiMo-V2-Omni、MiMo-V2.5-Pro、MiMo-V2.5、Qwen3.5 Plus、Qwen3.6 Plus、MiniMax M2.5 和 MiniMax M2.7 等開源模型。", "go.faq.q4": "Go 費用是多少?", "go.faq.a4.p1.beforePricing": "Go 費用為", "go.faq.a4.p1.pricingLink": "首月 $5", @@ -335,7 +335,7 @@ export const dict = { "go.faq.q9": "免費模型與 Go 有什麼區別?", "go.faq.a9": - "免費模型包括 Big Pickle 以及當時可用的促銷模型,配額為 200 次請求/天。Go 包括 GLM-5.1、GLM-5、Kimi K2.5、Kimi K2.6、MiMo-V2-Pro、MiMo-V2-Omni、Qwen3.5 Plus、Qwen3.6 Plus、MiniMax M2.5 與 MiniMax M2.7,並在滾動視窗(5 小時、每週和每月)內執行更高的請求配額,大約相當於每 5 小時 $12、每週 $30 和每月 $60(實際請求數因模型和使用情況而異)。", + "免費模型包括 Big Pickle 以及當時可用的促銷模型,配額為 200 次請求/天。Go 包括 GLM-5.1、GLM-5、Kimi K2.5、Kimi K2.6、MiMo-V2-Pro、MiMo-V2-Omni、MiMo-V2.5-Pro、MiMo-V2.5、Qwen3.5 Plus、Qwen3.6 Plus、MiniMax M2.5 與 MiniMax M2.7,並在滾動視窗(5 小時、每週和每月)內執行更高的請求配額,大約相當於每 5 小時 $12、每週 $30 和每月 $60(實際請求數因模型和使用情況而異)。", "zen.api.error.rateLimitExceeded": "超出頻率限制。請稍後再試。", "zen.api.error.modelNotSupported": "不支援模型 {{model}}", diff --git a/packages/console/app/src/routes/go/index.tsx b/packages/console/app/src/routes/go/index.tsx index bae5ddd283..bfaf2969bc 100644 --- a/packages/console/app/src/routes/go/index.tsx +++ b/packages/console/app/src/routes/go/index.tsx @@ -29,6 +29,8 @@ const models = [ { name: "Kimi K2.6", provider: "Moonshot AI" }, { name: "MiMo-V2-Pro", provider: "Xiaomi MiMo" }, { name: "MiMo-V2-Omni", provider: "Xiaomi MiMo" }, + { name: 
"MiMo-V2.5-Pro", provider: "Xiaomi MiMo" }, + { name: "MiMo-V2.5", provider: "Xiaomi MiMo" }, { name: "Qwen3.5 Plus", provider: "Alibaba Cloud Model Studio" }, { name: "Qwen3.6 Plus", provider: "Alibaba Cloud Model Studio" }, { name: "MiniMax M2.7", provider: "MiniMax" }, @@ -60,7 +62,7 @@ function LimitsGraph(props: { href: string }) { const graph = [ { id: "glm-5.1", name: "GLM-5.1", req: 880, d: "100ms" }, { id: "kimi-k2.6", name: "Kimi K2.6 (3x usage)", req: 3450, baseReq: 1150, d: "150ms" }, - { id: "mimo-v2-pro", name: "MiMo-V2-Pro", req: 1290, d: "150ms" }, + { id: "mimo-v2.5-pro", name: "MiMo-V2.5-Pro", req: 1290, d: "150ms" }, { id: "qwen3.6-plus", name: "Qwen3.6 Plus", req: 3300, d: "280ms" }, { id: "minimax-m2.7", name: "MiniMax M2.7", req: 3400, d: "300ms" }, { id: "qwen3.5-plus", name: "Qwen3.5 Plus", req: 10200, d: "360ms" }, diff --git a/packages/console/app/src/routes/workspace/[id]/go/lite-section.tsx b/packages/console/app/src/routes/workspace/[id]/go/lite-section.tsx index c894ace104..abd417e06b 100644 --- a/packages/console/app/src/routes/workspace/[id]/go/lite-section.tsx +++ b/packages/console/app/src/routes/workspace/[id]/go/lite-section.tsx @@ -289,8 +289,10 @@ export function LiteSection() {
  • Kimi K2.6
  • GLM-5
  • GLM-5.1
  • -
  • Mimo-V2-Pro
  • -
  • Mimo-V2-Omni
  • +
  • MiMo-V2-Pro
  • +
  • MiMo-V2-Omni
  • +
  • MiMo-V2.5-Pro
  • +
  • MiMo-V2.5
  • MiniMax M2.5
  • MiniMax M2.7
  • Qwen3.5 Plus
  • diff --git a/packages/web/src/content/docs/ar/go.mdx b/packages/web/src/content/docs/ar/go.mdx index 179999af86..008c83468d 100644 --- a/packages/web/src/content/docs/ar/go.mdx +++ b/packages/web/src/content/docs/ar/go.mdx @@ -59,6 +59,8 @@ OpenCode Go حاليًا في المرحلة التجريبية. - **Kimi K2.6** - **MiMo-V2-Pro** - **MiMo-V2-Omni** +- **MiMo-V2.5-Pro** +- **MiMo-V2.5** - **MiniMax M2.5** - **Qwen3.5 Plus** - **Qwen3.6 Plus** @@ -86,8 +88,10 @@ OpenCode Go حاليًا في المرحلة التجريبية. | GLM-5 | 1,150 | 2,880 | 5,750 | | Kimi K2.5 | 1,850 | 4,630 | 9,250 | | Kimi K2.6 | 1,150 | 2,880 | 5,750 | -| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | | Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | | MiniMax M2.7 | 3,400 | 8,500 | 17,000 | | MiniMax M2.5 | 6,300 | 15,900 | 31,800 | @@ -102,6 +106,8 @@ OpenCode Go حاليًا في المرحلة التجريبية. - Qwen3.6 Plus — 500 input, 57,000 cached, 190 output tokens per request - MiMo-V2-Pro — ‏350 input، و41,000 cached، و250 output tokens لكل طلب - MiMo-V2-Omni — ‏1000 input، و60,000 cached، و140 output tokens لكل طلب +- MiMo-V2.5-Pro — ‏350 input، و41,000 cached، و250 output tokens لكل طلب +- MiMo-V2.5 — ‏1000 input، و60,000 cached، و140 output tokens لكل طلب يمكنك تتبّع استخدامك الحالي في **console**. @@ -131,6 +137,8 @@ OpenCode Go حاليًا في المرحلة التجريبية. 
| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | | MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | | Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | diff --git a/packages/web/src/content/docs/bs/go.mdx b/packages/web/src/content/docs/bs/go.mdx index b94a78ee75..ece9c0ca52 100644 --- a/packages/web/src/content/docs/bs/go.mdx +++ b/packages/web/src/content/docs/bs/go.mdx @@ -69,6 +69,8 @@ Trenutna lista modela uključuje: - **Kimi K2.6** - **MiMo-V2-Pro** - **MiMo-V2-Omni** +- **MiMo-V2.5-Pro** +- **MiMo-V2.5** - **MiniMax M2.5** - **Qwen3.5 Plus** - **Qwen3.6 Plus** @@ -96,8 +98,10 @@ Tabela ispod pruža procijenjeni broj zahtjeva na osnovu tipičnih obrazaca kori | GLM-5 | 1,150 | 2,880 | 5,750 | | Kimi K2.5 | 1,850 | 4,630 | 9,250 | | Kimi K2.6 | 1,150 | 2,880 | 5,750 | -| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | | Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | | MiniMax M2.7 | 3,400 | 8,500 | 17,000 | | MiniMax M2.5 | 6,300 | 15,900 | 31,800 | @@ -112,6 +116,8 @@ Procjene se zasnivaju na zapaženim prosječnim obrascima zahtjeva: - Qwen3.6 Plus — 500 input, 57,000 cached, 190 output 
tokens per request - MiMo-V2-Pro — 350 ulaznih, 41,000 keširanih, 250 izlaznih tokena po zahtjevu - MiMo-V2-Omni — 1000 ulaznih, 60,000 keširanih, 140 izlaznih tokena po zahtjevu +- MiMo-V2.5-Pro — 350 ulaznih, 41,000 keširanih, 250 izlaznih tokena po zahtjevu +- MiMo-V2.5 — 1000 ulaznih, 60,000 keširanih, 140 izlaznih tokena po zahtjevu Svoju trenutnu potrošnju možete pratiti u **konzoli**. @@ -143,6 +149,8 @@ Također možete pristupiti Go modelima putem sljedećih API endpointa. | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | | MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | | Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | diff --git a/packages/web/src/content/docs/da/go.mdx b/packages/web/src/content/docs/da/go.mdx index 0ef5f12226..437e807ecf 100644 --- a/packages/web/src/content/docs/da/go.mdx +++ b/packages/web/src/content/docs/da/go.mdx @@ -69,6 +69,8 @@ Den nuværende liste over modeller inkluderer: - **Kimi K2.6** - **MiMo-V2-Pro** - **MiMo-V2-Omni** +- **MiMo-V2.5-Pro** +- **MiMo-V2.5** - **MiniMax M2.5** - **Qwen3.5 Plus** - **Qwen3.6 Plus** @@ -96,8 +98,10 @@ Tabellen nedenfor giver et estimeret antal anmodninger baseret på typiske Go-fo | GLM-5 | 1,150 | 2,880 | 5,750 | | Kimi K2.5 | 1,850 | 4,630 | 9,250 | | Kimi K2.6 | 1,150 | 2,880 | 5,750 | -| MiMo-V2-Pro | 
1,290 | 3,225 | 6,450 | -| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | | Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | | MiniMax M2.7 | 3,400 | 8,500 | 17,000 | | MiniMax M2.5 | 6,300 | 15,900 | 31,800 | @@ -112,6 +116,8 @@ Estimaterne er baseret på observerede gennemsnitlige anmodningsmønstre: - Qwen3.6 Plus — 500 input, 57,000 cached, 190 output tokens per request - MiMo-V2-Pro — 350 input, 41.000 cachelagrede, 250 output-tokens pr. anmodning - MiMo-V2-Omni — 1000 input, 60.000 cachelagrede, 140 output-tokens pr. anmodning +- MiMo-V2.5-Pro — 350 input, 41.000 cachelagrede, 250 output-tokens pr. anmodning +- MiMo-V2.5 — 1000 input, 60.000 cachelagrede, 140 output-tokens pr. anmodning Du kan spore dit nuværende forbrug i **konsollen**. @@ -143,6 +149,8 @@ Du kan også få adgang til Go-modeller gennem følgende API-endpoints. | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | | MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | | Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | diff --git a/packages/web/src/content/docs/de/go.mdx b/packages/web/src/content/docs/de/go.mdx index 269f6231ee..8f60af1e9f 100644 --- 
a/packages/web/src/content/docs/de/go.mdx +++ b/packages/web/src/content/docs/de/go.mdx @@ -61,6 +61,8 @@ Die aktuelle Liste der Modelle umfasst: - **Kimi K2.6** - **MiMo-V2-Pro** - **MiMo-V2-Omni** +- **MiMo-V2.5-Pro** +- **MiMo-V2.5** - **MiniMax M2.5** - **Qwen3.5 Plus** - **Qwen3.6 Plus** @@ -88,8 +90,10 @@ Die folgende Tabelle zeigt eine geschätzte Anzahl von Anfragen basierend auf ty | GLM-5 | 1,150 | 2,880 | 5,750 | | Kimi K2.5 | 1,850 | 4,630 | 9,250 | | Kimi K2.6 | 1,150 | 2,880 | 5,750 | -| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | | Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | | MiniMax M2.7 | 3,400 | 8,500 | 17,000 | | MiniMax M2.5 | 6,300 | 15,900 | 31,800 | @@ -104,6 +108,8 @@ Die Schätzungen basieren auf beobachteten durchschnittlichen Anfragemustern: - Qwen3.6 Plus — 500 input, 57,000 cached, 190 output tokens per request - MiMo-V2-Pro — 350 Input-, 41.000 Cached-, 250 Output-Tokens pro Anfrage - MiMo-V2-Omni — 1.000 Input-, 60.000 Cached-, 140 Output-Tokens pro Anfrage +- MiMo-V2.5-Pro — 350 Input-, 41.000 Cached-, 250 Output-Tokens pro Anfrage +- MiMo-V2.5 — 1.000 Input-, 60.000 Cached-, 140 Output-Tokens pro Anfrage Du kannst deine aktuelle Nutzung in der **Console** verfolgen. @@ -133,6 +139,8 @@ Du kannst auf die Go-Modelle auch über die folgenden API-Endpunkte zugreifen. 
| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | | MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | | Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | diff --git a/packages/web/src/content/docs/es/go.mdx b/packages/web/src/content/docs/es/go.mdx index e70e3dd1f9..1d4cf709c4 100644 --- a/packages/web/src/content/docs/es/go.mdx +++ b/packages/web/src/content/docs/es/go.mdx @@ -69,6 +69,8 @@ La lista actual de modelos incluye: - **Kimi K2.6** - **MiMo-V2-Pro** - **MiMo-V2-Omni** +- **MiMo-V2.5-Pro** +- **MiMo-V2.5** - **MiniMax M2.5** - **Qwen3.5 Plus** - **Qwen3.6 Plus** @@ -96,8 +98,10 @@ La siguiente tabla proporciona una cantidad estimada de peticiones basada en los | GLM-5 | 1,150 | 2,880 | 5,750 | | Kimi K2.5 | 1,850 | 4,630 | 9,250 | | Kimi K2.6 | 1,150 | 2,880 | 5,750 | -| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | | Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | | MiniMax M2.7 | 3,400 | 8,500 | 17,000 | | MiniMax M2.5 | 6,300 | 15,900 | 31,800 | @@ -112,6 +116,8 @@ Las estimaciones se basan en los patrones de peticiones promedio observados: - Qwen3.6 Plus — 500 input, 57,000 
cached, 190 output tokens per request - MiMo-V2-Pro — 350 tokens de entrada, 41,000 en caché, 250 tokens de salida por petición - MiMo-V2-Omni — 1000 tokens de entrada, 60,000 en caché, 140 tokens de salida por petición +- MiMo-V2.5-Pro — 350 tokens de entrada, 41,000 en caché, 250 tokens de salida por petición +- MiMo-V2.5 — 1000 tokens de entrada, 60,000 en caché, 140 tokens de salida por petición Puedes realizar un seguimiento de tu uso actual en la **consola**. @@ -143,6 +149,8 @@ También puedes acceder a los modelos de Go a través de los siguientes endpoint | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | | MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | | Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | diff --git a/packages/web/src/content/docs/fr/go.mdx b/packages/web/src/content/docs/fr/go.mdx index 5527d9d865..b288ff1bf9 100644 --- a/packages/web/src/content/docs/fr/go.mdx +++ b/packages/web/src/content/docs/fr/go.mdx @@ -59,6 +59,8 @@ La liste actuelle des modèles comprend : - **Kimi K2.6** - **MiMo-V2-Pro** - **MiMo-V2-Omni** +- **MiMo-V2.5-Pro** +- **MiMo-V2.5** - **MiniMax M2.5** - **Qwen3.5 Plus** - **Qwen3.6 Plus** @@ -86,8 +88,10 @@ Le tableau ci-dessous fournit une estimation du nombre de requêtes basée sur d | GLM-5 | 1,150 | 2,880 | 5,750 | | Kimi K2.5 | 
1,850 | 4,630 | 9,250 | | Kimi K2.6 | 1,150 | 2,880 | 5,750 | -| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | | Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | | MiniMax M2.7 | 3,400 | 8,500 | 17,000 | | MiniMax M2.5 | 6,300 | 15,900 | 31,800 | @@ -102,6 +106,8 @@ Les estimations sont basées sur les modèles de requêtes moyens observés : - Qwen3.6 Plus — 500 input, 57,000 cached, 190 output tokens per request - MiMo-V2-Pro — 350 tokens en entrée, 41,000 en cache, 250 tokens en sortie par requête - MiMo-V2-Omni — 1000 tokens en entrée, 60,000 en cache, 140 tokens en sortie par requête +- MiMo-V2.5-Pro — 350 tokens en entrée, 41,000 en cache, 250 tokens en sortie par requête +- MiMo-V2.5 — 1000 tokens en entrée, 60,000 en cache, 140 tokens en sortie par requête Vous pouvez suivre votre utilisation actuelle dans la **console**. 
@@ -131,6 +137,8 @@ Vous pouvez également accéder aux modèles Go via les points de terminaison d' | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | | MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | | Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | diff --git a/packages/web/src/content/docs/go.mdx b/packages/web/src/content/docs/go.mdx index a39b6f7d24..cd3e2c8445 100644 --- a/packages/web/src/content/docs/go.mdx +++ b/packages/web/src/content/docs/go.mdx @@ -69,6 +69,8 @@ The current list of models includes: - **Kimi K2.6** - **MiMo-V2-Pro** - **MiMo-V2-Omni** +- **MiMo-V2.5-Pro** +- **MiMo-V2.5** - **MiniMax M2.5** - **MiniMax M2.7** - **Qwen3.5 Plus** @@ -96,8 +98,10 @@ The table below provides an estimated request count based on typical Go usage pa | GLM-5 | 1,150 | 2,880 | 5,750 | | Kimi K2.5 | 1,850 | 4,630 | 9,250 | | Kimi K2.6 | 1,150 | 2,880 | 5,750 | -| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | | Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | | MiniMax M2.7 | 3,400 | 8,500 | 17,000 | | MiniMax M2.5 | 6,300 | 15,900 | 31,800 | @@ -110,6 +114,8 @@ Estimates are based on 
observed average request patterns: - MiniMax M2.7/M2.5 — 300 input, 55,000 cached, 125 output tokens per request - MiMo-V2-Pro — 350 input, 41,000 cached, 250 output tokens per request - MiMo-V2-Omni — 1000 input, 60,000 cached, 140 output tokens per request +- MiMo-V2.5-Pro — 350 input, 41,000 cached, 250 output tokens per request +- MiMo-V2.5 — 1000 input, 60,000 cached, 140 output tokens per request - Qwen3.5 Plus — 410 input, 47,000 cached, 140 output tokens per request - Qwen3.6 Plus — 500 input, 57,000 cached, 190 output tokens per request @@ -143,6 +149,8 @@ You can also access Go models through the following API endpoints. | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | | MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | | Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | diff --git a/packages/web/src/content/docs/it/go.mdx b/packages/web/src/content/docs/it/go.mdx index 6cdf7ac6cf..9cf04d77d6 100644 --- a/packages/web/src/content/docs/it/go.mdx +++ b/packages/web/src/content/docs/it/go.mdx @@ -67,6 +67,8 @@ L'elenco attuale dei modelli include: - **Kimi K2.6** - **MiMo-V2-Pro** - **MiMo-V2-Omni** +- **MiMo-V2.5-Pro** +- **MiMo-V2.5** - **MiniMax M2.5** - **Qwen3.5 Plus** - **Qwen3.6 Plus** @@ -94,8 +96,10 @@ La tabella seguente fornisce una stima del conteggio delle 
richieste in base a p | GLM-5 | 1,150 | 2,880 | 5,750 | | Kimi K2.5 | 1,850 | 4,630 | 9,250 | | Kimi K2.6 | 1,150 | 2,880 | 5,750 | -| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | | Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | | MiniMax M2.7 | 3,400 | 8,500 | 17,000 | | MiniMax M2.5 | 6,300 | 15,900 | 31,800 | @@ -110,6 +114,8 @@ Le stime si basano sui pattern medi di richieste osservati: - Qwen3.6 Plus — 500 input, 57,000 cached, 190 output tokens per request - MiMo-V2-Pro — 350 di input, 41.000 in cache, 250 token di output per richiesta - MiMo-V2-Omni — 1000 di input, 60.000 in cache, 140 token di output per richiesta +- MiMo-V2.5-Pro — 350 di input, 41.000 in cache, 250 token di output per richiesta +- MiMo-V2.5 — 1000 di input, 60.000 in cache, 140 token di output per richiesta Puoi monitorare il tuo utilizzo attuale nella **console**. @@ -141,6 +147,8 @@ Puoi anche accedere ai modelli Go tramite i seguenti endpoint API. 
| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | | MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | | Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | diff --git a/packages/web/src/content/docs/ja/go.mdx b/packages/web/src/content/docs/ja/go.mdx index f122d2367b..40c1dbf36e 100644 --- a/packages/web/src/content/docs/ja/go.mdx +++ b/packages/web/src/content/docs/ja/go.mdx @@ -59,6 +59,8 @@ OpenCode Goをサブスクライブできるのは、1つのワークスペー - **Kimi K2.6** - **MiMo-V2-Pro** - **MiMo-V2-Omni** +- **MiMo-V2.5-Pro** +- **MiMo-V2.5** - **MiniMax M2.5** - **Qwen3.5 Plus** - **Qwen3.6 Plus** @@ -86,8 +88,10 @@ OpenCode Goには以下の制限が含まれています: | GLM-5 | 1,150 | 2,880 | 5,750 | | Kimi K2.5 | 1,850 | 4,630 | 9,250 | | Kimi K2.6 | 1,150 | 2,880 | 5,750 | -| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | | Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | | MiniMax M2.7 | 3,400 | 8,500 | 17,000 | | MiniMax M2.5 | 6,300 | 15,900 | 31,800 | @@ -102,6 +106,8 @@ OpenCode Goには以下の制限が含まれています: - Qwen3.6 Plus — 500 input, 57,000 cached, 190 output tokens per request - MiMo-V2-Pro — リクエストあたり 入力 350トークン、キャッシュ 41,000トークン、出力 250トークン - 
MiMo-V2-Omni — リクエストあたり 入力 1000トークン、キャッシュ 60,000トークン、出力 140トークン +- MiMo-V2.5-Pro — リクエストあたり 入力 350トークン、キャッシュ 41,000トークン、出力 250トークン +- MiMo-V2.5 — リクエストあたり 入力 1000トークン、キャッシュ 60,000トークン、出力 140トークン 現在の利用状況は**コンソール**で追跡できます。 @@ -131,6 +137,8 @@ Zen残高にクレジットがある場合は、コンソールで**Use balance* | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | | MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | | Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | diff --git a/packages/web/src/content/docs/ko/go.mdx b/packages/web/src/content/docs/ko/go.mdx index cd0b1b8da2..4b3dbd27d0 100644 --- a/packages/web/src/content/docs/ko/go.mdx +++ b/packages/web/src/content/docs/ko/go.mdx @@ -59,6 +59,8 @@ workspace당 한 명의 멤버만 OpenCode Go를 구독할 수 있습니다. - **Kimi K2.6** - **MiMo-V2-Pro** - **MiMo-V2-Omni** +- **MiMo-V2.5-Pro** +- **MiMo-V2.5** - **MiniMax M2.5** - **Qwen3.5 Plus** - **Qwen3.6 Plus** @@ -86,8 +88,10 @@ OpenCode Go에는 다음과 같은 한도가 포함됩니다. 
| GLM-5 | 1,150 | 2,880 | 5,750 | | Kimi K2.5 | 1,850 | 4,630 | 9,250 | | Kimi K2.6 | 1,150 | 2,880 | 5,750 | -| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | | Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | | MiniMax M2.7 | 3,400 | 8,500 | 17,000 | | MiniMax M2.5 | 6,300 | 15,900 | 31,800 | @@ -102,6 +106,8 @@ OpenCode Go에는 다음과 같은 한도가 포함됩니다. - Qwen3.6 Plus — 500 input, 57,000 cached, 190 output tokens per request - MiMo-V2-Pro — 요청당 입력 350, 캐시 41,000, 출력 토큰 250 - MiMo-V2-Omni — 요청당 입력 1000, 캐시 60,000, 출력 토큰 140 +- MiMo-V2.5-Pro — 요청당 입력 350, 캐시 41,000, 출력 토큰 250 +- MiMo-V2.5 — 요청당 입력 1000, 캐시 60,000, 출력 토큰 140 현재 사용량은 **console**에서 확인할 수 있습니다. @@ -131,6 +137,8 @@ Zen 잔액에 크레딧도 있다면, console에서 **Use balance** 옵션을 | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | | MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | | Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | diff --git a/packages/web/src/content/docs/nb/go.mdx b/packages/web/src/content/docs/nb/go.mdx index 776cc0c92d..f062683e87 100644 --- a/packages/web/src/content/docs/nb/go.mdx +++ b/packages/web/src/content/docs/nb/go.mdx @@ -69,6 
+69,8 @@ Den nåværende listen over modeller inkluderer: - **Kimi K2.6** - **MiMo-V2-Pro** - **MiMo-V2-Omni** +- **MiMo-V2.5-Pro** +- **MiMo-V2.5** - **MiniMax M2.5** - **Qwen3.5 Plus** - **Qwen3.6 Plus** @@ -96,8 +98,10 @@ Tabellen nedenfor gir et estimert antall forespørsler basert på typiske bruksm | GLM-5 | 1,150 | 2,880 | 5,750 | | Kimi K2.5 | 1,850 | 4,630 | 9,250 | | Kimi K2.6 | 1,150 | 2,880 | 5,750 | -| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | | Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | | MiniMax M2.7 | 3,400 | 8,500 | 17,000 | | MiniMax M2.5 | 6,300 | 15,900 | 31,800 | @@ -112,6 +116,8 @@ Estimatene er basert på observerte gjennomsnittlige forespørselsmønstre: - Qwen3.6 Plus — 500 input, 57,000 cached, 190 output tokens per request - MiMo-V2-Pro — 350 input, 41 000 bufret, 250 output-tokens per forespørsel - MiMo-V2-Omni — 1000 input, 60 000 bufret, 140 output-tokens per forespørsel +- MiMo-V2.5-Pro — 350 input, 41 000 bufret, 250 output-tokens per forespørsel +- MiMo-V2.5 — 1000 input, 60 000 bufret, 140 output-tokens per forespørsel Du kan spore din nåværende bruk i **konsollen**. @@ -143,6 +149,8 @@ Du kan også få tilgang til Go-modeller gjennom følgende API-endepunkter. 
| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | | MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | | Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | diff --git a/packages/web/src/content/docs/pl/go.mdx b/packages/web/src/content/docs/pl/go.mdx index d99f5e0986..12abb29bc1 100644 --- a/packages/web/src/content/docs/pl/go.mdx +++ b/packages/web/src/content/docs/pl/go.mdx @@ -63,6 +63,8 @@ Obecna lista modeli obejmuje: - **Kimi K2.6** - **MiMo-V2-Pro** - **MiMo-V2-Omni** +- **MiMo-V2.5-Pro** +- **MiMo-V2.5** - **MiniMax M2.5** - **Qwen3.5 Plus** - **Qwen3.6 Plus** @@ -90,8 +92,10 @@ Poniższa tabela przedstawia szacunkową liczbę żądań na podstawie typowych | GLM-5 | 1,150 | 2,880 | 5,750 | | Kimi K2.5 | 1,850 | 4,630 | 9,250 | | Kimi K2.6 | 1,150 | 2,880 | 5,750 | -| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | | Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | | MiniMax M2.7 | 3,400 | 8,500 | 17,000 | | MiniMax M2.5 | 6,300 | 15,900 | 31,800 | @@ -106,6 +110,8 @@ Szacunki opierają się na zaobserwowanych średnich wzorcach żądań: - Qwen3.6 Plus — 500 input, 57,000 cached, 190 output tokens 
per request - MiMo-V2-Pro — 350 tokenów wejściowych, 41 000 w pamięci podręcznej, 250 tokenów wyjściowych na żądanie - MiMo-V2-Omni — 1000 tokenów wejściowych, 60 000 w pamięci podręcznej, 140 tokenów wyjściowych na żądanie +- MiMo-V2.5-Pro — 350 tokenów wejściowych, 41 000 w pamięci podręcznej, 250 tokenów wyjściowych na żądanie +- MiMo-V2.5 — 1000 tokenów wejściowych, 60 000 w pamięci podręcznej, 140 tokenów wyjściowych na żądanie Możesz śledzić swoje bieżące zużycie w **konsoli**. @@ -135,6 +141,8 @@ Możesz również uzyskać dostęp do modeli Go za pośrednictwem następującyc | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | | MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | | Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | diff --git a/packages/web/src/content/docs/pt-br/go.mdx b/packages/web/src/content/docs/pt-br/go.mdx index 631038298a..b41cb0d0e7 100644 --- a/packages/web/src/content/docs/pt-br/go.mdx +++ b/packages/web/src/content/docs/pt-br/go.mdx @@ -69,6 +69,8 @@ A lista atual de modelos inclui: - **Kimi K2.6** - **MiMo-V2-Pro** - **MiMo-V2-Omni** +- **MiMo-V2.5-Pro** +- **MiMo-V2.5** - **MiniMax M2.5** - **Qwen3.5 Plus** - **Qwen3.6 Plus** @@ -96,8 +98,10 @@ A tabela abaixo fornece uma contagem estimada de requisições com base nos padr | GLM-5 | 1,150 | 2,880 | 5,750 | 
| Kimi K2.5 | 1,850 | 4,630 | 9,250 | | Kimi K2.6 | 1,150 | 2,880 | 5,750 | -| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | | Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | | MiniMax M2.7 | 3,400 | 8,500 | 17,000 | | MiniMax M2.5 | 6,300 | 15,900 | 31,800 | @@ -112,6 +116,8 @@ As estimativas baseiam-se nos padrões médios de requisições observados: - Qwen3.6 Plus — 500 input, 57,000 cached, 190 output tokens per request - MiMo-V2-Pro — 350 tokens de entrada, 41.000 em cache, 250 tokens de saída por requisição - MiMo-V2-Omni — 1000 tokens de entrada, 60.000 em cache, 140 tokens de saída por requisição +- MiMo-V2.5-Pro — 350 tokens de entrada, 41.000 em cache, 250 tokens de saída por requisição +- MiMo-V2.5 — 1000 tokens de entrada, 60.000 em cache, 140 tokens de saída por requisição Você pode acompanhar o seu uso atual no **console**. 
@@ -143,6 +149,8 @@ Você também pode acessar os modelos do Go através dos seguintes endpoints de | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | | MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | | Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | diff --git a/packages/web/src/content/docs/ru/go.mdx b/packages/web/src/content/docs/ru/go.mdx index 60f01c2b53..62ce99ef9e 100644 --- a/packages/web/src/content/docs/ru/go.mdx +++ b/packages/web/src/content/docs/ru/go.mdx @@ -69,6 +69,8 @@ OpenCode Go работает так же, как и любой другой пр - **Kimi K2.6** - **MiMo-V2-Pro** - **MiMo-V2-Omni** +- **MiMo-V2.5-Pro** +- **MiMo-V2.5** - **MiniMax M2.5** - **Qwen3.5 Plus** - **Qwen3.6 Plus** @@ -96,8 +98,10 @@ OpenCode Go включает следующие лимиты: | GLM-5 | 1,150 | 2,880 | 5,750 | | Kimi K2.5 | 1,850 | 4,630 | 9,250 | | Kimi K2.6 | 1,150 | 2,880 | 5,750 | -| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | | Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | | MiniMax M2.7 | 3,400 | 8,500 | 17,000 | | MiniMax M2.5 | 6,300 | 15,900 | 31,800 | @@ -112,6 +116,8 @@ OpenCode Go включает следующие лимиты: - 
Qwen3.6 Plus — 500 input, 57,000 cached, 190 output tokens per request - MiMo-V2-Pro — 350 входных, 41,000 кешированных, 250 выходных токенов на запрос - MiMo-V2-Omni — 1000 входных, 60,000 кешированных, 140 выходных токенов на запрос +- MiMo-V2.5-Pro — 350 входных, 41,000 кешированных, 250 выходных токенов на запрос +- MiMo-V2.5 — 1000 входных, 60,000 кешированных, 140 выходных токенов на запрос Вы можете отслеживать текущее использование в **консоли**. @@ -143,6 +149,8 @@ OpenCode Go включает следующие лимиты: | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | | MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | | Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | diff --git a/packages/web/src/content/docs/th/go.mdx b/packages/web/src/content/docs/th/go.mdx index 3af1eadc9f..84ce3546c1 100644 --- a/packages/web/src/content/docs/th/go.mdx +++ b/packages/web/src/content/docs/th/go.mdx @@ -59,6 +59,8 @@ OpenCode Go ทำงานเหมือนกับผู้ให้บร - **Kimi K2.6** - **MiMo-V2-Pro** - **MiMo-V2-Omni** +- **MiMo-V2.5-Pro** +- **MiMo-V2.5** - **MiniMax M2.5** - **Qwen3.5 Plus** - **Qwen3.6 Plus** @@ -86,8 +88,10 @@ OpenCode Go มีขีดจำกัดดังต่อไปนี้: | GLM-5 | 1,150 | 2,880 | 5,750 | | Kimi K2.5 | 1,850 | 4,630 | 9,250 | | Kimi K2.6 | 1,150 | 2,880 | 5,750 | -| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | 
-| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | | Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | | MiniMax M2.7 | 3,400 | 8,500 | 17,000 | | MiniMax M2.5 | 6,300 | 15,900 | 31,800 | @@ -102,6 +106,8 @@ OpenCode Go มีขีดจำกัดดังต่อไปนี้: - Qwen3.6 Plus — 500 input, 57,000 cached, 190 output tokens per request - MiMo-V2-Pro — 350 input, 41,000 cached, 250 output tokens ต่อ request - MiMo-V2-Omni — 1000 input, 60,000 cached, 140 output tokens ต่อ request +- MiMo-V2.5-Pro — 350 input, 41,000 cached, 250 output tokens ต่อ request +- MiMo-V2.5 — 1000 input, 60,000 cached, 140 output tokens ต่อ request คุณสามารถติดตามการใช้งานปัจจุบันของคุณได้ใน **console** @@ -131,6 +137,8 @@ OpenCode Go มีขีดจำกัดดังต่อไปนี้: | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | | MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | | Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | diff --git a/packages/web/src/content/docs/tr/go.mdx b/packages/web/src/content/docs/tr/go.mdx index e962c06807..edd102685b 100644 --- a/packages/web/src/content/docs/tr/go.mdx +++ b/packages/web/src/content/docs/tr/go.mdx @@ -59,6 +59,8 @@ Mevcut model listesi şunları içerir: 
- **Kimi K2.6** - **MiMo-V2-Pro** - **MiMo-V2-Omni** +- **MiMo-V2.5-Pro** +- **MiMo-V2.5** - **MiniMax M2.5** - **Qwen3.5 Plus** - **Qwen3.6 Plus** @@ -86,8 +88,10 @@ Aşağıdaki tablo, tipik Go kullanım modellerine dayalı tahmini bir istek say | GLM-5 | 1,150 | 2,880 | 5,750 | | Kimi K2.5 | 1,850 | 4,630 | 9,250 | | Kimi K2.6 | 1,150 | 2,880 | 5,750 | -| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | | Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | | MiniMax M2.7 | 3,400 | 8,500 | 17,000 | | MiniMax M2.5 | 6,300 | 15,900 | 31,800 | @@ -102,6 +106,8 @@ Tahminler, gözlemlenen ortalama istek modellerine dayanmaktadır: - Qwen3.6 Plus — 500 input, 57,000 cached, 190 output tokens per request - MiMo-V2-Pro — İstek başına 350 girdi, 41.000 önbelleğe alınmış, 250 çıktı token'ı - MiMo-V2-Omni — İstek başına 1000 girdi, 60.000 önbelleğe alınmış, 140 çıktı token'ı +- MiMo-V2.5-Pro — İstek başına 350 girdi, 41.000 önbelleğe alınmış, 250 çıktı token'ı +- MiMo-V2.5 — İstek başına 1000 girdi, 60.000 önbelleğe alınmış, 140 çıktı token'ı Mevcut kullanımınızı **konsoldan** takip edebilirsiniz. 
@@ -131,6 +137,8 @@ Go modellerine aşağıdaki API uç noktaları aracılığıyla da erişebilirsi | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | | MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | | Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | diff --git a/packages/web/src/content/docs/zh-cn/go.mdx b/packages/web/src/content/docs/zh-cn/go.mdx index ac3b5f9bf5..0c1cf98274 100644 --- a/packages/web/src/content/docs/zh-cn/go.mdx +++ b/packages/web/src/content/docs/zh-cn/go.mdx @@ -59,6 +59,8 @@ OpenCode Go 的工作方式与 OpenCode 中的其他提供商一样。 - **Kimi K2.6** - **MiMo-V2-Pro** - **MiMo-V2-Omni** +- **MiMo-V2.5-Pro** +- **MiMo-V2.5** - **MiniMax M2.5** - **Qwen3.5 Plus** - **Qwen3.6 Plus** @@ -86,8 +88,10 @@ OpenCode Go 包含以下限制: | GLM-5 | 1,150 | 2,880 | 5,750 | | Kimi K2.6 | 1,150 | 2,880 | 5,750 | | Kimi K2.5 | 1,850 | 4,630 | 9,250 | -| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | | Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | | MiniMax M2.7 | 3,400 | 8,500 | 17,000 | | MiniMax M2.5 | 6,300 | 15,900 | 31,800 | @@ -99,6 +103,8 @@ OpenCode Go 包含以下限制: - Kimi K2.5/K2.6 — 每次请求 870 个输入 token,55,000 个缓存 
token,200 个输出 token - MiMo-V2-Pro — 每次请求 350 个输入 token,41,000 个缓存 token,250 个输出 token - MiMo-V2-Omni — 每次请求 1000 个输入 token,60,000 个缓存 token,140 个输出 token +- MiMo-V2.5-Pro — 每次请求 350 个输入 token,41,000 个缓存 token,250 个输出 token +- MiMo-V2.5 — 每次请求 1000 个输入 token,60,000 个缓存 token,140 个输出 token - MiniMax M2.7/M2.5 — 每次请求 300 个输入 token,55,000 个缓存 token,125 个输出 token - Qwen3.5 Plus — 410 input, 47,000 cached, 140 output tokens per request - Qwen3.6 Plus — 500 input, 57,000 cached, 190 output tokens per request @@ -131,6 +137,8 @@ OpenCode Go 包含以下限制: | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | | MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | | Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | diff --git a/packages/web/src/content/docs/zh-tw/go.mdx b/packages/web/src/content/docs/zh-tw/go.mdx index 0621a66940..c1016028ba 100644 --- a/packages/web/src/content/docs/zh-tw/go.mdx +++ b/packages/web/src/content/docs/zh-tw/go.mdx @@ -59,6 +59,8 @@ OpenCode Go 的運作方式與 OpenCode 中的任何其他供應商相同。 - **Kimi K2.6** - **MiMo-V2-Pro** - **MiMo-V2-Omni** +- **MiMo-V2.5-Pro** +- **MiMo-V2.5** - **MiniMax M2.5** - **Qwen3.5 Plus** - **Qwen3.6 Plus** @@ -86,8 +88,10 @@ OpenCode Go 包含以下限制: | GLM-5 | 1,150 | 2,880 | 5,750 | | Kimi K2.5 | 1,850 | 4,630 | 9,250 | | Kimi K2.6 | 1,150 | 2,880 | 5,750 | -| 
MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | | Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | | MiniMax M2.7 | 3,400 | 8,500 | 17,000 | | MiniMax M2.5 | 6,300 | 15,900 | 31,800 | @@ -102,6 +106,8 @@ OpenCode Go 包含以下限制: - Qwen3.6 Plus — 500 input, 57,000 cached, 190 output tokens per request - MiMo-V2-Pro — 每次請求 350 個輸入 token、41,000 個快取 token、250 個輸出 token - MiMo-V2-Omni — 每次請求 1000 個輸入 token、60,000 個快取 token、140 個輸出 token +- MiMo-V2.5-Pro — 每次請求 350 個輸入 token、41,000 個快取 token、250 個輸出 token +- MiMo-V2.5 — 每次請求 1000 個輸入 token、60,000 個快取 token、140 個輸出 token 您可以在 **console** 中追蹤您目前的使用量。 @@ -131,6 +137,8 @@ OpenCode Go 包含以下限制: | Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | | MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | | Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | From 69b7f3b8db82c3ab9dacd72d715a57d375de18e4 Mon Sep 17 00:00:00 2001 From: "opencode-agent[bot]" Date: Wed, 22 Apr 2026 16:29:44 +0000 Subject: [PATCH 68/73] chore: generate --- packages/web/src/content/docs/ar/go.mdx | 54 +++++++++++----------- packages/web/src/content/docs/bs/go.mdx | 54 +++++++++++----------- 
packages/web/src/content/docs/da/go.mdx | 54 +++++++++++----------- packages/web/src/content/docs/de/go.mdx | 54 +++++++++++----------- packages/web/src/content/docs/es/go.mdx | 54 +++++++++++----------- packages/web/src/content/docs/fr/go.mdx | 54 +++++++++++----------- packages/web/src/content/docs/go.mdx | 54 +++++++++++----------- packages/web/src/content/docs/it/go.mdx | 54 +++++++++++----------- packages/web/src/content/docs/ja/go.mdx | 54 +++++++++++----------- packages/web/src/content/docs/ko/go.mdx | 54 +++++++++++----------- packages/web/src/content/docs/nb/go.mdx | 54 +++++++++++----------- packages/web/src/content/docs/pl/go.mdx | 54 +++++++++++----------- packages/web/src/content/docs/pt-br/go.mdx | 54 +++++++++++----------- packages/web/src/content/docs/ru/go.mdx | 54 +++++++++++----------- packages/web/src/content/docs/th/go.mdx | 54 +++++++++++----------- packages/web/src/content/docs/tr/go.mdx | 54 +++++++++++----------- packages/web/src/content/docs/zh-cn/go.mdx | 54 +++++++++++----------- packages/web/src/content/docs/zh-tw/go.mdx | 54 +++++++++++----------- 18 files changed, 486 insertions(+), 486 deletions(-) diff --git a/packages/web/src/content/docs/ar/go.mdx b/packages/web/src/content/docs/ar/go.mdx index 008c83468d..785ea35b66 100644 --- a/packages/web/src/content/docs/ar/go.mdx +++ b/packages/web/src/content/docs/ar/go.mdx @@ -82,20 +82,20 @@ OpenCode Go حاليًا في المرحلة التجريبية. 
يوضح الجدول أدناه عددًا تقديريًا للطلبات بناءً على أنماط استخدام Go المعتادة: -| Model | الطلبات لكل 5 ساعات | الطلبات في الأسبوع | الطلبات في الشهر | -| ------------ | ------------------- | ------------------ | ---------------- | -| GLM-5.1 | 880 | 2,150 | 4,300 | -| GLM-5 | 1,150 | 2,880 | 5,750 | -| Kimi K2.5 | 1,850 | 4,630 | 9,250 | -| Kimi K2.6 | 1,150 | 2,880 | 5,750 | -| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | -| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | -| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | -| MiniMax M2.7 | 3,400 | 8,500 | 17,000 | -| MiniMax M2.5 | 6,300 | 15,900 | 31,800 | -| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 | +| Model | الطلبات لكل 5 ساعات | الطلبات في الأسبوع | الطلبات في الشهر | +| ------------- | ------------------- | ------------------ | ---------------- | +| GLM-5.1 | 880 | 2,150 | 4,300 | +| GLM-5 | 1,150 | 2,880 | 5,750 | +| Kimi K2.5 | 1,850 | 4,630 | 9,250 | +| Kimi K2.6 | 1,150 | 2,880 | 5,750 | +| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | +| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | +| MiniMax M2.7 | 3,400 | 8,500 | 17,000 | +| MiniMax M2.5 | 6,300 | 15,900 | 31,800 | +| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 | تستند التقديرات إلى متوسطات أنماط الطلبات المرصودة: @@ -129,20 +129,20 @@ OpenCode Go حاليًا في المرحلة التجريبية. يمكنك أيضًا الوصول إلى نماذج Go عبر نقاط نهاية API التالية. 
-| Model | Model ID | Endpoint | AI SDK Package | -| ------------ | ------------ | ------------------------------------------------ | --------------------------- | -| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Model | Model ID | Endpoint | AI SDK Package | +| ------------- | ------------- | ------------------------------------------------ | --------------------------- | +| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | -| MiniMax M2.5 | minimax-m2.5 | 
`https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | -| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | -| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | +| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | +| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | +| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | +| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | يستخدم [model id](/docs/config/#models) في إعدادات OpenCode لديك التنسيق `opencode-go/`. على سبيل المثال، بالنسبة إلى Kimi K2.6، ستستخدم `opencode-go/kimi-k2.6` في إعداداتك. diff --git a/packages/web/src/content/docs/bs/go.mdx b/packages/web/src/content/docs/bs/go.mdx index ece9c0ca52..523f1ef8ed 100644 --- a/packages/web/src/content/docs/bs/go.mdx +++ b/packages/web/src/content/docs/bs/go.mdx @@ -92,20 +92,20 @@ Ograničenja su definisana u dolarskoj vrijednosti. 
To znači da vaš stvarni br Tabela ispod pruža procijenjeni broj zahtjeva na osnovu tipičnih obrazaca korištenja Go pretplate: -| Model | zahtjeva na 5 sati | zahtjeva sedmično | zahtjeva mjesečno | -| ------------ | ------------------ | ----------------- | ----------------- | -| GLM-5.1 | 880 | 2,150 | 4,300 | -| GLM-5 | 1,150 | 2,880 | 5,750 | -| Kimi K2.5 | 1,850 | 4,630 | 9,250 | -| Kimi K2.6 | 1,150 | 2,880 | 5,750 | -| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | -| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | -| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | -| MiniMax M2.7 | 3,400 | 8,500 | 17,000 | -| MiniMax M2.5 | 6,300 | 15,900 | 31,800 | -| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 | +| Model | zahtjeva na 5 sati | zahtjeva sedmično | zahtjeva mjesečno | +| ------------- | ------------------ | ----------------- | ----------------- | +| GLM-5.1 | 880 | 2,150 | 4,300 | +| GLM-5 | 1,150 | 2,880 | 5,750 | +| Kimi K2.5 | 1,850 | 4,630 | 9,250 | +| Kimi K2.6 | 1,150 | 2,880 | 5,750 | +| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | +| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | +| MiniMax M2.7 | 3,400 | 8,500 | 17,000 | +| MiniMax M2.5 | 6,300 | 15,900 | 31,800 | +| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 | Procjene se zasnivaju na zapaženim prosječnim obrascima zahtjeva: @@ -141,20 +141,20 @@ nakon što dostignete ograničenja upotrebe umjesto blokiranja zahtjeva. Također možete pristupiti Go modelima putem sljedećih API endpointa. 
-| Model | Model ID | Endpoint | AI SDK Paket | -| ------------ | ------------ | ------------------------------------------------ | --------------------------- | -| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Model | Model ID | Endpoint | AI SDK Paket | +| ------------- | ------------- | ------------------------------------------------ | --------------------------- | +| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | -| MiniMax M2.5 | minimax-m2.5 | 
`https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | -| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | -| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | +| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | +| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | +| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | +| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | [Model id](/docs/config/#models) u vašoj OpenCode konfiguraciji koristi format `opencode-go/`. Na primjer, za Kimi K2.6, koristili biste diff --git a/packages/web/src/content/docs/da/go.mdx b/packages/web/src/content/docs/da/go.mdx index 437e807ecf..86a834b984 100644 --- a/packages/web/src/content/docs/da/go.mdx +++ b/packages/web/src/content/docs/da/go.mdx @@ -92,20 +92,20 @@ Grænserne er defineret i dollarværdi. Det betyder, at dit faktiske antal anmod Tabellen nedenfor giver et estimeret antal anmodninger baseret på typiske Go-forbrugsmønstre: -| Model | anmodninger pr. 5 timer | anmodninger pr. uge | anmodninger pr. 
måned | -| ------------ | ----------------------- | ------------------- | --------------------- | -| GLM-5.1 | 880 | 2,150 | 4,300 | -| GLM-5 | 1,150 | 2,880 | 5,750 | -| Kimi K2.5 | 1,850 | 4,630 | 9,250 | -| Kimi K2.6 | 1,150 | 2,880 | 5,750 | -| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | -| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | -| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | -| MiniMax M2.7 | 3,400 | 8,500 | 17,000 | -| MiniMax M2.5 | 6,300 | 15,900 | 31,800 | -| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 | +| Model | anmodninger pr. 5 timer | anmodninger pr. uge | anmodninger pr. måned | +| ------------- | ----------------------- | ------------------- | --------------------- | +| GLM-5.1 | 880 | 2,150 | 4,300 | +| GLM-5 | 1,150 | 2,880 | 5,750 | +| Kimi K2.5 | 1,850 | 4,630 | 9,250 | +| Kimi K2.6 | 1,150 | 2,880 | 5,750 | +| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | +| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | +| MiniMax M2.7 | 3,400 | 8,500 | 17,000 | +| MiniMax M2.5 | 6,300 | 15,900 | 31,800 | +| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 | Estimaterne er baseret på observerede gennemsnitlige anmodningsmønstre: @@ -141,20 +141,20 @@ når du har nået dine forbrugsgrænser, i stedet for at blokere anmodninger. Du kan også få adgang til Go-modeller gennem følgende API-endpoints. 
-| Model | Model ID | Endpoint | AI SDK Package | -| ------------ | ------------ | ------------------------------------------------ | --------------------------- | -| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Model | Model ID | Endpoint | AI SDK Package | +| ------------- | ------------- | ------------------------------------------------ | --------------------------- | +| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | -| MiniMax M2.5 | minimax-m2.5 | 
`https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | -| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | -| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | +| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | +| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | +| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | +| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | Dit [model id](/docs/config/#models) i din OpenCode config bruger formatet `opencode-go/`. For eksempel for Kimi K2.6, vil du diff --git a/packages/web/src/content/docs/de/go.mdx b/packages/web/src/content/docs/de/go.mdx index 8f60af1e9f..49c0efda58 100644 --- a/packages/web/src/content/docs/de/go.mdx +++ b/packages/web/src/content/docs/de/go.mdx @@ -84,20 +84,20 @@ Limits sind in Dollarwerten definiert. 
Das bedeutet, dass die tatsächliche Anza Die folgende Tabelle zeigt eine geschätzte Anzahl von Anfragen basierend auf typischen Go-Nutzungsmustern: -| Model | Anfragen pro 5 Stunden | Anfragen pro Woche | Anfragen pro Monat | -| ------------ | ---------------------- | ------------------ | ------------------ | -| GLM-5.1 | 880 | 2,150 | 4,300 | -| GLM-5 | 1,150 | 2,880 | 5,750 | -| Kimi K2.5 | 1,850 | 4,630 | 9,250 | -| Kimi K2.6 | 1,150 | 2,880 | 5,750 | -| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | -| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | -| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | -| MiniMax M2.7 | 3,400 | 8,500 | 17,000 | -| MiniMax M2.5 | 6,300 | 15,900 | 31,800 | -| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 | +| Model | Anfragen pro 5 Stunden | Anfragen pro Woche | Anfragen pro Monat | +| ------------- | ---------------------- | ------------------ | ------------------ | +| GLM-5.1 | 880 | 2,150 | 4,300 | +| GLM-5 | 1,150 | 2,880 | 5,750 | +| Kimi K2.5 | 1,850 | 4,630 | 9,250 | +| Kimi K2.6 | 1,150 | 2,880 | 5,750 | +| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | +| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | +| MiniMax M2.7 | 3,400 | 8,500 | 17,000 | +| MiniMax M2.5 | 6,300 | 15,900 | 31,800 | +| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 | Die Schätzungen basieren auf beobachteten durchschnittlichen Anfragemustern: @@ -131,20 +131,20 @@ Wenn du auch Guthaben auf deinem Zen-Konto hast, kannst du in der Console die Op Du kannst auf die Go-Modelle auch über die folgenden API-Endpunkte zugreifen. 
-| Modell | Modell-ID | Endpunkt | AI SDK Package | -| ------------ | ------------ | ------------------------------------------------ | --------------------------- | -| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Modell | Modell-ID | Endpunkt | AI SDK Package | +| ------------- | ------------- | ------------------------------------------------ | --------------------------- | +| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | -| MiniMax M2.5 | minimax-m2.5 | 
`https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | -| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | -| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | +| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | +| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | +| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | +| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | Die [Modell-ID](/docs/config/#models) in deiner OpenCode Config verwendet das Format `opencode-go/`. Für Kimi K2.6 würdest du beispielsweise `opencode-go/kimi-k2.6` in deiner Config verwenden. diff --git a/packages/web/src/content/docs/es/go.mdx b/packages/web/src/content/docs/es/go.mdx index 1d4cf709c4..a541171caf 100644 --- a/packages/web/src/content/docs/es/go.mdx +++ b/packages/web/src/content/docs/es/go.mdx @@ -92,20 +92,20 @@ Los límites se definen en valor en dólares. 
Esto significa que tu cantidad rea La siguiente tabla proporciona una cantidad estimada de peticiones basada en los patrones típicos de uso de Go: -| Model | peticiones por 5 horas | peticiones por semana | peticiones por mes | -| ------------ | ---------------------- | --------------------- | ------------------ | -| GLM-5.1 | 880 | 2,150 | 4,300 | -| GLM-5 | 1,150 | 2,880 | 5,750 | -| Kimi K2.5 | 1,850 | 4,630 | 9,250 | -| Kimi K2.6 | 1,150 | 2,880 | 5,750 | -| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | -| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | -| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | -| MiniMax M2.7 | 3,400 | 8,500 | 17,000 | -| MiniMax M2.5 | 6,300 | 15,900 | 31,800 | -| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 | +| Model | peticiones por 5 horas | peticiones por semana | peticiones por mes | +| ------------- | ---------------------- | --------------------- | ------------------ | +| GLM-5.1 | 880 | 2,150 | 4,300 | +| GLM-5 | 1,150 | 2,880 | 5,750 | +| Kimi K2.5 | 1,850 | 4,630 | 9,250 | +| Kimi K2.6 | 1,150 | 2,880 | 5,750 | +| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | +| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | +| MiniMax M2.7 | 3,400 | 8,500 | 17,000 | +| MiniMax M2.5 | 6,300 | 15,900 | 31,800 | +| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 | Las estimaciones se basan en los patrones de peticiones promedio observados: @@ -141,20 +141,20 @@ después de que hayas alcanzado tus límites de uso en lugar de bloquear las pet También puedes acceder a los modelos de Go a través de los siguientes endpoints de la API. 
-| Modelo | ID del modelo | Endpoint | Paquete de AI SDK | -| ------------ | ------------- | ------------------------------------------------ | --------------------------- | -| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Modelo | ID del modelo | Endpoint | Paquete de AI SDK | +| ------------- | ------------- | ------------------------------------------------ | --------------------------- | +| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | -| MiniMax M2.5 | minimax-m2.5 | 
`https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | -| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | -| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | +| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | +| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | +| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | +| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | El [ID del modelo](/docs/config/#models) en tu configuración de OpenCode usa el formato `opencode-go/`. Por ejemplo, para Kimi K2.6, usarías diff --git a/packages/web/src/content/docs/fr/go.mdx b/packages/web/src/content/docs/fr/go.mdx index b288ff1bf9..5f55128ed4 100644 --- a/packages/web/src/content/docs/fr/go.mdx +++ b/packages/web/src/content/docs/fr/go.mdx @@ -82,20 +82,20 @@ Les limites sont définies en valeur monétaire (dollars). 
Cela signifie que vot Le tableau ci-dessous fournit une estimation du nombre de requêtes basée sur des modèles d'utilisation typiques de Go : -| Model | requêtes par 5 heures | requêtes par semaine | requêtes par mois | -| ------------ | --------------------- | -------------------- | ----------------- | -| GLM-5.1 | 880 | 2,150 | 4,300 | -| GLM-5 | 1,150 | 2,880 | 5,750 | -| Kimi K2.5 | 1,850 | 4,630 | 9,250 | -| Kimi K2.6 | 1,150 | 2,880 | 5,750 | -| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | -| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | -| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | -| MiniMax M2.7 | 3,400 | 8,500 | 17,000 | -| MiniMax M2.5 | 6,300 | 15,900 | 31,800 | -| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 | +| Model | requêtes par 5 heures | requêtes par semaine | requêtes par mois | +| ------------- | --------------------- | -------------------- | ----------------- | +| GLM-5.1 | 880 | 2,150 | 4,300 | +| GLM-5 | 1,150 | 2,880 | 5,750 | +| Kimi K2.5 | 1,850 | 4,630 | 9,250 | +| Kimi K2.6 | 1,150 | 2,880 | 5,750 | +| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | +| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | +| MiniMax M2.7 | 3,400 | 8,500 | 17,000 | +| MiniMax M2.5 | 6,300 | 15,900 | 31,800 | +| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 | Les estimations sont basées sur les modèles de requêtes moyens observés : @@ -129,20 +129,20 @@ Si vous avez également des crédits sur votre solde Zen, vous pouvez activer l' Vous pouvez également accéder aux modèles Go via les points de terminaison d'API suivants. 
-| Modèle | ID de modèle | Point de terminaison | Package AI SDK | -| ------------ | ------------ | ------------------------------------------------ | --------------------------- | -| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Modèle | ID de modèle | Point de terminaison | Package AI SDK | +| ------------- | ------------- | ------------------------------------------------ | --------------------------- | +| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | -| MiniMax M2.5 | 
minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | -| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | -| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | +| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | +| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | +| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | +| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | L'[ID de modèle](/docs/config/#models) dans votre configuration OpenCode utilise le format `opencode-go/`. Par exemple, pour Kimi K2.6, vous utiliseriez `opencode-go/kimi-k2.6` dans votre configuration. diff --git a/packages/web/src/content/docs/go.mdx b/packages/web/src/content/docs/go.mdx index cd3e2c8445..946c70de30 100644 --- a/packages/web/src/content/docs/go.mdx +++ b/packages/web/src/content/docs/go.mdx @@ -92,20 +92,20 @@ Limits are defined in dollar value. 
This means your actual request count depends The table below provides an estimated request count based on typical Go usage patterns: -| Model | requests per 5 hour | requests per week | requests per month | -| ------------ | ------------------- | ----------------- | ------------------ | -| GLM-5.1 | 880 | 2,150 | 4,300 | -| GLM-5 | 1,150 | 2,880 | 5,750 | -| Kimi K2.5 | 1,850 | 4,630 | 9,250 | -| Kimi K2.6 | 1,150 | 2,880 | 5,750 | -| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | -| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | -| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | -| MiniMax M2.7 | 3,400 | 8,500 | 17,000 | -| MiniMax M2.5 | 6,300 | 15,900 | 31,800 | -| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 | +| Model | requests per 5 hour | requests per week | requests per month | +| ------------- | ------------------- | ----------------- | ------------------ | +| GLM-5.1 | 880 | 2,150 | 4,300 | +| GLM-5 | 1,150 | 2,880 | 5,750 | +| Kimi K2.5 | 1,850 | 4,630 | 9,250 | +| Kimi K2.6 | 1,150 | 2,880 | 5,750 | +| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | +| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | +| MiniMax M2.7 | 3,400 | 8,500 | 17,000 | +| MiniMax M2.5 | 6,300 | 15,900 | 31,800 | +| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 | Estimates are based on observed average request patterns: @@ -141,20 +141,20 @@ after you've reached your usage limits instead of blocking requests. You can also access Go models through the following API endpoints. 
-| Model | Model ID | Endpoint | AI SDK Package | -| ------------ | ------------ | ------------------------------------------------ | --------------------------- | -| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Model | Model ID | Endpoint | AI SDK Package | +| ------------- | ------------- | ------------------------------------------------ | --------------------------- | +| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | -| MiniMax M2.5 | minimax-m2.5 | 
`https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | -| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | -| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | +| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | +| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | +| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | +| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | The [model id](/docs/config/#models) in your OpenCode config uses the format `opencode-go/`. For example, for Kimi K2.6, you would diff --git a/packages/web/src/content/docs/it/go.mdx b/packages/web/src/content/docs/it/go.mdx index 9cf04d77d6..341a22c4cb 100644 --- a/packages/web/src/content/docs/it/go.mdx +++ b/packages/web/src/content/docs/it/go.mdx @@ -90,20 +90,20 @@ I limiti sono definiti in valore in dollari. 
Questo significa che il conteggio e La tabella seguente fornisce una stima del conteggio delle richieste in base a pattern di utilizzo tipici di Go: -| Model | richieste ogni 5 ore | richieste a settimana | richieste al mese | -| ------------ | -------------------- | --------------------- | ----------------- | -| GLM-5.1 | 880 | 2,150 | 4,300 | -| GLM-5 | 1,150 | 2,880 | 5,750 | -| Kimi K2.5 | 1,850 | 4,630 | 9,250 | -| Kimi K2.6 | 1,150 | 2,880 | 5,750 | -| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | -| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | -| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | -| MiniMax M2.7 | 3,400 | 8,500 | 17,000 | -| MiniMax M2.5 | 6,300 | 15,900 | 31,800 | -| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 | +| Model | richieste ogni 5 ore | richieste a settimana | richieste al mese | +| ------------- | -------------------- | --------------------- | ----------------- | +| GLM-5.1 | 880 | 2,150 | 4,300 | +| GLM-5 | 1,150 | 2,880 | 5,750 | +| Kimi K2.5 | 1,850 | 4,630 | 9,250 | +| Kimi K2.6 | 1,150 | 2,880 | 5,750 | +| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | +| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | +| MiniMax M2.7 | 3,400 | 8,500 | 17,000 | +| MiniMax M2.5 | 6,300 | 15,900 | 31,800 | +| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 | Le stime si basano sui pattern medi di richieste osservati: @@ -139,20 +139,20 @@ dopo che avrai raggiunto i limiti di utilizzo invece di bloccare le richieste. Puoi anche accedere ai modelli Go tramite i seguenti endpoint API. 
-| Modello | ID Modello | Endpoint | Pacchetto AI SDK | -| ------------ | ------------ | ------------------------------------------------ | --------------------------- | -| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Modello | ID Modello | Endpoint | Pacchetto AI SDK | +| ------------- | ------------- | ------------------------------------------------ | --------------------------- | +| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | -| MiniMax M2.5 | minimax-m2.5 | 
`https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | -| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | -| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | +| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | +| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | +| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | +| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | Il [model id](/docs/config/#models) nella tua OpenCode config utilizza il formato `opencode-go/`. Ad esempio, per Kimi K2.6, useresti diff --git a/packages/web/src/content/docs/ja/go.mdx b/packages/web/src/content/docs/ja/go.mdx index 40c1dbf36e..ddd5a66803 100644 --- a/packages/web/src/content/docs/ja/go.mdx +++ b/packages/web/src/content/docs/ja/go.mdx @@ -82,20 +82,20 @@ OpenCode Goには以下の制限が含まれています: 以下の表は、一般的なGoの利用パターンに基づいた推定リクエスト数を示しています: -| Model | 5時間あたりのリクエスト数 | 週間リクエスト数 | 月間リクエスト数 | -| ------------ | ------------------------- | ---------------- | ---------------- | -| GLM-5.1 | 880 | 2,150 | 4,300 | -| GLM-5 | 1,150 | 2,880 | 5,750 | -| Kimi K2.5 | 1,850 | 4,630 | 9,250 | -| Kimi K2.6 | 1,150 | 2,880 | 5,750 | -| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | -| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | -| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | -| MiniMax M2.7 | 3,400 | 8,500 | 17,000 | -| MiniMax M2.5 | 6,300 | 15,900 | 31,800 | -| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 | +| Model | 5時間あたりのリクエスト数 | 週間リクエスト数 | 月間リクエスト数 | +| ------------- | ------------------------- | ---------------- | ---------------- | +| GLM-5.1 
| 880 | 2,150 | 4,300 | +| GLM-5 | 1,150 | 2,880 | 5,750 | +| Kimi K2.5 | 1,850 | 4,630 | 9,250 | +| Kimi K2.6 | 1,150 | 2,880 | 5,750 | +| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | +| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | +| MiniMax M2.7 | 3,400 | 8,500 | 17,000 | +| MiniMax M2.5 | 6,300 | 15,900 | 31,800 | +| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 | 推定値は、観測された平均的なリクエストパターンに基づいています: @@ -129,20 +129,20 @@ Zen残高にクレジットがある場合は、コンソールで**Use balance* 以下のAPIエンドポイントを通じて、Goモデルにアクセスすることもできます。 -| Model | Model ID | Endpoint | AI SDK Package | -| ------------ | ------------ | ------------------------------------------------ | --------------------------- | -| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Model | Model ID | Endpoint | AI SDK Package | +| ------------- | ------------- | ------------------------------------------------ | --------------------------- | +| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | 
`@ai-sdk/openai-compatible` | +| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | -| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | -| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | -| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | +| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | +| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | +| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | +| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | OpenCode設定の[model id](/docs/config/#models)は、`opencode-go/`という形式を使用します。たとえば、Kimi K2.6の場合は、設定で`opencode-go/kimi-k2.6`を使用します。 diff --git a/packages/web/src/content/docs/ko/go.mdx b/packages/web/src/content/docs/ko/go.mdx index 4b3dbd27d0..da787040fb 100644 --- a/packages/web/src/content/docs/ko/go.mdx +++ b/packages/web/src/content/docs/ko/go.mdx @@ -82,20 +82,20 @@ OpenCode Go에는 다음과 같은 한도가 포함됩니다. 아래 표는 일반적인 Go 사용 패턴을 기준으로 한 예상 요청 횟수를 보여줍니다. 
-| Model | 5시간당 요청 횟수 | 주간 요청 횟수 | 월간 요청 횟수 | -| ------------ | ----------------- | -------------- | -------------- | -| GLM-5.1 | 880 | 2,150 | 4,300 | -| GLM-5 | 1,150 | 2,880 | 5,750 | -| Kimi K2.5 | 1,850 | 4,630 | 9,250 | -| Kimi K2.6 | 1,150 | 2,880 | 5,750 | -| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | -| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | -| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | -| MiniMax M2.7 | 3,400 | 8,500 | 17,000 | -| MiniMax M2.5 | 6,300 | 15,900 | 31,800 | -| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 | +| Model | 5시간당 요청 횟수 | 주간 요청 횟수 | 월간 요청 횟수 | +| ------------- | ----------------- | -------------- | -------------- | +| GLM-5.1 | 880 | 2,150 | 4,300 | +| GLM-5 | 1,150 | 2,880 | 5,750 | +| Kimi K2.5 | 1,850 | 4,630 | 9,250 | +| Kimi K2.6 | 1,150 | 2,880 | 5,750 | +| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | +| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | +| MiniMax M2.7 | 3,400 | 8,500 | 17,000 | +| MiniMax M2.5 | 6,300 | 15,900 | 31,800 | +| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 | 예상치는 관찰된 평균 요청 패턴을 기준으로 합니다. @@ -129,20 +129,20 @@ Zen 잔액에 크레딧도 있다면, console에서 **Use balance** 옵션을 다음 API 엔드포인트를 통해서도 Go 모델에 액세스할 수 있습니다. 
-| 모델 | 모델 ID | 엔드포인트 | AI SDK 패키지 | -| ------------ | ------------ | ------------------------------------------------ | --------------------------- | -| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| 모델 | 모델 ID | 엔드포인트 | AI SDK 패키지 | +| ------------- | ------------- | ------------------------------------------------ | --------------------------- | +| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | -| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | 
`@ai-sdk/anthropic` | -| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | -| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | +| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | +| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | +| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | +| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | OpenCode config의 [model id](/docs/config/#models)는 `opencode-go/` 형식을 사용합니다. 예를 들어 Kimi K2.6의 경우 config에서 `opencode-go/kimi-k2.6`를 사용하면 됩니다. diff --git a/packages/web/src/content/docs/nb/go.mdx b/packages/web/src/content/docs/nb/go.mdx index f062683e87..95c05417cf 100644 --- a/packages/web/src/content/docs/nb/go.mdx +++ b/packages/web/src/content/docs/nb/go.mdx @@ -92,20 +92,20 @@ Grensene er definert i dollarverdi. 
Dette betyr at ditt faktiske antall forespø Tabellen nedenfor gir et estimert antall forespørsler basert på typiske bruksmønstre for Go: -| Model | forespørsler per 5 timer | forespørsler per uke | forespørsler per måned | -| ------------ | ------------------------ | -------------------- | ---------------------- | -| GLM-5.1 | 880 | 2,150 | 4,300 | -| GLM-5 | 1,150 | 2,880 | 5,750 | -| Kimi K2.5 | 1,850 | 4,630 | 9,250 | -| Kimi K2.6 | 1,150 | 2,880 | 5,750 | -| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | -| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | -| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | -| MiniMax M2.7 | 3,400 | 8,500 | 17,000 | -| MiniMax M2.5 | 6,300 | 15,900 | 31,800 | -| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 | +| Model | forespørsler per 5 timer | forespørsler per uke | forespørsler per måned | +| ------------- | ------------------------ | -------------------- | ---------------------- | +| GLM-5.1 | 880 | 2,150 | 4,300 | +| GLM-5 | 1,150 | 2,880 | 5,750 | +| Kimi K2.5 | 1,850 | 4,630 | 9,250 | +| Kimi K2.6 | 1,150 | 2,880 | 5,750 | +| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | +| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | +| MiniMax M2.7 | 3,400 | 8,500 | 17,000 | +| MiniMax M2.5 | 6,300 | 15,900 | 31,800 | +| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 | Estimatene er basert på observerte gjennomsnittlige forespørselsmønstre: @@ -141,20 +141,20 @@ etter at du har nådd bruksgrensene dine, i stedet for å blokkere forespørsler Du kan også få tilgang til Go-modeller gjennom følgende API-endepunkter. 
-| Modell | Modell-ID | Endepunkt | AI SDK Package | -| ------------ | ------------ | ------------------------------------------------ | --------------------------- | -| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Modell | Modell-ID | Endepunkt | AI SDK Package | +| ------------- | ------------- | ------------------------------------------------ | --------------------------- | +| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | -| MiniMax M2.5 | minimax-m2.5 | 
`https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | -| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | -| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | +| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | +| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | +| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | +| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | [Modell-ID-en](/docs/config/#models) i din OpenCode-konfigurasjon bruker formatet `opencode-go/`. For eksempel, for Kimi K2.6, vil du diff --git a/packages/web/src/content/docs/pl/go.mdx b/packages/web/src/content/docs/pl/go.mdx index 12abb29bc1..9ae3ea34b8 100644 --- a/packages/web/src/content/docs/pl/go.mdx +++ b/packages/web/src/content/docs/pl/go.mdx @@ -86,20 +86,20 @@ Limity są zdefiniowane w wartości w dolarach. 
Oznacza to, że rzeczywista licz Poniższa tabela przedstawia szacunkową liczbę żądań na podstawie typowych wzorców korzystania z Go: -| Model | żądania na 5 godzin | żądania na tydzień | żądania na miesiąc | -| ------------ | ------------------- | ------------------ | ------------------ | -| GLM-5.1 | 880 | 2,150 | 4,300 | -| GLM-5 | 1,150 | 2,880 | 5,750 | -| Kimi K2.5 | 1,850 | 4,630 | 9,250 | -| Kimi K2.6 | 1,150 | 2,880 | 5,750 | -| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | -| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | -| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | -| MiniMax M2.7 | 3,400 | 8,500 | 17,000 | -| MiniMax M2.5 | 6,300 | 15,900 | 31,800 | -| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 | +| Model | żądania na 5 godzin | żądania na tydzień | żądania na miesiąc | +| ------------- | ------------------- | ------------------ | ------------------ | +| GLM-5.1 | 880 | 2,150 | 4,300 | +| GLM-5 | 1,150 | 2,880 | 5,750 | +| Kimi K2.5 | 1,850 | 4,630 | 9,250 | +| Kimi K2.6 | 1,150 | 2,880 | 5,750 | +| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | +| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | +| MiniMax M2.7 | 3,400 | 8,500 | 17,000 | +| MiniMax M2.5 | 6,300 | 15,900 | 31,800 | +| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 | Szacunki opierają się na zaobserwowanych średnich wzorcach żądań: @@ -133,20 +133,20 @@ Jeśli masz również środki na swoim saldzie Zen, możesz włączyć opcję ** Możesz również uzyskać dostęp do modeli Go za pośrednictwem następujących punktów końcowych API. 
-| Model | ID modelu | Punkt końcowy | Pakiet AI SDK | -| ------------ | ------------ | ------------------------------------------------ | --------------------------- | -| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Model | ID modelu | Punkt końcowy | Pakiet AI SDK | +| ------------- | ------------- | ------------------------------------------------ | --------------------------- | +| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | -| MiniMax M2.5 | minimax-m2.5 | 
`https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | -| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | -| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | +| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | +| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | +| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | +| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | [ID modelu](/docs/config/#models) w Twojej konfiguracji OpenCode używa formatu `opencode-go/`. Na przykład dla Kimi K2.6 należy użyć diff --git a/packages/web/src/content/docs/pt-br/go.mdx b/packages/web/src/content/docs/pt-br/go.mdx index b41cb0d0e7..7d4d90ed51 100644 --- a/packages/web/src/content/docs/pt-br/go.mdx +++ b/packages/web/src/content/docs/pt-br/go.mdx @@ -92,20 +92,20 @@ Os limites são definidos em valor em dólares. 
Isso significa que a sua contage A tabela abaixo fornece uma contagem estimada de requisições com base nos padrões típicos de uso do Go: -| Model | requisições por 5 horas | requisições por semana | requisições por mês | -| ------------ | ----------------------- | ---------------------- | ------------------- | -| GLM-5.1 | 880 | 2,150 | 4,300 | -| GLM-5 | 1,150 | 2,880 | 5,750 | -| Kimi K2.5 | 1,850 | 4,630 | 9,250 | -| Kimi K2.6 | 1,150 | 2,880 | 5,750 | -| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | -| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | -| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | -| MiniMax M2.7 | 3,400 | 8,500 | 17,000 | -| MiniMax M2.5 | 6,300 | 15,900 | 31,800 | -| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 | +| Model | requisições por 5 horas | requisições por semana | requisições por mês | +| ------------- | ----------------------- | ---------------------- | ------------------- | +| GLM-5.1 | 880 | 2,150 | 4,300 | +| GLM-5 | 1,150 | 2,880 | 5,750 | +| Kimi K2.5 | 1,850 | 4,630 | 9,250 | +| Kimi K2.6 | 1,150 | 2,880 | 5,750 | +| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | +| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | +| MiniMax M2.7 | 3,400 | 8,500 | 17,000 | +| MiniMax M2.5 | 6,300 | 15,900 | 31,800 | +| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 | As estimativas baseiam-se nos padrões médios de requisições observados: @@ -141,20 +141,20 @@ após você atingir os seus limites de uso em vez de bloquear as requisições. Você também pode acessar os modelos do Go através dos seguintes endpoints de API. 
-| Modelo | ID do Modelo | Endpoint | Pacote do AI SDK | -| ------------ | ------------ | ------------------------------------------------ | --------------------------- | -| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Modelo | ID do Modelo | Endpoint | Pacote do AI SDK | +| ------------- | ------------- | ------------------------------------------------ | --------------------------- | +| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | -| MiniMax M2.5 | minimax-m2.5 | 
`https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | -| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | -| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | +| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | +| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | +| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | +| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | O [ID do modelo](/docs/config/#models) na sua configuração do OpenCode usa o formato `opencode-go/`. Por exemplo, para o Kimi K2.6, você usaria diff --git a/packages/web/src/content/docs/ru/go.mdx b/packages/web/src/content/docs/ru/go.mdx index 62ce99ef9e..a8d33f296d 100644 --- a/packages/web/src/content/docs/ru/go.mdx +++ b/packages/web/src/content/docs/ru/go.mdx @@ -92,20 +92,20 @@ OpenCode Go включает следующие лимиты: В таблице ниже приведено примерное количество запросов на основе типичных сценариев использования Go: -| Model | запросов за 5 часов | запросов в неделю | запросов в месяц | -| ------------ | ------------------- | ----------------- | ---------------- | -| GLM-5.1 | 880 | 2,150 | 4,300 | -| GLM-5 | 1,150 | 2,880 | 5,750 | -| Kimi K2.5 | 1,850 | 4,630 | 9,250 | -| Kimi K2.6 | 1,150 | 2,880 | 5,750 | -| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | -| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | -| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | -| MiniMax M2.7 | 3,400 | 8,500 | 17,000 | -| MiniMax M2.5 | 6,300 | 15,900 | 31,800 | -| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 | +| Model | запросов за 5 часов | запросов в 
неделю | запросов в месяц | +| ------------- | ------------------- | ----------------- | ---------------- | +| GLM-5.1 | 880 | 2,150 | 4,300 | +| GLM-5 | 1,150 | 2,880 | 5,750 | +| Kimi K2.5 | 1,850 | 4,630 | 9,250 | +| Kimi K2.6 | 1,150 | 2,880 | 5,750 | +| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | +| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | +| MiniMax M2.7 | 3,400 | 8,500 | 17,000 | +| MiniMax M2.5 | 6,300 | 15,900 | 31,800 | +| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 | Оценки основаны на наблюдаемых средних показателях запросов: @@ -141,20 +141,20 @@ OpenCode Go включает следующие лимиты: Вы также можете получить доступ к моделям Go через следующие API-эндпоинты. -| Модель | ID модели | Эндпоинт | Пакет AI SDK | -| ------------ | ------------ | ------------------------------------------------ | --------------------------- | -| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Модель | ID модели | Эндпоинт | Пакет AI SDK | +| ------------- | ------------- | ------------------------------------------------ | --------------------------- | +| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Kimi K2.5 | 
kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | -| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | -| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | -| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | +| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | +| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | +| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | +| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | [ID модели](/docs/config/#models) в вашем конфиге OpenCode использует формат `opencode-go/`. 
Например, для Kimi K2.6 вам нужно diff --git a/packages/web/src/content/docs/th/go.mdx b/packages/web/src/content/docs/th/go.mdx index 84ce3546c1..fb0262c958 100644 --- a/packages/web/src/content/docs/th/go.mdx +++ b/packages/web/src/content/docs/th/go.mdx @@ -82,20 +82,20 @@ OpenCode Go มีขีดจำกัดดังต่อไปนี้: ตารางด้านล่างแสดงจำนวน request โดยประมาณตามรูปแบบการใช้งานปกติของ Go: -| Model | requests ต่อ 5 ชั่วโมง | requests ต่อสัปดาห์ | requests ต่อเดือน | -| ------------ | ---------------------- | ------------------- | ----------------- | -| GLM-5.1 | 880 | 2,150 | 4,300 | -| GLM-5 | 1,150 | 2,880 | 5,750 | -| Kimi K2.5 | 1,850 | 4,630 | 9,250 | -| Kimi K2.6 | 1,150 | 2,880 | 5,750 | -| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | -| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | -| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | -| MiniMax M2.7 | 3,400 | 8,500 | 17,000 | -| MiniMax M2.5 | 6,300 | 15,900 | 31,800 | -| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 | +| Model | requests ต่อ 5 ชั่วโมง | requests ต่อสัปดาห์ | requests ต่อเดือน | +| ------------- | ---------------------- | ------------------- | ----------------- | +| GLM-5.1 | 880 | 2,150 | 4,300 | +| GLM-5 | 1,150 | 2,880 | 5,750 | +| Kimi K2.5 | 1,850 | 4,630 | 9,250 | +| Kimi K2.6 | 1,150 | 2,880 | 5,750 | +| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | +| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | +| MiniMax M2.7 | 3,400 | 8,500 | 17,000 | +| MiniMax M2.5 | 6,300 | 15,900 | 31,800 | +| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 | การประมาณการอ้างอิงจากรูปแบบการใช้งาน request โดยเฉลี่ยที่สังเกตพบ: @@ -129,20 +129,20 @@ OpenCode Go มีขีดจำกัดดังต่อไปนี้: คุณสามารถเข้าถึงโมเดลของ Go ผ่าน API endpoints ต่อไปนี้ได้เช่นกัน -| Model | Model ID | Endpoint | AI SDK Package | -| ------------ | ------------ | 
------------------------------------------------ | --------------------------- | -| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Model | Model ID | Endpoint | AI SDK Package | +| ------------- | ------------- | ------------------------------------------------ | --------------------------- | +| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | -| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | -| Qwen3.6 Plus | qwen3.6-plus | 
`https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | -| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | +| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | +| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | +| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | +| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | [model id](/docs/config/#models) ใน OpenCode config ของคุณจะใช้รูปแบบ `opencode-go/` ตัวอย่างเช่น สำหรับ Kimi K2.6 คุณจะใช้ `opencode-go/kimi-k2.6` ใน config ของคุณ diff --git a/packages/web/src/content/docs/tr/go.mdx b/packages/web/src/content/docs/tr/go.mdx index edd102685b..96a1ca3e2f 100644 --- a/packages/web/src/content/docs/tr/go.mdx +++ b/packages/web/src/content/docs/tr/go.mdx @@ -82,20 +82,20 @@ Limitler dolar değeri üzerinden belirlenmiştir. 
Bu, gerçek istek sayınızı Aşağıdaki tablo, tipik Go kullanım modellerine dayalı tahmini bir istek sayısı sunmaktadır: -| Model | 5 saatte bir istek | haftalık istek | aylık istek | -| ------------ | ------------------ | -------------- | ----------- | -| GLM-5.1 | 880 | 2,150 | 4,300 | -| GLM-5 | 1,150 | 2,880 | 5,750 | -| Kimi K2.5 | 1,850 | 4,630 | 9,250 | -| Kimi K2.6 | 1,150 | 2,880 | 5,750 | -| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | -| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | -| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | -| MiniMax M2.7 | 3,400 | 8,500 | 17,000 | -| MiniMax M2.5 | 6,300 | 15,900 | 31,800 | -| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 | +| Model | 5 saatte bir istek | haftalık istek | aylık istek | +| ------------- | ------------------ | -------------- | ----------- | +| GLM-5.1 | 880 | 2,150 | 4,300 | +| GLM-5 | 1,150 | 2,880 | 5,750 | +| Kimi K2.5 | 1,850 | 4,630 | 9,250 | +| Kimi K2.6 | 1,150 | 2,880 | 5,750 | +| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | +| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | +| MiniMax M2.7 | 3,400 | 8,500 | 17,000 | +| MiniMax M2.5 | 6,300 | 15,900 | 31,800 | +| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 | Tahminler, gözlemlenen ortalama istek modellerine dayanmaktadır: @@ -129,20 +129,20 @@ Eğer Zen bakiyenizde kredileriniz varsa, konsoldan **Bakiye kullan (Use balance Go modellerine aşağıdaki API uç noktaları aracılığıyla da erişebilirsiniz. 
-| Model | Model ID | Uç Nokta | AI SDK Paketi | -| ------------ | ------------ | ------------------------------------------------ | --------------------------- | -| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Model | Model ID | Uç Nokta | AI SDK Paketi | +| ------------- | ------------- | ------------------------------------------------ | --------------------------- | +| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | -| MiniMax M2.5 | minimax-m2.5 | 
`https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | -| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | -| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | +| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | +| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | +| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | +| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | OpenCode yapılandırmanızdaki [model id](/docs/config/#models) formatı `opencode-go/` şeklindedir. Örneğin, Kimi K2.6 için yapılandırmanızda `opencode-go/kimi-k2.6` kullanmalısınız. diff --git a/packages/web/src/content/docs/zh-cn/go.mdx b/packages/web/src/content/docs/zh-cn/go.mdx index 0c1cf98274..f52f5b572e 100644 --- a/packages/web/src/content/docs/zh-cn/go.mdx +++ b/packages/web/src/content/docs/zh-cn/go.mdx @@ -82,20 +82,20 @@ OpenCode Go 包含以下限制: 下表提供了基于典型 Go 使用模式的预估请求数: -| Model | 每 5 小时请求数 | 每周请求数 | 每月请求数 | -| ------------ | --------------- | ---------- | ---------- | -| GLM-5.1 | 880 | 2,150 | 4,300 | -| GLM-5 | 1,150 | 2,880 | 5,750 | -| Kimi K2.6 | 1,150 | 2,880 | 5,750 | -| Kimi K2.5 | 1,850 | 4,630 | 9,250 | -| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | -| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | -| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | -| MiniMax M2.7 | 3,400 | 8,500 | 17,000 | -| MiniMax M2.5 | 6,300 | 15,900 | 31,800 | -| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 | +| Model | 每 5 小时请求数 | 每周请求数 | 每月请求数 | +| ------------- | --------------- | ---------- | ---------- | +| GLM-5.1 | 880 | 2,150 | 4,300 | +| 
GLM-5 | 1,150 | 2,880 | 5,750 | +| Kimi K2.6 | 1,150 | 2,880 | 5,750 | +| Kimi K2.5 | 1,850 | 4,630 | 9,250 | +| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | +| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | +| MiniMax M2.7 | 3,400 | 8,500 | 17,000 | +| MiniMax M2.5 | 6,300 | 15,900 | 31,800 | +| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 | 预估值基于观察到的平均请求模式: @@ -129,20 +129,20 @@ OpenCode Go 包含以下限制: 你也可以通过以下 API 端点访问 Go 模型。 -| 模型 | 模型 ID | 端点 | AI SDK 包 | -| ------------ | ------------ | ------------------------------------------------ | --------------------------- | -| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| 模型 | 模型 ID | 端点 | AI SDK 包 | +| ------------- | ------------- | ------------------------------------------------ | --------------------------- | +| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | 
`@ai-sdk/openai-compatible` | +| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | -| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | -| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | -| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | +| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | +| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | +| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | +| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | 你 OpenCode 配置中的 [模型 ID](/docs/config/#models) 使用 `opencode-go/` 格式。例如,对于 Kimi K2.6,你将在配置中使用 `opencode-go/kimi-k2.6`。 diff --git a/packages/web/src/content/docs/zh-tw/go.mdx b/packages/web/src/content/docs/zh-tw/go.mdx index c1016028ba..481c08cec5 100644 --- a/packages/web/src/content/docs/zh-tw/go.mdx +++ b/packages/web/src/content/docs/zh-tw/go.mdx @@ -82,20 +82,20 @@ OpenCode Go 包含以下限制: 下表提供了基於典型 Go 使用模式的預估請求次數: -| Model | 每 5 小時請求數 | 每週請求數 | 每月請求數 | -| ------------ | --------------- | ---------- | ---------- | -| GLM-5.1 | 880 | 2,150 | 4,300 | -| GLM-5 | 1,150 | 2,880 | 5,750 | -| Kimi K2.5 | 1,850 | 4,630 | 9,250 | -| Kimi K2.6 | 1,150 | 2,880 | 5,750 | -| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | -| 
MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | -| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | -| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | -| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | -| MiniMax M2.7 | 3,400 | 8,500 | 17,000 | -| MiniMax M2.5 | 6,300 | 15,900 | 31,800 | -| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 | +| Model | 每 5 小時請求數 | 每週請求數 | 每月請求數 | +| ------------- | --------------- | ---------- | ---------- | +| GLM-5.1 | 880 | 2,150 | 4,300 | +| GLM-5 | 1,150 | 2,880 | 5,750 | +| Kimi K2.5 | 1,850 | 4,630 | 9,250 | +| Kimi K2.6 | 1,150 | 2,880 | 5,750 | +| MiMo-V2-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2-Omni | 2,150 | 5,450 | 10,900 | +| MiMo-V2.5-Pro | 1,290 | 3,225 | 6,450 | +| MiMo-V2.5 | 2,150 | 5,450 | 10,900 | +| Qwen3.6 Plus | 3,300 | 8,200 | 16,300 | +| MiniMax M2.7 | 3,400 | 8,500 | 17,000 | +| MiniMax M2.5 | 6,300 | 15,900 | 31,800 | +| Qwen3.5 Plus | 10,200 | 25,200 | 50,500 | 預估值是基於觀察到的平均請求模式: @@ -129,20 +129,20 @@ OpenCode Go 包含以下限制: 您也可以透過以下 API 端點存取 Go 模型。 -| 模型 | 模型 ID | 端點 | AI SDK 套件 | -| ------------ | ------------ | ------------------------------------------------ | --------------------------- | -| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| 模型 | 模型 ID | 端點 | AI SDK 套件 | +| ------------- | ------------- | ------------------------------------------------ | --------------------------- | +| GLM-5.1 | glm-5.1 | `https://opencode.ai/zen/go/v1/chat/completions` | 
`@ai-sdk/openai-compatible` | +| GLM-5 | glm-5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Kimi K2.5 | kimi-k2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| Kimi K2.6 | kimi-k2.6 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2-Pro | mimo-v2-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiMo-V2-Omni | mimo-v2-omni | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | | MiMo-V2.5-Pro | mimo-v2.5-pro | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | -| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | -| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | -| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | -| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | +| MiMo-V2.5 | mimo-v2.5 | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/openai-compatible` | +| MiniMax M2.7 | minimax-m2.7 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | +| MiniMax M2.5 | minimax-m2.5 | `https://opencode.ai/zen/go/v1/messages` | `@ai-sdk/anthropic` | +| Qwen3.6 Plus | qwen3.6-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | +| Qwen3.5 Plus | qwen3.5-plus | `https://opencode.ai/zen/go/v1/chat/completions` | `@ai-sdk/alibaba` | 您的 OpenCode 設定中的 [model id](/docs/config/#models) 使用 `opencode-go/` 格式。例如,Kimi K2.6 在設定中應使用 `opencode-go/kimi-k2.6`。 From 5cfd618faf13195a8d8cfdc2c33acae072c20014 Mon Sep 17 00:00:00 2001 From: Caleb Norton Date: Wed, 22 Apr 2026 17:47:59 -0500 Subject: [PATCH 69/73] chore: update nix 
bun version (#23881) --- flake.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/flake.lock b/flake.lock index 805be8739b..1c8e62bd82 100644 --- a/flake.lock +++ b/flake.lock @@ -2,11 +2,11 @@ "nodes": { "nixpkgs": { "locked": { - "lastModified": 1773909469, - "narHash": "sha256-vglVrLfHjFIzIdV9A27Ugul6rh3I1qHbbitGW7dk420=", + "lastModified": 1776683584, + "narHash": "sha256-NuTLMrr10Tng72hurYG8jYQ4XKK8wnpJmOGcPiis96g=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "7149c06513f335be57f26fcbbbe34afda923882b", + "rev": "9dd5558b06dbdacbf635a3dd36dce1b1a7ee3a89", "type": "github" }, "original": { From 8cade05bc668a4889ddc359817eb50627cbd50cf Mon Sep 17 00:00:00 2001 From: Luke Parker <10430890+Hona@users.noreply.github.com> Date: Thu, 23 Apr 2026 09:24:11 +1000 Subject: [PATCH 70/73] feat: support pull diagnostics in the LSP client (C#, Kotlin, etc) (#23771) --- packages/opencode/src/cli/cmd/debug/lsp.ts | 3 +- packages/opencode/src/lsp/client.ts | 574 +++++++++++++++--- packages/opencode/src/lsp/lsp.ts | 16 +- packages/opencode/src/lsp/server.ts | 2 +- packages/opencode/src/npm/index.ts | 15 +- packages/opencode/src/tool/apply_patch.ts | 2 +- packages/opencode/src/tool/edit.ts | 2 +- packages/opencode/src/tool/lsp.ts | 2 +- packages/opencode/src/tool/read.ts | 2 +- packages/opencode/src/tool/write.ts | 2 +- .../test/fixture/lsp/fake-lsp-server.js | 220 ++++++- packages/opencode/test/lsp/client.test.ts | 404 +++++++++++- 12 files changed, 1123 insertions(+), 121 deletions(-) diff --git a/packages/opencode/src/cli/cmd/debug/lsp.ts b/packages/opencode/src/cli/cmd/debug/lsp.ts index 185cab9c75..47db6358b6 100644 --- a/packages/opencode/src/cli/cmd/debug/lsp.ts +++ b/packages/opencode/src/cli/cmd/debug/lsp.ts @@ -23,8 +23,7 @@ const DiagnosticsCommand = cmd({ const out = await AppRuntime.runPromise( LSP.Service.use((lsp) => Effect.gen(function* () { - yield* lsp.touchFile(args.file, true) - yield* Effect.sleep(1000) + yield* 
lsp.touchFile(args.file, "full") return yield* lsp.diagnostics() }), ), diff --git a/packages/opencode/src/lsp/client.ts b/packages/opencode/src/lsp/client.ts index b20e8ae7f0..b0418ca3f5 100644 --- a/packages/opencode/src/lsp/client.ts +++ b/packages/opencode/src/lsp/client.ts @@ -14,6 +14,16 @@ import { withTimeout } from "../util/timeout" import { Filesystem } from "../util" const DIAGNOSTICS_DEBOUNCE_MS = 150 +const DIAGNOSTICS_DOCUMENT_WAIT_TIMEOUT_MS = 5_000 +const DIAGNOSTICS_FULL_WAIT_TIMEOUT_MS = 10_000 +const DIAGNOSTICS_REQUEST_TIMEOUT_MS = 3_000 + +const INITIALIZE_TIMEOUT_MS = 45_000 + +// LSP spec constants +const FILE_CHANGE_CREATED = 1 +const FILE_CHANGE_CHANGED = 2 +const TEXT_DOCUMENT_SYNC_INCREMENTAL = 2 const log = Log.create({ service: "lsp.client" }) @@ -38,48 +48,194 @@ export const Event = { ), } +type DocumentDiagnosticReport = { + items?: Diagnostic[] + relatedDocuments?: Record +} + +type WorkspaceDiagnosticReport = { + items?: { + uri?: string + items?: Diagnostic[] + }[] +} + +type DiagnosticRequestResult = { + handled: boolean + matched: boolean + byFile: Map +} + +type CapabilityRegistration = { + id: string + method: string + registerOptions?: { + identifier?: string + workspaceDiagnostics?: boolean + } +} + +type ServerCapabilities = { + textDocumentSync?: + | number + | { + change?: number + } + diagnosticProvider?: unknown + [key: string]: unknown +} + +function getFilePath(uri: string) { + if (!uri.startsWith("file://")) return + return Filesystem.normalizePath(fileURLToPath(uri)) +} + +function getSyncKind(capabilities?: ServerCapabilities) { + if (!capabilities) return + const sync = capabilities.textDocumentSync + if (typeof sync === "number") return sync + return sync?.change +} + +function endPosition(text: string) { + const lines = text.split(/\r\n|\r|\n/) + return { + line: lines.length - 1, + character: lines.at(-1)?.length ?? 
0, + } +} + +function dedupeDiagnostics(items: Diagnostic[]) { + const seen = new Set() + return items.filter((item) => { + const key = JSON.stringify({ + code: item.code, + severity: item.severity, + message: item.message, + source: item.source, + range: item.range, + }) + if (seen.has(key)) return false + seen.add(key) + return true + }) +} + +function configurationValue(settings: unknown, section?: string) { + if (!section) return settings ?? null + const result = section.split(".").reduce((acc, key) => { + if (!acc || typeof acc !== "object" || !(key in acc)) return undefined + return (acc as Record)[key] + }, settings) + return result ?? null +} + +// TypeScript's built-in LSP pushes diagnostics aggressively on first open. +// We seed the push cache on the very first publish so waitForFreshPush can +// resolve immediately instead of waiting for a second debounced push. +function shouldSeedDiagnosticsOnFirstPush(serverID: string) { + return serverID === "typescript" +} + export async function create(input: { serverID: string; server: LSPServer.Handle; root: string; directory: string }) { - const l = log.clone().tag("serverID", input.serverID) - l.info("starting client") + const logger = log.clone().tag("serverID", input.serverID) + logger.info("starting client") const connection = createMessageConnection( new StreamMessageReader(input.server.process.stdout as any), new StreamMessageWriter(input.server.process.stdin as any), ) + // Server stderr can contain both real errors and routine informational logs, + // which is normal stderr practice for some tools. Keep the raw stream at + // debug so users can opt in with --print-logs --log-level DEBUG without + // polluting normal logs. 
+ input.server.process.stderr?.on("data", (data: Buffer) => { + const text = data.toString().trim() + if (text) logger.debug("server stderr", { text: text.slice(0, 1000) }) + }) + + // --- Connection state --- + + const pushDiagnostics = new Map() + const pullDiagnostics = new Map() + const published = new Map() + const diagnosticRegistrations = new Map() + const registrationListeners = new Set<() => void>() + const mergedDiagnostics = (filePath: string) => + dedupeDiagnostics([...(pushDiagnostics.get(filePath) ?? []), ...(pullDiagnostics.get(filePath) ?? [])]) + const updatePushDiagnostics = (filePath: string, next: Diagnostic[]) => { + pushDiagnostics.set(filePath, next) + Bus.publish(Event.Diagnostics, { path: filePath, serverID: input.serverID }) + } + const updatePullDiagnostics = (filePath: string, next: Diagnostic[]) => { + pullDiagnostics.set(filePath, next) + } + const emitRegistrationChange = () => { + for (const listener of [...registrationListeners]) listener() + } + + // --- LSP connection handlers --- - const diagnostics = new Map() connection.onNotification("textDocument/publishDiagnostics", (params) => { - const filePath = Filesystem.normalizePath(fileURLToPath(params.uri)) - l.info("textDocument/publishDiagnostics", { + const filePath = getFilePath(params.uri) + if (!filePath) return + logger.info("textDocument/publishDiagnostics", { path: filePath, count: params.diagnostics.length, + version: params.version, }) - const exists = diagnostics.has(filePath) - diagnostics.set(filePath, params.diagnostics) - if (!exists && input.serverID === "typescript") return - Bus.publish(Event.Diagnostics, { path: filePath, serverID: input.serverID }) + published.set(filePath, { + at: Date.now(), + version: typeof params.version === "number" ? 
params.version : undefined, + }) + if (shouldSeedDiagnosticsOnFirstPush(input.serverID) && !pushDiagnostics.has(filePath)) { + pushDiagnostics.set(filePath, params.diagnostics) + return + } + updatePushDiagnostics(filePath, params.diagnostics) }) connection.onRequest("window/workDoneProgress/create", (params) => { - l.info("window/workDoneProgress/create", params) + logger.info("window/workDoneProgress/create", params) return null }) - connection.onRequest("workspace/configuration", async () => { - // Return server initialization options - return [input.server.initialization ?? {}] + connection.onRequest("workspace/configuration", async (params) => { + const items = (params as { items?: { section?: string }[] }).items ?? [] + return items.map((item) => configurationValue(input.server.initialization, item.section)) + }) + connection.onRequest("client/registerCapability", async (params) => { + const registrations = (params as { registrations?: CapabilityRegistration[] }).registrations ?? [] + let changed = false + for (const registration of registrations) { + if (registration.method !== "textDocument/diagnostic") continue + diagnosticRegistrations.set(registration.id, registration) + changed = true + } + if (changed) emitRegistrationChange() + }) + connection.onRequest("client/unregisterCapability", async (params) => { + const registrations = (params as { unregisterations?: { id: string; method: string }[] }).unregisterations ?? 
[] + let changed = false + for (const registration of registrations) { + if (registration.method !== "textDocument/diagnostic") continue + diagnosticRegistrations.delete(registration.id) + changed = true + } + if (changed) emitRegistrationChange() }) - connection.onRequest("client/registerCapability", async () => {}) - connection.onRequest("client/unregisterCapability", async () => {}) connection.onRequest("workspace/workspaceFolders", async () => [ { name: "workspace", uri: pathToFileURL(input.root).href, }, ]) + connection.onRequest("workspace/diagnostic/refresh", async () => null) connection.listen() - l.info("sending initialize") - await withTimeout( - connection.sendRequest("initialize", { + // --- Initialize handshake --- + + logger.info("sending initialize") + const initialized = await withTimeout( + connection.sendRequest<{ capabilities?: ServerCapabilities }>("initialize", { rootUri: pathToFileURL(input.root).href, processId: input.server.process.pid, workspaceFolders: [ @@ -100,21 +256,28 @@ export async function create(input: { serverID: string; server: LSPServer.Handle didChangeWatchedFiles: { dynamicRegistration: true, }, + diagnostics: { + refreshSupport: false, + }, }, textDocument: { synchronization: { didOpen: true, didChange: true, }, + diagnostic: { + dynamicRegistration: true, + relatedDocumentSupport: true, + }, publishDiagnostics: { - versionSupport: true, + versionSupport: false, }, }, }, }), - 45_000, + INITIALIZE_TIMEOUT_MS, ).catch((err) => { - l.error("initialize error", { error: err }) + logger.error("initialize error", { error: err }) throw new InitializeError( { serverID: input.serverID }, { @@ -123,6 +286,9 @@ export async function create(input: { serverID: string; server: LSPServer.Handle ) }) + const syncKind = getSyncKind(initialized.capabilities) + const hasStaticPullDiagnostics = Boolean(initialized.capabilities?.diagnosticProvider) + await connection.sendNotification("initialized", {}) if (input.server.initialization) { @@ 
-131,9 +297,271 @@ export async function create(input: { serverID: string; server: LSPServer.Handle }) } - const files: { - [path: string]: number - } = {} + const files: Record = {} + + // --- Diagnostic helpers --- + + const mergeResults = (filePath: string, results: DiagnosticRequestResult[]) => { + const handled = results.some((result) => result.handled) + const matched = results.some((result) => result.matched) + if (!handled) return { handled: false, matched: false } + + const merged = new Map() + for (const result of results) { + for (const [target, items] of result.byFile.entries()) { + const existing = merged.get(target) ?? [] + merged.set(target, existing.concat(items)) + } + } + + if (matched && !merged.has(filePath)) merged.set(filePath, []) + for (const [target, items] of merged.entries()) { + updatePullDiagnostics(target, dedupeDiagnostics(items)) + } + + return { handled, matched } + } + + async function requestDiagnosticReport(filePath: string, identifier?: string): Promise { + const report = await withTimeout( + connection.sendRequest("textDocument/diagnostic", { + ...(identifier ? { identifier } : {}), + textDocument: { + uri: pathToFileURL(filePath).href, + }, + }), + DIAGNOSTICS_REQUEST_TIMEOUT_MS, + ).catch(() => null) + if (!report) return { handled: false, matched: false, byFile: new Map() } + + const byFile = new Map() + const push = (target: string, items: Diagnostic[]) => { + const existing = byFile.get(target) ?? [] + byFile.set(target, existing.concat(items)) + } + + let handled = false + let matched = false + if (Array.isArray(report.items)) { + push(filePath, report.items) + handled = true + matched = true + } + for (const [uri, related] of Object.entries(report.relatedDocuments ?? 
{})) { + const relatedPath = getFilePath(uri) + if (!relatedPath || !Array.isArray(related.items)) continue + push(relatedPath, related.items) + handled = true + matched = matched || relatedPath === filePath + } + + return { handled, matched, byFile } + } + + async function requestWorkspaceDiagnosticReport(filePath: string, identifier?: string): Promise { + const report = await withTimeout( + connection.sendRequest("workspace/diagnostic", { + ...(identifier ? { identifier } : {}), + previousResultIds: [], + }), + DIAGNOSTICS_REQUEST_TIMEOUT_MS, + ).catch(() => null) + if (!report) return { handled: false, matched: false, byFile: new Map() } + + const byFile = new Map() + let matched = false + for (const item of report.items ?? []) { + const relatedPath = item.uri ? getFilePath(item.uri) : undefined + if (!relatedPath || !Array.isArray(item.items)) continue + const existing = byFile.get(relatedPath) ?? [] + byFile.set(relatedPath, existing.concat(item.items)) + matched = matched || relatedPath === filePath + } + + return { handled: true, matched, byFile } + } + + function documentPullState() { + const documentRegistrations = [...diagnosticRegistrations.values()].filter( + (registration) => registration.registerOptions?.workspaceDiagnostics !== true, + ) + return { + documentIdentifiers: [...new Set(documentRegistrations.flatMap((registration) => registration.registerOptions?.identifier ?? []))], + supported: hasStaticPullDiagnostics || documentRegistrations.length > 0, + } + } + + function workspacePullState() { + const workspaceRegistrations = [...diagnosticRegistrations.values()].filter( + (registration) => registration.registerOptions?.workspaceDiagnostics === true, + ) + return { + workspaceIdentifiers: [...new Set(workspaceRegistrations.flatMap((registration) => registration.registerOptions?.identifier ?? 
[]))], + supported: workspaceRegistrations.length > 0, + } + } + + const hasCurrentFileDiagnostics = (filePath: string, results: DiagnosticRequestResult[]) => + results.some((result) => (result.byFile.get(filePath)?.length ?? 0) > 0) + + async function requestDiagnostics( + filePath: string, + requests: Promise[], + done: (results: DiagnosticRequestResult[]) => boolean, + ) { + if (!requests.length) return { handled: false, matched: false } + + const results: DiagnosticRequestResult[] = [] + return new Promise<{ handled: boolean; matched: boolean }>((resolve) => { + let pending = requests.length + let resolved = false + const finish = (merged: { handled: boolean; matched: boolean }, force = false) => { + if (resolved) return + if (!force && !done(results)) return + resolved = true + resolve(merged) + } + + for (const request of requests) { + request.then((result) => { + results.push(result) + pending -= 1 + const merged = mergeResults(filePath, results) + finish(merged) + if (pending === 0) finish(merged, true) + }) + } + }) + } + + // LATENCY-CRITICAL: dispatch identifier pulls in parallel and unblock once one + // batch already produced diagnostics for the current file. Let slower pulls keep + // merging in the background; do not sequence identifier-by-identifier, and do + // not add a post-match settle/debounce delay. See PR #23771. 
+ async function requestDocumentDiagnostics(filePath: string) { + const state = documentPullState() + if (!state.supported) return { handled: false, matched: false } + return requestDiagnostics( + filePath, + [ + requestDiagnosticReport(filePath), + ...state.documentIdentifiers.map((identifier) => requestDiagnosticReport(filePath, identifier)), + ], + (results) => hasCurrentFileDiagnostics(filePath, results), + ) + } + + async function requestFullDiagnostics(filePath: string) { + const documentState = documentPullState() + const workspaceState = workspacePullState() + if (!documentState.supported && !workspaceState.supported) return { handled: false, matched: false } + return mergeResults( + filePath, + await Promise.all([ + ...(documentState.supported ? [requestDiagnosticReport(filePath)] : []), + ...documentState.documentIdentifiers.map((identifier) => requestDiagnosticReport(filePath, identifier)), + ...(workspaceState.supported ? [requestWorkspaceDiagnosticReport(filePath)] : []), + ...workspaceState.workspaceIdentifiers.map((identifier) => requestWorkspaceDiagnosticReport(filePath, identifier)), + ]), + ) + } + + function waitForRegistrationChange(timeout: number) { + if (timeout <= 0) return Promise.resolve(false) + return new Promise((resolve) => { + let finished = false + let timer: ReturnType | undefined + const finish = (result: boolean) => { + if (finished) return + finished = true + if (timer) clearTimeout(timer) + registrationListeners.delete(listener) + resolve(result) + } + const listener = () => finish(true) + registrationListeners.add(listener) + timer = setTimeout(() => finish(false), timeout) + }) + } + + function waitForFreshPush(request: { path: string; version: number; after: number; timeout: number }) { + if (request.timeout <= 0) return Promise.resolve(false) + return new Promise((resolve) => { + let finished = false + let debounceTimer: ReturnType | undefined + let timeoutTimer: ReturnType | undefined + let unsub: (() => void) | undefined + 
const finish = (result: boolean) => { + if (finished) return + finished = true + if (debounceTimer) clearTimeout(debounceTimer) + if (timeoutTimer) clearTimeout(timeoutTimer) + unsub?.() + resolve(result) + } + const schedule = () => { + const hit = published.get(request.path) + if (!hit) return + if (typeof hit.version === "number" && hit.version !== request.version) return + if (hit.at < request.after && hit.version !== request.version) return + if (debounceTimer) clearTimeout(debounceTimer) + debounceTimer = setTimeout(() => finish(true), Math.max(0, DIAGNOSTICS_DEBOUNCE_MS - (Date.now() - hit.at))) + } + + timeoutTimer = setTimeout(() => finish(false), request.timeout) + unsub = Bus.subscribe(Event.Diagnostics, (event) => { + if (event.properties.path !== request.path || event.properties.serverID !== input.serverID) return + schedule() + }) + schedule() + }) + } + + async function waitForDocumentDiagnostics(request: { path: string; version: number; after?: number }) { + const startedAt = request.after ?? Date.now() + const pushWait = waitForFreshPush({ + path: request.path, + version: request.version, + after: startedAt, + timeout: DIAGNOSTICS_DOCUMENT_WAIT_TIMEOUT_MS, + }) + + while (Date.now() - startedAt < DIAGNOSTICS_DOCUMENT_WAIT_TIMEOUT_MS) { + const result = await requestDocumentDiagnostics(request.path) + if (result.matched) return + const remaining = DIAGNOSTICS_DOCUMENT_WAIT_TIMEOUT_MS - (Date.now() - startedAt) + if (remaining <= 0) return + const next = await Promise.race([ + pushWait.then((ready) => (ready ? "push" : "timeout" as const)), + waitForRegistrationChange(remaining).then((changed) => (changed ? "registration" : "timeout" as const)), + ]) + if (next !== "registration") return + } + } + + async function waitForFullDiagnostics(request: { path: string; version: number; after?: number }) { + const startedAt = request.after ?? 
Date.now() + const pushWait = waitForFreshPush({ + path: request.path, + version: request.version, + after: startedAt, + timeout: DIAGNOSTICS_FULL_WAIT_TIMEOUT_MS, + }) + + while (Date.now() - startedAt < DIAGNOSTICS_FULL_WAIT_TIMEOUT_MS) { + const result = await requestFullDiagnostics(request.path) + if (result.handled || result.matched) return + const remaining = DIAGNOSTICS_FULL_WAIT_TIMEOUT_MS - (Date.now() - startedAt) + if (remaining <= 0) return + const next = await Promise.race([ + pushWait.then((ready) => (ready ? "push" : "timeout" as const)), + waitForRegistrationChange(remaining).then((changed) => (changed ? "registration" : "timeout" as const)), + ]) + if (next !== "registration") return + } + } + + // --- Public API --- const result = { root: input.root, @@ -145,26 +573,32 @@ export async function create(input: { serverID: string; server: LSPServer.Handle }, notify: { async open(request: { path: string }) { - request.path = path.isAbsolute(request.path) ? request.path : path.resolve(input.directory, request.path) + request.path = Filesystem.normalizePath( + path.isAbsolute(request.path) ? request.path : path.resolve(input.directory, request.path), + ) const text = await Filesystem.readText(request.path) const extension = path.extname(request.path) const languageId = LANGUAGE_EXTENSIONS[extension] ?? "plaintext" - const version = files[request.path] - if (version !== undefined) { - log.info("workspace/didChangeWatchedFiles", request) + const document = files[request.path] + if (document !== undefined) { + // Do not wipe diagnostics on didChange. Some servers (e.g. clangd) only + // re-emit diagnostics when the content actually changes, so clearing + // here would lose errors for no-op touchFile calls. Let the server's + // next push/pull overwrite naturally. 
+ logger.info("workspace/didChangeWatchedFiles", request) await connection.sendNotification("workspace/didChangeWatchedFiles", { changes: [ { uri: pathToFileURL(request.path).href, - type: 2, // Changed + type: FILE_CHANGE_CHANGED, }, ], }) - const next = version + 1 - files[request.path] = next - log.info("textDocument/didChange", { + const next = document.version + 1 + files[request.path] = { version: next, text } + logger.info("textDocument/didChange", { path: request.path, version: next, }) @@ -173,23 +607,35 @@ export async function create(input: { serverID: string; server: LSPServer.Handle uri: pathToFileURL(request.path).href, version: next, }, - contentChanges: [{ text }], + contentChanges: + syncKind === TEXT_DOCUMENT_SYNC_INCREMENTAL + ? [ + { + range: { + start: { line: 0, character: 0 }, + end: endPosition(document.text), + }, + text, + }, + ] + : [{ text }], }) - return + return next } - log.info("workspace/didChangeWatchedFiles", request) + logger.info("workspace/didChangeWatchedFiles", request) await connection.sendNotification("workspace/didChangeWatchedFiles", { changes: [ { uri: pathToFileURL(request.path).href, - type: 1, // Created + type: FILE_CHANGE_CREATED, }, ], }) - log.info("textDocument/didOpen", request) - diagnostics.delete(request.path) + logger.info("textDocument/didOpen", request) + pushDiagnostics.delete(request.path) + pullDiagnostics.delete(request.path) await connection.sendNotification("textDocument/didOpen", { textDocument: { uri: pathToFileURL(request.path).href, @@ -198,52 +644,42 @@ export async function create(input: { serverID: string; server: LSPServer.Handle text, }, }) - files[request.path] = 0 - return + files[request.path] = { version: 0, text } + return 0 }, }, get diagnostics() { - return diagnostics + const result = new Map() + for (const key of new Set([...pushDiagnostics.keys(), ...pullDiagnostics.keys()])) { + result.set(key, mergedDiagnostics(key)) + } + return result }, - async waitForDiagnostics(request: { 
path: string }) { + async waitForDiagnostics(request: { path: string; version: number; mode?: "document" | "full"; after?: number }) { const normalizedPath = Filesystem.normalizePath( path.isAbsolute(request.path) ? request.path : path.resolve(input.directory, request.path), ) - log.info("waiting for diagnostics", { path: normalizedPath }) - let unsub: () => void - let debounceTimer: ReturnType | undefined - return await withTimeout( - new Promise((resolve) => { - unsub = Bus.subscribe(Event.Diagnostics, (event) => { - if (event.properties.path === normalizedPath && event.properties.serverID === result.serverID) { - // Debounce to allow LSP to send follow-up diagnostics (e.g., semantic after syntax) - if (debounceTimer) clearTimeout(debounceTimer) - debounceTimer = setTimeout(() => { - log.info("got diagnostics", { path: normalizedPath }) - unsub?.() - resolve() - }, DIAGNOSTICS_DEBOUNCE_MS) - } - }) - }), - 3000, - ) - .catch(() => {}) - .finally(() => { - if (debounceTimer) clearTimeout(debounceTimer) - unsub?.() - }) + logger.info("waiting for diagnostics", { + path: normalizedPath, + mode: request.mode ?? 
"full", + version: request.version, + }) + if (request.mode === "document") { + await waitForDocumentDiagnostics({ path: normalizedPath, version: request.version, after: request.after }) + return + } + await waitForFullDiagnostics({ path: normalizedPath, version: request.version, after: request.after }) }, async shutdown() { - l.info("shutting down") + logger.info("shutting down") connection.end() connection.dispose() await Process.stop(input.server.process) - l.info("shutdown") + logger.info("shutdown") }, } - l.info("initialized") + logger.info("initialized") return result } diff --git a/packages/opencode/src/lsp/lsp.ts b/packages/opencode/src/lsp/lsp.ts index 833285e7b5..4c46cd9aa7 100644 --- a/packages/opencode/src/lsp/lsp.ts +++ b/packages/opencode/src/lsp/lsp.ts @@ -136,7 +136,7 @@ export interface Interface { readonly init: () => Effect.Effect readonly status: () => Effect.Effect readonly hasClients: (file: string) => Effect.Effect - readonly touchFile: (input: string, waitForDiagnostics?: boolean) => Effect.Effect + readonly touchFile: (input: string, diagnostics?: "document" | "full") => Effect.Effect readonly diagnostics: () => Effect.Effect> readonly hover: (input: LocInput) => Effect.Effect readonly definition: (input: LocInput) => Effect.Effect @@ -358,15 +358,21 @@ export const layer = Layer.effect( }) }) - const touchFile = Effect.fn("LSP.touchFile")(function* (input: string, waitForDiagnostics?: boolean) { + const touchFile = Effect.fn("LSP.touchFile")(function* (input: string, diagnostics?: "document" | "full") { log.info("touching file", { file: input }) const clients = yield* getClients(input) yield* Effect.promise(() => Promise.all( clients.map(async (client) => { - const wait = waitForDiagnostics ? 
client.waitForDiagnostics({ path: input }) : Promise.resolve() - await client.notify.open({ path: input }) - return wait + const after = Date.now() + const version = await client.notify.open({ path: input }) + if (!diagnostics) return + return client.waitForDiagnostics({ + path: input, + version, + mode: diagnostics, + after, + }) }), ).catch((err) => { log.error("failed to touch file", { err, file: input }) diff --git a/packages/opencode/src/lsp/server.ts b/packages/opencode/src/lsp/server.ts index 8bb70a5116..a0cb8fe388 100644 --- a/packages/opencode/src/lsp/server.ts +++ b/packages/opencode/src/lsp/server.ts @@ -490,7 +490,7 @@ export const Pyright: Info = { const args = [] if (!binary) { if (Flag.OPENCODE_DISABLE_LSP_DOWNLOAD) return - const resolved = await Npm.which("pyright") + const resolved = await Npm.which("pyright", "pyright-langserver") if (!resolved) return binary = resolved } diff --git a/packages/opencode/src/npm/index.ts b/packages/opencode/src/npm/index.ts index 477e99e06a..fc8497d20b 100644 --- a/packages/opencode/src/npm/index.ts +++ b/packages/opencode/src/npm/index.ts @@ -34,7 +34,7 @@ export interface Interface { }, ) => Effect.Effect readonly outdated: (pkg: string, cachedVersion: string) => Effect.Effect - readonly which: (pkg: string) => Effect.Effect> + readonly which: (pkg: string, bin?: string) => Effect.Effect> } export class Service extends Context.Service()("@opencode/Npm") {} @@ -207,7 +207,7 @@ export const layer = Layer.effect( return }, Effect.scoped) - const which = Effect.fn("Npm.which")(function* (pkg: string) { + const which = Effect.fn("Npm.which")(function* (pkg: string, bin?: string) { const dir = directory(pkg) const binDir = path.join(dir, "node_modules", ".bin") @@ -215,6 +215,9 @@ export const layer = Layer.effect( const files = yield* fs.readDirectory(binDir).pipe(Effect.catch(() => Effect.succeed([] as string[]))) if (files.length === 0) return Option.none() + // Caller picked a specific bin (e.g. 
pyright exposes both `pyright` and + // `pyright-langserver`); trust the hint if the package provides it. + if (bin) return files.includes(bin) ? Option.some(bin) : Option.none() if (files.length === 1) return Option.some(files[0]) const pkgJson = yield* afs.readJson(path.join(dir, "node_modules", pkg, "package.json")).pipe(Effect.option) @@ -223,11 +226,11 @@ export const layer = Layer.effect( const parsed = pkgJson.value as { bin?: string | Record } if (parsed?.bin) { const unscoped = pkg.startsWith("@") ? pkg.split("/")[1] : pkg - const bin = parsed.bin - if (typeof bin === "string") return Option.some(unscoped) - const keys = Object.keys(bin) + const parsedBin = parsed.bin + if (typeof parsedBin === "string") return Option.some(unscoped) + const keys = Object.keys(parsedBin) if (keys.length === 1) return Option.some(keys[0]) - return bin[unscoped] ? Option.some(unscoped) : Option.some(keys[0]) + return parsedBin[unscoped] ? Option.some(unscoped) : Option.some(keys[0]) } } diff --git a/packages/opencode/src/tool/apply_patch.ts b/packages/opencode/src/tool/apply_patch.ts index a4cf1e853f..33112c43c5 100644 --- a/packages/opencode/src/tool/apply_patch.ts +++ b/packages/opencode/src/tool/apply_patch.ts @@ -258,7 +258,7 @@ export const ApplyPatchTool = Tool.define( for (const change of fileChanges) { if (change.type === "delete") continue const target = change.movePath ?? change.filePath - yield* lsp.touchFile(target, true) + yield* lsp.touchFile(target, "document") } const diagnostics = yield* lsp.diagnostics() diff --git a/packages/opencode/src/tool/edit.ts b/packages/opencode/src/tool/edit.ts index 858d14e043..35dd85b476 100644 --- a/packages/opencode/src/tool/edit.ts +++ b/packages/opencode/src/tool/edit.ts @@ -186,7 +186,7 @@ export const EditTool = Tool.define( }) let output = "Edit applied successfully." 
- yield* lsp.touchFile(filePath, true) + yield* lsp.touchFile(filePath, "document") const diagnostics = yield* lsp.diagnostics() const normalizedFilePath = AppFileSystem.normalizePath(filePath) const block = LSP.Diagnostic.report(filePath, diagnostics[normalizedFilePath] ?? []) diff --git a/packages/opencode/src/tool/lsp.ts b/packages/opencode/src/tool/lsp.ts index 263bfe81d2..0a0edc61ed 100644 --- a/packages/opencode/src/tool/lsp.ts +++ b/packages/opencode/src/tool/lsp.ts @@ -55,7 +55,7 @@ export const LspTool = Tool.define( const available = yield* lsp.hasClients(file) if (!available) throw new Error("No LSP server available for this file type.") - yield* lsp.touchFile(file, true) + yield* lsp.touchFile(file, "document") const result: unknown[] = yield* (() => { switch (args.operation) { diff --git a/packages/opencode/src/tool/read.ts b/packages/opencode/src/tool/read.ts index c9b3048626..a9b95346a1 100644 --- a/packages/opencode/src/tool/read.ts +++ b/packages/opencode/src/tool/read.ts @@ -75,7 +75,7 @@ export const ReadTool = Tool.define( }) const warm = Effect.fn("ReadTool.warm")(function* (filepath: string) { - yield* lsp.touchFile(filepath, false).pipe(Effect.ignore, Effect.forkIn(scope)) + yield* lsp.touchFile(filepath).pipe(Effect.ignore, Effect.forkIn(scope)) }) const readSample = Effect.fn("ReadTool.readSample")(function* ( diff --git a/packages/opencode/src/tool/write.ts b/packages/opencode/src/tool/write.ts index 79ed585198..80198f4555 100644 --- a/packages/opencode/src/tool/write.ts +++ b/packages/opencode/src/tool/write.ts @@ -67,7 +67,7 @@ export const WriteTool = Tool.define( }) let output = "Wrote file successfully." 
- yield* lsp.touchFile(filepath, true) + yield* lsp.touchFile(filepath, "document") const diagnostics = yield* lsp.diagnostics() const normalizedFilepath = AppFileSystem.normalizePath(filepath) let projectDiagnosticsCount = 0 diff --git a/packages/opencode/test/fixture/lsp/fake-lsp-server.js b/packages/opencode/test/fixture/lsp/fake-lsp-server.js index be62f96f38..e6818009e1 100644 --- a/packages/opencode/test/fixture/lsp/fake-lsp-server.js +++ b/packages/opencode/test/fixture/lsp/fake-lsp-server.js @@ -1,7 +1,23 @@ // Simple JSON-RPC 2.0 LSP-like fake server over stdio -// Implements a minimal LSP handshake and triggers a request upon notification let nextId = 1 +let readBuffer = Buffer.alloc(0) +let lastChange = null +let initializeParams = null +let diagnosticRequestCount = 0 +let registeredCapability = false +const pendingClientRequests = new Map() +let pullConfig = { + delayMs: 0, + registerOn: undefined, + registrations: [], + documentDiagnostics: [], + documentDiagnosticsByIdentifier: {}, + documentDelayMsByIdentifier: {}, + workspaceDiagnostics: [], + workspaceDiagnosticsByIdentifier: {}, + workspaceDelayMsByIdentifier: {}, +} function encode(message) { const json = JSON.stringify(message) @@ -14,29 +30,19 @@ function decodeFrames(buffer) { let idx while ((idx = buffer.indexOf("\r\n\r\n")) !== -1) { const header = buffer.slice(0, idx).toString("utf8") - const m = /Content-Length:\s*(\d+)/i.exec(header) - const len = m ? parseInt(m[1], 10) : 0 + const match = /Content-Length:\s*(\d+)/i.exec(header) + const length = match ? 
parseInt(match[1], 10) : 0 const bodyStart = idx + 4 - const bodyEnd = bodyStart + len + const bodyEnd = bodyStart + length if (buffer.length < bodyEnd) break - const body = buffer.slice(bodyStart, bodyEnd).toString("utf8") - results.push(body) + results.push(buffer.slice(bodyStart, bodyEnd).toString("utf8")) buffer = buffer.slice(bodyEnd) } return { messages: results, rest: buffer } } -let readBuffer = Buffer.alloc(0) - -process.stdin.on("data", (chunk) => { - readBuffer = Buffer.concat([readBuffer, chunk]) - const { messages, rest } = decodeFrames(readBuffer) - readBuffer = rest - for (const m of messages) handle(m) -}) - -function send(msg) { - process.stdout.write(encode(msg)) +function send(message) { + process.stdout.write(encode(message)) } function sendRequest(method, params) { @@ -45,6 +51,50 @@ function sendRequest(method, params) { return id } +function sendResponse(id, result) { + send({ jsonrpc: "2.0", id, result }) +} + +function sendNotification(method, params) { + send({ jsonrpc: "2.0", method, params }) +} + +function maybeRegister(method) { + if (pullConfig.registerOn !== method || registeredCapability) return + registeredCapability = true + sendRequest("client/registerCapability", { + registrations: pullConfig.registrations.map((registration, index) => ({ + id: registration.id ?? `pull-${index}`, + method: registration.method ?? "textDocument/diagnostic", + registerOptions: registration.registerOptions ?? registration, + })), + }) +} + +function delayed(id, result, delayMs = pullConfig.delayMs) { + if (!delayMs) { + sendResponse(id, result) + return + } + setTimeout(() => sendResponse(id, result), delayMs) +} + +function diagnosticsForIdentifier(identifier) { + return pullConfig.documentDiagnosticsByIdentifier[identifier] ?? pullConfig.documentDiagnostics +} + +function workspaceDiagnosticsForIdentifier(identifier) { + return pullConfig.workspaceDiagnosticsByIdentifier[identifier] ?? 
pullConfig.workspaceDiagnostics +} + +function documentDelayForIdentifier(identifier) { + return pullConfig.documentDelayMsByIdentifier[identifier] ?? pullConfig.delayMs +} + +function workspaceDelayForIdentifier(identifier) { + return pullConfig.workspaceDelayMsByIdentifier[identifier] ?? pullConfig.delayMs +} + function handle(raw) { let data try { @@ -52,24 +102,148 @@ function handle(raw) { } catch { return } + + if (typeof data.method === "undefined" && typeof data.id !== "undefined") { + const pending = pendingClientRequests.get(data.id) + if (!pending) return + pendingClientRequests.delete(data.id) + sendResponse(pending, data.result ?? null) + return + } + if (data.method === "initialize") { - send({ jsonrpc: "2.0", id: data.id, result: { capabilities: {} } }) + initializeParams = data.params + sendResponse(data.id, { + capabilities: { + textDocumentSync: { + change: 2, + }, + }, + }) return } - if (data.method === "initialized") { + + if (data.method === "test/get-initialize-params") { + sendResponse(data.id, initializeParams) return } - if (data.method === "workspace/didChangeConfiguration") { + + if (data.method === "test/request-configuration") { + const id = sendRequest("workspace/configuration", data.params) + pendingClientRequests.set(id, data.id) return } + + if (data.method === "initialized" || data.method === "workspace/didChangeConfiguration") { + return + } + + if (data.method === "textDocument/didOpen") { + maybeRegister("didOpen") + return + } + + if (data.method === "textDocument/didChange") { + lastChange = data.params + maybeRegister("didChange") + return + } + if (data.method === "test/trigger") { const method = data.params && data.params.method + if (method === "client/registerCapability") { + sendRequest(method, { + registrations: [ + { + id: "test-diagnostic-registration", + method: "textDocument/diagnostic", + registerOptions: { identifier: "syntax" }, + }, + ], + }) + return + } + if (method === "client/unregisterCapability") { + 
sendRequest(method, { + unregisterations: [{ id: "test-diagnostic-registration", method: "textDocument/diagnostic" }], + }) + return + } if (method) sendRequest(method, {}) return } - if (typeof data.id !== "undefined") { - // Respond OK to any request from client to keep transport flowing - send({ jsonrpc: "2.0", id: data.id, result: null }) + + if (data.method === "test/configure-pull-diagnostics") { + pullConfig = { + delayMs: data.params?.delayMs ?? 0, + registerOn: data.params?.registerOn, + registrations: data.params?.registrations ?? [], + documentDiagnostics: data.params?.documentDiagnostics ?? [], + documentDiagnosticsByIdentifier: data.params?.documentDiagnosticsByIdentifier ?? {}, + documentDelayMsByIdentifier: data.params?.documentDelayMsByIdentifier ?? {}, + workspaceDiagnostics: data.params?.workspaceDiagnostics ?? [], + workspaceDiagnosticsByIdentifier: data.params?.workspaceDiagnosticsByIdentifier ?? {}, + workspaceDelayMsByIdentifier: data.params?.workspaceDelayMsByIdentifier ?? {}, + } + registeredCapability = false + sendResponse(data.id, null) return } + + if (data.method === "test/register-configured-pull-diagnostics") { + maybeRegister(undefined) + sendResponse(data.id, null) + return + } + + if (data.method === "test/publish-diagnostics") { + sendNotification("textDocument/publishDiagnostics", data.params) + return + } + + if (data.method === "test/get-last-change") { + sendResponse(data.id, lastChange) + return + } + + if (data.method === "test/get-diagnostic-request-count") { + sendResponse(data.id, diagnosticRequestCount) + return + } + + if (data.method === "textDocument/diagnostic") { + diagnosticRequestCount += 1 + delayed( + data.id, + { + kind: "full", + items: diagnosticsForIdentifier(data.params?.identifier ?? ""), + }, + documentDelayForIdentifier(data.params?.identifier ?? 
""), + ) + return + } + + if (data.method === "workspace/diagnostic") { + diagnosticRequestCount += 1 + delayed( + data.id, + { + items: workspaceDiagnosticsForIdentifier(data.params?.identifier ?? ""), + }, + workspaceDelayForIdentifier(data.params?.identifier ?? ""), + ) + return + } + + if (typeof data.id !== "undefined") { + sendResponse(data.id, null) + } } + +process.stdin.on("data", (chunk) => { + readBuffer = Buffer.concat([readBuffer, chunk]) + const { messages, rest } = decodeFrames(readBuffer) + readBuffer = rest + for (const message of messages) handle(message) +}) diff --git a/packages/opencode/test/lsp/client.test.ts b/packages/opencode/test/lsp/client.test.ts index d6eaa317f9..4862f68394 100644 --- a/packages/opencode/test/lsp/client.test.ts +++ b/packages/opencode/test/lsp/client.test.ts @@ -1,11 +1,12 @@ -import { describe, expect, test, beforeEach } from "bun:test" +import { beforeEach, describe, expect, test } from "bun:test" import path from "path" +import { pathToFileURL } from "url" +import { tmpdir } from "../fixture/fixture" import { LSPClient } from "../../src/lsp" import { LSPServer } from "../../src/lsp" import { Instance } from "../../src/project/instance" import { Log } from "../../src/util" -// Minimal fake LSP server that speaks JSON-RPC over stdio function spawnFakeServer() { const { spawn } = require("child_process") const serverPath = path.join(__dirname, "../fixture/lsp/fake-lsp-server.js") @@ -39,10 +40,8 @@ describe("LSPClient interop", () => { method: "workspace/workspaceFolders", }) - await new Promise((r) => setTimeout(r, 100)) - + await new Promise((resolve) => setTimeout(resolve, 100)) expect(client.connection).toBeDefined() - await client.shutdown() }) @@ -64,10 +63,8 @@ describe("LSPClient interop", () => { method: "client/registerCapability", }) - await new Promise((r) => setTimeout(r, 100)) - + await new Promise((resolve) => setTimeout(resolve, 100)) expect(client.connection).toBeDefined() - await client.shutdown() }) 
@@ -89,10 +86,397 @@ describe("LSPClient interop", () => { method: "client/unregisterCapability", }) - await new Promise((r) => setTimeout(r, 100)) - + await new Promise((resolve) => setTimeout(resolve, 100)) expect(client.connection).toBeDefined() + await client.shutdown() + }) + + test("initialize does not overclaim unsupported diagnostics capabilities", async () => { + const handle = spawnFakeServer() as any + + const client = await Instance.provide({ + directory: process.cwd(), + fn: () => + LSPClient.create({ + serverID: "fake", + server: handle as unknown as LSPServer.Handle, + root: process.cwd(), + directory: process.cwd(), + }), + }) + + const params = await client.connection.sendRequest("test/get-initialize-params", {}) + expect(params.capabilities.workspace.diagnostics.refreshSupport).toBe(false) + expect(params.capabilities.textDocument.publishDiagnostics.versionSupport).toBe(false) await client.shutdown() }) + + test("workspace/configuration returns one result per requested item", async () => { + const handle = spawnFakeServer() as any + const initialization = { + alpha: { + beta: 1, + }, + gamma: true, + } + + const client = await Instance.provide({ + directory: process.cwd(), + fn: () => + LSPClient.create({ + serverID: "fake", + server: { + ...(handle as unknown as LSPServer.Handle), + initialization, + }, + root: process.cwd(), + directory: process.cwd(), + }), + }) + + const response = await client.connection.sendRequest("test/request-configuration", { + items: [{ section: "alpha" }, { section: "alpha.beta" }, { section: "missing" }, {}], + }) + + expect(response).toEqual([{ beta: 1 }, 1, null, initialization]) + + await client.shutdown() + }) + + test("sends ranged didChange for incremental sync servers", async () => { + const handle = spawnFakeServer() as any + await using tmp = await tmpdir() + const file = path.join(tmp.path, "client.ts") + await Bun.write(file, "first\n") + + await Instance.provide({ + directory: tmp.path, + fn: async () => { 
+ const client = await LSPClient.create({ + serverID: "fake", + server: handle as unknown as LSPServer.Handle, + root: tmp.path, + directory: tmp.path, + }) + + await client.notify.open({ path: file }) + await Bun.write(file, "second\nthird\n") + await client.notify.open({ path: file }) + + const change = await client.connection.sendRequest<{ + textDocument: { version: number } + contentChanges: { + range?: { start: { line: number; character: number }; end: { line: number; character: number } } + text: string + }[] + }>("test/get-last-change", {}) + expect(change.textDocument.version).toBe(1) + expect(change.contentChanges).toEqual([ + { + range: { + start: { line: 0, character: 0 }, + end: { line: 1, character: 0 }, + }, + text: "second\nthird\n", + }, + ]) + + await client.shutdown() + }, + }) + }) + + test("document mode falls back to push diagnostics", async () => { + const handle = spawnFakeServer() as any + await using tmp = await tmpdir() + const file = path.join(tmp.path, "client.ts") + await Bun.write(file, "const x = 1\n") + + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const client = await LSPClient.create({ + serverID: "fake", + server: handle as unknown as LSPServer.Handle, + root: tmp.path, + directory: tmp.path, + }) + + const version = await client.notify.open({ path: file }) + const wait = client.waitForDiagnostics({ path: file, version, mode: "document" }) + await client.connection.sendNotification("test/publish-diagnostics", { + uri: pathToFileURL(file).href, + version, + diagnostics: [ + { + range: { + start: { line: 0, character: 0 }, + end: { line: 0, character: 5 }, + }, + message: "push diagnostic", + severity: 1, + }, + ], + }) + await wait + + const diagnostics = client.diagnostics.get(file) ?? 
[] + expect(diagnostics).toHaveLength(1) + expect(diagnostics[0]?.message).toBe("push diagnostic") + + const count = await client.connection.sendRequest("test/get-diagnostic-request-count", {}) + expect(count).toBe(0) + + await client.shutdown() + }, + }) + }) + + test("document mode accepts matching push diagnostics published before waiting", async () => { + const handle = spawnFakeServer() as any + await using tmp = await tmpdir() + const file = path.join(tmp.path, "client.ts") + await Bun.write(file, "const x = 1\n") + + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const client = await LSPClient.create({ + serverID: "fake", + server: handle as unknown as LSPServer.Handle, + root: tmp.path, + directory: tmp.path, + }) + + const version = await client.notify.open({ path: file }) + await client.connection.sendNotification("test/publish-diagnostics", { + uri: pathToFileURL(file).href, + version, + diagnostics: [ + { + range: { + start: { line: 0, character: 0 }, + end: { line: 0, character: 5 }, + }, + message: "push diagnostic", + severity: 1, + }, + ], + }) + + for (let i = 0; i < 20 && (client.diagnostics.get(file)?.length ?? 
0) === 0; i++) { + await new Promise((resolve) => setTimeout(resolve, 25)) + } + + expect(client.diagnostics.get(file)?.[0]?.message).toBe("push diagnostic") + + const started = Date.now() + await client.waitForDiagnostics({ path: file, version, mode: "document" }) + expect(Date.now() - started).toBeLessThan(1_000) + + await client.shutdown() + }, + }) + }) + + test("document mode waits for pull diagnostics", async () => { + const handle = spawnFakeServer() as any + await using tmp = await tmpdir() + const file = path.join(tmp.path, "client.cs") + await Bun.write(file, "class C {}\n") + + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const client = await LSPClient.create({ + serverID: "fake", + server: handle as unknown as LSPServer.Handle, + root: tmp.path, + directory: tmp.path, + }) + + await client.connection.sendRequest("test/configure-pull-diagnostics", { + registerOn: "didOpen", + registrations: [{ identifier: "DocumentCompilerSemantic" }], + documentDiagnosticsByIdentifier: { + DocumentCompilerSemantic: [ + { + range: { + start: { line: 0, character: 0 }, + end: { line: 0, character: 5 }, + }, + message: "pull diagnostic", + severity: 1, + }, + ], + }, + }) + + const version = await client.notify.open({ path: file }) + await client.waitForDiagnostics({ path: file, version, mode: "document" }) + + const diagnostics = client.diagnostics.get(file) ?? 
[] + expect(diagnostics).toHaveLength(1) + expect(diagnostics[0]?.message).toBe("pull diagnostic") + + const count = await client.connection.sendRequest("test/get-diagnostic-request-count", {}) + expect(count).toBeGreaterThan(0) + + await client.shutdown() + }, + }) + }) + + test("document mode does not wait for the slowest pull identifier after current-file diagnostics arrive", async () => { + const handle = spawnFakeServer() as any + await using tmp = await tmpdir() + const file = path.join(tmp.path, "client.cs") + await Bun.write(file, "class C {}\n") + + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const client = await LSPClient.create({ + serverID: "fake", + server: handle as unknown as LSPServer.Handle, + root: tmp.path, + directory: tmp.path, + }) + + await client.connection.sendRequest("test/configure-pull-diagnostics", { + registrations: [{ identifier: "fast" }, { identifier: "slow" }], + documentDiagnosticsByIdentifier: { + fast: [ + { + range: { + start: { line: 0, character: 0 }, + end: { line: 0, character: 5 }, + }, + message: "fast diagnostic", + severity: 1, + }, + ], + slow: [], + }, + documentDelayMsByIdentifier: { + slow: 2_500, + }, + }) + + const version = await client.notify.open({ path: file }) + await client.connection.sendRequest("test/register-configured-pull-diagnostics", {}) + await new Promise((resolve) => setTimeout(resolve, 100)) + const started = Date.now() + await client.waitForDiagnostics({ path: file, version, mode: "document" }) + + expect(Date.now() - started).toBeLessThan(1_000) + expect(client.diagnostics.get(file)?.[0]?.message).toBe("fast diagnostic") + expect(await client.connection.sendRequest("test/get-diagnostic-request-count", {})).toBeGreaterThan(1) + + await client.shutdown() + }, + }) + }) + + test("full mode includes workspace pull diagnostics", async () => { + const handle = spawnFakeServer() as any + await using tmp = await tmpdir() + const file = path.join(tmp.path, "client.cs") + const 
related = path.join(tmp.path, "other.cs") + await Bun.write(file, "class C {}\n") + await Bun.write(related, "class D {}\n") + + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const client = await LSPClient.create({ + serverID: "fake", + server: handle as unknown as LSPServer.Handle, + root: tmp.path, + directory: tmp.path, + }) + + await client.connection.sendRequest("test/configure-pull-diagnostics", { + registerOn: "didOpen", + registrations: [ + { identifier: "DocumentCompilerSemantic" }, + { identifier: "WorkspaceDocumentsAndProject", workspaceDiagnostics: true }, + ], + documentDiagnosticsByIdentifier: { + DocumentCompilerSemantic: [ + { + range: { + start: { line: 0, character: 0 }, + end: { line: 0, character: 5 }, + }, + message: "current file", + severity: 1, + }, + ], + }, + workspaceDiagnosticsByIdentifier: { + WorkspaceDocumentsAndProject: [ + { + uri: pathToFileURL(related).href, + items: [ + { + range: { + start: { line: 0, character: 0 }, + end: { line: 0, character: 5 }, + }, + message: "workspace file", + severity: 1, + }, + ], + }, + ], + }, + }) + + const version = await client.notify.open({ path: file }) + await client.waitForDiagnostics({ path: file, version, mode: "full" }) + + expect(client.diagnostics.get(file)?.[0]?.message).toBe("current file") + expect(client.diagnostics.get(related)?.[0]?.message).toBe("workspace file") + + await client.shutdown() + }, + }) + }) + + test("full mode treats an empty workspace pull response as handled", async () => { + const handle = spawnFakeServer() as any + await using tmp = await tmpdir() + const file = path.join(tmp.path, "client.cs") + await Bun.write(file, "class C {}\n") + + await Instance.provide({ + directory: tmp.path, + fn: async () => { + const client = await LSPClient.create({ + serverID: "fake", + server: handle as unknown as LSPServer.Handle, + root: tmp.path, + directory: tmp.path, + }) + + await client.connection.sendRequest("test/configure-pull-diagnostics", { + 
registerOn: "didOpen", + registrations: [{ identifier: "WorkspaceDocumentsAndProject", workspaceDiagnostics: true }], + workspaceDiagnosticsByIdentifier: { + WorkspaceDocumentsAndProject: [], + }, + }) + + const version = await client.notify.open({ path: file }) + const started = Date.now() + await client.waitForDiagnostics({ path: file, version, mode: "full" }) + + expect(Date.now() - started).toBeLessThan(1_000) + + await client.shutdown() + }, + }) + }) }) From d3f37273bc07bf12acee12585e05b24bbadc0bb3 Mon Sep 17 00:00:00 2001 From: "opencode-agent[bot]" Date: Wed, 22 Apr 2026 23:25:07 +0000 Subject: [PATCH 71/73] chore: generate --- packages/opencode/src/lsp/client.ts | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/packages/opencode/src/lsp/client.ts b/packages/opencode/src/lsp/client.ts index b0418ca3f5..f6d5110a6c 100644 --- a/packages/opencode/src/lsp/client.ts +++ b/packages/opencode/src/lsp/client.ts @@ -358,7 +358,10 @@ export async function create(input: { serverID: string; server: LSPServer.Handle return { handled, matched, byFile } } - async function requestWorkspaceDiagnosticReport(filePath: string, identifier?: string): Promise { + async function requestWorkspaceDiagnosticReport( + filePath: string, + identifier?: string, + ): Promise { const report = await withTimeout( connection.sendRequest("workspace/diagnostic", { ...(identifier ? { identifier } : {}), @@ -386,7 +389,9 @@ export async function create(input: { serverID: string; server: LSPServer.Handle (registration) => registration.registerOptions?.workspaceDiagnostics !== true, ) return { - documentIdentifiers: [...new Set(documentRegistrations.flatMap((registration) => registration.registerOptions?.identifier ?? []))], + documentIdentifiers: [ + ...new Set(documentRegistrations.flatMap((registration) => registration.registerOptions?.identifier ?? 
[])), + ], supported: hasStaticPullDiagnostics || documentRegistrations.length > 0, } } @@ -396,7 +401,9 @@ export async function create(input: { serverID: string; server: LSPServer.Handle (registration) => registration.registerOptions?.workspaceDiagnostics === true, ) return { - workspaceIdentifiers: [...new Set(workspaceRegistrations.flatMap((registration) => registration.registerOptions?.identifier ?? []))], + workspaceIdentifiers: [ + ...new Set(workspaceRegistrations.flatMap((registration) => registration.registerOptions?.identifier ?? [])), + ], supported: workspaceRegistrations.length > 0, } } @@ -461,7 +468,9 @@ export async function create(input: { serverID: string; server: LSPServer.Handle ...(documentState.supported ? [requestDiagnosticReport(filePath)] : []), ...documentState.documentIdentifiers.map((identifier) => requestDiagnosticReport(filePath, identifier)), ...(workspaceState.supported ? [requestWorkspaceDiagnosticReport(filePath)] : []), - ...workspaceState.workspaceIdentifiers.map((identifier) => requestWorkspaceDiagnosticReport(filePath, identifier)), + ...workspaceState.workspaceIdentifiers.map((identifier) => + requestWorkspaceDiagnosticReport(filePath, identifier), + ), ]), ) } @@ -532,8 +541,8 @@ export async function create(input: { serverID: string; server: LSPServer.Handle const remaining = DIAGNOSTICS_DOCUMENT_WAIT_TIMEOUT_MS - (Date.now() - startedAt) if (remaining <= 0) return const next = await Promise.race([ - pushWait.then((ready) => (ready ? "push" : "timeout" as const)), - waitForRegistrationChange(remaining).then((changed) => (changed ? "registration" : "timeout" as const)), + pushWait.then((ready) => (ready ? "push" : ("timeout" as const))), + waitForRegistrationChange(remaining).then((changed) => (changed ? 
"registration" : ("timeout" as const))), ]) if (next !== "registration") return } @@ -554,8 +563,8 @@ export async function create(input: { serverID: string; server: LSPServer.Handle const remaining = DIAGNOSTICS_FULL_WAIT_TIMEOUT_MS - (Date.now() - startedAt) if (remaining <= 0) return const next = await Promise.race([ - pushWait.then((ready) => (ready ? "push" : "timeout" as const)), - waitForRegistrationChange(remaining).then((changed) => (changed ? "registration" : "timeout" as const)), + pushWait.then((ready) => (ready ? "push" : ("timeout" as const))), + waitForRegistrationChange(remaining).then((changed) => (changed ? "registration" : ("timeout" as const))), ]) if (next !== "registration") return } From f8ff6f49abf7245953e53d894e5be53ada80ea4a Mon Sep 17 00:00:00 2001 From: Luke Parker <10430890+Hona@users.noreply.github.com> Date: Thu, 23 Apr 2026 10:08:14 +1000 Subject: [PATCH 72/73] log session sdk errors (#23652) --- packages/app/src/context/global-sync.tsx | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/packages/app/src/context/global-sync.tsx b/packages/app/src/context/global-sync.tsx index 313ff29659..7c819918c0 100644 --- a/packages/app/src/context/global-sync.tsx +++ b/packages/app/src/context/global-sync.tsx @@ -295,6 +295,19 @@ function createGlobalSync() { const event = e.details const recent = bootingRoot || Date.now() - bootedAt < 1500 + if (event.type === "session.error") { + const error = event.properties.error + if (error?.name !== "MessageAbortedError") { + console.error("[global-sync] session error", { + scope: directory === "global" ? "global" : "workspace", + directory: directory === "global" ? undefined : directory, + project: directory === "global" ? 
undefined : getFilename(directory), + sessionID: event.properties.sessionID, + error, + }) + } + } + if (directory === "global") { applyGlobalEvent({ event, From 1026791076c6a4edf1d44422177e13d06c2930d6 Mon Sep 17 00:00:00 2001 From: Luke Parker <10430890+Hona@users.noreply.github.com> Date: Thu, 23 Apr 2026 11:19:36 +1000 Subject: [PATCH 73/73] fix(beta): PR resolvers/smoke check should typecheck all pacakges (#23913) --- script/beta.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/script/beta.ts b/script/beta.ts index 34d9dab1f8..7c558d1e7e 100755 --- a/script/beta.ts +++ b/script/beta.ts @@ -61,7 +61,7 @@ async function typecheck() { console.log(" Running typecheck...") try { - await $`bun typecheck`.cwd("packages/opencode") + await $`bun typecheck` return true } catch (err) { console.log(`Typecheck failed: ${err}`) @@ -113,7 +113,7 @@ async function fix(pr: PR, files: string[], prs: PR[], applied: number[], idx: n "If bun.lock is conflicted, do not hand-merge it. 
Delete bun.lock and run bun install after the code conflicts are resolved.", "If a PR already deleted a file/directory, do not re-add it, instead apply changes in the new semantic location.", "If a PR already changed an import, keep that change.", - "After resolving the conflicts, run `bun typecheck` in `packages/opencode`.", + "After resolving the conflicts, run `bun typecheck` at the repo root.", "If typecheck fails, you may also update any files reported by typecheck.", "Keep any non-conflict edits narrowly scoped to restoring a valid merged state for the current PR batch.", "Fix any merge-caused typecheck errors before finishing.", @@ -149,7 +149,7 @@ async function smoke(prs: PR[], applied: number[]) { const prompt = [ "The beta merge batch is complete.", `Merged PRs on HEAD:\n${done}`, - "Run `bun typecheck` in `packages/opencode`.", + "Run `bun typecheck` at the repo root.", "Run `./script/build.ts --single` in `packages/opencode`.", "Fix any merge-caused issues until both commands pass.", "Do not create a commit.",