diff --git a/open-sse/config/codexInstructions.ts b/open-sse/config/codexInstructions.ts
index b903ac06..7a5b4a5a 100644
--- a/open-sse/config/codexInstructions.ts
+++ b/open-sse/config/codexInstructions.ts
@@ -55,7 +55,7 @@ When you are running with \`approval_policy == on-request\`, and sandboxing enab
- You are about to take a potentially destructive action such as an \`rm\` or \`git reset\` that the user did not explicitly ask for
- (for all of these, you should weigh alternative paths that do not require approval)
-When \`sandbox_mode\` is set to read-only, you'll need to request approval for any command that isn't a read.
+When \`sandbox_mode\` is set to read-only, you'll need to request approval for each command that isn't a read.
You will be told what filesystem sandboxing, network sandboxing, and approval mode are active in a developer or user message. If you are not told about this, assume that you are running with workspace-write, network sandboxing enabled, and approval on-failure.
@@ -68,7 +68,7 @@ When requesting approval to execute a command that will require escalated privil
## Special user requests
- If the user makes a simple request (such as asking for the time) which you can fulfill by running a terminal command (such as \`date\`), you should do so.
-- If the user asks for a "review", default to a code review mindset: prioritise identifying bugs, risks, behavioural regressions, and missing tests. Findings must be the primary focus of the response - keep summaries or overviews brief and only after enumerating the issues. Present findings first (ordered by severity with file/line references), follow with open questions or assumptions, and offer a change-summary only as a secondary detail. If no findings are discovered, state that explicitly and mention any residual risks or testing gaps.
+- If the user asks for a "review", default to a code review mindset: prioritise identifying bugs, risks, behavioural regressions, and missing tests. Findings must be the primary focus of the response - keep summaries or overviews brief and only after enumerating the issues. Present findings first (ordered by severity with file/line references), follow with open questions or assumptions, and offer a change-summary only as a secondary detail. If no findings are discovered, state that explicitly and mention residual risks or testing gaps.
## Frontend tasks
When doing frontend design tasks, avoid collapsing into "AI slop" or safe, average-looking layouts.
diff --git a/open-sse/config/imageRegistry.ts b/open-sse/config/imageRegistry.ts
index 21b1bb21..b18a8659 100644
--- a/open-sse/config/imageRegistry.ts
+++ b/open-sse/config/imageRegistry.ts
@@ -157,7 +157,7 @@ export function parseImageModel(modelStr) {
}
}
- // No provider prefix — try to find the model in any provider
+ // No provider prefix — search the providers for the first one that serves this model
for (const [providerId, config] of Object.entries(IMAGE_PROVIDERS)) {
if (config.models.some((m) => m.id === modelStr)) {
return { provider: providerId, model: modelStr };
diff --git a/open-sse/config/registryUtils.ts b/open-sse/config/registryUtils.ts
index 869d04dc..600a0f5c 100644
--- a/open-sse/config/registryUtils.ts
+++ b/open-sse/config/registryUtils.ts
@@ -37,7 +37,7 @@ export function parseModelFromRegistry(
}
}
- // No provider prefix — try to find the model in any provider
+ // No provider prefix — search the providers for the first one that serves this model
for (const [providerId, config] of Object.entries(registry)) {
if (config.models.some((m) => m.id === modelStr)) {
return { provider: providerId, model: modelStr };
diff --git a/open-sse/executors/antigravity.ts b/open-sse/executors/antigravity.ts
index ad5fdaae..5a80b315 100644
--- a/open-sse/executors/antigravity.ts
+++ b/open-sse/executors/antigravity.ts
@@ -5,7 +5,7 @@ import { PROVIDERS, OAUTH_ENDPOINTS, HTTP_STATUS } from "../config/constants.ts"
const MAX_RETRY_AFTER_MS = 10000;
/**
- * Strip any provider prefix (e.g. "antigravity/model" → "model").
+ * Strip provider prefixes (e.g. "antigravity/model" → "model").
* Ensures the model name sent to the upstream API never contains a routing prefix.
*/
function cleanModelName(model: string): string {
diff --git a/open-sse/executors/base.ts b/open-sse/executors/base.ts
index a094320c..4c05f8b7 100644
--- a/open-sse/executors/base.ts
+++ b/open-sse/executors/base.ts
@@ -1,15 +1,64 @@
import { HTTP_STATUS, FETCH_TIMEOUT_MS } from "../config/constants.ts";
+type JsonRecord = Record<string, unknown>;
+
+type ProviderConfig = {
+ id?: string;
+ baseUrl?: string;
+ baseUrls?: string[];
+ headers?: Record<string, string>;
+};
+
+type ProviderCredentials = {
+ accessToken?: string;
+ apiKey?: string;
+ expiresAt?: string;
+ providerSpecificData?: JsonRecord;
+};
+
+type ExecutorLog = {
+ debug?: (tag: string, message: string) => void;
+ warn?: (tag: string, message: string) => void;
+};
+
+type ExecuteInput = {
+ model: string;
+ body: unknown;
+ stream: boolean;
+ credentials: ProviderCredentials;
+ signal?: AbortSignal | null;
+ log?: ExecutorLog | null;
+};
+
+function mergeAbortSignals(primary: AbortSignal, secondary: AbortSignal): AbortSignal {
+ const controller = new AbortController();
+
+ const abortBoth = () => {
+ if (!controller.signal.aborted) {
+ controller.abort();
+ }
+ };
+
+ if (primary.aborted || secondary.aborted) {
+ abortBoth();
+ return controller.signal;
+ }
+
+ primary.addEventListener("abort", abortBoth, { once: true });
+ secondary.addEventListener("abort", abortBoth, { once: true });
+ return controller.signal;
+}
+
/**
* BaseExecutor - Base class for provider executors.
* Implements the Strategy pattern: subclasses override specific methods
* (buildUrl, buildHeaders, transformRequest, etc.) for each provider.
*/
export class BaseExecutor {
- provider: any;
- config: any;
+ provider: string;
+ config: ProviderConfig;
- constructor(provider: any, config: any) {
+ constructor(provider: string, config: ProviderConfig) {
this.provider = provider;
this.config = config;
}
@@ -26,9 +75,19 @@ export class BaseExecutor {
return this.getBaseUrls().length || 1;
}
- buildUrl(model, stream, urlIndex = 0, credentials = null) {
+ buildUrl(
+ model: string,
+ stream: boolean,
+ urlIndex = 0,
+ credentials: ProviderCredentials | null = null
+ ) {
+ void model;
+ void stream;
if (this.provider?.startsWith?.("openai-compatible-")) {
- const baseUrl = credentials?.providerSpecificData?.baseUrl || "https://api.openai.com/v1";
+ const baseUrl =
+ typeof credentials?.providerSpecificData?.baseUrl === "string"
+ ? credentials.providerSpecificData.baseUrl
+ : "https://api.openai.com/v1";
const normalized = baseUrl.replace(/\/$/, "");
const path = this.provider.includes("responses") ? "/responses" : "/chat/completions";
return `${normalized}${path}`;
@@ -37,8 +96,8 @@ export class BaseExecutor {
return baseUrls[urlIndex] || baseUrls[0] || this.config.baseUrl;
}
- buildHeaders(credentials, stream = true) {
- const headers = {
+ buildHeaders(credentials: ProviderCredentials, stream = true): Record<string, string> {
+ const headers: Record<string, string> = {
"Content-Type": "application/json",
...this.config.headers,
};
@@ -70,32 +129,42 @@ export class BaseExecutor {
}
// Override in subclass for provider-specific transformations
- transformRequest(model, body, stream, credentials) {
+ transformRequest(
+ model: string,
+ body: unknown,
+ stream: boolean,
+ credentials: ProviderCredentials
+ ): unknown {
+ void model;
+ void stream;
+ void credentials;
return body;
}
- shouldRetry(status, urlIndex) {
+ shouldRetry(status: number, urlIndex: number) {
return status === HTTP_STATUS.RATE_LIMITED && urlIndex + 1 < this.getFallbackCount();
}
// Override in subclass for provider-specific refresh
- async refreshCredentials(credentials, log) {
+ async refreshCredentials(credentials: ProviderCredentials, log: ExecutorLog | null) {
+ void credentials;
+ void log;
return null;
}
- needsRefresh(credentials) {
+ needsRefresh(credentials: ProviderCredentials) {
if (!credentials.expiresAt) return false;
const expiresAtMs = new Date(credentials.expiresAt).getTime();
return expiresAtMs - Date.now() < 5 * 60 * 1000;
}
- parseError(response, bodyText) {
+ parseError(response: Response, bodyText: string) {
return { status: response.status, message: bodyText || `HTTP ${response.status}` };
}
- async execute({ model, body, stream, credentials, signal, log }) {
+ async execute({ model, body, stream, credentials, signal, log }: ExecuteInput) {
const fallbackCount = this.getFallbackCount();
- let lastError = null;
+ let lastError: unknown = null;
let lastStatus = 0;
for (let urlIndex = 0; urlIndex < fallbackCount; urlIndex++) {
@@ -109,10 +178,10 @@ export class BaseExecutor {
const timeoutSignal = !stream ? AbortSignal.timeout(FETCH_TIMEOUT_MS) : null;
const combinedSignal =
signal && timeoutSignal
- ? AbortSignal.any([signal, timeoutSignal])
+ ? mergeAbortSignals(signal, timeoutSignal)
: signal || timeoutSignal;
- const fetchOptions: Record<string, any> = {
+ const fetchOptions: RequestInit = {
method: "POST",
headers,
body: JSON.stringify(transformedBody),
@@ -130,15 +199,16 @@ export class BaseExecutor {
return { response, url, headers, transformedBody };
} catch (error) {
// Distinguish timeout errors from other abort errors
- if (error.name === "TimeoutError") {
+ const err = error instanceof Error ? error : new Error(String(error));
+ if (err.name === "TimeoutError") {
log?.warn?.("TIMEOUT", `Fetch timeout after ${FETCH_TIMEOUT_MS}ms on ${url}`);
}
- lastError = error;
+ lastError = err;
if (urlIndex + 1 < fallbackCount) {
log?.debug?.("RETRY", `Error on ${url}, trying fallback ${urlIndex + 1}`);
continue;
}
- throw error;
+ throw err;
}
}
diff --git a/open-sse/executors/cursor.ts b/open-sse/executors/cursor.ts
index 67d37b7b..cf2f0777 100644
--- a/open-sse/executors/cursor.ts
+++ b/open-sse/executors/cursor.ts
@@ -1,4 +1,4 @@
-declare var EdgeRuntime: any;
+declare const EdgeRuntime: string | undefined;
/**
* CursorExecutor — Handles communication with the Cursor IDE API.
*
@@ -121,13 +121,19 @@ function createErrorResponse(jsonError) {
);
}
+type CursorHttpResponse = {
+ status: number;
+ headers: Record<string, string | string[] | number | undefined>;
+ body: Buffer;
+};
+
export class CursorExecutor extends BaseExecutor {
constructor() {
super("cursor", PROVIDERS.cursor);
}
buildUrl() {
- return `${this.config.baseUrl}${this.config.chatPath}`;
+ return `${this.config.baseUrl}${this.config.chatPath || ""}`;
}
// Jyh cipher checksum for Cursor API authentication
@@ -217,7 +223,12 @@ export class CursorExecutor extends BaseExecutor {
return generateCursorBody(messages, model, tools, reasoningEffort);
}
- async makeFetchRequest(url, headers, body, signal) {
+ async makeFetchRequest(
+ url: string,
+ headers: Record<string, string>,
+ body: Uint8Array,
+ signal?: AbortSignal
+ ): Promise<CursorHttpResponse> {
const response = await fetch(url, {
method: "POST",
headers,
@@ -227,17 +238,22 @@ export class CursorExecutor extends BaseExecutor {
return {
status: response.status,
- headers: Object.fromEntries((response.headers as any).entries()),
+ headers: Object.fromEntries(response.headers.entries()),
body: Buffer.from(await response.arrayBuffer()),
};
}
- makeHttp2Request(url, headers, body, signal) {
+ makeHttp2Request(
+ url: string,
+ headers: Record<string, string>,
+ body: Uint8Array,
+ signal?: AbortSignal
+ ): Promise<CursorHttpResponse> {
if (!http2) {
throw new Error("http2 module not available");
}
- return new Promise((resolve, reject) => {
+ return new Promise<CursorHttpResponse>((resolve, reject) => {
const urlObj = new URL(url);
const client = http2.connect(`https://${urlObj.host}`);
const chunks = [];
@@ -262,7 +278,10 @@ export class CursorExecutor extends BaseExecutor {
req.on("end", () => {
client.close();
resolve({
- status: responseHeaders[":status"],
+ status:
+ typeof responseHeaders[":status"] === "number"
+ ? responseHeaders[":status"]
+ : Number(responseHeaders[":status"] || HTTP_STATUS.SERVER_ERROR),
headers: responseHeaders,
body: Buffer.concat(chunks),
});
@@ -291,7 +310,7 @@ export class CursorExecutor extends BaseExecutor {
const transformedBody = this.transformRequest(model, body, stream, credentials);
try {
- const response: any = http2
+ const response: CursorHttpResponse = http2
? await this.makeHttp2Request(url, headers, transformedBody, signal)
: await this.makeFetchRequest(url, headers, transformedBody, signal);
@@ -459,7 +478,8 @@ export class CursorExecutor extends BaseExecutor {
console.log(`[CURSOR BUFFER] Final toolCalls count: ${toolCalls.length}`);
- const message: Record<string, any> = { role: "assistant",
+ const message: Record<string, unknown> = {
+ role: "assistant",
content: totalContent || null,
};
diff --git a/open-sse/executors/default.ts b/open-sse/executors/default.ts
index 3dbe585c..c2b3ef2a 100644
--- a/open-sse/executors/default.ts
+++ b/open-sse/executors/default.ts
@@ -74,7 +74,7 @@ export class DefaultExecutor extends BaseExecutor {
/**
* For compatible providers, ensure the model name sent upstream
- * is the clean model name without any internal routing prefix.
+ * is the clean model name without internal routing prefixes.
* e.g. "openapi-chat-anti/claude-opus-4-6-thinking" → "claude-opus-4-6-thinking"
*/
transformRequest(model, body, stream, credentials) {
diff --git a/open-sse/executors/iflow.ts b/open-sse/executors/iflow.ts
index c5441462..434a2c20 100644
--- a/open-sse/executors/iflow.ts
+++ b/open-sse/executors/iflow.ts
@@ -2,6 +2,11 @@ import crypto from "crypto";
import { BaseExecutor } from "./base.ts";
import { PROVIDERS } from "../config/constants.ts";
+type IFlowCredentials = {
+ apiKey?: string;
+ accessToken?: string;
+};
+
/**
* IFlowExecutor - Executor for iFlow API with HMAC-SHA256 signature.
*
@@ -41,7 +46,7 @@ export class IFlowExecutor extends BaseExecutor {
* Build headers with iFlow-specific HMAC-SHA256 signature.
* Includes session-id, x-iflow-timestamp, and x-iflow-signature.
*/
- buildHeaders(credentials: any, stream = true) {
+ buildHeaders(credentials: IFlowCredentials, stream = true) {
// Generate session ID and timestamp
const sessionID = `session-${crypto.randomUUID()}`;
const timestamp = Date.now();
@@ -82,14 +87,26 @@ export class IFlowExecutor extends BaseExecutor {
/**
* Build URL for iFlow API — uses baseUrl directly.
*/
- buildUrl(model: string, stream: boolean, urlIndex = 0, credentials: any = null) {
+ buildUrl(
+ model: string,
+ stream: boolean,
+ urlIndex = 0,
+ credentials: IFlowCredentials | null = null
+ ) {
+ void model;
+ void stream;
+ void urlIndex;
+ void credentials;
return this.config.baseUrl;
}
/**
* Transform request body (passthrough for iFlow).
*/
- transformRequest(model: string, body: any, stream: boolean, credentials: any) {
+ transformRequest(model: string, body: unknown, stream: boolean, credentials: IFlowCredentials) {
+ void model;
+ void stream;
+ void credentials;
return body;
}
}
diff --git a/open-sse/executors/kiro.ts b/open-sse/executors/kiro.ts
index 7f0b5d36..b9775bdf 100644
--- a/open-sse/executors/kiro.ts
+++ b/open-sse/executors/kiro.ts
@@ -3,6 +3,32 @@ import { PROVIDERS } from "../config/constants.ts";
import { v4 as uuidv4 } from "uuid";
import { refreshKiroToken } from "../services/tokenRefresh.ts";
+type JsonRecord = Record<string, unknown>;
+
+type UsageSummary = {
+ prompt_tokens: number;
+ completion_tokens: number;
+ total_tokens: number;
+};
+
+type KiroStreamState = {
+ endDetected: boolean;
+ finishEmitted: boolean;
+ hasToolCalls: boolean;
+ toolCallIndex: number;
+ seenToolIds: Map<string, number>;
+ totalContentLength?: number;
+ contextUsagePercentage?: number;
+ hasContextUsage?: boolean;
+ hasMeteringEvent?: boolean;
+ usage?: UsageSummary;
+};
+
+type EventFrame = {
+ headers: Record<string, string>;
+ payload: JsonRecord | null;
+};
+
// ── CRC32 lookup table (IEEE polynomial, no dependency) ──
const CRC32_TABLE = new Uint32Array(256);
for (let i = 0; i < 256; i++) {
@@ -13,7 +39,7 @@ for (let i = 0; i < 256; i++) {
CRC32_TABLE[i] = c >>> 0;
}
-function crc32(buf) {
+function crc32(buf: Uint8Array) {
let crc = 0xffffffff;
for (let i = 0; i < buf.length; i++) {
crc = CRC32_TABLE[(crc ^ buf[i]) & 0xff] ^ (crc >>> 8);
@@ -30,7 +56,8 @@ export class KiroExecutor extends BaseExecutor {
super("kiro", PROVIDERS.kiro);
}
- buildHeaders(credentials, stream = true) {
+ buildHeaders(credentials: { accessToken?: string }, stream = true) {
+ void stream;
const headers = {
...this.config.headers,
"Amz-Sdk-Request": "attempt=1; max=3",
@@ -44,14 +71,31 @@ export class KiroExecutor extends BaseExecutor {
return headers;
}
- transformRequest(model, body, stream, credentials) {
+ transformRequest(model: string, body: unknown, stream: boolean, credentials: unknown): unknown {
+ void model;
+ void stream;
+ void credentials;
return body;
}
/**
* Custom execute for Kiro - handles AWS EventStream binary response
*/
- async execute({ model, body, stream, credentials, signal, log }) {
+ async execute({
+ model,
+ body,
+ stream,
+ credentials,
+ signal,
+ log,
+ }: {
+ model: string;
+ body: unknown;
+ stream: boolean;
+ credentials: { accessToken?: string; refreshToken?: string; providerSpecificData?: unknown };
+ signal?: AbortSignal;
+ log?: { error?: (tag: string, message: string) => void } | null;
+ }) {
const url = this.buildUrl(model, stream, 0);
const headers = this.buildHeaders(credentials, stream);
const transformedBody = this.transformRequest(model, body, stream, credentials);
@@ -78,12 +122,13 @@ export class KiroExecutor extends BaseExecutor {
* Transform AWS EventStream binary response to SSE text stream
* Using TransformStream instead of ReadableStream.pull() to avoid Workers timeout
*/
- transformEventStreamToSSE(response, model) {
+ transformEventStreamToSSE(response: Response, model: string) {
let buffer = new Uint8Array(0);
let chunkIndex = 0;
const responseId = `chatcmpl-${Date.now()}`;
const created = Math.floor(Date.now() / 1000);
- const state: Record<string, any> = { endDetected: false,
+ const state: KiroStreamState = {
+ endDetected: false,
finishEmitted: false,
hasToolCalls: false,
toolCallIndex: 0,
@@ -125,7 +170,7 @@ export class KiroExecutor extends BaseExecutor {
const content = event.payload.content;
state.totalContentLength += content.length;
- const chunk: Record<string, any> = {
+ const chunk: JsonRecord = {
id: responseId,
object: "chat.completion.chunk",
created,
@@ -144,7 +189,7 @@ export class KiroExecutor extends BaseExecutor {
// Handle codeEvent
if (eventType === "codeEvent" && event.payload?.content) {
- const chunk: Record<string, any> = {
+ const chunk: JsonRecord = {
id: responseId,
object: "chat.completion.chunk",
created,
@@ -256,7 +301,7 @@ export class KiroExecutor extends BaseExecutor {
// Handle messageStopEvent
if (eventType === "messageStopEvent") {
- const chunk: Record<string, any> = {
+ const chunk: JsonRecord = {
id: responseId,
object: "chat.completion.chunk",
created,
@@ -329,7 +374,7 @@ export class KiroExecutor extends BaseExecutor {
};
}
- const finishChunk: Record<string, any> = {
+ const finishChunk: JsonRecord = {
id: responseId,
object: "chat.completion.chunk",
created,
@@ -398,7 +443,10 @@ export class KiroExecutor extends BaseExecutor {
});
}
- async refreshCredentials(credentials, log) {
+ async refreshCredentials(
+ credentials: { refreshToken?: string; providerSpecificData?: unknown },
+ log?: { error?: (tag: string, message: string) => void } | null
+ ) {
if (!credentials.refreshToken) return null;
try {
@@ -411,7 +459,8 @@ export class KiroExecutor extends BaseExecutor {
return result;
} catch (error) {
- log?.error?.("TOKEN", `Kiro refresh error: ${error.message}`);
+ const err = error instanceof Error ? error : new Error(String(error));
+ log?.error?.("TOKEN", `Kiro refresh error: ${err.message}`);
return null;
}
}
@@ -420,7 +469,7 @@ export class KiroExecutor extends BaseExecutor {
/**
* Parse AWS EventStream frame
*/
-function parseEventFrame(data) {
+function parseEventFrame(data: Uint8Array): EventFrame | null {
try {
const view = new DataView(data.buffer, data.byteOffset);
const totalLength = view.getUint32(0, false);
@@ -447,7 +496,7 @@ function parseEventFrame(data) {
return null;
}
// Parse headers
- const headers = {};
+ const headers: Record<string, string> = {};
let offset = 12; // After prelude
const headerEnd = 12 + headersLength;
@@ -480,7 +529,7 @@ function parseEventFrame(data) {
const payloadStart = 12 + headersLength;
const payloadEnd = data.length - 4; // Exclude message CRC
- let payload = null;
+ let payload: JsonRecord | null = null;
if (payloadEnd > payloadStart) {
const payloadStr = new TextDecoder().decode(data.slice(payloadStart, payloadEnd));
@@ -492,9 +541,10 @@ function parseEventFrame(data) {
try {
payload = JSON.parse(payloadStr);
} catch (parseError) {
+ const err = parseError instanceof Error ? parseError : new Error(String(parseError));
// Log parse error for debugging
console.warn(
- `[Kiro] Failed to parse payload: ${parseError.message} | payload: ${payloadStr.substring(0, 100)}`
+ `[Kiro] Failed to parse payload: ${err.message} | payload: ${payloadStr.substring(0, 100)}`
);
payload = { raw: payloadStr };
}
@@ -502,7 +552,8 @@ function parseEventFrame(data) {
return { headers, payload };
} catch (err) {
- console.warn(`[Kiro] Frame parse error: ${err.message}`);
+ const error = err instanceof Error ? err : new Error(String(err));
+ console.warn(`[Kiro] Frame parse error: ${error.message}`);
return null;
}
}
diff --git a/open-sse/handlers/audioSpeech.ts b/open-sse/handlers/audioSpeech.ts
index 77095edf..3dd37fbb 100644
--- a/open-sse/handlers/audioSpeech.ts
+++ b/open-sse/handlers/audioSpeech.ts
@@ -256,7 +256,7 @@ async function handleTortoiseSpeech(providerConfig, body) {
* @param {Object} options.credentials - Provider credentials { apiKey }
* @returns {Response}
*/
-/** @returns {Promise} */
+/** @returns {Promise<Response>} */
export async function handleAudioSpeech({ body, credentials }) {
if (!body.model) {
return errorResponse(400, "model is required");
@@ -276,7 +276,8 @@ export async function handleAudioSpeech({ body, credentials }) {
}
// Skip credential check for local providers (authType: "none")
- const token = providerConfig.authType === "none" ? null : (credentials?.apiKey || credentials?.accessToken);
+ const token =
+ providerConfig.authType === "none" ? null : credentials?.apiKey || credentials?.accessToken;
if (providerConfig.authType !== "none" && !token) {
return errorResponse(401, `No credentials for speech provider: ${providerId}`);
}
@@ -335,4 +336,4 @@ export async function handleAudioSpeech({ body, credentials }) {
} catch (err) {
return errorResponse(500, `Speech request failed: ${err.message}`);
}
-}
\ No newline at end of file
+}
diff --git a/open-sse/handlers/audioTranscription.ts b/open-sse/handlers/audioTranscription.ts
index 63229878..e459fda2 100644
--- a/open-sse/handlers/audioTranscription.ts
+++ b/open-sse/handlers/audioTranscription.ts
@@ -17,6 +17,11 @@ import { getTranscriptionProvider, parseTranscriptionModel } from "../config/aud
import { buildAuthHeaders } from "../config/registryUtils.ts";
import { errorResponse } from "../utils/error.ts";
+type TranscriptionCredentials = {
+ apiKey?: string;
+ accessToken?: string;
+};
+
/**
* Return a CORS error response from an upstream fetch failure
*/
@@ -37,6 +42,10 @@ function isValidPathSegment(segment: string): boolean {
return !segment.includes("..") && !segment.includes("//");
}
+function getUploadedFileName(file: Blob & { name?: unknown }): string {
+ return typeof file.name === "string" && file.name.length > 0 ? file.name : "audio.wav";
+}
+
/**
* Handle Deepgram transcription (raw binary audio, model via query param)
*/
@@ -144,7 +153,7 @@ async function handleAssemblyAITranscription(providerConfig, file, modelId, toke
*/
async function handleNvidiaTranscription(providerConfig, file, modelId, token) {
const upstreamForm = new FormData();
- upstreamForm.append("file", /** @type {Blob} */ file, /** @type {any} */ file.name || "audio.wav");
+ upstreamForm.append("file", file, getUploadedFileName(file));
upstreamForm.append("model", modelId);
const res = await fetch(providerConfig.baseUrl, {
@@ -203,17 +212,23 @@ async function handleHuggingFaceTranscription(providerConfig, file, modelId, tok
* @param {Object} options.credentials - Provider credentials { apiKey }
* @returns {Response}
*/
-/** @returns {Promise} */
-export async function handleAudioTranscription({ formData, credentials }) {
+export async function handleAudioTranscription({
+ formData,
+ credentials,
+}: {
+ formData: FormData;
+ credentials?: TranscriptionCredentials | null;
+}): Promise<Response> {
const model = formData.get("model");
if (!model) {
return errorResponse(400, "model is required");
}
- const file = formData.get("file");
- if (!file) {
+ const fileEntry = formData.get("file");
+ if (!(fileEntry instanceof Blob)) {
return errorResponse(400, "file is required");
}
+ const file = fileEntry as Blob & { name?: unknown };
const { provider: providerId, model: modelId } = parseTranscriptionModel(model);
const providerConfig = providerId ? getTranscriptionProvider(providerId) : null;
@@ -226,7 +241,8 @@ export async function handleAudioTranscription({ formData, credentials }) {
}
// Skip credential check for local providers (authType: "none")
- const token = providerConfig.authType === "none" ? null : (credentials?.apiKey || credentials?.accessToken);
+ const token =
+ providerConfig.authType === "none" ? null : credentials?.apiKey || credentials?.accessToken;
if (providerConfig.authType !== "none" && !token) {
return errorResponse(401, `No credentials for transcription provider: ${providerId}`);
}
@@ -250,11 +266,7 @@ export async function handleAudioTranscription({ formData, credentials }) {
// Default: OpenAI/Groq/Qwen3-compatible multipart proxy
const upstreamForm = new FormData();
- upstreamForm.append(
- "file",
- /** @type {Blob} */ file,
- /** @type {any} */ file.name || "audio.wav"
- );
+ upstreamForm.append("file", file, getUploadedFileName(file));
upstreamForm.append("model", modelId);
// Forward optional parameters
@@ -290,6 +302,7 @@ export async function handleAudioTranscription({ formData, credentials }) {
headers: { "Content-Type": contentType, "Access-Control-Allow-Origin": getCorsOrigin() },
});
} catch (err) {
- return errorResponse(500, `Transcription request failed: ${err.message}`);
+ const error = err instanceof Error ? err : new Error(String(err));
+ return errorResponse(500, `Transcription request failed: ${error.message}`);
}
-}
\ No newline at end of file
+}
diff --git a/open-sse/handlers/chatCore.ts b/open-sse/handlers/chatCore.ts
index cbbb5a51..5a0fc39e 100644
--- a/open-sse/handlers/chatCore.ts
+++ b/open-sse/handlers/chatCore.ts
@@ -54,7 +54,6 @@ import { createProgressTransform, wantsProgress } from "../utils/progressTracker
* @param {string} options.connectionId - Connection ID for usage tracking
* @param {object} options.apiKeyInfo - API key metadata for usage attribution
*/
-/** @param {any} options */
export async function handleChatCore({
body,
modelInfo,
@@ -135,7 +134,7 @@ export async function handleChatCore({
// Create request logger for this session: sourceFormat_targetFormat_model
const reqLogger = await createRequestLogger(sourceFormat, targetFormat, model);
- // 0. Log client raw request (before any conversion)
+ // 0. Log client raw request (before format conversion)
if (clientRawRequest) {
reqLogger.logClientRawRequest(
clientRawRequest.endpoint,
@@ -495,7 +494,7 @@ export async function handleChatCore({
const buffered = addBufferToUsage(translatedResponse.usage);
translatedResponse.usage = filterUsageForFormat(buffered, sourceFormat);
} else {
- // Fallback: estimate usage when provider didn't return any
+ // Fallback: estimate usage when provider returned no usage block
const contentLength = JSON.stringify(
translatedResponse?.choices?.[0]?.message?.content || ""
).length;
diff --git a/open-sse/handlers/embeddings.ts b/open-sse/handlers/embeddings.ts
index 6310ab6a..9dbf84dd 100644
--- a/open-sse/handlers/embeddings.ts
+++ b/open-sse/handlers/embeddings.ts
@@ -52,7 +52,7 @@ export async function handleEmbedding({ body, credentials, log }) {
}
// Build upstream request
- const upstreamBody: Record<string, any> = {
+ const upstreamBody: Record<string, unknown> = {
model: model,
input: body.input,
};
diff --git a/open-sse/handlers/imageGeneration.ts b/open-sse/handlers/imageGeneration.ts
index b7de3270..a3a417eb 100644
--- a/open-sse/handlers/imageGeneration.ts
+++ b/open-sse/handlers/imageGeneration.ts
@@ -246,7 +246,7 @@ async function handleOpenAIImageGeneration({
};
// Build upstream request (OpenAI-compatible format)
- const upstreamBody: Record<string, any> = {
+ const upstreamBody: Record<string, unknown> = {
model: model,
prompt: body.prompt,
};
@@ -612,7 +612,8 @@ async function handleSDWebUIImageGeneration({ model, provider, providerConfig, b
if (!response.ok) {
const errorText = await response.text();
- if (log) log.error("IMAGE", `${provider} error ${response.status}: ${errorText.slice(0, 200)}`);
+ if (log)
+ log.error("IMAGE", `${provider} error ${response.status}: ${errorText.slice(0, 200)}`);
saveCallLog({
method: "POST",
diff --git a/open-sse/handlers/moderations.ts b/open-sse/handlers/moderations.ts
index 65f408e0..ffc54b9e 100644
--- a/open-sse/handlers/moderations.ts
+++ b/open-sse/handlers/moderations.ts
@@ -16,7 +16,7 @@ import { errorResponse } from "../utils/error.ts";
* @param {Object} options.credentials - Provider credentials { apiKey }
* @returns {Response}
*/
-/** @returns {Promise} */
+/** @returns {Promise<Response>} */
export async function handleModeration({ body, credentials }) {
if (!body.input) {
return errorResponse(400, "input is required");
diff --git a/open-sse/handlers/rerank.ts b/open-sse/handlers/rerank.ts
index 1a0e4ead..5c419cee 100644
--- a/open-sse/handlers/rerank.ts
+++ b/open-sse/handlers/rerank.ts
@@ -70,7 +70,7 @@ function transformResponseFromProvider(providerConfig, data) {
* @param {Object} options.credentials - Provider credentials { apiKey, accessToken }
* @returns {Response}
*/
-/** @returns {Promise} */
+/** @returns {Promise<Response>} */
export async function handleRerank({
model,
query,
diff --git a/open-sse/handlers/responsesHandler.ts b/open-sse/handlers/responsesHandler.ts
index 5340b270..bc74532c 100644
--- a/open-sse/handlers/responsesHandler.ts
+++ b/open-sse/handlers/responsesHandler.ts
@@ -50,7 +50,7 @@ export async function handleResponsesCore({
connectionId,
userAgent: null,
comboName: null,
- } as any);
+ });
if (!result.success || !result.response) {
return result;
diff --git a/open-sse/handlers/sseParser.ts b/open-sse/handlers/sseParser.ts
index 43392904..10f6ec25 100644
--- a/open-sse/handlers/sseParser.ts
+++ b/open-sse/handlers/sseParser.ts
@@ -44,14 +44,15 @@ export function parseSSEToOpenAIResponse(rawSSE, fallbackModel) {
}
}
- const message: Record<string, any> = { role: "assistant",
+ const message: Record<string, unknown> = {
+ role: "assistant",
content: contentParts.join(""),
};
if (reasoningParts.length > 0) {
message.reasoning_content = reasoningParts.join("");
}
- const result: Record<string, any> = {
+ const result: Record<string, unknown> = {
id: first.id || `chatcmpl-${Date.now()}`,
object: "chat.completion",
created: first.created || Math.floor(Date.now() / 1000),
diff --git a/open-sse/mcp-server/__tests__/a2aLifecycle.test.ts b/open-sse/mcp-server/__tests__/a2aLifecycle.test.ts
new file mode 100644
index 00000000..a4bc5c92
--- /dev/null
+++ b/open-sse/mcp-server/__tests__/a2aLifecycle.test.ts
@@ -0,0 +1,56 @@
+import { afterEach, describe, expect, it } from "vitest";
+import { A2ATaskManager } from "../../../src/lib/a2a/taskManager.ts";
+import { executeA2ATaskWithState } from "../../../src/lib/a2a/taskExecution.ts";
+
+const managers: A2ATaskManager[] = [];
+
+function createManager(ttlMinutes = 5) {
+ const manager = new A2ATaskManager(ttlMinutes);
+ managers.push(manager);
+ return manager;
+}
+
+afterEach(() => {
+ while (managers.length > 0) {
+ managers.pop()?.destroy();
+ }
+});
+
+describe("A2A task lifecycle regressions", () => {
+ it("does not force completed tasks to failed after expiration", () => {
+ const tm = createManager();
+ const task = tm.createTask({
+ skill: "smart-routing",
+ messages: [{ role: "user", content: "hello" }],
+ });
+
+ tm.updateTask(task.id, "working");
+ tm.updateTask(task.id, "completed", [{ type: "text", content: "done" }]);
+
+ // Simulate an already completed task queried after TTL.
+ task.expiresAt = new Date(Date.now() - 1_000).toISOString();
+
+ expect(() => tm.getTask(task.id)).not.toThrow();
+ const loaded = tm.getTask(task.id);
+ expect(loaded?.state).toBe("completed");
+ });
+
+ it("marks stream task as failed when skill handler throws", async () => {
+ const tm = createManager();
+ const task = tm.createTask({
+ skill: "smart-routing",
+ messages: [{ role: "user", content: "trigger error" }],
+ });
+ tm.updateTask(task.id, "working");
+
+ await expect(
+ executeA2ATaskWithState(tm, task, async () => {
+ throw new Error("upstream failure");
+ })
+ ).rejects.toThrow("upstream failure");
+
+ const loaded = tm.getTask(task.id);
+ expect(loaded?.state).toBe("failed");
+ expect(loaded?.artifacts.at(-1)).toEqual({ type: "error", content: "upstream failure" });
+ });
+});
diff --git a/open-sse/mcp-server/__tests__/advancedTools.test.ts b/open-sse/mcp-server/__tests__/advancedTools.test.ts
index 6d1e6746..0fedef6a 100644
--- a/open-sse/mcp-server/__tests__/advancedTools.test.ts
+++ b/open-sse/mcp-server/__tests__/advancedTools.test.ts
@@ -7,7 +7,7 @@
import { describe, it, expect, vi, beforeEach } from "vitest";
const mockFetch = vi.fn();
-global.fetch = mockFetch as any;
+vi.stubGlobal("fetch", mockFetch);
describe("MCP Advanced Tools", () => {
beforeEach(() => {
@@ -78,7 +78,7 @@ describe("MCP Advanced Tools", () => {
const response = await mockFetch("http://localhost:20128/api/combos");
const combos = await response.json();
- const combo = combos.find((c: any) => c.id === "test-combo");
+ const combo = combos.find((c: { id?: string }) => c.id === "test-combo");
expect(combo).toBeDefined();
expect(combo.models).toHaveLength(2);
});
diff --git a/open-sse/mcp-server/__tests__/essentialTools.test.ts b/open-sse/mcp-server/__tests__/essentialTools.test.ts
index 45bd571d..96ca4345 100644
--- a/open-sse/mcp-server/__tests__/essentialTools.test.ts
+++ b/open-sse/mcp-server/__tests__/essentialTools.test.ts
@@ -9,7 +9,7 @@ import { MCP_ESSENTIAL_TOOLS } from "../schemas/tools";
// Mock fetch globally
const mockFetch = vi.fn();
-global.fetch = mockFetch as any;
+vi.stubGlobal("fetch", mockFetch);
describe("MCP Essential Tools", () => {
beforeEach(() => {
diff --git a/open-sse/mcp-server/server.ts b/open-sse/mcp-server/server.ts
index f6125c9e..ddee8698 100644
--- a/open-sse/mcp-server/server.ts
+++ b/open-sse/mcp-server/server.ts
@@ -276,8 +276,8 @@ async function handleSwitchCombo(args: { comboId: string; active: boolean }) {
const start = Date.now();
try {
const result = await omniRouteFetch(`/api/combos/${encodeURIComponent(args.comboId)}`, {
- method: "PATCH",
- body: JSON.stringify({ enabled: args.active }),
+ method: "PUT",
+ body: JSON.stringify({ isActive: args.active }),
});
await logToolCall("omniroute_switch_combo", args, result, Date.now() - start, true);
return { content: [{ type: "text" as const, text: JSON.stringify(result, null, 2) }] };
diff --git a/open-sse/mcp-server/tools/advancedTools.ts b/open-sse/mcp-server/tools/advancedTools.ts
index 24fe646e..516f6d60 100644
--- a/open-sse/mcp-server/tools/advancedTools.ts
+++ b/open-sse/mcp-server/tools/advancedTools.ts
@@ -102,11 +102,101 @@ interface BudgetGuardState {
let activeBudgetGuard: BudgetGuardState | null = null;
+type ResilienceProfileConfig = {
+ profiles: {
+ oauth: {
+ transientCooldown: number;
+ rateLimitCooldown: number;
+ maxBackoffLevel: number;
+ circuitBreakerThreshold: number;
+ circuitBreakerReset: number;
+ };
+ apikey: {
+ transientCooldown: number;
+ rateLimitCooldown: number;
+ maxBackoffLevel: number;
+ circuitBreakerThreshold: number;
+ circuitBreakerReset: number;
+ };
+ };
+ defaults: {
+ requestsPerMinute: number;
+ minTimeBetweenRequests: number;
+ concurrentRequests: number;
+ };
+};
+
const RESILIENCE_PROFILES = {
- aggressive: { circuitBreakerThreshold: 3, retryCount: 1, timeoutMs: 10000, fallbackDepth: 5 },
- balanced: { circuitBreakerThreshold: 5, retryCount: 2, timeoutMs: 15000, fallbackDepth: 3 },
- conservative: { circuitBreakerThreshold: 10, retryCount: 3, timeoutMs: 30000, fallbackDepth: 2 },
-} as const;
+ aggressive: {
+ profiles: {
+ oauth: {
+ transientCooldown: 3000,
+ rateLimitCooldown: 30000,
+ maxBackoffLevel: 4,
+ circuitBreakerThreshold: 2,
+ circuitBreakerReset: 30000,
+ },
+ apikey: {
+ transientCooldown: 2000,
+ rateLimitCooldown: 0,
+ maxBackoffLevel: 3,
+ circuitBreakerThreshold: 3,
+ circuitBreakerReset: 15000,
+ },
+ },
+ defaults: {
+ requestsPerMinute: 180,
+ minTimeBetweenRequests: 100,
+ concurrentRequests: 16,
+ },
+ },
+ balanced: {
+ profiles: {
+ oauth: {
+ transientCooldown: 5000,
+ rateLimitCooldown: 60000,
+ maxBackoffLevel: 8,
+ circuitBreakerThreshold: 3,
+ circuitBreakerReset: 60000,
+ },
+ apikey: {
+ transientCooldown: 3000,
+ rateLimitCooldown: 0,
+ maxBackoffLevel: 5,
+ circuitBreakerThreshold: 5,
+ circuitBreakerReset: 30000,
+ },
+ },
+ defaults: {
+ requestsPerMinute: 100,
+ minTimeBetweenRequests: 200,
+ concurrentRequests: 10,
+ },
+ },
+ conservative: {
+ profiles: {
+ oauth: {
+ transientCooldown: 8000,
+ rateLimitCooldown: 120000,
+ maxBackoffLevel: 10,
+ circuitBreakerThreshold: 8,
+ circuitBreakerReset: 120000,
+ },
+ apikey: {
+ transientCooldown: 5000,
+ rateLimitCooldown: 30000,
+ maxBackoffLevel: 8,
+ circuitBreakerThreshold: 8,
+ circuitBreakerReset: 60000,
+ },
+ },
+ defaults: {
+ requestsPerMinute: 60,
+ minTimeBetweenRequests: 350,
+ concurrentRequests: 6,
+ },
+ },
+} satisfies Record<"aggressive" | "balanced" | "conservative", ResilienceProfileConfig>;
const TASK_FITNESS: Record<string, { preferred: string[]; traits: string[] }> = {
coding: { preferred: ["claude", "deepseek", "codex"], traits: ["fast", "code-optimized"] },
@@ -258,19 +348,14 @@ export async function handleSetResilienceProfile(args: {
};
}
- // Apply to OmniRoute via API
- try {
- await apiFetch("/api/resilience", {
- method: "PUT",
- body: JSON.stringify({
- circuitBreakerThreshold: settings.circuitBreakerThreshold,
- retryCount: settings.retryCount,
- timeoutMs: settings.timeoutMs,
- }),
- });
- } catch {
- // Resilience endpoint may not exist yet — return settings anyway
- }
+ // Apply to OmniRoute via API (contract: PATCH + { profiles, defaults })
+ await apiFetch("/api/resilience", {
+ method: "PATCH",
+ body: JSON.stringify({
+ profiles: settings.profiles,
+ defaults: settings.defaults,
+ }),
+ });
const result = { applied: true, profile: args.profile, settings };
@@ -612,12 +697,10 @@ export async function handleGetSessionSnapshot() {
prompt: toNumber(tokenCount.prompt, 0),
completion: toNumber(tokenCount.completion, 0),
},
- topModels: byModel
- .slice(0, 5)
- .map((model) => ({
- model: toString(model.model, "unknown"),
- count: toNumber(model.requests, 0),
- })),
+ topModels: byModel.slice(0, 5).map((model) => ({
+ model: toString(model.model, "unknown"),
+ count: toNumber(model.requests, 0),
+ })),
topProviders: byProvider.slice(0, 5).map((provider) => ({
provider: toString(provider.name, "unknown"),
count: toNumber(provider.requests, 0),
diff --git a/open-sse/services/combo.ts b/open-sse/services/combo.ts
index 81fbf6bf..69f7ea86 100644
--- a/open-sse/services/combo.ts
+++ b/open-sse/services/combo.ts
@@ -221,7 +221,7 @@ function sortModelsByUsage(models, comboName) {
* @param {Object} options.log - Logger object
* @returns {Promise}
*/
-/** @param {any} options */
+/** @param {object} options */
export async function handleComboChat({
body,
combo,
@@ -263,7 +263,7 @@ export async function handleComboChat({
// For weighted + nested, select from original models then fallback sequentially
const selected = selectWeightedModel(models);
orderedModels = orderModelsForWeightedFallback(models, selected);
- // But if any were nested, they are already resolved to flat
+ // If entries were nested, they are already resolved to flat
orderedModels = orderedModels.flatMap((m) => {
const combos = Array.isArray(allCombos) ? allCombos : allCombos?.combos || [];
const nested = combos.find((c) => c.name === m);
diff --git a/open-sse/services/thinkingBudget.ts b/open-sse/services/thinkingBudget.ts
index 97fb3a25..b5f0994b 100644
--- a/open-sse/services/thinkingBudget.ts
+++ b/open-sse/services/thinkingBudget.ts
@@ -129,7 +129,7 @@ export function ensureThinkingConfig(body) {
*
* Pipeline: normalizeThinkingLevel → ensureThinkingConfig → mode processing
*
- * @param {object} body - Request body (any format)
+ * @param {object} body - Request body (supported formats)
* @param {object} [config] - Override config (defaults to stored config)
* @returns {object} Modified body
*/
diff --git a/open-sse/translator/helpers/geminiHelper.ts b/open-sse/translator/helpers/geminiHelper.ts
index 63cee758..af00d0aa 100644
--- a/open-sse/translator/helpers/geminiHelper.ts
+++ b/open-sse/translator/helpers/geminiHelper.ts
@@ -193,7 +193,7 @@ function mergeAllOf(obj) {
if (!obj || typeof obj !== "object") return;
if (obj.allOf && Array.isArray(obj.allOf)) {
- const merged: Record<string, any> = {};
+ const merged: { properties?: Record<string, unknown>; required?: string[] } = {};
for (const item of obj.allOf) {
if (item.properties) {
diff --git a/open-sse/translator/helpers/openaiHelper.ts b/open-sse/translator/helpers/openaiHelper.ts
index a435fd3d..3e50fd24 100644
--- a/open-sse/translator/helpers/openaiHelper.ts
+++ b/open-sse/translator/helpers/openaiHelper.ts
@@ -9,6 +9,7 @@ export const VALID_OPENAI_MESSAGE_TYPES = [
"tool_calls",
"tool_result",
];
+const CLAUDE_TOOL_CHOICE_REQUIRED = "any";
// Filter messages to OpenAI standard format
// Remove: redacted_thinking, and other non-OpenAI blocks
@@ -129,10 +130,10 @@ export function filterToOpenAIFormat(body) {
// Normalize tool_choice to OpenAI format
if (body.tool_choice && typeof body.tool_choice === "object") {
const choice = body.tool_choice;
- // Claude format: {type: "auto|any|tool", name?: "..."}
+ // Claude format: {type: "auto|any|tool", name?: "..."}
if (choice.type === "auto") {
body.tool_choice = "auto";
- } else if (choice.type === "any") {
+ } else if (choice.type === CLAUDE_TOOL_CHOICE_REQUIRED) {
body.tool_choice = "required";
} else if (choice.type === "tool" && choice.name) {
body.tool_choice = { type: "function", function: { name: choice.name } };
diff --git a/open-sse/translator/helpers/responsesApiHelper.ts b/open-sse/translator/helpers/responsesApiHelper.ts
index 012b140b..0d65111e 100644
--- a/open-sse/translator/helpers/responsesApiHelper.ts
+++ b/open-sse/translator/helpers/responsesApiHelper.ts
@@ -25,7 +25,7 @@ export function convertResponsesApiFormat(body) {
const itemType = item.type || (item.role ? "message" : null);
if (itemType === "message") {
- // Flush any pending assistant message with tool calls
+ // Flush each pending assistant message with tool calls
if (currentAssistantMsg) {
result.messages.push(currentAssistantMsg);
currentAssistantMsg = null;
diff --git a/open-sse/translator/request/antigravity-to-openai.ts b/open-sse/translator/request/antigravity-to-openai.ts
index 8fe9e24b..67586499 100644
--- a/open-sse/translator/request/antigravity-to-openai.ts
+++ b/open-sse/translator/request/antigravity-to-openai.ts
@@ -2,11 +2,18 @@ import { register } from "../registry.ts";
import { FORMATS } from "../formats.ts";
import { adjustMaxTokens } from "../helpers/maxTokensHelper.ts";
+type JsonRecord = Record<string, unknown>;
+
// Convert Antigravity request to OpenAI format
// Antigravity body: { project, model, userAgent, requestType, requestId, request: { contents, systemInstruction, tools, toolConfig, generationConfig, sessionId } }
export function antigravityToOpenAIRequest(model, body, stream) {
const req = body.request || body;
- const result: Record<string, any> = {
+ const result: {
+ model: string;
+ messages: JsonRecord[];
+ stream: unknown;
+ [key: string]: unknown;
+ } = {
model: model,
messages: [],
stream: stream,
@@ -190,7 +197,7 @@ function convertContent(content) {
// Assistant with tool calls
if (toolCalls.length > 0) {
- const msg: Record<string, any> = { role: "assistant" };
+ const msg: JsonRecord = { role: "assistant" };
if (textParts.length > 0) {
msg.content =
textParts.length === 1 && textParts[0].type === "text" ? textParts[0].text : textParts;
@@ -204,7 +211,7 @@ function convertContent(content) {
// Regular message
if (textParts.length > 0 || reasoningContent) {
- const msg: Record<string, any> = { role };
+ const msg: JsonRecord = { role };
if (textParts.length > 0) {
msg.content =
textParts.length === 1 && textParts[0].type === "text" ? textParts[0].text : textParts;
diff --git a/open-sse/translator/request/claude-to-gemini.ts b/open-sse/translator/request/claude-to-gemini.ts
index e02f2ceb..e1a4aa75 100644
--- a/open-sse/translator/request/claude-to-gemini.ts
+++ b/open-sse/translator/request/claude-to-gemini.ts
@@ -9,7 +9,14 @@ import { DEFAULT_THINKING_GEMINI_SIGNATURE } from "../../config/defaultThinkingS
* skipping the OpenAI hub intermediate step.
*/
export function claudeToGeminiRequest(model, body, stream) {
- const result: Record<string, any> = {
+ const result: {
+ model: string;
+ contents: Array<Record<string, unknown>>;
+ generationConfig: Record<string, unknown>;
+ safetySettings: unknown;
+ systemInstruction?: { role: string; parts: Array<{ text: string }> };
+ tools?: Array<{ functionDeclarations: Array<Record<string, unknown>> }>;
+ } = {
model: model,
contents: [],
generationConfig: {},
diff --git a/open-sse/translator/request/claude-to-openai.ts b/open-sse/translator/request/claude-to-openai.ts
index d511dca6..fcdb8ff1 100644
--- a/open-sse/translator/request/claude-to-openai.ts
+++ b/open-sse/translator/request/claude-to-openai.ts
@@ -2,9 +2,17 @@ import { register } from "../registry.ts";
import { FORMATS } from "../formats.ts";
import { adjustMaxTokens } from "../helpers/maxTokensHelper.ts";
+type JsonRecord = Record<string, unknown>;
+const TOOL_CHOICE_ANY = "any";
+
// Convert Claude request to OpenAI format
export function claudeToOpenAIRequest(model, body, stream) {
- const result: Record<string, any> = {
+ const result: {
+ model: string;
+ messages: JsonRecord[];
+ stream: unknown;
+ [key: string]: unknown;
+ } = {
model: model,
messages: [],
stream: stream,
@@ -186,7 +194,7 @@ function convertClaudeMessage(msg) {
// If has tool calls, return assistant message with tool_calls
if (toolCalls.length > 0) {
- const result: Record<string, any> = { role: "assistant" };
+ const result: JsonRecord = { role: "assistant" };
if (parts.length > 0) {
result.content = parts.length === 1 && parts[0].type === "text" ? parts[0].text : parts;
}
@@ -219,7 +227,7 @@ function convertToolChoice(choice) {
switch (choice.type) {
case "auto":
return "auto";
- case "any":
+ case TOOL_CHOICE_ANY:
return "required";
case "tool":
return { type: "function", function: { name: choice.name } };
diff --git a/open-sse/translator/request/gemini-to-openai.ts b/open-sse/translator/request/gemini-to-openai.ts
index 751cbca6..7208ad66 100644
--- a/open-sse/translator/request/gemini-to-openai.ts
+++ b/open-sse/translator/request/gemini-to-openai.ts
@@ -4,7 +4,15 @@ import { adjustMaxTokens } from "../helpers/maxTokensHelper.ts";
// Convert Gemini request to OpenAI format
export function geminiToOpenAIRequest(model, body, stream) {
- const result: Record<string, any> = {
+ const result: {
+ model: string;
+ messages: Array<Record<string, unknown>>;
+ stream: boolean;
+ max_tokens?: number;
+ temperature?: number;
+ top_p?: number;
+ tools?: Array<Record<string, unknown>>;
+ } = {
model: model,
messages: [],
stream: stream,
@@ -116,7 +124,11 @@ function convertGeminiContent(content) {
}
if (toolCalls.length > 0) {
- const result: Record<string, any> = { role: "assistant" };
+ const result: {
+ role: string;
+ content?: string | Array<Record<string, unknown>>;
+ tool_calls?: Array<Record<string, unknown>>;
+ } = { role: "assistant" };
if (parts.length > 0) {
result.content = parts.length === 1 ? parts[0].text : parts;
}
diff --git a/open-sse/translator/request/openai-responses.ts b/open-sse/translator/request/openai-responses.ts
new file mode 100644
index 00000000..2091bf9f
--- /dev/null
+++ b/open-sse/translator/request/openai-responses.ts
@@ -0,0 +1,388 @@
+/**
+ * Translator: OpenAI Responses API -> OpenAI Chat Completions
+ *
+ * Responses API uses: { input: [...], instructions: "..." }
+ * Chat API uses: { messages: [...] }
+ */
+import { register } from "../registry.ts";
+import { FORMATS } from "../formats.ts";
+
+type JsonRecord = Record<string, unknown>;
+
+const UNSUPPORTED_TOOLS = ["file_search", "code_interpreter", "web_search_preview"];
+
+function toRecord(value: unknown): JsonRecord {
+ return value && typeof value === "object" && !Array.isArray(value) ? (value as JsonRecord) : {};
+}
+
+function toArray(value: unknown): unknown[] {
+ return Array.isArray(value) ? value : [];
+}
+
+function toString(value: unknown, fallback = ""): string {
+ return typeof value === "string" ? value : fallback;
+}
+
+function unsupportedFeature(message: string): Error & { statusCode: number; errorType: string } {
+ const error = new Error(message) as Error & { statusCode: number; errorType: string };
+ error.statusCode = 400;
+ error.errorType = "unsupported_feature";
+ return error;
+}
+
+/**
+ * Convert OpenAI Responses API request to OpenAI Chat Completions format
+ */
+export function openaiResponsesToOpenAIRequest(
+ model: unknown,
+ body: unknown,
+ stream: unknown,
+ credentials: unknown
+): unknown {
+ void model;
+ void stream;
+ void credentials;
+
+ const root = toRecord(body);
+ if (root.input === undefined) return body;
+
+ // Validate unsupported features - return clear errors instead of silent failure
+ const tools = toArray(root.tools);
+ if (tools.length > 0) {
+ for (const toolValue of tools) {
+ const tool = toRecord(toolValue);
+ if (UNSUPPORTED_TOOLS.includes(toString(tool.type))) {
+ throw unsupportedFeature(
+ `Unsupported Responses API feature: ${toString(tool.type)} tool type is not supported by omniroute`
+ );
+ }
+ }
+ }
+
+ if (root.background) {
+ throw unsupportedFeature(
+ "Unsupported Responses API feature: background mode is not supported by omniroute"
+ );
+ }
+
+ const result: JsonRecord = { ...root };
+ const messages: JsonRecord[] = [];
+ result.messages = messages;
+
+ // Convert instructions to system message
+ if (typeof root.instructions === "string" && root.instructions.length > 0) {
+ messages.push({ role: "system", content: root.instructions });
+ }
+
+ // Group items by conversation turn
+ let currentAssistantMsg: JsonRecord | null = null;
+ let pendingToolResults: JsonRecord[] = [];
+
+ const inputItems = toArray(root.input);
+ for (const itemValue of inputItems) {
+ const item = toRecord(itemValue);
+
+ // Determine item type - Droid CLI sends role-based items without 'type' field
+ // Fallback: if no type but has role property, treat as message
+ const itemType = toString(item.type) || (item.role ? "message" : "");
+
+ if (itemType === "message") {
+ // Flush pending assistant message with tool calls
+ if (currentAssistantMsg) {
+ messages.push(currentAssistantMsg);
+ currentAssistantMsg = null;
+ }
+
+ // Flush pending tool results
+ if (pendingToolResults.length > 0) {
+ for (const toolResult of pendingToolResults) {
+ messages.push(toolResult);
+ }
+ pendingToolResults = [];
+ }
+
+ // Convert content: input_text -> text, output_text -> text
+ const content = Array.isArray(item.content)
+ ? item.content.map((contentValue) => {
+ const contentItem = toRecord(contentValue);
+ if (contentItem.type === "input_text") {
+ return { type: "text", text: toString(contentItem.text) };
+ }
+ if (contentItem.type === "output_text") {
+ return { type: "text", text: toString(contentItem.text) };
+ }
+ return contentValue;
+ })
+ : item.content;
+
+ messages.push({ role: toString(item.role), content });
+ continue;
+ }
+
+ if (itemType === "function_call") {
+ // Start or append assistant message with tool_calls
+ if (!currentAssistantMsg) {
+ currentAssistantMsg = {
+ role: "assistant",
+ content: null,
+ tool_calls: [],
+ };
+ }
+
+ const toolCalls = Array.isArray(currentAssistantMsg.tool_calls)
+ ? currentAssistantMsg.tool_calls
+ : [];
+ toolCalls.push({
+ id: toString(item.call_id),
+ type: "function",
+ function: {
+ name: toString(item.name),
+ arguments: item.arguments,
+ },
+ });
+ currentAssistantMsg.tool_calls = toolCalls;
+ continue;
+ }
+
+ if (itemType === "function_call_output") {
+ // Flush assistant message first if present
+ if (currentAssistantMsg) {
+ messages.push(currentAssistantMsg);
+ currentAssistantMsg = null;
+ }
+
+ // Flush pending tool results first
+ if (pendingToolResults.length > 0) {
+ for (const toolResult of pendingToolResults) {
+ messages.push(toolResult);
+ }
+ pendingToolResults = [];
+ }
+
+ // Add tool result immediately
+ messages.push({
+ role: "tool",
+ tool_call_id: toString(item.call_id),
+ content: typeof item.output === "string" ? item.output : JSON.stringify(item.output),
+ });
+ continue;
+ }
+
+ if (itemType === "reasoning") {
+ // Skip reasoning items - they are display-only metadata
+ continue;
+ }
+ }
+
+ // Flush remainder
+ if (currentAssistantMsg) {
+ messages.push(currentAssistantMsg);
+ }
+ if (pendingToolResults.length > 0) {
+ for (const toolResult of pendingToolResults) {
+ messages.push(toolResult);
+ }
+ }
+
+ // Convert tools format
+ if (Array.isArray(root.tools)) {
+ result.tools = root.tools.map((toolValue) => {
+ const tool = toRecord(toolValue);
+ if (tool.function) return toolValue;
+ return {
+ type: "function",
+ function: {
+ name: toString(tool.name),
+ description: toString(tool.description),
+ parameters: tool.parameters,
+ strict: tool.strict,
+ },
+ };
+ });
+ }
+
+ // Cleanup Responses API specific fields
+ delete result.input;
+ delete result.instructions;
+ delete result.include;
+ delete result.prompt_cache_key;
+ delete result.store;
+ delete result.reasoning;
+
+ return result;
+}
+
+/**
+ * Convert OpenAI Chat Completions to OpenAI Responses API format
+ */
+export function openaiToOpenAIResponsesRequest(
+ model: unknown,
+ body: unknown,
+ stream: unknown,
+ credentials: unknown
+): unknown {
+ void stream;
+ void credentials;
+
+ const root = toRecord(body);
+ const result: JsonRecord = {
+ model,
+ input: [],
+ stream: true,
+ store: false,
+ };
+
+ const input = result.input as JsonRecord[];
+
+ // Extract first system message as instructions
+ let hasSystemMessage = false;
+ const messages = toArray(root.messages);
+
+ for (const messageValue of messages) {
+ const msg = toRecord(messageValue);
+ const role = toString(msg.role);
+
+ if (role === "system") {
+ if (!hasSystemMessage) {
+ result.instructions = typeof msg.content === "string" ? msg.content : "";
+ hasSystemMessage = true;
+ }
+ continue;
+ }
+
+ // Convert user messages
+ if (role === "user") {
+ const content =
+ typeof msg.content === "string"
+ ? [{ type: "input_text", text: msg.content }]
+ : Array.isArray(msg.content)
+ ? msg.content.map((contentValue) => {
+ const contentItem = toRecord(contentValue);
+ if (contentItem.type === "text") {
+ return { type: "input_text", text: toString(contentItem.text) };
+ }
+ if (contentItem.type === "image_url") return contentValue; // passthrough images
+ return contentValue;
+ })
+ : [{ type: "input_text", text: "" }];
+
+ input.push({
+ type: "message",
+ role: "user",
+ content,
+ });
+ }
+
+ // Convert assistant messages
+ if (role === "assistant") {
+ // Add reasoning content before assistant output
+ if (msg.reasoning_content) {
+ input.push({
+ type: "reasoning",
+ id: `reasoning_${input.length}`,
+ summary: [{ type: "summary_text", text: toString(msg.reasoning_content) }],
+ });
+ }
+
+ // Handle thinking blocks in array content
+ if (Array.isArray(msg.content)) {
+ for (const blockValue of msg.content) {
+ const block = toRecord(blockValue);
+ if (block.type === "thinking" || block.type === "redacted_thinking") {
+ input.push({
+ type: "reasoning",
+ id: `reasoning_${input.length}`,
+ summary: [
+ { type: "summary_text", text: toString(block.thinking || block.data, "...") },
+ ],
+ });
+ }
+ }
+ }
+
+ // Build assistant output content
+ const outputContent: unknown[] = [];
+ if (typeof msg.content === "string" && msg.content) {
+ outputContent.push({ type: "output_text", text: msg.content });
+ } else if (Array.isArray(msg.content)) {
+ for (const contentValue of msg.content) {
+ const contentItem = toRecord(contentValue);
+ if (contentItem.type === "text" && contentItem.text) {
+ outputContent.push({ type: "output_text", text: toString(contentItem.text) });
+ } else if (contentItem.type === "thinking" || contentItem.type === "redacted_thinking") {
+ // Reasoning already moved above
+ continue;
+ } else {
+ outputContent.push(contentValue);
+ }
+ }
+ }
+
+ // Only add assistant message if content exists
+ if (outputContent.length > 0) {
+ input.push({
+ type: "message",
+ role: "assistant",
+ content: outputContent,
+ });
+ }
+
+ // Convert tool_calls to function_call items
+ if (Array.isArray(msg.tool_calls)) {
+ for (const toolCallValue of msg.tool_calls) {
+ const toolCall = toRecord(toolCallValue);
+ const fn = toRecord(toolCall.function);
+ input.push({
+ type: "function_call",
+ call_id: toString(toolCall.id),
+ name: toString(fn.name),
+ arguments: toString(fn.arguments, "{}"),
+ });
+ }
+ }
+ }
+
+ // Convert tool results
+ if (role === "tool") {
+ input.push({
+ type: "function_call_output",
+ call_id: toString(msg.tool_call_id),
+ output: msg.content,
+ });
+ }
+ }
+
+ // If no system message, keep empty instructions
+ if (!hasSystemMessage) {
+ result.instructions = "";
+ }
+
+ // Convert tools format
+ if (Array.isArray(root.tools)) {
+ result.tools = root.tools.map((toolValue) => {
+ const tool = toRecord(toolValue);
+ if (tool.type === "function") {
+ const fn = toRecord(tool.function);
+ return {
+ type: "function",
+ name: toString(fn.name),
+ description: toString(fn.description),
+ parameters: fn.parameters,
+ strict: fn.strict,
+ };
+ }
+ return toolValue;
+ });
+ }
+
+ // Pass through relevant fields
+ if (root.temperature !== undefined) result.temperature = root.temperature;
+ if (root.max_tokens !== undefined) result.max_tokens = root.max_tokens;
+ if (root.top_p !== undefined) result.top_p = root.top_p;
+
+ return result;
+}
+
+// Register both directions
+register(FORMATS.OPENAI_RESPONSES, FORMATS.OPENAI, openaiResponsesToOpenAIRequest, null);
+register(FORMATS.OPENAI, FORMATS.OPENAI_RESPONSES, openaiToOpenAIResponsesRequest, null);
diff --git a/open-sse/translator/request/openai-to-claude.ts b/open-sse/translator/request/openai-to-claude.ts
index 4c0f2fb1..2d26423d 100644
--- a/open-sse/translator/request/openai-to-claude.ts
+++ b/open-sse/translator/request/openai-to-claude.ts
@@ -6,15 +6,45 @@ import { DEFAULT_THINKING_CLAUDE_SIGNATURE } from "../../config/defaultThinkingS
// Prefix for Claude OAuth tool names to avoid conflicts
const CLAUDE_OAUTH_TOOL_PREFIX = "proxy_";
+const CLAUDE_TOOL_CHOICE_REQUIRED = "any";
+
+type ClaudeContentBlock = Record<string, unknown>;
+type ClaudeMessage = {
+ role: string;
+ content: ClaudeContentBlock[];
+};
+type ClaudeSystemBlock = {
+ type: string;
+ text: string;
+ cache_control?: { type: string; ttl?: string };
+};
+type ClaudeTool = {
+ name: string;
+ description: string;
+ input_schema: Record<string, unknown>;
+ cache_control?: { type: string; ttl?: string };
+};
// Convert OpenAI request to Claude format
export function openaiToClaudeRequest(model, body, stream) {
// Tool name mapping for Claude OAuth (capitalizedName → originalName)
const toolNameMap = new Map();
- const result: Record<string, any> = {
+ const result: {
+ [key: string]: unknown;
+ model: string;
+ max_tokens: number;
+ stream: boolean;
+ messages: ClaudeMessage[];
+ system?: ClaudeSystemBlock[];
+ tools?: ClaudeTool[];
+ tool_choice?: Record<string, unknown> | string;
+ thinking?: Record<string, unknown>;
+ _toolNameMap?: Map<string, string>;
+ } = {
model: model,
max_tokens: adjustMaxTokens(body),
stream: stream,
+ messages: [],
};
// Temperature
@@ -23,7 +53,6 @@ export function openaiToClaudeRequest(model, body, stream) {
}
// Messages
- result.messages = [];
const systemParts = [];
if (body.messages && Array.isArray(body.messages)) {
@@ -41,8 +70,8 @@ export function openaiToClaudeRequest(model, body, stream) {
// Process messages with merging logic
// CRITICAL: tool_result must be in separate message immediately after tool_use
- let currentRole = undefined;
- let currentParts = [];
+ let currentRole: string | undefined = undefined;
+ let currentParts: ClaudeContentBlock[] = [];
const flushCurrentMessage = () => {
if (currentRole && currentParts.length > 0) {
@@ -269,7 +298,7 @@ function convertOpenAIToolChoice(choice) {
if (!choice) return { type: "auto" };
if (typeof choice === "object" && choice.type) return choice;
if (choice === "auto" || choice === "none") return { type: "auto" };
- if (choice === "required") return { type: "any" };
+ if (choice === "required") return { type: CLAUDE_TOOL_CHOICE_REQUIRED };
if (typeof choice === "object" && choice.function) {
return { type: "tool", name: choice.function.name };
}
diff --git a/open-sse/translator/request/openai-to-cursor.ts b/open-sse/translator/request/openai-to-cursor.ts
index 28164bd9..afc0c0b1 100644
--- a/open-sse/translator/request/openai-to-cursor.ts
+++ b/open-sse/translator/request/openai-to-cursor.ts
@@ -66,7 +66,12 @@ function convertMessages(messages) {
// Keep tool_calls structure for assistant messages
if (msg.role === "assistant" && msg.tool_calls && msg.tool_calls.length > 0) {
- const assistantMsg: Record<string, any> = { role: "assistant" };
+ const assistantMsg: {
+ role: string;
+ content?: string;
+ tool_calls?: unknown;
+ tool_results?: Array<Record<string, unknown>>;
+ } = { role: "assistant" };
if (content) {
assistantMsg.content = content;
}
@@ -80,7 +85,11 @@ function convertMessages(messages) {
result.push(assistantMsg);
} else if (content || pendingToolResults.length > 0) {
- const msgObj: Record<string, any> = { role: msg.role, content: content || "" };
+ const msgObj: {
+ role: string;
+ content: string;
+ tool_results?: Array<Record<string, unknown>>;
+ } = { role: msg.role, content: content || "" };
// Attach pending tool results to this message
if (pendingToolResults.length > 0) {
diff --git a/open-sse/translator/request/openai-to-gemini.ts b/open-sse/translator/request/openai-to-gemini.ts
index 0694e527..6db425a4 100644
--- a/open-sse/translator/request/openai-to-gemini.ts
+++ b/open-sse/translator/request/openai-to-gemini.ts
@@ -19,9 +19,59 @@ import {
cleanJSONSchemaForAntigravity,
} from "../helpers/geminiHelper.ts";
+type GeminiPart = Record<string, unknown>;
+type GeminiContent = { role: string; parts: GeminiPart[] };
+
+type GeminiGenerationConfig = {
+ temperature?: unknown;
+ topP?: unknown;
+ topK?: unknown;
+ maxOutputTokens?: unknown;
+ thinkingConfig?: {
+ thinkingBudget: number;
+ include_thoughts: boolean;
+ };
+ responseMimeType?: string;
+ responseSchema?: unknown;
+};
+
+type GeminiFunctionDeclaration = {
+ name: string;
+ description: string;
+ parameters: unknown;
+};
+
+type GeminiRequest = {
+ model: string;
+ contents: GeminiContent[];
+ generationConfig: GeminiGenerationConfig;
+ safetySettings: unknown;
+ systemInstruction?: GeminiContent;
+ tools?: Array<{ functionDeclarations: GeminiFunctionDeclaration[] }>;
+};
+
+type CloudCodeEnvelope = {
+ project: string;
+ model: string;
+ userAgent: string;
+ requestId: string;
+ requestType?: string;
+ request: {
+ sessionId: string;
+ contents: GeminiContent[];
+ systemInstruction?: GeminiContent;
+ generationConfig: GeminiGenerationConfig;
+ tools?: Array<{ functionDeclarations: GeminiFunctionDeclaration[] }>;
+ safetySettings?: unknown;
+ toolConfig?: {
+ functionCallingConfig: { mode: string };
+ };
+ };
+};
+
// Core: Convert OpenAI request to Gemini format (base for all variants)
function openaiToGeminiBase(model, body, stream) {
- const result: Record = {
+ const result: GeminiRequest = {
model: model,
contents: [],
generationConfig: {},
@@ -283,7 +333,7 @@ function wrapInCloudCodeEnvelope(model, geminiCLI, credentials = null, isAntigra
const cleanModel = model.includes("/") ? model.split("/").pop()! : model;
- const envelope: Record = {
+ const envelope: CloudCodeEnvelope = {
project: projectId,
model: cleanModel,
userAgent: isAntigravity ? "antigravity" : "gemini-cli",
@@ -302,7 +352,7 @@ function wrapInCloudCodeEnvelope(model, geminiCLI, credentials = null, isAntigra
envelope.requestType = "agent";
// Inject required default system prompt for Antigravity
- const defaultPart: Record = { text: ANTIGRAVITY_DEFAULT_SYSTEM };
+ const defaultPart: GeminiPart = { text: ANTIGRAVITY_DEFAULT_SYSTEM };
if (envelope.request.systemInstruction?.parts) {
envelope.request.systemInstruction.parts.unshift(defaultPart);
} else {
@@ -336,7 +386,7 @@ function wrapInCloudCodeEnvelopeForClaude(model, claudeRequest, credentials = nu
const cleanModel = model.includes("/") ? model.split("/").pop()! : model;
- const envelope: Record = {
+ const envelope: CloudCodeEnvelope = {
project: projectId,
model: cleanModel,
userAgent: "antigravity",
diff --git a/open-sse/translator/request/openai-to-kiro.ts b/open-sse/translator/request/openai-to-kiro.ts
index 2697bd88..626c77a7 100644
--- a/open-sse/translator/request/openai-to-kiro.ts
+++ b/open-sse/translator/request/openai-to-kiro.ts
@@ -22,7 +22,16 @@ function convertMessages(messages, tools, model) {
const flushPending = () => {
if (currentRole === "user") {
const content = pendingUserContent.join("\n\n").trim() || "continue";
- const userMsg: Record = {
+ const userMsg: {
+ userInputMessage: {
+ content: string;
+ modelId: string;
+ userInputMessageContext?: {
+ toolResults?: Array>;
+ tools?: Array>;
+ };
+ };
+ } = {
userInputMessage: {
content: content,
modelId: "",
@@ -255,7 +264,27 @@ export function buildKiroPayload(model, body, stream, credentials) {
const timestamp = new Date().toISOString();
finalContent = `[Context: Current time is ${timestamp}]\n\n${finalContent}`;
- const payload: Record = {
+ const payload: {
+ conversationState: {
+ chatTriggerType: string;
+ conversationId: string;
+ currentMessage: {
+ userInputMessage: {
+ content: string;
+ modelId: string;
+ origin: string;
+ userInputMessageContext?: Record;
+ };
+ };
+ history: unknown[];
+ };
+ profileArn?: string;
+ inferenceConfig?: {
+ maxTokens?: number;
+ temperature?: number;
+ topP?: number;
+ };
+ } = {
conversationState: {
chatTriggerType: "MANUAL",
conversationId: uuidv4(),
diff --git a/open-sse/translator/response/claude-to-openai.ts b/open-sse/translator/response/claude-to-openai.ts
index 373f5be5..0dbc3e6a 100644
--- a/open-sse/translator/response/claude-to-openai.ts
+++ b/open-sse/translator/response/claude-to-openai.ts
@@ -1,6 +1,16 @@
import { register } from "../registry.ts";
import { FORMATS } from "../formats.ts";
+type OpenAIUsage = {
+ prompt_tokens: number;
+ completion_tokens: number;
+ total_tokens: number;
+ prompt_tokens_details?: {
+ cached_tokens?: number;
+ cache_creation_tokens?: number;
+ };
+};
+
// Create OpenAI chunk helper
function createChunk(state, delta, finishReason = null) {
return {
@@ -133,7 +143,18 @@ export function claudeToOpenAIResponse(chunk, state) {
if (chunk.delta?.stop_reason) {
state.finishReason = convertStopReason(chunk.delta.stop_reason);
- const finalChunk: Record = {
+ const finalChunk: {
+ id: string;
+ object: string;
+ created: number;
+ model: string;
+ choices: Array<{
+ index: number;
+ delta: { content: string };
+ finish_reason: string | null;
+ }>;
+ usage?: OpenAIUsage;
+ } = {
id: `chatcmpl-${state.messageId}`,
object: "chat.completion.chunk",
created: Math.floor(Date.now() / 1000),
diff --git a/open-sse/translator/response/gemini-to-openai.ts b/open-sse/translator/response/gemini-to-openai.ts
index 9d6a4700..8db3d51c 100644
--- a/open-sse/translator/response/gemini-to-openai.ts
+++ b/open-sse/translator/response/gemini-to-openai.ts
@@ -226,7 +226,7 @@ export function geminiToOpenAIResponse(chunk, state) {
finishReason = "tool_calls";
}
- const finalChunk: Record = {
+ const finalChunk: Record = {
id: `chatcmpl-${state.messageId}`,
object: "chat.completion.chunk",
created: Math.floor(Date.now() / 1000),
diff --git a/open-sse/translator/response/kiro-to-openai.ts b/open-sse/translator/response/kiro-to-openai.ts
index c8fad48d..16ee443c 100644
--- a/open-sse/translator/response/kiro-to-openai.ts
+++ b/open-sse/translator/response/kiro-to-openai.ts
@@ -155,7 +155,7 @@ export function convertKiroToOpenAI(chunk, state) {
if (eventType === "messageStopEvent" || eventType === "done" || data.messageStopEvent) {
state.finishReason = "stop"; // Mark for usage injection in stream.js
- const openaiChunk: Record = {
+ const openaiChunk: Record = {
id: state.responseId,
object: "chat.completion.chunk",
created: state.created,
diff --git a/open-sse/translator/response/openai-responses.ts b/open-sse/translator/response/openai-responses.ts
index 016d7385..6847ae70 100644
--- a/open-sse/translator/response/openai-responses.ts
+++ b/open-sse/translator/response/openai-responses.ts
@@ -524,7 +524,7 @@ export function openaiResponsesToOpenAIResponse(chunk, state) {
const reason = hadToolCalls ? "tool_calls" : "stop";
state.finishReason = reason; // Mark for usage injection in stream.js
- const finalChunk: Record = {
+ const finalChunk: Record = {
id: state.chatId,
object: "chat.completion.chunk",
created: state.created,
diff --git a/open-sse/translator/response/openai-to-antigravity.ts b/open-sse/translator/response/openai-to-antigravity.ts
index a8a7d859..f502bdf1 100644
--- a/open-sse/translator/response/openai-to-antigravity.ts
+++ b/open-sse/translator/response/openai-to-antigravity.ts
@@ -1,6 +1,22 @@
import { register } from "../registry.ts";
import { FORMATS } from "../formats.ts";
+type AntigravityCandidate = {
+ content: {
+ role: string;
+ parts: Array>;
+ };
+ finishReason?: string;
+};
+
+type AntigravityUsageMetadata = {
+ promptTokenCount: number;
+ candidatesTokenCount: number;
+ totalTokenCount: number;
+ thoughtsTokenCount?: number;
+ cachedContentTokenCount?: number;
+};
+
// Convert OpenAI SSE chunk to Antigravity SSE format
// Real Antigravity format:
// data: {"response":{"candidates":[{"content":{"role":"model","parts":[...]}, "finishReason":"STOP"}], "usageMetadata":{...}, "modelVersion":"...", "responseId":"..."}}
@@ -81,7 +97,7 @@ export function openaiToAntigravityResponse(chunk, state) {
}
// Build candidate
- const candidate: Record = { content: { role: "model", parts } };
+ const candidate: AntigravityCandidate = { content: { role: "model", parts } };
// Finish reason mapping
if (finishReason) {
@@ -95,7 +111,12 @@ export function openaiToAntigravityResponse(chunk, state) {
}
// Build response
- const response: Record = {
+ const response: {
+ candidates: AntigravityCandidate[];
+ modelVersion: string;
+ responseId: string;
+ usageMetadata?: AntigravityUsageMetadata;
+ } = {
candidates: [candidate],
modelVersion: state._modelVersion,
responseId: state._responseId,
diff --git a/open-sse/utils/bypassHandler.ts b/open-sse/utils/bypassHandler.ts
index b4087b90..6ddf9c8a 100644
--- a/open-sse/utils/bypassHandler.ts
+++ b/open-sse/utils/bypassHandler.ts
@@ -12,7 +12,7 @@ import { formatSSE } from "./stream.ts";
* 1. The bypass patterns (title extraction, warmup, count) are specific to
* Claude CLI's internal protocol — other clients don't send these patterns.
* 2. False-positive bypasses would silently break real requests.
- * 3. The SKIP_PATTERNS config allows user-defined patterns for any client.
+ * 3. The SKIP_PATTERNS config allows user-defined patterns for every client.
*
* @param {object} body - Request body
* @param {string} model - Model name
diff --git a/open-sse/utils/comfyuiClient.ts b/open-sse/utils/comfyuiClient.ts
index fbff0e23..63198c86 100644
--- a/open-sse/utils/comfyuiClient.ts
+++ b/open-sse/utils/comfyuiClient.ts
@@ -5,14 +5,33 @@
* poll for completion, and fetch output files from a ComfyUI server.
*/
+type JsonRecord = Record;
+
+type ComfyOutputFile = {
+ filename: string;
+ subfolder?: string;
+ type?: string;
+};
+
+type ComfyNodeOutput = {
+ images?: ComfyOutputFile[];
+ gifs?: ComfyOutputFile[];
+ audio?: ComfyOutputFile[];
+};
+
+type ComfyHistoryEntry = {
+ outputs?: Record;
+};
+
+function toRecord(value: unknown): JsonRecord {
+ return value && typeof value === "object" && !Array.isArray(value) ? (value as JsonRecord) : {};
+}
+
/**
* Submit a workflow to ComfyUI for execution.
* @returns The prompt_id for polling
*/
-export async function submitComfyWorkflow(
- baseUrl: string,
- workflow: object
-): Promise {
+export async function submitComfyWorkflow(baseUrl: string, workflow: object): Promise {
const res = await fetch(`${baseUrl}/prompt`, {
method: "POST",
headers: { "Content-Type": "application/json" },
@@ -24,8 +43,12 @@ export async function submitComfyWorkflow(
throw new Error(`ComfyUI submit failed (${res.status}): ${errText}`);
}
- const data = await res.json();
- return data.prompt_id;
+ const data = toRecord(await res.json());
+ const promptId = data.prompt_id;
+ if (typeof promptId !== "string" || !promptId) {
+ throw new Error("ComfyUI submit failed: missing prompt_id");
+ }
+ return promptId;
}
/**
@@ -36,7 +59,7 @@ export async function pollComfyResult(
baseUrl: string,
promptId: string,
timeoutMs: number = 120_000
-): Promise {
+): Promise {
const start = Date.now();
while (Date.now() - start < timeoutMs) {
@@ -45,8 +68,8 @@ export async function pollComfyResult(
const res = await fetch(`${baseUrl}/history/${promptId}`);
if (!res.ok) continue;
- const data = await res.json();
- const entry = data[promptId];
+ const data = toRecord(await res.json());
+ const entry = toRecord(data[promptId]) as ComfyHistoryEntry;
if (entry && entry.outputs && Object.keys(entry.outputs).length > 0) {
return entry;
@@ -84,12 +107,12 @@ export async function fetchComfyOutput(
* Returns an array of { filename, subfolder, type } for each output.
*/
export function extractComfyOutputFiles(
- historyEntry: any
+ historyEntry: ComfyHistoryEntry
): Array<{ filename: string; subfolder: string; type: string }> {
const files: Array<{ filename: string; subfolder: string; type: string }> = [];
for (const nodeOutput of Object.values(historyEntry.outputs || {})) {
- const outputs = (nodeOutput as any).images || (nodeOutput as any).gifs || (nodeOutput as any).audio || [];
+ const outputs = nodeOutput.images || nodeOutput.gifs || nodeOutput.audio || [];
for (const file of outputs) {
files.push({
filename: file.filename,
diff --git a/open-sse/utils/error.ts b/open-sse/utils/error.ts
index f7f5624e..726f757e 100644
--- a/open-sse/utils/error.ts
+++ b/open-sse/utils/error.ts
@@ -137,7 +137,13 @@ export function createErrorResult(
message: string,
retryAfterMs: number | null = null
) {
- const result: Record = {
+ const result: {
+ success: false;
+ status: number;
+ error: string;
+ response: Response;
+ retryAfterMs?: number;
+ } = {
success: false,
status: statusCode,
error: message,
@@ -160,12 +166,15 @@ export function createErrorResult(
* @param {string} retryAfterHuman - Human-readable retry info e.g. "reset after 30s"
* @returns {Response}
*/
-export function unavailableResponse(statusCode, message, retryAfter?: any, retryAfterHuman?: any) {
- const retryAfterSec = Math.max(
- Math.ceil((new Date(retryAfter).getTime() - Date.now()) / 1000),
- 1
- );
- const msg = `${message} (${retryAfterHuman})`;
+export function unavailableResponse(
+ statusCode: number,
+ message: string,
+ retryAfter?: string | number | Date | null,
+ retryAfterHuman?: string
+) {
+ const retryTimeMs = retryAfter ? new Date(retryAfter).getTime() : Date.now() + 1000;
+ const retryAfterSec = Math.max(Math.ceil((retryTimeMs - Date.now()) / 1000), 1);
+ const msg = retryAfterHuman ? `${message} (${retryAfterHuman})` : message;
return new Response(JSON.stringify({ error: { message: msg } }), {
status: statusCode,
headers: {
diff --git a/open-sse/utils/logger.ts b/open-sse/utils/logger.ts
index c97f715c..0620c752 100644
--- a/open-sse/utils/logger.ts
+++ b/open-sse/utils/logger.ts
@@ -96,7 +96,7 @@ export function logger(tag) {
const consoleFn = getConsoleFn(level);
if (jsonFormat) {
- const entry: Record = {
+ const entry: Record = {
ts: new Date().toISOString(),
level,
tag,
@@ -132,7 +132,7 @@ export function createLogger(requestId = null) {
const consoleFn = getConsoleFn(level);
if (jsonFormat) {
- const entry: Record = {
+ const entry: Record = {
ts: new Date().toISOString(),
level,
tag,
diff --git a/open-sse/utils/networkProxy.ts b/open-sse/utils/networkProxy.ts
index b0c4e330..82a95c5e 100644
--- a/open-sse/utils/networkProxy.ts
+++ b/open-sse/utils/networkProxy.ts
@@ -34,7 +34,7 @@ async function getConfig() {
* @param {string} providerId - Provider ID (e.g., "openai", "anthropic")
* @returns {string|null} Proxy URL or null if no proxy configured
*/
-/** @returns {Promise} */
+/** @returns {Promise} */
export async function resolveProxy(providerId) {
const config = await getConfig();
diff --git a/open-sse/utils/ollamaTransform.ts b/open-sse/utils/ollamaTransform.ts
index d5af3d52..51a60e83 100644
--- a/open-sse/utils/ollamaTransform.ts
+++ b/open-sse/utils/ollamaTransform.ts
@@ -1,8 +1,17 @@
import { getCorsOrigin } from "./cors.ts";
+
+type PendingToolCall = {
+ id?: string;
+ function: {
+ name: string;
+ arguments: string;
+ };
+};
+
// Transform OpenAI SSE stream to Ollama JSON lines format
export function transformToOllama(response, model) {
let buffer = "";
- let pendingToolCalls: Record = {};
+ let pendingToolCalls: Record = {};
const transform = new TransformStream({
transform(chunk, controller) {
@@ -52,7 +61,7 @@ export function transformToOllama(response, model) {
if (finishReason === "tool_calls" || finishReason === "stop") {
const toolCallsArr = Object.values(pendingToolCalls);
if (toolCallsArr.length > 0) {
- const formattedCalls = toolCallsArr.map((tc: any) => ({
+ const formattedCalls = toolCallsArr.map((tc) => ({
function: {
name: tc.function.name,
arguments: JSON.parse(tc.function.arguments || "{}"),
diff --git a/open-sse/utils/proxyDispatcher.ts b/open-sse/utils/proxyDispatcher.ts
index 879ec80a..9307e571 100644
--- a/open-sse/utils/proxyDispatcher.ts
+++ b/open-sse/utils/proxyDispatcher.ts
@@ -1,14 +1,27 @@
-import { ProxyAgent } from "undici";
+import { ProxyAgent, type Dispatcher } from "undici";
import { socksDispatcher } from "fetch-socks";
const DISPATCHER_CACHE_KEY = Symbol.for("omniroute.proxyDispatcher.cache");
const SUPPORTED_PROTOCOLS = new Set(["http:", "https:", "socks5:"]);
-function getDispatcherCache(): Map {
- if (!globalThis[DISPATCHER_CACHE_KEY]) {
- globalThis[DISPATCHER_CACHE_KEY] = new Map();
+type DispatcherCache = Map;
+type GlobalWithDispatcherCache = typeof globalThis & {
+ [DISPATCHER_CACHE_KEY]?: DispatcherCache;
+};
+type SocksDispatcherOptions = {
+ type: number;
+ host: string;
+ port: number;
+ userId?: string;
+ password?: string;
+};
+
+function getDispatcherCache(): DispatcherCache {
+ const globalWithCache = globalThis as GlobalWithDispatcherCache;
+ if (!globalWithCache[DISPATCHER_CACHE_KEY]) {
+ globalWithCache[DISPATCHER_CACHE_KEY] = new Map();
}
- return globalThis[DISPATCHER_CACHE_KEY];
+ return globalWithCache[DISPATCHER_CACHE_KEY];
}
/**
@@ -59,10 +72,9 @@ function normalizePort(port, protocol) {
* listen on these ports, so we must always include the port explicitly.
*/
function buildProxyUrlString(parsed, port) {
- const auth =
- parsed.username
- ? `${parsed.username}${parsed.password ? `:${parsed.password}` : ""}@`
- : "";
+ const auth = parsed.username
+ ? `${parsed.username}${parsed.password ? `:${parsed.password}` : ""}@`
+ : "";
return `${parsed.protocol}//${auth}${parsed.hostname}:${port}`;
}
@@ -166,18 +178,20 @@ export function createProxyDispatcher(proxyUrl) {
const port = explicitPort || normalizePort(parsed.port, parsed.protocol);
if (parsed.protocol === "socks5:") {
- const socksOptions: Record = {
+ const socksOptions: SocksDispatcherOptions = {
type: 5,
host: parsed.hostname,
port: Number(port),
};
if (parsed.username) socksOptions.userId = decodeURIComponent(parsed.username);
if (parsed.password) socksOptions.password = decodeURIComponent(parsed.password);
- dispatcher = socksDispatcher(socksOptions as any);
+ dispatcher = socksDispatcher(
+ socksOptions as Parameters[0]
+ ) as Dispatcher;
} else {
dispatcher = new ProxyAgent(normalizedUrl);
}
dispatcherCache.set(normalizedUrl, dispatcher);
return dispatcher;
-}
\ No newline at end of file
+}
diff --git a/open-sse/utils/proxyFetch.ts b/open-sse/utils/proxyFetch.ts
index f7f7494a..7bcef94b 100644
--- a/open-sse/utils/proxyFetch.ts
+++ b/open-sse/utils/proxyFetch.ts
@@ -12,20 +12,33 @@ function isTlsFingerprintEnabled() {
}
/** Per-request tracking of whether TLS fingerprint was used */
-const tlsFingerprintContext = new AsyncLocalStorage();
+type TlsFingerprintStore = { used: boolean };
+const tlsFingerprintContext = new AsyncLocalStorage();
+
+type FetchWithDispatcherOptions = RequestInit & { dispatcher?: unknown };
+
+type PatchState = {
+ originalFetch: typeof globalThis.fetch;
+ proxyContext: AsyncLocalStorage;
+ isPatched: boolean;
+};
const isCloud = typeof caches !== "undefined" && typeof caches === "object";
const PATCH_STATE_KEY = Symbol.for("omniroute.proxyFetch.state");
-function getPatchState() {
- if (!globalThis[PATCH_STATE_KEY]) {
- globalThis[PATCH_STATE_KEY] = {
+function getPatchState(): PatchState {
+ const scopedGlobal = globalThis as typeof globalThis & {
+ [PATCH_STATE_KEY]?: PatchState;
+ };
+
+ if (!scopedGlobal[PATCH_STATE_KEY]) {
+ scopedGlobal[PATCH_STATE_KEY] = {
originalFetch: globalThis.fetch,
proxyContext: new AsyncLocalStorage(),
isPatched: false,
};
}
- return globalThis[PATCH_STATE_KEY];
+ return scopedGlobal[PATCH_STATE_KEY];
}
const patchState = getPatchState();
@@ -126,7 +139,7 @@ export async function runWithProxyContext(proxyConfig, fn) {
});
}
-async function patchedFetch(input: any, options: any = {}) {
+async function patchedFetch(input: RequestInfo | URL, options: FetchWithDispatcherOptions = {}) {
if (options?.dispatcher) {
return originalFetch(input, options);
}
@@ -146,7 +159,7 @@ async function patchedFetch(input: any, options: any = {}) {
// TLS fingerprint spoofing for direct connections (no proxy configured)
if (isTlsFingerprintEnabled() && tlsClient.available) {
try {
- const store: any = tlsFingerprintContext.getStore();
+ const store = tlsFingerprintContext.getStore();
if (store) store.used = true;
return await tlsClient.fetch(targetUrl, options);
} catch (error) {
@@ -154,7 +167,7 @@ async function patchedFetch(input: any, options: any = {}) {
console.warn(
`[ProxyFetch] TLS fingerprint failed, falling back to native fetch: ${message}`
);
- const store: any = tlsFingerprintContext.getStore();
+ const store = tlsFingerprintContext.getStore();
if (store) store.used = false;
}
}
diff --git a/open-sse/utils/requestLogger.ts b/open-sse/utils/requestLogger.ts
index 714312ce..9b83b043 100644
--- a/open-sse/utils/requestLogger.ts
+++ b/open-sse/utils/requestLogger.ts
@@ -125,7 +125,7 @@ export async function createRequestLogger(sourceFormat, targetFormat, model) {
return sessionPath;
},
- // 1. Log client raw request (before any conversion)
+ // 1. Log client raw request (before all conversion steps)
logClientRawRequest(endpoint, body, headers = {}) {
writeJsonFile(sessionPath, "1_req_client.json", {
timestamp: new Date().toISOString(),
diff --git a/open-sse/utils/stream.ts b/open-sse/utils/stream.ts
index b15b5f3a..f67ecc82 100644
--- a/open-sse/utils/stream.ts
+++ b/open-sse/utils/stream.ts
@@ -19,6 +19,46 @@ import {
export { COLORS, formatSSE };
+type JsonRecord = Record;
+
+type StreamLogger = {
+ appendProviderChunk?: (value: string) => void;
+ appendConvertedChunk?: (value: string) => void;
+ appendOpenAIChunk?: (value: string) => void;
+};
+
+type StreamCompletePayload = {
+ status: number;
+ usage: unknown;
+};
+
+type StreamOptions = {
+ mode?: string;
+ targetFormat?: string;
+ sourceFormat?: string;
+ provider?: string | null;
+ reqLogger?: StreamLogger | null;
+ toolNameMap?: unknown;
+ model?: string | null;
+ connectionId?: string | null;
+ apiKeyInfo?: unknown;
+ body?: unknown;
+ onComplete?: ((payload: StreamCompletePayload) => void) | null;
+};
+
+type TranslateState = ReturnType & {
+ provider?: string | null;
+ toolNameMap?: unknown;
+ usage?: unknown;
+ finishReason?: unknown;
+};
+
+function getOpenAIIntermediateChunks(value: unknown): unknown[] {
+ if (!value || typeof value !== "object") return [];
+ const candidate = (value as JsonRecord)._openaiIntermediate;
+ return Array.isArray(candidate) ? candidate : [];
+}
+
// Note: TextDecoder/TextEncoder are created per-stream inside createSSEStream()
// to avoid shared state issues with concurrent streams (TextDecoder with {stream:true}
// maintains internal buffering state between decode() calls).
@@ -48,15 +88,13 @@ const STREAM_MODE = {
* @param {object} options.body - Request body (for input token estimation)
* @param {function} options.onComplete - Callback when stream finishes: ({ status, usage }) => void
*/
-/** @param {any} options */
-export function createSSEStream(options: any = {}) {
+export function createSSEStream(options: StreamOptions = {}) {
const {
mode = STREAM_MODE.TRANSLATE,
targetFormat,
sourceFormat,
provider = null,
reqLogger = null,
- /** @type {any} */
toolNameMap = null,
model = null,
connectionId = null,
@@ -69,8 +107,10 @@ export function createSSEStream(options: any = {}) {
let usage = null;
// State for translate mode
- const state =
- mode === STREAM_MODE.TRANSLATE ? { ...initState(sourceFormat), provider, toolNameMap } : null;
+ const state: TranslateState | null =
+ mode === STREAM_MODE.TRANSLATE
+ ? { ...(initState(sourceFormat) as TranslateState), provider, toolNameMap }
+ : null;
// Track content length for usage estimation (both modes)
let totalContentLength = 0;
@@ -84,7 +124,7 @@ export function createSSEStream(options: any = {}) {
// Idle timeout state — closes stream if provider stops sending data
let lastChunkTime = Date.now();
- let idleTimer = null;
+ let idleTimer: ReturnType | null = null;
let streamTimedOut = false;
return new TransformStream(
@@ -270,11 +310,9 @@ export function createSSEStream(options: any = {}) {
const translated = translateResponse(targetFormat, sourceFormat, parsed, state);
// Log OpenAI intermediate chunks (if available)
- if ((translated as any)?._openaiIntermediate) {
- for (const item of (translated as any)._openaiIntermediate) {
- const openaiOutput = formatSSE(item, FORMATS.OPENAI);
- reqLogger?.appendOpenAIChunk?.(openaiOutput);
- }
+ for (const item of getOpenAIIntermediateChunks(translated)) {
+ const openaiOutput = formatSSE(item, FORMATS.OPENAI);
+ reqLogger?.appendOpenAIChunk?.(openaiOutput);
}
if (translated?.length > 0) {
@@ -366,11 +404,9 @@ export function createSSEStream(options: any = {}) {
const translated = translateResponse(targetFormat, sourceFormat, parsed, state);
// Log OpenAI intermediate chunks
- if ((translated as any)?._openaiIntermediate) {
- for (const item of (translated as any)._openaiIntermediate) {
- const openaiOutput = formatSSE(item, FORMATS.OPENAI);
- reqLogger?.appendOpenAIChunk?.(openaiOutput);
- }
+ for (const item of getOpenAIIntermediateChunks(translated)) {
+ const openaiOutput = formatSSE(item, FORMATS.OPENAI);
+ reqLogger?.appendOpenAIChunk?.(openaiOutput);
}
if (translated?.length > 0) {
@@ -387,11 +423,9 @@ export function createSSEStream(options: any = {}) {
const flushed = translateResponse(targetFormat, sourceFormat, null, state);
// Log OpenAI intermediate chunks for flushed events
- if ((flushed as any)?._openaiIntermediate) {
- for (const item of (flushed as any)._openaiIntermediate) {
- const openaiOutput = formatSSE(item, FORMATS.OPENAI);
- reqLogger?.appendOpenAIChunk?.(openaiOutput);
- }
+ for (const item of getOpenAIIntermediateChunks(flushed)) {
+ const openaiOutput = formatSSE(item, FORMATS.OPENAI);
+ reqLogger?.appendOpenAIChunk?.(openaiOutput);
}
if (flushed?.length > 0) {
@@ -456,16 +490,16 @@ export function createSSEStream(options: any = {}) {
// Convenience functions for backward compatibility
export function createSSETransformStreamWithLogger(
- targetFormat,
- sourceFormat,
- provider = null,
- reqLogger = null,
- toolNameMap = null,
- model = null,
- connectionId = null,
- body = null,
- onComplete = null,
- apiKeyInfo = null
+ targetFormat: string,
+ sourceFormat: string,
+ provider: string | null = null,
+ reqLogger: StreamLogger | null = null,
+ toolNameMap: unknown = null,
+ model: string | null = null,
+ connectionId: string | null = null,
+ body: unknown = null,
+ onComplete: ((payload: StreamCompletePayload) => void) | null = null,
+ apiKeyInfo: unknown = null
) {
return createSSEStream({
mode: STREAM_MODE.TRANSLATE,
@@ -483,13 +517,13 @@ export function createSSETransformStreamWithLogger(
}
export function createPassthroughStreamWithLogger(
- provider = null,
- reqLogger = null,
- model = null,
- connectionId = null,
- body = null,
- onComplete = null,
- apiKeyInfo = null
+ provider: string | null = null,
+ reqLogger: StreamLogger | null = null,
+ model: string | null = null,
+ connectionId: string | null = null,
+ body: unknown = null,
+ onComplete: ((payload: StreamCompletePayload) => void) | null = null,
+ apiKeyInfo: unknown = null
) {
return createSSEStream({
mode: STREAM_MODE.PASSTHROUGH,
diff --git a/open-sse/utils/streamHandler.ts b/open-sse/utils/streamHandler.ts
index ff92aa5c..58f627f9 100644
--- a/open-sse/utils/streamHandler.ts
+++ b/open-sse/utils/streamHandler.ts
@@ -1,5 +1,19 @@
// Stream handler with disconnect detection - shared for all providers
+type StreamDisconnectEvent = {
+ reason: string;
+ duration: number;
+};
+
+type StreamControllerOptions = {
+ onDisconnect?: (event: StreamDisconnectEvent) => void;
+ log?: unknown;
+ provider?: string;
+ model?: string;
+};
+
+type StreamController = ReturnType;
+
// Get HH:MM:SS timestamp
function getTimeString() {
return new Date().toLocaleTimeString("en-US", {
@@ -18,12 +32,17 @@ function getTimeString() {
* @param {string} options.provider - Provider name
* @param {string} options.model - Model name
*/
-/** @param {any} options */
-export function createStreamController({ onDisconnect, log, provider, model }: any = {}) {
+/** @param {StreamControllerOptions} options */
+export function createStreamController({
+ onDisconnect,
+ log,
+ provider,
+ model,
+}: StreamControllerOptions = {}) {
const abortController = new AbortController();
const startTime = Date.now();
let disconnected = false;
- let abortTimeout = null;
+ let abortTimeout: ReturnType | null = null;
const logStream = (status) => {
const duration = Date.now() - startTime;
@@ -68,18 +87,22 @@ export function createStreamController({ onDisconnect, log, provider, model }: a
},
// Call on error
- handleError: (error) => {
+ handleError: (error: unknown) => {
if (abortTimeout) {
clearTimeout(abortTimeout);
abortTimeout = null;
}
- if (error.name === "AbortError") {
+ if (error instanceof Error && error.name === "AbortError") {
logStream("aborted");
return;
}
- logStream(`error: ${error.message}`);
+ if (error instanceof Error) {
+ logStream(`error: ${error.message}`);
+ return;
+ }
+ logStream("error: unknown");
},
abort: () => abortController.abort(),
@@ -129,7 +152,11 @@ export function createDisconnectAwareStream(transformStream, streamController) {
* @param {TransformStream} transformStream - Transform stream for SSE
* @param {object} streamController - Stream controller from createStreamController
*/
-export function pipeWithDisconnect(providerResponse, transformStream, streamController) {
+export function pipeWithDisconnect(
+ providerResponse: Response,
+ transformStream: TransformStream,
+ streamController: StreamController
+) {
const transformedBody = providerResponse.body.pipeThrough(transformStream);
return createDisconnectAwareStream(
{ readable: transformedBody, writable: { getWriter: () => ({ abort: () => {} }) } },
diff --git a/open-sse/utils/tlsClient.ts b/open-sse/utils/tlsClient.ts
index 58941795..765833bb 100644
--- a/open-sse/utils/tlsClient.ts
+++ b/open-sse/utils/tlsClient.ts
@@ -2,9 +2,17 @@ import { createRequire } from "module";
const require = createRequire(import.meta.url);
-let createSession: any;
+type WreqSession = {
+ fetch: (url: string, options?: Record) => Promise;
+ close: () => Promise | void;
+};
+
+type CreateSessionFn = (options: Record) => Promise;
+
+let createSession: CreateSessionFn | null;
try {
- ({ createSession } = require("wreq-js"));
+ const loaded = require("wreq-js") as { createSession?: CreateSessionFn };
+ createSession = typeof loaded.createSession === "function" ? loaded.createSession : null;
} catch {
createSession = null;
}
@@ -28,7 +36,7 @@ function getProxyFromEnv(): string | undefined {
interface FetchOptions {
method?: string;
headers?: Record;
- body?: any;
+ body?: unknown;
redirect?: string;
signal?: AbortSignal;
}
@@ -41,7 +49,7 @@ interface FetchOptions {
* Proxy URL is read from environment variables (HTTPS_PROXY, HTTP_PROXY, ALL_PROXY).
*/
class TlsClient {
- session: any = null;
+ session: WreqSession | null = null;
available: boolean;
constructor() {
@@ -53,7 +61,7 @@ class TlsClient {
if (this.session) return this.session;
const proxy = getProxyFromEnv();
- const sessionOpts: Record = {
+ const sessionOpts: Record = {
browser: "chrome_124",
os: "macos",
};
@@ -77,7 +85,7 @@ class TlsClient {
const method = (options.method || "GET").toUpperCase();
- const wreqOptions: Record = {
+ const wreqOptions: Record = {
method,
headers: options.headers,
body: options.body,
@@ -102,4 +110,3 @@ class TlsClient {
}
export default new TlsClient();
-
diff --git a/open-sse/utils/usageTracking.ts b/open-sse/utils/usageTracking.ts
index 0f92e032..6a871cdb 100644
--- a/open-sse/utils/usageTracking.ts
+++ b/open-sse/utils/usageTracking.ts
@@ -34,7 +34,7 @@ function getTimeString() {
/**
* Add buffer tokens to usage to prevent context errors
- * @param {object} usage - Usage object (any format)
+ * @param {object} usage - Usage object (supported format)
* @returns {object} Usage with buffer added
*/
export function addBufferToUsage(usage) {
@@ -162,7 +162,7 @@ export function normalizeUsage(usage) {
export function hasValidUsage(usage) {
if (!usage || typeof usage !== "object") return false;
- // Check for any known token field with value > 0
+ // Check for known token fields with value > 0
const tokenFields = [
"prompt_tokens",
"completion_tokens",
@@ -183,7 +183,7 @@ export function hasValidUsage(usage) {
}
/**
- * Extract usage from any format (Claude, OpenAI, Gemini, Responses API)
+ * Extract usage from supported formats (Claude, OpenAI, Gemini, Responses API)
*/
export function extractUsage(chunk) {
if (!chunk || typeof chunk !== "object") return null;
diff --git a/scripts/check-t11-any-budget.mjs b/scripts/check-t11-any-budget.mjs
index a4b3867a..db3d704b 100644
--- a/scripts/check-t11-any-budget.mjs
+++ b/scripts/check-t11-any-budget.mjs
@@ -44,6 +44,58 @@ const budget = [
{ file: "open-sse/services/accountFallback.ts", maxAny: 0 },
{ file: "open-sse/handlers/responseSanitizer.ts", maxAny: 0 },
{ file: "open-sse/handlers/responseTranslator.ts", maxAny: 0 },
+ { file: "open-sse/utils/stream.ts", maxAny: 0 },
+ { file: "open-sse/translator/request/openai-responses.ts", maxAny: 0 },
+ { file: "open-sse/executors/base.ts", maxAny: 0 },
+ { file: "open-sse/executors/kiro.ts", maxAny: 0 },
+ { file: "open-sse/executors/cursor.ts", maxAny: 0 },
+ { file: "open-sse/executors/iflow.ts", maxAny: 0 },
+ { file: "open-sse/utils/comfyuiClient.ts", maxAny: 0 },
+ { file: "open-sse/utils/tlsClient.ts", maxAny: 0 },
+ { file: "open-sse/utils/proxyFetch.ts", maxAny: 0 },
+ { file: "open-sse/utils/error.ts", maxAny: 0 },
+ { file: "open-sse/translator/request/openai-to-gemini.ts", maxAny: 0 },
+ { file: "open-sse/translator/request/antigravity-to-openai.ts", maxAny: 0 },
+ { file: "open-sse/translator/request/claude-to-openai.ts", maxAny: 0 },
+ { file: "open-sse/handlers/audioTranscription.ts", maxAny: 0 },
+ { file: "open-sse/handlers/sseParser.ts", maxAny: 0 },
+ { file: "open-sse/handlers/chatCore.ts", maxAny: 0 },
+ { file: "open-sse/config/codexInstructions.ts", maxAny: 0 },
+ { file: "open-sse/config/imageRegistry.ts", maxAny: 0 },
+ { file: "open-sse/config/registryUtils.ts", maxAny: 0 },
+ { file: "open-sse/executors/antigravity.ts", maxAny: 0 },
+ { file: "open-sse/executors/default.ts", maxAny: 0 },
+ { file: "open-sse/handlers/audioSpeech.ts", maxAny: 0 },
+ { file: "open-sse/handlers/embeddings.ts", maxAny: 0 },
+ { file: "open-sse/handlers/imageGeneration.ts", maxAny: 0 },
+ { file: "open-sse/handlers/moderations.ts", maxAny: 0 },
+ { file: "open-sse/handlers/rerank.ts", maxAny: 0 },
+ { file: "open-sse/handlers/responsesHandler.ts", maxAny: 0 },
+ { file: "open-sse/mcp-server/__tests__/advancedTools.test.ts", maxAny: 0 },
+ { file: "open-sse/mcp-server/__tests__/essentialTools.test.ts", maxAny: 0 },
+ { file: "open-sse/services/combo.ts", maxAny: 0 },
+ { file: "open-sse/services/thinkingBudget.ts", maxAny: 0 },
+ { file: "open-sse/translator/helpers/geminiHelper.ts", maxAny: 0 },
+ { file: "open-sse/translator/helpers/openaiHelper.ts", maxAny: 0 },
+ { file: "open-sse/translator/helpers/responsesApiHelper.ts", maxAny: 0 },
+ { file: "open-sse/translator/request/claude-to-gemini.ts", maxAny: 0 },
+ { file: "open-sse/translator/request/gemini-to-openai.ts", maxAny: 0 },
+ { file: "open-sse/translator/request/openai-to-claude.ts", maxAny: 0 },
+ { file: "open-sse/translator/request/openai-to-cursor.ts", maxAny: 0 },
+ { file: "open-sse/translator/request/openai-to-kiro.ts", maxAny: 0 },
+ { file: "open-sse/translator/response/claude-to-openai.ts", maxAny: 0 },
+ { file: "open-sse/translator/response/gemini-to-openai.ts", maxAny: 0 },
+ { file: "open-sse/translator/response/kiro-to-openai.ts", maxAny: 0 },
+ { file: "open-sse/translator/response/openai-responses.ts", maxAny: 0 },
+ { file: "open-sse/translator/response/openai-to-antigravity.ts", maxAny: 0 },
+ { file: "open-sse/utils/bypassHandler.ts", maxAny: 0 },
+ { file: "open-sse/utils/logger.ts", maxAny: 0 },
+ { file: "open-sse/utils/networkProxy.ts", maxAny: 0 },
+ { file: "open-sse/utils/ollamaTransform.ts", maxAny: 0 },
+ { file: "open-sse/utils/proxyDispatcher.ts", maxAny: 0 },
+ { file: "open-sse/utils/requestLogger.ts", maxAny: 0 },
+ { file: "open-sse/utils/streamHandler.ts", maxAny: 0 },
+ { file: "open-sse/utils/usageTracking.ts", maxAny: 0 },
];
const anyRegex = /\bany\b/g;
diff --git a/src/app/a2a/route.ts b/src/app/a2a/route.ts
index 588ebe45..98fa2eab 100644
--- a/src/app/a2a/route.ts
+++ b/src/app/a2a/route.ts
@@ -16,6 +16,7 @@ import { executeSmartRouting } from "@/lib/a2a/skills/smartRouting";
import { executeQuotaManagement } from "@/lib/a2a/skills/quotaManagement";
import { logRoutingDecision } from "@/lib/a2a/routingLogger";
import { createA2AStream, SSE_HEADERS } from "@/lib/a2a/streaming";
+import { executeA2ATaskWithState } from "@/lib/a2a/taskExecution";
// ============ Skill Registry ============
@@ -139,11 +140,7 @@ export async function POST(req: NextRequest) {
const stream = createA2AStream(
task,
- async (t) => {
- const result = await handler(t);
- tm.updateTask(t.id, "completed", result.artifacts);
- return result;
- },
+ async (t) => executeA2ATaskWithState(tm, t, handler),
req.signal
);
diff --git a/src/lib/a2a/taskExecution.ts b/src/lib/a2a/taskExecution.ts
new file mode 100644
index 00000000..d51d4792
--- /dev/null
+++ b/src/lib/a2a/taskExecution.ts
@@ -0,0 +1,36 @@
+type TaskManagerLike = {
+ updateTask: (
+ taskId: string,
+ state: "completed" | "failed",
+ artifacts?: Array<{ type: string; content: string }>,
+ message?: string
+ ) => unknown;
+};
+
+type StreamTaskLike = {
+ id: string;
+};
+
+type StreamTaskResult = {
+ artifacts: Array<{ type: string; content: string }>;
+};
+
+export async function executeA2ATaskWithState(
+ tm: TaskManagerLike,
+ task: StreamTaskLike,
+ handler: (task: StreamTaskLike) => Promise<StreamTaskResult>
+) {
+ try {
+ const result = await handler(task);
+ tm.updateTask(task.id, "completed", result.artifacts);
+ return result;
+ } catch (err) {
+ const msg = err instanceof Error ? err.message : String(err);
+ try {
+ tm.updateTask(task.id, "failed", [{ type: "error", content: msg }], msg);
+ } catch {
+ // Task may already be terminal (e.g., cancelled). Preserve original error.
+ }
+ throw err;
+ }
+}
diff --git a/src/lib/a2a/taskManager.ts b/src/lib/a2a/taskManager.ts
index ae846e7b..d86e045d 100644
--- a/src/lib/a2a/taskManager.ts
+++ b/src/lib/a2a/taskManager.ts
@@ -91,7 +91,9 @@ export class A2ATaskManager {
getTask(taskId: string): A2ATask | undefined {
const task = this.tasks.get(taskId);
if (task && new Date(task.expiresAt) < new Date()) {
- this.updateTask(taskId, "failed", undefined, "Task expired");
+ if (task.state === "submitted" || task.state === "working") {
+ this.updateTask(taskId, "failed", undefined, "Task expired");
+ }
}
return this.tasks.get(taskId);
}
diff --git a/tests/unit/t09-a2a-lifecycle.test.mjs b/tests/unit/t09-a2a-lifecycle.test.mjs
new file mode 100644
index 00000000..09bc6e2f
--- /dev/null
+++ b/tests/unit/t09-a2a-lifecycle.test.mjs
@@ -0,0 +1,57 @@
+import test from "node:test";
+import assert from "node:assert/strict";
+
+import { A2ATaskManager } from "../../src/lib/a2a/taskManager.ts";
+import { executeA2ATaskWithState } from "../../src/lib/a2a/taskExecution.ts";
+
+const managers = [];
+
+function createManager(ttlMinutes = 5) {
+ const manager = new A2ATaskManager(ttlMinutes);
+ managers.push(manager);
+ return manager;
+}
+
+test.afterEach(() => {
+ while (managers.length > 0) {
+ managers.pop()?.destroy();
+ }
+});
+
+test("completed task remains completed when queried after expiration", () => {
+ const tm = createManager();
+ const task = tm.createTask({
+ skill: "smart-routing",
+ messages: [{ role: "user", content: "hello" }],
+ });
+
+ tm.updateTask(task.id, "working");
+ tm.updateTask(task.id, "completed", [{ type: "text", content: "done" }]);
+ task.expiresAt = new Date(Date.now() - 1_000).toISOString();
+
+ assert.doesNotThrow(() => tm.getTask(task.id));
+ const loaded = tm.getTask(task.id);
+ assert.equal(loaded?.state, "completed");
+});
+
+test("stream execution marks task as failed when handler throws", async () => {
+ const tm = createManager();
+ const task = tm.createTask({
+ skill: "smart-routing",
+ messages: [{ role: "user", content: "trigger error" }],
+ });
+
+ tm.updateTask(task.id, "working");
+
+ await assert.rejects(
+ () =>
+ executeA2ATaskWithState(tm, task, async () => {
+ throw new Error("upstream failure");
+ }),
+ /upstream failure/
+ );
+
+ const loaded = tm.getTask(task.id);
+ assert.equal(loaded?.state, "failed");
+ assert.deepEqual(loaded?.artifacts.at(-1), { type: "error", content: "upstream failure" });
+});