fix(openai): harden image auth fallback

Co-authored-by: Tak Hoffman <781889+Takhoffman@users.noreply.github.com>
This commit is contained in:
Peter Steinberger 2026-04-23 23:37:06 +01:00
parent 539083811e
commit c2cf3c49d3
No known key found for this signature in database
3 changed files with 476 additions and 32 deletions

View file

@ -16,6 +16,7 @@ Docs: https://docs.openclaw.ai
- Memory/QMD: recreate stale managed QMD collections when startup repair finds the collection name already exists, so root memory narrows back to `MEMORY.md` instead of staying on broad workspace markdown indexing.
- Agents/OpenAI: surface selected-model capacity failures from PI, Codex, and auto-reply harness paths with a model-switch hint instead of the generic empty-response error. Thanks @vincentkoc.
- Providers/OpenAI: route `openai/gpt-image-2` through configured Codex OAuth directly when an `openai-codex` profile is active, instead of probing `OPENAI_API_KEY` first.
- Providers/OpenAI: harden image generation auth routing and Codex OAuth response parsing so fallback only applies to public OpenAI API routes and bounded SSE results. Thanks @Takhoffman.
- Providers/OpenAI: stop advertising the removed `gpt-5.3-codex-spark` Codex model through fallback catalogs, and suppress stale rows with a GPT-5.5 recovery hint.
- Plugins/QR: replace legacy `qrcode-terminal` QR rendering with bounded `qrcode-tui` helpers for plugin login/setup flows. (#65969) Thanks @vincentkoc.
- Voice-call/realtime: wait for OpenAI session configuration before greeting or forwarding buffered audio, and reject non-allowlisted Twilio callers before stream setup. (#43501) Thanks @forrestblount.

View file

@ -11,6 +11,7 @@ const {
assertOkOrThrowHttpErrorMock,
resolveProviderHttpRequestConfigMock,
sanitizeConfiguredModelProviderRequestMock,
logInfoMock,
} = vi.hoisted(() => ({
ensureAuthProfileStoreMock: vi.fn(() => ({ version: 1, profiles: {} })),
isProviderApiKeyConfiguredMock: vi.fn<
@ -39,6 +40,7 @@ const {
dispatcherPolicy: undefined,
})),
sanitizeConfiguredModelProviderRequestMock: vi.fn((request) => request),
logInfoMock: vi.fn(),
}));
vi.mock("openclaw/plugin-sdk/provider-auth", () => ({
@ -59,6 +61,15 @@ vi.mock("openclaw/plugin-sdk/provider-http", () => ({
sanitizeConfiguredModelProviderRequest: sanitizeConfiguredModelProviderRequestMock,
}));
vi.mock("openclaw/plugin-sdk/logging-core", () => ({
createSubsystemLogger: vi.fn(() => ({
info: logInfoMock,
warn: vi.fn(),
error: vi.fn(),
debug: vi.fn(),
})),
}));
function mockGeneratedPngResponse() {
const response = {
json: async () => ({
@ -101,6 +112,42 @@ function mockCodexImageStream(params: { imageData?: string; revisedPrompt?: stri
}));
}
// Stubs postJsonRequest with a Codex SSE stream whose single event is a
// `response.completed` payload carrying one inline image_generation_call
// output entry, plus the fixed usage numbers the assertions check.
function mockCodexCompletedImageStream(
  params: {
    imageData?: string;
    revisedPrompt?: string;
  } = {},
) {
  const encoded = Buffer.from(params.imageData ?? "codex-completed-png-bytes").toString("base64");
  const outputEntry: Record<string, unknown> = { type: "image_generation_call", result: encoded };
  if (params.revisedPrompt) {
    outputEntry.revised_prompt = params.revisedPrompt;
  }
  const completedEvent = {
    type: "response.completed",
    response: {
      output: [outputEntry],
      usage: { input_tokens: 11, output_tokens: 22, total_tokens: 33 },
    },
  };
  const sseBody = `data: ${JSON.stringify(completedEvent)}\n\n`;
  postJsonRequestMock.mockImplementation(async () => ({
    response: new Response(sseBody),
    release: vi.fn(async () => {}),
  }));
}
// Stubs postJsonRequest so the next Codex call streams back `body` verbatim,
// letting tests drive the raw SSE parsing and limit paths directly.
function mockCodexRawStream(body: string) {
  postJsonRequestMock.mockImplementation(async () => {
    const release = vi.fn(async () => {});
    return { response: new Response(body), release };
  });
}
function mockCodexAuthOnly() {
resolveApiKeyForProviderMock.mockImplementation(async (params?: { provider?: string }) => {
if (params?.provider === "openai-codex") {
@ -139,6 +186,7 @@ describe("openai image generation provider", () => {
assertOkOrThrowHttpErrorMock.mockClear();
resolveProviderHttpRequestConfigMock.mockClear();
sanitizeConfiguredModelProviderRequestMock.mockClear();
logInfoMock.mockClear();
vi.unstubAllEnvs();
});
@ -182,6 +230,54 @@ describe("openai image generation provider", () => {
expect(provider.isConfigured?.({ agentDir: "/tmp/agent" })).toBe(false);
});
// A Codex OAuth profile alone must not mark image auth as configured when the
// OpenAI base URL points at a third-party OpenAI-compatible endpoint.
it("does not report Codex OAuth image auth as configured for custom OpenAI endpoints", () => {
const provider = buildOpenAIImageGenerationProvider();
// Simulate: no direct OpenAI key, but the openai-codex profile is present.
isProviderApiKeyConfiguredMock.mockImplementation((params?: { provider?: string }) => {
return params?.provider === "openai-codex";
});
expect(
provider.isConfigured?.({
agentDir: "/tmp/agent",
cfg: {
models: {
providers: {
openai: {
baseUrl: "https://openai-compatible.example.test/v1",
models: [],
},
},
},
},
}),
).toBe(false);
});
// The public host must match exactly: extra URL components (here a query
// string) make the base URL non-canonical, so the Codex fallback stays off.
it("does not report Codex OAuth image auth as configured for non-exact public OpenAI URLs", () => {
const provider = buildOpenAIImageGenerationProvider();
isProviderApiKeyConfiguredMock.mockImplementation((params?: { provider?: string }) => {
return params?.provider === "openai-codex";
});
expect(
provider.isConfigured?.({
agentDir: "/tmp/agent",
cfg: {
models: {
providers: {
openai: {
baseUrl: "https://api.openai.com/v1?proxy=1",
models: [],
},
},
},
},
}),
).toBe(false);
});
it("does not auto-allow local baseUrl overrides for image requests", async () => {
mockGeneratedPngResponse();
@ -410,6 +506,7 @@ describe("openai image generation provider", () => {
expect(postJsonRequestMock).toHaveBeenCalledWith(
expect.objectContaining({
url: "https://chatgpt.com/backend-api/codex/responses",
timeoutMs: 180_000,
body: expect.objectContaining({
model: "gpt-5.4",
instructions: "You are an image generation assistant.",
@ -427,6 +524,9 @@ describe("openai image generation provider", () => {
}),
);
expect(postMultipartRequestMock).not.toHaveBeenCalled();
expect(logInfoMock).toHaveBeenCalledWith(
"image auth selected: provider=openai-codex mode=oauth transport=codex-responses requestedModel=gpt-image-2 responsesModel=gpt-5.4 timeoutMs=180000",
);
expect(result.images).toEqual([
{
buffer: Buffer.from("codex-image"),
@ -484,9 +584,161 @@ describe("openai image generation provider", () => {
url: "https://chatgpt.com/backend-api/codex/responses",
}),
);
expect(logInfoMock).toHaveBeenCalledWith(
"image auth selected: provider=openai-codex mode=oauth transport=codex-responses requestedModel=gpt-image-2 responsesModel=gpt-5.4 timeoutMs=180000",
);
expect(result.images[0]?.buffer).toEqual(Buffer.from("codex-image"));
});
// With only Codex OAuth available, a custom-compatible endpoint must fail
// fast on the direct-key lookup and never reach the Codex responses route.
it("does not fall back to Codex OAuth for custom OpenAI-compatible image endpoints", async () => {
mockCodexAuthOnly();
mockCodexImageStream({ imageData: "codex-image" });
const provider = buildOpenAIImageGenerationProvider();
await expect(
provider.generateImage({
provider: "openai",
model: "gpt-image-2",
prompt: "Draw through a custom endpoint",
cfg: {
models: {
providers: {
openai: {
baseUrl: "https://openai-compatible.example.test/v1",
models: [],
},
},
},
},
}),
).rejects.toThrow("OpenAI API key missing");
// Only the direct "openai" provider may be consulted; no HTTP call happens.
expect(resolveApiKeyForProviderMock).toHaveBeenCalledTimes(1);
expect(resolveApiKeyForProviderMock).toHaveBeenCalledWith(
expect.objectContaining({ provider: "openai" }),
);
expect(postJsonRequestMock).not.toHaveBeenCalled();
});
// Same guard for the public host with a query string: not byte-exact, so the
// OAuth fallback is skipped entirely.
it("does not fall back to Codex OAuth for non-exact public OpenAI URLs", async () => {
mockCodexAuthOnly();
mockCodexImageStream({ imageData: "codex-image" });
const provider = buildOpenAIImageGenerationProvider();
await expect(
provider.generateImage({
provider: "openai",
model: "gpt-image-2",
prompt: "Draw through public OpenAI with query params",
cfg: {
models: {
providers: {
openai: {
baseUrl: "https://api.openai.com/v1?proxy=1",
models: [],
},
},
},
},
}),
).rejects.toThrow("OpenAI API key missing");
expect(resolveApiKeyForProviderMock).toHaveBeenCalledTimes(1);
expect(resolveApiKeyForProviderMock).toHaveBeenCalledWith(
expect.objectContaining({ provider: "openai" }),
);
expect(postJsonRequestMock).not.toHaveBeenCalled();
});
// An unexpected error while resolving the direct OpenAI key (e.g. keychain
// failure) must surface as-is instead of being masked by the OAuth fallback.
it("does not fall back to Codex OAuth when direct OpenAI auth resolution fails unexpectedly", async () => {
resolveApiKeyForProviderMock.mockImplementation(async (params?: { provider?: string }) => {
if (params?.provider === "openai") {
throw new Error("Keychain unavailable");
}
if (params?.provider === "openai-codex") {
return { apiKey: "codex-key", source: "profile:openai-codex:default", mode: "oauth" };
}
return {};
});
mockCodexImageStream({ imageData: "codex-image" });
const provider = buildOpenAIImageGenerationProvider();
await expect(
provider.generateImage({
provider: "openai",
model: "gpt-image-2",
prompt: "Draw after an auth error",
cfg: {},
}),
).rejects.toThrow("Keychain unavailable");
expect(resolveApiKeyForProviderMock).toHaveBeenCalledTimes(1);
expect(resolveApiKeyForProviderMock).toHaveBeenCalledWith(
expect.objectContaining({ provider: "openai" }),
);
expect(postJsonRequestMock).not.toHaveBeenCalled();
});
// Log-forgery attempt: newline, bidi-override, and line/paragraph separators
// in mode/model values must be stripped or flattened in the emitted log line.
it("sanitizes Codex OAuth image auth log values", async () => {
resolveApiKeyForProviderMock.mockImplementation(async (params?: { provider?: string }) => {
if (params?.provider === "openai-codex") {
return {
apiKey: "codex-key",
source: "profile:openai-codex:default",
mode: "oauth\nfake\u202eignored",
};
}
return {};
});
mockCodexImageStream({ imageData: "codex-image" });
const provider = buildOpenAIImageGenerationProvider();
await provider.generateImage({
provider: "openai",
model: "gpt-image-2\r\nforged=true\u2028next",
prompt: "Draw using configured Codex auth",
cfg: {},
authStore: createCodexOAuthAuthStore(),
});
expect(logInfoMock).toHaveBeenCalledWith(
"image auth selected: provider=openai-codex mode=oauth fakeignored transport=codex-responses requestedModel=gpt-image-2 forged=true next responsesModel=gpt-5.4 timeoutMs=180000",
);
});
// Fallback parse path: when there are no output_item.done events, images are
// pulled from the response.completed output array, including usage metadata.
it("parses Codex completed response output image payloads", async () => {
mockCodexAuthOnly();
mockCodexCompletedImageStream({
imageData: "codex-completed-image",
revisedPrompt: "completed prompt",
});
const provider = buildOpenAIImageGenerationProvider();
const result = await provider.generateImage({
provider: "openai",
model: "gpt-image-2",
prompt: "Draw from completed output",
cfg: {},
});
expect(result.images).toEqual([
{
buffer: Buffer.from("codex-completed-image"),
mimeType: "image/png",
fileName: "image-1.png",
revisedPrompt: "completed prompt",
},
]);
expect(result.metadata).toEqual({
responses: [
{
usage: { input_tokens: 11, output_tokens: 22, total_tokens: 33 },
toolUsage: undefined,
},
],
});
});
it("honors configured Codex transport overrides for OAuth image generation", async () => {
mockCodexAuthOnly();
mockCodexImageStream({ imageData: "codex-image" });
@ -638,6 +890,48 @@ describe("openai image generation provider", () => {
expect(result.images.map((image) => image.fileName)).toEqual(["image-1.png", "image-2.png"]);
});
// A requested count above the provider maximum (4) is clamped: exactly four
// Codex requests go out and four numbered files come back.
it("caps Codex image request count at provider maximum", async () => {
mockCodexAuthOnly();
mockCodexImageStream({ imageData: "codex-image" });
const provider = buildOpenAIImageGenerationProvider();
const result = await provider.generateImage({
provider: "openai",
model: "gpt-image-2",
prompt: "Draw many Codex icons",
cfg: {},
count: 12,
});
expect(postJsonRequestMock).toHaveBeenCalledTimes(4);
expect(result.images.map((image) => image.fileName)).toEqual([
"image-1.png",
"image-2.png",
"image-3.png",
"image-4.png",
]);
});
// 513 SSE events exceeds the 512-event cap, so parsing must abort with the
// bounded-stream error rather than buffering an unbounded event list.
it("rejects oversized Codex image SSE event streams", async () => {
mockCodexAuthOnly();
const body = Array.from(
{ length: 513 },
(_, index) =>
`data: ${JSON.stringify({ type: "response.output_text.delta", delta: String(index) })}\n\n`,
).join("");
mockCodexRawStream(body);
const provider = buildOpenAIImageGenerationProvider();
await expect(
provider.generateImage({
provider: "openai",
model: "gpt-image-2",
prompt: "Draw after noisy SSE",
cfg: {},
}),
).rejects.toThrow("OpenAI Codex image generation response exceeded event limit");
});
it("forwards SSRF guard fields to multipart edit requests", async () => {
mockGeneratedPngResponse();

View file

@ -5,6 +5,7 @@ import type {
ImageGenerationResult,
ImageGenerationSourceImage,
} from "openclaw/plugin-sdk/image-generation";
import { createSubsystemLogger } from "openclaw/plugin-sdk/logging-core";
import {
ensureAuthProfileStore,
isProviderApiKeyConfigured,
@ -25,6 +26,7 @@ import { resolveConfiguredOpenAIBaseUrl } from "./shared.js";
const DEFAULT_OPENAI_IMAGE_BASE_URL = "https://api.openai.com/v1";
const DEFAULT_OPENAI_CODEX_IMAGE_BASE_URL = "https://chatgpt.com/backend-api/codex";
const OPENAI_CODEX_IMAGE_INSTRUCTIONS = "You are an image generation assistant.";
const DEFAULT_OPENAI_IMAGE_TIMEOUT_MS = 180_000;
const DEFAULT_OUTPUT_MIME = "image/png";
const DEFAULT_SIZE = "1024x1024";
const OPENAI_SUPPORTED_SIZES = [
@ -37,7 +39,13 @@ const OPENAI_SUPPORTED_SIZES = [
"2160x3840",
] as const;
const OPENAI_MAX_INPUT_IMAGES = 5;
const OPENAI_MAX_IMAGE_RESULTS = 4;
const MAX_CODEX_IMAGE_SSE_BYTES = 64 * 1024 * 1024;
const MAX_CODEX_IMAGE_SSE_EVENTS = 512;
const MAX_CODEX_IMAGE_BASE64_CHARS = 64 * 1024 * 1024;
const LOG_VALUE_MAX_CHARS = 256;
const MOCK_OPENAI_PROVIDER_ID = "mock-openai";
const log = createSubsystemLogger("image-generation/openai");
const AZURE_HOSTNAME_SUFFIXES = [
".openai.azure.com",
@ -47,6 +55,61 @@ const AZURE_HOSTNAME_SUFFIXES = [
const DEFAULT_AZURE_OPENAI_API_VERSION = "2024-12-01-preview";
// Flattens an untrusted value into a single safe log token: strips newlines,
// line/paragraph separators, bidi/control characters, collapses whitespace,
// and truncates to LOG_VALUE_MAX_CHARS. Non-primitive inputs and empty
// results yield "unknown" so the log line never silently drops a field.
function sanitizeLogValue(value: unknown): string {
  let raw = "";
  if (typeof value === "string") {
    raw = value;
  } else if (typeof value === "number" || typeof value === "boolean") {
    raw = String(value);
  }
  const cleaned = raw
    .replace(/[\r\n\u2028\u2029]+/g, " ")
    .replace(/[\u200e\u200f\u202a-\u202e\u2066-\u2069]/gi, "")
    .replace(/\p{Cc}+/gu, " ")
    .replace(/\s+/g, " ")
    .trim();
  if (cleaned === "") {
    return "unknown";
  }
  if (cleaned.length <= LOG_VALUE_MAX_CHARS) {
    return cleaned;
  }
  return `${cleaned.slice(0, LOG_VALUE_MAX_CHARS)}...`;
}
// Resolves the effective request timeout for OpenAI image calls.
// Falls back to DEFAULT_OPENAI_IMAGE_TIMEOUT_MS not only for undefined but
// also for invalid values (NaN, Infinity, zero, negatives), mirroring the
// validation resolveOpenAIImageCount applies to `count`, so downstream HTTP
// helpers always receive a sane positive timeout.
function resolveOpenAIImageTimeoutMs(timeoutMs: number | undefined): number {
  if (typeof timeoutMs !== "number" || !Number.isFinite(timeoutMs) || timeoutMs <= 0) {
    return DEFAULT_OPENAI_IMAGE_TIMEOUT_MS;
  }
  return timeoutMs;
}
// Normalizes the requested image count to an integer in
// [1, OPENAI_MAX_IMAGE_RESULTS]; missing or non-finite input yields 1.
function resolveOpenAIImageCount(count: number | undefined): number {
  if (typeof count !== "number" || !Number.isFinite(count)) {
    return 1;
  }
  const whole = Math.trunc(count);
  if (whole < 1) {
    return 1;
  }
  return whole > OPENAI_MAX_IMAGE_RESULTS ? OPENAI_MAX_IMAGE_RESULTS : whole;
}
// True only for the canonical public OpenAI API base URL:
// https://api.openai.com/v1 (trailing slashes tolerated) with no explicit
// port, credentials, query string, or fragment. Anything else — including
// OpenAI-compatible proxies — must not enable the Codex OAuth fallback.
function isPublicOpenAIImageBaseUrl(baseUrl: string): boolean {
  const candidate = baseUrl.trim();
  if (candidate === "") {
    return false;
  }
  let parsed: URL;
  try {
    parsed = new URL(candidate);
  } catch {
    // Unparseable input is treated as non-public rather than thrown.
    return false;
  }
  if (parsed.protocol !== "https:" || parsed.hostname.toLowerCase() !== "api.openai.com") {
    return false;
  }
  if (parsed.port !== "" || parsed.username !== "" || parsed.password !== "") {
    return false;
  }
  if (parsed.search !== "" || parsed.hash !== "") {
    return false;
  }
  return parsed.pathname.replace(/\/+$/, "") === "/v1";
}
function isAzureOpenAIBaseUrl(baseUrl?: string): boolean {
const trimmed = baseUrl?.trim();
if (!trimmed) {
@ -173,6 +236,11 @@ type OpenAICodexImageGenerationEvent = {
revised_prompt?: string;
};
response?: {
output?: Array<{
type?: string;
result?: string;
revised_prompt?: string;
}>;
usage?: unknown;
tool_usage?: unknown;
};
@ -204,20 +272,32 @@ function toOpenAIDataUrl(image: ImageGenerationSourceImage): string {
async function readResponseBodyText(response: Response): Promise<string> {
if (!response.body) {
return await response.text();
const text = await response.text();
if (Buffer.byteLength(text, "utf8") > MAX_CODEX_IMAGE_SSE_BYTES) {
throw new Error("OpenAI Codex image generation response exceeded size limit");
}
return text;
}
const reader = response.body.getReader();
const decoder = new TextDecoder();
let text = "";
const chunks: string[] = [];
let byteLength = 0;
try {
while (true) {
const { value, done } = await reader.read();
if (value) {
text += decoder.decode(value, { stream: !done });
byteLength += value.byteLength;
if (byteLength > MAX_CODEX_IMAGE_SSE_BYTES) {
throw new Error("OpenAI Codex image generation response exceeded size limit");
}
chunks.push(decoder.decode(value, { stream: !done }));
}
if (done) {
text += decoder.decode();
return text;
const tail = decoder.decode();
if (tail) {
chunks.push(tail);
}
return chunks.join("");
}
}
} finally {
@ -235,16 +315,46 @@ function parseCodexImageGenerationEvents(body: string): OpenAICodexImageGenerati
if (!data || data === "[DONE]") {
continue;
}
let event: OpenAICodexImageGenerationEvent;
try {
events.push(JSON.parse(data) as OpenAICodexImageGenerationEvent);
event = JSON.parse(data) as OpenAICodexImageGenerationEvent;
} catch {
// Ignore non-JSON SSE payloads from intermediaries; failed HTTP statuses
// are handled before this parser runs.
continue;
}
events.push(event);
if (events.length > MAX_CODEX_IMAGE_SSE_EVENTS) {
throw new Error("OpenAI Codex image generation response exceeded event limit");
}
}
return events;
}
// Decodes a base64 image payload from a Codex response, refusing payloads
// longer than MAX_CODEX_IMAGE_BASE64_CHARS before allocating the buffer.
function decodeCodexImagePayload(payload: string): Buffer {
  const withinLimit = payload.length <= MAX_CODEX_IMAGE_BASE64_CHARS;
  if (!withinLimit) {
    throw new Error("OpenAI Codex image generation result exceeded size limit");
  }
  return Buffer.from(payload, "base64");
}
// Converts one Codex output entry into a provider image result, or null when
// the entry carries no base64 payload. `index` drives the 1-based file name.
function toCodexImage(
  entry: { result?: string; revised_prompt?: string },
  index: number,
): ImageGenerationResult["images"][number] | null {
  const payload = entry.result;
  if (typeof payload !== "string" || payload === "") {
    return null;
  }
  const image = {
    buffer: decodeCodexImagePayload(payload),
    mimeType: DEFAULT_OUTPUT_MIME,
    fileName: `image-${index + 1}.png`,
  };
  // Only attach revisedPrompt when the upstream entry provided one.
  return entry.revised_prompt ? { ...image, revisedPrompt: entry.revised_prompt } : image;
}
function extractCodexImageGenerationResult(params: {
body: string;
model: string;
@ -261,7 +371,7 @@ function extractCodexImageGenerationResult(params: {
throw new Error(message || "OpenAI Codex image generation failed");
}
const completedResponse = events.find((event) => event.type === "response.completed");
const images = events
const outputItemImages = events
.filter(
(event) =>
event.type === "response.output_item.done" &&
@ -269,16 +379,15 @@ function extractCodexImageGenerationResult(params: {
typeof event.item.result === "string" &&
event.item.result.length > 0,
)
.map((event, index) =>
Object.assign(
{
buffer: Buffer.from(event.item?.result ?? "", "base64"),
mimeType: DEFAULT_OUTPUT_MIME,
fileName: `image-${index + 1}.png`,
},
event.item?.revised_prompt ? { revisedPrompt: event.item.revised_prompt } : {},
),
);
.slice(0, OPENAI_MAX_IMAGE_RESULTS)
.map((event, index) => (event.item ? toCodexImage(event.item, index) : null))
.filter((image): image is NonNullable<typeof image> => image !== null);
const completedOutputImages = (completedResponse?.response?.output ?? [])
.filter((entry) => entry.type === "image_generation_call")
.slice(0, OPENAI_MAX_IMAGE_RESULTS)
.map((entry, index) => toCodexImage(entry, index))
.filter((image): image is NonNullable<typeof image> => image !== null);
const images = outputItemImages.length > 0 ? outputItemImages : completedOutputImages;
return {
images,
@ -334,11 +443,31 @@ async function resolveOptionalApiKeyForProvider(
) {
try {
return await resolveApiKeyForProvider(params);
} catch {
} catch (error) {
const provider = params?.provider ?? "";
const message = error instanceof Error ? error.message : "";
if (!message.startsWith(`No API key found for provider "${provider}".`)) {
throw error;
}
return null;
}
}
// Emits a single structured log line recording that the Codex OAuth route was
// selected for image generation. Untrusted fields (auth mode, requested
// model) pass through sanitizeLogValue to block log forgery; timeoutMs is a
// number and is interpolated directly.
function logCodexImageAuthSelected(params: {
  req: Parameters<ImageGenerationProvider["generateImage"]>[0];
  authMode?: unknown;
  timeoutMs: number;
}) {
  const requestedModel = params.req.model || DEFAULT_OPENAI_IMAGE_MODEL;
  const parts = [
    "image auth selected:",
    "provider=openai-codex",
    `mode=${sanitizeLogValue(params.authMode)}`,
    "transport=codex-responses",
    `requestedModel=${sanitizeLogValue(requestedModel)}`,
    "responsesModel=gpt-5.4",
    `timeoutMs=${params.timeoutMs}`,
  ];
  log.info(parts.join(" "));
}
async function generateOpenAICodexImage(params: {
req: Parameters<ImageGenerationProvider["generateImage"]>[0];
apiKey: string;
@ -362,8 +491,9 @@ async function generateOpenAICodexImage(params: {
});
const model = req.model || DEFAULT_OPENAI_IMAGE_MODEL;
const count = req.count ?? 1;
const count = resolveOpenAIImageCount(req.count);
const size = req.size ?? DEFAULT_SIZE;
const timeoutMs = resolveOpenAIImageTimeoutMs(req.timeoutMs);
headers.set("Content-Type", "application/json");
const content: Array<Record<string, unknown>> = [
{ type: "input_text", text: req.prompt },
@ -398,7 +528,7 @@ async function generateOpenAICodexImage(params: {
stream: true,
store: false,
},
timeoutMs: req.timeoutMs,
timeoutMs,
fetchFn: fetch,
allowPrivateNetwork,
dispatcherPolicy,
@ -434,20 +564,32 @@ export function buildOpenAIImageGenerationProvider(): ImageGenerationProvider {
return createOpenAIImageGenerationProviderBase({
id: "openai",
label: "OpenAI",
isConfigured: ({ agentDir }) =>
isProviderApiKeyConfigured({
provider: "openai",
agentDir,
}) ||
isProviderApiKeyConfigured({
isConfigured: ({ cfg, agentDir }) => {
if (
isProviderApiKeyConfigured({
provider: "openai",
agentDir,
})
) {
return true;
}
if (!isPublicOpenAIImageBaseUrl(resolveConfiguredOpenAIBaseUrl(cfg))) {
return false;
}
return isProviderApiKeyConfigured({
provider: "openai-codex",
agentDir,
}),
});
},
async generateImage(req) {
const inputImages = req.inputImages ?? [];
const isEdit = inputImages.length > 0;
const rawBaseUrl = resolveConfiguredOpenAIBaseUrl(req.cfg);
const publicOpenAIBaseUrl = isPublicOpenAIImageBaseUrl(rawBaseUrl);
const useCodexOAuthRoute =
!hasExplicitOpenAIDirectProviderConfig(req.cfg) && hasCodexOAuthProfileConfigured(req);
publicOpenAIBaseUrl &&
!hasExplicitOpenAIDirectProviderConfig(req.cfg) &&
hasCodexOAuthProfileConfigured(req);
if (useCodexOAuthRoute) {
const codexAuth = await resolveApiKeyForProvider({
provider: "openai-codex",
@ -458,6 +600,8 @@ export function buildOpenAIImageGenerationProvider(): ImageGenerationProvider {
if (!codexAuth.apiKey) {
throw new Error("OpenAI Codex OAuth missing");
}
const timeoutMs = resolveOpenAIImageTimeoutMs(req.timeoutMs);
logCodexImageAuthSelected({ req, authMode: codexAuth.mode, timeoutMs });
return generateOpenAICodexImage({ req, apiKey: codexAuth.apiKey });
}
@ -468,6 +612,9 @@ export function buildOpenAIImageGenerationProvider(): ImageGenerationProvider {
store: req.authStore,
});
if (!auth?.apiKey) {
if (!publicOpenAIBaseUrl) {
throw new Error("OpenAI API key missing");
}
const codexAuth = await resolveOptionalApiKeyForProvider({
provider: "openai-codex",
cfg: req.cfg,
@ -475,11 +622,12 @@ export function buildOpenAIImageGenerationProvider(): ImageGenerationProvider {
store: req.authStore,
});
if (codexAuth?.apiKey) {
const timeoutMs = resolveOpenAIImageTimeoutMs(req.timeoutMs);
logCodexImageAuthSelected({ req, authMode: codexAuth.mode, timeoutMs });
return generateOpenAICodexImage({ req, apiKey: codexAuth.apiKey });
}
throw new Error("OpenAI API key or Codex OAuth missing");
}
const rawBaseUrl = resolveConfiguredOpenAIBaseUrl(req.cfg);
const isAzure = isAzureOpenAIBaseUrl(rawBaseUrl);
const { baseUrl, allowPrivateNetwork, headers, dispatcherPolicy } =
@ -496,8 +644,9 @@ export function buildOpenAIImageGenerationProvider(): ImageGenerationProvider {
});
const model = req.model || DEFAULT_OPENAI_IMAGE_MODEL;
const count = req.count ?? 1;
const count = resolveOpenAIImageCount(req.count);
const size = req.size ?? DEFAULT_SIZE;
const timeoutMs = resolveOpenAIImageTimeoutMs(req.timeoutMs);
const url = isAzure
? buildAzureImageUrl(rawBaseUrl, model, isEdit ? "edits" : "generations")
: `${baseUrl}/images/${isEdit ? "edits" : "generations"}`;
@ -527,7 +676,7 @@ export function buildOpenAIImageGenerationProvider(): ImageGenerationProvider {
url,
headers: multipartHeaders,
body: form,
timeoutMs: req.timeoutMs,
timeoutMs,
fetchFn: fetch,
allowPrivateNetwork,
dispatcherPolicy,
@ -545,7 +694,7 @@ export function buildOpenAIImageGenerationProvider(): ImageGenerationProvider {
n: count,
size,
},
timeoutMs: req.timeoutMs,
timeoutMs,
fetchFn: fetch,
allowPrivateNetwork,
dispatcherPolicy,