Mirror of https://github.com/openclaw/openclaw.git, synced 2026-02-09 05:19:32 +08:00
* Model: allow forward-compatible OpenAI Codex GPT-5 IDs
* Model: scope Codex fallback to gpt-5.3-codex
* fix: reorder codex fallback before providerCfg, add ordering test, changelog (#9989) (thanks @w1kke)

Co-authored-by: Robin <4robinlehmann@gmail.com>
@@ -33,6 +33,7 @@ Docs: https://docs.openclaw.ai
 
 ### Fixes
 
+- Models: add forward-compat fallback for `openai-codex/gpt-5.3-codex` when the model registry hasn't discovered it yet. (#9989) Thanks @w1kke.
 - Auto-reply/Docs: normalize `extra-high` (and spaced variants) to `xhigh` for Codex thinking levels, and align Codex 5.3 FAQ examples. (#9976) Thanks @slonce70.
 - Compaction: remove orphaned `tool_result` messages during history pruning to prevent session corruption from aborted tool calls. (#9868, fixes #9769, #9724, #9672)
 - Telegram: pass `parentPeer` for forum topic binding inheritance so group-level bindings apply to all topics within the group. (#9789, fixes #9545, #9351)
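The new changelog entry boils down to one idea: when a brand-new model ID ships before the registry has discovered it, clone the closest known sibling instead of erroring out. A minimal sketch of that idea with a simplified registry shape (the `SimpleModel` type, `registry` map, and `resolveWithFallback` helper are illustrative only, not openclaw's real API):

```typescript
// Illustrative only: a pared-down registry and fallback, not openclaw's real types.
type SimpleModel = { id: string; name: string; api: string; baseUrl: string };

const registry = new Map<string, SimpleModel>([
  ["gpt-5.2-codex", {
    id: "gpt-5.2-codex",
    name: "GPT-5.2 Codex",
    api: "openai-codex-responses",
    baseUrl: "https://chatgpt.com/backend-api",
  }],
]);

function resolveWithFallback(modelId: string): SimpleModel | undefined {
  const known = registry.get(modelId);
  if (known) return known; // registry already discovered it: nothing to do
  if (modelId !== "gpt-5.3-codex") return undefined; // fallback is deliberately narrow
  const template = registry.get("gpt-5.2-codex"); // closest known sibling
  // Clone the sibling and swap only id/name; API flavor and base URL carry over.
  return template ? { ...template, id: modelId, name: modelId } : undefined;
}

console.log(resolveWithFallback("gpt-5.3-codex")?.api); // "openai-codex-responses"
```

The diffs below implement exactly this shape: first the tests, then the fallback itself.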
@@ -1,4 +1,4 @@
-import { describe, expect, it, vi } from "vitest";
+import { beforeEach, describe, expect, it, vi } from "vitest";
 
 vi.mock("../pi-model-discovery.js", () => ({
   discoverAuthStorage: vi.fn(() => ({ mocked: true })),
@@ -6,6 +6,7 @@ vi.mock("../pi-model-discovery.js", () => ({
 }));
 
 import type { OpenClawConfig } from "../../config/config.js";
+import { discoverModels } from "../pi-model-discovery.js";
 import { buildInlineProviderModels, resolveModel } from "./model.js";
 
 const makeModel = (id: string) => ({
@@ -18,6 +19,12 @@ const makeModel = (id: string) => ({
   maxTokens: 1,
 });
 
+beforeEach(() => {
+  vi.mocked(discoverModels).mockReturnValue({
+    find: vi.fn(() => null),
+  } as unknown as ReturnType<typeof discoverModels>);
+});
+
 describe("buildInlineProviderModels", () => {
   it("attaches provider ids to inline models", () => {
     const providers = {
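A note on the test scaffolding in the two hunks above: `vi.mock` swaps out the module at load time, and `vi.mocked(...)` hands back a typed reference to the swapped export so a `beforeEach` can reset its behavior between tests. A self-contained sketch of the same pattern (the `./registry.js` module and `discover` export are hypothetical stand-ins for pi-model-discovery.js):

```typescript
import { beforeEach, expect, it, vi } from "vitest";
// Hypothetical module under mock; stands in for pi-model-discovery.js.
import { discover } from "./registry.js";

vi.mock("./registry.js", () => ({ discover: vi.fn() }));

beforeEach(() => {
  // Reset to "nothing discovered" before every test, so a test that stubs
  // `find` can't leak its stub into the next one.
  vi.mocked(discover).mockReturnValue({
    find: vi.fn(() => null),
  } as unknown as ReturnType<typeof discover>);
});

it("reports unknown models by default", () => {
  expect(discover().find()).toBeNull();
});
```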
@@ -127,4 +134,74 @@ describe("resolveModel", () => {
     expect(result.model?.provider).toBe("custom");
     expect(result.model?.id).toBe("missing-model");
   });
+
+  it("builds an openai-codex fallback for gpt-5.3-codex", () => {
+    const templateModel = {
+      id: "gpt-5.2-codex",
+      name: "GPT-5.2 Codex",
+      provider: "openai-codex",
+      api: "openai-codex-responses",
+      baseUrl: "https://chatgpt.com/backend-api",
+      reasoning: true,
+      input: ["text", "image"] as const,
+      cost: { input: 1.75, output: 14, cacheRead: 0.175, cacheWrite: 0 },
+      contextWindow: 272000,
+      maxTokens: 128000,
+    };
+
+    vi.mocked(discoverModels).mockReturnValue({
+      find: vi.fn((provider: string, modelId: string) => {
+        if (provider === "openai-codex" && modelId === "gpt-5.2-codex") {
+          return templateModel;
+        }
+        return null;
+      }),
+    } as unknown as ReturnType<typeof discoverModels>);
+
+    const result = resolveModel("openai-codex", "gpt-5.3-codex", "/tmp/agent");
+
+    expect(result.error).toBeUndefined();
+    expect(result.model).toMatchObject({
+      provider: "openai-codex",
+      id: "gpt-5.3-codex",
+      api: "openai-codex-responses",
+      baseUrl: "https://chatgpt.com/backend-api",
+      reasoning: true,
+      contextWindow: 272000,
+      maxTokens: 128000,
+    });
+  });
+
+  it("keeps unknown-model errors for non-gpt-5 openai-codex ids", () => {
+    const result = resolveModel("openai-codex", "gpt-4.1-mini", "/tmp/agent");
+    expect(result.model).toBeUndefined();
+    expect(result.error).toBe("Unknown model: openai-codex/gpt-4.1-mini");
+  });
+
+  it("uses codex fallback even when openai-codex provider is configured", () => {
+    // This test verifies the ordering: codex fallback must fire BEFORE the generic providerCfg fallback.
+    // If ordering is wrong, the generic fallback would use api: "openai-responses" (the default)
+    // instead of "openai-codex-responses".
+    const cfg: OpenClawConfig = {
+      models: {
+        providers: {
+          "openai-codex": {
+            baseUrl: "https://custom.example.com",
+            // No models array, or models without gpt-5.3-codex
+          },
+        },
+      },
+    } as OpenClawConfig;
+
+    vi.mocked(discoverModels).mockReturnValue({
+      find: vi.fn(() => null),
+    } as unknown as ReturnType<typeof discoverModels>);
+
+    const result = resolveModel("openai-codex", "gpt-5.3-codex", "/tmp/agent", cfg);
+
+    expect(result.error).toBeUndefined();
+    expect(result.model?.api).toBe("openai-codex-responses");
+    expect(result.model?.id).toBe("gpt-5.3-codex");
+    expect(result.model?.provider).toBe("openai-codex");
+  });
 });
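The last test above pins down the fallback ordering, which is easiest to see in a toy version of the resolution chain (the `resolveSketch` helper and `Resolved` type are hypothetical, not the real resolveModel):

```typescript
// Toy version of the ordering the test guards: specific fallback first,
// generic configured-provider fallback second. Hypothetical names throughout.
type Resolved = { id: string; api: string };

function resolveSketch(provider: string, id: string, providerConfigured: boolean): Resolved | undefined {
  // 1) The specific Codex forward-compat fallback knows the right API flavor.
  if (provider === "openai-codex" && id === "gpt-5.3-codex") {
    return { id, api: "openai-codex-responses" };
  }
  // 2) The generic fallback fires for any configured provider but assumes the
  //    default API flavor, which is wrong for Codex.
  if (providerConfigured) {
    return { id, api: "openai-responses" };
  }
  return undefined;
}

// Swap the two checks and a configured "openai-codex" provider would win,
// handing back api: "openai-responses" for gpt-5.3-codex.
console.log(resolveSketch("openai-codex", "gpt-5.3-codex", true));
// -> { id: "gpt-5.3-codex", api: "openai-codex-responses" }
```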
@@ -19,6 +19,50 @@ type InlineProviderConfig = {
   models?: ModelDefinitionConfig[];
 };
 
+const OPENAI_CODEX_GPT_53_MODEL_ID = "gpt-5.3-codex";
+
+const OPENAI_CODEX_TEMPLATE_MODEL_IDS = ["gpt-5.2-codex"] as const;
+
+function resolveOpenAICodexGpt53FallbackModel(
+  provider: string,
+  modelId: string,
+  modelRegistry: ModelRegistry,
+): Model<Api> | undefined {
+  const normalizedProvider = normalizeProviderId(provider);
+  const trimmedModelId = modelId.trim();
+  if (normalizedProvider !== "openai-codex") {
+    return undefined;
+  }
+  if (trimmedModelId.toLowerCase() !== OPENAI_CODEX_GPT_53_MODEL_ID) {
+    return undefined;
+  }
+
+  for (const templateId of OPENAI_CODEX_TEMPLATE_MODEL_IDS) {
+    const template = modelRegistry.find(normalizedProvider, templateId) as Model<Api> | null;
+    if (!template) {
+      continue;
+    }
+    return normalizeModelCompat({
+      ...template,
+      id: trimmedModelId,
+      name: trimmedModelId,
+    } as Model<Api>);
+  }
+
+  return normalizeModelCompat({
+    id: trimmedModelId,
+    name: trimmedModelId,
+    api: "openai-codex-responses",
+    provider: normalizedProvider,
+    baseUrl: "https://chatgpt.com/backend-api",
+    reasoning: true,
+    input: ["text", "image"],
+    cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
+    contextWindow: DEFAULT_CONTEXT_TOKENS,
+    maxTokens: DEFAULT_CONTEXT_TOKENS,
+  } as Model<Api>);
+}
+
 export function buildInlineProviderModels(
   providers: Record<string, InlineProviderConfig>,
 ): InlineModelEntry[] {
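The function above has two branches worth calling out: when a `gpt-5.2-codex` template has been discovered, the clone inherits the template's real pricing and token limits; when nothing has been discovered yet, the synthesized entry zeroes out cost and falls back to `DEFAULT_CONTEXT_TOKENS` for both limits. A condensed sketch of just that split (the `CodexModel` type and the constant's value are assumptions for illustration):

```typescript
// Condensed view of the two branches; not the real types, and the constant's
// value is an assumed stand-in, not the real DEFAULT_CONTEXT_TOKENS.
type CodexModel = {
  id: string;
  name: string;
  cost: { input: number; output: number };
  contextWindow: number;
  maxTokens: number;
};

const DEFAULT_CONTEXT_TOKENS = 200_000; // assumed stand-in value

function fallbackSketch(template: CodexModel | null): CodexModel {
  if (template) {
    // Branch 1: clone the discovered template; pricing and limits carry over.
    return { ...template, id: "gpt-5.3-codex", name: "gpt-5.3-codex" };
  }
  // Branch 2: nothing discovered yet; neutral entry with zeroed cost and
  // default token limits, so the model still resolves instead of erroring.
  return {
    id: "gpt-5.3-codex",
    name: "gpt-5.3-codex",
    cost: { input: 0, output: 0 },
    contextWindow: DEFAULT_CONTEXT_TOKENS,
    maxTokens: DEFAULT_CONTEXT_TOKENS,
  };
}
```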
@@ -85,6 +129,17 @@ export function resolveModel(
       modelRegistry,
     };
   }
+  // Codex gpt-5.3 forward-compat fallback must be checked BEFORE the generic providerCfg fallback.
+  // Otherwise, if cfg.models.providers["openai-codex"] is configured, the generic fallback fires
+  // with api: "openai-responses" instead of the correct "openai-codex-responses".
+  const codexForwardCompat = resolveOpenAICodexGpt53FallbackModel(
+    provider,
+    modelId,
+    modelRegistry,
+  );
+  if (codexForwardCompat) {
+    return { model: codexForwardCompat, authStorage, modelRegistry };
+  }
   const providerCfg = providers[provider];
   if (providerCfg || modelId.startsWith("mock-")) {
     const fallbackModel: Model<Api> = normalizeModelCompat({