diff --git a/CHANGELOG.md b/CHANGELOG.md index fffe895a4d..7891f7f4a5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ Docs: https://docs.openclaw.ai ### Changes - Models: default Anthropic model to `anthropic/claude-opus-4-6`. (#9853) Thanks @TinyTb. +- Models/Onboarding: refresh provider defaults, update OpenAI/OpenAI Codex wizard defaults, and harden model allowlist initialization for first-time configs with matching docs/tests. (#9911) Thanks @gumadeiras. - Telegram: auto-inject forum topic `threadId` in message tool and subagent announce so media, buttons, and subagent results land in the correct topic instead of General. (#7235) Thanks @Lukavyi. - CLI: sort `openclaw --help` commands (and options) alphabetically. (#8068) Thanks @deepsoumya617. - Telegram: remove last `@ts-nocheck` from `bot-handlers.ts`, use Grammy types directly, deduplicate `StickerMetadata`. Zero `@ts-nocheck` remaining in `src/telegram/`. (#9206) diff --git a/README.md b/README.md index ba3fce1951..c954b93cbd 100644 --- a/README.md +++ b/README.md @@ -34,7 +34,7 @@ New install? Start here: [Getting started](https://docs.openclaw.ai/start/gettin - **[Anthropic](https://www.anthropic.com/)** (Claude Pro/Max) - **[OpenAI](https://openai.com/)** (ChatGPT/Codex) -Model note: while any model is supported, I strongly recommend **Anthropic Pro/Max (100/200) + Opus 4.5** for long‑context strength and better prompt‑injection resistance. See [Onboarding](https://docs.openclaw.ai/start/onboarding). +Model note: while any model is supported, I strongly recommend **Anthropic Pro/Max (100/200) + Opus 4.6** for long‑context strength and better prompt‑injection resistance. See [Onboarding](https://docs.openclaw.ai/start/onboarding). 
## Models (selection + auth) @@ -316,7 +316,7 @@ Minimal `~/.openclaw/openclaw.json` (model + defaults): ```json5 { agent: { - model: "anthropic/claude-opus-4-5", + model: "anthropic/claude-opus-4-6", }, } ``` diff --git a/apps/macos/Sources/OpenClaw/OnboardingView+Pages.swift b/apps/macos/Sources/OpenClaw/OnboardingView+Pages.swift index 48a1baf7ec..309c4aa026 100644 --- a/apps/macos/Sources/OpenClaw/OnboardingView+Pages.swift +++ b/apps/macos/Sources/OpenClaw/OnboardingView+Pages.swift @@ -335,7 +335,7 @@ extension OnboardingView { .multilineTextAlignment(.center) .frame(maxWidth: 540) .fixedSize(horizontal: false, vertical: true) - Text("OpenClaw supports any model — we strongly recommend Opus 4.5 for the best experience.") + Text("OpenClaw supports any model — we strongly recommend Opus 4.6 for the best experience.") .font(.callout) .foregroundStyle(.secondary) .multilineTextAlignment(.center) diff --git a/apps/macos/Sources/OpenClaw/SessionData.swift b/apps/macos/Sources/OpenClaw/SessionData.swift index a106cf9dc6..defd4fe8aa 100644 --- a/apps/macos/Sources/OpenClaw/SessionData.swift +++ b/apps/macos/Sources/OpenClaw/SessionData.swift @@ -169,7 +169,7 @@ extension SessionRow { systemSent: true, abortedLastRun: true, tokens: SessionTokenStats(input: 5000, output: 1200, total: 6200, contextTokens: 200_000), - model: "claude-opus-4-5"), + model: "claude-opus-4-6"), SessionRow( id: "global", key: "global", @@ -242,7 +242,7 @@ struct SessionStoreSnapshot { @MainActor enum SessionLoader { - static let fallbackModel = "claude-opus-4-5" + static let fallbackModel = "claude-opus-4-6" static let fallbackContextTokens = 200_000 static let defaultStorePath = standardize( diff --git a/apps/macos/Tests/OpenClawIPCTests/MenuSessionsInjectorTests.swift b/apps/macos/Tests/OpenClawIPCTests/MenuSessionsInjectorTests.swift index 0228101f57..8395ed145c 100644 --- a/apps/macos/Tests/OpenClawIPCTests/MenuSessionsInjectorTests.swift +++ 
b/apps/macos/Tests/OpenClawIPCTests/MenuSessionsInjectorTests.swift @@ -23,7 +23,7 @@ struct MenuSessionsInjectorTests { let injector = MenuSessionsInjector() injector.setTestingControlChannelConnected(true) - let defaults = SessionDefaults(model: "anthropic/claude-opus-4-5", contextTokens: 200_000) + let defaults = SessionDefaults(model: "anthropic/claude-opus-4-6", contextTokens: 200_000) let rows = [ SessionRow( id: "main", @@ -41,7 +41,7 @@ struct MenuSessionsInjectorTests { systemSent: false, abortedLastRun: false, tokens: SessionTokenStats(input: 10, output: 20, total: 30, contextTokens: 200_000), - model: "claude-opus-4-5"), + model: "claude-opus-4-6"), SessionRow( id: "discord:group:alpha", key: "discord:group:alpha", @@ -58,7 +58,7 @@ struct MenuSessionsInjectorTests { systemSent: true, abortedLastRun: true, tokens: SessionTokenStats(input: 50, output: 50, total: 100, contextTokens: 200_000), - model: "claude-opus-4-5"), + model: "claude-opus-4-6"), ] let snapshot = SessionStoreSnapshot( storePath: "/tmp/sessions.json", diff --git a/docs/bedrock.md b/docs/bedrock.md index 57d2ebc6e9..34c759dbb5 100644 --- a/docs/bedrock.md +++ b/docs/bedrock.md @@ -78,8 +78,8 @@ export AWS_BEARER_TOKEN_BEDROCK="..." auth: "aws-sdk", models: [ { - id: "anthropic.claude-opus-4-5-20251101-v1:0", - name: "Claude Opus 4.5 (Bedrock)", + id: "us.anthropic.claude-opus-4-6-v1:0", + name: "Claude Opus 4.6 (Bedrock)", reasoning: true, input: ["text", "image"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, @@ -92,7 +92,7 @@ export AWS_BEARER_TOKEN_BEDROCK="..." 
}, agents: { defaults: { - model: { primary: "amazon-bedrock/anthropic.claude-opus-4-5-20251101-v1:0" }, + model: { primary: "amazon-bedrock/us.anthropic.claude-opus-4-6-v1:0" }, }, }, } diff --git a/docs/concepts/model-providers.md b/docs/concepts/model-providers.md index 6af91f29dd..4d313cf0f2 100644 --- a/docs/concepts/model-providers.md +++ b/docs/concepts/model-providers.md @@ -13,7 +13,7 @@ For model selection rules, see [/concepts/models](/concepts/models). ## Quick rules -- Model refs use `provider/model` (example: `opencode/claude-opus-4-5`). +- Model refs use `provider/model` (example: `opencode/claude-opus-4-6`). - If you set `agents.defaults.models`, it becomes the allowlist. - CLI helpers: `openclaw onboard`, `openclaw models list`, `openclaw models set `. @@ -26,12 +26,12 @@ OpenClaw ships with the pi‑ai catalog. These providers require **no** - Provider: `openai` - Auth: `OPENAI_API_KEY` -- Example model: `openai/gpt-5.2` +- Example model: `openai/gpt-5.1-codex` - CLI: `openclaw onboard --auth-choice openai-api-key` ```json5 { - agents: { defaults: { model: { primary: "openai/gpt-5.2" } } }, + agents: { defaults: { model: { primary: "openai/gpt-5.1-codex" } } }, } ``` @@ -39,12 +39,12 @@ OpenClaw ships with the pi‑ai catalog. These providers require **no** - Provider: `anthropic` - Auth: `ANTHROPIC_API_KEY` or `claude setup-token` -- Example model: `anthropic/claude-opus-4-5` +- Example model: `anthropic/claude-opus-4-6` - CLI: `openclaw onboard --auth-choice token` (paste setup-token) or `openclaw models auth paste-token --provider anthropic` ```json5 { - agents: { defaults: { model: { primary: "anthropic/claude-opus-4-5" } } }, + agents: { defaults: { model: { primary: "anthropic/claude-opus-4-6" } } }, } ``` @@ -52,12 +52,12 @@ OpenClaw ships with the pi‑ai catalog. 
These providers require **no** - Provider: `openai-codex` - Auth: OAuth (ChatGPT) -- Example model: `openai-codex/gpt-5.2` +- Example model: `openai-codex/gpt-5.3-codex` - CLI: `openclaw onboard --auth-choice openai-codex` or `openclaw models auth login --provider openai-codex` ```json5 { - agents: { defaults: { model: { primary: "openai-codex/gpt-5.2" } } }, + agents: { defaults: { model: { primary: "openai-codex/gpt-5.3-codex" } } }, } ``` @@ -65,12 +65,12 @@ OpenClaw ships with the pi‑ai catalog. These providers require **no** - Provider: `opencode` - Auth: `OPENCODE_API_KEY` (or `OPENCODE_ZEN_API_KEY`) -- Example model: `opencode/claude-opus-4-5` +- Example model: `opencode/claude-opus-4-6` - CLI: `openclaw onboard --auth-choice opencode-zen` ```json5 { - agents: { defaults: { model: { primary: "opencode/claude-opus-4-5" } } }, + agents: { defaults: { model: { primary: "opencode/claude-opus-4-6" } } }, } ``` @@ -106,7 +106,7 @@ OpenClaw ships with the pi‑ai catalog. These providers require **no** - Provider: `vercel-ai-gateway` - Auth: `AI_GATEWAY_API_KEY` -- Example model: `vercel-ai-gateway/anthropic/claude-opus-4.5` +- Example model: `vercel-ai-gateway/anthropic/claude-opus-4.6` - CLI: `openclaw onboard --auth-choice ai-gateway-api-key` ### Other built-in providers @@ -309,7 +309,7 @@ Notes: ```bash openclaw onboard --auth-choice opencode-zen -openclaw models set opencode/claude-opus-4-5 +openclaw models set opencode/claude-opus-4-6 openclaw models list ``` diff --git a/docs/concepts/models.md b/docs/concepts/models.md index 244afa5d34..1f602bac75 100644 --- a/docs/concepts/models.md +++ b/docs/concepts/models.md @@ -83,7 +83,7 @@ Example allowlist config: model: { primary: "anthropic/claude-sonnet-4-5" }, models: { "anthropic/claude-sonnet-4-5": { alias: "Sonnet" }, - "anthropic/claude-opus-4-5": { alias: "Opus" }, + "anthropic/claude-opus-4-6": { alias: "Opus" }, }, }, } diff --git a/docs/concepts/multi-agent.md b/docs/concepts/multi-agent.md index 
4713833376..9952319731 100644 --- a/docs/concepts/multi-agent.md +++ b/docs/concepts/multi-agent.md @@ -221,7 +221,7 @@ Split by channel: route WhatsApp to a fast everyday agent and Telegram to an Opu id: "opus", name: "Deep Work", workspace: "~/.openclaw/workspace-opus", - model: "anthropic/claude-opus-4-5", + model: "anthropic/claude-opus-4-6", }, ], }, @@ -255,7 +255,7 @@ Keep WhatsApp on the fast agent, but route one DM to Opus: id: "opus", name: "Deep Work", workspace: "~/.openclaw/workspace-opus", - model: "anthropic/claude-opus-4-5", + model: "anthropic/claude-opus-4-6", }, ], }, diff --git a/docs/gateway/cli-backends.md b/docs/gateway/cli-backends.md index 8e81f66206..186a5355d3 100644 --- a/docs/gateway/cli-backends.md +++ b/docs/gateway/cli-backends.md @@ -25,13 +25,13 @@ want “always works” text responses without relying on external APIs. You can use Claude Code CLI **without any config** (OpenClaw ships a built-in default): ```bash -openclaw agent --message "hi" --model claude-cli/opus-4.5 +openclaw agent --message "hi" --model claude-cli/opus-4.6 ``` Codex CLI also works out of the box: ```bash -openclaw agent --message "hi" --model codex-cli/gpt-5.2-codex +openclaw agent --message "hi" --model codex-cli/gpt-5.3-codex ``` If your gateway runs under launchd/systemd and PATH is minimal, add just the @@ -62,11 +62,12 @@ Add a CLI backend to your fallback list so it only runs when primary models fail agents: { defaults: { model: { - primary: "anthropic/claude-opus-4-5", - fallbacks: ["claude-cli/opus-4.5"], + primary: "anthropic/claude-opus-4-6", + fallbacks: ["claude-cli/opus-4.6", "claude-cli/opus-4.5"], }, models: { - "anthropic/claude-opus-4-5": { alias: "Opus" }, + "anthropic/claude-opus-4-6": { alias: "Opus" }, + "claude-cli/opus-4.6": {}, "claude-cli/opus-4.5": {}, }, }, @@ -112,6 +113,7 @@ The provider id becomes the left side of your model ref: input: "arg", modelArg: "--model", modelAliases: { + "claude-opus-4-6": "opus", "claude-opus-4-5": 
"opus", "claude-sonnet-4-5": "sonnet", }, diff --git a/docs/gateway/configuration-examples.md b/docs/gateway/configuration-examples.md index 6924bc5366..79b6d2acd1 100644 --- a/docs/gateway/configuration-examples.md +++ b/docs/gateway/configuration-examples.md @@ -226,13 +226,13 @@ Save to `~/.openclaw/openclaw.json` and you can DM the bot from that number. userTimezone: "America/Chicago", model: { primary: "anthropic/claude-sonnet-4-5", - fallbacks: ["anthropic/claude-opus-4-5", "openai/gpt-5.2"], + fallbacks: ["anthropic/claude-opus-4-6", "openai/gpt-5.2"], }, imageModel: { primary: "openrouter/anthropic/claude-sonnet-4-5", }, models: { - "anthropic/claude-opus-4-5": { alias: "opus" }, + "anthropic/claude-opus-4-6": { alias: "opus" }, "anthropic/claude-sonnet-4-5": { alias: "sonnet" }, "openai/gpt-5.2": { alias: "gpt" }, }, @@ -496,7 +496,7 @@ If more than one person can DM your bot (multiple entries in `allowFrom`, pairin workspace: "~/.openclaw/workspace", model: { primary: "anthropic/claude-sonnet-4-5", - fallbacks: ["anthropic/claude-opus-4-5"], + fallbacks: ["anthropic/claude-opus-4-6"], }, }, } @@ -534,7 +534,7 @@ If more than one person can DM your bot (multiple entries in `allowFrom`, pairin agent: { workspace: "~/.openclaw/workspace", model: { - primary: "anthropic/claude-opus-4-5", + primary: "anthropic/claude-opus-4-6", fallbacks: ["minimax/MiniMax-M2.1"], }, }, diff --git a/docs/gateway/configuration.md b/docs/gateway/configuration.md index fe8ff4d5f2..2c71447b5d 100644 --- a/docs/gateway/configuration.md +++ b/docs/gateway/configuration.md @@ -1547,8 +1547,8 @@ The `responsePrefix` string can include template variables that resolve dynamica | Variable | Description | Example | | ----------------- | ---------------------- | --------------------------- | -| `{model}` | Short model name | `claude-opus-4-5`, `gpt-4o` | -| `{modelFull}` | Full model identifier | `anthropic/claude-opus-4-5` | +| `{model}` | Short model name | `claude-opus-4-6`, `gpt-4o` | 
+| `{modelFull}` | Full model identifier | `anthropic/claude-opus-4-6` | | `{provider}` | Provider name | `anthropic`, `openai` | | `{thinkingLevel}` | Current thinking level | `high`, `low`, `off` | | `{identity.name}` | Agent identity name | (same as `"auto"` mode) | @@ -1564,7 +1564,7 @@ Unresolved variables remain as literal text. } ``` -Example output: `[claude-opus-4-5 | think:high] Here's my response...` +Example output: `[claude-opus-4-6 | think:high] Here's my response...` WhatsApp inbound prefix is configured via `channels.whatsapp.messagePrefix` (deprecated: `messages.messagePrefix`). Default stays **unchanged**: `"[openclaw]"` when @@ -1710,7 +1710,7 @@ Z.AI GLM-4.x models automatically enable thinking mode unless you: OpenClaw also ships a few built-in alias shorthands. Defaults only apply when the model is already present in `agents.defaults.models`: -- `opus` -> `anthropic/claude-opus-4-5` +- `opus` -> `anthropic/claude-opus-4-6` - `sonnet` -> `anthropic/claude-sonnet-4-5` - `gpt` -> `openai/gpt-5.2` - `gpt-mini` -> `openai/gpt-5-mini` @@ -1719,18 +1719,18 @@ is already present in `agents.defaults.models`: If you configure the same alias name (case-insensitive) yourself, your value wins (defaults never override). 
-Example: Opus 4.5 primary with MiniMax M2.1 fallback (hosted MiniMax): +Example: Opus 4.6 primary with MiniMax M2.1 fallback (hosted MiniMax): ```json5 { agents: { defaults: { models: { - "anthropic/claude-opus-4-5": { alias: "opus" }, + "anthropic/claude-opus-4-6": { alias: "opus" }, "minimax/MiniMax-M2.1": { alias: "minimax" }, }, model: { - primary: "anthropic/claude-opus-4-5", + primary: "anthropic/claude-opus-4-6", fallbacks: ["minimax/MiniMax-M2.1"], }, }, @@ -1786,7 +1786,7 @@ Example: agents: { defaults: { models: { - "anthropic/claude-opus-4-5": { alias: "Opus" }, + "anthropic/claude-opus-4-6": { alias: "Opus" }, "anthropic/claude-sonnet-4-1": { alias: "Sonnet" }, "openrouter/deepseek/deepseek-r1:free": {}, "zai/glm-4.7": { @@ -1800,7 +1800,7 @@ Example: }, }, model: { - primary: "anthropic/claude-opus-4-5", + primary: "anthropic/claude-opus-4-6", fallbacks: [ "openrouter/deepseek/deepseek-r1:free", "openrouter/meta-llama/llama-3.3-70b-instruct:free", @@ -2011,7 +2011,7 @@ Typing indicators: - `session.typingIntervalSeconds`: per-session override for the refresh interval. See [/concepts/typing-indicators](/concepts/typing-indicators) for behavior details. -`agents.defaults.model.primary` should be set as `provider/model` (e.g. `anthropic/claude-opus-4-5`). +`agents.defaults.model.primary` should be set as `provider/model` (e.g. `anthropic/claude-opus-4-6`). Aliases come from `agents.defaults.models.*.alias` (e.g. `Opus`). If you omit the provider, OpenClaw currently assumes `anthropic` as a temporary deprecation fallback. @@ -2485,7 +2485,7 @@ the built-in `opencode` provider from pi-ai; set `OPENCODE_API_KEY` (or Notes: -- Model refs use `opencode/` (example: `opencode/claude-opus-4-5`). +- Model refs use `opencode/` (example: `opencode/claude-opus-4-6`). - If you enable an allowlist via `agents.defaults.models`, add each model you plan to use. - Shortcut: `openclaw onboard --auth-choice opencode-zen`. 
@@ -2493,8 +2493,8 @@ Notes: { agents: { defaults: { - model: { primary: "opencode/claude-opus-4-5" }, - models: { "opencode/claude-opus-4-5": { alias: "Opus" } }, + model: { primary: "opencode/claude-opus-4-6" }, + models: { "opencode/claude-opus-4-6": { alias: "Opus" } }, }, }, } @@ -2652,7 +2652,7 @@ Use MiniMax M2.1 directly without LM Studio: agent: { model: { primary: "minimax/MiniMax-M2.1" }, models: { - "anthropic/claude-opus-4-5": { alias: "Opus" }, + "anthropic/claude-opus-4-6": { alias: "Opus" }, "minimax/MiniMax-M2.1": { alias: "Minimax" }, }, }, diff --git a/docs/gateway/heartbeat.md b/docs/gateway/heartbeat.md index 1d10d7a3a8..287581ab29 100644 --- a/docs/gateway/heartbeat.md +++ b/docs/gateway/heartbeat.md @@ -83,7 +83,7 @@ and logged; a message that is only `HEARTBEAT_OK` is dropped. defaults: { heartbeat: { every: "30m", // default: 30m (0m disables) - model: "anthropic/claude-opus-4-5", + model: "anthropic/claude-opus-4-6", includeReasoning: false, // default: false (deliver separate Reasoning: message when available) target: "last", // last | none | (core or plugin, e.g. "bluebubbles") to: "+15551234567", // optional channel-specific override diff --git a/docs/gateway/local-models.md b/docs/gateway/local-models.md index 24f152eac6..fe715ab055 100644 --- a/docs/gateway/local-models.md +++ b/docs/gateway/local-models.md @@ -21,7 +21,7 @@ Best current local stack. 
Load MiniMax M2.1 in LM Studio, enable the local serve defaults: { model: { primary: "lmstudio/minimax-m2.1-gs32" }, models: { - "anthropic/claude-opus-4-5": { alias: "Opus" }, + "anthropic/claude-opus-4-6": { alias: "Opus" }, "lmstudio/minimax-m2.1-gs32": { alias: "Minimax" }, }, }, @@ -68,12 +68,12 @@ Keep hosted models configured even when running local; use `models.mode: "merge" defaults: { model: { primary: "anthropic/claude-sonnet-4-5", - fallbacks: ["lmstudio/minimax-m2.1-gs32", "anthropic/claude-opus-4-5"], + fallbacks: ["lmstudio/minimax-m2.1-gs32", "anthropic/claude-opus-4-6"], }, models: { "anthropic/claude-sonnet-4-5": { alias: "Sonnet" }, "lmstudio/minimax-m2.1-gs32": { alias: "MiniMax Local" }, - "anthropic/claude-opus-4-5": { alias: "Opus" }, + "anthropic/claude-opus-4-6": { alias: "Opus" }, }, }, }, diff --git a/docs/gateway/security/index.md b/docs/gateway/security/index.md index f9f9fe2daf..c6b521048e 100644 --- a/docs/gateway/security/index.md +++ b/docs/gateway/security/index.md @@ -243,7 +243,7 @@ Even with strong system prompts, **prompt injection is not solved**. System prom - Run sensitive tool execution in a sandbox; keep secrets out of the agent’s reachable filesystem. - Note: sandboxing is opt-in. If sandbox mode is off, exec runs on the gateway host even though tools.exec.host defaults to sandbox, and host exec does not require approvals unless you set host=gateway and configure exec approvals. - Limit high-risk tools (`exec`, `browser`, `web_fetch`, `web_search`) to trusted agents or explicit allowlists. -- **Model choice matters:** older/legacy models can be less robust against prompt injection and tool misuse. Prefer modern, instruction-hardened models for any bot with tools. We recommend Anthropic Opus 4.5 because it’s quite good at recognizing prompt injections (see [“A step forward on safety”](https://www.anthropic.com/news/claude-opus-4-5)). 
+- **Model choice matters:** older/legacy models can be less robust against prompt injection and tool misuse. Prefer modern, instruction-hardened models for any bot with tools. We recommend Anthropic Opus 4.6 (or the latest Opus) because it’s strong at recognizing prompt injections (see [“A step forward on safety”](https://www.anthropic.com/news/claude-opus-4-5)). Red flags to treat as untrusted: diff --git a/docs/help/faq.md b/docs/help/faq.md index a9348b69f1..0e1fd2faf5 100644 --- a/docs/help/faq.md +++ b/docs/help/faq.md @@ -707,7 +707,7 @@ Yes - via pi-ai's **Amazon Bedrock (Converse)** provider with **manual config**. ### How does Codex auth work -OpenClaw supports **OpenAI Code (Codex)** via OAuth (ChatGPT sign-in). The wizard can run the OAuth flow and will set the default model to `openai-codex/gpt-5.2` when appropriate. See [Model providers](/concepts/model-providers) and [Wizard](/start/wizard). +OpenClaw supports **OpenAI Code (Codex)** via OAuth (ChatGPT sign-in). The wizard can run the OAuth flow and will set the default model to `openai-codex/gpt-5.3-codex` when appropriate. See [Model providers](/concepts/model-providers) and [Wizard](/start/wizard). ### Do you support OpenAI subscription auth Codex OAuth @@ -1936,11 +1936,11 @@ OpenClaw's default model is whatever you set as: agents.defaults.model.primary ``` -Models are referenced as `provider/model` (example: `anthropic/claude-opus-4-5`). If you omit the provider, OpenClaw currently assumes `anthropic` as a temporary deprecation fallback - but you should still **explicitly** set `provider/model`. +Models are referenced as `provider/model` (example: `anthropic/claude-opus-4-6`). If you omit the provider, OpenClaw currently assumes `anthropic` as a temporary deprecation fallback - but you should still **explicitly** set `provider/model`. ### What model do you recommend -**Recommended default:** `anthropic/claude-opus-4-5`. +**Recommended default:** `anthropic/claude-opus-4-6`. 
**Good alternative:** `anthropic/claude-sonnet-4-5`. **Reliable (less character):** `openai/gpt-5.2` - nearly as good as Opus, just less personality. **Budget:** `zai/glm-4.7`. @@ -1989,7 +1989,7 @@ Docs: [Models](/concepts/models), [Configure](/cli/configure), [Config](/cli/con ### What do OpenClaw, Flawd, and Krill use for models -- **OpenClaw + Flawd:** Anthropic Opus (`anthropic/claude-opus-4-5`) - see [Anthropic](/providers/anthropic). +- **OpenClaw + Flawd:** Anthropic Opus (`anthropic/claude-opus-4-6`) - see [Anthropic](/providers/anthropic). - **Krill:** MiniMax M2.1 (`minimax/MiniMax-M2.1`) - see [MiniMax](/providers/minimax). ### How do I switch models on the fly without restarting @@ -2029,7 +2029,7 @@ It also shows the configured provider endpoint (`baseUrl`) and API mode (`api`) Re-run `/model` **without** the `@profile` suffix: ``` -/model anthropic/claude-opus-4-5 +/model anthropic/claude-opus-4-6 ``` If you want to return to the default, pick it from `/model` (or send `/model `). @@ -2039,8 +2039,8 @@ Use `/model status` to confirm which auth profile is active. Yes. Set one as default and switch as needed: -- **Quick switch (per session):** `/model gpt-5.2` for daily tasks, `/model gpt-5.2-codex` for coding. -- **Default + switch:** set `agents.defaults.model.primary` to `openai-codex/gpt-5.2`, then switch to `openai-codex/gpt-5.2-codex` when coding (or the other way around). +- **Quick switch (per session):** `/model gpt-5.2` for daily tasks, `/model gpt-5.3-codex` for coding. +- **Default + switch:** set `agents.defaults.model.primary` to `openai-codex/gpt-5.3-codex`, then switch to `openai-codex/gpt-5.2` for daily tasks (or the other way around). - **Sub-agents:** route coding tasks to sub-agents with a different default model. See [Models](/concepts/models) and [Slash commands](/tools/slash-commands). @@ -2118,7 +2118,7 @@ Docs: [Models](/concepts/models), [Multi-Agent Routing](/concepts/multi-agent), Yes. 
OpenClaw ships a few default shorthands (only applied when the model exists in `agents.defaults.models`): -- `opus` → `anthropic/claude-opus-4-5` +- `opus` → `anthropic/claude-opus-4-6` - `sonnet` → `anthropic/claude-sonnet-4-5` - `gpt` → `openai/gpt-5.2` - `gpt-mini` → `openai/gpt-5-mini` @@ -2135,9 +2135,9 @@ Aliases come from `agents.defaults.models..alias`. Example: { agents: { defaults: { - model: { primary: "anthropic/claude-opus-4-5" }, + model: { primary: "anthropic/claude-opus-4-6" }, models: { - "anthropic/claude-opus-4-5": { alias: "opus" }, + "anthropic/claude-opus-4-6": { alias: "opus" }, "anthropic/claude-sonnet-4-5": { alias: "sonnet" }, "anthropic/claude-haiku-4-5": { alias: "haiku" }, }, @@ -2823,7 +2823,7 @@ You can add options like `debounce:2s cap:25 drop:summarize` for followup modes. **Q: "What's the default model for Anthropic with an API key?"** -**A:** In OpenClaw, credentials and model selection are separate. Setting `ANTHROPIC_API_KEY` (or storing an Anthropic API key in auth profiles) enables authentication, but the actual default model is whatever you configure in `agents.defaults.model.primary` (for example, `anthropic/claude-sonnet-4-5` or `anthropic/claude-opus-4-5`). If you see `No credentials found for profile "anthropic:default"`, it means the Gateway couldn't find Anthropic credentials in the expected `auth-profiles.json` for the agent that's running. +**A:** In OpenClaw, credentials and model selection are separate. Setting `ANTHROPIC_API_KEY` (or storing an Anthropic API key in auth profiles) enables authentication, but the actual default model is whatever you configure in `agents.defaults.model.primary` (for example, `anthropic/claude-sonnet-4-5` or `anthropic/claude-opus-4-6`). If you see `No credentials found for profile "anthropic:default"`, it means the Gateway couldn't find Anthropic credentials in the expected `auth-profiles.json` for the agent that's running. 
--- diff --git a/docs/nodes/media-understanding.md b/docs/nodes/media-understanding.md index 485497bf92..ed5fa00909 100644 --- a/docs/nodes/media-understanding.md +++ b/docs/nodes/media-understanding.md @@ -186,7 +186,7 @@ If you omit `capabilities`, the entry is eligible for the list it appears in. **Image** - Prefer your active model if it supports images. -- Good defaults: `openai/gpt-5.2`, `anthropic/claude-opus-4-5`, `google/gemini-3-pro-preview`. +- Good defaults: `openai/gpt-5.2`, `anthropic/claude-opus-4-6`, `google/gemini-3-pro-preview`. **Audio** @@ -300,7 +300,7 @@ When `mode: "all"`, outputs are labeled `[Image 1/2]`, `[Audio 2/2]`, etc. maxChars: 500, models: [ { provider: "openai", model: "gpt-5.2" }, - { provider: "anthropic", model: "claude-opus-4-5" }, + { provider: "anthropic", model: "claude-opus-4-6" }, { type: "cli", command: "gemini", diff --git a/docs/platforms/fly.md b/docs/platforms/fly.md index a3eadd9b41..0e0745c126 100644 --- a/docs/platforms/fly.md +++ b/docs/platforms/fly.md @@ -148,7 +148,7 @@ cat > /data/openclaw.json << 'EOF' "agents": { "defaults": { "model": { - "primary": "anthropic/claude-opus-4-5", + "primary": "anthropic/claude-opus-4-6", "fallbacks": ["anthropic/claude-sonnet-4-5", "openai/gpt-4o"] }, "maxConcurrent": 4 diff --git a/docs/providers/anthropic.md b/docs/providers/anthropic.md index b86cc141f3..5f2374fe14 100644 --- a/docs/providers/anthropic.md +++ b/docs/providers/anthropic.md @@ -31,7 +31,7 @@ openclaw onboard --anthropic-api-key "$ANTHROPIC_API_KEY" ```json5 { env: { ANTHROPIC_API_KEY: "sk-ant-..." 
}, - agents: { defaults: { model: { primary: "anthropic/claude-opus-4-5" } } }, + agents: { defaults: { model: { primary: "anthropic/claude-opus-4-6" } } }, } ``` @@ -54,7 +54,7 @@ Use the `cacheRetention` parameter in your model config: agents: { defaults: { models: { - "anthropic/claude-opus-4-5": { + "anthropic/claude-opus-4-6": { params: { cacheRetention: "long" }, }, }, @@ -114,7 +114,7 @@ openclaw onboard --auth-choice setup-token ```json5 { - agents: { defaults: { model: { primary: "anthropic/claude-opus-4-5" } } }, + agents: { defaults: { model: { primary: "anthropic/claude-opus-4-6" } } }, } ``` diff --git a/docs/providers/index.md b/docs/providers/index.md index cc1dad7ee5..7bdf660134 100644 --- a/docs/providers/index.md +++ b/docs/providers/index.md @@ -29,7 +29,7 @@ See [Venice AI](/providers/venice). ```json5 { - agents: { defaults: { model: { primary: "anthropic/claude-opus-4-5" } } }, + agents: { defaults: { model: { primary: "anthropic/claude-opus-4-6" } } }, } ``` diff --git a/docs/providers/minimax.md b/docs/providers/minimax.md index c709e7581d..f19478a49f 100644 --- a/docs/providers/minimax.md +++ b/docs/providers/minimax.md @@ -96,7 +96,7 @@ Configure via CLI: ### MiniMax M2.1 as fallback (Opus primary) -**Best for:** keep Opus 4.5 as primary, fail over to MiniMax M2.1. +**Best for:** keep Opus 4.6 as primary, fail over to MiniMax M2.1. ```json5 { @@ -104,11 +104,11 @@ Configure via CLI: agents: { defaults: { models: { - "anthropic/claude-opus-4-5": { alias: "opus" }, + "anthropic/claude-opus-4-6": { alias: "opus" }, "minimax/MiniMax-M2.1": { alias: "minimax" }, }, model: { - primary: "anthropic/claude-opus-4-5", + primary: "anthropic/claude-opus-4-6", fallbacks: ["minimax/MiniMax-M2.1"], }, }, diff --git a/docs/providers/models.md b/docs/providers/models.md index 64c7d865ec..b5dcf11f06 100644 --- a/docs/providers/models.md +++ b/docs/providers/models.md @@ -27,7 +27,7 @@ See [Venice AI](/providers/venice). 
```json5 { - agents: { defaults: { model: { primary: "anthropic/claude-opus-4-5" } } }, + agents: { defaults: { model: { primary: "anthropic/claude-opus-4-6" } } }, } ``` diff --git a/docs/providers/openai.md b/docs/providers/openai.md index a3ea26e3f2..509fb56405 100644 --- a/docs/providers/openai.md +++ b/docs/providers/openai.md @@ -29,7 +29,7 @@ openclaw onboard --openai-api-key "$OPENAI_API_KEY" ```json5 { env: { OPENAI_API_KEY: "sk-..." }, - agents: { defaults: { model: { primary: "openai/gpt-5.2" } } }, + agents: { defaults: { model: { primary: "openai/gpt-5.1-codex" } } }, } ``` @@ -52,7 +52,7 @@ openclaw models auth login --provider openai-codex ```json5 { - agents: { defaults: { model: { primary: "openai-codex/gpt-5.2" } } }, + agents: { defaults: { model: { primary: "openai-codex/gpt-5.3-codex" } } }, } ``` diff --git a/docs/providers/opencode.md b/docs/providers/opencode.md index 7b8f790c4f..aa0614bff8 100644 --- a/docs/providers/opencode.md +++ b/docs/providers/opencode.md @@ -25,7 +25,7 @@ openclaw onboard --opencode-zen-api-key "$OPENCODE_API_KEY" ```json5 { env: { OPENCODE_API_KEY: "sk-..." 
}, - agents: { defaults: { model: { primary: "opencode/claude-opus-4-5" } } }, + agents: { defaults: { model: { primary: "opencode/claude-opus-4-6" } } }, } ``` diff --git a/docs/providers/vercel-ai-gateway.md b/docs/providers/vercel-ai-gateway.md index 5c4b169f61..726a6040fc 100644 --- a/docs/providers/vercel-ai-gateway.md +++ b/docs/providers/vercel-ai-gateway.md @@ -28,7 +28,7 @@ openclaw onboard --auth-choice ai-gateway-api-key { agents: { defaults: { - model: { primary: "vercel-ai-gateway/anthropic/claude-opus-4.5" }, + model: { primary: "vercel-ai-gateway/anthropic/claude-opus-4.6" }, }, }, } diff --git a/docs/start/openclaw.md b/docs/start/openclaw.md index 9187c9c4aa..563c88c9b6 100644 --- a/docs/start/openclaw.md +++ b/docs/start/openclaw.md @@ -142,7 +142,7 @@ Example: { logging: { level: "info" }, agent: { - model: "anthropic/claude-opus-4-5", + model: "anthropic/claude-opus-4-6", workspace: "~/.openclaw/workspace", thinkingDefault: "high", timeoutSeconds: 1800, diff --git a/docs/start/wizard-cli-reference.md b/docs/start/wizard-cli-reference.md index 52ff3a8beb..392aa0478f 100644 --- a/docs/start/wizard-cli-reference.md +++ b/docs/start/wizard-cli-reference.md @@ -135,12 +135,15 @@ What you set: Browser flow; paste `code#state`. - Sets `agents.defaults.model` to `openai-codex/gpt-5.2` when model is unset or `openai/*`. + Sets `agents.defaults.model` to `openai-codex/gpt-5.3-codex` when model is unset or `openai/*`. Uses `OPENAI_API_KEY` if present or prompts for a key, then saves it to `~/.openclaw/.env` so launchd can read it. + + Sets `agents.defaults.model` to `openai/gpt-5.1-codex` when model is unset, `openai/*`, or `openai-codex/*`. + Prompts for `OPENCODE_API_KEY` (or `OPENCODE_ZEN_API_KEY`). 
diff --git a/docs/testing.md b/docs/testing.md index 75c2762529..317f6ef961 100644 --- a/docs/testing.md +++ b/docs/testing.md @@ -110,7 +110,7 @@ Live tests are split into two layers so we can isolate failures: - How to select models: - `OPENCLAW_LIVE_MODELS=modern` to run the modern allowlist (Opus/Sonnet/Haiku 4.5, GPT-5.x + Codex, Gemini 3, GLM 4.7, MiniMax M2.1, Grok 4) - `OPENCLAW_LIVE_MODELS=all` is an alias for the modern allowlist - - or `OPENCLAW_LIVE_MODELS="openai/gpt-5.2,anthropic/claude-opus-4-5,..."` (comma allowlist) + - or `OPENCLAW_LIVE_MODELS="openai/gpt-5.2,anthropic/claude-opus-4-6,..."` (comma allowlist) - How to select providers: - `OPENCLAW_LIVE_PROVIDERS="google,google-antigravity,google-gemini-cli"` (comma allowlist) - Where keys come from: @@ -172,7 +172,7 @@ openclaw models list --json - Profile: `OPENCLAW_LIVE_SETUP_TOKEN_PROFILE=anthropic:setup-token-test` - Raw token: `OPENCLAW_LIVE_SETUP_TOKEN_VALUE=sk-ant-oat01-...` - Model override (optional): - - `OPENCLAW_LIVE_SETUP_TOKEN_MODEL=anthropic/claude-opus-4-5` + - `OPENCLAW_LIVE_SETUP_TOKEN_MODEL=anthropic/claude-opus-4-6` Setup example: @@ -193,8 +193,8 @@ OPENCLAW_LIVE_SETUP_TOKEN=1 OPENCLAW_LIVE_SETUP_TOKEN_PROFILE=anthropic:setup-to - Command: `claude` - Args: `["-p","--output-format","json","--dangerously-skip-permissions"]` - Overrides (optional): - - `OPENCLAW_LIVE_CLI_BACKEND_MODEL="claude-cli/claude-opus-4-5"` - - `OPENCLAW_LIVE_CLI_BACKEND_MODEL="codex-cli/gpt-5.2-codex"` + - `OPENCLAW_LIVE_CLI_BACKEND_MODEL="claude-cli/claude-opus-4-6"` + - `OPENCLAW_LIVE_CLI_BACKEND_MODEL="codex-cli/gpt-5.3-codex"` - `OPENCLAW_LIVE_CLI_BACKEND_COMMAND="/full/path/to/claude"` - `OPENCLAW_LIVE_CLI_BACKEND_ARGS='["-p","--output-format","json","--permission-mode","bypassPermissions"]'` - `OPENCLAW_LIVE_CLI_BACKEND_CLEAR_ENV='["ANTHROPIC_API_KEY","ANTHROPIC_API_KEY_OLD"]'` @@ -223,7 +223,7 @@ Narrow, explicit allowlists are fastest and least flaky: - 
`OPENCLAW_LIVE_GATEWAY_MODELS="openai/gpt-5.2" pnpm test:live src/gateway/gateway-models.profiles.live.test.ts` - Tool calling across several providers: - - `OPENCLAW_LIVE_GATEWAY_MODELS="openai/gpt-5.2,anthropic/claude-opus-4-5,google/gemini-3-flash-preview,zai/glm-4.7,minimax/minimax-m2.1" pnpm test:live src/gateway/gateway-models.profiles.live.test.ts` + - `OPENCLAW_LIVE_GATEWAY_MODELS="openai/gpt-5.2,anthropic/claude-opus-4-6,google/gemini-3-flash-preview,zai/glm-4.7,minimax/minimax-m2.1" pnpm test:live src/gateway/gateway-models.profiles.live.test.ts` - Google focus (Gemini API key + Antigravity): - Gemini (API key): `OPENCLAW_LIVE_GATEWAY_MODELS="google/gemini-3-flash-preview" pnpm test:live src/gateway/gateway-models.profiles.live.test.ts` @@ -247,22 +247,22 @@ There is no fixed “CI model list” (live is opt-in), but these are the **reco This is the “common models” run we expect to keep working: - OpenAI (non-Codex): `openai/gpt-5.2` (optional: `openai/gpt-5.1`) -- OpenAI Codex: `openai-codex/gpt-5.2` (optional: `openai-codex/gpt-5.2-codex`) -- Anthropic: `anthropic/claude-opus-4-5` (or `anthropic/claude-sonnet-4-5`) +- OpenAI Codex: `openai-codex/gpt-5.3-codex` (optional: `openai-codex/gpt-5.2-codex`) +- Anthropic: `anthropic/claude-opus-4-6` (or `anthropic/claude-sonnet-4-5`) - Google (Gemini API): `google/gemini-3-pro-preview` and `google/gemini-3-flash-preview` (avoid older Gemini 2.x models) - Google (Antigravity): `google-antigravity/claude-opus-4-5-thinking` and `google-antigravity/gemini-3-flash` - Z.AI (GLM): `zai/glm-4.7` - MiniMax: `minimax/minimax-m2.1` Run gateway smoke with tools + image: -`OPENCLAW_LIVE_GATEWAY_MODELS="openai/gpt-5.2,openai-codex/gpt-5.2,anthropic/claude-opus-4-5,google/gemini-3-pro-preview,google/gemini-3-flash-preview,google-antigravity/claude-opus-4-5-thinking,google-antigravity/gemini-3-flash,zai/glm-4.7,minimax/minimax-m2.1" pnpm test:live src/gateway/gateway-models.profiles.live.test.ts`
+`OPENCLAW_LIVE_GATEWAY_MODELS="openai/gpt-5.2,openai-codex/gpt-5.3-codex,anthropic/claude-opus-4-6,google/gemini-3-pro-preview,google/gemini-3-flash-preview,google-antigravity/claude-opus-4-5-thinking,google-antigravity/gemini-3-flash,zai/glm-4.7,minimax/minimax-m2.1" pnpm test:live src/gateway/gateway-models.profiles.live.test.ts` ### Baseline: tool calling (Read + optional Exec) Pick at least one per provider family: - OpenAI: `openai/gpt-5.2` (or `openai/gpt-5-mini`) -- Anthropic: `anthropic/claude-opus-4-5` (or `anthropic/claude-sonnet-4-5`) +- Anthropic: `anthropic/claude-opus-4-6` (or `anthropic/claude-sonnet-4-5`) - Google: `google/gemini-3-flash-preview` (or `google/gemini-3-pro-preview`) - Z.AI (GLM): `zai/glm-4.7` - MiniMax: `minimax/minimax-m2.1` diff --git a/docs/token-use.md b/docs/token-use.md index cc5a7ab5dc..7f8dcb7fbb 100644 --- a/docs/token-use.md +++ b/docs/token-use.md @@ -93,9 +93,9 @@ https://docs.anthropic.com/docs/build-with-claude/prompt-caching agents: defaults: model: - primary: "anthropic/claude-opus-4-5" + primary: "anthropic/claude-opus-4-6" models: - "anthropic/claude-opus-4-5": + "anthropic/claude-opus-4-6": params: cacheRetention: "long" heartbeat: diff --git a/docs/tools/llm-task.md b/docs/tools/llm-task.md index 5b023103b1..16ae39e5e2 100644 --- a/docs/tools/llm-task.md +++ b/docs/tools/llm-task.md @@ -55,7 +55,7 @@ without writing custom OpenClaw code for each workflow. 
"defaultProvider": "openai-codex", "defaultModel": "gpt-5.2", "defaultAuthProfileId": "main", - "allowedModels": ["openai-codex/gpt-5.2"], + "allowedModels": ["openai-codex/gpt-5.3-codex"], "maxTokens": 800, "timeoutMs": 30000 } diff --git a/extensions/copilot-proxy/index.ts b/extensions/copilot-proxy/index.ts index ae674bd0dc..e56693b076 100644 --- a/extensions/copilot-proxy/index.ts +++ b/extensions/copilot-proxy/index.ts @@ -11,6 +11,7 @@ const DEFAULT_MODEL_IDS = [ "gpt-5.1-codex", "gpt-5.1-codex-max", "gpt-5-mini", + "claude-opus-4.6", "claude-opus-4.5", "claude-sonnet-4.5", "claude-haiku-4.5", diff --git a/extensions/tlon/src/monitor/utils.ts b/extensions/tlon/src/monitor/utils.ts index 31d2721394..3c0103a723 100644 --- a/extensions/tlon/src/monitor/utils.ts +++ b/extensions/tlon/src/monitor/utils.ts @@ -6,6 +6,7 @@ export function formatModelName(modelString?: string | null): string { } const modelName = modelString.includes("/") ? modelString.split("/")[1] : modelString; const modelMappings: Record = { + "claude-opus-4-6": "Claude Opus 4.6", "claude-opus-4-5": "Claude Opus 4.5", "claude-sonnet-4-5": "Claude Sonnet 4.5", "claude-sonnet-3-5": "Claude Sonnet 3.5", diff --git a/package.json b/package.json index 8f9d580cc5..c48e8fe025 100644 --- a/package.json +++ b/package.json @@ -108,10 +108,10 @@ "@larksuiteoapi/node-sdk": "^1.58.0", "@line/bot-sdk": "^10.6.0", "@lydell/node-pty": "1.2.0-beta.3", - "@mariozechner/pi-agent-core": "0.52.0", - "@mariozechner/pi-ai": "0.52.0", - "@mariozechner/pi-coding-agent": "0.52.0", - "@mariozechner/pi-tui": "0.52.0", + "@mariozechner/pi-agent-core": "0.52.2", + "@mariozechner/pi-ai": "0.52.2", + "@mariozechner/pi-coding-agent": "0.52.2", + "@mariozechner/pi-tui": "0.52.2", "@mozilla/readability": "^0.6.0", "@sinclair/typebox": "0.34.48", "@slack/bolt": "^4.6.0", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 5e4808de34..5c9cf4b0da 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -49,17 +49,17 @@ importers: 
specifier: 1.2.0-beta.3 version: 1.2.0-beta.3 '@mariozechner/pi-agent-core': - specifier: 0.52.0 - version: 0.52.0(ws@8.19.0)(zod@4.3.6) + specifier: 0.52.2 + version: 0.52.2(ws@8.19.0)(zod@4.3.6) '@mariozechner/pi-ai': - specifier: 0.52.0 - version: 0.52.0(ws@8.19.0)(zod@4.3.6) + specifier: 0.52.2 + version: 0.52.2(ws@8.19.0)(zod@4.3.6) '@mariozechner/pi-coding-agent': - specifier: 0.52.0 - version: 0.52.0(ws@8.19.0)(zod@4.3.6) + specifier: 0.52.2 + version: 0.52.2(ws@8.19.0)(zod@4.3.6) '@mariozechner/pi-tui': - specifier: 0.52.0 - version: 0.52.0 + specifier: 0.52.2 + version: 0.52.2 '@mozilla/readability': specifier: ^0.6.0 version: 0.6.0 @@ -593,8 +593,8 @@ packages: peerDependencies: zod: ^3.25.0 || ^4.0.0 - '@anthropic-ai/sdk@0.71.2': - resolution: {integrity: sha512-TGNDEUuEstk/DKu0/TflXAEt+p+p/WhTlFzEnoosvbaDU2LTjm42igSdlL0VijrKpWejtOKxX0b8A7uc+XiSAQ==} + '@anthropic-ai/sdk@0.73.0': + resolution: {integrity: sha512-URURVzhxXGJDGUGFunIOtBlSl7KWvZiAAKY/ttTkZAkXT9bTPqdk2eK0b8qqSxXpikh3QKPnPYpiyX98zf5ebw==} hasBin: true peerDependencies: zod: ^3.25.0 || ^4.0.0 @@ -619,8 +619,8 @@ packages: '@aws-crypto/util@5.2.0': resolution: {integrity: sha512-4RkU9EsI6ZpBve5fseQlGNUWKMa1RLPQ1dnjnQoe07ldfIzcsGb5hC5W0Dm7u423KWzawlrpbjXBrXCEv9zazQ==} - '@aws-sdk/client-bedrock-runtime@3.983.0': - resolution: {integrity: sha512-uur/DX7OKtWe05gSZ2PGCHIhV0etoi12h8EGDht5blmtI4njLzD/gL6vX2L8CUgsy+4/KGIpH7KV7naWKAKANQ==} + '@aws-sdk/client-bedrock-runtime@3.984.0': + resolution: {integrity: sha512-iFrdkDXdo+ELZ5qD8ZYw9MHoOhcXyVutO8z7csnYpJO0rbET/X6B8cQlOCMsqJHxkyMwW21J4vt9S5k2/FgPCg==} engines: {node: '>=20.0.0'} '@aws-sdk/client-bedrock@3.983.0': @@ -667,8 +667,8 @@ packages: resolution: {integrity: sha512-hIzw2XzrG8jzsUSEatehmpkd5rWzASg5IHUfA+m01k/RtvfAML7ZJVVohuKdhAYx+wV2AThLiQJVzqn7F0khrw==} engines: {node: '>=20.0.0'} - '@aws-sdk/eventstream-handler-node@3.972.4': - resolution: {integrity: 
sha512-LPIN505kUqL3xwtoGYgYkctkUUuVUD4pzZfSo+CahavNft+zty5xWYWhKfnZOKBkYCMUl2Hl/9mkoPeYwxfQvQ==} + '@aws-sdk/eventstream-handler-node@3.972.5': + resolution: {integrity: sha512-xEmd3dnyn83K6t4AJxBJA63wpEoCD45ERFG0XMTViD2E/Ohls9TLxjOWPb1PAxR9/46cKy/TImez1GoqP6xVNQ==} engines: {node: '>=20.0.0'} '@aws-sdk/middleware-eventstream@3.972.3': @@ -691,8 +691,8 @@ packages: resolution: {integrity: sha512-TehLN8W/kivl0U9HcS+keryElEWORROpghDXZBLfnb40DXM7hx/i+7OOjkogXQOF3QtUraJVRkHQ07bPhrWKlw==} engines: {node: '>=20.0.0'} - '@aws-sdk/middleware-websocket@3.972.4': - resolution: {integrity: sha512-0lHsBuO5eVkWiirSHWVDHLHSghyajcVxSGvmv/6tYFdzaXx2PDvqNdfXhKdDZpOOHGCxuY5d3u11SKbVAtB0+Q==} + '@aws-sdk/middleware-websocket@3.972.5': + resolution: {integrity: sha512-BN4A9K71WRIlpQ3+IYGdBC2wVyobZ95g6ZomodmJ8Te772GWo0iDk2Mv6JIHdr842tOTgi1b3npLIFDUS4hl4g==} engines: {node: '>= 14.0.0'} '@aws-sdk/nested-clients@3.982.0': @@ -703,6 +703,10 @@ packages: resolution: {integrity: sha512-4bUzDkJlSPwfegO23ZSBrheuTI8UyAgNzptm1K6fZAIOIc1vnFl12TonecbssAfmM0/UdyTn5QDomwEfIdmJkQ==} engines: {node: '>=20.0.0'} + '@aws-sdk/nested-clients@3.984.0': + resolution: {integrity: sha512-E9Os+U9NWFoEJXbTVT8sCi+HMnzmsMA8cuCkvlUUfin/oWewUTnCkB/OwFwiUQ2N7v1oBk+i4ZSsI1PiuOy8/w==} + engines: {node: '>=20.0.0'} + '@aws-sdk/region-config-resolver@3.972.3': resolution: {integrity: sha512-v4J8qYAWfOMcZ4MJUyatntOicTzEMaU7j3OpkRCGGFSL2NgXQ5VbxauIyORA+pxdKZ0qQG2tCQjQjZDlXEC3Ow==} engines: {node: '>=20.0.0'} @@ -715,6 +719,10 @@ packages: resolution: {integrity: sha512-HR9MBAAEeQRpZAQ96XUalr8PhJG1Kr6JRs7Lk3u9MMN6tXFICxbn9s2rThGIJEPnU0t/edc+5F5tgTtQxsqBuQ==} engines: {node: '>=20.0.0'} + '@aws-sdk/token-providers@3.984.0': + resolution: {integrity: sha512-UJ/+OzZv+4nAQ1bSspCSb4JlYbMB2Adn8CK7hySpKX5sjhRu1bm6w1PqQq59U67LZEKsPdhl1rzcZ7ybK8YQxw==} + engines: {node: '>=20.0.0'} + '@aws-sdk/types@3.973.1': resolution: {integrity: sha512-DwHBiMNOB468JiX6+i34c+THsKHErYUdNQ3HexeXZvVn4zouLjgaS4FejiGSi2HyBuzuyHg7SuOPmjSvoU9NRg==} 
engines: {node: '>=20.0.0'} @@ -727,6 +735,10 @@ packages: resolution: {integrity: sha512-t/VbL2X3gvDEjC4gdySOeFFOZGQEBKwa23pRHeB7hBLBZ119BB/2OEFtTFWKyp3bnMQgxpeVeGS7/hxk6wpKJw==} engines: {node: '>=20.0.0'} + '@aws-sdk/util-endpoints@3.984.0': + resolution: {integrity: sha512-9ebjLA0hMKHeVvXEtTDCCOBtwjb0bOXiuUV06HNeVdgAjH6gj4x4Zwt4IBti83TiyTGOCl5YfZqGx4ehVsasbQ==} + engines: {node: '>=20.0.0'} + '@aws-sdk/util-format-url@3.972.3': resolution: {integrity: sha512-n7F2ycckcKFXa01vAsT/SJdjFHfKH9s96QHcs5gn8AaaigASICeME8WdUL9uBp8XV/OVwEt8+6gzn6KFUgQa8g==} engines: {node: '>=20.0.0'} @@ -1051,11 +1063,11 @@ packages: '@eshaz/web-worker@1.2.2': resolution: {integrity: sha512-WxXiHFmD9u/owrzempiDlBB1ZYqiLnm9s6aPc8AlFQalq2tKmqdmMr9GXOupDgzXtqnBipj8Un0gkIm7Sjf8mw==} - '@google/genai@1.34.0': - resolution: {integrity: sha512-vu53UMPvjmb7PGzlYu6Tzxso8Dfhn+a7eQFaS2uNemVtDZKwzSpJ5+ikqBbXplF7RGB1STcVDqCkPvquiwb2sw==} + '@google/genai@1.40.0': + resolution: {integrity: sha512-fhIww8smT0QYRX78qWOiz/nIQhHMF5wXOrlXvj33HBrz3vKDBb+wibLcEmTA+L9dmPD4KmfNr7UF3LDQVTXNjA==} engines: {node: '>=20.0.0'} peerDependencies: - '@modelcontextprotocol/sdk': ^1.24.0 + '@modelcontextprotocol/sdk': ^1.25.2 peerDependenciesMeta: '@modelcontextprotocol/sdk': optional: true @@ -1457,22 +1469,22 @@ packages: resolution: {integrity: sha512-faGUlTcXka5l7rv0lP3K3vGW/ejRuOS24RR2aSFWREUQqzjgdsuWNo/IiPqL3kWRGt6Ahl2+qcDAwtdeWeuGUw==} hasBin: true - '@mariozechner/pi-agent-core@0.52.0': - resolution: {integrity: sha512-4jmPixmg+nnU3yvUuz9pLeMYtwktTC9SOcfkCGqGWfAyvYOa6fc1KXfL/IGPk1cDG4INautQ0nHxGoIDwAKFww==} + '@mariozechner/pi-agent-core@0.52.2': + resolution: {integrity: sha512-RavOGZUl1hm+0/3ZG5tJqlUjPavidA0ebQoloW1T8DbXPEP7WlWYKGs5qMH5SnSdCF/Hc0tDn6lSqMdGo60Lpg==} engines: {node: '>=20.0.0'} - '@mariozechner/pi-ai@0.52.0': - resolution: {integrity: sha512-fNyW5k3Ap3mSg2lmeZBYzMRfyDD+/7gSTSDax3OlME9hsXw72rhIrVpvQoivFNroupU/13BOy73y8rvyTEWQqQ==} + '@mariozechner/pi-ai@0.52.2': + resolution: {integrity: 
sha512-/iyI2CbFiuPB6A5MyakQKy/ez6iTW04CQYXseyaDv4XZszGQa/TYXc4QAW/HxEc8SpuEZhCo8T6ikZBdvTaWwA==} engines: {node: '>=20.0.0'} hasBin: true - '@mariozechner/pi-coding-agent@0.52.0': - resolution: {integrity: sha512-skUR/LYK0kupD8sTn0PCr/YnvGaBEpqSZgZxQ/gEjSzzRXa7Ywoxrr6y3Jvzk68Nv1JenKAyeR1GAI/3QPDKlA==} + '@mariozechner/pi-coding-agent@0.52.2': + resolution: {integrity: sha512-/qJxSmfi488jJLKQkGS9qO2VC21LC7mpms6F3JNMkHS0wdUoq1JFLGTA9OlZT/9WJHz1aLzXeCLAcZvFFcJGfA==} engines: {node: '>=20.0.0'} hasBin: true - '@mariozechner/pi-tui@0.52.0': - resolution: {integrity: sha512-SOWBWI+7SX/CgfmuyO1o+S1nhS5I1QmWrCXxd+2lvhqAvqBiVTmSt3W8RagdAH4G6D4WOcR0FFjqLFezlKV79w==} + '@mariozechner/pi-tui@0.52.2': + resolution: {integrity: sha512-ASNy0dU1cDWXNx4lHvyjOXdoUzrEbuSdTQwkvchiNMbau2nGogdzRXdnYuiJjJKMDqCFtkOPhEUXStpUoOzJZg==} engines: {node: '>=20.0.0'} '@matrix-org/matrix-sdk-crypto-nodejs@0.4.0': @@ -2717,8 +2729,8 @@ packages: '@types/node@10.17.60': resolution: {integrity: sha512-F0KIgDJfy2nA3zMLmWGKxcH2ZVEtCZXHHdOQs2gSaQ27+lNeEfGxzkIw90aXswATX7AZ33tahPbzy6KAfUreVw==} - '@types/node@20.19.31': - resolution: {integrity: sha512-5jsi0wpncvTD33Sh1UCgacK37FFwDn+EG7wCmEvs62fCvBL+n8/76cAYDok21NF6+jaVWIqKwCZyX7Vbu8eB3A==} + '@types/node@20.19.32': + resolution: {integrity: sha512-Ez8QE4DMfhjjTsES9K2dwfV258qBui7qxUsoaixZDiTzbde4U12e1pXGNu/ECsUIOi5/zoCxAQxIhQnaUQ2VvA==} '@types/node@24.10.10': resolution: {integrity: sha512-+0/4J266CBGPUq/ELg7QUHhN25WYjE0wYTPSQJn1xeu8DOlIOPxXxrNGiLmfAWl7HMMgWFWXpt9IDjMWrF5Iow==} @@ -5535,7 +5547,7 @@ snapshots: dependencies: zod: 4.3.6 - '@anthropic-ai/sdk@0.71.2(zod@4.3.6)': + '@anthropic-ai/sdk@0.73.0(zod@4.3.6)': dependencies: json-schema-to-ts: 3.1.1 optionalDependencies: @@ -5573,23 +5585,23 @@ snapshots: '@smithy/util-utf8': 2.3.0 tslib: 2.8.1 - '@aws-sdk/client-bedrock-runtime@3.983.0': + '@aws-sdk/client-bedrock-runtime@3.984.0': dependencies: '@aws-crypto/sha256-browser': 5.2.0 '@aws-crypto/sha256-js': 5.2.0 '@aws-sdk/core': 3.973.6 
'@aws-sdk/credential-provider-node': 3.972.5 - '@aws-sdk/eventstream-handler-node': 3.972.4 + '@aws-sdk/eventstream-handler-node': 3.972.5 '@aws-sdk/middleware-eventstream': 3.972.3 '@aws-sdk/middleware-host-header': 3.972.3 '@aws-sdk/middleware-logger': 3.972.3 '@aws-sdk/middleware-recursion-detection': 3.972.3 '@aws-sdk/middleware-user-agent': 3.972.6 - '@aws-sdk/middleware-websocket': 3.972.4 + '@aws-sdk/middleware-websocket': 3.972.5 '@aws-sdk/region-config-resolver': 3.972.3 - '@aws-sdk/token-providers': 3.983.0 + '@aws-sdk/token-providers': 3.984.0 '@aws-sdk/types': 3.973.1 - '@aws-sdk/util-endpoints': 3.983.0 + '@aws-sdk/util-endpoints': 3.984.0 '@aws-sdk/util-user-agent-browser': 3.972.3 '@aws-sdk/util-user-agent-node': 3.972.4 '@smithy/config-resolver': 4.4.6 @@ -5833,7 +5845,7 @@ snapshots: transitivePeerDependencies: - aws-crt - '@aws-sdk/eventstream-handler-node@3.972.4': + '@aws-sdk/eventstream-handler-node@3.972.5': dependencies: '@aws-sdk/types': 3.973.1 '@smithy/eventstream-codec': 4.2.8 @@ -5878,7 +5890,7 @@ snapshots: '@smithy/types': 4.12.0 tslib: 2.8.1 - '@aws-sdk/middleware-websocket@3.972.4': + '@aws-sdk/middleware-websocket@3.972.5': dependencies: '@aws-sdk/types': 3.973.1 '@aws-sdk/util-format-url': 3.972.3 @@ -5888,7 +5900,9 @@ snapshots: '@smithy/protocol-http': 5.3.8 '@smithy/signature-v4': 5.3.8 '@smithy/types': 4.12.0 + '@smithy/util-base64': 4.3.0 '@smithy/util-hex-encoding': 4.2.0 + '@smithy/util-utf8': 4.2.0 tslib: 2.8.1 '@aws-sdk/nested-clients@3.982.0': @@ -5977,6 +5991,49 @@ snapshots: transitivePeerDependencies: - aws-crt + '@aws-sdk/nested-clients@3.984.0': + dependencies: + '@aws-crypto/sha256-browser': 5.2.0 + '@aws-crypto/sha256-js': 5.2.0 + '@aws-sdk/core': 3.973.6 + '@aws-sdk/middleware-host-header': 3.972.3 + '@aws-sdk/middleware-logger': 3.972.3 + '@aws-sdk/middleware-recursion-detection': 3.972.3 + '@aws-sdk/middleware-user-agent': 3.972.6 + '@aws-sdk/region-config-resolver': 3.972.3 + '@aws-sdk/types': 3.973.1 + 
'@aws-sdk/util-endpoints': 3.984.0 + '@aws-sdk/util-user-agent-browser': 3.972.3 + '@aws-sdk/util-user-agent-node': 3.972.4 + '@smithy/config-resolver': 4.4.6 + '@smithy/core': 3.22.1 + '@smithy/fetch-http-handler': 5.3.9 + '@smithy/hash-node': 4.2.8 + '@smithy/invalid-dependency': 4.2.8 + '@smithy/middleware-content-length': 4.2.8 + '@smithy/middleware-endpoint': 4.4.13 + '@smithy/middleware-retry': 4.4.30 + '@smithy/middleware-serde': 4.2.9 + '@smithy/middleware-stack': 4.2.8 + '@smithy/node-config-provider': 4.3.8 + '@smithy/node-http-handler': 4.4.9 + '@smithy/protocol-http': 5.3.8 + '@smithy/smithy-client': 4.11.2 + '@smithy/types': 4.12.0 + '@smithy/url-parser': 4.2.8 + '@smithy/util-base64': 4.3.0 + '@smithy/util-body-length-browser': 4.2.0 + '@smithy/util-body-length-node': 4.2.1 + '@smithy/util-defaults-mode-browser': 4.3.29 + '@smithy/util-defaults-mode-node': 4.2.32 + '@smithy/util-endpoints': 3.2.8 + '@smithy/util-middleware': 4.2.8 + '@smithy/util-retry': 4.2.8 + '@smithy/util-utf8': 4.2.0 + tslib: 2.8.1 + transitivePeerDependencies: + - aws-crt + '@aws-sdk/region-config-resolver@3.972.3': dependencies: '@aws-sdk/types': 3.973.1 @@ -6009,6 +6066,18 @@ snapshots: transitivePeerDependencies: - aws-crt + '@aws-sdk/token-providers@3.984.0': + dependencies: + '@aws-sdk/core': 3.973.6 + '@aws-sdk/nested-clients': 3.984.0 + '@aws-sdk/types': 3.973.1 + '@smithy/property-provider': 4.2.8 + '@smithy/shared-ini-file-loader': 4.4.3 + '@smithy/types': 4.12.0 + tslib: 2.8.1 + transitivePeerDependencies: + - aws-crt + '@aws-sdk/types@3.973.1': dependencies: '@smithy/types': 4.12.0 @@ -6030,6 +6099,14 @@ snapshots: '@smithy/util-endpoints': 3.2.8 tslib: 2.8.1 + '@aws-sdk/util-endpoints@3.984.0': + dependencies: + '@aws-sdk/types': 3.973.1 + '@smithy/types': 4.12.0 + '@smithy/url-parser': 4.2.8 + '@smithy/util-endpoints': 3.2.8 + tslib: 2.8.1 + '@aws-sdk/util-format-url@3.972.3': dependencies: '@aws-sdk/types': 3.973.1 @@ -6346,9 +6423,10 @@ snapshots: 
'@eshaz/web-worker@1.2.2': optional: true - '@google/genai@1.34.0': + '@google/genai@1.40.0': dependencies: google-auth-library: 10.5.0 + protobufjs: 7.5.4 ws: 8.19.0 transitivePeerDependencies: - bufferutil @@ -6584,7 +6662,7 @@ snapshots: '@larksuiteoapi/node-sdk@1.58.0': dependencies: - axios: 1.13.4(debug@4.4.3) + axios: 1.13.4 lodash.identity: 3.0.0 lodash.merge: 4.6.2 lodash.pickby: 4.6.0 @@ -6600,7 +6678,7 @@ snapshots: dependencies: '@types/node': 24.10.10 optionalDependencies: - axios: 1.13.4(debug@4.4.3) + axios: 1.13.4 transitivePeerDependencies: - debug @@ -6695,9 +6773,9 @@ snapshots: std-env: 3.10.0 yoctocolors: 2.1.2 - '@mariozechner/pi-agent-core@0.52.0(ws@8.19.0)(zod@4.3.6)': + '@mariozechner/pi-agent-core@0.52.2(ws@8.19.0)(zod@4.3.6)': dependencies: - '@mariozechner/pi-ai': 0.52.0(ws@8.19.0)(zod@4.3.6) + '@mariozechner/pi-ai': 0.52.2(ws@8.19.0)(zod@4.3.6) transitivePeerDependencies: - '@modelcontextprotocol/sdk' - aws-crt @@ -6707,11 +6785,11 @@ snapshots: - ws - zod - '@mariozechner/pi-ai@0.52.0(ws@8.19.0)(zod@4.3.6)': + '@mariozechner/pi-ai@0.52.2(ws@8.19.0)(zod@4.3.6)': dependencies: - '@anthropic-ai/sdk': 0.71.2(zod@4.3.6) - '@aws-sdk/client-bedrock-runtime': 3.983.0 - '@google/genai': 1.34.0 + '@anthropic-ai/sdk': 0.73.0(zod@4.3.6) + '@aws-sdk/client-bedrock-runtime': 3.984.0 + '@google/genai': 1.40.0 '@mistralai/mistralai': 1.10.0 '@sinclair/typebox': 0.34.47 ajv: 8.17.1 @@ -6731,12 +6809,12 @@ snapshots: - ws - zod - '@mariozechner/pi-coding-agent@0.52.0(ws@8.19.0)(zod@4.3.6)': + '@mariozechner/pi-coding-agent@0.52.2(ws@8.19.0)(zod@4.3.6)': dependencies: '@mariozechner/jiti': 2.6.5 - '@mariozechner/pi-agent-core': 0.52.0(ws@8.19.0)(zod@4.3.6) - '@mariozechner/pi-ai': 0.52.0(ws@8.19.0)(zod@4.3.6) - '@mariozechner/pi-tui': 0.52.0 + '@mariozechner/pi-agent-core': 0.52.2(ws@8.19.0)(zod@4.3.6) + '@mariozechner/pi-ai': 0.52.2(ws@8.19.0)(zod@4.3.6) + '@mariozechner/pi-tui': 0.52.2 '@silvia-odwyer/photon-node': 0.3.4 chalk: 5.6.2 cli-highlight: 
2.1.11 @@ -6760,7 +6838,7 @@ snapshots: - ws - zod - '@mariozechner/pi-tui@0.52.0': + '@mariozechner/pi-tui@0.52.2': dependencies: '@types/mime-types': 2.1.4 chalk: 5.6.2 @@ -6803,7 +6881,7 @@ snapshots: '@azure/core-auth': 1.10.1 '@azure/msal-node': 3.8.6 '@microsoft/agents-activity': 1.2.3 - axios: 1.13.4(debug@4.4.3) + axios: 1.13.4 jsonwebtoken: 9.0.3 jwks-rsa: 3.2.2 object-path: 0.11.8 @@ -7590,7 +7668,7 @@ snapshots: '@slack/types': 2.19.0 '@slack/web-api': 7.13.0 '@types/express': 5.0.6 - axios: 1.13.4(debug@4.4.3) + axios: 1.13.4 express: 5.2.1 path-to-regexp: 8.3.0 raw-body: 3.0.2 @@ -7636,7 +7714,7 @@ snapshots: '@slack/types': 2.19.0 '@types/node': 25.2.0 '@types/retry': 0.12.0 - axios: 1.13.4(debug@4.4.3) + axios: 1.13.4 eventemitter3: 5.0.4 form-data: 2.5.4 is-electron: 2.2.2 @@ -8120,7 +8198,7 @@ snapshots: '@types/node@10.17.60': {} - '@types/node@20.19.31': + '@types/node@20.19.32': dependencies: undici-types: 6.21.0 @@ -8448,7 +8526,7 @@ snapshots: '@swc/helpers': 0.5.18 '@types/command-line-args': 5.2.3 '@types/command-line-usage': 5.0.4 - '@types/node': 20.19.31 + '@types/node': 20.19.32 command-line-args: 5.2.1 command-line-usage: 7.0.3 flatbuffers: 24.12.23 @@ -8530,6 +8608,14 @@ snapshots: aws4@1.13.2: {} + axios@1.13.4: + dependencies: + follow-redirects: 1.15.11 + form-data: 2.5.4 + proxy-from-env: 1.1.0 + transitivePeerDependencies: + - debug + axios@1.13.4(debug@4.4.3): dependencies: follow-redirects: 1.15.11(debug@4.4.3) @@ -9105,6 +9191,8 @@ snapshots: flatbuffers@24.12.23: {} + follow-redirects@1.15.11: {} + follow-redirects@1.15.11(debug@4.4.3): optionalDependencies: debug: 4.4.3 diff --git a/scripts/bench-model.ts b/scripts/bench-model.ts index de0ee79ddb..f1698737e3 100644 --- a/scripts/bench-model.ts +++ b/scripts/bench-model.ts @@ -106,7 +106,7 @@ async function main(): Promise { contextWindow: 200000, maxTokens: 8192, }; - const opusModel = getModel("anthropic", "claude-opus-4-5"); + const opusModel = getModel("anthropic", 
"claude-opus-4-6"); console.log(`Prompt: ${prompt}`); console.log(`Runs: ${runs}`); diff --git a/scripts/docker/install-sh-e2e/run.sh b/scripts/docker/install-sh-e2e/run.sh index dfd31957fb..4873436b05 100755 --- a/scripts/docker/install-sh-e2e/run.sh +++ b/scripts/docker/install-sh-e2e/run.sh @@ -400,9 +400,13 @@ run_profile() { "openai/gpt-4.1-mini")" else agent_model="$(set_agent_model "$profile" \ + "anthropic/claude-opus-4-6" \ + "claude-opus-4-6" \ "anthropic/claude-opus-4-5" \ "claude-opus-4-5")" image_model="$(set_image_model "$profile" \ + "anthropic/claude-opus-4-6" \ + "claude-opus-4-6" \ "anthropic/claude-opus-4-5" \ "claude-opus-4-5")" fi diff --git a/scripts/docs-i18n/util.go b/scripts/docs-i18n/util.go index b5862a5acd..3be70ee307 100644 --- a/scripts/docs-i18n/util.go +++ b/scripts/docs-i18n/util.go @@ -12,7 +12,7 @@ import ( const ( workflowVersion = 15 providerName = "pi" - modelVersion = "claude-opus-4-5" + modelVersion = "claude-opus-4-6" ) func cacheNamespace() string { diff --git a/scripts/zai-fallback-repro.ts b/scripts/zai-fallback-repro.ts index 71e9e34384..75c8793d08 100644 --- a/scripts/zai-fallback-repro.ts +++ b/scripts/zai-fallback-repro.ts @@ -85,10 +85,11 @@ async function main() { agents: { defaults: { model: { - primary: "anthropic/claude-opus-4-5", + primary: "anthropic/claude-opus-4-6", fallbacks: ["zai/glm-4.7"], }, models: { + "anthropic/claude-opus-4-6": {}, "anthropic/claude-opus-4-5": {}, "zai/glm-4.7": {}, }, diff --git a/src/agents/defaults.ts b/src/agents/defaults.ts index a3af2338b4..f1c74b0d5a 100644 --- a/src/agents/defaults.ts +++ b/src/agents/defaults.ts @@ -2,5 +2,5 @@ // Model id uses pi-ai's built-in Anthropic catalog. export const DEFAULT_PROVIDER = "anthropic"; export const DEFAULT_MODEL = "claude-opus-4-6"; -// Context window: Opus supports ~200k tokens (per pi-ai models.generated.ts for Opus 4.5). +// Conservative fallback used when model metadata is unavailable. 
export const DEFAULT_CONTEXT_TOKENS = 200_000; diff --git a/src/agents/model-auth.test.ts b/src/agents/model-auth.test.ts index 4f12290b9d..7a0af0d185 100644 --- a/src/agents/model-auth.test.ts +++ b/src/agents/model-auth.test.ts @@ -140,7 +140,7 @@ describe("getApiKeyForModel", () => { } catch (err) { error = err; } - expect(String(error)).toContain("openai-codex/gpt-5.2"); + expect(String(error)).toContain("openai-codex/gpt-5.3-codex"); } finally { if (previousOpenAiKey === undefined) { delete process.env.OPENAI_API_KEY; diff --git a/src/agents/model-auth.ts b/src/agents/model-auth.ts index ba85e213cc..60efb30203 100644 --- a/src/agents/model-auth.ts +++ b/src/agents/model-auth.ts @@ -213,7 +213,7 @@ export async function resolveApiKeyForProvider(params: { const hasCodex = listProfilesForProvider(store, "openai-codex").length > 0; if (hasCodex) { throw new Error( - 'No API key found for provider "openai". You are authenticated with OpenAI Codex OAuth. Use openai-codex/gpt-5.2 (ChatGPT OAuth) or set OPENAI_API_KEY for openai/gpt-5.2.', + 'No API key found for provider "openai". You are authenticated with OpenAI Codex OAuth. Use openai-codex/gpt-5.3-codex (OAuth) or set OPENAI_API_KEY to use openai/gpt-5.1-codex.', ); } } diff --git a/src/agents/model-fallback.ts b/src/agents/model-fallback.ts index c5ee529c43..402584daf6 100644 --- a/src/agents/model-fallback.ts +++ b/src/agents/model-fallback.ts @@ -13,9 +13,9 @@ import { isTimeoutError, } from "./failover-error.js"; import { + buildConfiguredAllowlistKeys, buildModelAliasIndex, modelKey, - parseModelRef, resolveConfiguredModelRef, resolveModelRefFromString, } from "./model-selection.js"; @@ -51,28 +51,6 @@ function shouldRethrowAbort(err: unknown): boolean { return isAbortError(err) && !isTimeoutError(err); } -function buildAllowedModelKeys( - cfg: OpenClawConfig | undefined, - defaultProvider: string, -): Set | null { - const rawAllowlist = (() => { - const modelMap = cfg?.agents?.defaults?.models ?? 
{}; - return Object.keys(modelMap); - })(); - if (rawAllowlist.length === 0) { - return null; - } - const keys = new Set(); - for (const raw of rawAllowlist) { - const parsed = parseModelRef(String(raw ?? ""), defaultProvider); - if (!parsed) { - continue; - } - keys.add(modelKey(parsed.provider, parsed.model)); - } - return keys.size > 0 ? keys : null; -} - function resolveImageFallbackCandidates(params: { cfg: OpenClawConfig | undefined; defaultProvider: string; @@ -82,7 +60,10 @@ function resolveImageFallbackCandidates(params: { cfg: params.cfg ?? {}, defaultProvider: params.defaultProvider, }); - const allowlist = buildAllowedModelKeys(params.cfg, params.defaultProvider); + const allowlist = buildConfiguredAllowlistKeys({ + cfg: params.cfg, + defaultProvider: params.defaultProvider, + }); const seen = new Set(); const candidates: ModelCandidate[] = []; @@ -166,7 +147,10 @@ function resolveFallbackCandidates(params: { cfg: params.cfg ?? {}, defaultProvider, }); - const allowlist = buildAllowedModelKeys(params.cfg, defaultProvider); + const allowlist = buildConfiguredAllowlistKeys({ + cfg: params.cfg, + defaultProvider, + }); const seen = new Set(); const candidates: ModelCandidate[] = []; diff --git a/src/agents/model-selection.test.ts b/src/agents/model-selection.test.ts index 532936b8c6..418962ff94 100644 --- a/src/agents/model-selection.test.ts +++ b/src/agents/model-selection.test.ts @@ -29,6 +29,17 @@ describe("model-selection", () => { }); }); + it("normalizes anthropic alias refs to canonical model ids", () => { + expect(parseModelRef("anthropic/opus-4.6", "openai")).toEqual({ + provider: "anthropic", + model: "claude-opus-4-6", + }); + expect(parseModelRef("opus-4.6", "anthropic")).toEqual({ + provider: "anthropic", + model: "claude-opus-4-6", + }); + }); + it("should use default provider if none specified", () => { expect(parseModelRef("claude-3-5-sonnet", "anthropic")).toEqual({ provider: "anthropic", diff --git a/src/agents/model-selection.ts 
b/src/agents/model-selection.ts index 65d7b57b7a..e3d68a70ff 100644 --- a/src/agents/model-selection.ts +++ b/src/agents/model-selection.ts @@ -16,6 +16,12 @@ export type ModelAliasIndex = { byKey: Map; }; +const ANTHROPIC_MODEL_ALIASES: Record = { + "opus-4.6": "claude-opus-4-6", + "opus-4.5": "claude-opus-4-5", + "sonnet-4.5": "claude-sonnet-4-5", +}; + function normalizeAliasKey(value: string): string { return value.trim().toLowerCase(); } @@ -59,19 +65,7 @@ function normalizeAnthropicModelId(model: string): string { return trimmed; } const lower = trimmed.toLowerCase(); - if (lower === "opus-4.6") { - return "claude-opus-4-6"; - } - if (lower === "opus-4.5") { - return "claude-opus-4-5"; - } - if (lower === "opus-4.6") { - return "claude-opus-4-6"; - } - if (lower === "sonnet-4.5") { - return "claude-sonnet-4-5"; - } - return trimmed; + return ANTHROPIC_MODEL_ALIASES[lower] ?? trimmed; } function normalizeProviderModelId(provider: string, model: string): string { @@ -105,6 +99,33 @@ export function parseModelRef(raw: string, defaultProvider: string): ModelRef | return { provider, model: normalizedModel }; } +export function resolveAllowlistModelKey(raw: string, defaultProvider: string): string | null { + const parsed = parseModelRef(raw, defaultProvider); + if (!parsed) { + return null; + } + return modelKey(parsed.provider, parsed.model); +} + +export function buildConfiguredAllowlistKeys(params: { + cfg: OpenClawConfig | undefined; + defaultProvider: string; +}): Set | null { + const rawAllowlist = Object.keys(params.cfg?.agents?.defaults?.models ?? {}); + if (rawAllowlist.length === 0) { + return null; + } + + const keys = new Set(); + for (const raw of rawAllowlist) { + const key = resolveAllowlistModelKey(String(raw ?? ""), params.defaultProvider); + if (key) { + keys.add(key); + } + } + return keys.size > 0 ? 
keys : null; +} + export function buildModelAliasIndex(params: { cfg: OpenClawConfig; defaultProvider: string; diff --git a/src/agents/opencode-zen-models.test.ts b/src/agents/opencode-zen-models.test.ts index 69c6a0497f..fa7a7f268f 100644 --- a/src/agents/opencode-zen-models.test.ts +++ b/src/agents/opencode-zen-models.test.ts @@ -8,12 +8,12 @@ import { describe("resolveOpencodeZenAlias", () => { it("resolves opus alias", () => { - expect(resolveOpencodeZenAlias("opus")).toBe("claude-opus-4-5"); + expect(resolveOpencodeZenAlias("opus")).toBe("claude-opus-4-6"); }); it("keeps legacy aliases working", () => { - expect(resolveOpencodeZenAlias("sonnet")).toBe("claude-opus-4-5"); - expect(resolveOpencodeZenAlias("haiku")).toBe("claude-opus-4-5"); + expect(resolveOpencodeZenAlias("sonnet")).toBe("claude-opus-4-6"); + expect(resolveOpencodeZenAlias("haiku")).toBe("claude-opus-4-6"); expect(resolveOpencodeZenAlias("gpt4")).toBe("gpt-5.1"); expect(resolveOpencodeZenAlias("o1")).toBe("gpt-5.2"); expect(resolveOpencodeZenAlias("gemini-2.5")).toBe("gemini-3-pro"); @@ -32,14 +32,14 @@ describe("resolveOpencodeZenAlias", () => { }); it("is case-insensitive", () => { - expect(resolveOpencodeZenAlias("OPUS")).toBe("claude-opus-4-5"); + expect(resolveOpencodeZenAlias("OPUS")).toBe("claude-opus-4-6"); expect(resolveOpencodeZenAlias("Gpt5")).toBe("gpt-5.2"); }); }); describe("resolveOpencodeZenModelApi", () => { it("maps APIs by model family", () => { - expect(resolveOpencodeZenModelApi("claude-opus-4-5")).toBe("anthropic-messages"); + expect(resolveOpencodeZenModelApi("claude-opus-4-6")).toBe("anthropic-messages"); expect(resolveOpencodeZenModelApi("gemini-3-pro")).toBe("google-generative-ai"); expect(resolveOpencodeZenModelApi("gpt-5.2")).toBe("openai-responses"); expect(resolveOpencodeZenModelApi("alpha-gd4")).toBe("openai-completions"); @@ -53,13 +53,14 @@ describe("getOpencodeZenStaticFallbackModels", () => { it("returns an array of models", () => { const models = 
getOpencodeZenStaticFallbackModels(); expect(Array.isArray(models)).toBe(true); - expect(models.length).toBe(9); + expect(models.length).toBe(10); }); it("includes Claude, GPT, Gemini, and GLM models", () => { const models = getOpencodeZenStaticFallbackModels(); const ids = models.map((m) => m.id); + expect(ids).toContain("claude-opus-4-6"); expect(ids).toContain("claude-opus-4-5"); expect(ids).toContain("gpt-5.2"); expect(ids).toContain("gpt-5.1-codex"); @@ -83,15 +84,16 @@ describe("getOpencodeZenStaticFallbackModels", () => { describe("OPENCODE_ZEN_MODEL_ALIASES", () => { it("has expected aliases", () => { - expect(OPENCODE_ZEN_MODEL_ALIASES.opus).toBe("claude-opus-4-5"); + expect(OPENCODE_ZEN_MODEL_ALIASES.opus).toBe("claude-opus-4-6"); expect(OPENCODE_ZEN_MODEL_ALIASES.codex).toBe("gpt-5.1-codex"); expect(OPENCODE_ZEN_MODEL_ALIASES.gpt5).toBe("gpt-5.2"); expect(OPENCODE_ZEN_MODEL_ALIASES.gemini).toBe("gemini-3-pro"); expect(OPENCODE_ZEN_MODEL_ALIASES.glm).toBe("glm-4.7"); + expect(OPENCODE_ZEN_MODEL_ALIASES["opus-4.5"]).toBe("claude-opus-4-5"); // Legacy aliases (kept for backward compatibility). 
- expect(OPENCODE_ZEN_MODEL_ALIASES.sonnet).toBe("claude-opus-4-5"); - expect(OPENCODE_ZEN_MODEL_ALIASES.haiku).toBe("claude-opus-4-5"); + expect(OPENCODE_ZEN_MODEL_ALIASES.sonnet).toBe("claude-opus-4-6"); + expect(OPENCODE_ZEN_MODEL_ALIASES.haiku).toBe("claude-opus-4-6"); expect(OPENCODE_ZEN_MODEL_ALIASES.gpt4).toBe("gpt-5.1"); expect(OPENCODE_ZEN_MODEL_ALIASES.o1).toBe("gpt-5.2"); expect(OPENCODE_ZEN_MODEL_ALIASES["gemini-2.5"]).toBe("gemini-3-pro"); diff --git a/src/agents/opencode-zen-models.ts b/src/agents/opencode-zen-models.ts index efe7e98bbc..49f207a510 100644 --- a/src/agents/opencode-zen-models.ts +++ b/src/agents/opencode-zen-models.ts @@ -11,7 +11,7 @@ import type { ModelApi, ModelDefinitionConfig } from "../config/types.js"; export const OPENCODE_ZEN_API_BASE_URL = "https://opencode.ai/zen/v1"; -export const OPENCODE_ZEN_DEFAULT_MODEL = "claude-opus-4-5"; +export const OPENCODE_ZEN_DEFAULT_MODEL = "claude-opus-4-6"; export const OPENCODE_ZEN_DEFAULT_MODEL_REF = `opencode/${OPENCODE_ZEN_DEFAULT_MODEL}`; // Cache for fetched models (1 hour TTL) @@ -21,19 +21,20 @@ const CACHE_TTL_MS = 60 * 60 * 1000; // 1 hour /** * Model aliases for convenient shortcuts. - * Users can use "opus" instead of "claude-opus-4-5", etc. + * Users can use "opus" instead of "claude-opus-4-6", etc. */ export const OPENCODE_ZEN_MODEL_ALIASES: Record = { // Claude - opus: "claude-opus-4-5", + opus: "claude-opus-4-6", + "opus-4.6": "claude-opus-4-6", "opus-4.5": "claude-opus-4-5", - "opus-4": "claude-opus-4-5", + "opus-4": "claude-opus-4-6", // Legacy Claude aliases (OpenCode Zen rotates model catalogs; keep old keys working). 
- sonnet: "claude-opus-4-5", - "sonnet-4": "claude-opus-4-5", - haiku: "claude-opus-4-5", - "haiku-3.5": "claude-opus-4-5", + sonnet: "claude-opus-4-6", + "sonnet-4": "claude-opus-4-6", + haiku: "claude-opus-4-6", + "haiku-3.5": "claude-opus-4-6", // GPT-5.x family gpt5: "gpt-5.2", @@ -119,6 +120,7 @@ const MODEL_COSTS: Record< cacheRead: 0.107, cacheWrite: 0, }, + "claude-opus-4-6": { input: 5, output: 25, cacheRead: 0.5, cacheWrite: 6.25 }, "claude-opus-4-5": { input: 5, output: 25, cacheRead: 0.5, cacheWrite: 6.25 }, "gemini-3-pro": { input: 2, output: 12, cacheRead: 0.2, cacheWrite: 0 }, "gpt-5.1-codex-mini": { @@ -143,6 +145,7 @@ const DEFAULT_COST = { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }; const MODEL_CONTEXT_WINDOWS: Record = { "gpt-5.1-codex": 400000, + "claude-opus-4-6": 1000000, "claude-opus-4-5": 200000, "gemini-3-pro": 1048576, "gpt-5.1-codex-mini": 400000, @@ -159,6 +162,7 @@ function getDefaultContextWindow(modelId: string): number { const MODEL_MAX_TOKENS: Record = { "gpt-5.1-codex": 128000, + "claude-opus-4-6": 128000, "claude-opus-4-5": 64000, "gemini-3-pro": 65536, "gpt-5.1-codex-mini": 128000, @@ -195,6 +199,7 @@ function buildModelDefinition(modelId: string): ModelDefinitionConfig { */ const MODEL_NAMES: Record = { "gpt-5.1-codex": "GPT-5.1 Codex", + "claude-opus-4-6": "Claude Opus 4.6", "claude-opus-4-5": "Claude Opus 4.5", "gemini-3-pro": "Gemini 3 Pro", "gpt-5.1-codex-mini": "GPT-5.1 Codex Mini", @@ -222,6 +227,7 @@ function formatModelName(modelId: string): string { export function getOpencodeZenStaticFallbackModels(): ModelDefinitionConfig[] { const modelIds = [ "gpt-5.1-codex", + "claude-opus-4-6", "claude-opus-4-5", "gemini-3-pro", "gpt-5.1-codex-mini", diff --git a/src/agents/tools/image-tool.test.ts b/src/agents/tools/image-tool.test.ts index 990c550fc2..e9e4661fd0 100644 --- a/src/agents/tools/image-tool.test.ts +++ b/src/agents/tools/image-tool.test.ts @@ -53,7 +53,7 @@ describe("image tool implicit imageModel config", () 
=> { }; expect(resolveImageModelConfigForTool({ cfg, agentDir })).toEqual({ primary: "minimax/MiniMax-VL-01", - fallbacks: ["openai/gpt-5-mini", "anthropic/claude-opus-4-6"], + fallbacks: ["openai/gpt-5-mini", "anthropic/claude-opus-4-5"], }); expect(createImageTool({ config: cfg, agentDir })).not.toBeNull(); }); diff --git a/src/agents/tools/image-tool.ts b/src/agents/tools/image-tool.ts index 7cb9f0d5f3..8af8b16ac7 100644 --- a/src/agents/tools/image-tool.ts +++ b/src/agents/tools/image-tool.ts @@ -24,6 +24,8 @@ import { } from "./image-tool.helpers.js"; const DEFAULT_PROMPT = "Describe the image."; +const ANTHROPIC_IMAGE_PRIMARY = "anthropic/claude-opus-4-6"; +const ANTHROPIC_IMAGE_FALLBACK = "anthropic/claude-opus-4-5"; export const __testing = { decodeDataUrl, @@ -117,7 +119,7 @@ export function resolveImageModelConfigForTool(params: { } else if (primary.provider === "openai" && openaiOk) { preferred = "openai/gpt-5-mini"; } else if (primary.provider === "anthropic" && anthropicOk) { - preferred = "anthropic/claude-opus-4-6"; + preferred = ANTHROPIC_IMAGE_PRIMARY; } if (preferred?.trim()) { @@ -125,7 +127,7 @@ export function resolveImageModelConfigForTool(params: { addFallback("openai/gpt-5-mini"); } if (anthropicOk) { - addFallback("anthropic/claude-opus-4-6"); + addFallback(ANTHROPIC_IMAGE_FALLBACK); } // Don't duplicate primary in fallbacks. const pruned = fallbacks.filter((ref) => ref !== preferred); @@ -138,7 +140,7 @@ export function resolveImageModelConfigForTool(params: { // Cross-provider fallback when we can't pair with the primary provider. 
if (openaiOk) { if (anthropicOk) { - addFallback("anthropic/claude-opus-4-6"); + addFallback(ANTHROPIC_IMAGE_FALLBACK); } return { primary: "openai/gpt-5-mini", @@ -146,7 +148,10 @@ export function resolveImageModelConfigForTool(params: { }; } if (anthropicOk) { - return { primary: "anthropic/claude-opus-4-6" }; + return { + primary: ANTHROPIC_IMAGE_PRIMARY, + fallbacks: [ANTHROPIC_IMAGE_FALLBACK], + }; } return null; diff --git a/src/auto-reply/reply.directive.directive-behavior.accepts-thinking-xhigh-codex-models.e2e.test.ts b/src/auto-reply/reply.directive.directive-behavior.accepts-thinking-xhigh-codex-models.e2e.test.ts index fa85950505..0598a8bb98 100644 --- a/src/auto-reply/reply.directive.directive-behavior.accepts-thinking-xhigh-codex-models.e2e.test.ts +++ b/src/auto-reply/reply.directive.directive-behavior.accepts-thinking-xhigh-codex-models.e2e.test.ts @@ -154,7 +154,7 @@ describe("directive behavior", () => { const texts = (Array.isArray(res) ? res : [res]).map((entry) => entry?.text).filter(Boolean); expect(texts).toContain( - 'Thinking level "xhigh" is only supported for openai/gpt-5.2, openai-codex/gpt-5.2-codex or openai-codex/gpt-5.1-codex.', + 'Thinking level "xhigh" is only supported for openai/gpt-5.2, openai-codex/gpt-5.3-codex, openai-codex/gpt-5.2-codex or openai-codex/gpt-5.1-codex.', ); }); }); diff --git a/src/auto-reply/reply/response-prefix-template.ts b/src/auto-reply/reply/response-prefix-template.ts index 6558d9fbf3..0d10e960c3 100644 --- a/src/auto-reply/reply/response-prefix-template.ts +++ b/src/auto-reply/reply/response-prefix-template.ts @@ -6,7 +6,7 @@ */ export type ResponsePrefixContext = { - /** Short model name (e.g., "gpt-5.2", "claude-opus-4-5") */ + /** Short model name (e.g., "gpt-5.2", "claude-opus-4-6") */ model?: string; /** Full model ID including provider (e.g., "openai-codex/gpt-5.2") */ modelFull?: string; @@ -71,12 +71,12 @@ export function resolveResponsePrefixTemplate( * * Strips: * - Provider prefix (e.g., 
"openai/" from "openai/gpt-5.2") - * - Date suffixes (e.g., "-20251101" from "claude-opus-4-5-20251101") + * - Date suffixes (e.g., "-20260205" from "claude-opus-4-6-20260205") * - Common version suffixes (e.g., "-latest") * * @example * extractShortModelName("openai-codex/gpt-5.2") // "gpt-5.2" - * extractShortModelName("claude-opus-4-5-20251101") // "claude-opus-4-5" + * extractShortModelName("claude-opus-4-6-20260205") // "claude-opus-4-6" * extractShortModelName("gpt-5.2-latest") // "gpt-5.2" */ export function extractShortModelName(fullModel: string): string { diff --git a/src/auto-reply/thinking.test.ts b/src/auto-reply/thinking.test.ts index c888387a18..5254e42ce1 100644 --- a/src/auto-reply/thinking.test.ts +++ b/src/auto-reply/thinking.test.ts @@ -23,6 +23,7 @@ describe("normalizeThinkLevel", () => { describe("listThinkingLevels", () => { it("includes xhigh for codex models", () => { expect(listThinkingLevels(undefined, "gpt-5.2-codex")).toContain("xhigh"); + expect(listThinkingLevels(undefined, "gpt-5.3-codex")).toContain("xhigh"); }); it("includes xhigh for openai gpt-5.2", () => { diff --git a/src/auto-reply/thinking.ts b/src/auto-reply/thinking.ts index 15c94545ac..8fe74c42de 100644 --- a/src/auto-reply/thinking.ts +++ b/src/auto-reply/thinking.ts @@ -23,6 +23,7 @@ export function isBinaryThinkingProvider(provider?: string | null): boolean { export const XHIGH_MODEL_REFS = [ "openai/gpt-5.2", + "openai-codex/gpt-5.3-codex", "openai-codex/gpt-5.2-codex", "openai-codex/gpt-5.1-codex", ] as const; diff --git a/src/commands/auth-choice.apply.openai.ts b/src/commands/auth-choice.apply.openai.ts index 2022d5d0dd..9bd07455f9 100644 --- a/src/commands/auth-choice.apply.openai.ts +++ b/src/commands/auth-choice.apply.openai.ts @@ -7,6 +7,7 @@ import { normalizeApiKeyInput, validateApiKeyInput, } from "./auth-choice.api-key.js"; +import { applyDefaultModelChoice } from "./auth-choice.default-model.js"; import { isRemoteEnvironment } from "./oauth-env.js"; import 
{ createVpsAwareOAuthHandlers } from "./oauth-flow.js"; import { applyAuthProfileConfig, writeOAuthCredentials } from "./onboard-auth.js"; @@ -15,6 +16,11 @@ import { applyOpenAICodexModelDefault, OPENAI_CODEX_DEFAULT_MODEL, } from "./openai-codex-model-default.js"; +import { + applyOpenAIConfig, + applyOpenAIProviderConfig, + OPENAI_DEFAULT_MODEL, +} from "./openai-model-default.js"; export async function applyAuthChoiceOpenAI( params: ApplyAuthChoiceParams, @@ -25,6 +31,18 @@ export async function applyAuthChoiceOpenAI( } if (authChoice === "openai-api-key") { + let nextConfig = params.config; + let agentModelOverride: string | undefined; + const noteAgentModel = async (model: string) => { + if (!params.agentId) { + return; + } + await params.prompter.note( + `Default model set to ${model} for agent "${params.agentId}".`, + "Model configured", + ); + }; + const envKey = resolveEnvApiKey("openai"); if (envKey) { const useExisting = await params.prompter.confirm({ @@ -43,7 +61,19 @@ export async function applyAuthChoiceOpenAI( `Copied OPENAI_API_KEY to ${result.path} for launchd compatibility.`, "OpenAI API key", ); - return { config: params.config }; + const applied = await applyDefaultModelChoice({ + config: nextConfig, + setDefaultModel: params.setDefaultModel, + defaultModel: OPENAI_DEFAULT_MODEL, + applyDefaultConfig: applyOpenAIConfig, + applyProviderConfig: applyOpenAIProviderConfig, + noteDefault: OPENAI_DEFAULT_MODEL, + noteAgentModel, + prompter: params.prompter, + }); + nextConfig = applied.config; + agentModelOverride = applied.agentModelOverride ?? 
agentModelOverride; + return { config: nextConfig, agentModelOverride }; } } @@ -67,7 +97,19 @@ export async function applyAuthChoiceOpenAI( `Saved OPENAI_API_KEY to ${result.path} for launchd compatibility.`, "OpenAI API key", ); - return { config: params.config }; + const applied = await applyDefaultModelChoice({ + config: nextConfig, + setDefaultModel: params.setDefaultModel, + defaultModel: OPENAI_DEFAULT_MODEL, + applyDefaultConfig: applyOpenAIConfig, + applyProviderConfig: applyOpenAIProviderConfig, + noteDefault: OPENAI_DEFAULT_MODEL, + noteAgentModel, + prompter: params.prompter, + }); + nextConfig = applied.config; + agentModelOverride = applied.agentModelOverride ?? agentModelOverride; + return { config: nextConfig, agentModelOverride }; } if (params.authChoice === "openai-codex") { diff --git a/src/commands/auth-choice.default-model.test.ts b/src/commands/auth-choice.default-model.test.ts new file mode 100644 index 0000000000..cea387d705 --- /dev/null +++ b/src/commands/auth-choice.default-model.test.ts @@ -0,0 +1,77 @@ +import { describe, expect, it, vi } from "vitest"; +import type { OpenClawConfig } from "../config/config.js"; +import type { WizardPrompter } from "../wizard/prompts.js"; +import { applyDefaultModelChoice } from "./auth-choice.default-model.js"; + +function makePrompter(): WizardPrompter { + return { + intro: async () => {}, + outro: async () => {}, + note: async () => {}, + select: async () => "", + multiselect: async () => [], + text: async () => "", + confirm: async () => false, + progress: () => ({ update: () => {}, stop: () => {} }), + }; +} + +describe("applyDefaultModelChoice", () => { + it("ensures allowlist entry exists when returning an agent override", async () => { + const defaultModel = "vercel-ai-gateway/anthropic/claude-opus-4.6"; + const noteAgentModel = vi.fn(async () => {}); + const applied = await applyDefaultModelChoice({ + config: {}, + setDefaultModel: false, + defaultModel, + // Simulate a provider function that 
does not explicitly add the entry. + applyProviderConfig: (config: OpenClawConfig) => config, + applyDefaultConfig: (config: OpenClawConfig) => config, + noteAgentModel, + prompter: makePrompter(), + }); + + expect(noteAgentModel).toHaveBeenCalledWith(defaultModel); + expect(applied.agentModelOverride).toBe(defaultModel); + expect(applied.config.agents?.defaults?.models?.[defaultModel]).toEqual({}); + }); + + it("adds canonical allowlist key for anthropic aliases", async () => { + const defaultModel = "anthropic/opus-4.6"; + const applied = await applyDefaultModelChoice({ + config: {}, + setDefaultModel: false, + defaultModel, + applyProviderConfig: (config: OpenClawConfig) => config, + applyDefaultConfig: (config: OpenClawConfig) => config, + noteAgentModel: async () => {}, + prompter: makePrompter(), + }); + + expect(applied.config.agents?.defaults?.models?.[defaultModel]).toEqual({}); + expect(applied.config.agents?.defaults?.models?.["anthropic/claude-opus-4-6"]).toEqual({}); + }); + + it("uses applyDefaultConfig path when setDefaultModel is true", async () => { + const defaultModel = "openai/gpt-5.1-codex"; + const applied = await applyDefaultModelChoice({ + config: {}, + setDefaultModel: true, + defaultModel, + applyProviderConfig: (config: OpenClawConfig) => config, + applyDefaultConfig: () => ({ + agents: { + defaults: { + model: { primary: defaultModel }, + }, + }, + }), + noteDefault: defaultModel, + noteAgentModel: async () => {}, + prompter: makePrompter(), + }); + + expect(applied.agentModelOverride).toBeUndefined(); + expect(applied.config.agents?.defaults?.model).toEqual({ primary: defaultModel }); + }); +}); diff --git a/src/commands/auth-choice.default-model.ts b/src/commands/auth-choice.default-model.ts index a8a1991113..2ef44cb7a3 100644 --- a/src/commands/auth-choice.default-model.ts +++ b/src/commands/auth-choice.default-model.ts @@ -1,5 +1,6 @@ import type { OpenClawConfig } from "../config/config.js"; import type { WizardPrompter } from 
"../wizard/prompts.js"; +import { ensureModelAllowlistEntry } from "./model-allowlist.js"; export async function applyDefaultModelChoice(params: { config: OpenClawConfig; @@ -20,6 +21,10 @@ export async function applyDefaultModelChoice(params: { } const next = params.applyProviderConfig(params.config); + const nextWithModel = ensureModelAllowlistEntry({ + cfg: next, + modelRef: params.defaultModel, + }); await params.noteAgentModel(params.defaultModel); - return { config: next, agentModelOverride: params.defaultModel }; + return { config: nextWithModel, agentModelOverride: params.defaultModel }; } diff --git a/src/commands/auth-choice.test.ts b/src/commands/auth-choice.test.ts index b13972f7b7..61acc9d0d2 100644 --- a/src/commands/auth-choice.test.ts +++ b/src/commands/auth-choice.test.ts @@ -284,7 +284,7 @@ describe("applyAuthChoice", () => { ); expect(result.config.agents?.defaults?.model?.primary).toBe("anthropic/claude-opus-4-5"); expect(result.config.models?.providers?.["opencode-zen"]).toBeUndefined(); - expect(result.agentModelOverride).toBe("opencode/claude-opus-4-5"); + expect(result.agentModelOverride).toBe("opencode/claude-opus-4-6"); }); it("uses existing OPENROUTER_API_KEY when selecting openrouter-api-key", async () => { @@ -398,7 +398,7 @@ describe("applyAuthChoice", () => { mode: "api_key", }); expect(result.config.agents?.defaults?.model?.primary).toBe( - "vercel-ai-gateway/anthropic/claude-opus-4.5", + "vercel-ai-gateway/anthropic/claude-opus-4.6", ); const authProfilePath = authProfilePathFor(requireAgentDir()); diff --git a/src/commands/model-allowlist.ts b/src/commands/model-allowlist.ts new file mode 100644 index 0000000000..157c3e4eb4 --- /dev/null +++ b/src/commands/model-allowlist.ts @@ -0,0 +1,41 @@ +import type { OpenClawConfig } from "../config/config.js"; +import { DEFAULT_PROVIDER } from "../agents/defaults.js"; +import { resolveAllowlistModelKey } from "../agents/model-selection.js"; + +export function 
ensureModelAllowlistEntry(params: { + cfg: OpenClawConfig; + modelRef: string; + defaultProvider?: string; +}): OpenClawConfig { + const rawModelRef = params.modelRef.trim(); + if (!rawModelRef) { + return params.cfg; + } + + const models = { ...params.cfg.agents?.defaults?.models }; + const keySet = new Set([rawModelRef]); + const canonicalKey = resolveAllowlistModelKey( + rawModelRef, + params.defaultProvider ?? DEFAULT_PROVIDER, + ); + if (canonicalKey) { + keySet.add(canonicalKey); + } + + for (const key of keySet) { + models[key] = { + ...models[key], + }; + } + + return { + ...params.cfg, + agents: { + ...params.cfg.agents, + defaults: { + ...params.cfg.agents?.defaults, + models, + }, + }, + }; +} diff --git a/src/commands/model-picker.ts b/src/commands/model-picker.ts index 35e0f24b26..b0719fdd43 100644 --- a/src/commands/model-picker.ts +++ b/src/commands/model-picker.ts @@ -12,6 +12,7 @@ import { resolveConfiguredModelRef, } from "../agents/model-selection.js"; import { formatTokenK } from "./models/shared.js"; +import { OPENAI_CODEX_DEFAULT_MODEL } from "./openai-codex-model-default.js"; const KEEP_VALUE = "__keep__"; const MANUAL_VALUE = "__manual__"; @@ -331,7 +332,7 @@ export async function promptModelAllowlist(params: { params.message ?? "Allowlist models (comma-separated provider/model; blank to keep current)", initialValue: existingKeys.join(", "), - placeholder: "openai-codex/gpt-5.2, anthropic/claude-opus-4-6", + placeholder: `${OPENAI_CODEX_DEFAULT_MODEL}, anthropic/claude-opus-4-6`, }); const parsed = String(raw ?? 
"") .split(",") diff --git a/src/commands/onboard-auth.credentials.ts b/src/commands/onboard-auth.credentials.ts index 8d2dca121e..86980906f8 100644 --- a/src/commands/onboard-auth.credentials.ts +++ b/src/commands/onboard-auth.credentials.ts @@ -117,7 +117,7 @@ export async function setVeniceApiKey(key: string, agentDir?: string) { export const ZAI_DEFAULT_MODEL_REF = "zai/glm-4.7"; export const XIAOMI_DEFAULT_MODEL_REF = "xiaomi/mimo-v2-flash"; export const OPENROUTER_DEFAULT_MODEL_REF = "openrouter/auto"; -export const VERCEL_AI_GATEWAY_DEFAULT_MODEL_REF = "vercel-ai-gateway/anthropic/claude-opus-4.5"; +export const VERCEL_AI_GATEWAY_DEFAULT_MODEL_REF = "vercel-ai-gateway/anthropic/claude-opus-4.6"; export async function setZaiApiKey(key: string, agentDir?: string) { // Write to resolved agent dir so gateway finds credentials on startup. diff --git a/src/commands/onboard-auth.test.ts b/src/commands/onboard-auth.test.ts index 366aaeae38..096e6f086b 100644 --- a/src/commands/onboard-auth.test.ts +++ b/src/commands/onboard-auth.test.ts @@ -393,7 +393,7 @@ describe("applyOpencodeZenProviderConfig", () => { it("adds allowlist entry for the default model", () => { const cfg = applyOpencodeZenProviderConfig({}); const models = cfg.agents?.defaults?.models ?? 
{}; - expect(Object.keys(models)).toContain("opencode/claude-opus-4-5"); + expect(Object.keys(models)).toContain("opencode/claude-opus-4-6"); }); it("preserves existing alias for the default model", () => { @@ -401,19 +401,19 @@ describe("applyOpencodeZenProviderConfig", () => { agents: { defaults: { models: { - "opencode/claude-opus-4-5": { alias: "My Opus" }, + "opencode/claude-opus-4-6": { alias: "My Opus" }, }, }, }, }); - expect(cfg.agents?.defaults?.models?.["opencode/claude-opus-4-5"]?.alias).toBe("My Opus"); + expect(cfg.agents?.defaults?.models?.["opencode/claude-opus-4-6"]?.alias).toBe("My Opus"); }); }); describe("applyOpencodeZenConfig", () => { it("sets correct primary model", () => { const cfg = applyOpencodeZenConfig({}); - expect(cfg.agents?.defaults?.model?.primary).toBe("opencode/claude-opus-4-5"); + expect(cfg.agents?.defaults?.model?.primary).toBe("opencode/claude-opus-4-6"); }); it("preserves existing model fallbacks", () => { diff --git a/src/commands/onboard-non-interactive.ai-gateway.test.ts b/src/commands/onboard-non-interactive.ai-gateway.test.ts index a154724517..0b02632a51 100644 --- a/src/commands/onboard-non-interactive.ai-gateway.test.ts +++ b/src/commands/onboard-non-interactive.ai-gateway.test.ts @@ -66,7 +66,7 @@ describe("onboard (non-interactive): Vercel AI Gateway", () => { expect(cfg.auth?.profiles?.["vercel-ai-gateway:default"]?.provider).toBe("vercel-ai-gateway"); expect(cfg.auth?.profiles?.["vercel-ai-gateway:default"]?.mode).toBe("api_key"); expect(cfg.agents?.defaults?.model?.primary).toBe( - "vercel-ai-gateway/anthropic/claude-opus-4.5", + "vercel-ai-gateway/anthropic/claude-opus-4.6", ); const { ensureAuthProfileStore } = await import("../agents/auth-profiles.js"); diff --git a/src/commands/onboard-non-interactive.openai-api-key.test.ts b/src/commands/onboard-non-interactive.openai-api-key.test.ts new file mode 100644 index 0000000000..1a9d5989e9 --- /dev/null +++ 
b/src/commands/onboard-non-interactive.openai-api-key.test.ts @@ -0,0 +1,77 @@ +import fs from "node:fs/promises"; +import os from "node:os"; +import path from "node:path"; +import { describe, expect, it, vi } from "vitest"; +import { OPENAI_DEFAULT_MODEL } from "./openai-model-default.js"; + +describe("onboard (non-interactive): OpenAI API key", () => { + it("stores OPENAI_API_KEY and configures the OpenAI default model", async () => { + const prev = { + home: process.env.HOME, + stateDir: process.env.OPENCLAW_STATE_DIR, + configPath: process.env.OPENCLAW_CONFIG_PATH, + skipChannels: process.env.OPENCLAW_SKIP_CHANNELS, + skipGmail: process.env.OPENCLAW_SKIP_GMAIL_WATCHER, + skipCron: process.env.OPENCLAW_SKIP_CRON, + skipCanvas: process.env.OPENCLAW_SKIP_CANVAS_HOST, + token: process.env.OPENCLAW_GATEWAY_TOKEN, + password: process.env.OPENCLAW_GATEWAY_PASSWORD, + }; + + process.env.OPENCLAW_SKIP_CHANNELS = "1"; + process.env.OPENCLAW_SKIP_GMAIL_WATCHER = "1"; + process.env.OPENCLAW_SKIP_CRON = "1"; + process.env.OPENCLAW_SKIP_CANVAS_HOST = "1"; + delete process.env.OPENCLAW_GATEWAY_TOKEN; + delete process.env.OPENCLAW_GATEWAY_PASSWORD; + + const tempHome = await fs.mkdtemp(path.join(os.tmpdir(), "openclaw-onboard-openai-")); + process.env.HOME = tempHome; + process.env.OPENCLAW_STATE_DIR = tempHome; + process.env.OPENCLAW_CONFIG_PATH = path.join(tempHome, "openclaw.json"); + vi.resetModules(); + + const runtime = { + log: () => {}, + error: (msg: string) => { + throw new Error(msg); + }, + exit: (code: number) => { + throw new Error(`exit:${code}`); + }, + }; + + try { + const { runNonInteractiveOnboarding } = await import("./onboard-non-interactive.js"); + await runNonInteractiveOnboarding( + { + nonInteractive: true, + authChoice: "openai-api-key", + openaiApiKey: "sk-openai-test", + skipHealth: true, + skipChannels: true, + skipSkills: true, + json: true, + }, + runtime, + ); + + const { CONFIG_PATH } = await import("../config/config.js"); + const cfg = 
JSON.parse(await fs.readFile(CONFIG_PATH, "utf8")) as { + agents?: { defaults?: { model?: { primary?: string } } }; + }; + expect(cfg.agents?.defaults?.model?.primary).toBe(OPENAI_DEFAULT_MODEL); + } finally { + await fs.rm(tempHome, { recursive: true, force: true }); + process.env.HOME = prev.home; + process.env.OPENCLAW_STATE_DIR = prev.stateDir; + process.env.OPENCLAW_CONFIG_PATH = prev.configPath; + process.env.OPENCLAW_SKIP_CHANNELS = prev.skipChannels; + process.env.OPENCLAW_SKIP_GMAIL_WATCHER = prev.skipGmail; + process.env.OPENCLAW_SKIP_CRON = prev.skipCron; + process.env.OPENCLAW_SKIP_CANVAS_HOST = prev.skipCanvas; + process.env.OPENCLAW_GATEWAY_TOKEN = prev.token; + process.env.OPENCLAW_GATEWAY_PASSWORD = prev.password; + } + }, 60_000); +}); diff --git a/src/commands/onboard-non-interactive/local/auth-choice.ts b/src/commands/onboard-non-interactive/local/auth-choice.ts index 9b69f1dfda..d1d4406a44 100644 --- a/src/commands/onboard-non-interactive/local/auth-choice.ts +++ b/src/commands/onboard-non-interactive/local/auth-choice.ts @@ -37,6 +37,7 @@ import { setXiaomiApiKey, setZaiApiKey, } from "../../onboard-auth.js"; +import { applyOpenAIConfig } from "../../openai-model-default.js"; import { resolveNonInteractiveApiKey } from "../api-keys.js"; export async function applyNonInteractiveAuthChoice(params: { @@ -234,7 +235,7 @@ export async function applyNonInteractiveAuthChoice(params: { const result = upsertSharedEnvVar({ key: "OPENAI_API_KEY", value: key }); process.env.OPENAI_API_KEY = key; runtime.log(`Saved OPENAI_API_KEY to ${shortenHomePath(result.path)}`); - return nextConfig; + return applyOpenAIConfig(nextConfig); } if (authChoice === "openrouter-api-key") { diff --git a/src/commands/openai-codex-model-default.test.ts b/src/commands/openai-codex-model-default.test.ts index eed5979a11..ac8ceccd38 100644 --- a/src/commands/openai-codex-model-default.test.ts +++ b/src/commands/openai-codex-model-default.test.ts @@ -4,6 +4,7 @@ import { 
applyOpenAICodexModelDefault, OPENAI_CODEX_DEFAULT_MODEL, } from "./openai-codex-model-default.js"; +import { OPENAI_DEFAULT_MODEL } from "./openai-model-default.js"; describe("applyOpenAICodexModelDefault", () => { it("sets openai-codex default when model is unset", () => { @@ -17,7 +18,7 @@ describe("applyOpenAICodexModelDefault", () => { it("sets openai-codex default when model is openai/*", () => { const cfg: OpenClawConfig = { - agents: { defaults: { model: "openai/gpt-5.2" } }, + agents: { defaults: { model: OPENAI_DEFAULT_MODEL } }, }; const applied = applyOpenAICodexModelDefault(cfg); expect(applied.changed).toBe(true); @@ -28,7 +29,7 @@ describe("applyOpenAICodexModelDefault", () => { it("does not override openai-codex/*", () => { const cfg: OpenClawConfig = { - agents: { defaults: { model: "openai-codex/gpt-5.2" } }, + agents: { defaults: { model: OPENAI_CODEX_DEFAULT_MODEL } }, }; const applied = applyOpenAICodexModelDefault(cfg); expect(applied.changed).toBe(false); diff --git a/src/commands/openai-codex-model-default.ts b/src/commands/openai-codex-model-default.ts index 08ff72ac6d..b20b6feca7 100644 --- a/src/commands/openai-codex-model-default.ts +++ b/src/commands/openai-codex-model-default.ts @@ -1,7 +1,7 @@ import type { OpenClawConfig } from "../config/config.js"; import type { AgentModelListConfig } from "../config/types.js"; -export const OPENAI_CODEX_DEFAULT_MODEL = "openai-codex/gpt-5.2"; +export const OPENAI_CODEX_DEFAULT_MODEL = "openai-codex/gpt-5.3-codex"; function shouldSetOpenAICodexModel(model?: string): boolean { const trimmed = model?.trim(); diff --git a/src/commands/openai-model-default.test.ts b/src/commands/openai-model-default.test.ts new file mode 100644 index 0000000000..4065e2ac33 --- /dev/null +++ b/src/commands/openai-model-default.test.ts @@ -0,0 +1,40 @@ +import { describe, expect, it } from "vitest"; +import { + applyOpenAIConfig, + applyOpenAIProviderConfig, + OPENAI_DEFAULT_MODEL, +} from "./openai-model-default.js"; + 
+describe("applyOpenAIProviderConfig", () => { + it("adds allowlist entry for default model", () => { + const next = applyOpenAIProviderConfig({}); + expect(Object.keys(next.agents?.defaults?.models ?? {})).toContain(OPENAI_DEFAULT_MODEL); + }); + + it("preserves existing alias for default model", () => { + const next = applyOpenAIProviderConfig({ + agents: { + defaults: { + models: { + [OPENAI_DEFAULT_MODEL]: { alias: "My GPT" }, + }, + }, + }, + }); + expect(next.agents?.defaults?.models?.[OPENAI_DEFAULT_MODEL]?.alias).toBe("My GPT"); + }); +}); + +describe("applyOpenAIConfig", () => { + it("sets default when model is unset", () => { + const next = applyOpenAIConfig({}); + expect(next.agents?.defaults?.model).toEqual({ primary: OPENAI_DEFAULT_MODEL }); + }); + + it("overrides model.primary when model object already exists", () => { + const next = applyOpenAIConfig({ + agents: { defaults: { model: { primary: "anthropic/claude-opus-4-6", fallback: [] } } }, + }); + expect(next.agents?.defaults?.model).toEqual({ primary: OPENAI_DEFAULT_MODEL, fallback: [] }); + }); +}); diff --git a/src/commands/openai-model-default.ts b/src/commands/openai-model-default.ts new file mode 100644 index 0000000000..191756e0fa --- /dev/null +++ b/src/commands/openai-model-default.ts @@ -0,0 +1,47 @@ +import type { OpenClawConfig } from "../config/config.js"; +import { ensureModelAllowlistEntry } from "./model-allowlist.js"; + +export const OPENAI_DEFAULT_MODEL = "openai/gpt-5.1-codex"; + +export function applyOpenAIProviderConfig(cfg: OpenClawConfig): OpenClawConfig { + const next = ensureModelAllowlistEntry({ + cfg, + modelRef: OPENAI_DEFAULT_MODEL, + }); + const models = { ...next.agents?.defaults?.models }; + models[OPENAI_DEFAULT_MODEL] = { + ...models[OPENAI_DEFAULT_MODEL], + alias: models[OPENAI_DEFAULT_MODEL]?.alias ?? 
"GPT", + }; + + return { + ...next, + agents: { + ...next.agents, + defaults: { + ...next.agents?.defaults, + models, + }, + }, + }; +} + +export function applyOpenAIConfig(cfg: OpenClawConfig): OpenClawConfig { + const next = applyOpenAIProviderConfig(cfg); + return { + ...next, + agents: { + ...next.agents, + defaults: { + ...next.agents?.defaults, + model: + next.agents?.defaults?.model && typeof next.agents.defaults.model === "object" + ? { + ...next.agents.defaults.model, + primary: OPENAI_DEFAULT_MODEL, + } + : { primary: OPENAI_DEFAULT_MODEL }, + }, + }, + }; +} diff --git a/src/commands/opencode-zen-model-default.ts b/src/commands/opencode-zen-model-default.ts index b3813fb5c8..9f3d4b4565 100644 --- a/src/commands/opencode-zen-model-default.ts +++ b/src/commands/opencode-zen-model-default.ts @@ -1,8 +1,11 @@ import type { OpenClawConfig } from "../config/config.js"; import type { AgentModelListConfig } from "../config/types.js"; -export const OPENCODE_ZEN_DEFAULT_MODEL = "opencode/claude-opus-4-5"; -const LEGACY_OPENCODE_ZEN_DEFAULT_MODEL = "opencode-zen/claude-opus-4-5"; +export const OPENCODE_ZEN_DEFAULT_MODEL = "opencode/claude-opus-4-6"; +const LEGACY_OPENCODE_ZEN_DEFAULT_MODELS = new Set([ + "opencode/claude-opus-4-5", + "opencode-zen/claude-opus-4-5", +]); function resolvePrimaryModel(model?: AgentModelListConfig | string): string | undefined { if (typeof model === "string") { @@ -20,7 +23,9 @@ export function applyOpencodeZenModelDefault(cfg: OpenClawConfig): { } { const current = resolvePrimaryModel(cfg.agents?.defaults?.model)?.trim(); const normalizedCurrent = - current === LEGACY_OPENCODE_ZEN_DEFAULT_MODEL ? OPENCODE_ZEN_DEFAULT_MODEL : current; + current && LEGACY_OPENCODE_ZEN_DEFAULT_MODELS.has(current) + ? 
OPENCODE_ZEN_DEFAULT_MODEL + : current; if (normalizedCurrent === OPENCODE_ZEN_DEFAULT_MODEL) { return { next: cfg, changed: false }; } diff --git a/src/config/model-alias-defaults.test.ts b/src/config/model-alias-defaults.test.ts index 53a22377bf..4b20dcba23 100644 --- a/src/config/model-alias-defaults.test.ts +++ b/src/config/model-alias-defaults.test.ts @@ -26,7 +26,7 @@ describe("applyModelDefaults", () => { agents: { defaults: { models: { - "anthropic/claude-opus-4-6": { alias: "Opus" }, + "anthropic/claude-opus-4-5": { alias: "Opus" }, }, }, }, @@ -34,7 +34,7 @@ describe("applyModelDefaults", () => { const next = applyModelDefaults(cfg); - expect(next.agents?.defaults?.models?.["anthropic/claude-opus-4-6"]?.alias).toBe("Opus"); + expect(next.agents?.defaults?.models?.["anthropic/claude-opus-4-5"]?.alias).toBe("Opus"); }); it("respects explicit empty alias disables", () => { diff --git a/src/config/types.messages.ts b/src/config/types.messages.ts index 97de53417f..7619666143 100644 --- a/src/config/types.messages.ts +++ b/src/config/types.messages.ts @@ -59,13 +59,13 @@ export type MessagesConfig = { * - special value: `"auto"` derives `[{agents.list[].identity.name}]` for the routed agent (when set) * * Supported template variables (case-insensitive): - * - `{model}` - short model name (e.g., `claude-opus-4-5`, `gpt-4o`) - * - `{modelFull}` - full model identifier (e.g., `anthropic/claude-opus-4-5`) + * - `{model}` - short model name (e.g., `claude-opus-4-6`, `gpt-4o`) + * - `{modelFull}` - full model identifier (e.g., `anthropic/claude-opus-4-6`) * - `{provider}` - provider name (e.g., `anthropic`, `openai`) * - `{thinkingLevel}` or `{think}` - current thinking level (`high`, `low`, `off`) * - `{identity.name}` or `{identityName}` - agent identity name * - * Example: `"[{model} | think:{thinkingLevel}]"` → `"[claude-opus-4-5 | think:high]"` + * Example: `"[{model} | think:{thinkingLevel}]"` → `"[claude-opus-4-6 | think:high]"` * * Unresolved variables remain 
as literal text (e.g., `{model}` if context unavailable). * diff --git a/src/gateway/test-helpers.mocks.ts b/src/gateway/test-helpers.mocks.ts index aa811d8508..41e6fcdd5a 100644 --- a/src/gateway/test-helpers.mocks.ts +++ b/src/gateway/test-helpers.mocks.ts @@ -404,7 +404,7 @@ vi.mock("../config/config.js", async () => { ? (fileAgents.defaults as Record) : {}; const defaults = { - model: { primary: "anthropic/claude-opus-4-5" }, + model: { primary: "anthropic/claude-opus-4-6" }, workspace: path.join(os.tmpdir(), "openclaw-gateway-test"), ...fileDefaults, ...testState.agentConfig, diff --git a/src/security/audit-extra.ts b/src/security/audit-extra.ts index c784dc853b..7eca5dfc3c 100644 --- a/src/security/audit-extra.ts +++ b/src/security/audit-extra.ts @@ -312,10 +312,12 @@ function isClaudeModel(id: string): boolean { } function isClaude45OrHigher(id: string): boolean { - // Match claude-*-4-5, claude-*-45, claude-*4.5, or opus-4-5/opus-45 variants + // Match claude-*-4-5+, claude-*-45+, claude-*4.5+, or future 5.x+ majors. // Examples that should match: - // claude-opus-4-5, claude-opus-45, claude-4.5, venice/claude-opus-45 - return /\bclaude-[^\s/]*?(?:-4-?5\b|4\.5\b)/i.test(id); + // claude-opus-4-5, claude-opus-4-6, claude-opus-45, claude-4.6, claude-sonnet-5 + return /\bclaude-[^\s/]*?(?:-4-?(?:[5-9]|[1-9]\d)\b|4\.(?:[5-9]|[1-9]\d)\b|-[5-9](?:\b|[.-]))/i.test( + id, + ); } export function collectModelHygieneFindings(cfg: OpenClawConfig): SecurityAuditFinding[] {