fix: align ZAI thinking toggles

Peter Steinberger
2026-01-16 22:25:51 +00:00
parent 3567dc4a47
commit 500c75b4f0
14 changed files with 134 additions and 562 deletions


@@ -70,6 +70,7 @@
 - Daemon: fix profile-aware service label resolution (env-driven) and add coverage for launchd/systemd/schtasks. (#969) — thanks @bjesuiter.
 - Agents: avoid false positives when logging unsupported Google tool schema keywords.
 - Agents: skip Gemini history downgrades for google-antigravity to preserve tool calls. (#894) — thanks @mukhtharcm.
+- Agents: map Z.AI thinking to on/off in UI/TUI and drop the pi-agent-core patch now that upstream handles binary thinking.
 - Status: restore usage summary line for current provider when no OAuth profiles exist.
 - Fix: guard model fallback against undefined provider/model values. (#954) — thanks @roshanasingh4.
 - Fix: refactor session store updates, add chat.inject, and harden subagent cleanup flow. (#944) — thanks @tyler6204.


@@ -14,6 +14,8 @@ read_when:
 - high → “ultrathink” (max budget)
 - xhigh → “ultrathink+” (GPT-5.2 + Codex models only)
 - `highest`, `max` map to `high`.
+- Provider notes:
+  - Z.AI (`zai/*`) only supports binary thinking (`on`/`off`). Any non-`off` level is treated as `on` (mapped to `low`).
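In code, the collapse described in that note amounts to a one-liner (a sketch; `collapseZaiThinkLevel` is an illustrative name, not a shipped helper):

```ts
type ThinkLevel = "off" | "minimal" | "low" | "medium" | "high" | "xhigh";

// Sketch: Z.AI models only take a binary thinking toggle, so any
// non-"off" level collapses to "on", stored internally as "low".
function collapseZaiThinkLevel(level: ThinkLevel): ThinkLevel {
  return level === "off" ? "off" : "low"; // "low" is surfaced as "on"
}
```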
 ## Resolution order
 1. Inline directive on the message (applies only to that message).


@@ -214,9 +214,6 @@
     "@sinclair/typebox": "0.34.47",
     "hono": "4.11.4",
     "tar": "7.5.3"
-  },
-  "patchedDependencies": {
-    "@mariozechner/pi-agent-core@0.46.0": "patches/@mariozechner__pi-agent-core.patch"
   }
 },
 "vitest": {


@@ -1,54 +0,0 @@
-diff --git a/dist/agent.d.ts b/dist/agent.d.ts
-index fcfb19924ef6ce233aa55795e3687ce23938c5a6..a63daea868c5b3b7f7bb9272576c65c6ad95da8a 100644
---- a/dist/agent.d.ts
-+++ b/dist/agent.d.ts
-@@ -38,6 +38,10 @@ export interface AgentOptions {
-      * Useful for expiring tokens (e.g., GitHub Copilot OAuth).
-      */
-     getApiKey?: (provider: string) => Promise<string | undefined> | string | undefined;
-+    /**
-+     * Extra params to pass to the provider API (e.g., Z.AI GLM thinking mode params).
-+     */
-+    extraParams?: Record<string, unknown>;
-     /**
-      * Custom token budgets for thinking levels (token-based providers only).
-      */
-@@ -56,6 +60,8 @@ export declare class Agent {
-     streamFn: StreamFn;
-     private _sessionId?;
-     getApiKey?: (provider: string) => Promise<string | undefined> | string | undefined;
-+    /** Extra params to pass to the provider API. */
-+    extraParams?: Record<string, unknown>;
-     private runningPrompt?;
-     private resolveRunningPrompt?;
-     private _thinkingBudgets?;
-diff --git a/dist/agent.js b/dist/agent.js
-index 34ceb4ddcbc53d83edd82d774a76d9bf469b42f3..ecd8b7641c71523296890e11ac0cf0855a0dadd5 100644
---- a/dist/agent.js
-+++ b/dist/agent.js
-@@ -33,6 +33,7 @@ export class Agent {
-     streamFn;
-     _sessionId;
-     getApiKey;
-+    extraParams;
-     runningPrompt;
-     resolveRunningPrompt;
-     _thinkingBudgets;
-@@ -45,6 +46,8 @@ export class Agent {
-         this.streamFn = opts.streamFn || streamSimple;
-         this._sessionId = opts.sessionId;
-         this.getApiKey = opts.getApiKey;
-+        // PATCH: Support extraParams for provider-specific features (e.g., GLM-4.7 thinking mode)
-+        this.extraParams = opts.extraParams;
-         this._thinkingBudgets = opts.thinkingBudgets;
-     }
-     /**
-@@ -225,6 +228,8 @@ export class Agent {
-         convertToLlm: this.convertToLlm,
-         transformContext: this.transformContext,
-         getApiKey: this.getApiKey,
-+        // PATCH: Pass extraParams through to stream function
-+        extraParams: this.extraParams,
-         getSteeringMessages: async () => {
-             if (this.steeringMode === "one-at-a-time") {
-                 if (this.steeringQueue.length > 0) {
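For context, the now-removed patch let the gateway hand provider-specific params to the agent, roughly like this (a compile-only sketch; `PatchedAgentOptions` is an illustrative stand-in for the patched `AgentOptions` surface shown above):

```ts
// Illustrative subset of the patched AgentOptions (names from the patch).
interface PatchedAgentOptions {
  getApiKey?: (provider: string) => Promise<string | undefined> | string | undefined;
  /** Extra params forwarded verbatim to the provider API call. */
  extraParams?: Record<string, unknown>;
}

// A Z.AI GLM payload the gateway used to inject through this hook:
const opts: PatchedAgentOptions = {
  extraParams: {
    thinking: { type: "enabled", clear_thinking: false },
  },
};
```

With upstream pi-agent-core handling binary thinking (per the changelog entry above), this shim is no longer needed.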

pnpm-lock.yaml (generated)

@@ -9,11 +9,6 @@ overrides:
   hono: 4.11.4
   tar: 7.5.3
-patchedDependencies:
-  '@mariozechner/pi-agent-core@0.46.0':
-    hash: 01312ceb1f6be7e42822c24c9a7a4f7db56b24ae114a364855bd3819779d1cf4
-    path: patches/@mariozechner__pi-agent-core.patch
 importers:
   .:
@@ -35,7 +30,7 @@ importers:
       version: 1.3.4
     '@mariozechner/pi-agent-core':
       specifier: 0.46.0
-      version: 0.46.0(patch_hash=01312ceb1f6be7e42822c24c9a7a4f7db56b24ae114a364855bd3819779d1cf4)(ws@8.19.0)(zod@4.3.5)
+      version: 0.46.0(ws@8.19.0)(zod@4.3.5)
     '@mariozechner/pi-ai':
       specifier: 0.46.0
       version: 0.46.0(ws@8.19.0)(zod@4.3.5)
@@ -5147,7 +5142,7 @@ snapshots:
   transitivePeerDependencies:
     - tailwindcss
-  '@mariozechner/pi-agent-core@0.46.0(patch_hash=01312ceb1f6be7e42822c24c9a7a4f7db56b24ae114a364855bd3819779d1cf4)(ws@8.19.0)(zod@4.3.5)':
+  '@mariozechner/pi-agent-core@0.46.0(ws@8.19.0)(zod@4.3.5)':
     dependencies:
       '@mariozechner/pi-ai': 0.46.0(ws@8.19.0)(zod@4.3.5)
       '@mariozechner/pi-tui': 0.46.0
@@ -5186,7 +5181,7 @@ snapshots:
     dependencies:
      '@mariozechner/clipboard': 0.3.0
      '@mariozechner/jiti': 2.6.2
-     '@mariozechner/pi-agent-core': 0.46.0(patch_hash=01312ceb1f6be7e42822c24c9a7a4f7db56b24ae114a364855bd3819779d1cf4)(ws@8.19.0)(zod@4.3.5)
+     '@mariozechner/pi-agent-core': 0.46.0(ws@8.19.0)(zod@4.3.5)
      '@mariozechner/pi-ai': 0.46.0(ws@8.19.0)(zod@4.3.5)
      '@mariozechner/pi-tui': 0.46.0
      '@silvia-odwyer/photon-node': 0.3.4


@@ -30,7 +30,7 @@ describeLive("pi embedded extra params (live)", () => {
     const agent = { streamFn: streamSimple };
-    applyExtraParamsToAgent(agent, cfg, "openai", model.id, "off");
+    applyExtraParamsToAgent(agent, cfg, "openai", model.id);
     const stream = agent.streamFn(
       model,


@@ -1,460 +1,62 @@
 import { describe, expect, it } from "vitest";
 import { resolveExtraParams } from "./pi-embedded-runner.js";
-/**
- * Tests for resolveExtraParams - the function that auto-enables GLM-4.x thinking mode.
- *
- * Z.AI Cloud API format: thinking: { type: "enabled", clear_thinking: boolean }
- * - GLM-4.7: Preserved thinking (clear_thinking: false) - reasoning kept across turns
- * - GLM-4.5/4.6: Interleaved thinking (clear_thinking: true) - reasoning cleared each turn
- *
- * @see https://docs.z.ai/guides/capabilities/thinking-mode
- */
 describe("resolveExtraParams", () => {
-  describe("GLM-4.7 preserved thinking (clear_thinking: false)", () => {
-    it("auto-enables preserved thinking for zai/glm-4.7 with no config", () => {
-      const result = resolveExtraParams({
-        cfg: undefined,
-        provider: "zai",
-        modelId: "glm-4.7",
-      });
-      expect(result).toEqual({
-        thinking: {
-          type: "enabled",
-          clear_thinking: false, // Preserved thinking for GLM-4.7
-        },
-      });
-    });
-    it("auto-enables preserved thinking for zai/GLM-4.7 (case insensitive)", () => {
-      const result = resolveExtraParams({
-        cfg: undefined,
-        provider: "zai",
-        modelId: "GLM-4.7",
-      });
-      expect(result).toEqual({
-        thinking: {
-          type: "enabled",
-          clear_thinking: false,
-        },
-      });
-    });
-  });
-  describe("GLM-4.5/4.6 interleaved thinking (clear_thinking: true)", () => {
-    it("auto-enables interleaved thinking for zai/glm-4.5", () => {
-      const result = resolveExtraParams({
-        cfg: undefined,
-        provider: "zai",
-        modelId: "glm-4.5",
-      });
-      expect(result).toEqual({
-        thinking: {
-          type: "enabled",
-          clear_thinking: true, // Interleaved thinking for GLM-4.5
-        },
-      });
-    });
-    it("auto-enables interleaved thinking for zai/glm-4.6", () => {
-      const result = resolveExtraParams({
-        cfg: undefined,
-        provider: "zai",
-        modelId: "glm-4.6",
-      });
-      expect(result).toEqual({
-        thinking: {
-          type: "enabled",
-          clear_thinking: true, // Interleaved thinking for GLM-4.6
-        },
-      });
-    });
-    it("auto-enables interleaved thinking for zai/glm-4-flash", () => {
-      const result = resolveExtraParams({
-        cfg: undefined,
-        provider: "zai",
-        modelId: "glm-4-flash",
-      });
-      expect(result).toEqual({
-        thinking: {
-          type: "enabled",
-          clear_thinking: true, // Non-4.7 gets interleaved
-        },
-      });
-    });
-    it("auto-enables interleaved thinking for zai/glm-4.5-air", () => {
-      const result = resolveExtraParams({
-        cfg: undefined,
-        provider: "zai",
-        modelId: "glm-4.5-air",
-      });
-      expect(result).toEqual({
-        thinking: {
-          type: "enabled",
-          clear_thinking: true,
-        },
-      });
-    });
-  });
-  describe("config overrides", () => {
-    it("respects explicit thinking config from user (disable thinking)", () => {
-      const result = resolveExtraParams({
-        cfg: {
-          agents: {
-            defaults: {
-              models: {
-                "zai/glm-4.7": {
-                  params: {
-                    thinking: {
-                      type: "disabled",
-                    },
-                  },
-                },
-              },
-            },
-          },
-        },
-        provider: "zai",
-        modelId: "glm-4.7",
-      });
-      expect(result).toEqual({
-        thinking: {
-          type: "disabled",
-        },
-      });
-    });
-    it("preserves other params while adding thinking config", () => {
-      const result = resolveExtraParams({
-        cfg: {
-          agents: {
-            defaults: {
-              models: {
-                "zai/glm-4.7": {
-                  params: {
-                    temperature: 0.7,
-                    max_tokens: 4096,
-                  },
-                },
-              },
-            },
-          },
-        },
-        provider: "zai",
-        modelId: "glm-4.7",
-      });
-      expect(result).toEqual({
-        temperature: 0.7,
-        max_tokens: 4096,
-        thinking: {
-          type: "enabled",
-          clear_thinking: false,
-        },
-      });
-    });
-    it("does not override explicit thinking config even if partial", () => {
-      const result = resolveExtraParams({
-        cfg: {
-          agents: {
-            defaults: {
-              models: {
-                "zai/glm-4.7": {
-                  params: {
-                    thinking: {
-                      type: "enabled",
-                      // User explicitly omitted clear_thinking
-                    },
-                  },
-                },
-              },
-            },
-          },
-        },
-        provider: "zai",
-        modelId: "glm-4.7",
-      });
-      // Should use user's config exactly, not merge defaults
-      expect(result).toEqual({
-        thinking: {
-          type: "enabled",
-        },
-      });
-    });
-  });
-  describe("non-GLM models", () => {
-    it("returns undefined for anthropic/claude with no config", () => {
-      const result = resolveExtraParams({
-        cfg: undefined,
-        provider: "anthropic",
-        modelId: "claude-3-opus",
-      });
-      expect(result).toBeUndefined();
-    });
-    it("returns undefined for openai/gpt-4 with no config", () => {
-      const result = resolveExtraParams({
-        cfg: undefined,
-        provider: "openai",
-        modelId: "gpt-4",
-      });
-      expect(result).toBeUndefined();
-    });
-    it("passes through params for non-GLM models without modification", () => {
-      const result = resolveExtraParams({
-        cfg: {
-          agents: {
-            defaults: {
-              models: {
-                "openai/gpt-4": {
-                  params: {
-                    logprobs: true,
-                    top_logprobs: 5,
-                  },
-                },
-              },
-            },
-          },
-        },
-        provider: "openai",
-        modelId: "gpt-4",
-      });
-      expect(result).toEqual({
-        logprobs: true,
-        top_logprobs: 5,
-      });
-    });
-    it("does not auto-enable thinking for non-zai provider even with glm-4 model id", () => {
-      const result = resolveExtraParams({
-        cfg: undefined,
-        provider: "openai",
-        modelId: "glm-4.7", // Even if model ID contains glm-4
-      });
-      expect(result).toBeUndefined();
-    });
-  });
-  describe("edge cases", () => {
-    it("handles empty config gracefully", () => {
-      const result = resolveExtraParams({
-        cfg: {},
-        provider: "zai",
-        modelId: "glm-4.7",
-      });
-      expect(result).toEqual({
-        thinking: {
-          type: "enabled",
-          clear_thinking: false,
-        },
-      });
-    });
-    it("handles config with empty models gracefully", () => {
-      const result = resolveExtraParams({
-        cfg: { agents: { defaults: { models: {} } } },
-        provider: "zai",
-        modelId: "glm-4.7",
-      });
-      expect(result).toEqual({
-        thinking: {
-          type: "enabled",
-          clear_thinking: false,
-        },
-      });
-    });
-    it("model alias lookup uses exact provider/model key", () => {
-      const result = resolveExtraParams({
-        cfg: {
-          agents: {
-            defaults: {
-              models: {
-                "zai/glm-4.7": {
-                  alias: "smart",
-                  params: {
-                    custom_param: "value",
-                  },
-                },
-              },
-            },
-          },
-        },
-        provider: "zai",
-        modelId: "glm-4.7",
-      });
-      expect(result).toEqual({
-        custom_param: "value",
-        thinking: {
-          type: "enabled",
-          clear_thinking: false,
-        },
-      });
-    });
-    it("treats thinking: null as explicit config (no auto-enable)", () => {
-      const result = resolveExtraParams({
-        cfg: {
-          agents: {
-            defaults: {
-              models: {
-                "zai/glm-4.7": {
-                  params: {
-                    thinking: null,
-                  },
-                },
-              },
-            },
-          },
-        },
-        provider: "zai",
-        modelId: "glm-4.7",
-      });
-      // null is !== undefined, so we respect the explicit null config
-      expect(result).toEqual({
-        thinking: null,
-      });
-    });
-    it("handles GLM-4.7 variants (glm-4.7-flash, glm-4.7-plus)", () => {
-      // GLM-4.7-flash should get preserved thinking (contains "glm-4.7")
-      const flashResult = resolveExtraParams({
-        cfg: undefined,
-        provider: "zai",
-        modelId: "glm-4.7-flash",
-      });
-      expect(flashResult).toEqual({
-        thinking: {
-          type: "enabled",
-          clear_thinking: false, // Preserved thinking for GLM-4.7 variants
-        },
-      });
-      // GLM-4.7-plus should also get preserved thinking
-      const plusResult = resolveExtraParams({
-        cfg: undefined,
-        provider: "zai",
-        modelId: "glm-4.7-plus",
-      });
-      expect(plusResult).toEqual({
-        thinking: {
-          type: "enabled",
-          clear_thinking: false,
-        },
-      });
-    });
-  });
-  describe("thinkLevel parameter", () => {
-    it("thinkLevel: 'off' disables auto-enable for GLM-4.x", () => {
-      const result = resolveExtraParams({
-        cfg: undefined,
-        provider: "zai",
-        modelId: "glm-4.7",
-        thinkLevel: "off",
-      });
-      // Should NOT auto-enable thinking when user explicitly disabled it
-      expect(result).toBeUndefined();
-    });
-    it("thinkLevel: 'off' still passes through explicit config", () => {
-      const result = resolveExtraParams({
-        cfg: {
-          agents: {
-            defaults: {
-              models: {
-                "zai/glm-4.7": {
-                  params: {
-                    custom_param: "value",
-                  },
-                },
-              },
-            },
-          },
-        },
-        provider: "zai",
-        modelId: "glm-4.7",
-        thinkLevel: "off",
-      });
-      // Should pass through config params but NOT auto-add thinking
-      expect(result).toEqual({
-        custom_param: "value",
-      });
-    });
-    it("thinkLevel: 'low' allows auto-enable", () => {
-      const result = resolveExtraParams({
-        cfg: undefined,
-        provider: "zai",
-        modelId: "glm-4.7",
-        thinkLevel: "low",
-      });
-      expect(result).toEqual({
-        thinking: {
-          type: "enabled",
-          clear_thinking: false,
-        },
-      });
-    });
-    it("thinkLevel: 'high' allows auto-enable", () => {
-      const result = resolveExtraParams({
-        cfg: undefined,
-        provider: "zai",
-        modelId: "glm-4.5",
-        thinkLevel: "high",
-      });
-      expect(result).toEqual({
-        thinking: {
-          type: "enabled",
-          clear_thinking: true,
-        },
-      });
-    });
-    it("thinkLevel: undefined (not specified) allows auto-enable", () => {
-      const result = resolveExtraParams({
-        cfg: undefined,
-        provider: "zai",
-        modelId: "glm-4.7",
-        // thinkLevel not specified
-      });
-      expect(result).toEqual({
-        thinking: {
-          type: "enabled",
-          clear_thinking: false,
-        },
-      });
-    });
-  });
+  it("returns undefined with no model config", () => {
+    const result = resolveExtraParams({
+      cfg: undefined,
+      provider: "zai",
+      modelId: "glm-4.7",
+    });
+    expect(result).toBeUndefined();
+  });
+  it("returns params for exact provider/model key", () => {
+    const result = resolveExtraParams({
+      cfg: {
+        agents: {
+          defaults: {
+            models: {
+              "openai/gpt-4": {
+                params: {
+                  temperature: 0.7,
+                  maxTokens: 2048,
+                },
+              },
+            },
+          },
+        },
+      },
+      provider: "openai",
+      modelId: "gpt-4",
+    });
+    expect(result).toEqual({
+      temperature: 0.7,
+      maxTokens: 2048,
+    });
+  });
+  it("ignores unrelated model entries", () => {
+    const result = resolveExtraParams({
+      cfg: {
+        agents: {
+          defaults: {
+            models: {
+              "openai/gpt-4": {
+                params: {
+                  temperature: 0.7,
+                },
+              },
+            },
+          },
+        },
+      },
+      provider: "openai",
+      modelId: "gpt-4.1-mini",
+    });
+    expect(result).toBeUndefined();
+  });
 });


@@ -6,62 +6,19 @@ import type { ClawdbotConfig } from "../../config/config.js";
 import { log } from "./logger.js";
 /**
- * Resolve provider-specific extraParams from model config.
- * Auto-enables thinking mode for GLM-4.x models unless explicitly disabled.
- *
- * For ZAI GLM-4.x models, we auto-enable thinking via the Z.AI Cloud API format:
- *   thinking: { type: "enabled", clear_thinking: boolean }
- *
- * - GLM-4.7: Preserved thinking (clear_thinking: false) - reasoning kept across turns
- * - GLM-4.5/4.6: Interleaved thinking (clear_thinking: true) - reasoning cleared each turn
- *
- * Users can override via config:
- *   agents.defaults.models["zai/glm-4.7"].params.thinking = { type: "disabled" }
- *
- * Or disable via runtime flag: --thinking off
- *
- * @see https://docs.z.ai/guides/capabilities/thinking-mode
+ * Resolve provider-specific extra params from model config.
+ * Used to pass through stream params like temperature/maxTokens.
+ *
  * @internal Exported for testing only
  */
 export function resolveExtraParams(params: {
   cfg: ClawdbotConfig | undefined;
   provider: string;
   modelId: string;
-  thinkLevel?: string;
 }): Record<string, unknown> | undefined {
   const modelKey = `${params.provider}/${params.modelId}`;
   const modelConfig = params.cfg?.agents?.defaults?.models?.[modelKey];
-  let extraParams = modelConfig?.params ? { ...modelConfig.params } : undefined;
-  // Auto-enable thinking for ZAI GLM-4.x models when not explicitly configured
-  // Skip if user explicitly disabled thinking via --thinking off
-  if (params.provider === "zai" && params.thinkLevel !== "off") {
-    const modelIdLower = params.modelId.toLowerCase();
-    const isGlm4 = modelIdLower.includes("glm-4");
-    if (isGlm4) {
-      const hasThinkingConfig = extraParams?.thinking !== undefined;
-      if (!hasThinkingConfig) {
-        // GLM-4.7 supports preserved thinking; GLM-4.5/4.6 clear each turn.
-        const isGlm47 = modelIdLower.includes("glm-4.7");
-        const clearThinking = !isGlm47;
-        extraParams = {
-          ...extraParams,
-          thinking: {
-            type: "enabled",
-            clear_thinking: clearThinking,
-          },
-        };
-        log.debug(
-          `auto-enabled thinking for ${modelKey}: type=enabled, clear_thinking=${clearThinking}`,
-        );
-      }
-    }
-  }
-  return extraParams;
+  return modelConfig?.params ? { ...modelConfig.params } : undefined;
 }
 function createStreamFnWithExtraParams(
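With the auto-enable gone, extra params now come only from explicit model config. A usage sketch (the `cfg` literal is illustrative and typed loosely; the shape follows the tests above):

```ts
import { resolveExtraParams } from "./pi-embedded-runner.js";

// Params are looked up under the exact "provider/modelId" key and
// shallow-copied; Z.AI thinking is now opt-in via config, not auto-enabled.
const extraParams = resolveExtraParams({
  cfg: {
    agents: {
      defaults: {
        models: {
          "zai/glm-4.7": {
            params: { thinking: { type: "enabled", clear_thinking: false } },
          },
        },
      },
    },
  },
  provider: "zai",
  modelId: "glm-4.7",
});
// => { thinking: { type: "enabled", clear_thinking: false } }
```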
@@ -106,13 +63,11 @@ export function applyExtraParamsToAgent(
   cfg: ClawdbotConfig | undefined,
   provider: string,
   modelId: string,
-  thinkLevel?: string,
 ): void {
   const extraParams = resolveExtraParams({
     cfg,
     provider,
     modelId,
-    thinkLevel,
   });
   const wrappedStreamFn = createStreamFnWithExtraParams(agent.streamFn, extraParams);
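`createStreamFnWithExtraParams` itself is outside this hunk; conceptually it wraps the agent's stream function so every call carries the resolved params, along the lines of this sketch (the `StreamFn` shape here is assumed for illustration; the real one comes from pi-agent-core):

```ts
// Assumed minimal stream-function shape, for illustration only.
type StreamFn = (
  model: unknown,
  context: unknown,
  options?: Record<string, unknown>,
) => AsyncIterable<unknown>;

// Wrap a stream function so each call forwards the resolved extra params.
function withExtraParams(streamFn: StreamFn, extraParams?: Record<string, unknown>): StreamFn {
  if (!extraParams) return streamFn;
  return (model, context, options) => streamFn(model, context, { ...options, extraParams });
}
```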


@@ -330,7 +330,6 @@ export async function runEmbeddedAttempt(
     params.config,
     params.provider,
     params.modelId,
-    params.thinkLevel,
   );
   try {


@@ -1,5 +1,10 @@
 import { describe, expect, it } from "vitest";
-import { listThinkingLevels, normalizeReasoningLevel, normalizeThinkLevel } from "./thinking.js";
+import {
+  listThinkingLevelLabels,
+  listThinkingLevels,
+  normalizeReasoningLevel,
+  normalizeThinkLevel,
+} from "./thinking.js";
 describe("normalizeThinkLevel", () => {
   it("accepts mid as medium", () => {
@@ -9,6 +14,10 @@ describe("normalizeThinkLevel", () => {
   it("accepts xhigh", () => {
     expect(normalizeThinkLevel("xhigh")).toBe("xhigh");
   });
+  it("accepts on as low", () => {
+    expect(normalizeThinkLevel("on")).toBe("low");
+  });
 });
 describe("listThinkingLevels", () => {
@@ -25,6 +34,17 @@ describe("listThinkingLevels", () => {
   });
 });
+describe("listThinkingLevelLabels", () => {
+  it("returns on/off for ZAI", () => {
+    expect(listThinkingLevelLabels("zai", "glm-4.7")).toEqual(["off", "on"]);
+  });
+  it("returns full levels for non-ZAI", () => {
+    expect(listThinkingLevelLabels("openai", "gpt-4.1-mini")).toContain("low");
+    expect(listThinkingLevelLabels("openai", "gpt-4.1-mini")).not.toContain("on");
+  });
+});
 describe("normalizeReasoningLevel", () => {
   it("accepts on/off", () => {
     expect(normalizeReasoningLevel("on")).toBe("on");


@@ -4,6 +4,17 @@ export type ElevatedLevel = "off" | "on";
 export type ReasoningLevel = "off" | "on" | "stream";
 export type UsageDisplayLevel = "off" | "on";
+function normalizeProviderId(provider?: string | null): string {
+  if (!provider) return "";
+  const normalized = provider.trim().toLowerCase();
+  if (normalized === "z.ai" || normalized === "z-ai") return "zai";
+  return normalized;
+}
+export function isBinaryThinkingProvider(provider?: string | null): boolean {
+  return normalizeProviderId(provider) === "zai";
+}
 export const XHIGH_MODEL_REFS = [
   "openai/gpt-5.2",
   "openai-codex/gpt-5.2-codex",
@@ -22,6 +33,7 @@ export function normalizeThinkLevel(raw?: string | null): ThinkLevel | undefined
   if (!raw) return undefined;
   const key = raw.toLowerCase();
   if (["off"].includes(key)) return "off";
+  if (["on", "enable", "enabled"].includes(key)) return "low";
   if (["min", "minimal"].includes(key)) return "minimal";
   if (["low", "thinkhard", "think-hard", "think_hard"].includes(key)) return "low";
   if (["mid", "med", "medium", "thinkharder", "think-harder", "harder"].includes(key))
@@ -49,12 +61,20 @@ export function listThinkingLevels(provider?: string | null, model?: string | null)
   return levels;
 }
+export function listThinkingLevelLabels(
+  provider?: string | null,
+  model?: string | null,
+): string[] {
+  if (isBinaryThinkingProvider(provider)) return ["off", "on"];
+  return listThinkingLevels(provider, model);
+}
 export function formatThinkingLevels(
   provider?: string | null,
   model?: string | null,
   separator = ", ",
 ): string {
-  return listThinkingLevels(provider, model).join(separator);
+  return listThinkingLevelLabels(provider, model).join(separator);
 }
 export function formatXHighModelHint(): string {
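Taken together, Z.AI gets a binary surface while the stored level stays in the shared enum. A quick check of the new helpers (expected values match the tests above):

```ts
import {
  isBinaryThinkingProvider,
  listThinkingLevelLabels,
  normalizeThinkLevel,
} from "./thinking.js";

console.log(isBinaryThinkingProvider("Z.AI")); // true; "z.ai" and "z-ai" normalize to "zai"
console.log(listThinkingLevelLabels("zai", "glm-4.7")); // ["off", "on"]
console.log(normalizeThinkLevel("on")); // "low" (binary "on" stored as the lowest real level)
console.log(normalizeThinkLevel("enabled")); // "low"
```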


@@ -1,5 +1,5 @@
 import type { SlashCommand } from "@mariozechner/pi-tui";
-import { formatThinkingLevels, listThinkingLevels } from "../auto-reply/thinking.js";
+import { formatThinkingLevels, listThinkingLevelLabels } from "../auto-reply/thinking.js";
 const VERBOSE_LEVELS = ["on", "off"];
 const REASONING_LEVELS = ["on", "off"];
@@ -33,7 +33,7 @@ export function parseCommand(input: string): ParsedCommand {
 }
 export function getSlashCommands(options: SlashCommandOptions = {}): SlashCommand[] {
-  const thinkLevels = listThinkingLevels(options.provider, options.model);
+  const thinkLevels = listThinkingLevelLabels(options.provider, options.model);
   return [
     { name: "help", description: "Show slash command help" },
     { name: "status", description: "Show gateway status summary" },


@@ -282,6 +282,7 @@ export type GatewaySessionRow = {
   outputTokens?: number;
   totalTokens?: number;
   model?: string;
+  modelProvider?: string;
   contextTokens?: number;
 };
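The new `modelProvider` field is what lets the sessions UI decide between binary and full thinking levels per row. An illustrative subset (import path is illustrative, and `thinkingLevel` is assumed to live on this row type, as the UI code below suggests):

```ts
import type { GatewaySessionRow } from "./types.js"; // path illustrative

const row: Partial<GatewaySessionRow> = {
  model: "glm-4.7",
  modelProvider: "zai", // drives the binary on/off dropdown below
  // a stored thinkingLevel of "low" is displayed as "on" for binary providers
};
```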


@@ -32,6 +32,7 @@ export type SessionsProps = {
 };
 const THINK_LEVELS = ["", "off", "minimal", "low", "medium", "high"] as const;
+const BINARY_THINK_LEVELS = ["", "off", "on"] as const;
 const VERBOSE_LEVELS = [
   { value: "", label: "inherit" },
   { value: "off", label: "off (explicit)" },
@@ -39,6 +40,34 @@ const VERBOSE_LEVELS = [
 ] as const;
 const REASONING_LEVELS = ["", "off", "on", "stream"] as const;
+function normalizeProviderId(provider?: string | null): string {
+  if (!provider) return "";
+  const normalized = provider.trim().toLowerCase();
+  if (normalized === "z.ai" || normalized === "z-ai") return "zai";
+  return normalized;
+}
+function isBinaryThinkingProvider(provider?: string | null): boolean {
+  return normalizeProviderId(provider) === "zai";
+}
+function resolveThinkLevelOptions(provider?: string | null): readonly string[] {
+  return isBinaryThinkingProvider(provider) ? BINARY_THINK_LEVELS : THINK_LEVELS;
+}
+function resolveThinkLevelDisplay(value: string, isBinary: boolean): string {
+  if (!isBinary) return value;
+  if (!value || value === "off") return value;
+  return "on";
+}
+function resolveThinkLevelPatchValue(value: string, isBinary: boolean): string | null {
+  if (!value) return null;
+  if (!isBinary) return value;
+  if (value === "on") return "low";
+  return value;
+}
 export function renderSessions(props: SessionsProps) {
   const rows = props.result?.sessions ?? [];
   return html`
@@ -143,7 +172,10 @@ function renderRow(
   onPatch: SessionsProps["onPatch"],
 ) {
   const updated = row.updatedAt ? formatAgo(row.updatedAt) : "n/a";
-  const thinking = row.thinkingLevel ?? "";
+  const rawThinking = row.thinkingLevel ?? "";
+  const isBinaryThinking = isBinaryThinkingProvider(row.modelProvider);
+  const thinking = resolveThinkLevelDisplay(rawThinking, isBinaryThinking);
+  const thinkLevels = resolveThinkLevelOptions(row.modelProvider);
   const verbose = row.verboseLevel ?? "";
   const reasoning = row.reasoningLevel ?? "";
   const displayName = row.displayName ?? row.key;
@@ -166,10 +198,12 @@
   .value=${thinking}
   @change=${(e: Event) => {
     const value = (e.target as HTMLSelectElement).value;
-    onPatch(row.key, { thinkingLevel: value || null });
+    onPatch(row.key, {
+      thinkingLevel: resolveThinkLevelPatchValue(value, isBinaryThinking),
+    });
   }}
 >
-  ${THINK_LEVELS.map((level) =>
+  ${thinkLevels.map((level) =>
     html`<option value=${level}>${level || "inherit"}</option>`,
   )}
 </select>
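Round-tripping a Z.AI session through these helpers (values follow directly from the code above; the calls are shown as they would run inside this module):

```ts
// Stored "low" renders as "on"; choosing "on" patches "low" back to the
// gateway, and clearing the select patches null (inherit).
const isBinary = isBinaryThinkingProvider("zai"); // true
resolveThinkLevelDisplay("low", isBinary); // "on"
resolveThinkLevelDisplay("off", isBinary); // "off"
resolveThinkLevelPatchValue("on", isBinary); // "low"
resolveThinkLevelPatchValue("", isBinary); // null
```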