feat(sanitize): enhance context overflow error handling in user-facing text

- Added tests covering sanitization of direct context-overflow errors and passthrough of conversational mentions.
- Introduced shouldRewriteContextOverflowText to decide when a context-overflow message should be rewritten.
- Updated the sanitization logic so error-shaped overflow text becomes a clear, actionable message while conversational context is preserved (see the sketch below).
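
For illustration, the intended behavior looks like this (a sketch, not part of the diff; the import path is an assumption):

import { sanitizeUserFacingText } from "./sanitize"; // path is an assumption

// Error-shaped overflow text is rewritten to the canonical message.
sanitizeUserFacingText("context length exceeded");
// => "Context overflow: prompt too large for the model. Try again with less input or a larger-context model."

// Conversational mentions pass through unchanged.
const reply = "nah it failed, hit a context overflow. want me to retry?";
sanitizeUserFacingText(reply) === reply; // true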
Author: Tyler Yust
Date:   2026-02-07 17:07:12 -08:00
parent 980f788731
commit ea423bbbfd
2 changed files with 38 additions and 1 deletion

@@ -23,6 +23,29 @@ describe("sanitizeUserFacingText", () => {
     );
   });
 
+  it("sanitizes direct context-overflow errors", () => {
+    expect(
+      sanitizeUserFacingText(
+        "Context overflow: prompt too large for the model. Try again with less input or a larger-context model.",
+      ),
+    ).toContain("Context overflow: prompt too large for the model.");
+    expect(sanitizeUserFacingText("Request size exceeds model context window")).toContain(
+      "Context overflow: prompt too large for the model.",
+    );
+  });
+
+  it("does not rewrite conversational mentions of context overflow", () => {
+    const text =
+      "nah it failed, hit a context overflow. the prompt was too large for the model. want me to retry it with a different approach?";
+    expect(sanitizeUserFacingText(text)).toBe(text);
+  });
+
+  it("does not rewrite technical summaries that mention context overflow", () => {
+    const text =
+      "Problem: When a subagent reads a very large file, it can exceed the model context window. Auto-compaction cannot help in that case.";
+    expect(sanitizeUserFacingText(text)).toBe(text);
+  });
+
   it("sanitizes raw API error payloads", () => {
     const raw = '{"type":"error","error":{"message":"Something exploded","type":"server_error"}}';
     expect(sanitizeUserFacingText(raw)).toBe("LLM error server_error: Something exploded");

@@ -67,6 +67,8 @@ const ERROR_PAYLOAD_PREFIX_RE =
 const FINAL_TAG_RE = /<\s*\/?\s*final\s*>/gi;
 const ERROR_PREFIX_RE =
   /^(?:error|api\s*error|openai\s*error|anthropic\s*error|gateway\s*error|request failed|failed|exception)[:\s-]+/i;
+const CONTEXT_OVERFLOW_ERROR_HEAD_RE =
+  /^(?:context overflow:|request_too_large\b|request size exceeds\b|request exceeds the maximum size\b|context length exceeded\b|maximum context length\b|prompt is too long\b|exceeds model context window\b)/i;
 const HTTP_STATUS_PREFIX_RE = /^(?:http\s*)?(\d{3})\s+(.+)$/i;
 const HTTP_ERROR_HINTS = [
   "error",
@@ -135,6 +137,18 @@ function isLikelyHttpErrorText(raw: string): boolean {
   return HTTP_ERROR_HINTS.some((hint) => message.includes(hint));
 }
 
+function shouldRewriteContextOverflowText(raw: string): boolean {
+  if (!isContextOverflowError(raw)) {
+    return false;
+  }
+  return (
+    isRawApiErrorPayload(raw) ||
+    isLikelyHttpErrorText(raw) ||
+    ERROR_PREFIX_RE.test(raw) ||
+    CONTEXT_OVERFLOW_ERROR_HEAD_RE.test(raw)
+  );
+}
+
 type ErrorPayload = Record<string, unknown>;
 
 function isErrorPayloadObject(payload: unknown): payload is ErrorPayload {
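
shouldRewriteContextOverflowText gates on isContextOverflowError, which is defined elsewhere in this module and not shown in the diff. Purely as an assumption for readability, such a detector might look like:

// Hypothetical sketch; the real isContextOverflowError may match a
// different set of phrases. It answers "does this text mention context
// overflow at all?", while shouldRewriteContextOverflowText (above)
// decides whether the text is error-shaped enough to rewrite.
function isContextOverflowError(raw: string): boolean {
  return /context overflow|context window|context length|prompt is too long|request_too_large/i.test(raw);
}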
@@ -403,7 +417,7 @@ export function sanitizeUserFacingText(text: string): string {
     );
   }
-  if (isContextOverflowError(trimmed)) {
+  if (shouldRewriteContextOverflowText(trimmed)) {
     return (
       "Context overflow: prompt too large for the model. " +
       "Try again with less input or a larger-context model."