Mirror of https://github.com/openclaw/openclaw.git (synced 2026-02-09 05:19:32 +08:00)
feat(sanitize): enhance context overflow error handling in user-facing text
- Added tests to ensure proper sanitization of context overflow errors.
- Introduced a new function to determine when to rewrite context overflow messages.
- Updated the sanitization logic to improve user experience by providing clearer error messages while preserving conversational context.
@@ -23,6 +23,29 @@ describe("sanitizeUserFacingText", () => {
     );
   });
 
+  it("sanitizes direct context-overflow errors", () => {
+    expect(
+      sanitizeUserFacingText(
+        "Context overflow: prompt too large for the model. Try again with less input or a larger-context model.",
+      ),
+    ).toContain("Context overflow: prompt too large for the model.");
+    expect(sanitizeUserFacingText("Request size exceeds model context window")).toContain(
+      "Context overflow: prompt too large for the model.",
+    );
+  });
+
+  it("does not rewrite conversational mentions of context overflow", () => {
+    const text =
+      "nah it failed, hit a context overflow. the prompt was too large for the model. want me to retry it with a different approach?";
+    expect(sanitizeUserFacingText(text)).toBe(text);
+  });
+
+  it("does not rewrite technical summaries that mention context overflow", () => {
+    const text =
+      "Problem: When a subagent reads a very large file, it can exceed the model context window. Auto-compaction cannot help in that case.";
+    expect(sanitizeUserFacingText(text)).toBe(text);
+  });
+
   it("sanitizes raw API error payloads", () => {
     const raw = '{"type":"error","error":{"message":"Something exploded","type":"server_error"}}';
     expect(sanitizeUserFacingText(raw)).toBe("LLM error server_error: Something exploded");
@@ -67,6 +67,8 @@ const ERROR_PAYLOAD_PREFIX_RE =
 const FINAL_TAG_RE = /<\s*\/?\s*final\s*>/gi;
 const ERROR_PREFIX_RE =
   /^(?:error|api\s*error|openai\s*error|anthropic\s*error|gateway\s*error|request failed|failed|exception)[:\s-]+/i;
+const CONTEXT_OVERFLOW_ERROR_HEAD_RE =
+  /^(?:context overflow:|request_too_large\b|request size exceeds\b|request exceeds the maximum size\b|context length exceeded\b|maximum context length\b|prompt is too long\b|exceeds model context window\b)/i;
 const HTTP_STATUS_PREFIX_RE = /^(?:http\s*)?(\d{3})\s+(.+)$/i;
 const HTTP_ERROR_HINTS = [
   "error",
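A note on the new regex: CONTEXT_OVERFLOW_ERROR_HEAD_RE is anchored with ^, so it only fires when overflow wording is the head of the string. That anchoring is what lets the sanitizer tell a provider error apart from prose that merely mentions an overflow; a quick check (regex copied from the hunk above):

const CONTEXT_OVERFLOW_ERROR_HEAD_RE =
  /^(?:context overflow:|request_too_large\b|request size exceeds\b|request exceeds the maximum size\b|context length exceeded\b|maximum context length\b|prompt is too long\b|exceeds model context window\b)/i;

// Error-shaped: an overflow phrase starts the string.
CONTEXT_OVERFLOW_ERROR_HEAD_RE.test("Request size exceeds model context window"); // true

// Conversational: the overflow mention sits mid-sentence, so the ^ anchor rejects it.
CONTEXT_OVERFLOW_ERROR_HEAD_RE.test(
  "nah it failed, hit a context overflow. the prompt was too large for the model.",
); // false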
@@ -135,6 +137,18 @@ function isLikelyHttpErrorText(raw: string): boolean {
   return HTTP_ERROR_HINTS.some((hint) => message.includes(hint));
 }
 
+function shouldRewriteContextOverflowText(raw: string): boolean {
+  if (!isContextOverflowError(raw)) {
+    return false;
+  }
+  return (
+    isRawApiErrorPayload(raw) ||
+    isLikelyHttpErrorText(raw) ||
+    ERROR_PREFIX_RE.test(raw) ||
+    CONTEXT_OVERFLOW_ERROR_HEAD_RE.test(raw)
+  );
+}
+
 type ErrorPayload = Record<string, unknown>;
 
 function isErrorPayloadObject(payload: unknown): payload is ErrorPayload {
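The gate above delegates detection to isContextOverflowError, which this diff does not touch or show. A minimal sketch of how the pieces interact, assuming (the stub below is hypothetical, not the repo's actual detector) that isContextOverflowError matches overflow wording anywhere in the string — which is exactly why conversational mentions pass it and the error-shape checks are needed:

// Hypothetical stand-in for the detector the diff calls but does not show.
// Assumed to match overflow wording anywhere in the string.
function isContextOverflowError(raw: string): boolean {
  return /context overflow|context (?:length|window)|prompt is too long/i.test(raw);
}

shouldRewriteContextOverflowText("Context overflow: prompt too large for the model.");
// => true: the detector fires and CONTEXT_OVERFLOW_ERROR_HEAD_RE matches at the head.

shouldRewriteContextOverflowText(
  "nah it failed, hit a context overflow. want me to retry it with a different approach?",
);
// => false: the detector fires, but the text is not error-shaped — it is not a raw
// payload, not HTTP error text, and matches neither ERROR_PREFIX_RE nor the head regex.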
@@ -403,7 +417,7 @@ export function sanitizeUserFacingText(text: string): string {
     );
   }
 
-  if (isContextOverflowError(trimmed)) {
+  if (shouldRewriteContextOverflowText(trimmed)) {
     return (
       "Context overflow: prompt too large for the model. " +
       "Try again with less input or a larger-context model."
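Net effect, per the new tests: sanitizeUserFacingText now rewrites only error-shaped overflow text to the canonical message, and leaves conversational or summary prose untouched. Roughly:

sanitizeUserFacingText("Request size exceeds model context window");
// => contains "Context overflow: prompt too large for the model."

const chat =
  "nah it failed, hit a context overflow. the prompt was too large for the model. want me to retry it with a different approach?";
sanitizeUserFacingText(chat);
// => chat, returned unchanged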