// Forked from altra/high-perf-tools
// src/promptPreprocessor.ts
/**
* Prompt preprocessor.
*
* On the first turn: prepends a hidden system block with tool-call rules so the
* model knows how to handle errors. On subsequent turns: passes through unchanged.
*
* The heavy lifting for error feedback is done by safe_impl() in toolsProvider,
* which wraps every tool and returns a structured { tool_error, error, hint } JSON
* object the model can read and retry from.
*/
import {
type ChatMessage,
type PromptPreprocessorController,
} from "@lmstudio/sdk";
// Hidden preamble prepended to the user's FIRST message by promptPreprocessor
// below. NOTE: this string is runtime behavior — it is sent verbatim to the
// model, so any edit here (even whitespace) changes the instructions the model
// receives. Per the file header, the { "tool_error": true, "error", "hint" }
// shape it references is produced by safe_impl() in toolsProvider; the
// diff_preview/git_stage conventions presumably match those tools' actual
// signatures — verify against toolsProvider when changing them.
const TOOL_RULES = `\
[System: Tool call rules]
• Output valid JSON only — no HTML, no markdown, no trailing commas.
• Booleans: use true/false, never "true"/"false".
• Numbers: plain numbers only, never quoted.
• Always include required parameters (check the tool description).
• For large file content, use multiple write_file calls — long JSON args get truncated.
• When a tool returns { "tool_error": true }, read the "error" and "hint" fields, correct the issue, and retry immediately.
== WRITE SAFETY ==
Before overwriting an existing file with write_file, or making a large replace_in_file substitution:
→ call diff_preview(path, mode="write"|"replace", ...) first
This shows exactly what will change without touching the file. Always do this when the user hasn't seen the current file content.
== GIT STAGING ==
git_stage accepts glob patterns — e.g. files="src/**/*.py" or files="*.ts *.js".
No need to enumerate every file explicitly.`;
/**
 * Preprocesses each incoming user prompt before it reaches the model.
 *
 * Turn one (empty pulled history): returns a plain string made of
 * TOOL_RULES, a blank line, and the user's text — the returned string
 * replaces the user message content. Every later turn: returns the
 * message object untouched.
 *
 * @param ctl - Controller used to pull the conversation history.
 * @param userMessage - The user's message for the current turn.
 * @returns Rules-prefixed string on the first turn, otherwise `userMessage`.
 */
export async function promptPreprocessor(
  ctl: PromptPreprocessorController,
  userMessage: ChatMessage,
): Promise<string | ChatMessage> {
  const priorTurns = await ctl.pullHistory();
  const isFirstTurn = priorTurns.length === 0;

  // Appending to the pulled history only mutates a local copy that is never
  // sent back to LM Studio, so the return value is the sole injection
  // channel: prepend the rules directly to the user's text on turn one.
  if (isFirstTurn) {
    return [TOOL_RULES, userMessage.getText()].join("\n\n");
  }
  return userMessage;
}