// src/promptPreprocessor.ts
/**
* Prompt preprocessor.
*
* On the first turn: prepends a hidden system block with tool-call rules so the
* model knows how to handle errors. On subsequent turns: passes through unchanged.
*
* The heavy lifting for error feedback is done by safe_impl() in toolsProvider,
* which wraps every tool and returns a structured { tool_error, error, hint } JSON
* object the model can read and retry from.
*/
import {
type ChatMessage,
type PromptPreprocessorController,
} from "@lmstudio/sdk";
/**
 * Hidden system prompt injected before the first user turn.
 *
 * Fix: the original literal contained mis-encoded (mojibake) bullet, dash,
 * and arrow characters that were sent verbatim to the model; they are
 * restored here to the intended "•", "—", and "→".
 */
const TOOL_RULES = `\
[System: Tool call rules]
• Output valid JSON only — no HTML, no markdown, no trailing commas.
• Booleans: use true/false, never "true"/"false".
• Numbers: plain numbers only, never quoted.
• Always include required parameters (check the tool description).
• For large file content, use multiple write_file calls — long JSON args get truncated.
• When a tool returns { "tool_error": true }, read the "error" and "hint" fields, correct the issue, and retry immediately.
== WRITE SAFETY ==
Before overwriting an existing file with write_file, or making a large replace_in_file substitution:
→ call diff_preview(path, mode="write"|"replace", ...) first
This shows exactly what will change without touching the file. Always do this when the user hasn't seen the current file content.
== GIT STAGING ==
git_stage accepts glob patterns — e.g. files="src/**/*.py" or files="*.ts *.js".
No need to enumerate every file explicitly.`;
/**
 * Prompt preprocessor entry point.
 *
 * On the first turn (empty history), injects TOOL_RULES as a system message
 * into the pulled history, then appends the user message and returns it
 * unchanged.
 *
 * @param ctl - Controller providing access to the chat history.
 * @param userMessage - The incoming user message for this turn.
 * @returns The user message, unmodified.
 */
export async function promptPreprocessor(
  ctl: PromptPreprocessorController,
  userMessage: ChatMessage,
): Promise<string | ChatMessage> {
  // NOTE(review): if pullHistory() returns a *copy* of the chat rather than a
  // live view, the append() calls below never reach the real context and the
  // injected rules are silently dropped — confirm against @lmstudio/sdk docs.
  const history = await ctl.pullHistory();
  // Detect first turn: history is empty before user sends anything
  const isFirstTurn = history.length === 0;
  if (isFirstTurn) {
    // Prepend the rules as a system message so they appear before the first user turn
    history.append("system", TOOL_RULES);
  }
  // NOTE(review): appending userMessage here *and* returning it below may
  // duplicate the user turn if the SDK also records the returned message —
  // verify; if confirmed, this append should be removed.
  history.append(userMessage);
  // Return the user message unchanged — we only modified the history context
  return userMessage;
}