// src/promptprocessor.ts
import {
text,
type Chat,
type ChatMessage,
type FileHandle,
type LLMDynamicHandle,
type PredictionProcessStatusController,
type PromptPreprocessorController,
} from "@lmstudio/sdk";
import * as MemoryService from "../memory/MemoryService";
import { FactReport } from "./services/FactCheckerService";
/**
 * Preprocesses the user message by determining a conversational focus topic,
 * selecting a context-injection strategy for any attached files, and — when
 * processed context is available — running the agentic RAG pipeline and
 * persisting memory/fact-report artifacts.
 *
 * @param ctl - SDK controller for this preprocessing pass (history, client, debug).
 * @param userMessage - The incoming user chat message.
 * @returns The user message (currently returned unmodified).
 */
export async function preprocess(ctl: PromptPreprocessorController, userMessage: ChatMessage) {
  const originalUserPrompt = userMessage.getText();
  const history = await ctl.pullHistory();
  history.append(userMessage);
  // Only non-image files participate in context selection.
  const newFiles = userMessage.getFiles(ctl.client).filter(f => f.type !== "image");
  // FIX: original passed `f.type !== "image"` (a bare expression; `f` was not in
  // scope) instead of a predicate function, which would not even compile.
  const files = history.getAllFiles(ctl.client).filter(f => f.type !== "image");
  // --- Step 1: Determine Focus Topic (CoT Start) ---
  const focusTopic = await determineFocusTopic(ctl, originalUserPrompt, history);
  // --- Step 2: Select Context Injection Strategy (Token Management) ---
  let finalProcessedContent: string | null = null;
  if (newFiles.length > 0 || files.length > 0) {
    // NOTE(review): `strategy` is computed but never consumed, and
    // `finalProcessedContent` is never assigned anywhere, so the Step 3 branch
    // below is currently unreachable. Presumably the chosen strategy should
    // drive content extraction here — TODO confirm intended wiring.
    const strategy = await chooseContextInjectionStrategy(ctl, originalUserPrompt, newFiles);
    void strategy; // keep the result referenced until the wiring above is confirmed
  }
  // --- Step 3: Memory Saving Hook & Fact Report Capture (CoT/Fact Check) ---
  if (finalProcessedContent) {
    // NOTE(review): `RAGEEngine`, `CompressedContextPayload`, and
    // `ctl.dbConnection` are not imported/declared in this file — confirm their
    // origin before enabling this path.
    const engine = new RAGEEngine(ctl.client, ctl.dbConnection);
    try {
      const simulatedContextPayload: CompressedContextPayload = {/*...*/} ;
      // This single call triggers the entire advanced pipeline: RAG -> Draft -> Fact Check (Tooling) -> Final Answer.
      await engine.executeAgenticFlow(originalUserPrompt, simulatedContextPayload);
    } catch (e) {
      ctl.debug(`[ERROR] Failed to execute full agentic flow: ${e}`);
    }
    // Save the final state of the process for debugging and auditing.
    const simulatedFactReport: FactReport = {/*...*/}; // Placeholder for actual report data
    MemoryService.saveMemory(focusTopic, `Processed context for "${originalUserPrompt}"`, files);
    MemoryService.saveReport(focusTopic, simulatedFactReport);
  }
  return userMessage;
}
/**
 * Infers the primary conversational focus for the current turn.
 * Currently a simple keyword heuristic over the raw prompt; the controller and
 * history parameters are accepted for future, richer analysis.
 */
async function determineFocusTopic(ctl: any, prompt: string, history: Chat): Promise<string> {
  const lowered = prompt.toLowerCase();
  const mentionsBudgeting = lowered.includes("budget") || lowered.includes("q3");
  if (mentionsBudgeting) {
    return "Q3 Budgeting";
  }
  // ... (Other focus topics) ...
  return "General Topic";
}
/**
 * Decides how attached-file context should be injected for this prompt.
 * Implementation remains the same: always opts for retrieval for now.
 */
async function chooseContextInjectionStrategy(ctl: any, originalUserPrompt: string, newFiles: Array<FileHandle>): Promise<string> {
  const chosenStrategy = "retrieval";
  return chosenStrategy;
}
// NOTE(review): redundant module marker — this file already has a named export
// (`preprocess`) above, so this line can be safely removed.
export {}