// Project Files
// src/strategies/GeminiThinkingStrategy.ts
import { BaseGeminiStrategy } from "./BaseGeminiStrategy";
import { GenerationContext } from "./ModelStrategy";
import { appendSignature, computeContentHash } from "../thought-signatures";
import { stableJsonStringify } from "../generator-utils";
// import { flattenToolCallsToText } from "../generator-utils";
export class GeminiThinkingStrategy extends BaseGeminiStrategy {
  /**
   * Persists the thought_signature observed on a streamed functionCall part.
   *
   * LM Studio's auto-continue tool-loop replays functionCall parts, and
   * gemini-3-pro-preview rejects replays that lack their signature with a 400.
   * The signature is keyed by a content hash derived from the tool name plus a
   * stable JSON rendering of its arguments, so the same call maps to the same
   * stored signature. Persistence is strictly best-effort: any failure here is
   * swallowed so generation is never interrupted.
   *
   * @param context  Generation context; supplies the working directory and debug flag.
   * @param safeName Sanitized tool name for the observed call.
   * @param args     Tool-call arguments (may be undefined).
   * @param sig      The thought_signature to persist; no-op when absent.
   */
  protected override async onObservedFunctionCallPart(context: GenerationContext, safeName: string, args: any, sig?: string): Promise<void> {
    if (!sig) return;
    try {
      const identity = `${String(safeName)}:${stableJsonStringify(args ?? {})}`;
      const contentHash = computeContentHash(identity);
      await appendSignature(context.ctl.getWorkingDirectory(), sig, contentHash);
      if (context.debugChunks) {
        console.info("[ThoughtSig] Captured tool-call signature for hash:", contentHash);
      }
    } catch {
      // Best-effort: a failed signature write must never break the stream.
    }
  }

  /**
   * Rewrites historical functionCall parts in the request as plain text.
   *
   * Rationale (gemini-3-pro-preview): the Gemini API requires thought_signature
   * on *request* functionCall parts for thinking models, and LM Studio's
   * tool-call loop may replay a prior assistant tool call without one, which
   * yields a 400. Flattening history to text sidesteps that while keeping
   * tools functional — the model can still emit new tool calls, and tool
   * results stay in history as functionResponse parts.
   *
   * Mutates `contents` in place; messages whose parts contain no tool call are
   * left untouched.
   */
  protected override modifyContents(contents: any[], caps: any) {
    if (!Array.isArray(contents)) return;
    for (const msg of contents) {
      if (!msg || typeof msg !== "object") continue;
      const parts = (msg as any).parts;
      if (!Array.isArray(parts)) continue;
      let sawToolCall = false;
      const flattened = parts.map((part: any) => {
        // Accept both camelCase and snake_case spellings of the field.
        const call = part?.functionCall || part?.function_call;
        if (!call || !call.name) return part;
        sawToolCall = true;
        const name = String(call.name);
        const rawArgs = (call as any).args;
        const argsJson = typeof rawArgs === "string" ? rawArgs : JSON.stringify(rawArgs ?? {});
        return { text: `[ToolCall] ${name} ${argsJson}` };
      });
      // Only reassign when something actually changed, preserving the
      // original array identity otherwise.
      if (sawToolCall) (msg as any).parts = flattened;
    }
  }

  /**
   * Injects a thinkingConfig into the request's generationConfig when the
   * model capabilities report thinking support.
   *
   * Always requests thought streaming (`includeThoughts: true`); the
   * configured thinkingLevel is attached only for models that advertise
   * discrete thinking levels. Existing generationConfig fields are preserved
   * via spread.
   */
  protected override modifyGenerationConfig(generateContent: any, context: GenerationContext, caps: any) {
    if (!caps.supportsThinking) return;
    const { pluginConfig } = context;
    const thinkingLevel = pluginConfig.get("thinkingLevel");
    const thinkingConfig: any = { includeThoughts: true };
    const levels = caps.thinking?.levels;
    if (levels && levels.length > 0) {
      thinkingConfig.thinkingLevel = thinkingLevel;
    }
    generateContent.generationConfig = {
      ...(generateContent.generationConfig || {}),
      thinkingConfig
    };
  }
}