// src/toolsProvider.ts
/**
* Codebase Plugin — toolsProvider
*
* Tools:
* Repository · scan_repository, trace_symbol, find_impact_zone
* Memory · save_project_memory, load_project_context, list_projects
* Editing · read_file, write_file, apply_patch
* Verification · run_verification_loop
* Planning · generate_patch_plan
*/
import { text, tool, type Tool, type ToolsProvider } from "@lmstudio/sdk";
import { exec } from "child_process";
import { mkdir, readFile, readdir, stat, writeFile } from "fs/promises";
import { homedir } from "os";
import { basename, dirname, extname, isAbsolute, join, relative, resolve } from "path";
import { promisify } from "util";
import { z } from "zod";
import { pluginConfigSchematics } from "./config";
import { deleteMemoryNote, getDb, getMemoryNotes, getProjectByRoot, insertMemoryNote, listProjects, upsertProject } from "./db";
// Promise-returning wrapper around child_process.exec.
const execAsync = promisify(exec);

/** Serialize any value as human-readable, 2-space-indented JSON. */
const json = (obj: unknown): string => JSON.stringify(obj, null, 2);
/**
 * Wrap a tool implementation so any thrown error is converted into a
 * structured JSON error payload (with a retry hint for the model) instead
 * of propagating to the caller.
 */
function safe_impl<T extends Record<string, unknown>>(
  name: string,
  fn: (params: T) => Promise<string>,
): (params: T) => Promise<string> {
  return async (params: T) => {
    try {
      return await fn(params);
    } catch (caught: unknown) {
      const message = caught instanceof Error ? caught.message : String(caught);
      const payload = {
        tool_error: true,
        tool: name,
        error: message,
        hint: "Read the error, fix the parameter causing the issue, and retry.",
      };
      return JSON.stringify(payload, null, 2);
    }
  };
}
// ---------------------------------------------------------------------------
// Repository scanning helpers
// ---------------------------------------------------------------------------
// Directory names never descended into during a repository walk: VCS
// metadata, dependency trees, build output, caches, and virtualenvs.
const IGNORE_DIRS = new Set([
"node_modules", ".git", ".svn", "dist", "build", "out", "__pycache__",
".next", ".nuxt", ".cache", "coverage", ".nyc_output", "vendor",
"venv", ".venv", "env", ".env", "eggs", ".eggs", "target",
]);
// Framework detection table. detectFramework() returns the FIRST entry whose
// signal substring appears in the file paths or root listing, so more specific
// frameworks (Next.js) are listed before generic ones (React).
const FRAMEWORK_SIGNALS: Array<{ framework: string; language: string; signals: string[] }> = [
{ framework: "Next.js", language: "TypeScript", signals: ["next.config", "pages/", "app/"] },
{ framework: "React", language: "TypeScript", signals: ["react", "jsx", "tsx"] },
{ framework: "Vue", language: "TypeScript", signals: ["vue.config", ".vue"] },
{ framework: "Svelte", language: "TypeScript", signals: ["svelte.config", ".svelte"] },
{ framework: "Express", language: "Node.js", signals: ["express", "app.listen", "router.get"] },
{ framework: "FastAPI", language: "Python", signals: ["fastapi", "uvicorn", "@app.get"] },
{ framework: "Django", language: "Python", signals: ["django", "manage.py", "settings.py"] },
{ framework: "Flask", language: "Python", signals: ["flask", "@app.route"] },
{ framework: "Rust/Cargo", language: "Rust", signals: ["Cargo.toml", "fn main()"] },
{ framework: "Go module", language: "Go", signals: ["go.mod", "func main()"] },
{ framework: "Maven", language: "Java", signals: ["pom.xml", "src/main/java"] },
{ framework: "Gradle", language: "Java", signals: ["build.gradle", "src/main/java"] },
];
// File basenames treated as likely program entrypoints across ecosystems.
const ENTRYPOINT_NAMES = new Set([
"index.ts", "index.js", "main.ts", "main.js", "app.ts", "app.js",
"server.ts", "server.js", "main.py", "app.py", "manage.py",
"main.go", "main.rs", "Main.java", "Program.cs",
]);
// Extensions counted as source code during repository walks.
const SOURCE_EXTENSIONS = new Set([
".ts", ".tsx", ".js", ".jsx", ".py", ".go", ".rs",
".java", ".cs", ".cpp", ".c", ".h", ".hpp", ".rb", ".php",
]);
// One scanned source file, as produced by walkDir().
interface FileEntry {
path: string; // relative to project root
size: number; // bytes, from fs.stat
lines: number; // newline-split line count (0 if unreadable)
ext: string; // extension including the leading dot, e.g. ".ts"
}
/**
 * Recursively collect source files under `root`, up to `maxDepth` levels deep.
 * Skips dotted names (except .github) and IGNORE_DIRS; only files whose
 * extension is in SOURCE_EXTENSIONS are recorded. Each file is read once to
 * count lines (0 if unreadable). Unreadable directories are skipped silently.
 */
async function walkDir(root: string, maxDepth = 6): Promise<FileEntry[]> {
  const collected: FileEntry[] = [];

  const visit = async (dir: string, depth: number): Promise<void> => {
    if (depth > maxDepth) return;
    let names: string[];
    try {
      names = await readdir(dir);
    } catch {
      return; // unreadable directory — skip
    }
    // Process directory entries concurrently.
    await Promise.all(names.map(async (entryName) => {
      // Skip dotfiles, with .github as the single allowed exception.
      if (entryName.startsWith(".") && entryName !== ".github") return;
      const fullPath = join(dir, entryName);
      let info;
      try {
        info = await stat(fullPath);
      } catch {
        return; // broken symlink or race — skip
      }
      if (info.isDirectory()) {
        if (!IGNORE_DIRS.has(entryName)) await visit(fullPath, depth + 1);
        return;
      }
      if (!info.isFile() || !SOURCE_EXTENSIONS.has(extname(entryName))) return;
      let lineCount = 0;
      try {
        lineCount = (await readFile(fullPath, "utf8")).split("\n").length;
      } catch {
        // unreadable file — keep the entry with a 0 line count
      }
      collected.push({
        path: relative(root, fullPath),
        size: info.size,
        lines: lineCount,
        ext: extname(entryName),
      });
    }));
  };

  await visit(root, 0);
  return collected;
}
/**
 * Guess the project's framework and primary language.
 * Pass 1: return the first FRAMEWORK_SIGNALS entry whose signal substring
 * appears anywhere in the (lowercased) file paths or root-directory listing.
 * Fallback: framework "unknown", language inferred from the most common
 * source extension.
 */
function detectFramework(files: FileEntry[], rootFiles: string[]): { framework: string; language: string } {
  const haystack = files
    .map((f) => f.path)
    .concat(rootFiles)
    .join(" ")
    .toLowerCase();

  const hit = FRAMEWORK_SIGNALS.find(({ signals }) =>
    signals.some((sig) => haystack.includes(sig.toLowerCase())),
  );
  if (hit) return { framework: hit.framework, language: hit.language };

  // No framework signal matched — pick the extension with the highest count
  // (first-seen wins ties, matching a stable descending sort).
  const counts = new Map<string, number>();
  for (const entry of files) {
    counts.set(entry.ext, (counts.get(entry.ext) ?? 0) + 1);
  }
  let topExt: string | undefined;
  let topCount = -1;
  for (const [ext, count] of counts) {
    if (count > topCount) {
      topExt = ext;
      topCount = count;
    }
  }

  const langMap: Record<string, string> = {
    ".ts": "TypeScript", ".tsx": "TypeScript", ".js": "JavaScript", ".jsx": "JavaScript",
    ".py": "Python", ".go": "Go", ".rs": "Rust", ".java": "Java",
    ".cs": "C#", ".cpp": "C++", ".c": "C", ".rb": "Ruby",
  };
  return {
    framework: "unknown",
    language: topExt !== undefined ? (langMap[topExt] ?? topExt) : "unknown",
  };
}
/**
 * Return up to 10 recognized entrypoint files (index.ts, main.py, ...),
 * shallowest paths first so the most likely "real" entrypoint leads the list.
 */
function findEntrypoints(files: FileEntry[]): string[] {
  // Count depth across BOTH separators: path.relative() produces
  // backslash-separated paths on Windows, which a plain split("/") would
  // treat as depth 1 and sort incorrectly.
  const depth = (p: string) => p.split(/[\\/]/).length;
  return files
    .filter((f) => ENTRYPOINT_NAMES.has(basename(f.path)))
    .sort((a, b) => depth(a.path) - depth(b.path))
    .slice(0, 10)
    .map((f) => f.path);
}
// Group files into logical modules by directory
/**
 * Count source files per directory and return the 30 busiest directories,
 * keyed by relative directory path ("." for root-level files), largest first.
 */
function buildModuleMap(files: FileEntry[]): Record<string, number> {
  const counts = new Map<string, number>();
  for (const entry of files) {
    const key = dirname(entry.path) || ".";
    counts.set(key, (counts.get(key) ?? 0) + 1);
  }
  const ranked = [...counts.entries()]
    .sort((left, right) => right[1] - left[1])
    .slice(0, 30);
  return Object.fromEntries(ranked);
}
// ---------------------------------------------------------------------------
// Shell execution helper
// ---------------------------------------------------------------------------
/**
 * Execute a shell command in `cwd` and return capped output: stdout limited
 * to 8 kB, stderr to 4 kB, with truncation flags. Never throws — a non-zero
 * exit (or timeout) is captured from exec's rejection, whose error object
 * carries the partial stdout/stderr and exit code.
 */
async function runCommand(
  cmd: string,
  cwd: string,
  timeoutMs = 60_000,
): Promise<{ stdout: string; stdoutTruncated: boolean; stderr: string; stderrTruncated: boolean; exitCode: number }> {
  const STDOUT_CAP = 8_000;
  const STDERR_CAP = 4_000;
  const shape = (out: string, errText: string, exitCode: number) => ({
    stdout: out.slice(0, STDOUT_CAP),
    stdoutTruncated: out.length > STDOUT_CAP,
    stderr: errText.slice(0, STDERR_CAP),
    stderrTruncated: errText.length > STDERR_CAP,
    exitCode,
  });
  try {
    const { stdout, stderr } = await execAsync(cmd, { cwd, timeout: timeoutMs, maxBuffer: 2 * 1024 * 1024 });
    return shape(stdout, stderr, 0);
  } catch (caught: unknown) {
    const failure = caught as { stdout?: string; stderr?: string; code?: number };
    return shape(failure.stdout ?? "", failure.stderr ?? String(caught), failure.code ?? 1);
  }
}
// ---------------------------------------------------------------------------
// toolsProvider
// ---------------------------------------------------------------------------
// Provider entry point: builds the session's tool list.
export const toolsProvider: ToolsProvider = async (ctl) => {
const cfg = ctl.getPluginConfig(pluginConfigSchematics);
// Config accessors are thunks so each tool call re-reads cfg at call time;
// blank string settings fall back to defaults.
const dataPath = () => cfg.get("dataPath").trim() || join(homedir(), "codebase-memory");
const defaultRoot = () => cfg.get("projectRoot").trim();
const testCmd = () => cfg.get("testCommand").trim() || "npm test";
const buildCmd = () => cfg.get("buildCommand").trim() || "npm run build";
const lintCmd = () => cfg.get("lintCommand").trim();
const maxLines = () => cfg.get("maxFileReadLines");
const tools: Tool[] = [
// -----------------------------------------------------------------------
// REPOSITORY TOOLS
// -----------------------------------------------------------------------
tool({
  name: "scan_repository",
  description: text`
Walk a project directory, detect framework and language, map source files by
module, find entrypoints, and persist the project to memory. Returns a structured
repository map with file counts, module breakdown, and detected conventions.
Call this FIRST in any new session before touching any files.
Do NOT call repeatedly — once per session is enough unless the project structure changed.
`,
  parameters: {
    root: z.string().optional()
      .describe("Absolute path to project root. Defaults to config projectRoot."),
    detectConventions: z.boolean().default(true)
      .describe("Read package.json / pyproject.toml / Cargo.toml to detect scripts and conventions"),
  },
  // Scans the tree, detects framework/language/entrypoints, and upserts the
  // project row so memory tools can attach notes to it.
  implementation: safe_impl("scan_repository", async ({ root, detectConventions }) => {
    // Validate BEFORE resolve(): resolve("") returns the CWD, so checking the
    // resolved value could never catch a missing root.
    const rootInput = (root ?? defaultRoot()).trim();
    if (!rootInput) throw new Error("No project root. Pass root parameter or set projectRoot in config.");
    const projectRoot = resolve(rootInput);
    const files = await walkDir(projectRoot);
    let rootFiles: string[] = [];
    try { rootFiles = await readdir(projectRoot); } catch { /* ignore */ }
    const { framework, language } = detectFramework(files, rootFiles);
    const entrypoints = findEntrypoints(files);
    const moduleMap = buildModuleMap(files);
    // Detect conventions from manifest files present at the root.
    const conventions: string[] = [];
    if (detectConventions) {
      for (const manifest of ["package.json", "pyproject.toml", "Cargo.toml", "go.mod"]) {
        try {
          const content = await readFile(join(projectRoot, manifest), "utf8");
          if (manifest === "package.json") {
            const pkg = JSON.parse(content) as { scripts?: Record<string, string> };
            for (const [k, v] of Object.entries(pkg.scripts ?? {})) {
              conventions.push(`npm run ${k}: ${v}`);
            }
          } else {
            conventions.push(`${manifest} present`);
          }
        } catch { /* file not present */ }
      }
    }
    // Persist to project memory so future sessions can reload this context.
    const db = getDb(dataPath());
    const name = basename(projectRoot);
    const projectId = upsertProject(db, projectRoot, name, framework, language, entrypoints, conventions);
    return json({
      projectId,
      root: projectRoot,
      name,
      framework,
      language,
      totalSourceFiles: files.length,
      totalLines: files.reduce((s, f) => s + f.lines, 0),
      entrypoints,
      moduleMap,
      conventions: conventions.slice(0, 20),
      // Sort a copy — .sort() mutates, and `files` was used above.
      largestFiles: [...files]
        .sort((a, b) => b.lines - a.lines)
        .slice(0, 10)
        .map(f => ({ path: f.path, lines: f.lines })),
    });
  }),
}),
tool({
  name: "trace_symbol",
  description: text`
Find the definition and all usages of a function, class, variable, or type
across the repository using grep. Returns file paths, line numbers, and
surrounding context for each match.
Call when you need to understand what a symbol does and what depends on it
before editing it.
`,
  parameters: {
    symbol: z.string().describe("Symbol name to search for (function, class, variable, type)"),
    root: z.string().optional()
      .describe("Absolute path to project root. Defaults to config projectRoot."),
    contextLines: z.coerce.number().int().min(0).max(10).default(3)
      .describe("Lines of context to show around each match"),
    fileExtensions: z.string().optional()
      .describe("Comma-separated extensions to limit search, e.g. 'ts,tsx'. Defaults to all source files."),
  },
  implementation: safe_impl("trace_symbol", async ({ symbol, root, contextLines, fileExtensions }) => {
    // Validate before resolve(): resolve("") returns the CWD, so the old
    // post-resolve truthiness check could never fire.
    const rootInput = (root ?? defaultRoot()).trim();
    if (!rootInput) throw new Error("No project root. Pass root or set config projectRoot.");
    const projectRoot = resolve(rootInput);
    const exts = fileExtensions
      ? fileExtensions.split(",").map(e => e.trim().replace(/^\./, ""))
      : ["ts", "tsx", "js", "jsx", "py", "go", "rs", "java", "cs", "cpp", "c"];
    const includePattern = exts.map(e => `--include="*.${e}"`).join(" ");
    // Single-quote the pattern for the shell, escaping embedded quotes, so a
    // symbol containing quotes/backticks/$ cannot break out of the argument
    // and inject shell commands.
    const quoted = `'${symbol.replace(/'/g, `'\\''`)}'`;
    const grepCmd = `grep -rn ${includePattern} -C ${contextLines} --color=never -- ${quoted} .`;
    const result = await runCommand(grepCmd, projectRoot, 15_000);
    const lines = (result.stdout + result.stderr).split("\n");
    const matches: Array<{ file: string; line: number; context: string }> = [];
    const fileSet = new Set<string>();
    // grep -n match lines look like "filepath:linenum:content"; context lines
    // use "-" separators and therefore do not match this pattern.
    const matchRe = /^([^:]+):(\d+):/;
    // Group output into blocks separated by grep's "--" dividers, recording
    // each block's file and the line number of its first actual match.
    let currentFile = "";
    let currentLine = 0;
    let block: string[] = [];
    const flush = () => {
      if (currentFile && block.length > 0) {
        matches.push({ file: currentFile, line: currentLine, context: block.join("\n") });
      }
      block = [];
      currentLine = 0;
    };
    for (const l of lines) {
      if (l === "--") {
        flush();
        continue;
      }
      const m = l.match(matchRe);
      if (m) {
        currentFile = m[1];
        fileSet.add(m[1]);
        if (currentLine === 0) currentLine = Number(m[2]);
      }
      block.push(l);
    }
    flush();
    return json({
      symbol,
      filesFound: fileSet.size,
      files: [...fileSet],
      matchCount: matches.length,
      matches: matches.slice(0, 30),
    });
  }),
}),
tool({
  name: "find_impact_zone",
  description: text`
Given a file path or symbol name, find all files that import or reference it.
Returns the set of files that would be affected by a change to the target.
Call before editing any file to understand the blast radius.
Do NOT skip this — editing without knowing impact causes regressions.
`,
  parameters: {
    target: z.string()
      .describe("File path (relative to root) or symbol name to find dependents of"),
    root: z.string().optional()
      .describe("Absolute path to project root. Defaults to config projectRoot."),
    fileExtensions: z.string().optional()
      .describe("Comma-separated extensions, e.g. 'ts,tsx'. Defaults to all source files."),
  },
  implementation: safe_impl("find_impact_zone", async ({ target, root, fileExtensions }) => {
    // Validate before resolve(): resolve("") returns the CWD, so the old
    // post-resolve truthiness check could never fire.
    const rootInput = (root ?? defaultRoot()).trim();
    if (!rootInput) throw new Error("No project root. Pass root or set config projectRoot.");
    const projectRoot = resolve(rootInput);
    const exts = fileExtensions
      ? fileExtensions.split(",").map(e => e.trim().replace(/^\./, ""))
      : ["ts", "tsx", "js", "jsx", "py", "go", "rs", "java", "cs"];
    const includePattern = exts.map(e => `--include="*.${e}"`).join(" ");
    // Search for both the file basename (without extension) and the raw target.
    const targetBase = basename(target).replace(/\.[^.]+$/, "");
    const terms = [...new Set([target, targetBase])].filter(Boolean);
    const fileSet = new Set<string>();
    for (const term of terms) {
      // Single-quote the search term so shell metacharacters in it cannot
      // break out of the argument and inject commands.
      const quoted = `'${term.replace(/'/g, `'\\''`)}'`;
      const cmd = `grep -rl ${includePattern} --color=never -- ${quoted} .`;
      const result = await runCommand(cmd, projectRoot, 15_000);
      for (const line of result.stdout.split("\n")) {
        // grep -rl prints paths as "./src/x.ts"; strip the "./" prefix so the
        // self-exclusion comparison against `target` can actually match.
        const normalized = line.trim().replace(/^\.\//, "");
        if (normalized && normalized !== target) fileSet.add(normalized);
      }
    }
    return json({
      target,
      dependentFiles: [...fileSet].slice(0, 50),
      dependentCount: fileSet.size,
      warning: fileSet.size > 20
        ? "High impact — more than 20 files depend on this target. Plan changes carefully."
        : null,
    });
  }),
}),
// -----------------------------------------------------------------------
// MEMORY TOOLS
// -----------------------------------------------------------------------
tool({
  name: "save_project_memory",
  description: text`
Save a note about this project to persistent memory. Notes are categorized and
retrieved in future sessions via load_project_context.
Categories: architecture (how the system is designed), convention (coding patterns
and style rules), bug (known unresolved issues), command (useful shell commands
for this project), decision (why something was built a certain way).
Call at end of session or after learning something non-obvious about the project.
`,
  parameters: {
    root: z.string().optional()
      .describe("Project root path. Defaults to config projectRoot."),
    category: z.enum(["architecture", "convention", "bug", "command", "decision"])
      .describe("Category for the memory note"),
    content: z.string()
      .describe("The note content. Be specific — vague notes have no value."),
  },
  implementation: safe_impl("save_project_memory", async ({ root, category, content }) => {
    // Validate before resolve(): resolve("") returns the CWD, so the old
    // post-resolve truthiness check could never fire.
    const rootInput = (root ?? defaultRoot()).trim();
    if (!rootInput) throw new Error("No project root.");
    const projectRoot = resolve(rootInput);
    const db = getDb(dataPath());
    let project = getProjectByRoot(db, projectRoot);
    if (!project) {
      // Auto-register the project with minimal info if it was never scanned.
      upsertProject(db, projectRoot, basename(projectRoot), "", "", [], []);
      project = getProjectByRoot(db, projectRoot);
      // Guard instead of a non-null assertion — fail loudly if the re-fetch
      // after upsert somehow comes back empty.
      if (!project) throw new Error(`Failed to register project at ${projectRoot}.`);
    }
    const noteId = insertMemoryNote(db, project.id, category, content);
    return json({ saved: true, noteId, category, projectRoot });
  }),
}),
tool({
  name: "load_project_context",
  description: text`
Load all saved memory notes for a project from persistent storage. Returns
architecture notes, conventions, known bugs, useful commands, and decisions.
Call at the start of every session after scan_repository to restore context
from prior sessions. Do NOT skip — without this, prior knowledge is lost.
`,
  parameters: {
    root: z.string().optional()
      .describe("Project root path. Defaults to config projectRoot."),
    category: z.enum(["architecture", "convention", "bug", "command", "decision", "all"]).default("all")
      .describe("Filter by category, or 'all' for everything."),
  },
  implementation: safe_impl("load_project_context", async ({ root, category }) => {
    // Validate before resolve(): resolve("") returns the CWD, so the old
    // post-resolve truthiness check could never fire.
    const rootInput = (root ?? defaultRoot()).trim();
    if (!rootInput) throw new Error("No project root.");
    const projectRoot = resolve(rootInput);
    const db = getDb(dataPath());
    const project = getProjectByRoot(db, projectRoot);
    if (!project) {
      return json({ projectRoot, notes: [], message: "No memory found. Call scan_repository first." });
    }
    // entrypoints/conventions are stored as JSON strings; fall back to [] on
    // a corrupt row so one bad record cannot make the whole context unloadable.
    const parseList = (raw: string): unknown => {
      try { return JSON.parse(raw); } catch { return []; }
    };
    const notes = getMemoryNotes(db, project.id, category === "all" ? undefined : category);
    return json({
      projectRoot,
      framework: project.framework,
      language: project.language,
      entrypoints: parseList(project.entrypoints),
      conventions: parseList(project.conventions),
      notes: notes.map(n => ({ id: n.id, category: n.category, content: n.content, createdAt: n.createdAt })),
      totalNotes: notes.length,
    });
  }),
}),
tool({
  name: "delete_project_memory",
  description: text`
Delete a specific memory note by its ID. Returns whether the deletion succeeded.
Call when a note is outdated or incorrect. Get the note ID from load_project_context.
`,
  parameters: {
    noteId: z.coerce.number().int().describe("Note ID from load_project_context"),
  },
  // Remove a single note from the memory store; `deleted` reports whether a
  // row was actually removed.
  implementation: safe_impl("delete_project_memory", async ({ noteId }) => {
    const database = getDb(dataPath());
    const removed = deleteMemoryNote(database, noteId);
    return json({ deleted: removed, noteId });
  }),
}),
tool({
  name: "list_projects",
  description: text`
List all projects in the memory database with their framework, language, and
last-updated timestamp. Returns a summary row per project.
Call when the user asks "what projects do you know about?" or to find the right
project root before calling load_project_context.
`,
  parameters: {},
  // Enumerate every project row known to the memory database.
  implementation: safe_impl("list_projects", async () => {
    const database = getDb(dataPath());
    const known = listProjects(database);
    return json({ projects: known, count: known.length });
  }),
}),
// -----------------------------------------------------------------------
// FILE EDITING TOOLS
// -----------------------------------------------------------------------
tool({
  name: "read_file",
  description: text`
Read the content of a file, optionally limited to a line range.
Returns the file content, total line count, and the resolved absolute path.
Call before editing a file to see the current state.
Do NOT read the same file twice in one step — cache the content.
`,
  parameters: {
    path: z.string().describe("File path — absolute or relative to project root"),
    root: z.string().optional()
      .describe("Project root for resolving relative paths. Defaults to config projectRoot."),
    startLine: z.coerce.number().int().min(1).optional()
      .describe("First line to read (1-indexed). Defaults to line 1."),
    endLine: z.coerce.number().int().optional()
      .describe("Last line to read. Defaults to config maxFileReadLines after startLine."),
  },
  implementation: safe_impl("read_file", async ({ path: filePath, root, startLine, endLine }) => {
    const projectRoot = root ?? defaultRoot();
    // isAbsolute() also recognizes Windows drive paths (C:\...) — a plain
    // startsWith("/") check would wrongly join those onto the project root.
    const absPath = isAbsolute(filePath) ? filePath : join(projectRoot, filePath);
    const content = await readFile(absPath, "utf8");
    const allLines = content.split("\n");
    const total = allLines.length;
    const start = (startLine ?? 1) - 1; // 0-indexed slice start
    // Default window: maxFileReadLines lines from start, clamped to the file.
    const end = endLine ?? Math.min(start + maxLines(), total);
    const slice = allLines.slice(start, end);
    return json({
      path: absPath,
      totalLines: total,
      returnedLines: `${start + 1}–${Math.min(end, total)}`,
      // Prefix each line with its 1-indexed number for easy referencing.
      content: slice.map((l, i) => `${start + i + 1}: ${l}`).join("\n"),
      truncated: end < total,
    });
  }),
}),
tool({
  name: "write_file",
  description: text`
Write or overwrite a file with new content. Creates parent directories if needed.
Returns the number of lines written and the resolved path.
Use for new files or complete rewrites only. For targeted edits use apply_patch.
ALWAYS call read_file first to understand the current state before overwriting.
`,
  parameters: {
    path: z.string().describe("File path — absolute or relative to project root"),
    content: z.string().describe("Full file content to write"),
    root: z.string().optional()
      .describe("Project root for resolving relative paths. Defaults to config projectRoot."),
  },
  implementation: safe_impl("write_file", async ({ path: filePath, content, root }) => {
    const projectRoot = root ?? defaultRoot();
    // isAbsolute() also recognizes Windows drive paths (C:\...) — a plain
    // startsWith("/") check would wrongly join those onto the project root.
    const absPath = isAbsolute(filePath) ? filePath : join(projectRoot, filePath);
    // Ensure parent directories exist so writes into new subtrees succeed.
    // mkdir comes from the top-level fs/promises import — no need for a
    // per-call dynamic import.
    await mkdir(dirname(absPath), { recursive: true });
    await writeFile(absPath, content, "utf8");
    const lines = content.split("\n").length;
    return json({ written: true, path: absPath, lines });
  }),
}),
tool({
  name: "apply_patch",
  description: text`
Replace an exact string in a file with new content. Surgical edit — leaves all
other content untouched. Returns whether the replacement was made.
The oldContent must match exactly (including whitespace and indentation).
Call read_file first to get the exact text to replace.
For multiple changes to the same file, call apply_patch once per change in order.
`,
  parameters: {
    path: z.string().describe("File path — absolute or relative to project root"),
    oldContent: z.string().describe("Exact string to replace (must exist verbatim in the file)"),
    newContent: z.string().describe("Replacement string"),
    root: z.string().optional()
      .describe("Project root for resolving relative paths. Defaults to config projectRoot."),
  },
  implementation: safe_impl("apply_patch", async ({ path: filePath, oldContent, newContent, root }) => {
    const projectRoot = root ?? defaultRoot();
    // isAbsolute() also recognizes Windows drive paths (C:\...) — a plain
    // startsWith("/") check would wrongly join those onto the project root.
    const absPath = isAbsolute(filePath) ? filePath : join(projectRoot, filePath);
    const original = await readFile(absPath, "utf8");
    if (!original.includes(oldContent)) {
      throw new Error(
        `oldContent not found in ${filePath}. Call read_file to see the current content and use the exact text.`
      );
    }
    const occurrences = original.split(oldContent).length - 1;
    if (occurrences > 1) {
      throw new Error(
        `oldContent appears ${occurrences} times in ${filePath}. Provide more surrounding context to make it unique.`
      );
    }
    // Use a replacer FUNCTION: with a plain string replacement, replace()
    // interprets $-patterns ($&, $', $1, ...) in newContent and would corrupt
    // any patch whose replacement text contains a dollar sign.
    const updated = original.replace(oldContent, () => newContent);
    await writeFile(absPath, updated, "utf8");
    return json({ patched: true, path: absPath, linesChanged: newContent.split("\n").length });
  }),
}),
// -----------------------------------------------------------------------
// VERIFICATION TOOL
// -----------------------------------------------------------------------
tool({
  name: "run_verification_loop",
  description: text`
Run the project's build, test, and/or lint commands and return structured
pass/fail results with stdout/stderr. Returns exitCode 0 on success.
Call after every apply_patch or write_file to catch regressions immediately.
Do NOT declare a task complete without a passing verification run.
`,
  parameters: {
    root: z.string().optional()
      .describe("Project root. Defaults to config projectRoot."),
    runTests: z.boolean().default(true)
      .describe("Run the test command (config testCommand)"),
    runBuild: z.boolean().default(true)
      .describe("Run the build command (config buildCommand)"),
    runLint: z.boolean().default(false)
      .describe("Run the lint command (config lintCommand). Skipped if lintCommand is blank."),
    customCommand: z.string().optional()
      .describe("Run this exact command instead of config commands. Overrides runTests/runBuild/runLint."),
    timeoutSeconds: z.coerce.number().int().min(5).max(300).default(60)
      .describe("Timeout per command in seconds"),
  },
  implementation: safe_impl("run_verification_loop", async ({ root, runTests, runBuild, runLint, customCommand, timeoutSeconds }) => {
    // Validate before resolve(): resolve("") returns the CWD, so the old
    // post-resolve truthiness check could never fire.
    const rootInput = (root ?? defaultRoot()).trim();
    if (!rootInput) throw new Error("No project root.");
    const projectRoot = resolve(rootInput);
    const timeout = timeoutSeconds * 1000;
    const results: Array<{ command: string; exitCode: number; stdout: string; stderr: string; passed: boolean }> = [];
    // Run one command and record a structured pass/fail row.
    async function runStep(cmd: string) {
      const r = await runCommand(cmd, projectRoot, timeout);
      results.push({ command: cmd, exitCode: r.exitCode, stdout: r.stdout, stderr: r.stderr, passed: r.exitCode === 0 });
    }
    if (customCommand) {
      await runStep(customCommand);
    } else {
      // Order: build, then lint (if configured), then tests.
      if (runBuild) await runStep(buildCmd());
      if (runLint && lintCmd()) await runStep(lintCmd());
      if (runTests) await runStep(testCmd());
    }
    const allPassed = results.every(r => r.passed);
    const firstFailure = results.find(r => !r.passed);
    return json({
      passed: allPassed,
      results,
      summary: allPassed
        ? "All checks passed."
        : `FAILED: ${firstFailure?.command} exited ${firstFailure?.exitCode}. See stderr for details.`,
    });
  }),
}),
// -----------------------------------------------------------------------
// PLANNING TOOL
// -----------------------------------------------------------------------
tool({
  name: "generate_patch_plan",
  description: text`
Produce a structured implementation plan for a feature request or bugfix. Returns
a scaffold payload — follow the instructions field to generate the plan JSON.
The plan lists impacted files, ordered edit steps, and a verification command.
The LLM then executes the plan step-by-step using apply_patch / write_file /
run_verification_loop. No files are changed by this tool.
Call after scan_repository and find_impact_zone, before any edits.
Do NOT start editing without a plan on non-trivial changes.
`,
  parameters: {
    task: z.string().describe("Feature request or bugfix description"),
    root: z.string().optional()
      .describe("Project root. Defaults to config projectRoot."),
    impactedFiles: z.string().optional()
      .describe("JSON array of file paths known to be affected (from find_impact_zone or trace_symbol)"),
    constraints: z.string().optional()
      .describe("Constraints: must not break X, must follow Y convention, etc."),
  },
  implementation: safe_impl("generate_patch_plan", async ({ task, root, impactedFiles, constraints }) => {
    // This tool tolerates a missing root — it still emits a plan scaffold,
    // just without project memory attached. Check the raw input instead of
    // the resolved path: resolve("") returns the CWD, which made the original
    // `projectRoot ?` branches unconditionally truthy.
    const rootInput = (root ?? defaultRoot()).trim();
    const projectRoot = rootInput ? resolve(rootInput) : null;
    const db = getDb(dataPath());
    const project = projectRoot ? getProjectByRoot(db, projectRoot) : null;
    const memory = project ? getMemoryNotes(db, project.id) : [];
    // Validate the caller-supplied JSON instead of trusting the cast — a
    // malformed payload should produce a clear error, not a bogus plan.
    let knownFiles: string[] = [];
    if (impactedFiles) {
      const parsed: unknown = JSON.parse(impactedFiles);
      if (!Array.isArray(parsed) || !parsed.every((p): p is string => typeof p === "string")) {
        throw new Error("impactedFiles must be a JSON array of file path strings.");
      }
      knownFiles = parsed;
    }
    return json({
      action: "generate_patch_plan",
      task,
      projectRoot,
      framework: project?.framework ?? "unknown",
      language: project?.language ?? "unknown",
      impactedFiles: knownFiles,
      constraints: constraints ?? null,
      projectMemory: memory.map(n => ({ category: n.category, content: n.content })),
      instructions: `You are a senior software engineer. Produce an implementation plan for the task below.
Task: ${task}
Project: ${project?.name ?? basename(projectRoot ?? "unknown")} (${project?.framework ?? "unknown"}, ${project?.language ?? "unknown"})
Known impacted files: ${knownFiles.length > 0 ? knownFiles.join(", ") : "not yet identified — use trace_symbol / find_impact_zone"}
Constraints: ${constraints ?? "none specified"}
Return a JSON object ONLY:
{
"summary": "one-sentence description of the change",
"impactedFiles": ["...relative paths..."],
"steps": [
{
"stepNumber": 1,
"file": "relative/path/to/file.ts",
"action": "edit | create | delete",
"description": "what to change and why",
"readFirst": true
}
],
"verificationCommand": "the exact command to run to verify the change works",
"rollbackNote": "how to undo if verification fails"
}
Rules:
- Each step touches exactly ONE file
- Order steps so dependencies are created before dependents
- Include a run_verification_loop step as the final step
- Never include speculative changes — only what the task requires
Output ONLY valid JSON.`,
    });
  }),
}),
];
return tools;
};