"use strict";
/**
* @file fileTools.ts
* File system operations: read, write, list, search, move, copy, delete, etc.
*/
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
if (k2 === undefined) k2 = k;
var desc = Object.getOwnPropertyDescriptor(m, k);
if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
desc = { enumerable: true, get: function() { return m[k]; } };
}
Object.defineProperty(o, k2, desc);
}) : (function(o, m, k, k2) {
if (k2 === undefined) k2 = k;
o[k2] = m[k];
}));
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
Object.defineProperty(o, "default", { enumerable: true, value: v });
}) : function(o, v) {
o["default"] = v;
});
var __importStar = (this && this.__importStar) || (function () {
var ownKeys = function(o) {
ownKeys = Object.getOwnPropertyNames || function (o) {
var ar = [];
for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
return ar;
};
return ownKeys(o);
};
return function (mod) {
if (mod && mod.__esModule) return mod;
var result = {};
if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
__setModuleDefault(result, mod);
return result;
};
})();
Object.defineProperty(exports, "__esModule", { value: true });
exports.createFileTools = createFileTools;
const sdk_1 = require("@lmstudio/sdk");
const promises_1 = require("fs/promises");
const path_1 = require("path");
const zod_1 = require("zod");
const shared_1 = require("./shared");
const fileStateCache_1 = require("./fileStateCache");
const errorCodes_1 = require("./errorCodes");
const spillToDisk_1 = require("./spillToDisk");
function createFileTools(ctx, limits) {
const tools = [];
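// Tracks mtime/size of files already read this session so read_file can skip re-sending unchanged content.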
const fileCache = new fileStateCache_1.FileStateCache();
/** Resolve path relative to cwd but enforce workspace root as security boundary. */
const safePath = (p) => (0, shared_1.validatePath)(ctx.cwd, p, ctx.workspaceRoot);
tools.push((0, sdk_1.tool)({
name: "save_file",
description: (0, sdk_1.text) `
Save content to a specified file. For existing files, prefer replace_text_in_file or patch_file.
`,
parameters: {
file_name: zod_1.z.string(),
content: zod_1.z.string(),
},
implementation: async ({ file_name, content }) => {
if (!file_name || file_name.trim().length === 0)
return (0, errorCodes_1.toolError)(errorCodes_1.EMPTY_INPUT, "Filename cannot be empty.");
if (/[\*\?<>|"]/.test(file_name))
return (0, errorCodes_1.toolError)(errorCodes_1.INVALID_CHARS, "Filename contains invalid characters.");
const filePath = safePath(file_name);
await (0, promises_1.mkdir)((0, path_1.dirname)(filePath), { recursive: true });
await (0, promises_1.writeFile)(filePath, content, "utf-8");
fileCache.invalidate(filePath);
// Track large files for replace_text_in_file hints
if (content.length > 10000) {
if (!ctx.fullState.largeFilesSaved)
ctx.fullState.largeFilesSaved = [];
if (!ctx.fullState.largeFilesSaved.includes(filePath)) {
ctx.fullState.largeFilesSaved.push(filePath);
await ctx.saveState();
}
}
return { success: true, path: filePath };
},
}));
tools.push((0, sdk_1.tool)({
name: "replace_text_in_file",
description: (0, sdk_1.text) `
Replace a specific string in a file with a new string.
Ensure 'old_string' matches exactly (including whitespace) or the replace will fail.
`,
parameters: {
file_name: zod_1.z.string(),
old_string: zod_1.z.string().describe("The exact text to replace. Must be unique in the file."),
new_string: zod_1.z.string().describe("The text to insert in place of old_string."),
},
implementation: async ({ file_name, old_string, new_string }) => {
try {
if (!old_string || old_string.length === 0)
return (0, errorCodes_1.toolError)(errorCodes_1.EMPTY_INPUT, "old_string cannot be empty.");
const filePath = safePath(file_name);
const content = await (0, promises_1.readFile)(filePath, "utf-8");
if (!content.includes(old_string))
return (0, errorCodes_1.toolError)(errorCodes_1.TEXT_NOT_FOUND, "Could not find the exact 'old_string' in the file. Please check whitespace and indentation.");
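// split() gives a cheap non-overlapping occurrence count.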
const occurrenceCount = content.split(old_string).length - 1;
if (occurrenceCount > 1)
return (0, errorCodes_1.toolError)(errorCodes_1.MULTIPLE_MATCHES, `Found ${occurrenceCount} occurrences of 'old_string'. Provide more context to make it unique.`);
// Use a replacer function so "$" sequences in new_string are inserted literally, not expanded as replacement patterns.
await (0, promises_1.writeFile)(filePath, content.replace(old_string, () => new_string), "utf-8");
fileCache.invalidate(filePath);
return { success: true, message: `Successfully replaced text in ${file_name}` };
}
catch (e) {
return (0, errorCodes_1.toolError)(errorCodes_1.IO_ERROR, `Failed to replace text: ${e instanceof Error ? e.message : String(e)}`);
}
},
}));
tools.push((0, sdk_1.tool)({
name: "patch_file",
description: (0, sdk_1.text) `
Apply multiple text replacements to a file in a single atomic operation.
Each patch has an old_string (exact match) and new_string. All patches are
validated before any writes — if any old_string is not found or not unique,
the entire operation fails and the file remains unchanged.
Use this instead of multiple replace_text_in_file calls when making
several coordinated edits to the same file.
`,
parameters: {
file_name: zod_1.z.string().describe("Path to the file to patch."),
patches: zod_1.z.array(zod_1.z.object({
old_string: zod_1.z.string().describe("Exact text to find (must be unique in the file)."),
new_string: zod_1.z.string().describe("Replacement text."),
})).min(1).describe("Array of replacements to apply in order."),
},
implementation: async ({ file_name, patches }) => {
try {
const filePath = safePath(file_name);
let content = await (0, promises_1.readFile)(filePath, "utf-8");
// Validation pass — simulate sequential application to catch cross-patch issues.
// Each patch is validated against the content AS IT WOULD LOOK after prior patches.
let simulated = content;
for (let i = 0; i < patches.length; i++) {
const p = patches[i];
if (!p.old_string || p.old_string.length === 0) {
return (0, errorCodes_1.toolError)(errorCodes_1.EMPTY_INPUT, `Patch ${i + 1}: old_string cannot be empty.`);
}
if (!simulated.includes(p.old_string)) {
return (0, errorCodes_1.toolError)(errorCodes_1.TEXT_NOT_FOUND, `Patch ${i + 1}: old_string not found in file (after prior patches). Check whitespace/indentation.`);
}
const count = simulated.split(p.old_string).length - 1;
if (count > 1) {
return (0, errorCodes_1.toolError)(errorCodes_1.MULTIPLE_MATCHES, `Patch ${i + 1}: found ${count} occurrences of old_string (after prior patches). Provide more context to make it unique.`);
}
// Replacer function avoids "$" pattern expansion in new_string.
simulated = simulated.replace(p.old_string, () => p.new_string);
}
// Validation passed — apply for real (simulated already has final content)
content = simulated;
await (0, promises_1.writeFile)(filePath, content, "utf-8");
fileCache.invalidate(filePath);
return { success: true, patches_applied: patches.length, file: file_name };
}
catch (e) {
return (0, errorCodes_1.toolError)(errorCodes_1.IO_ERROR, `Failed to patch file: ${e instanceof Error ? e.message : String(e)}`);
}
},
}));
tools.push((0, sdk_1.tool)({
name: "list_directory",
description: "List files and directories with metadata (type, size, modified date). Use to understand project structure.",
parameters: {
path: zod_1.z.string().optional().describe("Subdirectory path relative to CWD. Defaults to current working directory."),
},
implementation: async ({ path }) => {
const targetPath = path ? safePath(path) : ctx.cwd;
const entries = await (0, promises_1.readdir)(targetPath);
const items = await Promise.all(entries.map(async (name) => {
try {
const s = await (0, promises_1.stat)((0, path_1.join)(targetPath, name));
return { name, type: s.isDirectory() ? "dir" : "file", size: s.isDirectory() ? undefined : s.size, modified: s.mtime.toISOString().slice(0, 16) };
}
catch {
return { name, type: "unknown" };
}
}));
return { cwd: ctx.cwd, path: targetPath, count: items.length, entries: items };
},
}));
tools.push((0, sdk_1.tool)({
name: "read_file",
description: "Read the content of a file. Supports optional line range for large files (start_line/end_line).",
parameters: {
file_name: zod_1.z.string().describe("File path relative to CWD."),
start_line: zod_1.z.number().int().min(1).optional().describe("First line to read (1-based). Omit to start from beginning."),
end_line: zod_1.z.number().int().min(1).optional().describe("Last line to read (1-based, inclusive). Omit to read to end."),
},
implementation: async ({ file_name, start_line, end_line }) => {
const filePath = safePath(file_name);
const stats = await (0, promises_1.stat)(filePath);
// The whole file is buffered in memory even for ranged reads, so cap the size regardless of range.
if (stats.size > 10_000_000)
return (0, errorCodes_1.toolError)(errorCodes_1.FILE_TOO_LARGE, "File too large (>10MB); this tool buffers the whole file even for ranged reads.");
// Dedup: if full-read requested and file hasn't changed, return stub
const isFullRead = !start_line && !end_line;
if (isFullRead) {
const cached = fileCache.check(filePath, stats.mtimeMs);
if (cached) {
return {
already_read: true,
path: filePath,
total_lines: cached.totalLines,
size: cached.size,
read_count: cached.readCount,
hint: "This file was already read in this conversation and has NOT changed since. " +
"Use the content from your previous read_file call. " +
"Do NOT re-read unchanged files — it wastes context tokens. " +
"If you need to verify specific lines, use start_line/end_line.",
};
}
}
const buffer = await (0, promises_1.readFile)(filePath);
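// Sniff the first 1 KiB for NUL bytes, a cheap binary-content heuristic.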
const checkBuffer = buffer.subarray(0, Math.min(buffer.length, 1024));
if (checkBuffer.includes(0))
return (0, errorCodes_1.toolError)(errorCodes_1.FILE_IS_BINARY, "File appears to be binary and cannot be read as text.");
const fullContent = buffer.toString("utf-8");
const allLines = fullContent.split("\n");
const totalLines = allLines.length;
if (start_line || end_line) {
const from = Math.max(1, start_line ?? 1);
const to = Math.min(totalLines, end_line ?? totalLines);
fileCache.record(filePath, stats.mtimeMs, stats.size, totalLines, false);
return { content: allLines.slice(from - 1, to).join("\n"), lines: { from, to, total: totalLines } };
}
// Record in cache before returning
fileCache.record(filePath, stats.mtimeMs, stats.size, totalLines, true);
const MAX_READ = limits?.maxFileRead ?? 6_000;
if (fullContent.length > MAX_READ) {
const spill = await (0, spillToDisk_1.spillIfNeeded)(fullContent, MAX_READ, "read-file");
// Compute suggested line ranges for reading the rest
const truncatedAtLine = fullContent.substring(0, MAX_READ).split("\n").length;
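// Estimate how many lines fit in one MAX_READ-sized chunk to propose follow-up ranges.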
const avgCharsPerLine = Math.ceil(fullContent.length / totalLines);
const linesPerChunk = Math.max(50, Math.floor(MAX_READ / avgCharsPerLine));
const suggested_ranges = [];
let lineFrom = truncatedAtLine;
while (lineFrom <= totalLines) {
suggested_ranges.push({ from: lineFrom, to: Math.min(lineFrom + linesPerChunk - 1, totalLines) });
lineFrom += linesPerChunk;
}
return {
content: spill.preview, truncated: true, total_lines: totalLines,
...(spill.spilled ? { content_full: spill.spillPath } : {}),
suggested_ranges,
hint: `File truncated at ${MAX_READ} chars (line ~${truncatedAtLine} of ${totalLines}). Use start_line/end_line with the suggested_ranges to read the rest.`,
};
}
return { content: fullContent, total_lines: totalLines };
},
}));
const SKIP_DIRS = new Set(["node_modules", ".git", "dist", "build", ".next", "__pycache__", ".venv", "venv", ".lmstudio"]);
tools.push((0, sdk_1.tool)({
name: "grep_files",
description: "Search for a text pattern across files in the project. Returns matching lines with file paths and line numbers.",
parameters: {
pattern: zod_1.z.string().describe("Text or regex pattern to search for."),
path: zod_1.z.string().optional().describe("Subdirectory to search in (relative to CWD). Defaults to entire project."),
file_glob: zod_1.z.string().optional().describe("File glob filter, e.g. '*.ts', '*.py'. Defaults to all files."),
max_results: zod_1.z.number().int().min(1).max(100).optional().describe("Maximum number of matching lines to return. Default: 30."),
},
implementation: async ({ pattern, path, file_glob, max_results }) => {
const searchDir = path ? safePath(path) : ctx.cwd;
const limit = max_results ?? 30;
const matchingLines = [];
const MAX_FILE_SIZE = 1_000_000;
// Compile regex once, validate early
let regex;
try {
regex = new RegExp(pattern, "gi");
}
catch (e) {
return (0, errorCodes_1.toolError)(errorCodes_1.PATTERN_TOO_COMPLEX, `Invalid regex pattern: ${e instanceof Error ? e.message : String(e)}`);
}
// Depth-first walk; prunes vendored/build dirs and stops once the match limit is reached.
async function walkDir(dir) {
if (matchingLines.length >= limit)
return;
let entries;
try {
entries = await (0, promises_1.readdir)(dir);
}
catch {
return;
}
for (const entry of entries) {
if (matchingLines.length >= limit)
return;
const fullPath = (0, path_1.join)(dir, entry);
try {
const s = await (0, promises_1.stat)(fullPath);
if (s.isDirectory()) {
if (!SKIP_DIRS.has(entry))
await walkDir(fullPath);
}
else if (s.isFile() && s.size < MAX_FILE_SIZE) {
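// Minimal glob support: '*.ext' matches by extension; a single '*' splits into prefix/suffix; otherwise exact name.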
if (file_glob) {
if (file_glob.startsWith("*.")) {
const ext = file_glob.slice(1);
const entryExt = entry.lastIndexOf(".") >= 0 ? entry.slice(entry.lastIndexOf(".")) : "";
if (entryExt !== ext)
continue;
}
else {
const starIdx = file_glob.indexOf("*");
if (starIdx >= 0) {
const prefix = file_glob.slice(0, starIdx);
const suffix = file_glob.slice(starIdx + 1);
if (!entry.startsWith(prefix) || !entry.endsWith(suffix))
continue;
}
else {
if (entry !== file_glob)
continue;
}
}
}
// Binary detection: read only first 512 bytes instead of full file
let fh;
try {
fh = await (0, promises_1.open)(fullPath, "r");
const headerBuf = Buffer.alloc(Math.min(512, s.size));
await fh.read(headerBuf, 0, headerBuf.length, 0);
if (headerBuf.includes(0))
continue; // binary — skip
}
catch {
continue;
}
finally {
// Close the handle even when the read throws, so handles don't leak.
await fh?.close();
}
const content = await (0, promises_1.readFile)(fullPath, "utf-8");
const lines = content.split("\n");
for (let i = 0; i < lines.length; i++) {
regex.lastIndex = 0;
if (regex.test(lines[i])) {
matchingLines.push({ file: (0, path_1.relative)(ctx.cwd, fullPath), line: i + 1, text: lines[i].trim().substring(0, limits?.grepLineLen ?? 200) });
if (matchingLines.length >= limit)
return;
}
}
}
}
catch { /* skip */ }
}
}
await walkDir(searchDir);
return {
pattern, matches: matchingLines.length, results: matchingLines,
...(matchingLines.length >= limit ? { truncated: true, hint: `Showing first ${limit} matches. Use file_glob or path to narrow search.` } : {}),
};
},
}));
tools.push((0, sdk_1.tool)({
name: "get_project_context",
description: "Get a quick snapshot of the project: directory tree (2 levels), package.json summary, and key config files.",
parameters: {},
implementation: async () => {
const result = { cwd: ctx.cwd };
async function listTree(dir, depth) {
if (depth > 2)
return [];
let entries;
try {
entries = await (0, promises_1.readdir)(dir);
}
catch {
return [];
}
const lines = [];
for (const entry of entries) {
if (SKIP_DIRS.has(entry))
continue;
const full = (0, path_1.join)(dir, entry);
try {
const s = await (0, promises_1.stat)(full);
const prefix = " ".repeat(depth);
if (s.isDirectory()) {
lines.push(`${prefix}${entry}/`);
lines.push(...(await listTree(full, depth + 1)));
}
else
lines.push(`${prefix}${entry}`);
}
catch { /* skip */ }
}
return lines;
}
const tree = await listTree(ctx.cwd, 0);
const treeMax = limits?.dirTreeEntries ?? 100;
result.tree = tree.slice(0, treeMax).join("\n") + (tree.length > treeMax ? "\n... (truncated)" : "");
try {
const pkg = JSON.parse(await (0, promises_1.readFile)((0, path_1.join)(ctx.cwd, "package.json"), "utf-8"));
result.package = { name: pkg.name, version: pkg.version, scripts: pkg.scripts ? Object.keys(pkg.scripts) : [], deps: pkg.dependencies ? Object.keys(pkg.dependencies).length : 0, devDeps: pkg.devDependencies ? Object.keys(pkg.devDependencies).length : 0 };
}
catch { /* no package.json */ }
const configFiles = ["tsconfig.json", ".eslintrc.js", ".eslintrc.json", "vite.config.ts", "webpack.config.js", "pyproject.toml", "requirements.txt", "Cargo.toml", "go.mod", "Makefile", "Dockerfile", ".env.example"];
const foundConfigs = [];
for (const cf of configFiles) {
try {
await (0, promises_1.stat)((0, path_1.join)(ctx.cwd, cf));
foundConfigs.push(cf);
}
catch { /* not found */ }
}
if (foundConfigs.length > 0)
result.config_files = foundConfigs;
try {
const gitHead = await (0, promises_1.readFile)((0, path_1.join)(ctx.cwd, ".git", "HEAD"), "utf-8");
const branchMatch = gitHead.match(/ref: refs\/heads\/(.+)/);
if (branchMatch)
result.git_branch = branchMatch[1].trim();
}
catch { /* not a git repo */ }
return result;
},
}));
tools.push((0, sdk_1.tool)({
name: "make_directory",
description: "Create a new directory in the current working directory.",
parameters: { directory_name: zod_1.z.string() },
implementation: async ({ directory_name }) => {
const dirPath = safePath(directory_name);
await (0, promises_1.mkdir)(dirPath, { recursive: true });
return { success: true, path: dirPath };
},
}));
tools.push((0, sdk_1.tool)({
name: "delete_path",
description: "Delete a file or directory in the current working directory. Be careful!",
parameters: { path: zod_1.z.string() },
implementation: async ({ path }) => {
const targetPath = safePath(path);
await (0, promises_1.rm)(targetPath, { recursive: true, force: true });
fileCache.invalidate(targetPath);
return { success: true, path: targetPath };
},
}));
tools.push((0, sdk_1.tool)({
name: "delete_files_by_pattern",
description: "Delete multiple files in the current directory that match a regex pattern.",
parameters: { pattern: zod_1.z.string().describe("Regex pattern to match filenames (e.g., '^auto_gen_.*\\.txt$')") },
implementation: async ({ pattern }) => {
try {
if (pattern.length > 100)
return (0, errorCodes_1.toolError)(errorCodes_1.PATTERN_TOO_COMPLEX, "Pattern too complex (max 100 characters).");
const regex = new RegExp(pattern);
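// Lightweight ReDoS heuristic: time the pattern against a benign string; catastrophic
// backtracking often stalls even here. Not a complete defense.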
const start = Date.now();
regex.test("safe_test_string_for_redos_check_1234567890_safe_test_string_for_redos_check_1234567890");
if (Date.now() - start > 100)
return (0, errorCodes_1.toolError)(errorCodes_1.REDOS_TIMEOUT, "Pattern is too complex or slow (ReDoS protection).");
const files = await (0, promises_1.readdir)(ctx.cwd);
const deleted = [];
for (const file of files) {
if (!regex.test(file))
continue;
const fullPath = (0, path_1.join)(ctx.cwd, file);
// Skip directories: rm without `recursive` throws on them and would abort the batch.
const s = await (0, promises_1.lstat)(fullPath).catch(() => null);
if (!s || s.isDirectory())
continue;
await (0, promises_1.rm)(fullPath, { force: true });
fileCache.invalidate(fullPath);
deleted.push(file);
}
return { deleted_count: deleted.length, deleted_files: deleted };
}
catch (e) {
return (0, errorCodes_1.toolError)(errorCodes_1.IO_ERROR, `Failed to delete files: ${e instanceof Error ? e.message : String(e)}`);
}
},
}));
tools.push((0, sdk_1.tool)({
name: "move_file",
description: "Move or rename a file or directory.",
parameters: { source: zod_1.z.string(), destination: zod_1.z.string() },
implementation: async ({ source, destination }) => {
const sourcePath = safePath(source);
const destPath = safePath(destination);
await (0, promises_1.rename)(sourcePath, destPath);
fileCache.invalidate(sourcePath);
fileCache.invalidate(destPath);
return { success: true, from: sourcePath, to: destPath };
},
}));
tools.push((0, sdk_1.tool)({
name: "copy_file",
description: "Copy a file to a new location.",
parameters: { source: zod_1.z.string(), destination: zod_1.z.string() },
implementation: async ({ source, destination }) => {
const sourcePath = safePath(source);
const destPath = safePath(destination);
await (0, promises_1.copyFile)(sourcePath, destPath);
fileCache.invalidate(destPath);
return { success: true, from: sourcePath, to: destPath };
},
}));
tools.push((0, sdk_1.tool)({
name: "find_files",
description: "Find files recursively in the current directory matching a glob pattern (e.g., '*.ts', 'src/**/*.json', 'README*').",
parameters: {
pattern: zod_1.z.string().describe("Glob pattern to match filenames (e.g., '*.ts', '**/*.json', 'config.*'). Supports *, ?, and ** for recursive matching."),
max_depth: zod_1.z.number().optional().describe("Maximum depth to search (default: 5)"),
},
implementation: async ({ pattern, max_depth }) => {
const depthLimit = max_depth ?? 5;
const foundFiles = [];
const hasPathSep = pattern.includes("/");
// Convert the glob to a regex. '**/' must also match zero directory levels so
// 'src/**/*.json' matches 'src/a.json' as well as 'src/x/y/a.json'.
const regexStr = pattern
.replace(/[.+^${}()|[\]\\]/g, '\\$&')
.replace(/\*\*\//g, '\0')
.replace(/\*\*/g, '\x01')
.replace(/\*/g, '[^/]*')
.replace(/\?/g, '[^/]')
.replace(/\0/g, '(?:[^/]+/)*')
.replace(/\x01/g, '.*');
const globRegex = new RegExp(`^${regexStr}$`, 'i');
async function scan(dir, currentDepth) {
if (currentDepth > depthLimit)
return;
try {
const entries = await (0, promises_1.readdir)(dir, { withFileTypes: true });
for (const entry of entries) {
const fullPath = (0, path_1.join)(dir, entry.name);
if (entry.isDirectory()) {
if (!SKIP_DIRS.has(entry.name))
await scan(fullPath, currentDepth + 1);
}
else if (entry.isFile()) {
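// Patterns containing '/' match the CWD-relative path; bare patterns match the filename only.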
const relativePath = (0, path_1.relative)(ctx.cwd, fullPath);
const matchTarget = hasPathSep ? relativePath : entry.name;
if (globRegex.test(matchTarget))
foundFiles.push(fullPath);
}
}
}
catch { /* ignore access errors */ }
}
await scan(ctx.cwd, 0);
const filesMax = limits?.findFilesMax ?? 100;
return { found_files: foundFiles.slice(0, filesMax), count: foundFiles.length, ...(foundFiles.length > filesMax ? { truncated: true } : {}) };
},
}));
tools.push((0, sdk_1.tool)({
name: "get_file_metadata",
description: "Get metadata (size, dates) for a specific file.",
parameters: { path: zod_1.z.string() },
implementation: async ({ path }) => {
try {
const targetPath = safePath(path);
const stats = await (0, promises_1.stat)(targetPath);
return { path: targetPath, size: stats.size, created: stats.birthtime, modified: stats.mtime, is_directory: stats.isDirectory(), is_file: stats.isFile() };
}
catch (error) {
return (0, errorCodes_1.toolError)(errorCodes_1.IO_ERROR, `Failed to get metadata: ${error instanceof Error ? error.message : String(error)}`);
}
},
}));
tools.push((0, sdk_1.tool)({
name: "audit_html_assets",
description: (0, sdk_1.text) `
Analyze an HTML file and report all referenced assets (images, CSS, JS, fonts, videos, audio).
Detects: duplicate references, missing files, and unused files in the directory.
Use this BEFORE making changes to avoid repeating assets.
`,
parameters: {
file_path: zod_1.z.string().describe("Path to the HTML file to audit"),
},
implementation: async ({ file_path }) => {
const fullPath = safePath(file_path);
const html = await (0, promises_1.readFile)(fullPath, "utf-8");
const baseDir = (0, path_1.dirname)(fullPath);
// Extract all asset references from HTML
const assetPatterns = [
/(?:src|href|poster|data-src|data-bg|data-image|background)\s*=\s*["']([^"']+?)["']/gi,
/url\(\s*["']?([^"')]+?)["']?\s*\)/gi,
];
const referencedAssets = [];
for (const pattern of assetPatterns) {
let match;
while ((match = pattern.exec(html)) !== null) {
const ref = match[1].trim();
// Skip external URLs, protocol-relative URLs, data URIs, anchors, protocols
if (/^(https?:|\/\/|data:|mailto:|tel:|#|javascript:)/i.test(ref))
continue;
referencedAssets.push(ref);
}
}
// Count occurrences
const refCounts = new Map();
for (const ref of referencedAssets) {
refCounts.set(ref, (refCounts.get(ref) || 0) + 1);
}
// Check which files exist
const missing = [];
const found = [];
const duplicates = [];
for (const [ref, count] of refCounts) {
// Strip query strings and fragments (e.g. 'img.png?v=2') before checking the filesystem.
const assetPath = (0, path_1.resolve)(baseDir, ref.split(/[?#]/)[0]);
try {
await (0, promises_1.stat)(assetPath);
found.push(ref);
}
catch {
missing.push(ref);
}
if (count > 1) {
duplicates.push({ file: ref, count });
}
}
// Find unused files in directory (images/media only)
const mediaExts = new Set([".jpg", ".jpeg", ".png", ".gif", ".webp", ".svg", ".mp4", ".webm", ".mov", ".mp3", ".wav", ".ogg", ".avif", ".bmp", ".tiff"]);
const unused = [];
const scanDir = async (dir, rel) => {
try {
const entries = await (0, promises_1.readdir)(dir, { withFileTypes: true });
for (const entry of entries) {
if (entry.isDirectory()) {
// Skip vendored/build dirs so node_modules assets aren't reported as "unused".
if (!SKIP_DIRS.has(entry.name))
await scanDir((0, path_1.join)(dir, entry.name), rel ? `${rel}/${entry.name}` : entry.name);
}
else {
const ext = (0, path_1.extname)(entry.name).toLowerCase();
if (mediaExts.has(ext)) {
const relPath = rel ? `${rel}/${entry.name}` : entry.name;
// Check if referenced (try both with and without ./ prefix)
if (!refCounts.has(relPath) && !refCounts.has(`./${relPath}`)) {
unused.push(relPath);
}
}
}
}
}
catch { /* skip unreadable dirs */ }
};
await scanDir(baseDir, "");
return {
total_references: referencedAssets.length,
unique_assets: refCounts.size,
duplicates: duplicates.length > 0 ? duplicates.sort((a, b) => b.count - a.count) : "none",
missing_files: missing.length > 0 ? missing : "none",
unused_media_files: unused.length > 0 ? unused : "none",
all_referenced: [...refCounts.keys()].sort(),
};
},
}));
tools.push((0, sdk_1.tool)({
name: "plan_image_layout",
description: (0, sdk_1.text) `
Deterministically assign images from a directory to a given number of sections/slides.
Guarantees NO image is repeated. Returns a JSON mapping of section index to assigned images.
Use this BEFORE generating HTML to plan which images go where.
The model should follow this plan exactly when creating the HTML.
`,
parameters: {
image_directory: zod_1.z.string().describe("Path to the directory containing images (relative to CWD or absolute)."),
section_count: zod_1.z.number().int().min(1).describe("Number of sections/slides to distribute images across."),
images_per_section: zod_1.z.number().int().min(1).optional().describe("Max images per section (default: 1). Extra images go to gallery."),
shuffle: zod_1.z.boolean().optional().describe("Randomize image order before assigning (default: false — keeps alphabetical)."),
},
implementation: async ({ image_directory, section_count, images_per_section, shuffle }) => {
// Route through safePath so the workspace-root boundary applies here as well.
const imgDir = safePath(image_directory);
const maxPerSection = images_per_section ?? 1;
// Scan for image files
const mediaExts = new Set([".jpg", ".jpeg", ".png", ".gif", ".webp", ".svg", ".avif", ".bmp", ".tiff"]);
const allImages = [];
const scanDir = async (dir, rel) => {
try {
const entries = await (0, promises_1.readdir)(dir, { withFileTypes: true });
for (const entry of entries) {
if (entry.isDirectory()) {
await scanDir((0, path_1.join)(dir, entry.name), rel ? `${rel}/${entry.name}` : entry.name);
}
else {
const ext = (0, path_1.extname)(entry.name).toLowerCase();
if (mediaExts.has(ext)) {
allImages.push(rel ? `${rel}/${entry.name}` : entry.name);
}
}
}
}
catch { /* skip unreadable dirs */ }
};
await scanDir(imgDir, "");
if (allImages.length === 0) {
return (0, errorCodes_1.toolError)(errorCodes_1.NO_FILES_FOUND, "No image files found in the specified directory.");
}
// Optionally shuffle
const images = [...allImages];
if (shuffle) {
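// Fisher-Yates shuffle for an unbiased permutation.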
for (let i = images.length - 1; i > 0; i--) {
const j = Math.floor(Math.random() * (i + 1));
[images[i], images[j]] = [images[j], images[i]];
}
}
else {
images.sort();
}
// Distribute images across sections — round-robin, no repeats
const layout = {};
for (let i = 0; i < section_count; i++) {
layout[i + 1] = [];
}
let imgIndex = 0;
for (let round = 0; round < maxPerSection; round++) {
for (let section = 1; section <= section_count; section++) {
if (imgIndex >= images.length)
break;
layout[section].push(images[imgIndex]);
imgIndex++;
}
if (imgIndex >= images.length)
break;
}
// Remaining unassigned images
const unassigned = imgIndex < images.length ? images.slice(imgIndex) : [];
return {
total_images: images.length,
sections: section_count,
images_per_section: maxPerSection,
layout,
unassigned_images: unassigned.length > 0 ? unassigned : "none",
instruction: "Follow this layout exactly when generating HTML. Do NOT reuse any image across sections.",
};
},
}));
tools.push((0, sdk_1.tool)({
name: "diff_files",
description: (0, sdk_1.text) `
Compare two files and return a simple line-by-line diff of the differences.
Lines are compared by position, so a single insertion marks all later lines as changed.
Useful for comparing any two files, not just git-tracked changes.
`,
parameters: {
file_a: zod_1.z.string().describe("Path to the first file"),
file_b: zod_1.z.string().describe("Path to the second file"),
},
implementation: async ({ file_a, file_b }) => {
const pathA = safePath(file_a);
const pathB = safePath(file_b);
const [contentA, contentB] = await Promise.all([
(0, promises_1.readFile)(pathA, "utf-8"),
(0, promises_1.readFile)(pathB, "utf-8"),
]);
const linesA = contentA.split("\n");
const linesB = contentB.split("\n");
// Simple line-by-line diff
const diff = [];
const maxLen = Math.max(linesA.length, linesB.length);
let changes = 0;
for (let i = 0; i < maxLen; i++) {
const a = linesA[i], b = linesB[i];
if (a === undefined) {
diff.push(`+${i + 1}: ${b}`);
changes++;
}
else if (b === undefined) {
diff.push(`-${i + 1}: ${a}`);
changes++;
}
else if (a !== b) {
diff.push(`-${i + 1}: ${a}`);
diff.push(`+${i + 1}: ${b}`);
changes++;
}
}
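// Cap diff output so large files don't flood the model's context.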
const MAX_DIFF = limits?.maxDiff ?? 8_000;
const diffStr = diff.join("\n");
return {
file_a, file_b,
lines_a: linesA.length,
lines_b: linesB.length,
changes,
identical: changes === 0,
diff: changes === 0 ? "Files are identical." :
diffStr.substring(0, MAX_DIFF) + (diffStr.length > MAX_DIFF ? `\n... (truncated, ${diffStr.length} chars total)` : ""),
};
},
}));
tools.push((0, sdk_1.tool)({
name: "change_directory",
description: (0, sdk_1.text) `
Change the current working directory.
Returns the new current working directory.
`,
parameters: { directory: zod_1.z.string() },
implementation: async ({ directory }) => {
// Security: use the validator's resolved path so the workspace-root boundary cannot be bypassed.
const newPath = safePath(directory);
const stats = await (0, promises_1.stat)(newPath);
if (!stats.isDirectory())
throw new Error(`Path is not a directory: ${newPath}`);
const previousDirectory = ctx.cwd;
ctx.cwd = newPath;
ctx.fullState.currentWorkingDirectory = newPath;
await ctx.saveState();
return { previous_directory: previousDirectory, current_directory: ctx.cwd };
},
}));
return tools;
}
//# sourceMappingURL=fileTools.js.map