// Forked from brdcastro/maestro
"use strict";
/**
* @file secondaryAgent.ts
* Secondary agent delegation tool: consult_secondary_agent.
* Manual REST-level agent loop with JSON tool call parsing.
*/
// TypeScript-emitted interop helper: re-export property `k` of module `m`
// on `o` under the name `k2` (defaults to `k`). The modern branch installs
// a live getter so the binding tracks later reassignment in the source
// module; the legacy branch (no Object.create) does a one-time copy.
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    var desc = Object.getOwnPropertyDescriptor(m, k);
    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
        desc = { enumerable: true, get: function() { return m[k]; } };
    }
    Object.defineProperty(o, k2, desc);
}) : (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    o[k2] = m[k];
}));
// TypeScript-emitted interop helper: attach `v` as the `default` export of
// the namespace object `o` (non-writable where defineProperty is available).
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
    Object.defineProperty(o, "default", { enumerable: true, value: v });
}) : function(o, v) {
    o["default"] = v;
});
// TypeScript-emitted interop helper backing `import * as ns from "mod"`:
// real ES modules pass through untouched; CommonJS modules get wrapped in a
// namespace object with every own key re-bound plus the module itself as
// `default`. `ownKeys` lazily picks the best enumeration strategy once.
var __importStar = (this && this.__importStar) || (function () {
    var ownKeys = function(o) {
        ownKeys = Object.getOwnPropertyNames || function (o) {
            var ar = [];
            for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
            return ar;
        };
        return ownKeys(o);
    };
    return function (mod) {
        if (mod && mod.__esModule) return mod;
        var result = {};
        if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
        __setModuleDefault(result, mod);
        return result;
    };
})();
Object.defineProperty(exports, "__esModule", { value: true });
exports.getAgentDepth = getAgentDepth;
exports.resetAgentDepth = resetAgentDepth;
exports.createSecondaryAgentTool = createSecondaryAgentTool;
const sdk_1 = require("@lmstudio/sdk");
const promises_1 = require("fs/promises");
const path_1 = require("path");
const os_1 = require("os");
const crypto_1 = require("crypto");
const child_process_1 = require("child_process");
const util_1 = require("util");
const zod_1 = require("zod");
const shared_1 = require("./shared");
const webSearch_1 = require("./webSearch");
const toolsProvider_1 = require("../memory/toolsProvider");
const projectMemory_1 = require("../memory/projectMemory");
const errorCodes_1 = require("./errorCodes");
// ── Agent Depth Tracking ──────────────────────────────────────────
// Guards against runaway recursion when agents spawn agents, and makes the
// nesting level observable. Depth 2 is the ceiling: primary → secondary →
// (the secondary's own tool loop). An auto-debug pass counts as one more
// level because it is itself another agent invocation.
const MAX_AGENT_DEPTH = 2;
let _currentDepth = 0;
/** Current agent nesting depth: 0 = top-level, 1 = secondary, 2 = debug pass. */
function getAgentDepth() {
    return _currentDepth;
}
/** Zero the depth counter — invoke whenever a fresh conversation begins. */
function resetAgentDepth() {
    _currentDepth = 0;
}
// --- Helpers ---
let _cachedSecondaryModel = null;
let _cachedEndpoint = null;
let _cachedAt = 0;
const MODEL_CACHE_TTL_MS = 60_000; // 60s — short enough to notice model swaps
/**
 * Discover the id of the secondary model served at `endpoint` by querying
 * its OpenAI-style /models listing. The second listed model is preferred
 * (the first is assumed to be the primary). Results are cached per endpoint
 * for MODEL_CACHE_TTL_MS; any network/HTTP failure degrades to the
 * "local-model" placeholder without touching the cache.
 */
async function detectSecondaryModel(endpoint) {
    const now = Date.now();
    const cacheUsable = _cachedSecondaryModel
        && _cachedEndpoint === endpoint
        && now - _cachedAt < MODEL_CACHE_TTL_MS;
    if (cacheUsable)
        return _cachedSecondaryModel;
    try {
        const res = await fetch(`${endpoint}/models`, { signal: AbortSignal.timeout(5_000) });
        if (!res.ok)
            return "local-model";
        const body = await res.json();
        const listed = body?.data ?? [];
        _cachedSecondaryModel = listed.length >= 2
            ? listed[1].id
            : (listed[0]?.id ?? "local-model");
        _cachedEndpoint = endpoint;
        _cachedAt = now;
        return _cachedSecondaryModel;
    }
    catch {
        return "local-model";
    }
}
/**
 * Canonicalize the argument aliases models tend to improvise (path/name vs
 * file_name, data vs content, code vs python/javascript) so the dispatch
 * table only has to recognize one spelling per concept. Mutates and
 * returns `args` in place.
 */
function normalizeArgs(toolName, args) {
    if (args == null)
        return args;
    // Copy obj[src] onto obj[dst] when dst is still unset/falsy.
    const alias = (obj, src, dst) => {
        if (obj[src] && !obj[dst])
            obj[dst] = obj[src];
    };
    if (toolName === "save_file") {
        if (Array.isArray(args))
            return { files: args };
        if (typeof args === "object") {
            alias(args, "path", "file_name");
            alias(args, "name", "file_name");
            alias(args, "data", "content");
        }
        return args;
    }
    if (typeof args !== "object")
        return args;
    switch (toolName) {
        case "read_file":
        case "replace_text_in_file":
            alias(args, "path", "file_name");
            break;
        case "run_python":
            alias(args, "code", "python");
            break;
        case "run_javascript":
            alias(args, "code", "javascript");
            break;
    }
    return args;
}
/** Extract a tool invocation from raw model output (handles 4 layouts). */
function parseToolCall(content) {
    const text = content.trim();
    // Layout 4 (Qwen3 favorite): markdown link `[tool_name]({json args})`.
    // Checked first — the payload is valid JSON on its own, so the bare-JSON
    // matcher below would otherwise grab the args without the tool name.
    const linkForm = text.match(/\[([a-zA-Z_][a-zA-Z0-9_]*)\]\s*\(\s*(\{[\s\S]*?\})\s*\)/);
    if (linkForm) {
        try {
            const parsedArgs = JSON.parse(linkForm[2]);
            return { tool: linkForm[1], args: normalizeArgs(linkForm[1], parsedArgs) };
        }
        catch { /* malformed payload — fall through to the JSON-only layouts */ }
    }
    const braced = text.match(/\{[\s\S]*\}/);
    if (!braced)
        return null;
    try {
        const obj = JSON.parse(braced[0]);
        // Layout 1: {"tool": ..., "args": ...}
        if (obj.tool && obj.args)
            return { tool: obj.tool, args: normalizeArgs(obj.tool, obj.args) };
        // Layout 2: {"name": ..., "arguments": ...} (OpenAI function-call shape)
        if (obj.name && obj.arguments) {
            return { tool: obj.name, args: normalizeArgs(obj.name, obj.arguments) };
        }
        // Layout 3: harmony-style header `to=functions.tool_name` + bare args JSON.
        const headerForm = text.match(/to=([a-zA-Z0-9_.]+)/);
        if (headerForm) {
            const toolName = headerForm[1].replace(/^functions\./, "");
            return { tool: toolName, args: normalizeArgs(toolName, obj) };
        }
    }
    catch { /* body was not parseable JSON */ }
    return null;
}
const REFUSAL_KEYWORDS = [
    "i cannot browse", "i don't have access", "i can't access",
    "unable to browse", "real-time news", "no internet access",
    "as an ai", "i do not have the ability", "cannot access the internet",
];
/**
 * Heuristic: does `content` read like a capability refusal ("as an AI…",
 * "unable to browse…")? Used to nudge the sub-agent back toward its tools.
 * Case-insensitive substring match against REFUSAL_KEYWORDS.
 */
function isRefusal(content) {
    // Lowercase once, not once per keyword inside the .some() callback.
    const lowered = content.toLowerCase();
    return REFUSAL_KEYWORDS.some(kw => lowered.includes(kw));
}
const _execFileAsync = (0, util_1.promisify)(child_process_1.execFile);
// Anything at or below this size is base64-encoded as-is — roughly 200KB of
// image ≈ ~270KB base64 ≈ ~80K tokens once tokenizer inefficiency on random
// strings is accounted for, an acceptable per-image cost. Larger files get
// shrunk to 768px @ q45 first (mirrors the take_screenshot defaults).
const IMAGE_RESIZE_THRESHOLD_BYTES = 200_000;
const IMAGE_RESIZE_MAX_DIM = 768;
/**
 * Downsize an over-threshold image into a temp JPEG via ffmpeg, with a
 * macOS `sips` fallback. Small-enough (or unstat-able) sources come back
 * untouched with a no-op cleanup; resized ones return the temp path plus a
 * callback that removes the temp directory.
 *
 * Rationale: models hand raw paths to consult_secondary_agent({images}),
 * and without this step a 4MB JPEG balloons into ~5.5MB of base64 that is
 * replayed on every later sub-agent turn — the same trap take_screenshot
 * fell into before rev 65.
 */
async function maybeResizeImage(srcPath) {
    const noopCleanup = async () => { };
    let byteSize = 0;
    try {
        byteSize = (await (0, promises_1.stat)(srcPath)).size;
    }
    catch {
        return { path: srcPath, cleanup: noopCleanup };
    }
    if (byteSize <= IMAGE_RESIZE_THRESHOLD_BYTES) {
        return { path: srcPath, cleanup: noopCleanup };
    }
    const tmpDir = (0, path_1.join)((0, os_1.tmpdir)(), `maestro-imgres-${(0, crypto_1.randomBytes)(6).toString("hex")}`);
    await (0, promises_1.mkdir)(tmpDir, { recursive: true });
    const outPath = (0, path_1.join)(tmpDir, "resized.jpg");
    const cleanup = async () => { await (0, promises_1.rm)(tmpDir, { recursive: true, force: true }).catch(() => { }); };
    // First choice: the bundled ffmpeg, with the same flags take_screenshot uses.
    try {
        const { getFfmpegPath } = await Promise.resolve().then(() => __importStar(require("../media/ffmpegPath")));
        const ffmpeg = await getFfmpegPath();
        await _execFileAsync(ffmpeg, [
            "-i", srcPath,
            "-vf", `scale='min(${IMAGE_RESIZE_MAX_DIM},iw)':'min(${IMAGE_RESIZE_MAX_DIM},ih)':force_original_aspect_ratio=decrease`,
            "-q:v", "10", "-y", outPath,
        ], { timeout: 15_000 });
        return { path: outPath, cleanup };
    }
    catch { /* ffmpeg unavailable or failed — try sips next */ }
    // Second choice: macOS sips (`--out` needs the parent dir to exist already).
    try {
        await _execFileAsync("sips", [
            "--resampleHeightWidthMax", String(IMAGE_RESIZE_MAX_DIM),
            "--setProperty", "format", "jpeg",
            "--setProperty", "formatOptions", "45",
            srcPath, "--out", outPath,
        ], { timeout: 15_000 });
        return { path: outPath, cleanup };
    }
    catch {
        // Neither resizer worked — hand back the original file. The caller
        // ends up with a large data URI, which still beats dropping the image.
        await cleanup();
        return { path: srcPath, cleanup: noopCleanup };
    }
}
/**
 * Resolve an image input (filesystem path or data URI) into a data URI
 * suitable for a chat-completions multimodal `image_url` part. Vision
 * models (Gemma 4, Qwen3 VL, etc.) receive the image bytes; text-only
 * models silently skip the image part and process the text.
 *
 * Oversized sources are first shrunk to 768px JPEG (q45) so the request
 * body stays manageable and context doesn't bloat across sub-agent turns.
 * Returns null when the path is invalid or unreadable.
 */
async function loadImageAsDataUri(input, cwd, workspaceRoot) {
    if (input.startsWith("data:"))
        return input;
    try {
        const fpath = (0, shared_1.validatePath)(cwd, input, workspaceRoot);
        const { path: encodePath, cleanup } = await maybeResizeImage(fpath);
        try {
            const buf = await (0, promises_1.readFile)(encodePath);
            // A resize always yields JPEG; otherwise derive the mime type
            // from the source file's extension.
            const wasResized = encodePath !== fpath;
            const ext = (wasResized ? "jpg" : fpath.toLowerCase().split(".").pop()) || "";
            const mimeByExt = {
                png: "image/png",
                webp: "image/webp",
                gif: "image/gif",
                bmp: "image/bmp",
            };
            const mime = mimeByExt[ext] ?? "image/jpeg"; // jpg, jpeg, resized, or unknown default
            return `data:${mime};base64,${buf.toString("base64")}`;
        }
        finally {
            await cleanup();
        }
    }
    catch {
        return null;
    }
}
/**
 * Execute one parsed tool call on behalf of the sub-agent and return a
 * plain-string result to feed back into the conversation.
 *
 * @param tc            {tool, args} as produced by parseToolCall.
 * @param dctx          Dispatch context: cwd, workspaceRoot, permission
 *                      flags (allowFileSystem/allowWeb/allowCode), original
 *                      run_python/run_javascript implementations, plugin
 *                      config, and size limits.
 * @param filesModified Accumulator — file names of every write are pushed
 *                      here so the caller can report/review them.
 * @returns Result string (successes and expected failures alike); throws
 *          only for path-validation or regex-safety violations.
 */
async function dispatchTool(tc, dctx, filesModified) {
    // --- File System ---
    if (dctx.allowFileSystem) {
        if (tc.tool === "read_file" && tc.args?.file_name) {
            const fpath = (0, shared_1.validatePath)(dctx.cwd, tc.args.file_name, dctx.workspaceRoot);
            const { stat } = await Promise.resolve().then(() => __importStar(require("fs/promises")));
            const s = await stat(fpath);
            // Hard cap before reading anything into memory.
            if (s.size > 10_000_000)
                return "Error: File too large (>10MB). Use line ranges or a different approach.";
            const buf = await (0, promises_1.readFile)(fpath);
            // NUL byte in the first 512 bytes ⇒ treat as binary.
            if (buf.subarray(0, 512).includes(0))
                return "Error: Binary file cannot be read as text.";
            const text = buf.toString("utf-8");
            const max = dctx.limits?.maxFileRead ?? 6_000;
            return text.length > max ? text.substring(0, max) + `\n... (truncated at ${max} chars)` : text;
        }
        if (tc.tool === "list_directory") {
            // Non-recursive listing of the working directory only.
            return JSON.stringify(await (0, promises_1.readdir)(dctx.cwd));
        }
        if (tc.tool === "save_file") {
            // Batch form: {files: [{file_name|name|path, content|data}, ...]}.
            // Invalid entries are skipped; partial success still reports success.
            if (Array.isArray(tc.args?.files)) {
                const saved = [];
                for (const f of tc.args.files) {
                    const fName = f.file_name || f.name || f.path;
                    const fContent = f.content || f.data;
                    if (fName && fContent) {
                        try {
                            const fpath = (0, shared_1.validatePath)(dctx.cwd, fName, dctx.workspaceRoot);
                            await (0, promises_1.mkdir)((0, path_1.dirname)(fpath), { recursive: true });
                            await (0, promises_1.writeFile)(fpath, fContent, "utf-8");
                            filesModified.push(fName);
                            saved.push(fName);
                        }
                        catch { /* continue */ }
                    }
                }
                return saved.length > 0 ? `Success: Saved ${saved.length} files: ${saved.join(", ")}` : "Error: No valid files in batch.";
            }
            // Single-file form, tolerating the same key aliases.
            const fileName = tc.args?.file_name || tc.args?.name || tc.args?.path;
            const content = tc.args?.content || tc.args?.data;
            if (fileName && content) {
                const fpath = (0, shared_1.validatePath)(dctx.cwd, fileName, dctx.workspaceRoot);
                await (0, promises_1.mkdir)((0, path_1.dirname)(fpath), { recursive: true });
                await (0, promises_1.writeFile)(fpath, content, "utf-8");
                filesModified.push(fileName);
                return `Success: File saved to ${fpath}`;
            }
            return "Error: Missing 'file_name' or 'content'.";
        }
        if (tc.tool === "replace_text_in_file" && tc.args?.file_name && tc.args?.old_string && tc.args?.new_string) {
            const fpath = (0, shared_1.validatePath)(dctx.cwd, tc.args.file_name, dctx.workspaceRoot);
            const content = await (0, promises_1.readFile)(fpath, "utf-8");
            if (!content.includes(tc.args.old_string))
                return "Error: 'old_string' not found exactly.";
            // Require a unique match so the edit is unambiguous.
            const count = content.split(tc.args.old_string).length - 1;
            if (count > 1)
                return `Error: Found ${count} occurrences. Be more specific.`;
            await (0, promises_1.writeFile)(fpath, content.replace(tc.args.old_string, tc.args.new_string), "utf-8");
            filesModified.push(tc.args.file_name);
            return "Success: Text replaced.";
        }
        if (tc.tool === "delete_files_by_pattern" && tc.args?.pattern) {
            if (tc.args.pattern.length > 100)
                throw new Error("Pattern too complex");
            const regex = new RegExp(tc.args.pattern);
            // Crude ReDoS probe: run the pattern once against a fixed string
            // and reject it if that single test takes >100ms.
            const start = Date.now();
            regex.test("safe_test_string_for_redos_check_1234567890_safe_test_string_for_redos_check_1234567890");
            if (Date.now() - start > 100)
                throw new Error("Pattern too slow");
            // Only matches direct children of cwd — no recursion.
            const files = await (0, promises_1.readdir)(dctx.cwd);
            const deleted = [];
            for (const file of files) {
                if (regex.test(file)) {
                    await (0, promises_1.rm)((0, path_1.join)(dctx.cwd, file), { force: true });
                    deleted.push(file);
                }
            }
            return `Deleted ${deleted.length} files: ${deleted.join(", ")}`;
        }
    }
    // --- Web ---
    if (dctx.allowWeb) {
        if (tc.tool === "wikipedia_search" && tc.args?.query) {
            try {
                const res = await fetch(`https://en.wikipedia.org/api/rest_v1/page/summary/${encodeURIComponent(tc.args.query)}`);
                if (res.ok) {
                    const d = await res.json();
                    return JSON.stringify({ title: d.title, extract: d.extract });
                }
                return "Wikipedia: no results found.";
            }
            catch {
                return "Wikipedia search failed.";
            }
        }
        if (tc.tool === "web_search" && tc.args?.query) {
            // SearXNG → DDG Lite fallback (shared implementation)
            const sxUrl = dctx.pluginConfig.get("searxngUrl") || "";
            if (sxUrl) {
                const results = await (0, webSearch_1.searchSearXNG)(sxUrl, tc.args.query, 3);
                if (results)
                    return JSON.stringify(results);
            }
            const results = await (0, webSearch_1.searchDDGLite)(tc.args.query, 3);
            return JSON.stringify(results || []);
        }
        if (tc.tool === "fetch_web_content" && tc.args?.url) {
            let html = (await (await fetch(tc.args.url)).text());
            // Strip scripts, styles, nav, footer, and HTML tags — keep only readable text
            html = html
                .replace(/<script[\s\S]*?<\/script>/gi, "")
                .replace(/<style[\s\S]*?<\/style>/gi, "")
                .replace(/<nav[\s\S]*?<\/nav>/gi, "")
                .replace(/<footer[\s\S]*?<\/footer>/gi, "")
                .replace(/<header[\s\S]*?<\/header>/gi, "")
                .replace(/<[^>]+>/g, " ")
                .replace(/&[a-z]+;/gi, " ")
                .replace(/\s+/g, " ")
                .trim();
            return html.substring(0, dctx.limits?.maxSecondaryWeb ?? 5_000);
        }
    }
    // --- Code ---
    if (dctx.allowCode) {
        if (tc.tool === "run_python" && tc.args?.python) {
            const res = await dctx.originalRunPython({ python: tc.args.python });
            return res.stderr ? `Error: ${res.stderr}` : res.stdout;
        }
        if (tc.tool === "run_javascript" && tc.args?.javascript) {
            const res = await dctx.originalRunJavascript({ javascript: tc.args.javascript });
            return res.stderr ? `Error: ${res.stderr}` : res.stdout;
        }
    }
    // --- Memory ---
    // Memory tools are always available regardless of the permission flags.
    if (tc.tool === "remember" && tc.args?.content) {
        try {
            const storagePath = "";
            const memCfg = { activeProject: dctx.pluginConfig.get("activeProject") || "" };
            const { db: memDb, engine: memEngine } = await (0, toolsProvider_1.getSharedInstances)(storagePath);
            const target = (0, projectMemory_1.resolveProjectMemoryTarget)(memCfg, { scope: tc.args.scope, project: tc.args.project });
            const tags = target.scope === "project" && target.project
                ? (0, projectMemory_1.buildProjectMemoryTags)(tc.args.tags || [], target.project)
                : (tc.args.tags || []);
            const id = memDb.store(tc.args.content, tc.args.category || "general", tags, 1.0, "sub-agent", null, target.scope, target.project);
            memEngine.indexMemory(id, tc.args.content, tags, tc.args.category || "general");
            return `Memory stored (id: ${id}, scope: ${target.scope}${target.project ? `, project: ${target.project}` : ""})`;
        }
        catch (err) {
            return `Memory store failed: ${err.message}`;
        }
    }
    if (tc.tool === "recall" && tc.args?.query) {
        try {
            const storagePath = "";
            const { engine: memEngine } = await (0, toolsProvider_1.getSharedInstances)(storagePath);
            const result = await memEngine.retrieve(tc.args.query, tc.args.limit || 5, 30);
            return JSON.stringify(result.memories.map(m => ({
                content: m.content, category: m.category, tags: m.tags,
                relevance: "compositeScore" in m ? Math.round(m.compositeScore * 100) : 50,
            })));
        }
        catch (err) {
            return `Memory recall failed: ${err.message}`;
        }
    }
    // Unknown tool, missing required args, or blocked by permissions.
    return "Error: Tool not found/allowed.";
}
// --- Auto-save code blocks from response ---
/**
 * Scan the sub-agent's final text for fenced code blocks and persist them
 * to disk, replacing each saved block in the text with a short system note.
 *
 * Two extraction modes:
 *  - a ```json``` block containing an array of {path|file_name|name,
 *    content|data|code} objects is unpacked as a batch of files;
 *  - any other block >50 chars is saved individually when a file name can
 *    be found either in the 500 chars of prose before the fence or in a
 *    leading comment line of the block itself.
 *
 * Matches are processed in reverse order so earlier match indices stay
 * valid while `finalContent` is spliced. Shell-language blocks without an
 * explicit file name and duplicate file names are skipped.
 *
 * @returns The (possibly rewritten) response text.
 */
async function autoSaveCodeBlocks(finalContent, cwd, workspaceRoot, filesModified) {
    const codeBlockRegex = /```\s*(\w+)?\s*([\s\S]*?)```/g;
    const matches = Array.from(finalContent.matchAll(codeBlockRegex));
    const processedFiles = new Set();
    // Reverse iteration: splicing the tail never shifts earlier indices.
    for (let i = matches.length - 1; i >= 0; i--) {
        const match = matches[i];
        const fullBlock = match[0], lang = (match[1] || "txt").toLowerCase(), code = match[2];
        const index = match.index || 0;
        let handledAsBatch = false;
        // Smart JSON Unpacking
        if (lang === "json") {
            try {
                const parsed = JSON.parse(code);
                if (Array.isArray(parsed)) {
                    let extractedCount = 0;
                    for (const item of parsed) {
                        const fName = item.path || item.file_name || item.name;
                        const fContent = item.content || item.data || item.code;
                        if (fName && typeof fName === "string" && fContent && typeof fContent === "string") {
                            const fpath = (0, shared_1.validatePath)(cwd, fName, workspaceRoot);
                            await (0, promises_1.mkdir)((0, path_1.dirname)(fpath), { recursive: true });
                            await (0, promises_1.writeFile)(fpath, fContent, "utf-8");
                            filesModified.push(fName);
                            processedFiles.add(fName);
                            extractedCount++;
                        }
                    }
                    if (extractedCount > 0) {
                        handledAsBatch = true;
                        finalContent = finalContent.slice(0, index) + `\n[System: Extracted ${extractedCount} files from JSON block.]\n` + finalContent.slice(index + fullBlock.length);
                    }
                }
            }
            catch { /* not JSON batch */ }
        }
        if (!handledAsBatch && code.trim().length > 50) {
            // Look for a file name in the prose just before the fence
            // (backticked name, bold, heading, or "filename:"/"file:" label).
            const lookback = finalContent.substring(Math.max(0, index - 500), index);
            const EXT_PAT = /(?:tsx|ts|jsx|js|html|css|json|md|py|sh|java|rs|go|sql|yaml|yml|c|cpp|h|hpp|txt)/;
            const nameMatch = lookback.match(new RegExp(`(?:\`|\\*\\*|###|filename:|file:)[\\s\\S]*?([\\w\\-\\/\\\\.]+\\.(?:${EXT_PAT.source}))`, 'i'));
            let fileName = nameMatch?.[1]?.trim() || "";
            if (!fileName) {
                // Fallback: a comment on the block's first line, e.g. `// app.ts`.
                const firstLine = code.split('\n')[0].trim();
                const commentMatch = firstLine.match(new RegExp(`^(?:\\/\\/|#|<!--|;)\\s*(?:filename:|file:)?\\s*([\\w\\-\\/\\\\.]+\\.(?:${EXT_PAT.source}))`, 'i'));
                if (commentMatch)
                    fileName = commentMatch[1].trim();
            }
            // Shell transcripts are commands, not files — never auto-save them
            // without an explicit name; skip unnamed or already-saved files.
            const isShell = ["bash", "sh", "cmd", "powershell", "console", "zsh", "terminal"].includes(lang);
            if ((isShell && !fileName) || !fileName || processedFiles.has(fileName))
                continue;
            try {
                const fpath = (0, shared_1.validatePath)(cwd, fileName, workspaceRoot);
                await (0, promises_1.mkdir)((0, path_1.dirname)(fpath), { recursive: true });
                await (0, promises_1.writeFile)(fpath, code, "utf-8");
                filesModified.push(fileName);
                processedFiles.add(fileName);
                finalContent = finalContent.slice(0, index) + `\n[System: File '${fileName}' created successfully.]\n` + finalContent.slice(index + fullBlock.length);
            }
            catch { /* skip */ }
        }
    }
    return finalContent;
}
/**
 * Build the `consult_secondary_agent` tool: delegates an auxiliary task to
 * a second model served by the local endpoint, driving it through a manual
 * REST-level agent loop (JSON tool-call parsing, tool dispatch, optional
 * auto-debug review pass, auto-saving of emitted code blocks).
 *
 * @param ctx    Provides cwd and workspaceRoot for path validation.
 * @param config Provides pluginConfig, original run_python/run_javascript
 *               implementations, and size limits.
 * @returns The SDK tool object.
 */
function createSecondaryAgentTool(ctx, config) {
    const { pluginConfig } = config;
    // Capability flags derived from the configured permission level; they
    // shape the tool description so the primary model delegates sensibly.
    const permLevel = pluginConfig.get("subAgentPermissions") || "standard";
    const _saWeb = permLevel === "standard" || permLevel === "full";
    const _saCode = permLevel === "full";
    const _saCaps = ["memory (remember/recall)", "summarization", "file reading"];
    if (_saWeb)
        _saCaps.push("web search");
    if (_saCode)
        _saCaps.push("code execution");
    const _saNo = [];
    if (!_saCode)
        _saNo.push("coding", "file creation");
    if (!_saWeb)
        _saNo.push("web search");
    const desc = `Delegate an auxiliary task to a secondary (lighter) model. Capabilities: ${_saCaps.join(", ")}.`
        + (_saNo.length > 0 ? ` Do NOT delegate ${_saNo.join(" or ")} — handle those yourself.` : "")
        + ` Pass image paths/data URIs via 'images' to delegate visual analysis (only works if the loaded model has vision — e.g. Gemma 4, Qwen3-VL).`;
    return (0, sdk_1.tool)({
        name: "consult_secondary_agent",
        description: desc,
        parameters: {
            task: zod_1.z.string().describe("The task to delegate."),
            agent_role: zod_1.z.string().optional().describe("Key from 'Sub-Agent Profiles' config. Default: 'general'."),
            context: zod_1.z.string().optional().describe("Additional context or data for the agent."),
            allow_tools: zod_1.z.boolean().optional().describe("If true, the secondary agent can use its enabled tools. Default: false."),
            images: zod_1.z.array(zod_1.z.string()).optional().describe("Optional image inputs — array of file paths (absolute or relative to cwd) or data URIs. Forwarded to the secondary model as multimodal content. Vision-capable models (Gemma 4, Qwen3-VL) will analyze them; text-only models will ignore the image parts. Each image adds significant tokens — keep to 1-3 per call."),
        },
        implementation: (0, shared_1.createSafeToolImplementation)(async ({ task, agent_role = "general", context = "", allow_tools = false, images }) => {
            // NOTE(review): endpoint and sub-agent profiles are hard-coded
            // here — presumably baked in at build time; confirm if they
            // should come from pluginConfig instead.
            const endpoint = "http://localhost:1234/v1";
            const modelId = await detectSecondaryModel(endpoint);
            const subAgentProfilesStr = '{"summarizer": "You are a summarization expert. Summarize the content concisely.", "coder": "You are a software engineer. Write efficient and safe code."}';
            const debugMode = pluginConfig.get("enableAutoDebug");
            const autoSave = true;
            const showFullCode = false;
            // Permission flags are re-read per invocation so config changes
            // take effect without rebuilding the tool.
            const permLevel = pluginConfig.get("subAgentPermissions") || "standard";
            const allowFileSystem = true; // always allow reading
            const allowWeb = permLevel === "standard" || permLevel === "full";
            const allowCode = permLevel === "full";
            /**
             * Run one full sub-agent conversation: build the system prompt
             * (instructions file + persona + allowed-tools list), send the
             * task, then iterate parse-tool-call → dispatch → feed result
             * until the model stops calling tools or loopLimit is hit.
             * Returns {response, filesModified} or a toolError object.
             */
            const runAgentLoop = async (role, taskPrompt, contextData, loopLimit = 8, forceTools = false, workingDir, imageInputs) => {
                let systemPrompt = "You are a helpful assistant.";
                // Load instructions file if present
                try {
                    const instructions = await (0, promises_1.readFile)((0, path_1.join)(workingDir, "SUB_AGENT_INSTRUCTIONS.md"), "utf-8");
                    if (instructions.trim())
                        systemPrompt = instructions;
                }
                catch { /* ignore */ }
                systemPrompt += `\n\n## Current Workspace\nYour current working directory is:\n\n${workingDir}\nAlways assume relative paths are from this directory.`;
                // Append profile
                try {
                    const profiles = JSON.parse(subAgentProfilesStr);
                    if (profiles[role])
                        systemPrompt += `\n\n## Your Persona\n${profiles[role]}`;
                    else if (role === "reviewer")
                        systemPrompt += `\n\n## Your Persona\nYou are a Senior Code Reviewer. Analyze code, find bugs/issues, and FIX them using 'save_file'.`;
                }
                catch { /* ignore */ }
                // Append tools info
                let toolsReminder = "";
                const toolsEnabled = allow_tools || forceTools;
                if (toolsEnabled) {
                    const allowedTools = [];
                    if (allowFileSystem)
                        allowedTools.push("read_file", "list_directory", "save_file", "replace_text_in_file", "delete_files_by_pattern");
                    if (allowWeb)
                        allowedTools.push("wikipedia_search", "web_search", "fetch_web_content");
                    if (allowCode)
                        allowedTools.push("run_python", "run_javascript");
                    allowedTools.push("remember", "recall");
                    if (allowedTools.length > 0) {
                        systemPrompt += `\n\n## Allowed Tools\nYou have access to: ${allowedTools.join(", ")}.\n`;
                        toolsReminder = `\n\n[SYSTEM REMINDER: You have access to tools: ${allowedTools.join(", ")}. USE A TOOL if needed.]`;
                    }
                }
                // Build the initial user message. If image inputs are present, use
                // OpenAI multimodal format (content as array of typed parts) so
                // vision-capable models (Gemma 4, Qwen3-VL) actually receive the
                // image bytes. Text-only models silently ignore the image parts.
                const userText = `Task: ${taskPrompt}\n\nContext: ${contextData}${toolsReminder}`;
                let userContent = userText;
                if (imageInputs && imageInputs.length > 0) {
                    const imageParts = [];
                    const failed = [];
                    for (const img of imageInputs) {
                        const dataUri = await loadImageAsDataUri(img, workingDir, ctx.workspaceRoot);
                        if (dataUri)
                            imageParts.push({ type: "image_url", image_url: { url: dataUri } });
                        else
                            failed.push(img);
                    }
                    if (imageParts.length > 0) {
                        userContent = [
                            { type: "text", text: userText + (failed.length ? `\n\n[Warning: ${failed.length} image(s) failed to load: ${failed.join(", ")}]` : "") },
                            ...imageParts,
                        ];
                    }
                }
                const msgList = [
                    { role: "system", content: systemPrompt },
                    { role: "user", content: userContent },
                ];
                let loops = 0, finalContent = "";
                // Cap consecutive no-tool-call iterations — when the parser keeps
                // failing to extract a tool call, looping 8 times is just slow
                // failure. After 2 in a row we bail and return the model's last
                // attempt instead of burning ~10 minutes of wall time.
                let consecutiveNoToolCall = 0;
                const MAX_CONSECUTIVE_NO_TOOL_CALL = 2;
                const filesModified = [];
                const dctx = {
                    cwd: workingDir, workspaceRoot: ctx.workspaceRoot, allowFileSystem, allowWeb, allowCode,
                    originalRunJavascript: config.originalRunJavascript,
                    originalRunPython: config.originalRunPython,
                    pluginConfig,
                    limits: config.limits,
                };
                while (loops < loopLimit) {
                    try {
                        const response = await fetch(`${endpoint}/chat/completions`, {
                            method: "POST",
                            headers: { "Content-Type": "application/json" },
                            body: JSON.stringify({ model: modelId, messages: msgList, temperature: 0.7, stream: false }),
                            signal: AbortSignal.timeout(120_000),
                        });
                        if (!response.ok)
                            return { ...(0, errorCodes_1.toolError)(errorCodes_1.API_ERROR, `API Error: ${response.status}`), filesModified };
                        const data = await response.json();
                        let content = data.choices[0].message.content;
                        // Strip chat-template special tokens like <|im_end|>.
                        content = content.replace(/<\|.*?\|>/g, "").trim();
                        // Without tools there is nothing to loop over — one shot.
                        if (!toolsEnabled)
                            return { response: content, filesModified };
                        if (isRefusal(content)) {
                            msgList.push({ role: "assistant", content }, { role: "system", content: "SYSTEM ERROR: You HAVE access to tools. USE THEM." });
                            loops++;
                            continue;
                        }
                        const toolCall = parseToolCall(content);
                        if (toolCall?.tool) {
                            consecutiveNoToolCall = 0;
                            msgList.push({ role: "assistant", content });
                            let toolResult;
                            try {
                                toolResult = await dispatchTool(toolCall, dctx, filesModified);
                            }
                            catch (err) {
                                toolResult = `Error: ${err.message}`;
                            }
                            // Tool output goes back as a user turn (plain REST API,
                            // no native tool-role support assumed).
                            msgList.push({ role: "user", content: `Tool Output: ${toolResult}` });
                            loops++;
                        }
                        else {
                            consecutiveNoToolCall++;
                            if (content.includes("TASK_COMPLETED") ||
                                loops >= loopLimit - 1 ||
                                consecutiveNoToolCall >= MAX_CONSECUTIVE_NO_TOOL_CALL) {
                                finalContent = content;
                                break;
                            }
                            msgList.push({ role: "assistant", content }, { role: "system", content: "SYSTEM NOTICE: You did not call a tool. If finished, output 'TASK_COMPLETED'. Otherwise USE A TOOL." });
                            loops++;
                        }
                    }
                    catch (err) {
                        return { ...(0, errorCodes_1.toolError)(errorCodes_1.API_ERROR, err.message), filesModified };
                    }
                    // Prevent unbounded memory growth — keep system prompt + original task.
                    // Trim from oldest, ensuring we start at an assistant turn to keep pairs intact.
                    if (msgList.length > 20) {
                        const sysMsg = msgList[0];
                        const originalTask = msgList[1];
                        const tail = msgList.slice(2);
                        // Find the start of a recent assistant turn (role-aware, not parity-based)
                        let cutAt = Math.max(0, tail.length - 16);
                        while (cutAt < tail.length && tail[cutAt]?.role !== "assistant")
                            cutAt++;
                        msgList.splice(0, msgList.length, sysMsg, originalTask, ...tail.slice(cutAt));
                    }
                }
                // Auto-save code blocks
                if (autoSave && allowFileSystem && finalContent) {
                    finalContent = await autoSaveCodeBlocks(finalContent, workingDir, ctx.workspaceRoot, filesModified);
                }
                return { response: finalContent, filesModified };
            };
            // --- Depth check ---
            if (_currentDepth >= MAX_AGENT_DEPTH) {
                return (0, errorCodes_1.toolError)(errorCodes_1.API_ERROR, `Agent depth limit reached (depth=${_currentDepth}, max=${MAX_AGENT_DEPTH}). ` +
                    `Cannot spawn another sub-agent. Handle this task directly.`);
            }
            // --- 1. Primary Agent Loop ---
            // try/finally guarantees the depth counter unwinds even on throw.
            _currentDepth++;
            let primaryResult;
            try {
                primaryResult = await runAgentLoop(agent_role, task, context, 8, false, ctx.cwd, images);
            }
            finally {
                _currentDepth--;
            }
            if ("error" in primaryResult)
                return primaryResult;
            let finalResponse = primaryResult.response;
            // --- 2. Auto-Debug Loop ---
            // Only runs when code execution is permitted and the primary pass
            // actually wrote files; the reviewer run counts as one more depth level.
            if (debugMode && allowCode && primaryResult.filesModified.length > 0) {
                const filesToCheck = primaryResult.filesModified.join(", ");
                let debugContext = "Here is the content of the created files:\n";
                for (const f of primaryResult.filesModified) {
                    try {
                        debugContext += `\n--- ${f} ---\n${await (0, promises_1.readFile)((0, path_1.join)(ctx.cwd, f), "utf-8")}\n`;
                    }
                    catch { /* skip */ }
                }
                const MAX_DEBUG = config.limits?.maxDebugContext ?? 8_000;
                if (debugContext.length > MAX_DEBUG)
                    debugContext = debugContext.substring(0, MAX_DEBUG) + "\n\n[... truncated]";
                _currentDepth++;
                let debugResult;
                try {
                    debugResult = await runAgentLoop("reviewer", `Review the code in these files: ${filesToCheck}. Check for bugs, syntax errors, or logic flaws. Fix them.`, debugContext, 5, true, ctx.cwd);
                }
                finally {
                    _currentDepth--;
                }
                finalResponse += "\n\n--- Auto-Debug Report ---\n" + (debugResult.response || "Debug pass completed.");
                if (debugResult.filesModified.length > 0)
                    finalResponse += `\n(Reviewer fixed: ${debugResult.filesModified.join(", ")})`;
            }
            // Append file list
            if (primaryResult.filesModified.length > 0) {
                const fullPaths = primaryResult.filesModified.map((f) => (0, path_1.isAbsolute)(f) ? f : (0, path_1.join)(ctx.cwd, f));
                finalResponse += `\n\n[GENERATED_FILES]: ${fullPaths.join(", ")}`;
                if (showFullCode) {
                    finalResponse += `\n\n### Generated Code Content:\n`;
                    for (const f of primaryResult.filesModified) {
                        try {
                            const fpath = (0, path_1.isAbsolute)(f) ? f : (0, path_1.join)(ctx.cwd, f);
                            const content = await (0, promises_1.readFile)(fpath, "utf-8");
                            finalResponse += `\n**${f}**\n\`\`\`${f.split('.').pop() || 'txt'}\n${content}\n\`\`\`\n`;
                        }
                        catch { /* skip */ }
                    }
                }
            }
            // Code was already persisted to disk — hide the (large) blocks from
            // the primary model's context unless showFullCode is on.
            if (!showFullCode) {
                finalResponse = finalResponse.replace(/```[\s\S]*?```/g, "\n[System: Code Block Hidden. The code has been handled by the sub-agent.]\n");
            }
            return { response: finalResponse, generated_files: primaryResult.filesModified };
        }, pluginConfig.get("enableSecondaryAgent"), "consult_secondary_agent"),
    });
}
//# sourceMappingURL=secondaryAgent.js.map