// agents/subagent.js
"use strict";
/**
* Sub-agent runner — Agent Zero's core feature.
*
* RAM strategy:
* - If subAgentModelId is blank OR only one model is loaded → reuse the same
* model (no extra RAM, just another API call with a different system prompt).
* - If subAgentModelId is set AND that model is loaded → use it as the
* dedicated sub-agent model (for devices that can hold two models).
*
* The loop:
* 1. POST task to /v1/chat/completions with a sub-agent system prompt + tool defs
* 2. If the model emits tool_calls → execute them, append results, loop
* 3. When the model replies without tool_calls → return the final answer
*/
Object.defineProperty(exports, "__esModule", { value: true });
exports.spawnAgent = spawnAgent;
exports.listLoadedModels = listLoadedModels;
const sandbox_1 = require("../sandbox");
const promises_1 = require("fs/promises");
const path_1 = require("path");
const os_1 = require("os");
const memory_1 = require("./memory");
// ── Helpers ────────────────────────────────────────────────────────────────
/**
 * Ask the endpoint which models are currently loaded (GET /v1/models).
 * Best-effort: any network failure, non-2xx status, or malformed payload
 * yields an empty array rather than an exception. 5-second timeout.
 *
 * @param {string} endpoint Base URL of the OpenAI-compatible server.
 * @returns {Promise<string[]>} Loaded model IDs, possibly empty.
 */
async function listLoadedModels(endpoint) {
    try {
        const response = await fetch(`${endpoint}/v1/models`, {
            signal: AbortSignal.timeout(5_000),
        });
        if (!response.ok) {
            return [];
        }
        const payload = await response.json();
        // A payload without a `data` array throws here and is caught below.
        return payload.data.map((model) => model.id);
    }
    catch {
        // Unreachable server, timeout, or bad JSON — report "nothing loaded".
        return [];
    }
}
/**
 * Decide which model ID the sub-agent should use.
 * Priority: the preferred model when it is set and loaded; otherwise the
 * first loaded model (reusing the main model costs no extra RAM); otherwise
 * fall back to `preferred` or the "local-model" placeholder when the server
 * reports nothing loaded.
 *
 * @param {string} endpoint  Base URL of the OpenAI-compatible server.
 * @param {string} preferred Optional dedicated sub-agent model ID.
 * @returns {Promise<string>} Model ID to send in chat requests.
 */
async function resolveModel(endpoint, preferred) {
    const loaded = await listLoadedModels(endpoint);
    if (loaded.length === 0) {
        return preferred || "local-model";
    }
    const preferredIsLoaded = Boolean(preferred) && loaded.includes(preferred);
    return preferredIsLoaded ? preferred : loaded[0];
}
/**
 * Perform one non-streaming POST to /v1/chat/completions.
 * When `tools` is non-empty they are advertised in OpenAI function-calling
 * format with tool_choice "auto"; otherwise no tool fields are sent.
 *
 * @param {string} endpoint  Base URL of the OpenAI-compatible server.
 * @param {string} model     Model ID to request.
 * @param {object[]} messages Chat history in OpenAI message format.
 * @param {object[]} tools   Function definitions (name/description/parameters).
 * @param {number} timeoutMs HTTP timeout in milliseconds.
 * @returns {Promise<object>} Parsed JSON response body.
 * @throws {Error} On non-2xx responses (includes status and body text) or
 *         network/timeout failures from fetch.
 */
async function chatCompletion(endpoint, model, messages, tools, timeoutMs) {
    const payload = {
        model,
        messages,
        temperature: 0.3,
        max_tokens: 2048,
    };
    if (tools.length > 0) {
        payload.tools = tools.map((fn) => ({ type: "function", function: fn }));
        payload.tool_choice = "auto";
    }
    const response = await fetch(`${endpoint}/v1/chat/completions`, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify(payload),
        signal: AbortSignal.timeout(timeoutMs),
    });
    if (response.ok) {
        return response.json();
    }
    const detail = await response.text().catch(() => response.statusText);
    throw new Error(`LM Studio API error ${response.status}: ${detail}`);
}
/**
 * Execute a single tool call requested by the sub-agent model.
 *
 * All failures are returned as human-readable "Error: …" strings rather than
 * thrown, so the model sees them as tool output and can recover.
 *
 * @param {string} name     Tool name as emitted by the model.
 * @param {string} argsJson Raw JSON argument string from the tool call.
 * @param {{workspace: string, pythonBin: string, timeout: number}} ctx
 *        Execution context: sandbox workspace root, Python interpreter path,
 *        and the per-tool timeout passed to the sandbox runner.
 * @returns {Promise<string>} Tool output text.
 */
async function execTool(name, argsJson, ctx) {
    let args;
    try {
        args = JSON.parse(argsJson);
    }
    catch {
        return `Error: invalid JSON arguments for tool "${name}": ${argsJson}`;
    }
    const ws = ctx.workspace;
    switch (name) {
        // ── file tools ────
        case "read_file": {
            try {
                // safe() confines the requested path to the workspace sandbox.
                const p = (0, sandbox_1.safe)(ws, String(args.path ?? ""));
                const text = await (0, promises_1.readFile)(p, "utf-8");
                const lines = text.split("\n");
                // Cap at 200 lines so large files don't flood the context window.
                return lines.slice(0, 200).join("\n") + (lines.length > 200 ? "\n[truncated]" : "");
            }
            catch (e) {
                return `Error: ${e.message}`;
            }
        }
        case "write_file": {
            try {
                const p = (0, sandbox_1.safe)(ws, String(args.path ?? ""));
                // Create intermediate directories so nested paths just work.
                await (0, promises_1.mkdir)((0, path_1.dirname)(p), { recursive: true });
                await (0, promises_1.writeFile)(p, String(args.content ?? ""), "utf-8");
                return `Wrote ${String(args.content ?? "").length} chars to ${args.path}`;
            }
            catch (e) {
                return `Error: ${e.message}`;
            }
        }
        case "list_directory": {
            try {
                const p = (0, sandbox_1.safe)(ws, String(args.path ?? "."));
                const items = await (0, promises_1.readdir)(p, { withFileTypes: true });
                // "d"/"f" prefix distinguishes directories from files; capped at 100 entries.
                return items.slice(0, 100).map(i => `${i.isDirectory() ? "d" : "f"} ${(0, path_1.relative)(ws, `${p}/${i.name}`)}`).join("\n");
            }
            catch (e) {
                return `Error: ${e.message}`;
            }
        }
        // ── execution ────
        case "run_python_code": {
            // Random suffix in addition to Date.now(): spawnAgent executes tool
            // calls in parallel (Promise.all), so two run_python_code calls in
            // the same batch could otherwise collide on the same temp filename.
            const tmp = (0, path_1.join)((0, os_1.tmpdir)(), `sub_agent_py_${Date.now()}_${Math.random().toString(36).slice(2)}.py`);
            try {
                await (0, promises_1.writeFile)(tmp, String(args.code ?? ""), "utf-8");
                const r = await (0, sandbox_1.run)(ctx.pythonBin, [tmp], {
                    stdin: args.stdin ? String(args.stdin) : undefined,
                    timeout: ctx.timeout,
                });
                return JSON.stringify({ returncode: r.code, stdout: r.stdout, stderr: r.stderr });
            }
            finally {
                // Best-effort cleanup; a failed unlink must not mask the result.
                (0, promises_1.unlink)(tmp).catch(() => { });
            }
        }
        case "run_command": {
            const r = await (0, sandbox_1.run)("/bin/bash", ["-c", String(args.command ?? "")], {
                cwd: ws,
                stdin: args.stdin ? String(args.stdin) : undefined,
                timeout: ctx.timeout,
            });
            return JSON.stringify({ returncode: r.code, stdout: r.stdout, stderr: r.stderr });
        }
        // ── memory ────
        case "memory_save":
            return (0, memory_1.memorySave)(ws, String(args.key ?? ""), String(args.content ?? ""), args.tags ?? []);
        case "memory_recall":
            return (0, memory_1.memoryRecall)(ws, String(args.query ?? ""), Number(args.limit ?? 5), args.tags ?? []);
        default:
            return `Unknown tool: "${name}". Available: read_file, write_file, list_directory, run_python_code, run_command, memory_save, memory_recall`;
    }
}
// Tool definitions sent to the sub-agent model
// Builds one OpenAI-style function-tool definition (JSON Schema parameters).
const defineTool = (name, description, properties, required) => ({
    name,
    description,
    parameters: { type: "object", properties, required },
});
// Tool definitions advertised to the sub-agent model.
const SUB_AGENT_TOOLS = [
    defineTool("read_file", "Read a file from the workspace.",
        { path: { type: "string" } }, ["path"]),
    defineTool("write_file", "Write content to a file in the workspace.",
        { path: { type: "string" }, content: { type: "string" } }, ["path", "content"]),
    defineTool("list_directory", "List files in a directory.",
        { path: { type: "string" } }, ["path"]),
    defineTool("run_python_code", "Execute a Python code snippet.",
        { code: { type: "string" }, stdin: { type: "string" } }, ["code"]),
    defineTool("run_command", "Run a shell command.",
        { command: { type: "string" } }, ["command"]),
    defineTool("memory_save", "Save a fact or solution to persistent memory.",
        { key: { type: "string" }, content: { type: "string" }, tags: { type: "array", items: { type: "string" } } }, ["key", "content"]),
    defineTool("memory_recall", "Search persistent memory.",
        { query: { type: "string" }, limit: { type: "number" } }, ["query"]),
];
/**
 * Run the sub-agent loop: send the task to the model, execute any tool calls
 * it requests, feed the results back, and repeat until the model answers
 * without tool calls or `maxIterations` is exhausted.
 *
 * @param {object} opts
 * @param {string} opts.endpoint         OpenAI-compatible server base URL.
 * @param {string} opts.preferredModelId Optional dedicated sub-agent model.
 * @param {number} opts.maxIterations    Cap on chat round-trips.
 * @param {string} opts.systemPrompt     Override for the default system prompt.
 * @param {string} opts.task             The task text for the sub-agent.
 * @param {string} opts.context          Optional background injected before the task.
 * @param {object} opts.toolCtx          Context forwarded to execTool.
 * @param {boolean} opts.allowTools      Whether to advertise tools at all.
 * @returns {Promise<string>} The sub-agent's final answer or an error message.
 */
async function spawnAgent(opts) {
    const { endpoint, preferredModelId, maxIterations, systemPrompt, task, context, toolCtx, allowTools, } = opts;
    const modelId = await resolveModel(endpoint, preferredModelId);
    const tools = allowTools ? SUB_AGENT_TOOLS : [];
    // Generous HTTP timeout: the tool budget (seconds → ms) plus 30s of slack.
    const timeoutMs = toolCtx.timeout * 1000 + 30_000;
    const DEFAULT_SYSTEM = `You are a focused sub-agent. Complete the assigned task using available tools.
Be concise. When the task is complete, give a clear final answer without further tool calls.`;
    const messages = [{ role: "system", content: systemPrompt || DEFAULT_SYSTEM }];
    // Optional context is injected as a primed user/assistant exchange.
    if (context.trim()) {
        messages.push({ role: "user", content: `Context:\n${context}` });
        messages.push({ role: "assistant", content: "Understood. Ready for the task." });
    }
    messages.push({ role: "user", content: task });
    for (let iteration = 0; iteration < maxIterations; iteration++) {
        let resp;
        try {
            resp = await chatCompletion(endpoint, modelId, messages, tools, timeoutMs);
        }
        catch (e) {
            return `Sub-agent API error: ${e.message}`;
        }
        const msg = resp.choices[0]?.message;
        if (!msg) {
            return "Sub-agent returned empty response.";
        }
        messages.push(msg);
        const calls = msg.tool_calls ?? [];
        // No tool calls means the model has produced its final answer.
        if (calls.length === 0) {
            return msg.content?.trim() || "(no response)";
        }
        // Execute every requested tool concurrently; per-call failures become
        // "Error: …" strings so one bad call doesn't abort the batch.
        const settled = await Promise.all(calls.map((call) => execTool(call.function.name, call.function.arguments, toolCtx)
            .then((output) => ({ call, output }))
            .catch((err) => ({ call, output: `Error: ${err.message}` }))));
        for (const { call, output } of settled) {
            messages.push({ role: "tool", tool_call_id: call.id, content: output });
        }
    }
    return `Sub-agent reached max iterations (${maxIterations}). Last response: ${messages[messages.length - 1].content ?? "(empty)"}`;
}