src/config.ts
// src/config.ts
import { createConfigSchematics } from "@lmstudio/sdk";

export const globalConfigSchematics = createConfigSchematics()
  /* Main remote provider */
  .field(
    "mainApiKey",
    "string",
    {
      displayName: "Main Provider API Key",
      isProtected: true,
      placeholder: "sk-...",
    },
    "",
  )
  .field(
    "mainBaseUrl",
    "string",
    {
      displayName: "Main Provider Base URL",
      subtitle: "OpenAI-compatible endpoint for the final reasoning model.",
      placeholder: "https://api.deepseek.com/v1",
    },
    "https://api.deepseek.com/v1",
  )
  .field(
    "mainModel",
    "string",
    {
      displayName: "Main Model",
      subtitle: "Remote/main model used for the final analysis.",
      placeholder: "deepseek-v4-flash",
    },
    "deepseek-v4-flash",
  )
  /* Main model generation */
  .field(
    "mainTemperature",
    "numeric",
    {
      displayName: "Main Temperature",
      subtitle: "Use -1 to omit and fall back to the provider default.",
    },
    0.1,
  )
  .field(
    "mainTopP",
    "numeric",
    {
      displayName: "Main Top P",
      subtitle: "Use -1 to omit and fall back to the provider default.",
    },
    -1,
  )
  .field(
    "mainMaxTokens",
    "numeric",
    {
      displayName: "Main Max Tokens",
      subtitle: "Use 0 to omit and fall back to the provider default.",
    },
    4000,
  )
  .field(
    "mainReasoningEffort",
    "string",
    {
      displayName: "Main Reasoning Effort",
      subtitle: "Provider-specific. Use provider_default to omit.",
      placeholder: "provider_default",
    },
    "provider_default",
  )
  .field(
    "mainReasoningDisplay",
    "string",
    {
      displayName: "Main Reasoning Display",
      subtitle: "How to display reasoning_content from the main model: hidden, status, or inline.",
      placeholder: "status",
    },
    "status",
  )
  .field(
    "deepSeekThinking",
    "string",
    {
      displayName: "DeepSeek Thinking",
      subtitle: "Use disabled for MCP/tool workflows unless reasoning_content is round-tripped.",
      placeholder: "disabled",
    },
    "disabled",
  )
  /* Local LM Studio orchestrator */
  .field(
    "localBaseUrl",
    "string",
    {
      displayName: "Local LM Studio Base URL",
      subtitle: "Native LM Studio REST API base URL. Do not include /api/v1/chat.",
      placeholder: "http://localhost:1234",
    },
    "http://localhost:1234",
  )
  .field(
    "localApiToken",
    "string",
    {
      displayName: "Local LM Studio API Token",
      subtitle: "Required only if LM Studio authentication is enabled.",
      isProtected: true,
      placeholder: "lm-studio",
    },
    "",
  )
  .field(
    "localModel",
    "string",
    {
      displayName: "Local Orchestrator Model",
      subtitle: "Local model used only for chat cleanup, intent extraction, and task preparation.",
      placeholder: "mistral-nemo-instruct-2407",
    },
    "",
  )
  .field(
    "localContextLength",
    "numeric",
    {
      displayName: "Local Context Length",
      subtitle: "context_length passed to LM Studio /api/v1/chat.",
    },
    8000,
  )
  .field(
    "localMaxOutputTokens",
    "numeric",
    {
      displayName: "Local Max Output Tokens",
      subtitle: "Maximum output for the local chat summary. Tool outputs are preserved separately by the relay.",
    },
    1000,
  )
  .field(
    "localTemperature",
    "numeric",
    {
      displayName: "Local Temperature",
      subtitle: "Use 0 for deterministic compression/planning.",
    },
    0,
  )
  .field(
    "localIntegrations",
    "string",
    {
      displayName: "Local MCP Integrations",
      subtitle: "Optional. Usually leave empty. The local model should not process raw tool output.",
      placeholder: "mcp/re-memory",
    },
    "",
  )
  .field(
    "localAllowedTools",
    "string",
    {
      displayName: "Local Allowed Tools",
      subtitle: "Comma-separated tool names for local integrations. Leave empty to expose all tools from listed integrations.",
      placeholder: "re_memory_search,re_snapshot_load_latest",
    },
    "",
  )
  .field(
    "localReasoning",
    "string",
    {
      displayName: "Local Reasoning",
      subtitle: "LM Studio /api/v1/chat reasoning setting. Use provider_default to omit.",
      placeholder: "provider_default",
    },
    "provider_default",
  )
  .field(
    "localOrchestratorTimeoutMs",
    "numeric",
    {
      displayName: "Local Orchestrator Timeout (ms)",
      subtitle: "Abort the local LM Studio orchestrator call after this time. Use 0 to disable.",
    },
    60000,
  )
  /* Prompt / relay limits */
  .field(
    "maxMessages",
    "numeric",
    {
      displayName: "Max Messages",
      subtitle: "Maximum recent chat messages included in the relay window. 0 = all.",
    },
    30,
  )
  .field(
    "maxToolResultChars",
    "numeric",
    {
      displayName: "Max Tool Output Chars",
      subtitle: "Maximum chars kept per raw tool output before deterministic relay truncation.",
    },
    30000,
  )
  .field(
    "maxPromptChars",
    "numeric",
    {
      displayName: "Max Prompt Chars",
      subtitle: "Abort if the direct prompt exceeds this many characters before orchestration.",
    },
    200000,
  )
  .field(
    "failOpen",
    "boolean",
    {
      displayName: "Fail Open",
      subtitle: "If local orchestration fails, call the main provider directly.",
    },
    true,
  )
  .field(
    "showRelayStats",
    "boolean",
    {
      displayName: "Show Relay Stats",
      subtitle: "Append local/main model size stats after each response.",
    },
    true,
  )
  .field(
    "alwaysAvailableToolPrefixes",
    "string",
    {
      displayName: "Always Available Tool Prefixes",
      subtitle: "Comma-separated tool name prefixes that are never removed by completed-action filtering.",
      placeholder: "re_memory_,re_snapshot_",
    },
    "re_memory_,re_snapshot_",
  )
  .build();

export const configSchematics = createConfigSchematics()
  .field(
    "mainModelOverride",
    "string",
    {
      displayName: "Main Model Override",
      subtitle: "Optional. Leave empty to use the global main model.",
      placeholder: "deepseek-v4-pro",
    },
    "",
  )
  .field(
    "localModelOverride",
    "string",
    {
      displayName: "Local Model Override",
      subtitle: "Optional. Leave empty to use the global local orchestrator model.",
      placeholder: "mistral-nemo-instruct-2407",
    },
    "",
  )
  .field(
    "bypassLocalOrchestrator",
    "boolean",
    {
      displayName: "Bypass Local Orchestrator",
      subtitle: "Call the main provider directly for this chat.",
    },
    false,
  )
  .field(
    "debug",
    "boolean",
    {
      displayName: "Debug Logging",
      subtitle: "Log local and main request metadata.",
    },
    false,
  )
  .build();
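
Neither schema does anything on its own; the plugin has to register both from its entry point. A minimal sketch of that wiring, assuming the @lmstudio/sdk plugin entry point main(context) and the withConfigSchematics / withGlobalConfigSchematics registration calls (verify the exact method names against the SDK version in use; the relay handler registration is only indicated as a comment):

src/index.ts (sketch)
// src/index.ts
import { type PluginContext } from "@lmstudio/sdk";
import { configSchematics, globalConfigSchematics } from "./config";

export async function main(context: PluginContext) {
  // Global fields (API keys, endpoints, relay limits) are configured once for the plugin.
  context.withGlobalConfigSchematics(globalConfigSchematics);
  // Per-chat fields (overrides, bypass, debug) appear in each chat's plugin settings.
  context.withConfigSchematics(configSchematics);
  // The relay handler itself (generator or prompt preprocessor) would also be registered here.
}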
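
Inside the handler, the parsed values can then be read, with empty per-chat overrides falling back to the global fields. A sketch under the assumption that the controller exposes getPluginConfig / getGlobalPluginConfig accessors returning .get()-style parsed configs; the field names match config.ts above, but the GeneratorController type is an assumption and should match whichever hook the relay actually uses:

resolving effective settings (sketch)
import { type GeneratorController } from "@lmstudio/sdk";
import { configSchematics, globalConfigSchematics } from "./config";

function resolveModels(ctl: GeneratorController) {
  const chatConfig = ctl.getPluginConfig(configSchematics);
  const globalConfig = ctl.getGlobalPluginConfig(globalConfigSchematics);

  // Empty per-chat overrides fall back to the global settings.
  const mainModel = chatConfig.get("mainModelOverride") || globalConfig.get("mainModel");
  const localModel = chatConfig.get("localModelOverride") || globalConfig.get("localModel");

  return {
    mainModel,
    localModel,
    bypassLocalOrchestrator: chatConfig.get("bypassLocalOrchestrator"),
    debug: chatConfig.get("debug"),
  };
}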