// Forked from walti/openai-compat-endpoint
// src/config.ts
import { createConfigSchematics } from "@lmstudio/sdk";
/**
 * Plugin-wide settings shared by every chat.
 *
 * Sentinel conventions (enforced by the consuming request code, not here —
 * NOTE(review): confirm against the request builder):
 *  - numeric fields use -1 to mean "omit from the request",
 *  - `defaultMaxTokens` uses 0 to mean "omit / provider default",
 *  - string fields use "provider_default" / "disabled" as documented per field.
 */
export const globalConfigSchematics = createConfigSchematics()
  // ---- Provider connection ----
  .field("apiKey", "string", { displayName: "API Key", isProtected: true, placeholder: "sk-..." }, "")
  .field(
    "baseUrl",
    "string",
    {
      displayName: "Base URL",
      subtitle: "Base URL for OpenAI-compatible API calls.",
      placeholder: "https://api.deepseek.com/v1",
    },
    "https://api.deepseek.com/v1",
  )
  .field(
    "defaultModel",
    "string",
    {
      displayName: "Default Model",
      subtitle: "Used when the chat does not override the model.",
      placeholder: "deepseek-v4-flash",
    },
    "deepseek-v4-flash",
  )
  // ---- Default sampling parameters ----
  .field(
    "defaultTemperature",
    "numeric",
    { displayName: "Default Temperature", subtitle: "Default sampling temperature. Use -1 to omit." },
    0.1,
  )
  .field(
    "defaultTopP",
    "numeric",
    { displayName: "Default Top P", subtitle: "Default nucleus sampling. Use -1 to omit." },
    -1,
  )
  .field(
    "defaultMaxTokens",
    "numeric",
    { displayName: "Default Max Tokens", subtitle: "Maximum output tokens. Use 0 to omit/provider default." },
    4000,
  )
  .field(
    "defaultPresencePenalty",
    "numeric",
    { displayName: "Default Presence Penalty", subtitle: "Use 0 for neutral/default." },
    0,
  )
  .field(
    "defaultFrequencyPenalty",
    "numeric",
    { displayName: "Default Frequency Penalty", subtitle: "Use 0 for neutral/default." },
    0,
  )
  // ---- Reasoning / provider-specific behavior ----
  .field(
    "reasoningEffort",
    "string",
    {
      displayName: "Reasoning Effort",
      subtitle: "Provider-specific. Use provider_default to omit.",
      placeholder: "provider_default",
    },
    "provider_default",
  )
  .field(
    "deepSeekThinking",
    "string",
    {
      displayName: "DeepSeek Thinking",
      subtitle: "Use disabled for MCP/tool workflows unless reasoning_content is round-tripped.",
      placeholder: "disabled",
    },
    "disabled",
  )
  // ---- Tooling ----
  .field(
    "enableTools",
    "boolean",
    { displayName: "Enable MCP Tools", subtitle: "Send LM Studio/MCP tools to the provider." },
    true,
  )
  // ---- Prompt-size management ----
  .field(
    "maxMessages",
    "numeric",
    { displayName: "Max Messages", subtitle: "Maximum recent chat messages sent to the provider. 0 = all." },
    20,
  )
  .field(
    "maxToolResultChars",
    "numeric",
    { displayName: "Max Tool Result Chars", subtitle: "Truncate large tool results to reduce prompt size." },
    30000,
  )
  .field(
    "maxPromptChars",
    "numeric",
    { displayName: "Max Prompt Chars", subtitle: "Block requests above this approximate prompt size." },
    200000,
  )
  .field(
    "abortIfPromptTooLarge",
    "boolean",
    { displayName: "Abort If Prompt Too Large", subtitle: "Abort instead of sending huge prompts to the provider." },
    true,
  )
  .field(
    "forgetHistoryAfterCheckpoint",
    "boolean",
    {
      displayName: "Forget History After Checkpoint",
      subtitle: "After a saved RE checkpoint marker, omit earlier chat history from provider requests.",
    },
    true,
  )
  .build();
/**
 * Per-chat settings that override the global schematics above.
 *
 * Sentinel conventions (resolved by the consuming request code —
 * NOTE(review): confirm against the request builder):
 *  - numeric overrides use -1 to mean "fall back to the global default",
 *  - string overrides use "" (empty) to mean "fall back to the global setting".
 */
export const configSchematics = createConfigSchematics()
  .field(
    "modelOverride",
    "string",
    {
      displayName: "Model Override",
      subtitle: "Optional. Leave empty to use global default model.",
      placeholder: "deepseek-v4-pro",
    },
    "",
  )
  .field(
    "temperatureOverride",
    "numeric",
    { displayName: "Temperature Override", subtitle: "Use -1 to use global default." },
    -1,
  )
  .field(
    "topPOverride",
    "numeric",
    { displayName: "Top P Override", subtitle: "Use -1 to use global default." },
    -1,
  )
  .field(
    "maxTokensOverride",
    "numeric",
    { displayName: "Max Tokens Override", subtitle: "Use -1 to use global default. Use 0 to omit/provider default." },
    -1,
  )
  .field(
    "presencePenaltyOverride",
    "numeric",
    { displayName: "Presence Penalty Override", subtitle: "Use -1 to use global default." },
    -1,
  )
  .field(
    "frequencyPenaltyOverride",
    "numeric",
    { displayName: "Frequency Penalty Override", subtitle: "Use -1 to use global default." },
    -1,
  )
  .field(
    "reasoningEffortOverride",
    "string",
    {
      displayName: "Reasoning Effort Override",
      subtitle: "Optional. Leave empty to use global setting.",
      placeholder: "provider_default",
    },
    "",
  )
  .field(
    "debug",
    "boolean",
    { displayName: "Debug Logging", subtitle: "Log request payloads and internal debug details." },
    false,
  )
  .build();