PLUGIN
Report · 15 Downloads
OpenAI Compatible API Adapter for LM Studio
Project Files
src / generator.ts
import { type Chat, type GeneratorController, type InferParsedConfig } from "@lmstudio/sdk";
import OpenAI from "openai";
import {
type ChatCompletionMessageParam,
type ChatCompletionMessageToolCall,
type ChatCompletionTool,
type ChatCompletionToolMessageParam,
} from "openai/resources/index";
import { configSchematics } from "./config";
/* -------------------------------------------------------------------------- */
/* Types */
/* -------------------------------------------------------------------------- */
/**
 * Accumulated state for one tool call being streamed incrementally.
 * Deltas arrive id-first, then name, then argument fragments; fields are
 * filled in as the corresponding pieces arrive (see consumeStream).
 */
type ToolCallState = {
  // Provider-assigned tool_call id, taken from the first delta for this call.
  id: string;
  // null until the name delta arrives; a call with a null name is never flushed.
  name: string | null;
  // Position in the `tool_calls` delta array; also the key in the tracking map.
  index: number;
  // Raw JSON text of the arguments, concatenated from streamed fragments.
  arguments: string;
};
/* -------------------------------------------------------------------------- */
/* Build helpers */
/* -------------------------------------------------------------------------- */
/** Build a pre-configured OpenAI client. */
/**
 * Build a pre-configured OpenAI client from the plugin config.
 * An empty/whitespace base-URL override falls back to LM Studio's default
 * local server; a missing API key is replaced by a harmless placeholder
 * because the SDK insists on a non-empty value.
 */
function createOpenAI(
  config: InferParsedConfig<typeof configSchematics>,
) {
  const overrideUrl = config.get("overrideBaseUrlv2").trim();
  const effectiveBaseUrl =
    overrideUrl === "" ? "http://127.0.0.1:1234/v1" : overrideUrl;
  const effectiveApiKey = config.get("apiKeyv2") || "not-needed";
  return new OpenAI({
    apiKey: effectiveApiKey,
    baseURL: effectiveBaseUrl,
  });
}
/** Convert internal chat history to the format expected by OpenAI. */
/**
 * Convert internal chat history to OpenAI chat-completion messages.
 * Assistant messages carry their tool-call requests (when any), and each
 * tool result becomes its own "tool" role message.
 */
function toOpenAIMessages(history: Chat): ChatCompletionMessageParam[] {
  const out: ChatCompletionMessageParam[] = [];
  for (const msg of history) {
    const role = msg.getRole();
    if (role === "system") {
      out.push({ role: "system", content: msg.getText() });
    } else if (role === "user") {
      out.push({ role: "user", content: msg.getText() });
    } else if (role === "assistant") {
      const calls: ChatCompletionMessageToolCall[] = msg
        .getToolCallRequests()
        .map(req => ({
          id: req.id ?? "",
          type: "function",
          function: {
            name: req.name,
            // OpenAI expects the arguments as a JSON string, not an object.
            arguments: JSON.stringify(req.arguments ?? {}),
          },
        }));
      if (calls.length > 0) {
        out.push({ role: "assistant", content: msg.getText(), tool_calls: calls });
      } else {
        // Omit `tool_calls` entirely when there are none.
        out.push({ role: "assistant", content: msg.getText() });
      }
    } else if (role === "tool") {
      for (const result of msg.getToolCallResults()) {
        out.push({
          role: "tool",
          tool_call_id: result.toolCallId ?? "",
          content: result.content,
        } as ChatCompletionToolMessageParam);
      }
    }
  }
  return out;
}
/** Convert LM Studio tool definitions to OpenAI function-tool descriptors. */
/**
 * Convert LM Studio tool definitions to OpenAI function-tool descriptors.
 * Returns undefined (rather than an empty array) when no tools are
 * registered, so the `tools` field can be omitted from the request.
 */
function toOpenAITools(ctl: GeneratorController): ChatCompletionTool[] | undefined {
  const definitions = ctl.getToolDefinitions();
  if (definitions.length === 0) {
    return undefined;
  }
  return definitions.map<ChatCompletionTool>(def => ({
    type: "function" as const,
    function: {
      name: def.function.name,
      description: def.function.description,
      // A tool with no declared parameters gets an empty schema object.
      parameters: def.function.parameters ?? {},
    },
  }));
}
/* -------------------------------------------------------------------------- */
/* Stream-handling utils */
/* -------------------------------------------------------------------------- */
/** Abort the underlying HTTP stream when the user cancels generation. */
function wireAbort(ctl: GeneratorController, stream: { controller: AbortController }) {
  const handleAbort = () => {
    console.info("Generation aborted by user.");
    stream.controller.abort();
  };
  ctl.onAborted(handleAbort);
}
/**
 * Drain the streaming chat-completion response, forwarding text/reasoning
 * fragments to the controller and reassembling incrementally-streamed tool
 * calls.
 *
 * Tool-call deltas arrive in pieces (id first, then name, then argument
 * fragments), keyed by `index`. Each call is accumulated in `toolCalls`
 * and flushed when the stream ends, or earlier if a new call reuses the
 * same index.
 */
async function consumeStream(stream: AsyncIterable<any>, ctl: GeneratorController) {
  // In-flight tool calls, keyed by the provider-supplied delta index.
  const toolCalls = new Map<number, ToolCallState>();

  /**
   * Some models emit integers/arrays as JSON strings; coerce such values in
   * place to match the declared parameter types so tools receive real values.
   */
  function coerceArgumentTypes(toolName: string, args: Record<string, unknown>) {
    const toolDef = ctl.getToolDefinitions().find(t => t.function.name === toolName);
    const properties = toolDef?.function.parameters?.properties;
    if (!properties) return;
    for (const [key, value] of Object.entries(args)) {
      if (typeof value !== "string") continue;
      const paramDef = properties[key];
      if (paramDef?.type === "integer") {
        const numValue = parseInt(value, 10);
        if (!Number.isNaN(numValue)) {
          args[key] = numValue;
        }
      } else if (paramDef?.type === "array") {
        try {
          const arrayValue = JSON.parse(value);
          if (Array.isArray(arrayValue)) {
            args[key] = arrayValue;
          }
        } catch {
          // Not valid JSON — leave the raw string for the tool to handle.
        }
      }
    }
  }

  /** Parse the accumulated argument text and report the finished call. */
  function flushToolCall(toolCall: ToolCallState) {
    if (toolCall.name === null) {
      console.warn(`Cannot flush tool call at index ${toolCall.index}: name is null`);
      return;
    }
    try {
      const parsedArgs: Record<string, unknown> =
        toolCall.arguments ? JSON.parse(toolCall.arguments) : {};
      coerceArgumentTypes(toolCall.name, parsedArgs);
      ctl.toolCallGenerationEnded({
        type: "function",
        name: toolCall.name,
        arguments: parsedArgs,
        id: toolCall.id,
      });
    } catch (error) {
      console.error(`Failed to parse tool call arguments at index ${toolCall.index}: ${toolCall.arguments}`, error);
    }
  }

  for await (const chunk of stream) {
    const delta = chunk.choices?.[0]?.delta as
      | {
          content?: string;
          reasoning?: string;
          tool_calls?: Array<{
            index: number;
            id?: string;
            function?: { name?: string; arguments?: string };
          }>;
        }
      | undefined;
    if (!delta) continue;

    // Reasoning ("thinking") tokens stream in a separate delta field.
    if (delta.reasoning) {
      ctl.fragmentGenerated(delta.reasoning, { reasoningType: "reasoning" });
    }
    // Plain text streaming.
    if (delta.content) {
      ctl.fragmentGenerated(delta.content);
    }

    // Tool-call streaming.
    for (const toolCall of delta.tool_calls ?? []) {
      const index = toolCall.index;
      // A present id marks the start of a new tool call at this index.
      if (toolCall.id != null) {
        if (toolCalls.has(index)) {
          // Index reuse: the previous call at this slot is complete — flush it.
          console.warn(`Tool call at index ${index} already exists, flushing before starting new one`);
          flushToolCall(toolCalls.get(index)!);
          toolCalls.delete(index);
        }
        toolCalls.set(index, { id: toolCall.id, name: null, index, arguments: "" });
        ctl.toolCallGenerationStarted();
      }
      const currentToolCall = toolCalls.get(index);
      if (!currentToolCall) {
        // Fragment arrived before any id for this index — nothing to attach it to.
        continue;
      }
      if (toolCall.function?.name) {
        currentToolCall.name = toolCall.function.name;
        ctl.toolCallGenerationNameReceived(toolCall.function.name);
      }
      if (toolCall.function?.arguments) {
        currentToolCall.arguments += toolCall.function.arguments;
        ctl.toolCallGenerationArgumentFragmentGenerated(toolCall.function.arguments);
      }
    }
    // Intentionally no flush on finish_reason: some servers send "stop"
    // before the final argument fragments, which would truncate the call.
  }

  // Stream ended: flush anything still pending. (Deleting entries while
  // iterating a Map is well-defined in JS.)
  for (const [index, toolCall] of toolCalls.entries()) {
    flushToolCall(toolCall);
    toolCalls.delete(index);
  }
}
/* -------------------------------------------------------------------------- */
/* API */
/* -------------------------------------------------------------------------- */
/**
 * Plugin entry point: run one generation round against the configured
 * OpenAI-compatible endpoint, streaming results back through `ctl`.
 *
 * @param ctl     LM Studio generator controller (config, fragments, tools).
 * @param history Chat history to send as the completion prompt.
 */
export async function generate(ctl: GeneratorController, history: Chat) {
  const config = ctl.getPluginConfig(configSchematics);
  const model = config.get("modelv2");
  const temperature = config.get("temperature");

  /* 1. Setup client & payload */
  const openai = createOpenAI(config);
  const messages = toOpenAIMessages(history);
  // undefined when no tools are registered, so the field is omitted entirely.
  const tools = toOpenAITools(ctl);

  /* 2. Kick off streaming completion. Passing the literal with
     `stream: true as const` selects the SDK's streaming overload, so the
     result is a typed Stream (async-iterable, with an AbortController) and
     no `as any` casts are needed downstream. */
  const stream = await openai.chat.completions.create({
    model,
    messages,
    tools,
    temperature,
    stream: true as const,
  });

  /* 3. Abort wiring & stream processing */
  wireAbort(ctl, stream);
  await consumeStream(stream, ctl);
}
src / generator.ts
import { type Chat, type GeneratorController, type InferParsedConfig } from "@lmstudio/sdk";
import OpenAI from "openai";
import {
type ChatCompletionMessageParam,
type ChatCompletionMessageToolCall,
type ChatCompletionTool,
type ChatCompletionToolMessageParam,
} from "openai/resources/index";
import { configSchematics } from "./config";
/* -------------------------------------------------------------------------- */
/* Types */
/* -------------------------------------------------------------------------- */
/**
 * Accumulated state for one tool call being streamed incrementally.
 * Deltas arrive id-first, then name, then argument fragments; fields are
 * filled in as the corresponding pieces arrive (see consumeStream).
 */
type ToolCallState = {
  // Provider-assigned tool_call id, taken from the first delta for this call.
  id: string;
  // null until the name delta arrives; a call with a null name is never flushed.
  name: string | null;
  // Position in the `tool_calls` delta array; also the key in the tracking map.
  index: number;
  // Raw JSON text of the arguments, concatenated from streamed fragments.
  arguments: string;
};
/* -------------------------------------------------------------------------- */
/* Build helpers */
/* -------------------------------------------------------------------------- */
/** Build a pre-configured OpenAI client. */
/**
 * Build a pre-configured OpenAI client from the plugin config.
 * An empty/whitespace base-URL override falls back to LM Studio's default
 * local server; a missing API key is replaced by a harmless placeholder
 * because the SDK insists on a non-empty value.
 */
function createOpenAI(
  config: InferParsedConfig<typeof configSchematics>,
) {
  const overrideUrl = config.get("overrideBaseUrlv2").trim();
  const effectiveBaseUrl =
    overrideUrl === "" ? "http://127.0.0.1:1234/v1" : overrideUrl;
  const effectiveApiKey = config.get("apiKeyv2") || "not-needed";
  return new OpenAI({
    apiKey: effectiveApiKey,
    baseURL: effectiveBaseUrl,
  });
}
/** Convert internal chat history to the format expected by OpenAI. */
/**
 * Convert internal chat history to OpenAI chat-completion messages.
 * Assistant messages carry their tool-call requests (when any), and each
 * tool result becomes its own "tool" role message.
 */
function toOpenAIMessages(history: Chat): ChatCompletionMessageParam[] {
  const out: ChatCompletionMessageParam[] = [];
  for (const msg of history) {
    const role = msg.getRole();
    if (role === "system") {
      out.push({ role: "system", content: msg.getText() });
    } else if (role === "user") {
      out.push({ role: "user", content: msg.getText() });
    } else if (role === "assistant") {
      const calls: ChatCompletionMessageToolCall[] = msg
        .getToolCallRequests()
        .map(req => ({
          id: req.id ?? "",
          type: "function",
          function: {
            name: req.name,
            // OpenAI expects the arguments as a JSON string, not an object.
            arguments: JSON.stringify(req.arguments ?? {}),
          },
        }));
      if (calls.length > 0) {
        out.push({ role: "assistant", content: msg.getText(), tool_calls: calls });
      } else {
        // Omit `tool_calls` entirely when there are none.
        out.push({ role: "assistant", content: msg.getText() });
      }
    } else if (role === "tool") {
      for (const result of msg.getToolCallResults()) {
        out.push({
          role: "tool",
          tool_call_id: result.toolCallId ?? "",
          content: result.content,
        } as ChatCompletionToolMessageParam);
      }
    }
  }
  return out;
}
/** Convert LM Studio tool definitions to OpenAI function-tool descriptors. */
/**
 * Convert LM Studio tool definitions to OpenAI function-tool descriptors.
 * Returns undefined (rather than an empty array) when no tools are
 * registered, so the `tools` field can be omitted from the request.
 */
function toOpenAITools(ctl: GeneratorController): ChatCompletionTool[] | undefined {
  const definitions = ctl.getToolDefinitions();
  if (definitions.length === 0) {
    return undefined;
  }
  return definitions.map<ChatCompletionTool>(def => ({
    type: "function" as const,
    function: {
      name: def.function.name,
      description: def.function.description,
      // A tool with no declared parameters gets an empty schema object.
      parameters: def.function.parameters ?? {},
    },
  }));
}
/* -------------------------------------------------------------------------- */
/* Stream-handling utils */
/* -------------------------------------------------------------------------- */
/** Abort the underlying HTTP stream when the user cancels generation. */
function wireAbort(ctl: GeneratorController, stream: { controller: AbortController }) {
  const handleAbort = () => {
    console.info("Generation aborted by user.");
    stream.controller.abort();
  };
  ctl.onAborted(handleAbort);
}
/**
 * Drain the streaming chat-completion response, forwarding text/reasoning
 * fragments to the controller and reassembling incrementally-streamed tool
 * calls.
 *
 * Tool-call deltas arrive in pieces (id first, then name, then argument
 * fragments), keyed by `index`. Each call is accumulated in `toolCalls`
 * and flushed when the stream ends, or earlier if a new call reuses the
 * same index.
 */
async function consumeStream(stream: AsyncIterable<any>, ctl: GeneratorController) {
  // In-flight tool calls, keyed by the provider-supplied delta index.
  const toolCalls = new Map<number, ToolCallState>();

  /**
   * Some models emit integers/arrays as JSON strings; coerce such values in
   * place to match the declared parameter types so tools receive real values.
   */
  function coerceArgumentTypes(toolName: string, args: Record<string, unknown>) {
    const toolDef = ctl.getToolDefinitions().find(t => t.function.name === toolName);
    const properties = toolDef?.function.parameters?.properties;
    if (!properties) return;
    for (const [key, value] of Object.entries(args)) {
      if (typeof value !== "string") continue;
      const paramDef = properties[key];
      if (paramDef?.type === "integer") {
        const numValue = parseInt(value, 10);
        if (!Number.isNaN(numValue)) {
          args[key] = numValue;
        }
      } else if (paramDef?.type === "array") {
        try {
          const arrayValue = JSON.parse(value);
          if (Array.isArray(arrayValue)) {
            args[key] = arrayValue;
          }
        } catch {
          // Not valid JSON — leave the raw string for the tool to handle.
        }
      }
    }
  }

  /** Parse the accumulated argument text and report the finished call. */
  function flushToolCall(toolCall: ToolCallState) {
    if (toolCall.name === null) {
      console.warn(`Cannot flush tool call at index ${toolCall.index}: name is null`);
      return;
    }
    try {
      const parsedArgs: Record<string, unknown> =
        toolCall.arguments ? JSON.parse(toolCall.arguments) : {};
      coerceArgumentTypes(toolCall.name, parsedArgs);
      ctl.toolCallGenerationEnded({
        type: "function",
        name: toolCall.name,
        arguments: parsedArgs,
        id: toolCall.id,
      });
    } catch (error) {
      console.error(`Failed to parse tool call arguments at index ${toolCall.index}: ${toolCall.arguments}`, error);
    }
  }

  for await (const chunk of stream) {
    const delta = chunk.choices?.[0]?.delta as
      | {
          content?: string;
          reasoning?: string;
          tool_calls?: Array<{
            index: number;
            id?: string;
            function?: { name?: string; arguments?: string };
          }>;
        }
      | undefined;
    if (!delta) continue;

    // Reasoning ("thinking") tokens stream in a separate delta field.
    if (delta.reasoning) {
      ctl.fragmentGenerated(delta.reasoning, { reasoningType: "reasoning" });
    }
    // Plain text streaming.
    if (delta.content) {
      ctl.fragmentGenerated(delta.content);
    }

    // Tool-call streaming.
    for (const toolCall of delta.tool_calls ?? []) {
      const index = toolCall.index;
      // A present id marks the start of a new tool call at this index.
      if (toolCall.id != null) {
        if (toolCalls.has(index)) {
          // Index reuse: the previous call at this slot is complete — flush it.
          console.warn(`Tool call at index ${index} already exists, flushing before starting new one`);
          flushToolCall(toolCalls.get(index)!);
          toolCalls.delete(index);
        }
        toolCalls.set(index, { id: toolCall.id, name: null, index, arguments: "" });
        ctl.toolCallGenerationStarted();
      }
      const currentToolCall = toolCalls.get(index);
      if (!currentToolCall) {
        // Fragment arrived before any id for this index — nothing to attach it to.
        continue;
      }
      if (toolCall.function?.name) {
        currentToolCall.name = toolCall.function.name;
        ctl.toolCallGenerationNameReceived(toolCall.function.name);
      }
      if (toolCall.function?.arguments) {
        currentToolCall.arguments += toolCall.function.arguments;
        ctl.toolCallGenerationArgumentFragmentGenerated(toolCall.function.arguments);
      }
    }
    // Intentionally no flush on finish_reason: some servers send "stop"
    // before the final argument fragments, which would truncate the call.
  }

  // Stream ended: flush anything still pending. (Deleting entries while
  // iterating a Map is well-defined in JS.)
  for (const [index, toolCall] of toolCalls.entries()) {
    flushToolCall(toolCall);
    toolCalls.delete(index);
  }
}
/* -------------------------------------------------------------------------- */
/* API */
/* -------------------------------------------------------------------------- */
/**
 * Plugin entry point: run one generation round against the configured
 * OpenAI-compatible endpoint, streaming results back through `ctl`.
 *
 * @param ctl     LM Studio generator controller (config, fragments, tools).
 * @param history Chat history to send as the completion prompt.
 */
export async function generate(ctl: GeneratorController, history: Chat) {
  const config = ctl.getPluginConfig(configSchematics);
  const model = config.get("modelv2");
  const temperature = config.get("temperature");

  /* 1. Setup client & payload */
  const openai = createOpenAI(config);
  const messages = toOpenAIMessages(history);
  // undefined when no tools are registered, so the field is omitted entirely.
  const tools = toOpenAITools(ctl);

  /* 2. Kick off streaming completion. Passing the literal with
     `stream: true as const` selects the SDK's streaming overload, so the
     result is a typed Stream (async-iterable, with an AbortController) and
     no `as any` casts are needed downstream. */
  const stream = await openai.chat.completions.create({
    model,
    messages,
    tools,
    temperature,
    stream: true as const,
  });

  /* 3. Abort wiring & stream processing */
  wireAbort(ctl, stream);
  await consumeStream(stream, ctl);
}