"We have chatgpt at home" This is a work in progress!
Project Files
src/generator.ts
import {
type Chat,
type GeneratorController,
type InferParsedConfig,
} from "@lmstudio/sdk";
import OpenAI from "openai";
import { configSchematics, globalConfigSchematics } from "./config";
import type {
  ResponseInput,
  ResponseInputItem,
  Tool,
} from "openai/resources/responses/responses";
import type { ReasoningEffort } from "openai/resources/shared";
/* -------------------------------------------------------------------------- */
/* Build helpers */
/* -------------------------------------------------------------------------- */
/** Build a pre-configured OpenAI client. */
function createOpenAI(
globalConfig: InferParsedConfig<typeof globalConfigSchematics>
) {
const apiKey = globalConfig.get("openaiApiKey");
return new OpenAI({
apiKey,
baseURL: "https://api.openai.com/v1",
});
}
/** Convert internal chat history to the format expected by OpenAI. */
function toOpenAIMessages(history: Chat): ResponseInput {
const messages: ResponseInputItem[] = [];
for (const message of history) {
switch (message.getRole()) {
case "system":
messages.push({ role: "system", content: message.getText() });
break;
case "user":
messages.push({ role: "user", content: message.getText() });
break;
case "assistant": {
// const toolCalls: ChatCompletionMessageToolCall[] = message
// .getToolCallRequests()
// .map(toolCall => ({
// id: toolCall.id ?? "",
// type: "function",
// function: {
// name: toolCall.name,
// arguments: JSON.stringify(toolCall.arguments ?? {}),
// },
// }));
messages.push({
role: "assistant",
content: message.getText(),
// ...(toolCalls.length ? { tool_calls: toolCalls } : {}),
});
break;
}
// case "tool": {
// message.getToolCallResults().forEach(toolCallResult => {
// messages.push({
// role: "tool",
// tool_call_id: toolCallResult.toolCallId ?? "",
// content: toolCallResult.content,
// } as ChatCompletionToolMessageParam);
// });
// break;
// }
}
}
return messages;
}
/* -------------------------------------------------------------------------- */
/* Stream-handling utils */
/* -------------------------------------------------------------------------- */
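/** Forward LM Studio's abort signal to the in-flight OpenAI stream. */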
function wireAbort(
ctl: GeneratorController,
stream: { controller: AbortController }
) {
ctl.onAborted(() => {
console.info("Generation aborted by user.");
stream.controller.abort();
});
}
/* -------------------------------------------------------------------------- */
/* API */
/* -------------------------------------------------------------------------- */
export async function generate(ctl: GeneratorController, history: Chat) {
const config = ctl.getPluginConfig(configSchematics);
const model = config.get("model");
const verbosity = config.get("verbosity") as
| "low"
| "medium"
| "high"
| null
| undefined;
const reasoningEffort = config.get("reasoningEffort");
const webSearch = config.get("webSearch");
const globalConfig = ctl.getGlobalPluginConfig(globalConfigSchematics);
/* 1. Setup client & payload */
const openai = createOpenAI(globalConfig);
/* 2. Kick off streaming response */
const messages = toOpenAIMessages(history);
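  // Hosted tools: the code interpreter is always enabled; web search is
  // opt-in via the plugin config.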
  const tools: Array<Tool> = [
    {
      type: "code_interpreter",
      container: { type: "auto" },
    },
  ];
  if (webSearch === "on") {
    tools.push({ type: "web_search" });
  }
  const stream = await openai.responses.create({
    model,
    input: messages,
    text: {
      verbosity,
    },
    reasoning: {
      effort: reasoningEffort as ReasoningEffort,
      summary: "detailed",
    },
    tools,
    stream: true,
  });
/* 3. Abort wiring & stream processing */
  wireAbort(ctl, stream);
  for await (const event of stream) {
    if (event.type === "response.output_text.delta") {
      // Visible assistant text.
      ctl.fragmentGenerated(event.delta);
    } else if (event.type === "response.reasoning_summary_text.delta") {
      // Reasoning summary, surfaced as a "thinking" fragment.
      ctl.fragmentGenerated(event.delta, { reasoningType: "reasoning" });
    }
  }
}
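The generator imports configSchematics and globalConfigSchematics from ./config, which this page does not show. A minimal sketch of what that file could look like, using the SDK's createConfigSchematics builder; the field keys come from the generator above, while the field types, display names, and defaults here are assumptions:

import { createConfigSchematics } from "@lmstudio/sdk";

// Per-chat settings surfaced in the plugin's config UI.
// Field keys must match the config.get(...) calls in generator.ts.
export const configSchematics = createConfigSchematics()
  .field("model", "string", { displayName: "Model" }, "gpt-5")
  .field("verbosity", "string", { displayName: "Verbosity (low/medium/high)" }, "medium")
  .field("reasoningEffort", "string", { displayName: "Reasoning effort" }, "medium")
  .field("webSearch", "string", { displayName: "Web search (on/off)" }, "off")
  .build();

// Application-wide settings, e.g. credentials, kept out of per-chat config.
export const globalConfigSchematics = createConfigSchematics()
  .field("openaiApiKey", "string", { displayName: "OpenAI API Key" }, "")
  .build();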
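A generator plugin also needs an entry point that registers generate with LM Studio. A minimal sketch under the same caveat; the context methods named here (withConfigSchematics, withGlobalConfigSchematics, withGenerator) are assumptions and should be checked against the scaffold that lms create generates:

// src/index.ts (hypothetical)
import { type PluginContext } from "@lmstudio/sdk";
import { configSchematics, globalConfigSchematics } from "./config";
import { generate } from "./generator";

export async function main(context: PluginContext) {
  // Expose the config fields to the UI, then register the generator.
  context.withConfigSchematics(configSchematics);
  context.withGlobalConfigSchematics(globalConfigSchematics);
  context.withGenerator(generate);
}

With that in place, running lms dev from the project folder should load the plugin locally for iteration.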