Merge pull request #84 from lmstudio-ai/ryan/service-ctl

Update lms to use LM Studio's new service architecture
This commit is contained in:
ryan-the-crayon
2024-10-15 16:26:31 -04:00
committed by GitHub
3 changed files with 114 additions and 303 deletions

View File

@ -1,34 +1,22 @@
import { SimpleLogger, text } from "@lmstudio/lms-common";
import { apiServerPorts, type SimpleLogger, text } from "@lmstudio/lms-common";
import { LMStudioClient } from "@lmstudio/sdk";
import chalk from "chalk";
import { flag, option, optional, string } from "cmd-ts";
import inquirer from "inquirer";
import { platform } from "os";
import { clearLine, moveCursor } from "readline";
import { getCliPref } from "./cliPref";
import { type LogLevelArgs, type LogLevelMap } from "./logLevel";
import {
checkHttpServer,
getServerConfig,
startServer,
type StartServerOpts,
} from "./subcommands/server";
import { spawn } from "child_process";
import { option, optional, string } from "cmd-ts";
import { readFile } from "fs/promises";
import { homedir } from "os";
import path from "path";
import { type LogLevelArgs } from "./logLevel";
import { checkHttpServer } from "./subcommands/server";
import { refinedNumber } from "./types/refinedNumber";
// Shape of the app-install-location pointer file (JSON) that records how to
// relaunch the LM Studio application (consumed by wakeUpService below).
interface AppInstallLocation {
// Executable path passed to spawn().
path: string;
// argv the app was started with; argv[1] === "." indicates a development environment.
argv: Array<string>;
// Working directory to spawn the app from.
cwd: string;
}
export const createClientArgs = {
yes: flag({
long: "yes",
short: "y",
description: text`
Suppress all confirmations and warnings. Useful for scripting.
`,
}),
noLaunch: flag({
long: "no-launch",
description: text`
Don't launch LM Studio if it's not running. Have no effect if auto start server is disabled.
`,
}),
host: option({
type: optional(string),
long: "host",
@ -48,133 +36,67 @@ export const createClientArgs = {
// Connection-related CLI arguments shared by commands that create an LMStudioClient.
interface CreateClientArgs {
// Suppress all confirmations and warnings (--yes). Useful for scripting.
yes?: boolean;
// Don't launch LM Studio if it's not running (--no-launch).
noLaunch?: boolean;
// Host to connect to; defaults to "127.0.0.1" when omitted.
host?: string;
// Server port; when omitted on 127.0.0.1 the known API ports are probed instead.
port?: number;
}
async function maybeTryStartServer(logger: SimpleLogger, startServerOpts: StartServerOpts) {
const { yes } = startServerOpts;
const pref = await getCliPref(logger);
if (pref.get().autoStartServer === undefined && !yes) {
process.stderr.write(text`
${"\n"}${chalk.greenBright.underline("Server Auto Start")}
LM Studio needs to be running in server mode to perform this operation.${"\n\n"}
`);
const { cont } = await inquirer.createPromptModule({
output: process.stderr,
})([
{
type: "confirm",
name: "cont",
message: "Do you want to always start the server if it's not running? (will not ask again)",
default: true,
},
]);
if (cont) {
logger.info("lms will automatically start the server if it's not running.");
} else {
logger.info("lms WILL NOT automatically start the server if it's not running.");
}
if (platform() === "win32") {
logger.info(text`
To change this, edit the config file at
${chalk.greenBright("%USERPROFILE%\\.cache\\lm-studio\\.internal\\cli-pref.json")}
`);
} else {
logger.info(text`
To change this, edit the config file at
${chalk.greenBright("~/.cache/lm-studio/.internal/cli-pref.json")}
`);
}
pref.setWithProducer(draft => {
draft.autoStartServer = cont;
});
if (!cont) {
logger.error(text`
To start the server manually, run the following command:
${chalk.yellow("lms server start ")}${"\n"}
`);
return false;
}
logger.info("Starting the server...");
return await startServer(logger, startServerOpts);
async function isLocalServerAtPortLMStudioServerOrThrow(port: number) {
const response = await fetch(`http://127.0.0.1:${port}/lmstudio-greeting`);
if (response.status !== 200) {
throw new Error("Status is not 200.");
}
if (pref.get().autoStartServer === true) {
logger.info("LM Studio is not running in server mode. Starting the server...");
return await startServer(logger, startServerOpts);
} else if (pref.get().autoStartServer === false) {
logger.error("LM Studio needs to be running in the server mode to perform this operation.");
if (platform() === "win32") {
logger.error(text`
To automatically start the server, edit the config file at
${chalk.yellowBright("%USERPROFILE%\\.cache\\lm-studio\\.internal\\cli-pref.json")}
`);
} else {
logger.error(text`
To automatically start the server, edit the config file at
${chalk.yellowBright("~/.cache/lm-studio/.internal/cli-pref.json")}
`);
}
logger.error(text`
To start the server manually, run the following command:
${chalk.yellow("lms server start ")}${"\n"}
`);
return false;
} else {
// If not true or false, it's undefined
// Meaning --yes is used
logger.info(text`
LM Studio is not running in server mode. Starting the server because
${chalk.yellowBright("--yes")} is set
`);
return await startServer(logger, startServerOpts);
const json = await response.json();
if (json?.lmstudio !== true) {
throw new Error("Not an LM Studio server.");
}
return port;
}
/**
* Creates a logger that will self delete messages at info level.
*/
function createSelfDeletingLogger(logger: SimpleLogger, levelMap: LogLevelMap) {
return new SimpleLogger(
"",
{
debug: levelMap.debug
? (...messages) => {
clearLine(process.stderr, 0);
logger.debug(...messages);
}
: () => {},
info: levelMap.info
? (...messages) => {
clearLine(process.stderr, 0);
logger.info(...messages);
if (!levelMap.debug) {
moveCursor(process.stderr, 0, -1);
}
}
: () => {},
warn: levelMap.warn
? (...messages) => {
clearLine(process.stderr, 0);
logger.warn(...messages);
}
: () => {},
error: levelMap.error
? (...messages) => {
clearLine(process.stderr, 0);
logger.error(...messages);
}
: () => {},
},
{ useLogLevelPrefixes: false },
/**
 * Scans the well-known LM Studio API server ports and resolves with the first port that
 * responds as an LM Studio server, or with null when none of them do.
 */
async function tryFindLocalAPIServer(): Promise<number | null> {
  const probes = apiServerPorts.map(isLocalServerAtPortLMStudioServerOrThrow);
  try {
    // Promise.any resolves with the first successful probe; it rejects only if every probe fails.
    return await Promise.any(probes);
  } catch {
    return null;
  }
}
/** Resolves the path of the pointer file recording where the LM Studio app is installed. */
function getAppInstallLocationPath() {
  const relativePointerPath = ".cache/lm-studio/.internal/app-install-location.json";
  return path.join(homedir(), relativePointerPath);
}
/**
 * Attempts to start the LM Studio application as a background service.
 *
 * Reads the app-install-location pointer file to locate the executable, then spawns it
 * detached with the `--minimized` and `--run-as-service` (headless) flags.
 *
 * @param logger - Logger for progress and diagnostics.
 * @returns true when a process was spawned; false when the pointer file could not be read
 *   or the spawn failed.
 */
export async function wakeUpService(logger: SimpleLogger): Promise<boolean> {
  logger.info("Waking up LM Studio service...");
  const appInstallLocationPath = getAppInstallLocationPath();
  logger.debug(`Resolved appInstallLocationPath: ${appInstallLocationPath}`);
  try {
    const raw = await readFile(appInstallLocationPath, "utf-8");
    const appInstallLocation = JSON.parse(raw) as AppInstallLocation;
    logger.debug(`Read executable pointer:`, appInstallLocation);
    const { path, argv, cwd } = appInstallLocation;
    // argv[1] === "." means the app was started from a development checkout, so the "." app
    // path must be forwarded to the spawned process as well.
    const args: Array<string> = argv[1] === "." ? ["."] : [];
    args.push("--minimized", "--run-as-service");
    logger.debug(`Spawning process:`, { path, args, cwd });
    const child = spawn(path, args, { cwd, detached: true, stdio: "ignore" });
    // Detach so the service keeps running after this CLI process exits.
    child.unref();
    logger.debug(`Process spawned`);
    return true;
  } catch (e) {
    logger.debug(`Failed to launch application`, e);
    return false;
  }
}
export interface CreateClientOpts {}
export async function createClient(
@ -182,7 +104,6 @@ export async function createClient(
args: CreateClientArgs & LogLevelArgs,
_opts: CreateClientOpts = {},
) {
const { noLaunch, yes } = args;
let { host, port } = args;
if (host === undefined) {
host = "127.0.0.1";
@ -195,37 +116,52 @@ export async function createClient(
);
process.exit(1);
}
if (port === undefined) {
if (host === "127.0.0.1") {
try {
const config = await getServerConfig(logger);
port = config.port;
} catch (e) {
logger.debug("Failed to get last server status", e);
port = 1234;
}
} else {
port = 1234;
if (port === undefined && host === "127.0.0.1") {
// We will now attempt to connect to the local API server.
const localPort = await tryFindLocalAPIServer();
if (localPort !== null) {
const baseUrl = `ws://${host}:${localPort}`;
logger.debug(`Found local API server at ${baseUrl}`);
return new LMStudioClient({ baseUrl, logger, clientIdentifier: "lms-cli" });
}
// At this point, the user wants to access the local LM Studio, but it is not running. We will
// wake up the service and poll the API server until it is up.
await wakeUpService(logger);
// Polling
for (let i = 1; i <= 60; i++) {
await new Promise(resolve => setTimeout(resolve, 1000));
logger.debug(`Polling the API server... (attempt ${i})`);
const localPort = await tryFindLocalAPIServer();
if (localPort !== null) {
const baseUrl = `ws://${host}:${localPort}`;
logger.debug(`Found local API server at ${baseUrl}`);
return new LMStudioClient({ baseUrl, logger, clientIdentifier: "lms-cli" });
}
}
logger.error("");
}
if (port === undefined) {
port = 1234;
}
logger.debug(`Connecting to server at ${host}:${port}`);
if (!(await checkHttpServer(logger, port, host))) {
if (host === "127.0.0.1") {
if (!(await maybeTryStartServer(logger, { port, noLaunch, yes, useReducedLogging: true }))) {
process.exit(1);
}
} else {
logger.error(
text`
The server does not appear to be running at ${host}:${port}. Please make sure the server
is running and accessible at the specified address.
`,
);
process.exit(1);
}
logger.error(
text`
The server does not appear to be running at ${host}:${port}. Please make sure the server
is running and accessible at the specified address.
`,
);
}
const baseUrl = `ws://${host}:${port}`;
logger.debug(`Connecting to server with baseUrl ${port}`);
logger.debug(`Found server at ${port}`);
return new LMStudioClient({
baseUrl,
logger,

View File

@ -1,12 +1,9 @@
import { text, type SimpleLogger } from "@lmstudio/lms-common";
import chalk from "chalk";
import { spawn } from "child_process";
import { command, flag, number, option, optional, subcommands } from "cmd-ts";
import { mkdir, readFile, writeFile } from "fs/promises";
import inquirer from "inquirer";
import os, { platform } from "os";
import os from "os";
import path from "path";
import { getCliPref } from "../cliPref";
import { wakeUpService } from "../createClient";
import { createLogger, logLevelArgs } from "../logLevel";
type HttpServerCtl =
@ -23,12 +20,6 @@ interface HttpServerConfig {
port: number;
}
// Shape of the app-install-location pointer file (JSON) used to relaunch the
// LM Studio application.
interface AppInstallLocation {
// Executable path passed to spawn().
path: string;
// argv the app was started with; argv[1] === "." indicates a development environment.
argv: Array<string>;
// Working directory to spawn the app from.
cwd: string;
}
/** Resolves the path of the HTTP server control file used to send commands to LM Studio. */
function getServerCtlPath() {
  const controlFileRelativePath = ".cache/lm-studio/.internal/http-server-ctl.json";
  return path.join(os.homedir(), controlFileRelativePath);
}
@ -37,10 +28,6 @@ function getServerConfigPath() {
return path.join(os.homedir(), ".cache/lm-studio/.internal/http-server-config.json");
}
/** Resolves the path of the pointer file recording where the LM Studio app is installed. */
function getAppInstallLocationPath() {
  const relativePointerPath = ".cache/lm-studio/.internal/app-install-location.json";
  return path.join(os.homedir(), relativePointerPath);
}
/**
* Write a control object to the server control file.
*/
@ -54,41 +41,6 @@ async function writeToServerCtl(logger: SimpleLogger, controlObject: HttpServerC
await writeFile(serverCtlPath, JSON.stringify(controlObject));
}
/**
 * Launches the LM Studio desktop application, minimized and detached from this process.
 *
 * Reads the app-install-location pointer file to locate the executable, then spawns it with
 * the `--minimized` flag.
 *
 * @param logger - Logger for progress and diagnostics.
 * @returns true when a process was spawned; false when the pointer file could not be read
 *   or the spawn failed.
 */
async function launchApplication(logger: SimpleLogger): Promise<boolean> {
  logger.debug("Launching LM Studio application...");
  const appInstallLocationPath = getAppInstallLocationPath();
  logger.debug(`Resolved appInstallLocationPath: ${appInstallLocationPath}`);
  try {
    const raw = await readFile(appInstallLocationPath, "utf-8");
    const appInstallLocation = JSON.parse(raw) as AppInstallLocation;
    logger.debug(`Read executable pointer:`, appInstallLocation);
    const { path, argv, cwd } = appInstallLocation;
    // argv[1] === "." means the app was started from a development checkout, so the "." app
    // path must be forwarded to the spawned process as well.
    const args: Array<string> = argv[1] === "." ? ["."] : [];
    args.push("--minimized");
    logger.debug(`Spawning process:`, { path, args, cwd });
    const child = spawn(path, args, { cwd, detached: true, stdio: "ignore" });
    // Detach so the app keeps running after this CLI process exits.
    child.unref();
    logger.debug(`Process spawned`);
    return true;
  } catch (e) {
    logger.debug(`Failed to launch application`, e);
    return false;
  }
}
/**
* Waits for the server control file to be cleared.
*/
@ -169,13 +121,11 @@ export async function getServerConfig(logger: SimpleLogger) {
// Options accepted by startServer.
export interface StartServerOpts {
// Port to request the server on; optional.
port?: number;
// Enable CORS on the started server.
cors?: boolean;
// If true, do not launch LM Studio when it is not running.
noLaunch?: boolean;
// Suppress all confirmations and warnings. Useful for scripting.
yes?: boolean;
// Use reduced logging output.
useReducedLogging?: boolean;
}
export async function startServer(
logger: SimpleLogger,
{ port, cors, noLaunch, yes, useReducedLogging }: StartServerOpts = {},
{ port, cors, useReducedLogging }: StartServerOpts = {},
): Promise<boolean> {
if (port === undefined) {
try {
@ -205,70 +155,9 @@ export async function startServer(
`Requested the server to be started on port ${port}.`,
);
} else {
if (platform() === "linux") {
// Sorry, linux users :(
logger.errorText`
LM Studio is not running. Please start LM Studio and try again.
`;
return false;
}
if (noLaunch) {
logger.errorText`
LM Studio is not running. Since --no-launch is provided, LM Studio will not be launched.
`;
logger.errorText`
The server is not started. Please make sure LM Studio is running and try again.
`;
return false;
}
const cliPref = await getCliPref(logger);
if (!cliPref.get().autoLaunchMinimizedWarned) {
if (yes) {
logger.warn(`Auto-launch warning suppressed by ${chalk.yellowBright("--yes")} flag`);
} else {
process.stderr.write(text`
${"\n"}${chalk.bold.underline.greenBright("About to Launch LM Studio")}
By default, if LM Studio is not running, attempting to start the server will launch LM
Studio in minimized mode and then start the server.
${chalk.grey(text`
If you don't want LM Studio to launch automatically, please use the ${chalk.yellow(
"--no-launch",
)} flag.
`)}
${chalk.gray("This confirmation will not be shown again.")}${"\n\n"}
`);
await inquirer.createPromptModule({
output: process.stderr,
})([
{
type: "input",
name: "confirmation",
message: `Type "${chalk.greenBright("OK")}" to acknowledge:`,
validate: value => {
if (value.toLowerCase() === "ok") {
return true;
}
return 'You need to type "OK" to continue.';
},
},
]);
cliPref.setWithProducer(pref => {
pref.autoLaunchMinimizedWarned = true;
});
}
}
logger.infoText`
Launching LM Studio minimized... (Disable auto-launching via the
${chalk.yellow("--no-launch")} flag.)
`;
const launched = await launchApplication(logger);
const launched = await wakeUpService(logger);
if (launched) {
logger.debug(`LM Studio launched`);
logger.debug(`LM Studio service is running.`);
// At this point, LM Studio is launching. Once it is ready, it will consume the control file
// and start the server. Let's wait for that to happen.
if (await waitForCtlFileClear(logger, 1000, 10)) {
@ -282,8 +171,8 @@ export async function startServer(
}
} else {
logger.errorText`
Failed to launch LM Studio. Please make sure it is installed and have run it at least
once.
Failed to start LM Studio service. Please make sure it is installed and have run it at
least once.
`;
return false;
}
@ -318,19 +207,6 @@ const start = command({
long: "port",
short: "p",
}),
noLaunch: flag({
description: text`
Do not launch LM Studio if it is not running. If LM Studio is not running, the server will
not be started.
`,
long: "no-launch",
}),
yes: flag({
description: text`
Suppress all confirmations and warnings. Useful for scripting.
`,
long: "yes",
}),
cors: flag({
description: text`
Enable CORS on the server. Allows any website you visit to access the server. This is
@ -341,9 +217,9 @@ const start = command({
...logLevelArgs,
},
handler: async args => {
const { port, noLaunch, cors } = args;
const { port, cors } = args;
const logger = createLogger(args);
if (!(await startServer(logger, { port, noLaunch, cors }))) {
if (!(await startServer(logger, { port, cors }))) {
process.exit(1);
}
},

View File

@ -2,8 +2,7 @@ import chalk from "chalk";
import { command, flag } from "cmd-ts";
/**
 * Returns the CLI's version string.
 *
 * Fix: the block contained two consecutive `return` statements (the second was unreachable
 * dead code); only the placeholder return is kept.
 */
function getVersion() {
  // We are not using the package version, because we want the version to be the same as LM Studio.
  // NOTE(review): this placeholder is presumably substituted with the real version at build
  // time — confirm against the build scripts.
  return "<LMS-CLI-CURRENT-VERSION>";
}
export function printVersion() {