Auto start for all

This commit is contained in:
Ryan Huang
2024-04-29 12:08:57 -04:00
parent c428b032ec
commit 124d406eaa
7 changed files with 261 additions and 128 deletions

View File

@ -8,11 +8,13 @@ export async function getCliPref(logger?: SimpleLogger) {
// Zod schema for the persisted CLI preferences file (validated on load).
const cliPrefSchema = z.object({
// Whether the user has already been warned that LM Studio auto-launches minimized.
autoLaunchMinimizedWarned: z.boolean(),
// Paths of recently loaded models; optional for backward compatibility with older pref files.
lastLoadedModels: z.array(z.string()).optional(),
// Whether lms should auto-start the server when it is not running.
// undefined means the user has not been asked yet (triggers an interactive prompt).
autoStartServer: z.boolean().optional(),
});
type CliPref = z.infer<typeof cliPrefSchema>;
// Defaults used when no preference file exists yet.
const defaultCliPref: CliPref = {
autoLaunchMinimizedWarned: false,
lastLoadedModels: [],
// Explicitly undefined so the first server-start attempt prompts the user.
autoStartServer: undefined,
};
const cliPref = new SimpleFileData(
path.join(os.homedir(), ".cache/lm-studio/.internal/cli-pref.json"),

View File

@ -1,9 +1,125 @@
import { text, type SimpleLogger } from "@lmstudio/lms-common";
import { LMStudioClient } from "@lmstudio/sdk";
import chalk from "chalk";
import { checkHttpServer, getServerLastStatus } from "./subcommands/server";
import { flag } from "cmd-ts";
import inquirer from "inquirer";
import { platform } from "os";
import { getCliPref } from "./cliPref";
import {
checkHttpServer,
getServerLastStatus,
startServer,
type StartServerOpts,
} from "./subcommands/server";
export async function createClient(logger: SimpleLogger) {
/**
 * Shared cmd-ts argument definitions for every command that connects to
 * LM Studio through createClient. Spread into a command's `args` object.
 */
export const createClientArgs = {
  yes: flag({
    long: "yes",
    short: "y",
    description: text`
      Suppress all confirmations and warnings. Useful for scripting.
    `,
  }),
  noLaunch: flag({
    long: "no-launch",
    description: text`
      Don't launch LM Studio if it's not running. Has no effect if auto start server is disabled.
    `,
  }),
};
// Parsed values of the flags declared in createClientArgs.
interface CreateClientArgs {
// Suppress all confirmations and warnings (scripting mode, --yes/-y).
yes?: boolean;
// Don't launch LM Studio if it's not running (--no-launch).
noLaunch?: boolean;
}
/**
 * Called when the HTTP server is not reachable. Decides whether to start it
 * automatically, based on the saved `autoStartServer` preference and --yes:
 *
 * - Preference unset, no --yes: prompt once, persist the answer, then act on it.
 * - Preference true: start the server.
 * - Preference false: print manual-start instructions and return false.
 * - Preference unset, --yes given: start the server without prompting.
 *
 * @param logger - logger used for all user-facing output
 * @param startServerOpts - options forwarded verbatim to startServer (its `yes`
 *   field also suppresses the prompt here)
 * @returns true if the server was started successfully, false otherwise
 */
async function maybeTryStartServer(logger: SimpleLogger, startServerOpts: StartServerOpts) {
const { yes } = startServerOpts;
const pref = await getCliPref(logger);
// First time (preference not yet recorded) and interactive: ask the user.
if (pref.get().autoStartServer === undefined && !yes) {
logger.warnWithoutPrefix(text`
${"\n"}${chalk.greenBright.underline("Server Auto Start")}
LM Studio needs to be running in server mode to perform this operation.${"\n"}
`);
const { cont } = await inquirer.prompt([
{
type: "confirm",
name: "cont",
message: "Do you want to always start the server if it's not running? (will not ask again)",
default: true,
},
]);
if (cont) {
logger.info("lms will automatically start the server if it's not running.");
} else {
logger.info("lms WILL NOT automatically start the server if it's not running.");
}
// Tell the user where the preference lives so they can change it later.
if (platform() === "win32") {
logger.info(text`
To change this, edit the config file at
${chalk.greenBright("%USERPROFILE%\\.cache\\lm-studio\\.internal\\cli-pref.json")}
`);
} else {
logger.info(text`
To change this, edit the config file at
${chalk.greenBright("~/.cache/lm-studio/.internal/cli-pref.json")}
`);
}
// Persist the answer so the prompt never appears again.
pref.setWithImmer(draft => {
draft.autoStartServer = cont;
});
if (!cont) {
logger.error(text`
To start the server manually, run the following command:
${chalk.yellow("lms server start ")}${"\n"}
`);
return false;
}
logger.info("Starting the server...");
return await startServer(logger, startServerOpts);
}
// Preference already recorded (or --yes given): act without prompting.
if (pref.get().autoStartServer === true) {
logger.info("LM Studio is not running in server mode. Starting the server...");
return await startServer(logger, startServerOpts);
} else if (pref.get().autoStartServer === false) {
// User opted out of auto-start: explain how to start manually and fail.
logger.error("LM Studio needs to be running in the server mode to perform this operation.");
if (platform() === "win32") {
logger.error(text`
To automatically start the server, edit the config file at
${chalk.yellowBright("%USERPROFILE%\\.cache\\lm-studio\\.internal\\cli-pref.json")}
`);
} else {
logger.error(text`
To automatically start the server, edit the config file at
${chalk.yellowBright("~/.cache/lm-studio/.internal/cli-pref.json")}
`);
}
logger.error(text`
To start the server manually, run the following command:
${chalk.yellow("lms server start ")}${"\n"}
`);
return false;
} else {
// If not true or false, it's undefined
// Meaning --yes is used
logger.info(text`
LM Studio is not running in server mode. Starting the server because
${chalk.yellowBright("--yes")} is set
`);
return await startServer(logger, startServerOpts);
}
}
export interface CreateClientOpts {}
export async function createClient(
logger: SimpleLogger,
{ noLaunch, yes }: CreateClientArgs,
_opts: CreateClientOpts = {},
) {
let port: number;
try {
const lastStatus = await getServerLastStatus(logger);
@ -13,16 +129,9 @@ export async function createClient(logger: SimpleLogger) {
port = 1234;
}
if (!(await checkHttpServer(logger, port))) {
logger.error(
text`
LM Studio needs to be running in server mode to perform this operation.
To start the server, run the following command:
${chalk.yellow("lms server start ")}${"\n"}
`,
);
process.exit(1);
if (!(await maybeTryStartServer(logger, { port, noLaunch, yes }))) {
process.exit(1);
}
}
const baseUrl = `ws://127.0.0.1:${port}`;
logger.debug(`Connecting to server with baseUrl ${port}`);

View File

@ -4,7 +4,7 @@ import chalk from "chalk";
import { command, flag } from "cmd-ts";
import columnify from "columnify";
import { architectureInfoLookup } from "../architectureStylizations";
import { createClient } from "../createClient";
import { createClient, createClientArgs } from "../createClient";
import { formatSizeBytes1000, formatSizeBytesWithColor1000 } from "../formatSizeBytes1000";
import { createLogger, logLevelArgs } from "../logLevel";
@ -189,6 +189,7 @@ export const ls = command({
description: "List all downloaded models",
args: {
...logLevelArgs,
...createClientArgs,
llm: flag({
long: "llm",
description: "Show only LLM models",
@ -208,7 +209,7 @@ export const ls = command({
},
handler: async args => {
const logger = createLogger(args);
const client = await createClient(logger);
const client = await createClient(logger, args);
const { llm, embedding, json, detailed } = args;
@ -299,6 +300,7 @@ export const ps = command({
description: "List all loaded models",
args: {
...logLevelArgs,
...createClientArgs,
json: flag({
long: "json",
description: "Outputs in JSON format to stdout",
@ -306,7 +308,7 @@ export const ps = command({
},
handler: async args => {
const logger = createLogger(args);
const client = await createClient(logger);
const client = await createClient(logger, args);
const { json } = args;

View File

@ -7,7 +7,7 @@ import fuzzy from "fuzzy";
import inquirer from "inquirer";
import inquirerPrompt from "inquirer-autocomplete-prompt";
import { getCliPref } from "../cliPref";
import { createClient } from "../createClient";
import { createClient, createClientArgs } from "../createClient";
import { formatElapsedTime } from "../formatElapsedTime";
import { formatSizeBytes1000 } from "../formatSizeBytes1000";
import { createLogger, logLevelArgs } from "../logLevel";
@ -42,6 +42,7 @@ export const load = command({
description: "Load a model",
args: {
...logLevelArgs,
...createClientArgs,
path: positional({
type: optional(string),
description: "The path of the model to load. If not provided, ",
@ -62,8 +63,9 @@ export const load = command({
long: "yes",
short: "y",
description: text`
Answer yes to all prompts. If there are multiple models matching the path, the first one
will be loaded. Fails if the path provided does not match any model.
Suppress all confirmations and warnings. Useful for scripting. If there are multiple
models matching the path, the first one will be loaded. Fails if the path provided does not
match any model.
`,
}),
exact: flag({
@ -87,7 +89,7 @@ export const load = command({
const { gpu, yes, exact, identifier } = args;
let { path } = args;
const logger = createLogger(args);
const client = await createClient(logger);
const client = await createClient(logger, args);
const cliPref = await getCliPref(logger);
const lastLoadedModels = cliPref.get().lastLoadedModels ?? [];
@ -104,19 +106,6 @@ export const load = command({
return aIndex < bIndex ? -1 : aIndex > bIndex ? 1 : 0;
});
if (exact && yes) {
logger.errorWithoutPrefix(
makeTitledPrettyError(
"Invalid usage",
text`
The ${chalk.yellowBright("--exact")} and ${chalk.yellowBright("--yes")} flags cannot be
used together.
`,
).message,
);
process.exit(1);
}
if (exact) {
const model = models.find(model => model.path === path);
if (path === undefined) {
@ -188,6 +177,11 @@ export const load = command({
);
process.exit(1);
}
if (initialFilteredModels.length > 1) {
logger.warnText`
${initialFilteredModels.length} models match the provided path. Loading the first one.
`;
}
model = models[initialFilteredModels[0].index];
} else {
console.info();

View File

@ -162,78 +162,57 @@ export async function getServerLastStatus(logger: SimpleLogger) {
return lastStatus;
}
const start = command({
name: "start",
description: "Starts the local server",
args: {
port: option({
type: optional(number),
description: text`
Port to run the server on. If not provided, the server will run on the same port as the last
time it was started.
`,
long: "port",
short: "p",
}),
noLaunch: flag({
description: text`
Do not launch LM Studio if it is not running. If LM Studio is not running, the server will
not be started.
`,
long: "no-launch",
}),
cors: flag({
description: text`
Enable CORS on the server. Allows any website you visit to access the server. This is
required if you are developing a web application.
`,
long: "cors",
}),
...logLevelArgs,
},
handler: async args => {
let { port } = args;
const { noLaunch, cors } = args;
const logger = createLogger(args);
if (port === undefined) {
try {
port = (await getServerLastStatus(logger)).port;
logger.debug(`Read from last status: port=${port}`);
} catch (e) {
logger.debug(`Failed to read last status`, e);
port = 1234;
logger.debug(`Using default port ${port}`);
}
} else {
logger.debug(`Using provided port ${port}`);
// Options accepted by startServer.
export interface StartServerOpts {
// Port to listen on; when omitted, the last-used port is read from disk, falling back to 1234.
port?: number;
// Enable CORS on the server (allows any website to call it).
cors?: boolean;
// If LM Studio is not running, do not launch it (the start then fails).
noLaunch?: boolean;
// Suppress confirmations/warnings (e.g. the auto-launch-minimized warning).
yes?: boolean;
}
export async function startServer(
logger: SimpleLogger,
{ port, cors, noLaunch, yes }: StartServerOpts = {},
): Promise<boolean> {
if (port === undefined) {
try {
port = (await getServerLastStatus(logger)).port;
logger.debug(`Read from last status: port=${port}`);
} catch (e) {
logger.debug(`Failed to read last status`, e);
port = 1234;
logger.debug(`Using default port ${port}`);
}
if (cors) {
logger.warnText`
CORS is enabled. This means any website you visit can use the LM Studio server.
} else {
logger.debug(`Using provided port ${port}`);
}
if (cors) {
logger.warnText`
CORS is enabled. This means any website you visit can use the LM Studio server.
`;
}
logger.info(`Attempting to start the server on port ${port}...`);
await writeToServerCtl(logger, { type: "start", port, cors });
if (await waitForCtlFileClear(logger, 100, 10)) {
logger.info(`Requested the server to be started on port ${port}.`);
} else {
if (noLaunch) {
logger.errorText`
LM Studio is not running. Since --no-launch is provided, LM Studio will not be launched.
`;
logger.errorText`
The server is not started. Please make sure LM Studio is running and try again.
`;
return false;
}
logger.info(`Attempting to start the server on port ${port}...`);
await writeToServerCtl(logger, { type: "start", port, cors });
if (await waitForCtlFileClear(logger, 100, 10)) {
logger.info(`Requested the server to be started on port ${port}.`);
} else {
if (noLaunch) {
logger.errorText`
LM Studio is not running. Since --no-launch is provided, LM Studio will not be launched.
`;
logger.errorText`
The server is not started. Please make sure LM Studio is running and try again.
`;
process.exit(1);
}
const cliPref = await getCliPref(logger);
if (!cliPref.get().autoLaunchMinimizedWarned) {
const cliPref = await getCliPref(logger);
if (!cliPref.get().autoLaunchMinimizedWarned) {
if (yes) {
logger.warn(`Auto-launch warning suppressed by ${chalk.yellowBright("--yes")} flag`);
} else {
logger.warnWithoutPrefix(text`
${"\n"}${chalk.bold.underline.greenBright("About to Launch LM Studio")}
By default, if LM Studio is not running, using the command ${chalk.yellow(
"lms server start",
)} will launch LM Studio in minimized mode and then start the server.
By default, if LM Studio is not running, attempting to start the server will launch LM
Studio in minimized mode and then start the server.
${chalk.grey(text`
If you don't want LM Studio to launch automatically, please use the ${chalk.yellow(
@ -260,37 +239,82 @@ const start = command({
pref.autoLaunchMinimizedWarned = true;
});
}
}
logger.warnText`
Launching LM Studio minimized... (If you don't want LM Studio to launch automatically,
please use the ${chalk.yellow("--no-launch")} flag.)
`;
logger.warnText`
Launching LM Studio minimized... (If you don't want LM Studio to launch automatically,
please use the ${chalk.yellow("--no-launch")} flag.)
`;
const launched = await launchApplication(logger);
if (launched) {
logger.debug(`LM Studio launched`);
// At this point, LM Studio is launching. Once it is ready, it will consume the control file
// and start the server. Let's wait for that to happen.
if (await waitForCtlFileClear(logger, 1000, 10)) {
logger.info(`Requested the server to be started on port ${port}.`);
} else {
logger.error(`Failed to start the server on port ${port}`);
process.exit(1);
}
const launched = await launchApplication(logger);
if (launched) {
logger.debug(`LM Studio launched`);
// At this point, LM Studio is launching. Once it is ready, it will consume the control file
// and start the server. Let's wait for that to happen.
if (await waitForCtlFileClear(logger, 1000, 10)) {
logger.info(`Requested the server to be started on port ${port}.`);
} else {
logger.errorText`
Failed to launch LM Studio. Please make sure it is installed and have run it at least
once.
`;
logger.error(`Failed to start the server on port ${port}`);
process.exit(1);
}
}
logger.info("Verifying the server is running...");
if (await checkHttpServerWithRetries(logger, port, 5)) {
logger.info(`Verification succeeded. The server is running on port ${port}.`);
} else {
logger.error("Failed to verify the server is running. Please try to use another port.");
logger.errorText`
Failed to launch LM Studio. Please make sure it is installed and have run it at least
once.
`;
return false;
}
}
logger.info("Verifying the server is running...");
if (await checkHttpServerWithRetries(logger, port, 5)) {
logger.info(`Verification succeeded. The server is running on port ${port}.`);
return true;
} else {
logger.error("Failed to verify the server is running. Please try to use another port.");
return false;
}
}
const start = command({
name: "start",
description: "Starts the local server",
args: {
port: option({
type: optional(number),
description: text`
Port to run the server on. If not provided, the server will run on the same port as the last
time it was started.
`,
long: "port",
short: "p",
}),
noLaunch: flag({
description: text`
Do not launch LM Studio if it is not running. If LM Studio is not running, the server will
not be started.
`,
long: "no-launch",
}),
yes: flag({
description: text`
Suppress all confirmations and warnings. Useful for scripting.
`,
long: "yes",
}),
cors: flag({
description: text`
Enable CORS on the server. Allows any website you visit to access the server. This is
required if you are developing a web application.
`,
long: "cors",
}),
...logLevelArgs,
},
// Handler for `lms server start`: forwards the CLI flags to startServer and
// exits non-zero when the server could not be started.
handler: async args => {
  // Fix: also forward `yes` — the --yes flag is declared in this command's
  // args but was never passed through, so it could not suppress
  // startServer's auto-launch warning.
  const { port, noLaunch, cors, yes } = args;
  const logger = createLogger(args);
  if (!(await startServer(logger, { port, noLaunch, cors, yes }))) {
    process.exit(1);
  }
},

View File

@ -2,7 +2,7 @@ import { text } from "@lmstudio/lms-common";
import boxen from "boxen";
import chalk from "chalk";
import { command } from "cmd-ts";
import { createClient } from "../createClient";
import { createClient, createClientArgs } from "../createClient";
import { formatSizeBytesWithColor1000 } from "../formatSizeBytes1000";
import { createLogger, logLevelArgs } from "../logLevel";
import { checkHttpServer, getServerLastStatus } from "./server";
@ -12,6 +12,7 @@ export const status = command({
description: "Prints the status of LM Studio",
args: {
...logLevelArgs,
...createClientArgs,
},
async handler(args) {
const logger = createLogger(args);
@ -32,7 +33,7 @@ export const status = command({
`;
content += "\n\n";
const client = await createClient(logger);
const client = await createClient(logger, args);
const loadedModels = await client.llm.listLoaded();
const downloadedModels = await client.system.listDownloadedModels();
content += chalk.cyanBright("Loaded Models");

View File

@ -4,7 +4,7 @@ import { boolean, command, flag, optional, positional, string } from "cmd-ts";
import fuzzy from "fuzzy";
import inquirer from "inquirer";
import inquirerPrompt from "inquirer-autocomplete-prompt";
import { createClient } from "../createClient";
import { createClient, createClientArgs } from "../createClient";
import { createLogger, logLevelArgs } from "../logLevel";
import terminalSize from "../terminalSize";
@ -12,6 +12,8 @@ export const unload = command({
name: "unload",
description: "Unload a model",
args: {
...logLevelArgs,
...createClientArgs,
identifier: positional({
type: optional(string),
description: text`
@ -26,12 +28,11 @@ export const unload = command({
long: "all",
short: "a",
}),
...logLevelArgs,
},
handler: async args => {
const { identifier, all } = args;
const logger = createLogger(args);
const client = await createClient(logger);
const client = await createClient(logger, args);
if (all && identifier !== undefined) {
logger.errorWithoutPrefix(