Auto start for all

This commit is contained in:
Ryan Huang
2024-04-29 12:08:57 -04:00
parent c428b032ec
commit 124d406eaa
7 changed files with 261 additions and 128 deletions

View File

@ -8,11 +8,13 @@ export async function getCliPref(logger?: SimpleLogger) {
const cliPrefSchema = z.object({ const cliPrefSchema = z.object({
autoLaunchMinimizedWarned: z.boolean(), autoLaunchMinimizedWarned: z.boolean(),
lastLoadedModels: z.array(z.string()).optional(), lastLoadedModels: z.array(z.string()).optional(),
autoStartServer: z.boolean().optional(),
}); });
type CliPref = z.infer<typeof cliPrefSchema>; type CliPref = z.infer<typeof cliPrefSchema>;
const defaultCliPref: CliPref = { const defaultCliPref: CliPref = {
autoLaunchMinimizedWarned: false, autoLaunchMinimizedWarned: false,
lastLoadedModels: [], lastLoadedModels: [],
autoStartServer: undefined,
}; };
const cliPref = new SimpleFileData( const cliPref = new SimpleFileData(
path.join(os.homedir(), ".cache/lm-studio/.internal/cli-pref.json"), path.join(os.homedir(), ".cache/lm-studio/.internal/cli-pref.json"),

View File

@ -1,9 +1,125 @@
import { text, type SimpleLogger } from "@lmstudio/lms-common"; import { text, type SimpleLogger } from "@lmstudio/lms-common";
import { LMStudioClient } from "@lmstudio/sdk"; import { LMStudioClient } from "@lmstudio/sdk";
import chalk from "chalk"; import chalk from "chalk";
import { checkHttpServer, getServerLastStatus } from "./subcommands/server"; import { flag } from "cmd-ts";
import inquirer from "inquirer";
import { platform } from "os";
import { getCliPref } from "./cliPref";
import {
checkHttpServer,
getServerLastStatus,
startServer,
type StartServerOpts,
} from "./subcommands/server";
export async function createClient(logger: SimpleLogger) { export const createClientArgs = {
yes: flag({
long: "yes",
short: "y",
description: text`
Suppress all confirmations and warnings. Useful for scripting.
`,
}),
noLaunch: flag({
long: "no-launch",
description: text`
Don't launch LM Studio if it's not running. Has no effect if server auto-start is disabled.
`,
}),
};
interface CreateClientArgs {
yes?: boolean;
noLaunch?: boolean;
}
async function maybeTryStartServer(logger: SimpleLogger, startServerOpts: StartServerOpts) {
const { yes } = startServerOpts;
const pref = await getCliPref(logger);
if (pref.get().autoStartServer === undefined && !yes) {
logger.warnWithoutPrefix(text`
${"\n"}${chalk.greenBright.underline("Server Auto Start")}
LM Studio needs to be running in server mode to perform this operation.${"\n"}
`);
const { cont } = await inquirer.prompt([
{
type: "confirm",
name: "cont",
message: "Do you want to always start the server if it's not running? (will not ask again)",
default: true,
},
]);
if (cont) {
logger.info("lms will automatically start the server if it's not running.");
} else {
logger.info("lms WILL NOT automatically start the server if it's not running.");
}
if (platform() === "win32") {
logger.info(text`
To change this, edit the config file at
${chalk.greenBright("%USERPROFILE%\\.cache\\lm-studio\\.internal\\cli-pref.json")}
`);
} else {
logger.info(text`
To change this, edit the config file at
${chalk.greenBright("~/.cache/lm-studio/.internal/cli-pref.json")}
`);
}
pref.setWithImmer(draft => {
draft.autoStartServer = cont;
});
if (!cont) {
logger.error(text`
To start the server manually, run the following command:
${chalk.yellow("lms server start ")}${"\n"}
`);
return false;
}
logger.info("Starting the server...");
return await startServer(logger, startServerOpts);
}
if (pref.get().autoStartServer === true) {
logger.info("LM Studio is not running in server mode. Starting the server...");
return await startServer(logger, startServerOpts);
} else if (pref.get().autoStartServer === false) {
logger.error("LM Studio needs to be running in server mode to perform this operation.");
if (platform() === "win32") {
logger.error(text`
To automatically start the server, edit the config file at
${chalk.yellowBright("%USERPROFILE%\\.cache\\lm-studio\\.internal\\cli-pref.json")}
`);
} else {
logger.error(text`
To automatically start the server, edit the config file at
${chalk.yellowBright("~/.cache/lm-studio/.internal/cli-pref.json")}
`);
}
logger.error(text`
To start the server manually, run the following command:
${chalk.yellow("lms server start ")}${"\n"}
`);
return false;
} else {
// If not true or false, it's undefined
// Meaning --yes is used
logger.info(text`
LM Studio is not running in server mode. Starting the server because
${chalk.yellowBright("--yes")} is set
`);
return await startServer(logger, startServerOpts);
}
}
export interface CreateClientOpts {}
export async function createClient(
logger: SimpleLogger,
{ noLaunch, yes }: CreateClientArgs,
_opts: CreateClientOpts = {},
) {
let port: number; let port: number;
try { try {
const lastStatus = await getServerLastStatus(logger); const lastStatus = await getServerLastStatus(logger);
@ -13,16 +129,9 @@ export async function createClient(logger: SimpleLogger) {
port = 1234; port = 1234;
} }
if (!(await checkHttpServer(logger, port))) { if (!(await checkHttpServer(logger, port))) {
logger.error( if (!(await maybeTryStartServer(logger, { port, noLaunch, yes }))) {
text` process.exit(1);
LM Studio needs to be running in server mode to perform this operation. }
To start the server, run the following command:
${chalk.yellow("lms server start ")}${"\n"}
`,
);
process.exit(1);
} }
const baseUrl = `ws://127.0.0.1:${port}`; const baseUrl = `ws://127.0.0.1:${port}`;
logger.debug(`Connecting to server with baseUrl ${port}`); logger.debug(`Connecting to server with baseUrl ${port}`);

View File

@ -4,7 +4,7 @@ import chalk from "chalk";
import { command, flag } from "cmd-ts"; import { command, flag } from "cmd-ts";
import columnify from "columnify"; import columnify from "columnify";
import { architectureInfoLookup } from "../architectureStylizations"; import { architectureInfoLookup } from "../architectureStylizations";
import { createClient } from "../createClient"; import { createClient, createClientArgs } from "../createClient";
import { formatSizeBytes1000, formatSizeBytesWithColor1000 } from "../formatSizeBytes1000"; import { formatSizeBytes1000, formatSizeBytesWithColor1000 } from "../formatSizeBytes1000";
import { createLogger, logLevelArgs } from "../logLevel"; import { createLogger, logLevelArgs } from "../logLevel";
@ -189,6 +189,7 @@ export const ls = command({
description: "List all downloaded models", description: "List all downloaded models",
args: { args: {
...logLevelArgs, ...logLevelArgs,
...createClientArgs,
llm: flag({ llm: flag({
long: "llm", long: "llm",
description: "Show only LLM models", description: "Show only LLM models",
@ -208,7 +209,7 @@ export const ls = command({
}, },
handler: async args => { handler: async args => {
const logger = createLogger(args); const logger = createLogger(args);
const client = await createClient(logger); const client = await createClient(logger, args);
const { llm, embedding, json, detailed } = args; const { llm, embedding, json, detailed } = args;
@ -299,6 +300,7 @@ export const ps = command({
description: "List all loaded models", description: "List all loaded models",
args: { args: {
...logLevelArgs, ...logLevelArgs,
...createClientArgs,
json: flag({ json: flag({
long: "json", long: "json",
description: "Outputs in JSON format to stdout", description: "Outputs in JSON format to stdout",
@ -306,7 +308,7 @@ export const ps = command({
}, },
handler: async args => { handler: async args => {
const logger = createLogger(args); const logger = createLogger(args);
const client = await createClient(logger); const client = await createClient(logger, args);
const { json } = args; const { json } = args;

View File

@ -7,7 +7,7 @@ import fuzzy from "fuzzy";
import inquirer from "inquirer"; import inquirer from "inquirer";
import inquirerPrompt from "inquirer-autocomplete-prompt"; import inquirerPrompt from "inquirer-autocomplete-prompt";
import { getCliPref } from "../cliPref"; import { getCliPref } from "../cliPref";
import { createClient } from "../createClient"; import { createClient, createClientArgs } from "../createClient";
import { formatElapsedTime } from "../formatElapsedTime"; import { formatElapsedTime } from "../formatElapsedTime";
import { formatSizeBytes1000 } from "../formatSizeBytes1000"; import { formatSizeBytes1000 } from "../formatSizeBytes1000";
import { createLogger, logLevelArgs } from "../logLevel"; import { createLogger, logLevelArgs } from "../logLevel";
@ -42,6 +42,7 @@ export const load = command({
description: "Load a model", description: "Load a model",
args: { args: {
...logLevelArgs, ...logLevelArgs,
...createClientArgs,
path: positional({ path: positional({
type: optional(string), type: optional(string),
description: "The path of the model to load. If not provided, ", description: "The path of the model to load. If not provided, ",
@ -62,8 +63,9 @@ export const load = command({
long: "yes", long: "yes",
short: "y", short: "y",
description: text` description: text`
Answer yes to all prompts. If there are multiple models matching the path, the first one Suppress all confirmations and warnings. Useful for scripting. If there are multiple
will be loaded. Fails if the path provided does not match any model. models matching the path, the first one will be loaded. Fails if the path provided does not
match any model.
`, `,
}), }),
exact: flag({ exact: flag({
@ -87,7 +89,7 @@ export const load = command({
const { gpu, yes, exact, identifier } = args; const { gpu, yes, exact, identifier } = args;
let { path } = args; let { path } = args;
const logger = createLogger(args); const logger = createLogger(args);
const client = await createClient(logger); const client = await createClient(logger, args);
const cliPref = await getCliPref(logger); const cliPref = await getCliPref(logger);
const lastLoadedModels = cliPref.get().lastLoadedModels ?? []; const lastLoadedModels = cliPref.get().lastLoadedModels ?? [];
@ -104,19 +106,6 @@ export const load = command({
return aIndex < bIndex ? -1 : aIndex > bIndex ? 1 : 0; return aIndex < bIndex ? -1 : aIndex > bIndex ? 1 : 0;
}); });
if (exact && yes) {
logger.errorWithoutPrefix(
makeTitledPrettyError(
"Invalid usage",
text`
The ${chalk.yellowBright("--exact")} and ${chalk.yellowBright("--yes")} flags cannot be
used together.
`,
).message,
);
process.exit(1);
}
if (exact) { if (exact) {
const model = models.find(model => model.path === path); const model = models.find(model => model.path === path);
if (path === undefined) { if (path === undefined) {
@ -188,6 +177,11 @@ export const load = command({
); );
process.exit(1); process.exit(1);
} }
if (initialFilteredModels.length > 1) {
logger.warnText`
${initialFilteredModels.length} models match the provided path. Loading the first one.
`;
}
model = models[initialFilteredModels[0].index]; model = models[initialFilteredModels[0].index];
} else { } else {
console.info(); console.info();

View File

@ -162,78 +162,57 @@ export async function getServerLastStatus(logger: SimpleLogger) {
return lastStatus; return lastStatus;
} }
const start = command({ export interface StartServerOpts {
name: "start", port?: number;
description: "Starts the local server", cors?: boolean;
args: { noLaunch?: boolean;
port: option({ yes?: boolean;
type: optional(number), }
description: text` export async function startServer(
Port to run the server on. If not provided, the server will run on the same port as the last logger: SimpleLogger,
time it was started. { port, cors, noLaunch, yes }: StartServerOpts = {},
`, ): Promise<boolean> {
long: "port", if (port === undefined) {
short: "p", try {
}), port = (await getServerLastStatus(logger)).port;
noLaunch: flag({ logger.debug(`Read from last status: port=${port}`);
description: text` } catch (e) {
Do not launch LM Studio if it is not running. If LM Studio is not running, the server will logger.debug(`Failed to read last status`, e);
not be started. port = 1234;
`, logger.debug(`Using default port ${port}`);
long: "no-launch",
}),
cors: flag({
description: text`
Enable CORS on the server. Allows any website you visit to access the server. This is
required if you are developing a web application.
`,
long: "cors",
}),
...logLevelArgs,
},
handler: async args => {
let { port } = args;
const { noLaunch, cors } = args;
const logger = createLogger(args);
if (port === undefined) {
try {
port = (await getServerLastStatus(logger)).port;
logger.debug(`Read from last status: port=${port}`);
} catch (e) {
logger.debug(`Failed to read last status`, e);
port = 1234;
logger.debug(`Using default port ${port}`);
}
} else {
logger.debug(`Using provided port ${port}`);
} }
if (cors) { } else {
logger.warnText` logger.debug(`Using provided port ${port}`);
CORS is enabled. This means any website you visit can use the LM Studio server. }
if (cors) {
logger.warnText`
CORS is enabled. This means any website you visit can use the LM Studio server.
`;
}
logger.info(`Attempting to start the server on port ${port}...`);
await writeToServerCtl(logger, { type: "start", port, cors });
if (await waitForCtlFileClear(logger, 100, 10)) {
logger.info(`Requested the server to be started on port ${port}.`);
} else {
if (noLaunch) {
logger.errorText`
LM Studio is not running. Since --no-launch is provided, LM Studio will not be launched.
`; `;
logger.errorText`
The server is not started. Please make sure LM Studio is running and try again.
`;
return false;
} }
logger.info(`Attempting to start the server on port ${port}...`); const cliPref = await getCliPref(logger);
await writeToServerCtl(logger, { type: "start", port, cors }); if (!cliPref.get().autoLaunchMinimizedWarned) {
if (await waitForCtlFileClear(logger, 100, 10)) { if (yes) {
logger.info(`Requested the server to be started on port ${port}.`); logger.warn(`Auto-launch warning suppressed by ${chalk.yellowBright("--yes")} flag`);
} else { } else {
if (noLaunch) {
logger.errorText`
LM Studio is not running. Since --no-launch is provided, LM Studio will not be launched.
`;
logger.errorText`
The server is not started. Please make sure LM Studio is running and try again.
`;
process.exit(1);
}
const cliPref = await getCliPref(logger);
if (!cliPref.get().autoLaunchMinimizedWarned) {
logger.warnWithoutPrefix(text` logger.warnWithoutPrefix(text`
${"\n"}${chalk.bold.underline.greenBright("About to Launch LM Studio")} ${"\n"}${chalk.bold.underline.greenBright("About to Launch LM Studio")}
By default, if LM Studio is not running, using the command ${chalk.yellow( By default, if LM Studio is not running, attempting to start the server will launch LM
"lms server start", Studio in minimized mode and then start the server.
)} will launch LM Studio in minimized mode and then start the server.
${chalk.grey(text` ${chalk.grey(text`
If you don't want LM Studio to launch automatically, please use the ${chalk.yellow( If you don't want LM Studio to launch automatically, please use the ${chalk.yellow(
@ -260,37 +239,82 @@ const start = command({
pref.autoLaunchMinimizedWarned = true; pref.autoLaunchMinimizedWarned = true;
}); });
} }
}
logger.warnText` logger.warnText`
Launching LM Studio minimized... (If you don't want LM Studio to launch automatically, Launching LM Studio minimized... (If you don't want LM Studio to launch automatically,
please use the ${chalk.yellow("--no-launch")} flag.) please use the ${chalk.yellow("--no-launch")} flag.)
`; `;
const launched = await launchApplication(logger); const launched = await launchApplication(logger);
if (launched) { if (launched) {
logger.debug(`LM Studio launched`); logger.debug(`LM Studio launched`);
// At this point, LM Studio is launching. Once it is ready, it will consume the control file // At this point, LM Studio is launching. Once it is ready, it will consume the control file
// and start the server. Let's wait for that to happen. // and start the server. Let's wait for that to happen.
if (await waitForCtlFileClear(logger, 1000, 10)) { if (await waitForCtlFileClear(logger, 1000, 10)) {
logger.info(`Requested the server to be started on port ${port}.`); logger.info(`Requested the server to be started on port ${port}.`);
} else {
logger.error(`Failed to start the server on port ${port}`);
process.exit(1);
}
} else { } else {
logger.errorText` logger.error(`Failed to start the server on port ${port}`);
Failed to launch LM Studio. Please make sure it is installed and you have run it at
least once.
`;
process.exit(1); process.exit(1);
} }
}
logger.info("Verifying the server is running...");
if (await checkHttpServerWithRetries(logger, port, 5)) {
logger.info(`Verification succeeded. The server is running on port ${port}.`);
} else { } else {
logger.error("Failed to verify the server is running. Please try to use another port."); logger.errorText`
Failed to launch LM Studio. Please make sure it is installed and you have run it at
least once.
`;
return false;
}
}
logger.info("Verifying the server is running...");
if (await checkHttpServerWithRetries(logger, port, 5)) {
logger.info(`Verification succeeded. The server is running on port ${port}.`);
return true;
} else {
logger.error("Failed to verify the server is running. Please try to use another port.");
return false;
}
}
const start = command({
name: "start",
description: "Starts the local server",
args: {
port: option({
type: optional(number),
description: text`
Port to run the server on. If not provided, the server will run on the same port as the last
time it was started.
`,
long: "port",
short: "p",
}),
noLaunch: flag({
description: text`
Do not launch LM Studio if it is not running. If LM Studio is not running, the server will
not be started.
`,
long: "no-launch",
}),
yes: flag({
description: text`
Suppress all confirmations and warnings. Useful for scripting.
`,
long: "yes",
}),
cors: flag({
description: text`
Enable CORS on the server. Allows any website you visit to access the server. This is
required if you are developing a web application.
`,
long: "cors",
}),
...logLevelArgs,
},
handler: async args => {
const { port, noLaunch, cors } = args;
const logger = createLogger(args);
if (!(await startServer(logger, { port, noLaunch, cors }))) {
process.exit(1); process.exit(1);
} }
}, },

View File

@ -2,7 +2,7 @@ import { text } from "@lmstudio/lms-common";
import boxen from "boxen"; import boxen from "boxen";
import chalk from "chalk"; import chalk from "chalk";
import { command } from "cmd-ts"; import { command } from "cmd-ts";
import { createClient } from "../createClient"; import { createClient, createClientArgs } from "../createClient";
import { formatSizeBytesWithColor1000 } from "../formatSizeBytes1000"; import { formatSizeBytesWithColor1000 } from "../formatSizeBytes1000";
import { createLogger, logLevelArgs } from "../logLevel"; import { createLogger, logLevelArgs } from "../logLevel";
import { checkHttpServer, getServerLastStatus } from "./server"; import { checkHttpServer, getServerLastStatus } from "./server";
@ -12,6 +12,7 @@ export const status = command({
description: "Prints the status of LM Studio", description: "Prints the status of LM Studio",
args: { args: {
...logLevelArgs, ...logLevelArgs,
...createClientArgs,
}, },
async handler(args) { async handler(args) {
const logger = createLogger(args); const logger = createLogger(args);
@ -32,7 +33,7 @@ export const status = command({
`; `;
content += "\n\n"; content += "\n\n";
const client = await createClient(logger); const client = await createClient(logger, args);
const loadedModels = await client.llm.listLoaded(); const loadedModels = await client.llm.listLoaded();
const downloadedModels = await client.system.listDownloadedModels(); const downloadedModels = await client.system.listDownloadedModels();
content += chalk.cyanBright("Loaded Models"); content += chalk.cyanBright("Loaded Models");

View File

@ -4,7 +4,7 @@ import { boolean, command, flag, optional, positional, string } from "cmd-ts";
import fuzzy from "fuzzy"; import fuzzy from "fuzzy";
import inquirer from "inquirer"; import inquirer from "inquirer";
import inquirerPrompt from "inquirer-autocomplete-prompt"; import inquirerPrompt from "inquirer-autocomplete-prompt";
import { createClient } from "../createClient"; import { createClient, createClientArgs } from "../createClient";
import { createLogger, logLevelArgs } from "../logLevel"; import { createLogger, logLevelArgs } from "../logLevel";
import terminalSize from "../terminalSize"; import terminalSize from "../terminalSize";
@ -12,6 +12,8 @@ export const unload = command({
name: "unload", name: "unload",
description: "Unload a model", description: "Unload a model",
args: { args: {
...logLevelArgs,
...createClientArgs,
identifier: positional({ identifier: positional({
type: optional(string), type: optional(string),
description: text` description: text`
@ -26,12 +28,11 @@ export const unload = command({
long: "all", long: "all",
short: "a", short: "a",
}), }),
...logLevelArgs,
}, },
handler: async args => { handler: async args => {
const { identifier, all } = args; const { identifier, all } = args;
const logger = createLogger(args); const logger = createLogger(args);
const client = await createClient(logger); const client = await createClient(logger, args);
if (all && identifier !== undefined) { if (all && identifier !== undefined) {
logger.errorWithoutPrefix( logger.errorWithoutPrefix(