diff --git a/README.md b/README.md index cf2e378..b84b215 100644 --- a/README.md +++ b/README.md @@ -52,8 +52,8 @@ Here are some frequently used commands: - `lms ls --json` - To list all downloaded models in machine-readable JSON format. - `lms ps` - To list all loaded models available for inferencing. - `lms ps --json` - To list all loaded models available for inferencing in machine-readable JSON format. -- `lms load --gpu max` - To load a model with maximum GPU acceleration - - `lms load --gpu max -y` - To load a model with maximum GPU acceleration without confirmation +- `lms load` - To load a model + - `lms load -y` - To load a model without confirmation - `lms unload ` - To unload a model - `lms unload --all` - To unload all models - `lms create` - To create a new project with LM Studio SDK diff --git a/src/createClient.ts b/src/createClient.ts index 82936b8..9cab4f9 100644 --- a/src/createClient.ts +++ b/src/createClient.ts @@ -106,6 +106,7 @@ export async function wakeUpService(logger: SimpleLogger): Promise { } export interface CreateClientOpts {} +const lmsKey = ""; export async function createClient( logger: SimpleLogger, @@ -134,7 +135,6 @@ export async function createClient( }; } else { // Not remote. We need to check if this is a production build. - const lmsKey = ""; if (lmsKey.startsWith("<") && !process.env.LMS_FORCE_PROD) { // lmsKey not injected and we did not force prod, this is not a production build. logger.warnText` @@ -176,6 +176,16 @@ export async function createClient( if (localPort !== null) { const baseUrl = `ws://${host}:${localPort}`; logger.debug(`Found local API server at ${baseUrl}`); + + if (auth.clientIdentifier === "lms-cli") { + // We need to refetch the lms key due to the possibility of a new key being generated. 
+ const lmsKey2 = (await readFile(lmsKey2Path, "utf-8")).trim(); + auth = { + ...auth, + clientPasskey: lmsKey + lmsKey2, + }; + } + return new LMStudioClient({ baseUrl, logger, ...auth }); } } diff --git a/src/subcommands/list.ts b/src/subcommands/list.ts index 28b1ec0..7a5b7a0 100644 --- a/src/subcommands/list.ts +++ b/src/subcommands/list.ts @@ -330,7 +330,7 @@ export const ps = command({ To load a model, run: - ${chalk.yellow("lms load --gpu max")}${"\n"} + ${chalk.yellow("lms load")}${"\n"} `, ); return; diff --git a/src/subcommands/unload.ts b/src/subcommands/unload.ts index 79aaf18..cf89740 100644 --- a/src/subcommands/unload.ts +++ b/src/subcommands/unload.ts @@ -97,7 +97,7 @@ export const unload = command({ logger.info(`Model "${identifier}" unloaded.`); } else { if (models.length === 0) { - logger.error(`You don't have any models loaded. Use "lms load --gpu max" to load a model.`); + logger.error(`You don't have any models loaded. Use "lms load" to load a model.`); process.exit(1); } console.info();