Fix commands don't work if they woke LLM service (#119)

This commit is contained in:
ryan-the-crayon
2024-12-06 13:28:52 -05:00
committed by GitHub
parent ee056c0917
commit 39c044bce1
4 changed files with 15 additions and 5 deletions

View File

@@ -52,8 +52,8 @@ Here are some frequently used commands:
- `lms ls --json` - To list all downloaded models in machine-readable JSON format. - `lms ls --json` - To list all downloaded models in machine-readable JSON format.
- `lms ps` - To list all loaded models available for inferencing. - `lms ps` - To list all loaded models available for inferencing.
- `lms ps --json` - To list all loaded models available for inferencing in machine-readable JSON format. - `lms ps --json` - To list all loaded models available for inferencing in machine-readable JSON format.
- `lms load --gpu max` - To load a model with maximum GPU acceleration - `lms load` - To load a model
- `lms load <model path> --gpu max -y` - To load a model with maximum GPU acceleration without confirmation - `lms load <model path> -y` - To load a model without confirmation
- `lms unload <model identifier>` - To unload a model - `lms unload <model identifier>` - To unload a model
- `lms unload --all` - To unload all models - `lms unload --all` - To unload all models
- `lms create` - To create a new project with LM Studio SDK - `lms create` - To create a new project with LM Studio SDK

View File

@@ -106,6 +106,7 @@ export async function wakeUpService(logger: SimpleLogger): Promise<boolean> {
} }
export interface CreateClientOpts {} export interface CreateClientOpts {}
const lmsKey = "<LMS-CLI-LMS-KEY>";
export async function createClient( export async function createClient(
logger: SimpleLogger, logger: SimpleLogger,
@@ -134,7 +135,6 @@ export async function createClient(
}; };
} else { } else {
// Not remote. We need to check if this is a production build. // Not remote. We need to check if this is a production build.
const lmsKey = "<LMS-CLI-LMS-KEY>";
if (lmsKey.startsWith("<") && !process.env.LMS_FORCE_PROD) { if (lmsKey.startsWith("<") && !process.env.LMS_FORCE_PROD) {
// lmsKey not injected and we did not force prod, this is not a production build. // lmsKey not injected and we did not force prod, this is not a production build.
logger.warnText` logger.warnText`
@@ -176,6 +176,16 @@ export async function createClient(
if (localPort !== null) { if (localPort !== null) {
const baseUrl = `ws://${host}:${localPort}`; const baseUrl = `ws://${host}:${localPort}`;
logger.debug(`Found local API server at ${baseUrl}`); logger.debug(`Found local API server at ${baseUrl}`);
if (auth.clientIdentifier === "lms-cli") {
// We need to refetch the lms key due to the possibility of a new key being generated.
const lmsKey2 = (await readFile(lmsKey2Path, "utf-8")).trim();
auth = {
...auth,
clientPasskey: lmsKey + lmsKey2,
};
}
return new LMStudioClient({ baseUrl, logger, ...auth }); return new LMStudioClient({ baseUrl, logger, ...auth });
} }
} }

View File

@@ -330,7 +330,7 @@ export const ps = command({
To load a model, run: To load a model, run:
${chalk.yellow("lms load --gpu max")}${"\n"} ${chalk.yellow("lms load")}${"\n"}
`, `,
); );
return; return;

View File

@@ -97,7 +97,7 @@ export const unload = command({
logger.info(`Model "${identifier}" unloaded.`); logger.info(`Model "${identifier}" unloaded.`);
} else { } else {
if (models.length === 0) { if (models.length === 0) {
logger.error(`You don't have any models loaded. Use "lms load --gpu max" to load a model.`); logger.error(`You don't have any models loaded. Use "lms load" to load a model.`);
process.exit(1); process.exit(1);
} }
console.info(); console.info();