Make logs better

This commit is contained in:
Ryan Huang
2024-04-30 14:21:06 -04:00
parent 81dac91bc5
commit d7de9c9324
8 changed files with 37 additions and 141 deletions

View File

@ -16,6 +16,7 @@
"dependencies": {
"@lmstudio/lms-common": "^0.5.5",
"@lmstudio/lms-lmstudio": "^0.0.9",
"@lmstudio/lms-isomorphic": "^0.3.1",
"@lmstudio/sdk": "^0.0.10",
"boxen": "^5.1.2",
"chalk": "^4.1.2",

View File

@ -38,12 +38,14 @@ async function maybeTryStartServer(logger: SimpleLogger, startServerOpts: StartS
const { yes } = startServerOpts;
const pref = await getCliPref(logger);
if (pref.get().autoStartServer === undefined && !yes) {
logger.warnWithoutPrefix(text`
process.stderr.write(text`
${"\n"}${chalk.greenBright.underline("Server Auto Start")}
LM Studio needs to be running in server mode to perform this operation.${"\n"}
LM Studio needs to be running in server mode to perform this operation.${"\n\n"}
`);
const { cont } = await inquirer.prompt([
const { cont } = await inquirer.createPromptModule({
output: process.stderr,
})([
{
type: "confirm",
name: "cont",

View File

@ -1,4 +1,5 @@
import { filteredArray, text, type SimpleLogger } from "@lmstudio/lms-common";
import { terminalSize } from "@lmstudio/lms-isomorphic";
import boxen from "boxen";
import chalk from "chalk";
import { exec, spawn } from "child_process";
@ -16,7 +17,6 @@ import util from "util";
import { z } from "zod";
import { createLogger, logLevelArgs } from "../logLevel";
import { ProgressBar } from "../ProgressBar";
import terminalSize from "../terminalSize";
const execAsync = util.promisify(exec);
const illegalPathChars = ["/", "\\", ":", "*", "?", '"', "<", ">", "|"];
@ -156,8 +156,9 @@ async function selectScaffold(
initialSearch: string,
leaveEmptyLines: number,
) {
inquirer.registerPrompt("autocomplete", inquirerPrompt);
const { selected } = await inquirer.prompt({
const prompt = inquirer.createPromptModule({ output: process.stderr });
prompt.registerPrompt("autocomplete", inquirerPrompt);
const { selected } = await prompt({
type: "autocomplete",
name: "selected",
message: chalk.greenBright("Select a scaffold to use") + chalk.gray(" |"),
@ -221,7 +222,9 @@ async function createWithScaffold(logger: SimpleLogger, scaffold: Scaffold) {
const { name, replaceFrom, default: originalDefaultValue } = arg;
const defaultValue = replacer.replace(originalDefaultValue);
const { value } = await inquirer.prompt({
const { value } = await inquirer.createPromptModule({
output: process.stderr,
})({
type: "input",
name: "value",
message: `${name}`,

View File

@ -1,4 +1,5 @@
import { makeTitledPrettyError, type SimpleLogger, text } from "@lmstudio/lms-common";
import { terminalSize } from "@lmstudio/lms-isomorphic";
import { type DownloadedModel } from "@lmstudio/lms-shared-types";
import {
type LLMAccelerationOffload,
@ -16,7 +17,6 @@ import { formatElapsedTime } from "../formatElapsedTime";
import { formatSizeBytes1000 } from "../formatSizeBytes1000";
import { createLogger, logLevelArgs } from "../logLevel";
import { ProgressBar } from "../ProgressBar";
import terminalSize from "../terminalSize";
const gpuOptionType: Type<string, LLMAccelerationOffload> = {
async from(str) {
@ -275,12 +275,13 @@ async function selectModelToLoad(
leaveEmptyLines: number,
_lastLoadedMap: Map<string, number>,
) {
inquirer.registerPrompt("autocomplete", inquirerPrompt);
console.info(
chalk.gray("! Use the arrow keys to navigate, type to filter, and press enter to select."),
);
console.info();
const { selected } = await inquirer.prompt({
const prompt = inquirer.createPromptModule({ output: process.stderr });
prompt.registerPrompt("autocomplete", inquirerPrompt);
const { selected } = await prompt({
type: "autocomplete",
name: "selected",
message: chalk.greenBright("Select a model to load") + chalk.gray(" |"),

View File

@ -26,24 +26,26 @@ const stream = command({
if (json) {
console.log(JSON.stringify(log));
} else {
console.log("Time: " + chalk.greenBright(new Date(log.timestamp).toLocaleString()));
console.log("Type: " + chalk.greenBright(log.data.type));
console.log("timestamp: " + chalk.greenBright(new Date(log.timestamp).toLocaleString()));
console.log("type: " + chalk.greenBright(log.data.type));
switch (log.data.type) {
case "llm.prediction": {
case "llm.prediction.input": {
printLlmPredictionLogEvent(log.data);
}
}
console.log();
console.log();
}
});
},
});
function printLlmPredictionLogEvent(data: DiagnosticsLogEventData & { type: "llm.prediction" }) {
console.log("Model Identifier: " + chalk.greenBright(data.modelIdentifier));
console.log("Model Path: " + chalk.greenBright(data.modelPath));
console.log(chalk.underline("Full Prompt"));
console.log(chalk.cyanBright(data.input));
console.log();
// Pretty-prints an "llm.prediction.input" diagnostics log event in the
// non-JSON (human-readable) output mode: one line each for the model
// identifier, the model path, and the quoted prompt input, with chalk
// coloring applied to the values.
function printLlmPredictionLogEvent(
  data: DiagnosticsLogEventData & { type: "llm.prediction.input" },
) {
  console.log("modelIdentifier: " + chalk.greenBright(data.modelIdentifier));
  console.log("modelPath: " + chalk.greenBright(data.modelPath));
  console.log(`input: "${chalk.green(data.input)}"`);
}
export const log = subcommands({

View File

@ -208,7 +208,7 @@ export async function startServer(
if (yes) {
logger.warn(`Auto-launch warning suppressed by ${chalk.yellowBright("--yes")} flag`);
} else {
logger.warnWithoutPrefix(text`
process.stderr.write(text`
${"\n"}${chalk.bold.underline.greenBright("About to Launch LM Studio")}
By default, if LM Studio is not running, attempting to start the server will launch LM
@ -220,9 +220,11 @@ export async function startServer(
)} flag.
`)}
${chalk.gray("This confirmation will not be shown again.")}${"\n"}
${chalk.gray("This confirmation will not be shown again.")}${"\n\n"}
`);
await inquirer.prompt([
await inquirer.createPromptModule({
output: process.stderr,
})([
{
type: "input",
name: "confirmation",

View File

@ -1,4 +1,5 @@
import { makeTitledPrettyError, text } from "@lmstudio/lms-common";
import { terminalSize } from "@lmstudio/lms-isomorphic";
import chalk from "chalk";
import { boolean, command, flag, optional, positional, string } from "cmd-ts";
import fuzzy from "fuzzy";
@ -6,7 +7,6 @@ import inquirer from "inquirer";
import inquirerPrompt from "inquirer-autocomplete-prompt";
import { createClient, createClientArgs } from "../createClient";
import { createLogger, logLevelArgs } from "../logLevel";
import terminalSize from "../terminalSize";
export const unload = command({
name: "unload",
@ -98,14 +98,15 @@ export const unload = command({
logger.error(`You don't have any models loaded. Use "lms load --gpu max" to load a model.`);
process.exit(1);
}
inquirer.registerPrompt("autocomplete", inquirerPrompt);
console.info();
console.info(
chalk.gray("! Use the arrow keys to navigate, type to filter, and press enter to select."),
);
console.info(chalk.gray("! To unload all models, use the --all flag."));
console.info();
const { selected } = await inquirer.prompt({
const prompt = inquirer.createPromptModule({ output: process.stderr });
prompt.registerPrompt("autocomplete", inquirerPrompt);
const { selected } = await prompt({
type: "autocomplete",
name: "selected",
message: chalk.greenBright("Select a model to unload") + chalk.gray(" |"),

View File

@ -1,116 +0,0 @@
// This file is vendored from https://www.npmjs.com/package/terminal-size
// with minimum changes to make it work in TypeScript.
//
// Done to support CommonJS
import { execFileSync } from "node:child_process";
import fs from "node:fs";
import process from "node:process";
import tty from "node:tty";
// Size reported when no real terminal dimensions can be determined.
const defaultColumns = 80;
const defaultRows = 24;

// Run an external command synchronously and return its trimmed stdout.
// stdin/stderr are discarded and the call is capped at 500 ms.
const exec = (cmd: any, args: any, { shell, env }: any = {}) => {
  const output = execFileSync(cmd, args, {
    encoding: "utf8",
    stdio: ["ignore", "pipe", "ignore"],
    timeout: 500,
    shell,
    env,
  });
  return output.trim();
};

// Parse raw column/row values into a `{ columns, rows }` object of numbers.
const create = (columns: any, rows: any) => {
  return {
    columns: Number.parseInt(columns, 10),
    rows: Number.parseInt(rows, 10),
  };
};

// Like `create`, but returns undefined when either value is unparsable or
// when the pair equals the hard-coded 80x24 default (treated as "unknown").
const createIfNotDefault = (maybeColumns: any, maybeRows: any) => {
  const parsed = create(maybeColumns, maybeRows);
  if (Number.isNaN(parsed.columns) || Number.isNaN(parsed.rows)) {
    return;
  }
  if (parsed.columns === defaultColumns && parsed.rows === defaultRows) {
    return;
  }
  return { columns: parsed.columns, rows: parsed.rows };
};
/**
 * Best-effort detection of the terminal size.
 *
 * Tries, in order: the live stdout/stderr stream dimensions, the static
 * COLUMNS/LINES environment variables, then platform-specific probes
 * (`tput`, /dev/tty, `resize`), and finally falls back to 80x24.
 */
export default function terminalSize() {
  const { env, stdout, stderr } = process;

  if (stdout?.columns && stdout?.rows) {
    return create(stdout.columns, stdout.rows);
  }
  if (stderr?.columns && stderr?.rows) {
    return create(stderr.columns, stderr.rows);
  }

  // These values are static, so not the first choice.
  if (env.COLUMNS && env.LINES) {
    return create(env.COLUMNS, env.LINES);
  }

  const fallback = {
    columns: defaultColumns,
    rows: defaultRows,
  };

  switch (process.platform) {
    case "win32":
      // We include `tput` for Windows users using Git Bash.
      return tput() ?? fallback;
    case "darwin":
      return devTty() ?? tput() ?? fallback;
    default:
      return devTty() ?? tput() ?? resize() ?? fallback;
  }
}
// Query /dev/tty directly for the terminal dimensions; returns undefined
// when the controlling terminal cannot be opened (e.g. no tty attached).
const devTty = () => {
  try {
    // eslint-disable-next-line no-bitwise
    let flags = fs.constants.O_NONBLOCK;
    if (process.platform === "darwin") {
      // eslint-disable-next-line no-bitwise
      flags |= (fs.constants as any).O_EVTONLY;
    }
    const fd = fs.openSync("/dev/tty", flags);
    // eslint-disable-next-line new-cap
    const stream = (tty.WriteStream as any)(fd);
    return { columns: stream.columns, rows: stream.rows };
    // eslint-disable-next-line no-empty
  } catch {}
};
// On macOS, this only returns correct values when stdout is not redirected.
// Asks the `tput` utility for the terminal size; returns undefined when
// either query fails or when the pair is the 80x24 default.
const tput = () => {
  try {
    // `tput` requires the `TERM` environment variable to be set.
    const environment = { TERM: "dumb", ...process.env };
    const columns = exec("tput", ["cols"], { env: environment });
    const rows = exec("tput", ["lines"], { env: environment });
    return columns && rows ? createIfNotDefault(columns, rows) : undefined;
  } catch {
    /* empty */
  }
};
// Only exists on Linux.
// `resize` is preferred as it works even when all file descriptors are redirected
// https://linux.die.net/man/1/resize
// Returns undefined when `resize` is unavailable, its output contains no
// usable numbers, or the reported size is the 80x24 default.
const resize = () => {
  try {
    // Pull the numeric fields out of the `resize -u` output.
    const size = exec("resize", ["-u"]).match(/\d+/g);
    // Explicit null guard: the original used non-null assertions here and
    // relied on the resulting TypeError being swallowed by the catch below
    // whenever the regex found no match.
    if (size && size.length === 2) {
      return createIfNotDefault(size[0], size[1]);
    }
    // eslint-disable-next-line no-empty
  } catch {}
};