fix: #247 - inference plugin should check nitro service available

* fix: #247 check service status and emit error if any
* chore: error handling
* chore: typo
* fix: open conversation does not work when model is deleted
* chore: reload plugins in development mode without exiting the process
* chore: move model file check to inference plugin
* update package-lock.json

---------

Co-authored-by: Hien To <>
parent 63d8b895f3
commit a57dfe743b
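The substance of the fix: instead of assuming the spawned Nitro binary is ready, the inference plugin now verifies that the service port is actually reachable and resolves with an error object that the UI can surface. A minimal standalone sketch of that check, assuming the same tcp-port-used and kill-port-process packages this commit adds (ensureNitroStarted and spawnNitro are illustrative names, not part of the diff):

// Sketch only: mirrors the availability check introduced in module.ts below.
// tcp-port-used and kill-port-process are the dependencies this commit adds.
const tcpPortUsed = require("tcp-port-used");
const { killPortProcess } = require("kill-port-process");

const PORT = 3928; // Nitro's default port, as used by the plugin

// Hypothetical helper for illustration; the real code inlines this logic.
async function ensureNitroStarted(spawnNitro: () => void): Promise<{ error?: unknown }> {
  try {
    // Free the port first: poll every 200 ms for up to 3 s, then force-kill the holder.
    await tcpPortUsed.waitUntilFree(PORT, 200, 3000).catch(() => killPortProcess(PORT));
    spawnNitro();
    // The service counts as available once the port is in use (poll 300 ms, 30 s timeout).
    await tcpPortUsed.waitUntilUsed(PORT, 300, 30000);
    return {};
  } catch (err) {
    // Resolve with an error object rather than throwing, so callers can render it.
    return { error: err };
  }
}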
@@ -129,6 +129,7 @@ function updateFinishedDownloadAt(fileName: string, time: number) {
    db.run(stmt, [time, fileName], (err: any) => {
      if (err) {
        console.log(err);
        res(undefined);
      } else {
        console.log("Updated 1 row");
        res("Updated");
@@ -150,7 +151,11 @@ function getUnfinishedDownloadModels() {

    const query = `SELECT * FROM models WHERE finish_download_at = -1 ORDER BY start_download_at DESC`;
    db.all(query, (err: Error, row: any) => {
      if (row) {
        res(row);
      } else {
        res([]);
      }
    });
    db.close();
  });
@@ -193,13 +198,10 @@ function getModelById(modelId: string) {
      path.join(app.getPath("userData"), "jan.db")
    );

    console.debug("Get model by id", modelId);
    db.get(
      `SELECT * FROM models WHERE id = ?`,
      [modelId],
      (err: any, row: any) => {
        console.debug("Get model by id result", row);

        if (row) {
          const product = {
            id: row.id,
@@ -223,6 +225,8 @@ function getModelById(modelId: string) {
            downloadUrl: row.download_url,
          };
          res(product);
        } else {
          res(undefined);
        }
      }
    );
@@ -9,14 +9,6 @@ const initModel = async (product) =>
    }
  });

const dispose = async () =>
  new Promise(async (resolve) => {
    if (window.electronAPI) {
      window.electronAPI
        .invokePluginFunc(MODULE_PATH, "dispose")
        .then((res) => resolve(res));
    }
  });
const inferenceUrl = () => "http://localhost:3928/llama/chat_completion";

const stopModel = () => {
@@ -27,6 +19,5 @@ const stopModel = () => {
export function init({ register }) {
  register("initModel", "initModel", initModel);
  register("inferenceUrl", "inferenceUrl", inferenceUrl);
  register("dispose", "dispose", dispose);
  register("stopModel", "stopModel", stopModel);
}
@@ -1,102 +0,0 @@
const path = require("path");
const { app, dialog } = require("electron");
const { spawn } = require("child_process");
const fs = require("fs");

let subprocess = null;

async function initModel(product) {
  // fileName fallback
  if (!product.fileName) {
    product.fileName = product.file_name;
  }

  if (!product.fileName) {
    await dialog.showMessageBox({
      message: "Selected model does not have file name..",
    });

    return;
  }

  if (subprocess) {
    console.error(
      "A subprocess is already running. Attempt to kill then reinit."
    );
    dispose();
  }

  let binaryFolder = path.join(__dirname, "nitro"); // Current directory by default

  // Read the existing config
  const configFilePath = path.join(binaryFolder, "config", "config.json");
  let config = {};
  if (fs.existsSync(configFilePath)) {
    const rawData = fs.readFileSync(configFilePath, "utf-8");
    config = JSON.parse(rawData);
  }

  // Update the llama_model_path
  if (!config.custom_config) {
    config.custom_config = {};
  }

  const modelPath = path.join(app.getPath("userData"), product.fileName);

  config.custom_config.llama_model_path = modelPath;

  // Write the updated config back to the file
  fs.writeFileSync(configFilePath, JSON.stringify(config, null, 4));

  let binaryName;

  if (process.platform === "win32") {
    binaryName = "nitro_windows_amd64.exe";
  } else if (process.platform === "darwin") {
    // Mac OS platform
    binaryName =
      process.arch === "arm64" ? "nitro_mac_arm64" : "nitro_mac_amd64";
  } else {
    // Linux
    binaryName = "nitro_linux_amd64_cuda"; // For other platforms
  }

  const binaryPath = path.join(binaryFolder, binaryName);

  // Execute the binary

  subprocess = spawn(binaryPath, [configFilePath], { cwd: binaryFolder });

  // Handle subprocess output
  subprocess.stdout.on("data", (data) => {
    console.log(`stdout: ${data}`);
  });

  subprocess.stderr.on("data", (data) => {
    console.error(`stderr: ${data}`);
  });

  subprocess.on("close", (code) => {
    console.log(`child process exited with code ${code}`);
    subprocess = null;
  });
}

function dispose() {
  killSubprocess();
  // clean other registered resources here
}

function killSubprocess() {
  if (subprocess) {
    subprocess.kill();
    subprocess = null;
    console.log("Subprocess terminated.");
  } else {
    console.error("No subprocess is currently running.");
  }
}

module.exports = {
  initModel,
  killSubprocess,
  dispose,
};
electron/core/plugins/inference-plugin/module.ts (new file, 119 lines)
@@ -0,0 +1,119 @@
const path = require("path");
const { app } = require("electron");
const { spawn } = require("child_process");
const fs = require("fs");
const tcpPortUsed = require("tcp-port-used");
const { killPortProcess } = require("kill-port-process");

let subprocess = null;
const PORT = 3928;

const initModel = (product) => {
  return (
    new Promise<void>(async (resolve, reject) => {
      if (!product?.fileName) {
        reject("Model not found, please download again.");
      }
      if (subprocess) {
        console.error(
          "A subprocess is already running. Attempt to kill then reinit."
        );
        killSubprocess();
      }
      resolve(product?.fileName);
    })
      // Kill port process if it is already in use
      .then((fileName) =>
        tcpPortUsed
          .waitUntilFree(PORT, 200, 3000)
          .catch(() => killPortProcess(PORT))
          .then(() => fileName)
      )
      // Spawn Nitro subprocess to load model
      .then(() => {
        let binaryFolder = path.join(__dirname, "nitro"); // Current directory by default

        // Read the existing config
        const configFilePath = path.join(binaryFolder, "config", "config.json");
        let config: any = {};
        if (fs.existsSync(configFilePath)) {
          const rawData = fs.readFileSync(configFilePath, "utf-8");
          config = JSON.parse(rawData);
        }

        // Update the llama_model_path
        if (!config.custom_config) {
          config.custom_config = {};
        }

        const modelPath = path.join(app.getPath("userData"), product.fileName);

        config.custom_config.llama_model_path = modelPath;

        // Write the updated config back to the file
        fs.writeFileSync(configFilePath, JSON.stringify(config, null, 4));

        let binaryName;

        if (process.platform === "win32") {
          binaryName = "nitro_windows_amd64.exe";
        } else if (process.platform === "darwin") {
          // Mac OS platform
          binaryName =
            process.arch === "arm64" ? "nitro_mac_arm64" : "nitro_mac_amd64";
        } else {
          // Linux
          binaryName = "nitro_linux_amd64_cuda"; // For other platforms
        }

        const binaryPath = path.join(binaryFolder, binaryName);

        // Execute the binary

        subprocess = spawn(binaryPath, [configFilePath], { cwd: binaryFolder });

        // Handle subprocess output
        subprocess.stdout.on("data", (data) => {
          console.log(`stdout: ${data}`);
        });

        subprocess.stderr.on("data", (data) => {
          console.error(`stderr: ${data}`);
        });

        subprocess.on("close", (code) => {
          console.log(`child process exited with code ${code}`);
          subprocess = null;
        });
      })
      .then(() => tcpPortUsed.waitUntilUsed(PORT, 300, 30000))
      .then(() => {
        return {};
      })
      .catch((err) => {
        return { error: err };
      })
  );
};

function dispose() {
  killSubprocess();
  // clean other registered resources here
}

function killSubprocess() {
  if (subprocess) {
    subprocess.kill();
    subprocess = null;
    console.log("Subprocess terminated.");
  } else {
    killPortProcess(PORT);
    console.error("No subprocess is currently running.");
  }
}

module.exports = {
  initModel,
  killSubprocess,
  dispose,
};
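Note the contract change above: initModel now resolves with an empty object on success and { error } on failure instead of rejecting, so callers can branch without try/catch. A hypothetical consumer of that contract (the real hooks further down go through executeSerial(InferenceService.INIT_MODEL, model)):

// Hypothetical caller; the product literal and file name are illustrative only.
initModel({ fileName: "some-model.bin" }).then((res: any) => {
  if (res?.error) {
    console.error("Nitro failed to start:", res.error);
  } else {
    console.log("Model loaded, inference available on port 3928");
  }
});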
electron/core/plugins/inference-plugin/package-lock.json (generated, 595 lines; diff suppressed because it is too large)
@@ -10,23 +10,29 @@
    "init"
  ],
  "scripts": {
    "build": "webpack --config webpack.config.js",
    "postinstall": "rimraf ./*.tgz && npm run build && cpx \"module.js\" \"dist\" && rimraf dist/nitro/* && cpx \"nitro/**\" \"dist/nitro\"",
    "build": "tsc -b . && webpack --config webpack.config.js",
    "postinstall": "rimraf ./*.tgz && npm run build && rimraf dist/nitro/* && cpx \"nitro/**\" \"dist/nitro\"",
    "build:publish": "npm pack && cpx *.tgz ../../pre-install"
  },
  "exports": {
    ".": "./dist/index.js",
    "./main": "./dist/module.js"
  },
  "devDependencies": {
    "cpx": "^1.5.0",
    "rimraf": "^3.0.2",
    "webpack": "^5.88.2",
    "webpack-cli": "^5.1.4"
  },
  "bundledDependencies": [
    "electron-is-dev",
    "node-llama-cpp"
  ],
  "dependencies": {
    "electron-is-dev": "^2.0.0"
    "kill-port-process": "^3.2.0",
    "tcp-port-used": "^1.0.2",
    "ts-loader": "^9.5.0"
  },
  "bundledDependencies": [
    "tcp-port-used",
    "kill-port-process"
  ],
  "engines": {
    "node": ">=18.0.0"
  },
electron/core/plugins/inference-plugin/tsconfig.json (new file, 22 lines)
@@ -0,0 +1,22 @@
{
  "compilerOptions": {
    /* Visit https://aka.ms/tsconfig to read more about this file */
    /* Language and Environment */
    "target": "es2016" /* Set the JavaScript language version for emitted JavaScript and include compatible library declarations. */,
    /* Modules */
    "module": "ES6" /* Specify what module code is generated. */,
    // "rootDir": "./", /* Specify the root folder within your source files. */
    // "moduleResolution": "node", /* Specify how TypeScript looks up a file from a given module specifier. */
    // "baseUrl": "." /* Specify the base directory to resolve non-relative module names. */,
    // "paths": {} /* Specify a set of entries that re-map imports to additional lookup locations. */,
    // "rootDirs": [], /* Allow multiple folders to be treated as one when resolving modules. */
    // "resolveJsonModule": true, /* Enable importing .json files. */

    "outDir": "./dist" /* Specify an output folder for all emitted files. */,
    "esModuleInterop": true /* Emit additional JavaScript to ease support for importing CommonJS modules. This enables 'allowSyntheticDefaultImports' for type compatibility. */,
    "forceConsistentCasingInFileNames": true /* Ensure that casing is correct in imports. */,
    /* Type Checking */
    "strict": false /* Enable all strict type-checking options. */,
    "skipLibCheck": true /* Skip type checking all .d.ts files. */
  }
}
electron/core/plugins/inference-plugin/types/index.d.ts (vendored, new file, 7 lines)
@@ -0,0 +1,7 @@
export {};

declare global {
  interface Window {
    electronAPI?: any | undefined;
  }
}
@@ -2,7 +2,7 @@ const path = require("path");

module.exports = {
  experiments: { outputModule: true },
  entry: "./index.js", // Adjust the entry point to match your project's main file
  entry: "./index.ts", // Adjust the entry point to match your project's main file
  mode: "production",
  module: {
    rules: [
@@ -19,7 +19,7 @@ module.exports = {
    library: { type: "module" }, // Specify ESM output format
  },
  resolve: {
    extensions: [".js"],
    extensions: [".ts", ".js"],
  },
  // Add loaders and other configuration as needed for your project
};
@@ -156,8 +156,23 @@ function handleIPCs() {

    rmdir(fullPath, { recursive: true }, function (err) {
      if (err) console.log(err);
      dispose(requiredModules);

      // just relaunch if packaged, should launch manually in development mode
      if (app.isPackaged) {
        app.relaunch();
        app.exit();
      } else {
        for (const modulePath in requiredModules) {
          delete require.cache[
            require.resolve(
              join(app.getPath("userData"), "plugins", modulePath)
            )
          ];
        }
        setupPlugins();
        mainWindow?.reload();
      }
    });
  });
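The dev-mode branch above reloads plugins by evicting each plugin module from Node's require cache and re-running plugin setup, rather than relaunching the whole app. A standalone sketch of that cache-eviction idea (clearModule and pluginsDir are illustrative names, not part of the diff):

const { join } = require("path");

// Illustrative helper: drop a previously require()d module so the next
// require() re-reads it from disk. Mirrors the loop in handleIPCs above.
function clearModule(pluginsDir: string, modulePath: string) {
  delete require.cache[require.resolve(join(pluginsDir, modulePath))];
}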
@@ -9,6 +9,8 @@ import {
  conversationStatesAtom,
  getActiveConvoIdAtom,
  setActiveConvoIdAtom,
  updateConversationErrorAtom,
  updateConversationWaitingForResponseAtom,
} from "@/_helpers/atoms/Conversation.atom";
import {
  setMainViewStateAtom,
@@ -33,6 +35,10 @@ const HistoryItem: React.FC<Props> = ({
  const conversationStates = useAtomValue(conversationStatesAtom);
  const activeConvoId = useAtomValue(getActiveConvoIdAtom);
  const setActiveConvoId = useSetAtom(setActiveConvoIdAtom);
  const updateConvWaiting = useSetAtom(
    updateConversationWaitingForResponseAtom
  );
  const updateConvError = useSetAtom(updateConversationErrorAtom);
  const isSelected = activeConvoId === conversation.id;

  const { initModel } = useInitModel();
@@ -42,13 +48,16 @@ const HistoryItem: React.FC<Props> = ({
      DataService.GET_MODEL_BY_ID,
      conversation.model_id
    );
    if (!model) {
      alert(
        `Model ${conversation.model_id} not found! Please re-download the model first.`
      );
    } else {
      initModel(model);

      if (conversation.id) updateConvWaiting(conversation.id, true);
      initModel(model).then((res: any) => {
        if (conversation.id) updateConvWaiting(conversation.id, false);

        if (res?.error && conversation.id) {
          updateConvError(conversation.id, res.error);
        }
      });

    if (activeConvoId !== conversation.id) {
      setMainViewState(MainViewState.Conversation);
      setActiveConvoId(conversation.id);
@@ -9,14 +9,14 @@ import { Fragment } from "react";
import { PlusIcon } from "@heroicons/react/24/outline";
import useCreateConversation from "@/_hooks/useCreateConversation";
import { currentProductAtom } from "@/_helpers/atoms/Model.atom";
import { showingTyping } from "@/_helpers/JotaiWrapper";
import LoadingIndicator from "../LoadingIndicator";
import { currentConvoStateAtom } from "@/_helpers/atoms/Conversation.atom";

const InputToolbar: React.FC = () => {
  const showingAdvancedPrompt = useAtomValue(showingAdvancedPromptAtom);
  const currentProduct = useAtomValue(currentProductAtom);
  const { requestCreateConvo } = useCreateConversation();
  const isTyping = useAtomValue(showingTyping);
  const currentConvoState = useAtomValue(currentConvoStateAtom);

  if (showingAdvancedPrompt) {
    return <div />;
@@ -34,12 +34,20 @@ const InputToolbar: React.FC = () => {
  return (
    <Fragment>
      <div className="flex justify-between gap-2 mr-3 my-2">
        <div className="h-6">
          {isTyping && (
            <div className="my-2" key="indicator">
        <div className="h-6 space-x-5">
          {currentConvoState?.waitingForResponse === true && (
            <div className="ml-1 my-2" key="indicator">
              <LoadingIndicator />
            </div>
          )}{" "}
          )}
          {!currentConvoState?.waitingForResponse &&
            currentConvoState?.error && (
              <div className="flex flex-row justify-center">
                <span className="mx-5 my-2 text-red-500 text-sm">
                  {currentConvoState?.error?.toString()}
                </span>
              </div>
            )}
        </div>

        {/* <SecondaryButton title="Regenerate" onClick={onRegenerateClick} /> */}
@@ -13,8 +13,6 @@ export default function JotaiWrapper({ children }: Props) {

export const currentPromptAtom = atom<string>("");

export const showingTyping = atom<boolean>(false);

export const appDownloadProgress = atom<number>(-1);
export const searchingModelText = atom<string>("");
@@ -55,6 +55,18 @@ export const updateConversationWaitingForResponseAtom = atom(
    currentState[conversationId] = {
      ...currentState[conversationId],
      waitingForResponse,
      error: undefined,
    };
    set(conversationStatesAtom, currentState);
  }
);
export const updateConversationErrorAtom = atom(
  null,
  (get, set, conversationId: string, error?: Error) => {
    const currentState = { ...get(conversationStatesAtom) };
    currentState[conversationId] = {
      ...currentState[conversationId],
      error,
    };
    set(conversationStatesAtom, currentState);
  }
@@ -7,6 +7,8 @@ import {
  userConversationsAtom,
  setActiveConvoIdAtom,
  addNewConversationStateAtom,
  updateConversationWaitingForResponseAtom,
  updateConversationErrorAtom,
} from "@/_helpers/atoms/Conversation.atom";
import useInitModel from "./useInitModel";
@@ -17,6 +19,10 @@ const useCreateConversation = () => {
  );
  const setActiveConvoId = useSetAtom(setActiveConvoIdAtom);
  const addNewConvoState = useSetAtom(addNewConversationStateAtom);
  const updateConvWaiting = useSetAtom(
    updateConversationWaitingForResponseAtom
  );
  const updateConvError = useSetAtom(updateConversationErrorAtom);

  const requestCreateConvo = async (model: Product) => {
    const conversationName = model.name;
@@ -27,7 +33,14 @@ const useCreateConversation = () => {
      name: conversationName,
    };
    const id = await executeSerial(DataService.CREATE_CONVERSATION, conv);
    await initModel(model);

    if (id) updateConvWaiting(id, true);
    initModel(model).then((res: any) => {
      if (id) updateConvWaiting(id, false);
      if (res?.error) {
        updateConvError(id, res.error);
      }
    });

    const mappedConvo: Conversation = {
      id,
@@ -1,6 +1,6 @@
import { Product } from "@/_models/Product";
import { executeSerial } from "@/_services/pluginService";
import { InfereceService } from "../../shared/coreService";
import { InferenceService } from "../../shared/coreService";
import { useAtom } from "jotai";
import { currentProductAtom } from "@/_helpers/atoms/Model.atom";
@@ -12,12 +12,14 @@ export default function useInitModel() {
      console.debug(`Model ${model.id} is already init. Ignore..`);
      return;
    }
    try {
      await executeSerial(InfereceService.INIT_MODEL, model);
      console.debug(`Init model ${model.name} successfully!`);
    const res = await executeSerial(InferenceService.INIT_MODEL, model);
    if (res?.error) {
      console.log("error occured: ", res);
      return res;
    } else {
      console.log(`Init model successfully!`);
      setActiveModel(model);
    } catch (err) {
      console.error(`Init model ${model.name} failed: ${err}`);
      return {};
    }
  };
@@ -1,7 +1,7 @@
import { currentPromptAtom, showingTyping } from "@/_helpers/JotaiWrapper";
import { currentPromptAtom } from "@/_helpers/JotaiWrapper";
import { useAtom, useAtomValue, useSetAtom } from "jotai";
import { selectAtom } from "jotai/utils";
import { DataService, InfereceService } from "../../shared/coreService";
import { DataService, InferenceService } from "../../shared/coreService";
import {
  MessageSenderType,
  RawMessage,
@@ -18,6 +18,7 @@ import {
import {
  currentConversationAtom,
  getActiveConvoIdAtom,
  updateConversationWaitingForResponseAtom,
} from "@/_helpers/atoms/Conversation.atom";

export default function useSendChatMessage() {
@@ -26,6 +27,9 @@ export default function useSendChatMessage() {
  const addNewMessage = useSetAtom(addNewMessageAtom);
  const updateMessage = useSetAtom(updateMessageAtom);
  const activeConversationId = useAtomValue(getActiveConvoIdAtom) ?? "";
  const updateConvWaiting = useSetAtom(
    updateConversationWaitingForResponseAtom
  );

  const chatMessagesHistory = useAtomValue(
    selectAtom(
@@ -34,10 +38,11 @@ export default function useSendChatMessage() {
    )
  );
  const [currentPrompt, setCurrentPrompt] = useAtom(currentPromptAtom);
  const [, setIsTyping] = useAtom(showingTyping);

  const sendChatMessage = async () => {
    setIsTyping(true);
    setCurrentPrompt("");
    const conversationId = activeConversationId;
    updateConvWaiting(conversationId, true);
    const prompt = currentPrompt.trim();
    const newMessage: RawMessage = {
      conversation_id: parseInt(currentConvo?.id ?? "0") ?? 0,
@@ -65,7 +70,7 @@ export default function useSendChatMessage() {
          : "assistant",
      };
    });
    const url = await executeSerial(InfereceService.INFERENCE_URL);
    const url = await executeSerial(InferenceService.INFERENCE_URL);
    const response = await fetch(url, {
      method: "POST",
      headers: {
@@ -108,7 +113,7 @@ export default function useSendChatMessage() {
      const lines = text.trim().split("\n");
      for (const line of lines) {
        if (line.startsWith("data: ") && !line.includes("data: [DONE]")) {
          setIsTyping(false);
          updateConvWaiting(conversationId, false);
          const data = JSON.parse(line.replace("data: ", ""));
          answer += data.choices[0]?.delta?.content ?? "";
          if (answer.startsWith("assistant: ")) {
@@ -139,7 +144,7 @@ export default function useSendChatMessage() {
        .replace("T", " ")
        .replace(/\.\d+Z$/, ""),
    });
    setIsTyping(false);
    updateConvWaiting(conversationId, false);
  };
  return {
    sendChatMessage,
@@ -1,5 +1,5 @@
import { executeSerial } from "@/_services/pluginService";
import { DataService, InfereceService } from "../../shared/coreService";
import { DataService, InferenceService } from "../../shared/coreService";
import useInitModel from "./useInitModel";
import { useSetAtom } from "jotai";
import { currentProductAtom } from "@/_helpers/atoms/Model.atom";
@@ -18,7 +18,7 @@ export default function useStartStopModel() {
  };

  const stopModel = async (modelId: string) => {
    await executeSerial(InfereceService.STOP_MODEL, modelId);
    await executeSerial(InferenceService.STOP_MODEL, modelId);
    setActiveModel(undefined);
  };
@@ -14,4 +14,5 @@ export interface Conversation {
export type ConversationState = {
  hasMore: boolean;
  waitingForResponse: boolean;
  error?: Error;
};
@@ -6,7 +6,7 @@ import {
import {
  CoreService,
  DataService,
  InfereceService,
  InferenceService,
  ModelManagementService,
} from "../../shared/coreService";
@@ -14,7 +14,7 @@ export const isCorePluginInstalled = () => {
  if (!extensionPoints.get(DataService.GET_CONVERSATIONS)) {
    return false;
  }
  if (!extensionPoints.get(InfereceService.INIT_MODEL)) {
  if (!extensionPoints.get(InferenceService.INIT_MODEL)) {
    return false;
  }
  if (!extensionPoints.get(ModelManagementService.GET_DOWNLOADED_MODELS)) {
@@ -33,7 +33,7 @@ export const setupBasePlugins = async () => {

  if (
    !extensionPoints.get(DataService.GET_CONVERSATIONS) ||
    !extensionPoints.get(InfereceService.INIT_MODEL) ||
    !extensionPoints.get(InferenceService.INIT_MODEL) ||
    !extensionPoints.get(ModelManagementService.GET_DOWNLOADED_MODELS)
  ) {
    const installed = await plugins.install(basePlugins);
@@ -1,7 +1,7 @@
export type CoreService =
  | DataService
  | ModelService
  | InfereceService
  | InferenceService
  | ModelManagementService
  | SystemMonitoringService
  | PreferenceService;
@@ -27,7 +27,7 @@ export enum ModelService {
  GET_MODELS = "getModels",
}

export enum InfereceService {
export enum InferenceService {
  INFERENCE_URL = "inferenceUrl",
  INIT_MODEL = "initModel",
  STOP_MODEL = "stopModel",