fix: #1569 - Does not apply thread settings when loading model (#1576)

This commit is contained in:
Louis 2024-01-14 22:02:36 +07:00 committed by GitHub
parent e7fcd775fb
commit ca28fe51d1
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 18 additions and 4 deletions

View File

@@ -22,6 +22,7 @@ import {
   joinPath,
   InferenceExtension,
   log,
+  InferenceEngine,
 } from "@janhq/core";
 import { requestInference } from "./helpers/sse";
 import { ulid } from "ulid";
@@ -128,7 +129,7 @@ export default class JanInferenceNitroExtension implements InferenceExtension {
   }

   private async onModelInit(model: Model) {
-    if (model.engine !== "nitro") return;
+    if (model.engine !== InferenceEngine.nitro) return;
     const modelFullPath = await joinPath(["models", model.id]);

View File

@@ -180,7 +180,7 @@ function promptTemplateConverter(promptTemplate) {
  * @returns A Promise that resolves when the model is loaded successfully, or rejects with an error message if the model is not found or fails to load.
  */
 function loadLLMModel(settings): Promise<Response> {
-  log(`[NITRO]::Debug: Loading model with params ${settings}`);
+  log(`[NITRO]::Debug: Loading model with params ${JSON.stringify(settings)}`);
   return fetchRetry(NITRO_HTTP_LOAD_MODEL_URL, {
     method: "POST",
     headers: {

View File

@@ -1,11 +1,12 @@
 /* eslint-disable @typescript-eslint/no-explicit-any */
 import { EventName, events, Model } from '@janhq/core'
-import { atom, useAtom } from 'jotai'
+import { atom, useAtom, useAtomValue } from 'jotai'
 import { toaster } from '@/containers/Toast'
 import { useGetDownloadedModels } from './useGetDownloadedModels'
 import { LAST_USED_MODEL_ID } from './useRecommendedModel'
+import { activeThreadAtom } from '@/helpers/atoms/Thread.atom'

 export const activeModelAtom = atom<Model | undefined>(undefined)
@@ -17,6 +18,7 @@ export const stateModelAtom = atom({
 export function useActiveModel() {
   const [activeModel, setActiveModel] = useAtom(activeModelAtom)
+  const activeThread = useAtomValue(activeThreadAtom)
   const [stateModel, setStateModel] = useAtom(stateModelAtom)
   const { downloadedModels } = useGetDownloadedModels()
@@ -34,7 +36,7 @@ export function useActiveModel() {
     setStateModel({ state: 'start', loading: true, model: modelId })
-    const model = downloadedModels.find((e) => e.id === modelId)
+    let model = downloadedModels.find((e) => e.id === modelId)

     if (!model) {
       toaster({
@@ -49,6 +51,17 @@ export function useActiveModel() {
       return
     }

+    /// Apply thread model settings
+    if (activeThread?.assistants[0]?.model.id === modelId) {
+      model = {
+        ...model,
+        settings: {
+          ...model.settings,
+          ...activeThread.assistants[0].model.settings,
+        },
+      }
+    }
+
     localStorage.setItem(LAST_USED_MODEL_ID, model.id)
     events.emit(EventName.OnModelInit, model)
   }