fix: model import symlink

This commit is contained in:
Louis 2024-10-29 21:28:25 +07:00
parent 61f72e6775
commit 1ab02b706f
No known key found for this signature in database
GPG Key ID: 44FA9F4D33C37DE2
7 changed files with 46 additions and 35 deletions

View File

@@ -71,7 +71,7 @@ export default class JanInferenceCortexExtension extends LocalOAIEngine {
model.settings = { model.settings = {
...model.settings, ...model.settings,
llama_model_path: await getModelFilePath( llama_model_path: await getModelFilePath(
model.id, model,
model.settings.llama_model_path model.settings.llama_model_path
), ),
} }
@@ -84,7 +84,7 @@ export default class JanInferenceCortexExtension extends LocalOAIEngine {
// Legacy clip vision model support // Legacy clip vision model support
model.settings = { model.settings = {
...model.settings, ...model.settings,
mmproj: await getModelFilePath(model.id, model.settings.mmproj), mmproj: await getModelFilePath(model, model.settings.mmproj),
} }
} else { } else {
const { mmproj, ...settings } = model.settings const { mmproj, ...settings } = model.settings
@@ -136,9 +136,13 @@ export default class JanInferenceCortexExtension extends LocalOAIEngine {
/// Legacy /// Legacy
export const getModelFilePath = async ( export const getModelFilePath = async (
id: string, model: Model,
file: string file: string
): Promise<string> => { ): Promise<string> => {
return joinPath([await getJanDataFolderPath(), 'models', id, file]) // Symlink to the model file
if (!model.sources[0]?.url.startsWith('http')) {
return model.sources[0]?.url
}
return joinPath([await getJanDataFolderPath(), 'models', model.id, file])
} }
/// ///

View File

@@ -168,7 +168,7 @@ export class CortexAPI implements ICortexAPI {
(acc, cur) => acc + cur.bytes, (acc, cur) => acc + cur.bytes,
0 0
) )
const percent = (transferred / total || 0) * 100 const percent = total > 0 ? transferred / total : 0
events.emit(DownloadTypes[data.type], { events.emit(DownloadTypes[data.type], {
modelId: data.task.id, modelId: data.task.id,

View File

@@ -64,8 +64,9 @@ export default class JanModelExtension extends ModelExtension {
// Clip vision model - should not be handled by cortex.cpp // Clip vision model - should not be handled by cortex.cpp
// TensorRT model - should not be handled by cortex.cpp // TensorRT model - should not be handled by cortex.cpp
if ( if (
model.engine === InferenceEngine.nitro_tensorrt_llm || model &&
model.settings.vision_model (model.engine === InferenceEngine.nitro_tensorrt_llm ||
model.settings.vision_model)
) { ) {
return downloadModel(model, (await systemInformation()).gpuSetting) return downloadModel(model, (await systemInformation()).gpuSetting)
} }
@@ -88,8 +89,9 @@ export default class JanModelExtension extends ModelExtension {
// Clip vision model - should not be handled by cortex.cpp // Clip vision model - should not be handled by cortex.cpp
// TensorRT model - should not be handled by cortex.cpp // TensorRT model - should not be handled by cortex.cpp
if ( if (
modelDto.engine === InferenceEngine.nitro_tensorrt_llm || modelDto &&
modelDto.settings.vision_model (modelDto.engine === InferenceEngine.nitro_tensorrt_llm ||
modelDto.settings.vision_model)
) { ) {
for (const source of modelDto.sources) { for (const source of modelDto.sources) {
const path = await joinPath(['models', modelDto.id, source.filename]) const path = await joinPath(['models', modelDto.id, source.filename])
@@ -110,12 +112,13 @@ export default class JanModelExtension extends ModelExtension {
*/ */
async deleteModel(model: string): Promise<void> { async deleteModel(model: string): Promise<void> {
const modelDto: Model = ModelManager.instance().get(model) const modelDto: Model = ModelManager.instance().get(model)
return this.cortexAPI.deleteModel(model) return this.cortexAPI
.catch(e => console.debug(e)) .deleteModel(model)
.catch((e) => console.debug(e))
.finally(async () => { .finally(async () => {
// Delete legacy model files // Delete legacy model files
await deleteModelFiles(modelDto) if (modelDto)
.catch(e => console.debug(e)) await deleteModelFiles(modelDto).catch((e) => console.debug(e))
}) })
} }
@@ -179,13 +182,15 @@ export default class JanModelExtension extends ModelExtension {
toImportModels.map(async (model: Model & { file_path: string }) => toImportModels.map(async (model: Model & { file_path: string }) =>
this.importModel( this.importModel(
model.id, model.id,
await joinPath([ model.sources[0].url.startsWith('http')
? await joinPath([
await dirName(model.file_path), await dirName(model.file_path),
model.sources[0]?.filename ?? model.sources[0]?.filename ??
model.settings?.llama_model_path ?? model.settings?.llama_model_path ??
model.sources[0]?.url.split('/').pop() ?? model.sources[0]?.url.split('/').pop() ??
model.id, model.id,
]) ]) // Copied models
: model.sources[0].url // Symlink models
) )
) )
) )
@@ -197,13 +202,14 @@ export default class JanModelExtension extends ModelExtension {
* Models are imported successfully before * Models are imported successfully before
* Now return models from cortex.cpp and merge with legacy models which are not imported * Now return models from cortex.cpp and merge with legacy models which are not imported
*/ */
return ( return await this.cortexAPI
this.cortexAPI.getModels().then((models) => { .getModels()
.then((models) => {
return models.concat( return models.concat(
legacyModels.filter((e) => !models.some((x) => x.id === e.id)) legacyModels.filter((e) => !models.some((x) => x.id === e.id))
) )
}) ?? Promise.resolve(legacyModels) })
) .catch(() => Promise.resolve(legacyModels))
} }
/** /**

View File

@@ -125,6 +125,7 @@ export const setDownloadStateAtom = atom(
(acc, m) => acc + m.size.transferred, (acc, m) => acc + m.size.transferred,
0 0
) )
modelDownloadState.size.total = parentTotalSize
modelDownloadState.size.transferred = transferredSize modelDownloadState.size.transferred = transferredSize
modelDownloadState.percent = modelDownloadState.percent =

View File

@@ -34,7 +34,7 @@ const useModels = () => {
const getDownloadedModels = async () => { const getDownloadedModels = async () => {
const localModels = (await getModels()).map((e) => ({ const localModels = (await getModels()).map((e) => ({
...e, ...e,
name: ModelManager.instance().models.get(e.id)?.name ?? e.name, name: ModelManager.instance().models.get(e.id)?.name ?? e.id,
metadata: metadata:
ModelManager.instance().models.get(e.id)?.metadata ?? e.metadata, ModelManager.instance().models.get(e.id)?.metadata ?? e.metadata,
})) }))

View File

@@ -18,8 +18,8 @@ export const formatDownloadPercentage = (
input: number, input: number,
options?: { hidePercentage?: boolean } options?: { hidePercentage?: boolean }
) => { ) => {
if (options?.hidePercentage) return input * 100 if (options?.hidePercentage) return input <= 1 ? input * 100 : input
return (input * 100).toFixed(2) + '%' return (input <= 1 ? input * 100 : input).toFixed(2) + '%'
} }
export const formatDownloadSpeed = (input: number | undefined) => { export const formatDownloadSpeed = (input: number | undefined) => {

View File

@@ -2,15 +2,16 @@ import { EngineManager, InferenceEngine, LocalOAIEngine } from '@janhq/core'
export const getLogoEngine = (engine: InferenceEngine) => { export const getLogoEngine = (engine: InferenceEngine) => {
switch (engine) { switch (engine) {
case InferenceEngine.anthropic:
return 'images/ModelProvider/anthropic.svg'
case InferenceEngine.nitro_tensorrt_llm:
case InferenceEngine.nitro: case InferenceEngine.nitro:
return 'images/ModelProvider/nitro.svg'
case InferenceEngine.cortex_llamacpp: case InferenceEngine.cortex_llamacpp:
case InferenceEngine.cortex_onnx: case InferenceEngine.cortex_onnx:
case InferenceEngine.cortex_tensorrtllm: case InferenceEngine.cortex_tensorrtllm:
return 'images/ModelProvider/cortex.svg' return 'images/ModelProvider/cortex.svg'
case InferenceEngine.anthropic:
return 'images/ModelProvider/anthropic.svg'
case InferenceEngine.nitro_tensorrt_llm:
return 'images/ModelProvider/nitro.svg'
case InferenceEngine.mistral: case InferenceEngine.mistral:
return 'images/ModelProvider/mistral.svg' return 'images/ModelProvider/mistral.svg'
case InferenceEngine.martian: case InferenceEngine.martian:
@@ -49,11 +50,10 @@ export const isLocalEngine = (engine: string) => {
export const getTitleByEngine = (engine: InferenceEngine) => { export const getTitleByEngine = (engine: InferenceEngine) => {
switch (engine) { switch (engine) {
case InferenceEngine.nitro: case InferenceEngine.nitro:
return 'Llama.cpp (Nitro)'
case InferenceEngine.nitro_tensorrt_llm:
return 'TensorRT-LLM (Nitro)'
case InferenceEngine.cortex_llamacpp: case InferenceEngine.cortex_llamacpp:
return 'Llama.cpp (Cortex)' return 'Llama.cpp (Cortex)'
case InferenceEngine.nitro_tensorrt_llm:
return 'TensorRT-LLM (Nitro)'
case InferenceEngine.cortex_onnx: case InferenceEngine.cortex_onnx:
return 'Onnx (Cortex)' return 'Onnx (Cortex)'
case InferenceEngine.cortex_tensorrtllm: case InferenceEngine.cortex_tensorrtllm: