fix: model import symlink
parent 61f72e6775
commit 1ab02b706f
@@ -71,7 +71,7 @@ export default class JanInferenceCortexExtension extends LocalOAIEngine {
       model.settings = {
         ...model.settings,
         llama_model_path: await getModelFilePath(
-          model.id,
+          model,
           model.settings.llama_model_path
         ),
       }
@@ -84,7 +84,7 @@ export default class JanInferenceCortexExtension extends LocalOAIEngine {
       // Legacy clip vision model support
       model.settings = {
         ...model.settings,
-        mmproj: await getModelFilePath(model.id, model.settings.mmproj),
+        mmproj: await getModelFilePath(model, model.settings.mmproj),
       }
     } else {
       const { mmproj, ...settings } = model.settings
@@ -136,9 +136,13 @@ export default class JanInferenceCortexExtension extends LocalOAIEngine {
 
 /// Legacy
 export const getModelFilePath = async (
-  id: string,
+  model: Model,
   file: string
 ): Promise<string> => {
-  return joinPath([await getJanDataFolderPath(), 'models', id, file])
+  // Symlink to the model file
+  if (!model.sources[0]?.url.startsWith('http')) {
+    return model.sources[0]?.url
+  }
+  return joinPath([await getJanDataFolderPath(), 'models', model.id, file])
 }
 ///
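In short, the reworked getModelFilePath above returns the model's own source path untouched when that source is not an http(s) URL (an imported, symlinked file), and only falls back to the Jan data folder layout for downloaded models. A rough usage sketch, assuming getModelFilePath and the Model type are in scope; the model literals below are invented for illustration and abbreviated:

// Imported model whose source points at a file elsewhere on disk (hypothetical).
const symlinked = {
  id: 'my-local-gguf',
  sources: [{ url: '/home/me/models/llama.gguf' }],
} as unknown as Model

// Model downloaded from a remote URL (hypothetical).
const downloaded = {
  id: 'llama3',
  sources: [{ url: 'https://example.com/llama3.gguf' }],
} as unknown as Model

// Non-http source: the original path comes back unchanged.
await getModelFilePath(symlinked, 'llama.gguf') // '/home/me/models/llama.gguf'

// http(s) source: resolved under <Jan data folder>/models/<id>/<file> as before.
await getModelFilePath(downloaded, 'llama3.gguf')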
@@ -168,7 +168,7 @@ export class CortexAPI implements ICortexAPI {
         (acc, cur) => acc + cur.bytes,
         0
       )
-      const percent = (transferred / total || 0) * 100
+      const percent = total > 0 ? transferred / total : 0
 
       events.emit(DownloadTypes[data.type], {
         modelId: data.task.id,
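Worth noting: the download handler above now emits a 0–1 fraction guarded against a zero total, instead of a pre-multiplied percentage (the old expression could even yield Infinity when total was 0), and the formatDownloadPercentage change further down accepts either form. A standalone restatement of the guard; downloadFraction is an illustrative name, not a helper from the codebase:

// Completed fraction of a download; 0 when the total is unknown or zero.
const downloadFraction = (transferred: number, total: number): number =>
  total > 0 ? transferred / total : 0

downloadFraction(512, 2048) // 0.25
downloadFraction(512, 0)    // 0, rather than Infinity from 512 / 0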
@@ -51,7 +51,7 @@ export default class JanModelExtension extends ModelExtension {
    * Called when the extension is unloaded.
    * @override
    */
-  async onUnload() { }
+  async onUnload() {}
 
   /**
    * Downloads a machine learning model.
@@ -64,8 +64,9 @@ export default class JanModelExtension extends ModelExtension {
     // Clip vision model - should not be handled by cortex.cpp
     // TensorRT model - should not be handled by cortex.cpp
     if (
-      model.engine === InferenceEngine.nitro_tensorrt_llm ||
-      model.settings.vision_model
+      model &&
+      (model.engine === InferenceEngine.nitro_tensorrt_llm ||
+        model.settings.vision_model)
     ) {
       return downloadModel(model, (await systemInformation()).gpuSetting)
     }
@@ -88,8 +89,9 @@ export default class JanModelExtension extends ModelExtension {
     // Clip vision model - should not be handled by cortex.cpp
     // TensorRT model - should not be handled by cortex.cpp
     if (
-      modelDto.engine === InferenceEngine.nitro_tensorrt_llm ||
-      modelDto.settings.vision_model
+      modelDto &&
+      (modelDto.engine === InferenceEngine.nitro_tensorrt_llm ||
+        modelDto.settings.vision_model)
     ) {
       for (const source of modelDto.sources) {
         const path = await joinPath(['models', modelDto.id, source.filename])
@@ -110,12 +112,13 @@ export default class JanModelExtension extends ModelExtension {
    */
   async deleteModel(model: string): Promise<void> {
     const modelDto: Model = ModelManager.instance().get(model)
-    return this.cortexAPI.deleteModel(model)
-      .catch(e => console.debug(e))
+    return this.cortexAPI
+      .deleteModel(model)
+      .catch((e) => console.debug(e))
       .finally(async () => {
         // Delete legacy model files
-        await deleteModelFiles(modelDto)
-          .catch(e => console.debug(e))
+        if (modelDto)
+          await deleteModelFiles(modelDto).catch((e) => console.debug(e))
       })
   }
 
@@ -179,13 +182,15 @@ export default class JanModelExtension extends ModelExtension {
       toImportModels.map(async (model: Model & { file_path: string }) =>
         this.importModel(
           model.id,
-          await joinPath([
-            await dirName(model.file_path),
-            model.sources[0]?.filename ??
-              model.settings?.llama_model_path ??
-              model.sources[0]?.url.split('/').pop() ??
-              model.id,
-          ])
+          model.sources[0].url.startsWith('http')
+            ? await joinPath([
+                await dirName(model.file_path),
+                model.sources[0]?.filename ??
+                  model.settings?.llama_model_path ??
+                  model.sources[0]?.url.split('/').pop() ??
+                  model.id,
+              ]) // Copied models
+            : model.sources[0].url // Symlink models
         )
       )
     )
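This importModel hunk is the heart of the symlink fix: a model whose source URL is an http(s) download keeps the previous behaviour of resolving a copied file next to its model.json, while a model that points at a local file is imported with that original path, so nothing gets copied. A condensed sketch of the same decision, assuming the same joinPath/dirName helpers and Model type used above are in scope; pickImportPath is an illustrative name, not a helper that exists in the codebase, and the fallback chain is shortened:

// Decide which path to hand to importModel for a legacy model entry (sketch).
const pickImportPath = async (
  model: Model & { file_path: string }
): Promise<string> =>
  model.sources[0].url.startsWith('http')
    ? // Downloaded/copied model: the file lives beside its model.json.
      await joinPath([
        await dirName(model.file_path),
        model.sources[0]?.filename ?? model.id,
      ])
    : // Symlinked model: keep the original absolute path untouched.
      model.sources[0].url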
@@ -197,13 +202,14 @@ export default class JanModelExtension extends ModelExtension {
      * Models are imported successfully before
      * Now return models from cortex.cpp and merge with legacy models which are not imported
      */
-    return (
-      this.cortexAPI.getModels().then((models) => {
+    return await this.cortexAPI
+      .getModels()
+      .then((models) => {
         return models.concat(
           legacyModels.filter((e) => !models.some((x) => x.id === e.id))
         )
-      }) ?? Promise.resolve(legacyModels)
-    )
+      })
+      .catch(() => Promise.resolve(legacyModels))
   }
 
   /**
@@ -125,6 +125,7 @@ export const setDownloadStateAtom = atom(
         (acc, m) => acc + m.size.transferred,
         0
       )
       modelDownloadState.size.total = parentTotalSize
+      modelDownloadState.size.transferred = transferredSize
 
       modelDownloadState.percent =
@@ -34,7 +34,7 @@ const useModels = () => {
   const getDownloadedModels = async () => {
     const localModels = (await getModels()).map((e) => ({
       ...e,
-      name: ModelManager.instance().models.get(e.id)?.name ?? e.name,
+      name: ModelManager.instance().models.get(e.id)?.name ?? e.id,
       metadata:
         ModelManager.instance().models.get(e.id)?.metadata ?? e.metadata,
     }))
@@ -18,8 +18,8 @@ export const formatDownloadPercentage = (
   input: number,
   options?: { hidePercentage?: boolean }
 ) => {
-  if (options?.hidePercentage) return input * 100
-  return (input * 100).toFixed(2) + '%'
+  if (options?.hidePercentage) return input <= 1 ? input * 100 : input
+  return (input <= 1 ? input * 100 : input).toFixed(2) + '%'
 }
 
 export const formatDownloadSpeed = (input: number | undefined) => {
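With the engine now reporting 0–1 fractions (see the CortexAPI hunk earlier), the formatter above tolerates both a fraction and a value that is already a percentage. A few examples of how the new behaviour reads, assuming formatDownloadPercentage is imported from this utils module:

formatDownloadPercentage(0.5) // '50.00%' (fraction, scaled up)
formatDownloadPercentage(50)  // '50.00%' (already a percentage, left unscaled)
formatDownloadPercentage(0.5, { hidePercentage: true }) // 50 as a number, no '%' suffix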
@@ -2,15 +2,16 @@ import { EngineManager, InferenceEngine, LocalOAIEngine } from '@janhq/core'
 
 export const getLogoEngine = (engine: InferenceEngine) => {
   switch (engine) {
+    case InferenceEngine.anthropic:
+      return 'images/ModelProvider/anthropic.svg'
+    case InferenceEngine.nitro_tensorrt_llm:
+    case InferenceEngine.nitro:
+      return 'images/ModelProvider/nitro.svg'
     case InferenceEngine.cortex_llamacpp:
     case InferenceEngine.cortex_onnx:
     case InferenceEngine.cortex_tensorrtllm:
       return 'images/ModelProvider/cortex.svg'
-    case InferenceEngine.anthropic:
-      return 'images/ModelProvider/anthropic.svg'
-    case InferenceEngine.nitro_tensorrt_llm:
-      return 'images/ModelProvider/nitro.svg'
 
     case InferenceEngine.mistral:
       return 'images/ModelProvider/mistral.svg'
     case InferenceEngine.martian:
@@ -49,11 +50,10 @@ export const isLocalEngine = (engine: string) => {
 export const getTitleByEngine = (engine: InferenceEngine) => {
   switch (engine) {
     case InferenceEngine.nitro:
       return 'Llama.cpp (Nitro)'
+    case InferenceEngine.nitro_tensorrt_llm:
+      return 'TensorRT-LLM (Nitro)'
     case InferenceEngine.cortex_llamacpp:
       return 'Llama.cpp (Cortex)'
-    case InferenceEngine.nitro_tensorrt_llm:
-      return 'TensorRT-LLM (Nitro)'
     case InferenceEngine.cortex_onnx:
       return 'Onnx (Cortex)'
     case InferenceEngine.cortex_tensorrtllm: