Merge branch 'main' into docs/assistants-api-reference

Hieu 2023-12-01 23:37:21 +09:00 committed by GitHub
commit 42674fd79f
75 changed files with 979 additions and 663 deletions

.gitignore (vendored, 2 changes)

@ -2,7 +2,6 @@
.env
# Jan inference
models/**
error.log
node_modules
*.tgz
@ -11,6 +10,7 @@ dist
build
.DS_Store
electron/renderer
electron/models
package-lock.json
*.log


@ -54,6 +54,9 @@ const getUserSpace = (): Promise<string> => window.core.api?.getUserSpace();
const openFileExplorer: (path: string) => Promise<any> = (path) =>
window.core.api?.openFileExplorer(path);
const getResourcePath: () => Promise<string> = () =>
window.core.api?.getResourcePath();
/**
* Register extension point function type definition
*/
@ -74,4 +77,5 @@ export {
appDataPath,
getUserSpace,
openFileExplorer,
getResourcePath,
};
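For orientation, a minimal sketch of renderer-side code consuming the newly exported getResourcePath alongside the existing helpers (revealResources is a hypothetical function, not part of this commit):

import { getResourcePath, getUserSpace, openFileExplorer } from '@janhq/core'

// Resolve the bundled resource directory over IPC, then reveal it on disk.
async function revealResources(): Promise<void> {
  const resourcePath = await getResourcePath()
  const userSpace = await getUserSpace()
  console.debug(`resources: ${resourcePath}, user space: ${userSpace}`)
  await openFileExplorer(resourcePath)
}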


@ -1,5 +1,5 @@
import { BaseExtension } from "../extension";
import { Model, ModelCatalog } from "../types/index";
import { Model } from "../types/index";
/**
* Model extension for managing models.
@ -43,5 +43,5 @@ export abstract class ModelExtension extends BaseExtension {
* Gets a list of configured models.
* @returns A Promise that resolves with an array of configured models.
*/
abstract getConfiguredModels(): Promise<ModelCatalog[]>;
abstract getConfiguredModels(): Promise<Model[]>;
}


@ -62,6 +62,9 @@ const deleteFile: (path: string) => Promise<any> = (path) =>
const appendFile: (path: string, data: string) => Promise<any> = (path, data) =>
window.core.api?.appendFile(path, data);
const copyFile: (src: string, dest: string) => Promise<any> = (src, dest) =>
window.core.api?.copyFile(src, dest);
/**
* Reads a file line by line.
* @param {string} path - The path of the file to read.
@ -80,4 +83,5 @@ export const fs = {
deleteFile,
appendFile,
readLineByLine,
copyFile,
};
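A usage sketch for the new fs.copyFile surface (both paths are illustrative). Note that the main-process handler backs it with fse.copySync(src, dest, { overwrite: false }), so an existing destination is left untouched:

import { fs } from '@janhq/core'

// Copy a model config to a backup path; copyFile resolves via window.core.api.
async function backupModelConfig(): Promise<void> {
  await fs.copyFile(
    'models/tinyllama-1.1b/model.json',
    'models/tinyllama-1.1b/model.json.bak'
  )
}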


@ -180,7 +180,7 @@ export interface Model {
/**
* The version of the model.
*/
version: string;
version: number;
/**
* The model download source. It can be an external url or a local filepath.
@ -197,12 +197,6 @@ export interface Model {
*/
name: string;
/**
* The organization that owns the model (you!)
* Default: "you"
*/
owned_by: string;
/**
* The Unix timestamp (in seconds) for when the model was created
*/
@ -236,11 +230,16 @@ export interface Model {
metadata: ModelMetadata;
}
export type ModelMetadata = {
author: string;
tags: string[];
size: number;
};
/**
* The Model transition states.
*/
export enum ModelState {
ToDownload = "to_download",
Downloading = "downloading",
Ready = "ready",
Running = "running",
@ -250,65 +249,27 @@ export enum ModelState {
* The available model settings.
*/
export type ModelSettingParams = {
ctx_len: number;
ngl: number;
embedding: boolean;
n_parallel: number;
ctx_len?: number;
ngl?: number;
embedding?: boolean;
n_parallel?: number;
system_prompt?: string;
user_prompt?: string;
ai_prompt?: string;
};
/**
* The available model runtime parameters.
*/
export type ModelRuntimeParam = {
temperature: number;
token_limit: number;
top_k: number;
top_p: number;
stream: boolean;
temperature?: number;
token_limit?: number;
top_k?: number;
top_p?: number;
stream?: boolean;
max_tokens?: number;
};
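Since every field of ModelSettingParams and ModelRuntimeParam is now optional, callers can supply partial overrides instead of a fully populated object, e.g.:

import { ModelSettingParams, ModelRuntimeParam } from '@janhq/core'

// Only the overridden fields need to be present; the rest fall back to defaults.
const settings: ModelSettingParams = { ctx_len: 4096, system_prompt: '' }
const params: ModelRuntimeParam = { temperature: 0.7, stream: true, max_tokens: 2048 }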
/**
* The metadata of the model.
*/
export type ModelMetadata = {
engine: string;
quantization: string;
size: number;
binaries: string[];
maxRamRequired: number;
author: string;
avatarUrl: string;
};
/**
* Model type of the presentation object which will be presented to the user
* @data_transfer_object
*/
export interface ModelCatalog {
/** The unique id of the model.*/
id: string;
/** The name of the model.*/
name: string;
/** The avatar url of the model.*/
avatarUrl: string;
/** The short description of the model.*/
shortDescription: string;
/** The long description of the model.*/
longDescription: string;
/** The author name of the model.*/
author: string;
/** The version of the model.*/
version: string;
/** The origin url of the model repo.*/
modelUrl: string;
/** The timestamp indicating when this model was released.*/
releaseDate: number;
/** The tags attached to the model description **/
tags: string[];
/** The available versions of this model to download. */
availableVersions: Model[];
}
/**
* Assistant type defines the shape of an assistant object.
* @stored


@ -1,9 +1,9 @@
import { app, ipcMain, shell } from 'electron'
import { ModuleManager } from '../managers/module'
import { ModuleManager } from './../managers/module'
import { join } from 'path'
import { ExtensionManager } from '../managers/extension'
import { WindowManager } from '../managers/window'
import { userSpacePath } from '../utils/path'
import { ExtensionManager } from './../managers/extension'
import { WindowManager } from './../managers/window'
import { userSpacePath } from './../utils/path'
export function handleAppIPCs() {
/**


@ -1,9 +1,10 @@
import { app, ipcMain } from 'electron'
import { DownloadManager } from '../managers/download'
import { DownloadManager } from './../managers/download'
import { resolve, join } from 'path'
import { WindowManager } from '../managers/window'
import { WindowManager } from './../managers/window'
import request from 'request'
import { createWriteStream, unlink } from 'fs'
import { createWriteStream } from 'fs'
import { getResourcePath } from './../utils/path'
const progress = require('request-progress')
export function handleDownloaderIPCs() {
@ -37,6 +38,10 @@ export function handleDownloaderIPCs() {
rq?.abort()
})
ipcMain.handle('getResourcePath', async (_event) => {
return getResourcePath()
})
/**
* Downloads a file from a given URL.
* @param _event - The IPC event object.


@ -1,19 +1,16 @@
import { app, ipcMain, webContents } from 'electron'
import { readdirSync, rmdir, writeFileSync } from 'fs'
import { ModuleManager } from '../managers/module'
import { ipcMain, webContents } from 'electron'
import { readdirSync } from 'fs'
import { ModuleManager } from './../managers/module'
import { join, extname } from 'path'
import { ExtensionManager } from '../managers/extension'
import { WindowManager } from '../managers/window'
import { manifest, tarball } from 'pacote'
import {
getActiveExtensions,
getAllExtensions,
installExtensions,
} from '../extension/store'
import { getExtension } from '../extension/store'
import { removeExtension } from '../extension/store'
import Extension from '../extension/extension'
import { userSpacePath } from '../utils/path'
} from './../extension/store'
import { getExtension } from './../extension/store'
import { removeExtension } from './../extension/store'
import Extension from './../extension/extension'
import { getResourcePath, userSpacePath } from './../utils/path'
export function handleExtensionIPCs() {
/**MARK: General handlers */
@ -48,11 +45,7 @@ export function handleExtensionIPCs() {
* @returns An array of paths to the base extensions.
*/
ipcMain.handle('extension:baseExtensions', async (_event) => {
const baseExtensionPath = join(
__dirname,
'../',
app.isPackaged ? '../../app.asar.unpacked/pre-install' : '../pre-install'
)
const baseExtensionPath = join(getResourcePath(), 'pre-install')
return readdirSync(baseExtensionPath)
.filter((file) => extname(file) === '.tgz')
.map((file) => join(baseExtensionPath, file))


@ -1,8 +1,9 @@
import { app, ipcMain } from 'electron'
import { ipcMain } from 'electron'
import * as fs from 'fs'
import fse from 'fs-extra'
import { join } from 'path'
import readline from 'readline'
import { userSpacePath } from '../utils/path'
import { userSpacePath } from './../utils/path'
/**
* Handles file system operations.
@ -145,6 +146,12 @@ export function handleFsIPCs() {
}
})
ipcMain.handle('copyFile', async (_event, src: string, dest: string) => {
console.debug(`Copying file from ${src} to ${dest}`)
return fse.copySync(src, dest, { overwrite: false })
})
/**
* Reads a file line by line.
* @param event - The event object.


@ -1,5 +1,5 @@
import { app, dialog } from "electron";
import { WindowManager } from "../managers/window";
import { WindowManager } from "./../managers/window";
import { autoUpdater } from "electron-updater";
export function handleAppUpdates() {


@ -67,6 +67,20 @@ export function fsInvokers() {
* @param {string} path - The path of the directory to remove.
*/
rmdir: (path: string) => ipcRenderer.invoke('rmdir', path),
/**
* Copies a file from the source path to the destination path.
* @param {string} src - The source path of the file to copy.
* @param {string} dest - The destination path where the file should be copied.
*/
copyFile: (src: string, dest: string) => ipcRenderer.invoke('copyFile', src, dest),
/**
* Retrieves the resource path.
* @returns {Promise<string>} A promise that resolves to the resource path.
*/
getResourcePath: () => ipcRenderer.invoke('getResourcePath'),
}
return interfaces


@ -1,7 +1,7 @@
import { app, BrowserWindow } from 'electron'
import { join } from 'path'
import { setupMenu } from './utils/menu'
import { handleFsIPCs } from './handlers/fs'
import { createUserSpace, getResourcePath } from './utils/path'
/**
* Managers
@ -18,9 +18,11 @@ import { handleThemesIPCs } from './handlers/theme'
import { handleExtensionIPCs } from './handlers/extension'
import { handleAppIPCs } from './handlers/app'
import { handleAppUpdates } from './handlers/update'
import { handleFsIPCs } from './handlers/fs'
app
.whenReady()
.then(createUserSpace)
.then(ExtensionManager.instance.migrateExtensions)
.then(ExtensionManager.instance.setupExtensions)
.then(setupMenu)
@ -56,7 +58,7 @@ function createMainWindow() {
})
const startURL = app.isPackaged
? `file://${join(__dirname, '../renderer/index.html')}`
? `file://${join(__dirname, '..', 'renderer', 'index.html')}`
: 'http://localhost:3000'
/* Load frontend app to the window */


@ -1,10 +1,10 @@
import { app } from 'electron'
import { init } from '../extension'
import { init } from './../extension'
import { join, resolve } from 'path'
import { rmdir } from 'fs'
import Store from 'electron-store'
import { existsSync, mkdirSync, writeFileSync } from 'fs'
import { userSpacePath } from '../utils/path'
import { userSpacePath } from './../utils/path'
/**
* Manages extension installation and migration.
*/


@ -1,4 +1,4 @@
import { dispose } from "../utils/disposable";
import { dispose } from "./../utils/disposable";
/**
* Manages imported modules.


@ -13,10 +13,12 @@
"renderer/**/*",
"build/*.{js,map}",
"build/**/*.{js,map}",
"pre-install"
"pre-install",
"models/**/*"
],
"asarUnpack": [
"pre-install"
"pre-install",
"models"
],
"publish": [
{
@ -70,6 +72,7 @@
"@uiball/loaders": "^1.3.0",
"electron-store": "^8.1.0",
"electron-updater": "^6.1.4",
"fs-extra": "^11.2.0",
"pacote": "^17.0.4",
"request": "^2.88.2",
"request-progress": "^3.0.0",


@ -1,4 +1,19 @@
import { join } from 'path'
import { app } from 'electron'
import { mkdir } from 'fs-extra'
export async function createUserSpace(): Promise<void> {
return mkdir(userSpacePath).catch(() => {})
}
export const userSpacePath = join(app.getPath('home'), 'jan')
export function getResourcePath() {
let appPath = join(app.getAppPath(), '..', 'app.asar.unpacked')
if (!app.isPackaged) {
// for development mode
appPath = join(__dirname, '..', '..')
}
return appPath
}
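A sketch of what this resolves to in each mode (paths are illustrative; exact locations depend on platform and install directory):

import { join } from 'path'
import { getResourcePath, userSpacePath } from './utils/path'

// Packaged build: <install>/resources/app.asar.unpacked/{pre-install,models}
// Development:    two levels above the compiled output, i.e. the electron/ folder
const preInstallDir = join(getResourcePath(), 'pre-install')
console.debug(`pre-install: ${preInstallDir}, user space: ${userSpacePath}`)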


@ -146,7 +146,6 @@ export default class JanInferenceExtension implements InferenceExtension {
object: "thread.message",
};
events.emit(EventName.OnMessageResponse, message);
console.log(JSON.stringify(data, null, 2));
instance.isCancelled = false;
instance.controller = new AbortController();


@ -1,3 +1,2 @@
declare const PLUGIN_NAME: string;
declare const MODULE_PATH: string;
declare const MODEL_CATALOG_URL: string;
declare const PLUGIN_NAME: string
declare const MODULE_PATH: string


@ -1,21 +0,0 @@
interface Version {
name: string
quantMethod: string
bits: number
size: number
maxRamRequired: number
usecase: string
downloadLink: string
}
interface ModelSchema {
id: string
name: string
shortDescription: string
avatarUrl: string
longDescription: string
author: string
version: string
modelUrl: string
tags: string[]
versions: Version[]
}


@ -1,46 +0,0 @@
import { ModelCatalog } from '@janhq/core'
export const parseToModel = (modelGroup): ModelCatalog => {
const modelVersions = []
modelGroup.versions.forEach((v) => {
const model = {
object: 'model',
version: modelGroup.version,
source_url: v.downloadLink,
id: v.name,
name: v.name,
owned_by: 'you',
created: 0,
description: modelGroup.longDescription,
state: 'to_download',
settings: v.settings,
parameters: v.parameters,
metadata: {
engine: '',
quantization: v.quantMethod,
size: v.size,
binaries: [],
maxRamRequired: v.maxRamRequired,
author: modelGroup.author,
avatarUrl: modelGroup.avatarUrl,
},
}
modelVersions.push(model)
})
const modelCatalog: ModelCatalog = {
id: modelGroup.id,
name: modelGroup.name,
avatarUrl: modelGroup.avatarUrl,
shortDescription: modelGroup.shortDescription,
longDescription: modelGroup.longDescription,
author: modelGroup.author,
version: modelGroup.version,
modelUrl: modelGroup.modelUrl,
releaseDate: modelGroup.createdAt,
tags: modelGroup.tags,
availableVersions: modelVersions,
}
return modelCatalog
}


@ -1,6 +1,12 @@
import { ExtensionType, fs, downloadFile, abortDownload } from '@janhq/core'
import { ModelExtension, Model, ModelCatalog } from '@janhq/core'
import { parseToModel } from './helpers/modelParser'
import {
ExtensionType,
fs,
downloadFile,
abortDownload,
getResourcePath,
getUserSpace,
} from '@janhq/core'
import { ModelExtension, Model, ModelState } from '@janhq/core'
import { join } from 'path'
/**
@ -24,10 +30,7 @@ export default class JanModelExtension implements ModelExtension {
* @override
*/
onLoad(): void {
/** Cloud Native
* TODO: Fetch all downloading progresses?
**/
fs.mkdir(JanModelExtension._homeDir)
this.copyModelsToHomeDir()
}
/**
@ -36,6 +39,30 @@ export default class JanModelExtension implements ModelExtension {
*/
onUnload(): void {}
private async copyModelsToHomeDir() {
try {
// list all of the files under the home directory
const files = await fs.listFiles('')
if (files.includes(JanModelExtension._homeDir)) {
// ignore if the model is already downloaded
console.debug('Model already downloaded')
return
}
// copy models folder from resources to home directory
const resourcePath = await getResourcePath()
const srcPath = join(resourcePath, 'models')
const userSpace = await getUserSpace()
const destPath = join(userSpace, JanModelExtension._homeDir)
await fs.copyFile(srcPath, destPath)
} catch (err) {
console.error(err)
}
}
/**
* Downloads a machine learning model.
* @param model - The model to download.
@ -57,11 +84,11 @@ export default class JanModelExtension implements ModelExtension {
* @returns {Promise<void>} A promise that resolves when the download has been cancelled.
*/
async cancelModelDownload(modelId: string): Promise<void> {
return abortDownload(join(JanModelExtension._homeDir, modelId, modelId)).then(
() => {
fs.rmdir(join(JanModelExtension._homeDir, modelId))
}
)
return abortDownload(
join(JanModelExtension._homeDir, modelId, modelId)
).then(() => {
fs.deleteFile(join(JanModelExtension._homeDir, modelId, modelId))
})
}
/**
@ -72,7 +99,26 @@ export default class JanModelExtension implements ModelExtension {
async deleteModel(modelId: string): Promise<void> {
try {
const dirPath = join(JanModelExtension._homeDir, modelId)
await fs.rmdir(dirPath)
// remove all files under dirPath except model.json
const files = await fs.listFiles(dirPath)
const deletePromises = files.map((fileName: string) => {
if (fileName !== JanModelExtension._modelMetadataFileName) {
return fs.deleteFile(join(dirPath, fileName))
}
})
await Promise.allSettled(deletePromises)
// update the state as default
const jsonFilePath = join(
dirPath,
JanModelExtension._modelMetadataFileName
)
const json = await fs.readFile(jsonFilePath)
const model = JSON.parse(json) as Model
delete model.state
await fs.writeFile(jsonFilePath, JSON.stringify(model, null, 2))
} catch (err) {
console.error(err)
}
@ -91,7 +137,17 @@ export default class JanModelExtension implements ModelExtension {
)
try {
await fs.writeFile(jsonFilePath, JSON.stringify(model, null, 2))
await fs.writeFile(
jsonFilePath,
JSON.stringify(
{
...model,
state: ModelState.Ready,
},
null,
2
)
)
} catch (err) {
console.error(err)
}
@ -102,39 +158,62 @@ export default class JanModelExtension implements ModelExtension {
* @returns A Promise that resolves with an array of all models.
*/
async getDownloadedModels(): Promise<Model[]> {
const results: Model[] = []
const allDirs: string[] = await fs.listFiles(JanModelExtension._homeDir)
for (const dir of allDirs) {
const modelDirPath = join(JanModelExtension._homeDir, dir)
const isModelDir = await fs.isDirectory(modelDirPath)
if (!isModelDir) {
// if not a directory, ignore
continue
const models = await this.getModelsMetadata()
return models.filter((model) => model.state === ModelState.Ready)
}
private async getModelsMetadata(): Promise<Model[]> {
try {
const filesUnderJanRoot = await fs.listFiles('')
if (!filesUnderJanRoot.includes(JanModelExtension._homeDir)) {
console.debug('model folder not found')
return []
}
const jsonFiles: string[] = (await fs.listFiles(modelDirPath)).filter(
(fileName: string) => fileName === JanModelExtension._modelMetadataFileName
)
const files: string[] = await fs.listFiles(JanModelExtension._homeDir)
for (const json of jsonFiles) {
const model: Model = JSON.parse(
await fs.readFile(join(modelDirPath, json))
const allDirectories: string[] = []
for (const file of files) {
const isDirectory = await fs.isDirectory(
join(JanModelExtension._homeDir, file)
)
results.push(model)
if (isDirectory) {
allDirectories.push(file)
}
}
}
return results
const readJsonPromises = allDirectories.map((dirName) => {
const jsonPath = join(
JanModelExtension._homeDir,
dirName,
JanModelExtension._modelMetadataFileName
)
return this.readModelMetadata(jsonPath)
})
const results = await Promise.allSettled(readJsonPromises)
const modelData = results.map((result) => {
if (result.status === 'fulfilled') {
return JSON.parse(result.value) as Model
} else {
console.error(result.reason)
}
})
return modelData
} catch (err) {
console.error(err)
return []
}
}
private readModelMetadata(path: string) {
return fs.readFile(join(path))
}
/**
* Gets all available models.
* @returns A Promise that resolves with an array of all models.
*/
getConfiguredModels(): Promise<ModelCatalog[]> {
// Add a timestamp to the URL to prevent caching
return import(
/* webpackIgnore: true */ MODEL_CATALOG_URL + `?t=${Date.now()}`
).then((module) => module.default.map((e) => parseToModel(e)))
async getConfiguredModels(): Promise<Model[]> {
return this.getModelsMetadata()
}
}
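With the remote catalog gone, both listings are now derived from on-disk metadata; a sketch of the UI-side call path (listModels is a hypothetical helper):

import { ExtensionType, Model, ModelExtension } from '@janhq/core'
import { extensionManager } from '@/extension'

async function listModels(): Promise<{ configured: Model[]; downloaded: Model[] }> {
  const ext = extensionManager.get<ModelExtension>(ExtensionType.Model)
  const configured = (await ext?.getConfiguredModels()) ?? [] // every <homeDir>/<id>/model.json
  const downloaded = (await ext?.getDownloadedModels()) ?? [] // only entries with state === 'ready'
  return { configured, downloaded }
}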


@ -19,9 +19,6 @@ module.exports = {
new webpack.DefinePlugin({
PLUGIN_NAME: JSON.stringify(packageJson.name),
MODULE_PATH: JSON.stringify(`${packageJson.name}/${packageJson.module}`),
MODEL_CATALOG_URL: JSON.stringify(
'https://cdn.jsdelivr.net/npm/@janhq/models@latest/dist/index.js'
),
}),
],
output: {


@ -0,0 +1,24 @@
{
"source_url": "https://huggingface.co/TheBloke/Nous-Capybara-34B-GGUF/resolve/main/nous-capybara-34b.Q5_K_M.gguf",
"id": "capybara-34b",
"object": "model",
"name": "Capybara 200k 34B",
"version": 1.0,
"description": "Nous Capybara 34B, a variant of the Yi-34B model, is the first Nous model with a 200K context length, trained for three epochs on the innovative Capybara dataset.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "",
"user_prompt": "USER: ",
"ai_prompt": "ASSISTANT: "
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "NousResearch, The Bloke",
"tags": ["General", "Big Context Length"],
"size": 24320000000
}
}
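Each bundled model.json below deserializes directly into the trimmed Model interface; a sketch using the file above (the relative path assumes the extension's models home directory):

import { fs, Model } from '@janhq/core'
import { join } from 'path'

async function loadModel(): Promise<Model> {
  const raw = await fs.readFile(join('models', 'capybara-34b', 'model.json'))
  return JSON.parse(raw) as Model // settings, parameters, and metadata match the core types
}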


@ -0,0 +1,23 @@
{
"source_url": "https://huggingface.co/TheBloke/deepseek-coder-1.3b-base-GGUF/resolve/main/deepseek-coder-1.3b-base.Q4_K_M.gguf",
"id": "deepseek-coder-1.3b",
"object": "model",
"name": "Deepseek Coder 1.3B",
"version": "1.0",
"description": "",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "",
"user_prompt": "",
"ai_prompt": ""
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "deepseek, The Bloke",
"tags": ["Code"],
"size": 870000000
}
}


@ -0,0 +1,24 @@
{
"source_url": "https://huggingface.co/TheBloke/dolphin-2_2-yi-34b-GGUF/resolve/main/dolphin-2_2-yi-34b.Q5_K_M.gguf",
"id": "dolphin-yi-34b",
"object": "model",
"name": "Dolphin Yi 34B",
"version": "1.0",
"description": "Dolphin, based on the Yi-34B model and enhanced with features like conversation and empathy, is trained on a unique dataset for advanced multi-turn conversations. Notably uncensored, it requires careful implementation of an alignment layer for ethical use.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "<|im_start|>system\n",
"user_prompt": "<|im_end|>\n<|im_start|>user\n",
"ai_prompt": "<|im_end|>\n<|im_start|>assistant\n"
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "ehartford, The Bloke",
"tags": ["General Use", "Role-playing"],
"size": 24320000000
}
}

models/islm-3b/model.json (new file, 24 lines)

@ -0,0 +1,24 @@
{
"source_url": "https://huggingface.co/UmbrellaCorp/IS-LM-3B_GGUF/resolve/main/IS-LM-Q4_K_M.gguf",
"id": "islm-3b",
"object": "model",
"name": "IS LM 3B",
"version": "1.0",
"description": "IS LM 3B, based on the StableLM 3B model is specifically finetuned for economic analysis using DataForge Economics and QLoRA over three epochs, enhancing its proficiency in economic forecasting and analysis.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "",
"user_prompt": "USER: ",
"ai_prompt": "ASSISTANT: "
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "UmbrellaCorp, The Bloke",
"tags": ["General Use", "Economics"],
"size": 1710000000
}
}


@ -0,0 +1,24 @@
{
"source_url": "https://huggingface.co/TheBloke/lzlv_70B-GGUF/resolve/main/lzlv_70b_fp16_hf.Q5_K_M.gguf",
"id": "lzlv-70b",
"object": "model",
"name": "Lzlv 70B",
"version": "1.0",
"description": "lzlv_70B is a sophisticated AI model designed for roleplaying and creative tasks. This merge aims to combine intelligence with creativity, seemingly outperforming its individual components in complex scenarios and creative outputs.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "",
"user_prompt": "USER: ",
"ai_prompt": "ASSISTANT: "
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "lizpreciatior, The Bloke",
"tags": ["General Use", "Role-playing"],
"size": 48750000000
}
}

models/marx-3b/model.json (new file, 23 lines)

@ -0,0 +1,23 @@
{
"source_url": "https://huggingface.co/TheBloke/Marx-3B-v3-GGUF/resolve/main/marx-3b-v3.Q4_K_M.gguf",
"id": "marx-3b",
"object": "model",
"name": "Marx 3B",
"version": "1.0",
"description": "Marx 3B, based on the StableLM 3B model is specifically finetuned for chating using EverythingLM data and QLoRA over two epochs, enhancing its proficiency in understand general knowledege.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "",
"user_prompt": "### HUMAN: ",
"ai_prompt": "### RESPONSE: "
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "Bohan Du, The Bloke",
"tags": ["General Use"],
"size": 1620000000
}
}


@ -0,0 +1,24 @@
{
"source_url": "https://huggingface.co/TheBloke/MythoMax-L2-13B-GGUF/resolve/main/mythomax-l2-13b.Q5_K_M.gguf",
"id": "mythomax-13b",
"object": "model",
"name": "Mythomax L2 13B",
"version": "1.0",
"description": "Mythomax L2 13b, an advanced AI model derived from MythoMix, merges MythoLogic-L2's deep comprehension with Huginn's writing skills through a unique tensor merge technique, excelling in roleplaying and storytelling.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "",
"user_prompt": "### Instruction: ",
"ai_prompt": "### Response: "
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "Gryphe, The Bloke",
"tags": ["Role-playing"],
"size": 9230000000
}
}


@ -0,0 +1,24 @@
{
"source_url": "https://huggingface.co/TheBloke/neural-chat-7B-v3-1-GGUF/resolve/main/neural-chat-7b-v3-1.Q4_K_M.gguf",
"id": "neural-chat-7b",
"object": "model",
"name": "Neural Chat 7B",
"version": "1.0",
"description": "The Neural Chat 7B model, developed on the foundation of mistralai/Mistral-7B-v0.1, has been fine-tuned using the Open-Orca/SlimOrca dataset and aligned with the Direct Preference Optimization (DPO) algorithm. It has demonstrated substantial improvements in various AI tasks and performance well on the open_llm_leaderboard.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "### System: ",
"user_prompt": "### User: ",
"ai_prompt": "### Assistant: "
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "Intel, The Bloke",
"tags": ["General Use", "Role-playing", "Big Context Length"],
"size": 4370000000
}
}


@ -0,0 +1,24 @@
{
"source_url": "https://huggingface.co/TheBloke/NeuralHermes-2.5-Mistral-7B-GGUF/resolve/main/neuralhermes-2.5-mistral-7b.Q4_K_M.gguf",
"id": "neuralhermes-7b",
"object": "model",
"name": "NeuralHermes 7B",
"version": "1.0",
"description": "NeuralHermes 2.5 has been enhanced using Direct Preference Optimization. This fine-tuning, inspired by the RLHF process of Neural-chat-7b and OpenHermes-2.5-Mistral-7B, has led to improved performance across several benchmarks.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "<|im_start|>system\n",
"user_prompt": "<|im_end|>\n<|im_start|>user\n",
"ai_prompt": "<|im_end|>\n<|im_start|>assistant\n"
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "Intel, The Bloke",
"tags": ["General Use", "Code", "Big Context Length"],
"size": 4370000000
}
}


@ -0,0 +1,24 @@
{
"source_url": "https://huggingface.co/TheBloke/Noromaid-20B-v0.1.1-GGUF/resolve/main/noromaid-20b-v0.1.1.Q4_K_M.gguf",
"id": "noromaid-20b",
"object": "model",
"name": "Noromaid 20B",
"version": "1.0",
"description": "The Noromaid 20b model is designed for role-playing and general use, featuring a unique touch with the no_robots dataset that enhances human-like behavior.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "",
"user_prompt": "### Instruction: ",
"ai_prompt": "### Response: "
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "NeverSleep, The Bloke",
"tags": ["Role-playing"],
"size": 12040000000
}
}


@ -0,0 +1,24 @@
{
"source_url": "https://huggingface.co/TheBloke/openchat_3.5-GGUF/resolve/main/openchat_3.5.Q4_K_M.gguf",
"id": "openchat-7b",
"object": "model",
"name": "Open Chat 3.5 7B",
"version": "1.0",
"description": "OpenChat represents a breakthrough in the realm of open-source language models. By implementing the C-RLFT fine-tuning strategy, inspired by offline reinforcement learning, this 7B model achieves results on par with ChatGPT (March).",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "",
"user_prompt": "GPT4 User: ",
"ai_prompt": "<|end_of_turn|>\nGPT4 Assistant: "
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "OpenChat, The Bloke",
"tags": ["General", "Code"],
"size": 4370000000
}
}


@ -0,0 +1,24 @@
{
"source_url": "https://huggingface.co/TheBloke/OpenHermes-2.5-Mistral-7B-GGUF/resolve/main/openhermes-2.5-mistral-7b.Q4_K_M.gguf",
"id": "openhermes-mistral-7b",
"object": "model",
"name": "Openhermes 2.5 Mistral 7B",
"version": "1.0",
"description": "The OpenHermes 2.5 Mistral 7B incorporates additional code datasets, more than a million GPT-4 generated data examples, and other high-quality open datasets. This enhancement led to significant improvement in benchmarks, highlighting its improved skill in handling code-centric tasks.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "<|im_start|>system\n",
"user_prompt": "<|im_end|>\n<|im_start|>user\n",
"ai_prompt": "<|im_end|>\n<|im_start|>assistant\n"
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "Teknium, The Bloke",
"tags": ["General", "Roleplay"],
"size": 4370000000
}
}


@ -0,0 +1,24 @@
{
"source_url": "https://huggingface.co/TheBloke/Orca-2-13B-GGUF/resolve/main/orca-2-13b.Q5_K_M.gguf",
"id": "openorca-13b",
"object": "model",
"name": "Orca 2 13B",
"version": "1.0",
"description": "Orca 2 is a finetuned version of LLAMA-2, designed primarily for single-turn responses in reasoning, reading comprehension, math problem solving, and text summarization.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "<|im_start|>system\n",
"user_prompt": "<|im_end|>\n<|im_start|>user\n",
"ai_prompt": "<|im_end|>\n<|im_start|>assistant\n"
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "Microsoft, The Bloke",
"tags": ["General Use"],
"size": 9230000000
}
}


@ -0,0 +1,24 @@
{
"source_url": "https://huggingface.co/TheBloke/Mistral-7B-OpenOrca-GGUF/resolve/main/mistral-7b-openorca.Q4_K_M.gguf",
"id": "openorca-7b",
"object": "model",
"name": "OpenOrca 7B",
"version": "1.0",
"description": "OpenOrca 8k 7B is a model based on Mistral 7B, fine-tuned using the OpenOrca dataset. Notably ranked first on the HF Leaderboard for models under 30B, it excels in efficiency and accessibility.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "<|im_start|>system\n",
"user_prompt": "<|im_end|>\n<|im_start|>user\n",
"ai_prompt": "<|im_end|>\n<|im_start|>assistant\n"
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "OpenOrca, The Bloke",
"tags": ["General", "Code"],
"size": 4370000000
}
}


@ -0,0 +1,24 @@
{
"source_url": "https://huggingface.co/TheBloke/Phind-CodeLlama-34B-v2-GGUF/resolve/main/phind-codellama-34b-v2.Q5_K_M.gguf",
"id": "phind-34b",
"object": "model",
"name": "Phind 34B",
"version": "1.0",
"description": "Phind-CodeLlama-34B-v2 is an AI model fine-tuned on 1.5B tokens of high-quality programming data. It's a SOTA open-source model in coding. This multi-lingual model excels in various programming languages, including Python, C/C++, TypeScript, Java, and is designed to be steerable and user-friendly.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "### System Prompt\n",
"user_prompt": "### User Message\n",
"ai_prompt": "### Assistant\n"
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "Phind, The Bloke",
"tags": ["Code", "Big Context Length"],
"size": 24320000000
}
}


@ -0,0 +1,23 @@
{
"source_url": "https://huggingface.co/TheBloke/rocket-3B-GGUF/resolve/main/rocket-3b.Q4_K_M.gguf",
"id": "rocket-3b",
"object": "model",
"name": "Rocket 3B",
"version": "1.0",
"description": "Rocket-3B is a GPT-like model, primarily English, fine-tuned on diverse public datasets. It outperforms larger models in benchmarks, showcasing superior understanding and text generation, making it an effective chat model for its size.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "<|im_start|>system\n",
"user_prompt": "<|im_end|>\n<|im_start|>user\n",
"ai_prompt": "<|im_end|>\n<|im_start|>assistant\n"
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "pansophic, The Bloke",
"tags": ["General Use"],
"size": 1710000000
}
}


@ -0,0 +1,24 @@
{
"source_url": "https://huggingface.co/TheBloke/Starling-LM-7B-alpha-GGUF/resolve/main/starling-lm-7b-alpha.Q4_K_M.gguf",
"id": "starling-7b",
"object": "model",
"name": "Strarling alpha 7B",
"version": "1.0",
"description": "Starling-RM-7B-alpha is a language model finetuned with Reinforcement Learning from AI Feedback from Openchat 3.5. It stands out for its impressive performance using GPT-4 as a judge, making it one of the top-performing models in its category.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "",
"user_prompt": "GPT4 User: ",
"ai_prompt": "<|end_of_turn|>\nGPT4 Assistant: "
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "Berkeley-nest, The Bloke",
"tags": ["General", "Code"],
"size": 4370000000
}
}


@ -0,0 +1,24 @@
{
"source_url": "https://huggingface.co/TheBloke/GOAT-70B-Storytelling-GGUF/resolve/main/goat-70b-storytelling.Q5_K_M.gguf",
"id": "storytelling-70b",
"object": "model",
"name": "Storytelling 70B",
"version": "1.0",
"description": "The GOAT-70B-Storytelling model is designed for autonomous story-writing, including crafting books and movie scripts. Based on the LLaMA 2 70B architecture, this model excels in generating cohesive and engaging narratives using inputs like plot outlines and character profiles.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "",
"user_prompt": "### USER: ",
"ai_prompt": "\n### ASSISTANT: "
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "GOAT-AI, The Bloke",
"tags": ["General Use", "Writing"],
"size": 48750000000
}
}


@ -0,0 +1,24 @@
{
"source_url": "https://huggingface.co/TheBloke/LLaMA2-13B-Tiefighter-GGUF/resolve/main/llama2-13b-tiefighter.Q5_K_M.gguf",
"id": "tiefighter-13b",
"object": "model",
"name": "Tiefighter 13B",
"version": "1.0",
"description": "Tiefighter-13B is a highly creative, merged AI model achieved by combining various 'LORAs' on top of an existing merge, particularly focusing on storytelling and improvisation. This model excels in story writing, chatbots, and adventuring, and is designed to perform better with less detailed inputs, leveraging its inherent creativity.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "",
"user_prompt": "### Instruction: ",
"ai_prompt": "\n### Response: "
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "KoboldAI, The Bloke",
"tags": ["General Use", "Role-playing", "Writing"],
"size": 9230000000
}
}


@ -0,0 +1,23 @@
{
"source_url": "https://huggingface.co/TinyLlama/TinyLlama-1.1B-Chat-v0.6/resolve/main/ggml-model-q4_0.gguf",
"id": "tinyllama-1.1b",
"object": "model",
"name": "TinyLlama Chat 1.1B",
"version": "1.0",
"description": "The TinyLlama project, featuring a 1.1B parameter Llama model, is pretrained on an expansive 3 trillion token dataset. Its design ensures easy integration with various Llama-based open-source projects. Despite its smaller size, it efficiently utilizes lower computational and memory resources, drawing on GPT-4's analytical prowess to enhance its conversational abilities and versatility.",
"format": "gguf",
"settings": {
"ctx_len": 2048,
"system_prompt": "<|system|>\n",
"user_prompt": "<|user|>\n",
"ai_prompt": "<|assistant|>\n"
},
"parameters": {
"max_tokens": 2048
},
"metadata": {
"author": "TinyLlama",
"tags": ["General Use"],
"size": 637000000
}
}


@ -0,0 +1,24 @@
{
"source_url": "https://huggingface.co/TheBloke/WizardCoder-Python-13B-V1.0-GGUF/resolve/main/wizardcoder-python-13b-v1.0.Q5_K_M.gguf",
"id": "wizardcoder-13b",
"object": "model",
"name": "Wizard Coder Python 13B",
"version": "1.0",
"description": "WizardCoder-Python-13B is a Python coding model major models like ChatGPT-3.5. This model based on the Llama2 architecture, demonstrate high proficiency in specific domains like coding and mathematics.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "",
"user_prompt": "### Instruction:\n",
"ai_prompt": "### Response:\n"
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "WizardLM, The Bloke",
"tags": ["Code", "Big Context Length"],
"size": 9230000000
}
}


@ -0,0 +1,24 @@
{
"source_url": "https://huggingface.co/TheBloke/WizardCoder-Python-34B-V1.0-GGUF/resolve/main/wizardcoder-python-34b-v1.0.Q5_K_M.gguf",
"id": "wizardcoder-34b",
"object": "model",
"name": "Wizard Coder Python 34B",
"version": "1.0",
"description": "WizardCoder-Python-34B is a Python coding model major models like ChatGPT-3.5. This model based on the Llama2 architecture, demonstrate high proficiency in specific domains like coding and mathematics.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "",
"user_prompt": "### Instruction:\n",
"ai_prompt": "### Response:\n"
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "WizardLM, The Bloke",
"tags": ["Code", "Big Context Length"],
"size": 24320000000
}
}


@ -0,0 +1,24 @@
{
"source_url": "https://huggingface.co/TheBloke/Xwin-LM-70B-V0.1-GGUF/resolve/main/xwin-lm-70b-v0.1.Q5_K_M.gguf",
"id": "xwin-70b",
"object": "model",
"name": "Xwin LM 70B",
"version": "1.0",
"description": "Xwin-LM, based on Llama2 models, emphasizes alignment and exhibits advanced language understanding, text generation, and role-playing abilities.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "",
"user_prompt": "USER: ",
"ai_prompt": "ASSISTANT: "
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "Xwin-LM, The Bloke",
"tags": ["General Use", "Role-playing"],
"size": 48750000000
}
}


@ -0,0 +1,21 @@
{
"source_url": "https://huggingface.co/TheBloke/Yarn-Llama-2-70B-32k-GGUF/resolve/main/yarn-llama-2-70b-32k.Q5_K_M.gguf",
"id": "yarn-70b",
"object": "model",
"name": "Yarn 32k 70B",
"version": "1,0",
"description": "Yarn-Llama-2-70b-32k is designed specifically for handling long contexts. It represents an extension of the Llama-2-70b-hf model, now supporting a 32k token context window.",
"format": "gguf",
"settings": {
"ctx_len": 4096
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "NousResearch, The Bloke",
"tags": ["General Use", "Big Context Length"],
"size": 48750000000
}
}

models/yi-34b/model.json (new file, 24 lines)

@ -0,0 +1,24 @@
{
"source_url": "https://huggingface.co/TheBloke/Yi-34B-Chat-GGUF/resolve/main/yi-34b-chat.Q5_K_M.gguf",
"id": "yi-34b",
"object": "model",
"name": "Yi 34B",
"version": "1.0",
"description": "Yi-34B, a specialized chat model, is known for its diverse and creative responses and excels across various NLP tasks and benchmarks.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "<|im_start|>system\n",
"user_prompt": "<|im_end|>\n<|im_start|>user\n",
"ai_prompt": "<|im_end|>\n<|im_start|>assistant\n"
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "01-ai, The Bloke",
"tags": ["General", "Role-playing", "Writing"],
"size": 24320000000
}
}


@ -0,0 +1,24 @@
{
"source_url": "https://huggingface.co/TheBloke/zephyr-7B-beta-GGUF/resolve/main/zephyr-7b-beta.Q4_K_M.gguf",
"id": "zephyr-beta-7b",
"object": "model",
"name": "Zephyr Beta 7B",
"version": "1.0",
"description": "The Zephyr-7B-β model marks the second iteration in the Zephyr series, designed to function as an effective assistant. It has been fine-tuned from the mistralai/Mistral-7B-v0.1 base model, utilizing a combination of public and synthetic datasets with the application of Direct Preference Optimization.",
"format": "gguf",
"settings": {
"ctx_len": 4096,
"system_prompt": "<|system|>\n",
"user_prompt": "</s>\n<|user|>\n",
"ai_prompt": "</s>\n<|assistant|>\n"
},
"parameters": {
"max_tokens": 4096
},
"metadata": {
"author": "HuggingFaceH4, The Bloke",
"tags": ["General Use", "Big Context Length"],
"size": 4370000000
}
}


@ -6,8 +6,7 @@
"uikit",
"core",
"electron",
"web",
"server"
"web"
],
"nohoist": [
"uikit",
@ -17,15 +16,13 @@
"electron",
"electron/**",
"web",
"web/**",
"server",
"server/**"
"web/**"
]
},
"scripts": {
"lint": "yarn workspace jan lint && yarn workspace jan-web lint",
"test": "yarn workspace jan test:e2e",
"dev:electron": "yarn workspace jan dev",
"dev:electron": "cpx \"models/**\" \"electron/models/\" && yarn workspace jan dev",
"dev:web": "yarn workspace jan-web dev",
"dev": "concurrently --kill-others \"yarn dev:web\" \"wait-on http://localhost:3000 && yarn dev:electron\"",
"test-local": "yarn lint && yarn build:test && yarn test",
@ -33,7 +30,7 @@
"build:uikit": "yarn workspace @janhq/uikit install && yarn workspace @janhq/uikit build",
"build:core": "cd core && yarn install && yarn run build",
"build:web": "yarn workspace jan-web build && cpx \"web/out/**\" \"electron/renderer/\"",
"build:electron": "yarn workspace jan build",
"build:electron": "yarn workspace jan build && cpx \"models/**\" \"electron/models/\"",
"build:electron:test": "yarn workspace jan build:test",
"build:extensions": "rimraf ./electron/pre-install/*.tgz && concurrently --kill-others-on-fail \"cd ./extensions/conversational-extension && npm install && npm run build:publish\" \"cd ./extensions/inference-extension && npm install && npm run build:publish\" \"cd ./extensions/model-extension && npm install && npm run build:publish\" \"cd ./extensions/monitoring-extension && npm install && npm run build:publish\" \"cd ./extensions/assistant-extension && npm install && npm run build:publish\"",
"build:test": "yarn build:web && yarn workspace jan build:test",


@ -1,9 +1,16 @@
type Props = {
title: string
description?: string
disabled?: boolean
onChange?: (text?: string) => void
}
export default function ItemCardSidebar({ description, title }: Props) {
export default function ItemCardSidebar({
description,
title,
disabled,
onChange,
}: Props) {
return (
<div className="flex flex-col gap-2">
<div className="flex items-center gap-2">
@ -11,9 +18,11 @@ export default function ItemCardSidebar({ description, title }: Props) {
</div>
<input
value={description}
disabled={disabled}
type="text"
className="block w-full rounded-md border-0 px-1 py-1.5 text-white shadow-sm ring-1 ring-inset ring-gray-300 placeholder:text-gray-400 focus:ring-2 focus:ring-inset focus:ring-indigo-600 sm:text-sm sm:leading-6"
placeholder=""
onChange={(e) => onChange?.(e.target.value)}
/>
</div>
)


@ -69,18 +69,14 @@ export default function DownloadingState() {
/>
<div className="flex items-center justify-between gap-x-2">
<div className="flex gap-x-2">
<p className="line-clamp-1">{item?.fileName}</p>
<p className="line-clamp-1">{item?.modelId}</p>
<span>{formatDownloadPercentage(item?.percent)}</span>
</div>
<Button
themes="outline"
size="sm"
onClick={() => {
if (item?.fileName) {
const model = models.find(
(e) => e.id === item?.fileName
)
if (!model) return
if (item?.modelId) {
extensionManager
.get<ModelExtension>(ExtensionType.Model)
?.cancelModelDownload(item.modelId)


@ -1,4 +1,4 @@
import { Fragment, useState, useEffect, useContext } from 'react'
import { Fragment, useState, useEffect } from 'react'
import {
Button,
@ -11,7 +11,7 @@ import {
CommandList,
} from '@janhq/uikit'
import { useSetAtom } from 'jotai'
import { useAtomValue, useSetAtom } from 'jotai'
import {
MessageCircleIcon,
SettingsIcon,
@ -22,18 +22,19 @@ import {
import ShortCut from '@/containers/Shortcut'
import { FeatureToggleContext } from '@/context/FeatureToggle'
import { MainViewState } from '@/constants/screens'
import { useMainViewState } from '@/hooks/useMainViewState'
import { showRightSideBarAtom } from '@/screens/Chat/Sidebar'
import { activeThreadAtom } from '@/helpers/atoms/Conversation.atom'
export default function CommandSearch() {
const { setMainViewState } = useMainViewState()
const [open, setOpen] = useState(false)
const setShowRightSideBar = useSetAtom(showRightSideBarAtom)
const activeThread = useAtomValue(activeThreadAtom)
const menus = [
{
@ -123,13 +124,15 @@ export default function CommandSearch() {
</CommandGroup>
</CommandList>
</CommandModal>
<Button
themes="outline"
className="unset-drag justify-start text-left text-xs font-normal text-muted-foreground focus:ring-0"
onClick={() => setShowRightSideBar((show) => !show)}
>
Toggle right
</Button>
{activeThread && (
<Button
themes="outline"
className="unset-drag justify-start text-left text-xs font-normal text-muted-foreground focus:ring-0"
onClick={() => setShowRightSideBar((show) => !show)}
>
Toggle right
</Button>
)}
</Fragment>
)
}


@ -24,34 +24,30 @@ import { extensionManager } from '@/extension'
import { downloadingModelsAtom } from '@/helpers/atoms/Model.atom'
type Props = {
suitableModel: Model
model: Model
isFromList?: boolean
}
export default function ModalCancelDownload({
suitableModel,
isFromList,
}: Props) {
export default function ModalCancelDownload({ model, isFromList }: Props) {
const { modelDownloadStateAtom } = useDownloadState()
const downloadAtom = useMemo(
() => atom((get) => get(modelDownloadStateAtom)[suitableModel.name]),
() => atom((get) => get(modelDownloadStateAtom)[model.id]),
// eslint-disable-next-line react-hooks/exhaustive-deps
[suitableModel.name]
[model.id]
)
const models = useAtomValue(downloadingModelsAtom)
const downloadState = useAtomValue(downloadAtom)
const cancelText = `Cancel ${formatDownloadPercentage(downloadState.percent)}`
return (
<Modal>
<ModalTrigger asChild>
{isFromList ? (
<Button themes="outline" size="sm">
Cancel ({formatDownloadPercentage(downloadState.percent)})
{cancelText}
</Button>
) : (
<Button>
Cancel ({formatDownloadPercentage(downloadState.percent)})
</Button>
<Button>{cancelText}</Button>
)}
</ModalTrigger>
<ModalContent>
@ -60,7 +56,7 @@ export default function ModalCancelDownload({
</ModalHeader>
<p>
Are you sure you want to cancel the download of&nbsp;
{downloadState?.fileName}?
{downloadState?.modelId}?
</p>
<ModalFooter>
<div className="flex gap-x-2">
@ -71,11 +67,7 @@ export default function ModalCancelDownload({
<Button
themes="danger"
onClick={() => {
if (downloadState?.fileName) {
const model = models.find(
(e) => e.id === downloadState?.fileName
)
if (!model) return
if (downloadState?.modelId) {
extensionManager
.get<ModelExtension>(ExtensionType.Model)
?.cancelModelDownload(downloadState.modelId)


@ -36,11 +36,11 @@ export default function EventListenerWrapper({ children }: PropsWithChildren) {
useEffect(() => {
if (window && window.electronAPI) {
window.electronAPI.onFileDownloadUpdate(
(_event: string, state: DownloadState | undefined) => {
(_event: string, state: any | undefined) => {
if (!state) return
setDownloadState({
...state,
fileName: state.fileName.split('/').pop() ?? '',
modelId: state.fileName.split('/').pop() ?? '',
})
}
)
@ -48,18 +48,18 @@ export default function EventListenerWrapper({ children }: PropsWithChildren) {
window.electronAPI.onFileDownloadError(
(_event: string, callback: any) => {
console.error('Download error', callback)
const fileName = callback.fileName.split('/').pop() ?? ''
setDownloadStateFailed(fileName)
const modelId = callback.fileName.split('/').pop() ?? ''
setDownloadStateFailed(modelId)
}
)
window.electronAPI.onFileDownloadSuccess(
(_event: string, callback: any) => {
if (callback && callback.fileName) {
const fileName = callback.fileName.split('/').pop() ?? ''
setDownloadStateSuccess(fileName)
const modelId = callback.fileName.split('/').pop() ?? ''
setDownloadStateSuccess(modelId)
const model = modelsRef.current.find((e) => e.id === fileName)
const model = modelsRef.current.find((e) => e.id === modelId)
if (model)
extensionManager
.get<ModelExtension>(ExtensionType.Model)


@ -48,9 +48,8 @@ export function useActiveModel() {
}
const currentTime = Date.now()
console.debug('Init model: ', modelId)
const res = await initModel(modelId, model?.settings)
if (res && res.error && res.modelFile === stateModel.model) {
if (res && res.error) {
const errorMessage = `${res.error}`
alert(errorMessage)
setStateModel(() => ({


@ -1,5 +1,7 @@
import {
Assistant,
ConversationalExtension,
ExtensionType,
Thread,
ThreadAssistantInfo,
ThreadState,
@ -8,10 +10,13 @@ import { atom, useAtom, useAtomValue, useSetAtom } from 'jotai'
import { generateThreadId } from '@/utils/conversation'
import { extensionManager } from '@/extension'
import {
threadsAtom,
setActiveThreadIdAtom,
threadStatesAtom,
activeThreadAtom,
updateThreadAtom,
} from '@/helpers/atoms/Conversation.atom'
const createNewThreadAtom = atom(null, (get, set, newThread: Thread) => {
@ -35,6 +40,8 @@ export const useCreateNewThread = () => {
const setActiveThreadId = useSetAtom(setActiveThreadIdAtom)
const [threadStates, setThreadStates] = useAtom(threadStatesAtom)
const threads = useAtomValue(threadsAtom)
const activeThread = useAtomValue(activeThreadAtom)
const updateThread = useSetAtom(updateThreadAtom)
const requestCreateNewThread = async (assistant: Assistant) => {
const unfinishedThreads = threads.filter((t) => t.isFinishInit === false)
@ -86,7 +93,20 @@ export const useCreateNewThread = () => {
setActiveThreadId(thread.id)
}
function updateThreadTitle(title: string) {
if (!activeThread) return
const updatedConv: Thread = {
...activeThread,
title,
}
updateThread(updatedConv)
extensionManager
.get<ConversationalExtension>(ExtensionType.Conversational)
?.saveThread(updatedConv)
}
return {
requestCreateNewThread,
updateThreadTitle,
}
}
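A sketch of a consumer of the new updateThreadTitle (the component name is hypothetical):

import { useCreateNewThread } from '@/hooks/useCreateNewThread'

function ThreadTitleInput() {
  const { updateThreadTitle } = useCreateNewThread()
  return <input onChange={(e) => updateThreadTitle(e.target.value)} />
}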


@ -6,10 +6,10 @@ import { currentPromptAtom } from '@/containers/Providers/Jotai'
import { toaster } from '@/containers/Toast'
import { extensionManager } from '../extension/ExtensionManager'
import { useActiveModel } from './useActiveModel'
import { extensionManager } from '@/extension/ExtensionManager'
import {
cleanConversationMessages,
deleteConversationMessage,

View File

@ -1,6 +1,6 @@
import { Model, ExtensionType, ModelExtension } from '@janhq/core'
import { useAtom } from 'jotai'
import { useAtom, useAtomValue } from 'jotai'
import { useDownloadState } from './useDownloadState'
@ -27,7 +27,6 @@ export default function useDownloadModel() {
total: 0,
transferred: 0,
},
fileName: model.id,
})
setDownloadingModels([...downloadingModels, model])


@ -8,39 +8,39 @@ const modelDownloadStateAtom = atom<Record<string, DownloadState>>({})
const setDownloadStateAtom = atom(null, (get, set, state: DownloadState) => {
const currentState = { ...get(modelDownloadStateAtom) }
console.debug(
`current download state for ${state.fileName} is ${JSON.stringify(state)}`
`current download state for ${state.modelId} is ${JSON.stringify(state)}`
)
currentState[state.fileName] = state
currentState[state.modelId] = state
set(modelDownloadStateAtom, currentState)
})
const setDownloadStateSuccessAtom = atom(null, (get, set, fileName: string) => {
const setDownloadStateSuccessAtom = atom(null, (get, set, modelId: string) => {
const currentState = { ...get(modelDownloadStateAtom) }
const state = currentState[fileName]
const state = currentState[modelId]
if (!state) {
console.error(`Cannot find download state for ${fileName}`)
console.error(`Cannot find download state for ${modelId}`)
return
}
delete currentState[fileName]
delete currentState[modelId]
set(modelDownloadStateAtom, currentState)
toaster({
title: 'Download Completed',
description: `Download ${fileName} completed`,
description: `Download ${modelId} completed`,
})
})
const setDownloadStateFailedAtom = atom(null, (get, set, fileName: string) => {
const setDownloadStateFailedAtom = atom(null, (get, set, modelId: string) => {
const currentState = { ...get(modelDownloadStateAtom) }
const state = currentState[fileName]
const state = currentState[modelId]
if (!state) {
console.error(`Cannot find download state for ${fileName}`)
console.error(`Cannot find download state for ${modelId}`)
toaster({
title: 'Cancel Download',
description: `Model ${fileName} cancel download`,
description: `Model ${modelId} cancel download`,
})
return
}
delete currentState[fileName]
delete currentState[modelId]
set(modelDownloadStateAtom, currentState)
})
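With the state map keyed by modelId instead of fileName, a derived per-model selector inside this module might look like the following (the model id is illustrative; external consumers go through useDownloadState):

import { atom } from 'jotai'

// Progress for a single model, or undefined once the download settles.
const capybaraProgressAtom = atom(
  (get) => get(modelDownloadStateAtom)['capybara-34b']?.percent
)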


@ -1,25 +1,15 @@
import { useEffect, useState } from 'react'
import { ExtensionType, ModelExtension } from '@janhq/core'
import { ModelCatalog } from '@janhq/core'
import { dummyModel } from '@/utils/dummy'
import { Model } from '@janhq/core'
import { extensionManager } from '@/extension/ExtensionManager'
export async function getConfiguredModels(): Promise<ModelCatalog[]> {
return (
extensionManager
.get<ModelExtension>(ExtensionType.Model)
?.getConfiguredModels() ?? []
)
}
export function useGetConfiguredModels() {
const [loading, setLoading] = useState<boolean>(false)
const [models, setModels] = useState<ModelCatalog[]>([])
const [models, setModels] = useState<Model[]>([])
async function getConfiguredModels(): Promise<ModelCatalog[]> {
const getConfiguredModels = async (): Promise<Model[]> => {
const models = await extensionManager
.get<ModelExtension>(ExtensionType.Model)
?.getConfiguredModels()
@ -28,9 +18,9 @@ export function useGetConfiguredModels() {
async function fetchModels() {
setLoading(true)
let models = await getConfiguredModels()
const models = await getConfiguredModels()
if (process.env.NODE_ENV === 'development') {
models = [dummyModel, ...models]
// models = [dummyModel, ...models] // TODO: NamH add back dummy model later
}
setLoading(false)
setModels(models)


@ -1,27 +0,0 @@
import { useState } from 'react'
import { Model } from '@janhq/core'
import { useAtomValue } from 'jotai'
import { totalRamAtom } from '@/helpers/atoms/SystemBar.atom'
export default function useGetMostSuitableModelVersion() {
const [suitableModel, setSuitableModel] = useState<Model | undefined>()
const totalRam = useAtomValue(totalRamAtom)
const getMostSuitableModelVersion = async (modelVersions: Model[]) => {
// find the model version with the highest required RAM that is still below the user's RAM by 80%
const modelVersion = modelVersions.reduce((prev, current) => {
if (current.metadata.maxRamRequired > prev.metadata.maxRamRequired) {
if (current.metadata.maxRamRequired < totalRam * 0.8) {
return current
}
}
return prev
})
setSuitableModel(modelVersion)
}
return { suitableModel, getMostSuitableModelVersion }
}


@ -1,50 +0,0 @@
import { Model } from '@janhq/core'
import { ModelPerformance, TagType } from '@/constants/tagType'
// Recommendation:
// `Recommended (green)`: "Max RAM required" is 80% of users max RAM.
// `Slow on your device (yellow)`: Max RAM required is 80-100% of users max RAM
// `Not enough RAM (red)`: User RAM is below "Max RAM required"
export default function useGetPerformanceTag() {
async function getPerformanceForModel(
model: Model,
totalRam: number
): Promise<{ title: string; performanceTag: TagType }> {
const requiredRam = model.metadata.maxRamRequired
const performanceTag = calculateRamPerformance(requiredRam, totalRam)
let title = ''
switch (performanceTag) {
case ModelPerformance.PerformancePositive:
title = 'Recommended'
break
case ModelPerformance.PerformanceNeutral:
title = 'Slow on your device'
break
case ModelPerformance.PerformanceNegative:
title = 'Not enough RAM'
break
}
return { title, performanceTag }
}
return { getPerformanceForModel }
}
const calculateRamPerformance = (
requiredRamAmt: number,
totalRamAmt: number
) => {
const percentage = requiredRamAmt / totalRamAmt
if (percentage < 0.8) {
return ModelPerformance.PerformancePositive
} else if (percentage >= 0.8 && percentage < 1) {
return ModelPerformance.PerformanceNeutral
} else {
return ModelPerformance.PerformanceNegative
}
}


@ -45,48 +45,6 @@ export default function useSendChatMessage() {
const selectedModel = useAtomValue(selectedModelAtom)
const { startModel } = useActiveModel()
function updateThreadTitle(newMessage: MessageRequest) {
if (
activeThread &&
newMessage.messages &&
newMessage.messages.length > 2 &&
(activeThread.title === '' || activeThread.title === activeModel?.name)
) {
const summaryMsg: ChatCompletionMessage = {
role: ChatCompletionRole.User,
content:
'Summarize this conversation in less than 5 words, the response should just include the summary',
}
// Request convo summary
setTimeout(async () => {
const result = await extensionManager
.get<InferenceExtension>(ExtensionType.Inference)
?.inferenceRequest({
...newMessage,
messages: newMessage.messages?.slice(0, -1).concat([summaryMsg]),
})
.catch(console.error)
const content = result?.content[0]?.text.value.trim()
if (
activeThread &&
activeThread.id === newMessage.threadId &&
content &&
content.length > 0 &&
content.split(' ').length <= 20
) {
const updatedConv: Thread = {
...activeThread,
title: content,
}
updateThread(updatedConv)
extensionManager
.get<ConversationalExtension>(ExtensionType.Conversational)
?.saveThread(updatedConv)
}
}, 1000)
}
}
const sendChatMessage = async () => {
if (!currentPrompt || currentPrompt.trim().length === 0) {
return
@ -172,7 +130,6 @@ export default function useSendChatMessage() {
}
addNewMessage(threadMessage)
updateThreadTitle(messageRequest)
await extensionManager
.get<ConversationalExtension>(ExtensionType.Conversational)
@ -180,6 +137,10 @@ export default function useSendChatMessage() {
const modelId = selectedModel?.id ?? activeThread.assistants[0].model.id
if (activeModel?.id !== modelId) {
toaster({
title: 'Message queued.',
description: 'It will be sent once the model is done loading',
})
await startModel(modelId)
}
events.emit(EventName.OnMessageSent, messageRequest)

View File

@ -22,11 +22,6 @@ export default function useSetActiveThread() {
return
}
if (!thread.isFinishInit) {
console.debug('Thread not finish init')
return
}
// load the corresponding messages
const messages = await extensionManager
.get<ConversationalExtension>(ExtensionType.Conversational)

View File

@ -43,8 +43,10 @@ const MessageToolbar = ({ message }: { message: ThreadMessage }) => {
.get<InferenceExtension>(ExtensionType.Inference)
?.stopInference()
setTimeout(() => {
message.status = MessageStatus.Ready
events.emit(EventName.OnMessageUpdate, message)
events.emit(EventName.OnMessageUpdate, {
...message,
status: MessageStatus.Ready,
})
}, 300)
}
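
The change above stops mutating the message in place and instead emits a fresh object, so subscribers that compare by reference (React state, memoized selectors) reliably observe the status flip. The general pattern, sketched:

// Emit a copy rather than mutating the shared object, so listeners
// holding the old reference can detect the change by identity.
const markReady = (message: ThreadMessage) =>
  events.emit(EventName.OnMessageUpdate, {
    ...message,
    status: MessageStatus.Ready,
  })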

View File

@ -9,6 +9,8 @@ import DropdownListSidebar, {
} from '@/containers/DropdownListSidebar'
import ItemCardSidebar from '@/containers/ItemCardSidebar'
import { useCreateNewThread } from '@/hooks/useCreateNewThread'
import { activeThreadAtom } from '@/helpers/atoms/Conversation.atom'
export const showRightSideBarAtom = atom<boolean>(false)
@ -17,6 +19,7 @@ export default function Sidebar() {
const showing = useAtomValue(showRightSideBarAtom)
const activeThread = useAtomValue(activeThreadAtom)
const selectedModel = useAtomValue(selectedModelAtom)
const { updateThreadTitle } = useCreateNewThread()
const onReviewInFinderClick = async (type: string) => {
if (!activeThread) return
@ -47,7 +50,6 @@ export default function Sidebar() {
if (!filePath) return
const fullPath = join(userSpace, filePath)
console.log(fullPath)
openFileExplorer(fullPath)
}
@ -80,7 +82,6 @@ export default function Sidebar() {
if (!filePath) return
const fullPath = join(userSpace, filePath)
console.log(fullPath)
openFileExplorer(fullPath)
}
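
Both handlers above share the same reveal flow once the per-type file path is resolved; a sketch using the core APIs already imported in this file (`getUserSpace`, `openFileExplorer`, `join`):

// Sketch: resolve the path inside the user space and open it.
const reveal = async (filePath: string) => {
  const userSpace = await getUserSpace()
  openFileExplorer(join(userSpace, filePath))
}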
@ -96,8 +97,16 @@ export default function Sidebar() {
onRevealInFinderClick={onReviewInFinderClick}
onViewJsonClick={onViewJsonClick}
>
<ItemCardSidebar description={activeThread?.id} title="Thread ID" />
<ItemCardSidebar title="Thread title" />
<ItemCardSidebar
description={activeThread?.id}
title="Thread ID"
disabled
/>
<ItemCardSidebar
title="Thread title"
description={activeThread?.title}
onChange={(title) => updateThreadTitle(title ?? '')}
/>
</CardSidebar>
<CardSidebar
title="Assistant"
@ -107,6 +116,7 @@ export default function Sidebar() {
<ItemCardSidebar
description={activeThread?.assistants[0].assistant_name ?? ''}
title="Assistant"
disabled
/>
</CardSidebar>
<CardSidebar

View File

@ -1,71 +1,46 @@
/* eslint-disable react/display-name */
import { forwardRef, useEffect, useState } from 'react'
import { forwardRef } from 'react'
import { ModelCatalog } from '@janhq/core'
import { Model } from '@janhq/core'
import { Badge } from '@janhq/uikit'
import useGetMostSuitableModelVersion from '@/hooks/useGetMostSuitableModelVersion'
import ExploreModelItemHeader from '@/screens/ExploreModels/ExploreModelItemHeader'
import ModelVersionList from '@/screens/ExploreModels/ModelVersionList'
import { toGigabytes } from '@/utils/converter'
import { displayDate } from '@/utils/datetime'
type Props = {
model: ModelCatalog
model: Model
}
const ExploreModelItem = forwardRef<HTMLDivElement, Props>(({ model }, ref) => {
const [show, setShow] = useState(false)
const { availableVersions } = model
const { suitableModel, getMostSuitableModelVersion } =
useGetMostSuitableModelVersion()
useEffect(() => {
getMostSuitableModelVersion(availableVersions)
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [availableVersions])
if (!suitableModel) {
return null
}
return (
<div
ref={ref}
className="mb-4 flex flex-col rounded-md border border-border bg-background/60"
>
<ExploreModelItemHeader
suitableModel={suitableModel}
exploreModel={model}
/>
<ExploreModelItemHeader model={model} />
<div className="flex flex-col p-4">
<div className="mb-4 flex flex-col gap-1">
<span className="font-semibold">About</span>
<p>{model.longDescription}</p>
<p>{model.description}</p>
</div>
<div className="mb-4 flex space-x-6 border-b border-border pb-4">
<div>
<span className="font-semibold">Author</span>
<p className="mt-1 font-medium">{model.author}</p>
<p className="mt-1 font-medium">{model.metadata.author}</p>
</div>
<div>
<span className="mb-1 font-semibold">Compatibility</span>
<div className="mt-1 flex gap-2">
<Badge
{/* <Badge
themes="secondary"
className="line-clamp-1 lg:line-clamp-none"
title={`${toGigabytes(
suitableModel.metadata.maxRamRequired
model.metadata.maxRamRequired // TODO: check this
)} RAM required`}
>
{toGigabytes(suitableModel.metadata.maxRamRequired)} RAM
required
</Badge>
{toGigabytes(model.metadata.maxRamRequired)} RAM required
</Badge> */}
</div>
</div>
</div>
@ -75,21 +50,12 @@ const ExploreModelItem = forwardRef<HTMLDivElement, Props>(({ model }, ref) => {
<span className="font-semibold">Version</span>
<div className="mt-2 flex space-x-2">
<Badge themes="outline">v{model.version}</Badge>
{suitableModel.metadata.quantization && (
<Badge themes="outline">
{suitableModel.metadata.quantization}
</Badge>
)}
</div>
</div>
<div>
<span className="font-semibold">Release Date</span>
<p className="mt-1 ">{displayDate(model.releaseDate)}</p>
</div>
<div>
<span className="font-semibold">Tags</span>
<div className="mt-2 flex space-x-2">
{model.tags.map((tag, i) => (
{model.metadata.tags.map((tag, i) => (
<Badge key={i} themes="outline">
{tag}
</Badge>
@ -97,23 +63,6 @@ const ExploreModelItem = forwardRef<HTMLDivElement, Props>(({ model }, ref) => {
</div>
</div>
</div>
{model.availableVersions?.length > 0 && (
<div className="mt-5 w-full rounded-md border border-border bg-background p-2">
<button onClick={() => setShow(!show)} className="w-full">
{!show
? '+ Show Available Versions'
: '- Collapse Available Versions'}
</button>
{show && (
<ModelVersionList
models={model.availableVersions}
recommendedVersion={suitableModel?.name ?? ''}
/>
)}
</div>
)}
</div>
</div>
)
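
The refactor above flattens the catalog-plus-versions presentation into the single `Model` shape, so fields move: `longDescription` becomes `description`, while `author` and `tags` move under `metadata`. A hypothetical adapter for any remaining catalog-shaped data (old field names taken from the deleted type; the adapter itself is not part of this commit):

// Hypothetical adapter: old catalog fields -> new flat Model fields.
const fromCatalogEntry = (entry: {
  longDescription: string
  author: string
  tags: string[]
}) => ({
  description: entry.longDescription,
  metadata: { author: entry.author, tags: entry.tags },
})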

View File

@ -1,7 +1,7 @@
/* eslint-disable react-hooks/exhaustive-deps */
import { useCallback, useEffect, useMemo, useState } from 'react'
import { useCallback, useMemo, useState } from 'react'
import { Model, ModelCatalog } from '@janhq/core'
import { Model } from '@janhq/core'
import { Badge, Button } from '@janhq/uikit'
import { atom, useAtomValue } from 'jotai'
@ -15,67 +15,41 @@ import { ModelPerformance, TagType } from '@/constants/tagType'
import useDownloadModel from '@/hooks/useDownloadModel'
import { useDownloadState } from '@/hooks/useDownloadState'
import { useGetDownloadedModels } from '@/hooks/useGetDownloadedModels'
import useGetPerformanceTag from '@/hooks/useGetPerformanceTag'
import { useMainViewState } from '@/hooks/useMainViewState'
import { toGigabytes } from '@/utils/converter'
import { totalRamAtom } from '@/helpers/atoms/SystemBar.atom'
type Props = {
suitableModel: Model
exploreModel: ModelCatalog
model: Model
}
const ExploreModelItemHeader: React.FC<Props> = ({
suitableModel,
exploreModel,
}) => {
const ExploreModelItemHeader: React.FC<Props> = ({ model }) => {
const { downloadModel } = useDownloadModel()
const { downloadedModels } = useGetDownloadedModels()
const { modelDownloadStateAtom, downloadStates } = useDownloadState()
const { getPerformanceForModel } = useGetPerformanceTag()
const [title, setTitle] = useState<string>('Recommended')
const totalRam = useAtomValue(totalRamAtom)
const [performanceTag, setPerformanceTag] = useState<TagType>(
ModelPerformance.PerformancePositive
)
const downloadAtom = useMemo(
() => atom((get) => get(modelDownloadStateAtom)[suitableModel.name]),
[suitableModel.name]
() => atom((get) => get(modelDownloadStateAtom)[model.id]),
[model.id]
)
const downloadState = useAtomValue(downloadAtom)
const { setMainViewState } = useMainViewState()
const calculatePerformance = useCallback(
(suitableModel: Model) => async () => {
const { title, performanceTag } = await getPerformanceForModel(
suitableModel,
totalRam
)
setPerformanceTag(performanceTag)
setTitle(title)
},
[totalRam]
)
useEffect(() => {
calculatePerformance(suitableModel)
}, [suitableModel])
const onDownloadClick = useCallback(() => {
downloadModel(suitableModel)
downloadModel(model)
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [suitableModel])
}, [model])
// TODO: should this compare by model id or by version name?
const isDownloaded =
downloadedModels.find((model) => model.id === suitableModel.name) != null
const isDownloaded = downloadedModels.find((md) => md.id === model.id) != null
let downloadButton = (
<Button onClick={() => onDownloadClick()}>
{suitableModel.metadata.size
? `Download (${toGigabytes(suitableModel.metadata.size)})`
{model.metadata.size
? `Download (${toGigabytes(model.metadata.size)})`
: 'Download'}
</Button>
)
@ -93,7 +67,7 @@ const ExploreModelItemHeader: React.FC<Props> = ({
}
if (downloadState != null && downloadStates.length > 0) {
downloadButton = <ModalCancelDownload suitableModel={suitableModel} />
downloadButton = <ModalCancelDownload model={model} />
}
const renderBadge = (performance: TagType) => {
@ -115,7 +89,7 @@ const ExploreModelItemHeader: React.FC<Props> = ({
return (
<div className="flex items-center justify-between rounded-t-md border-b border-border bg-background/50 px-4 py-2">
<div className="flex items-center gap-2">
<span className="font-medium">{exploreModel.name}</span>
<span className="font-medium">{model.name}</span>
{performanceTag && renderBadge(performanceTag)}
</div>
{downloadButton}
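
With everything keyed by `model.id`, the downloaded check above could also be written with `Array.some`, which reads more directly than `find(...) != null` (an equivalent form, not what the commit uses):

// Equivalent, more direct form of the downloaded check.
const isDownloaded = downloadedModels.some((md) => md.id === model.id)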

View File

@ -1,16 +1,14 @@
import { ModelCatalog } from '@janhq/core'
import { Model } from '@janhq/core'
import ExploreModelItem from '@/screens/ExploreModels/ExploreModelItem'
type Props = {
models: ModelCatalog[]
models: Model[]
}
const ExploreModelList: React.FC<Props> = ({ models }) => (
<div className="relative h-full w-full flex-shrink-0">
{models?.map((item, i) => (
<ExploreModelItem key={item.name + '/' + item.id} model={item} />
))}
{models?.map((model) => <ExploreModelItem key={model.id} model={model} />)}
</div>
)

View File

@ -2,7 +2,7 @@
import React, { useMemo } from 'react'
import { Model } from '@janhq/core'
import { Badge, Button } from '@janhq/uikit'
import { Button } from '@janhq/uikit'
import { atom, useAtomValue } from 'jotai'
import ModalCancelDownload from '@/containers/ModalCancelDownload'
@ -63,7 +63,7 @@ const ModelVersionItem: React.FC<Props> = ({ model }) => {
}
if (downloadState != null && downloadStates.length > 0) {
downloadButton = <ModalCancelDownload suitableModel={model} isFromList />
downloadButton = <ModalCancelDownload model={model} isFromList />
}
return (
@ -74,16 +74,7 @@ const ModelVersionItem: React.FC<Props> = ({ model }) => {
</span>
</div>
<div className="flex items-center gap-4">
<div className="flex justify-end gap-2">
<Badge
themes="secondary"
className="line-clamp-1"
title={`${toGigabytes(model.metadata.maxRamRequired)} RAM required`}
>{`${toGigabytes(
model.metadata.maxRamRequired
)} RAM required`}</Badge>
<Badge themes="secondary">{toGigabytes(model.metadata.size)}</Badge>
</div>
<div className="flex justify-end gap-2"></div>
{downloadButton}
</div>
</div>

View File

@ -55,7 +55,7 @@ export default function BlankStateMyModel() {
}
/>
<div className="flex items-center justify-between">
<p>{item?.fileName}</p>
<p>{item?.modelId}</p>
<span>{formatDownloadPercentage(item?.percent)}</span>
</div>
</div>

View File

@ -63,10 +63,7 @@ const MyModelsScreen = () => {
<div className="flex items-start gap-x-4">
<div className="inline-flex rounded-full border border-border p-1">
<Avatar className="h-8 w-8">
<AvatarImage
src={model.metadata.avatarUrl}
alt={model.metadata.author}
/>
<AvatarImage alt={model.metadata.author} />
<AvatarFallback>
{model.metadata.author.charAt(0)}
</AvatarFallback>

View File

@ -30,7 +30,6 @@ const SettingsScreen = () => {
setMenus(menu)
}, [])
const preferenceExtensions = preferenceItems
.map((x) => x.extensionnName)
.filter((x, i) => {

View File

@ -4,7 +4,6 @@ type DownloadState = {
speed: number
percent: number
size: DownloadSize
fileName: string
error?: string
}
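
With `fileName` gone, downloads are identified by model id (see the `item?.modelId` usage above). The presumed resulting shape, with the identifier field assumed from that usage and the rest taken from this hunk:

type DownloadState = {
  modelId: string // assumed: replaces fileName as the identifier
  speed: number
  percent: number
  size: DownloadSize
  error?: string
}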

View File

@ -1,119 +0,0 @@
/* eslint-disable @typescript-eslint/naming-convention */
import { ModelCatalog, ModelState } from '@janhq/core'
export const dummyModel: ModelCatalog = {
id: 'aladar/TinyLLama-v0-GGUF',
name: 'TinyLLama-v0-GGUF',
shortDescription: 'TinyLlama-1.1B-Chat-v0.3-GGUF',
longDescription: 'https://huggingface.co/aladar/TinyLLama-v0-GGUF/tree/main',
avatarUrl: '',
releaseDate: Date.now(),
author: 'aladar',
version: '1.0.0',
modelUrl: 'aladar/TinyLLama-v0-GGUF',
tags: ['freeform', 'tags'],
availableVersions: [
{
object: 'model',
version: '1.0.0',
source_url:
'https://huggingface.co/aladar/TinyLLama-v0-GGUF/resolve/main/TinyLLama-v0.Q8_0.gguf',
id: 'TinyLLama-v0.Q8_0.gguf',
name: 'TinyLLama-v0.Q8_0.gguf',
owned_by: 'you',
created: 0,
description: '',
state: ModelState.ToDownload,
settings: {
ctx_len: 2048,
ngl: 100,
embedding: true,
n_parallel: 4,
},
parameters: {
temperature: 0.7,
token_limit: 2048,
top_k: 0,
top_p: 1,
stream: true,
},
metadata: {
engine: '',
quantization: '',
size: 5816320,
binaries: [],
maxRamRequired: 256000000,
author: 'aladar',
avatarUrl: '',
},
},
{
object: 'model',
version: '1.0.0',
source_url:
'https://huggingface.co/aladar/TinyLLama-v0-GGUF/resolve/main/TinyLLama-v0.f16.gguf',
id: 'TinyLLama-v0.f16.gguf',
name: 'TinyLLama-v0.f16.gguf',
owned_by: 'you',
created: 0,
description: '',
state: ModelState.ToDownload,
settings: {
ctx_len: 2048,
ngl: 100,
embedding: true,
n_parallel: 4,
},
parameters: {
temperature: 0.7,
token_limit: 2048,
top_k: 0,
top_p: 1,
stream: true,
},
metadata: {
engine: '',
quantization: '',
size: 5816320,
binaries: [],
maxRamRequired: 256000000,
author: 'aladar',
avatarUrl: '',
},
},
{
object: 'model',
version: '1.0.0',
source_url:
'https://huggingface.co/aladar/TinyLLama-v0-GGUF/resolve/main/TinyLLama-v0.f32.gguf',
id: 'TinyLLama-v0.f32.gguf',
name: 'TinyLLama-v0.f32.gguf',
owned_by: 'you',
created: 0,
description: '',
state: ModelState.ToDownload,
settings: {
ctx_len: 2048,
ngl: 100,
embedding: true,
n_parallel: 4,
},
parameters: {
temperature: 0.7,
token_limit: 2048,
top_k: 0,
top_p: 1,
stream: true,
},
metadata: {
engine: '',
quantization: '',
size: 5816320,
binaries: [],
maxRamRequired: 256000000,
author: 'aladar',
avatarUrl: '',
},
},
],
}
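
The deleted fixture above still used the old catalog shape (`owned_by`, string `version`, engine/quantization metadata). If an equivalent fixture were rebuilt against the new flat `Model` type, it would look roughly like this, with the field set taken from the updated type and the values illustrative:

import { Model, ModelState } from '@janhq/core'

export const dummyModel: Model = {
  object: 'model',
  version: 1, // version is now a number
  source_url:
    'https://huggingface.co/aladar/TinyLLama-v0-GGUF/resolve/main/TinyLLama-v0.Q8_0.gguf',
  id: 'TinyLLama-v0.Q8_0.gguf',
  name: 'TinyLLama-v0.Q8_0.gguf',
  created: 0,
  description: 'TinyLlama-1.1B-Chat-v0.3-GGUF',
  // Illustrative: ToDownload was removed from ModelState in this commit.
  state: ModelState.Downloading,
  settings: { ctx_len: 2048, ngl: 100, embedding: true, n_parallel: 4 },
  parameters: {
    temperature: 0.7,
    token_limit: 2048,
    top_k: 0,
    top_p: 1,
    stream: true,
  },
  metadata: { author: 'aladar', tags: ['freeform', 'tags'], size: 5816320 },
}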