diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json
new file mode 100644
index 000000000..f980b9df7
--- /dev/null
+++ b/.devcontainer/devcontainer.json
@@ -0,0 +1,4 @@
+{
+ "name": "jan",
+ "image": "node:20"
+}
\ No newline at end of file
diff --git a/.github/workflows/jan-electron-build-nightly.yml b/.github/workflows/jan-electron-build-nightly.yml
index 4531152d4..cad2ac227 100644
--- a/.github/workflows/jan-electron-build-nightly.yml
+++ b/.github/workflows/jan-electron-build-nightly.yml
@@ -8,7 +8,7 @@ on:
- 'README.md'
- 'docs/**'
schedule:
- - cron: '0 20 * * 2,3,4' # At 8 PM UTC on Tuesday, Wednesday, and Thursday, which is 3 AM UTC+7
+ - cron: '0 20 * * 1,2,3' # At 8 PM UTC on Monday, Tuesday, and Wednesday, which is 3 AM UTC+7 on Tuesday, Wednesday, and Thursday
workflow_dispatch:
inputs:
public_provider:
diff --git a/.github/workflows/template-build-linux-x64.yml b/.github/workflows/template-build-linux-x64.yml
index c6d1eac97..08cb1dada 100644
--- a/.github/workflows/template-build-linux-x64.yml
+++ b/.github/workflows/template-build-linux-x64.yml
@@ -98,8 +98,8 @@ jobs:
make build-and-publish
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- ANALYTICS_ID: ${{ secrets.JAN_APP_POSTHOG_PROJECT_API_KEY }}
- ANALYTICS_HOST: ${{ secrets.JAN_APP_POSTHOG_URL }}
+ ANALYTICS_ID: ${{ secrets.JAN_APP_UMAMI_PROJECT_API_KEY }}
+ ANALYTICS_HOST: ${{ secrets.JAN_APP_UMAMI_URL }}
- name: Upload Artifact .deb file
if: inputs.public_provider != 'github'
diff --git a/.github/workflows/template-build-macos.yml b/.github/workflows/template-build-macos.yml
index bc48e6c21..0ad1d3a6a 100644
--- a/.github/workflows/template-build-macos.yml
+++ b/.github/workflows/template-build-macos.yml
@@ -137,8 +137,8 @@ jobs:
APPLE_APP_SPECIFIC_PASSWORD: ${{ secrets.APPLE_APP_SPECIFIC_PASSWORD }}
APP_PATH: "."
DEVELOPER_ID: ${{ secrets.DEVELOPER_ID }}
- ANALYTICS_ID: ${{ secrets.JAN_APP_POSTHOG_PROJECT_API_KEY }}
- ANALYTICS_HOST: ${{ secrets.JAN_APP_POSTHOG_URL }}
+ ANALYTICS_ID: ${{ secrets.JAN_APP_UMAMI_PROJECT_API_KEY }}
+ ANALYTICS_HOST: ${{ secrets.JAN_APP_UMAMI_URL }}
- name: Upload Artifact
if: inputs.public_provider != 'github'
diff --git a/.github/workflows/template-build-windows-x64.yml b/.github/workflows/template-build-windows-x64.yml
index 5d96b3f49..b81997bde 100644
--- a/.github/workflows/template-build-windows-x64.yml
+++ b/.github/workflows/template-build-windows-x64.yml
@@ -127,8 +127,8 @@ jobs:
make build-and-publish
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- ANALYTICS_ID: ${{ secrets.JAN_APP_POSTHOG_PROJECT_API_KEY }}
- ANALYTICS_HOST: ${{ secrets.JAN_APP_POSTHOG_URL }}
+ ANALYTICS_ID: ${{ secrets.JAN_APP_UMAMI_PROJECT_API_KEY }}
+ ANALYTICS_HOST: ${{ secrets.JAN_APP_UMAMI_URL }}
AZURE_KEY_VAULT_URI: ${{ secrets.AZURE_KEY_VAULT_URI }}
AZURE_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }}
AZURE_TENANT_ID: ${{ secrets.AZURE_TENANT_ID }}
diff --git a/.github/workflows/update-release-url.yml b/.github/workflows/update-release-url.yml
index 545d6542e..99a3db0e0 100644
--- a/.github/workflows/update-release-url.yml
+++ b/.github/workflows/update-release-url.yml
@@ -17,7 +17,7 @@ jobs:
with:
fetch-depth: "0"
token: ${{ secrets.PAT_SERVICE_ACCOUNT }}
- ref: main
+ ref: dev
- name: Get Latest Release
uses: pozetroninc/github-action-get-latest-release@v0.7.0
@@ -46,4 +46,4 @@ jobs:
git config --global user.name "Service Account"
git add README.md
git commit -m "Update README.md with Stable Download URLs"
- git -c http.extraheader="AUTHORIZATION: bearer ${{ secrets.PAT_SERVICE_ACCOUNT }}" push origin HEAD:main
+ git -c http.extraheader="AUTHORIZATION: bearer ${{ secrets.PAT_SERVICE_ACCOUNT }}" push origin HEAD:dev
diff --git a/.gitignore b/.gitignore
index e3e4635fc..4540e5c7a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -12,6 +12,8 @@ build
electron/renderer
electron/models
electron/docs
+electron/engines
+server/pre-install
package-lock.json
*.log
@@ -26,3 +28,4 @@ extensions/inference-nitro-extension/bin/*/*.exp
extensions/inference-nitro-extension/bin/*/*.lib
extensions/inference-nitro-extension/bin/saved-*
extensions/inference-nitro-extension/bin/*.tar.gz
+
diff --git a/README.md b/README.md
index 3a99407f5..34eecc9f3 100644
--- a/README.md
+++ b/README.md
@@ -76,31 +76,31 @@ Jan is an open-source ChatGPT alternative that runs 100% offline on your compute
Experimental (Nightly Build)
-
+
jan.exe
-
+
Intel
-
+
M1/M2
-
+
jan.deb
-
+
jan.AppImage
diff --git a/core/.prettierignore b/core/.prettierignore
new file mode 100644
index 000000000..02d9145c1
--- /dev/null
+++ b/core/.prettierignore
@@ -0,0 +1,5 @@
+.next/
+node_modules/
+dist/
+*.hbs
+*.mdx
\ No newline at end of file
diff --git a/core/src/api/index.ts b/core/src/api/index.ts
index a3d0361e7..0d7cc51f7 100644
--- a/core/src/api/index.ts
+++ b/core/src/api/index.ts
@@ -3,7 +3,6 @@
* @description Enum of all the routes exposed by the app
*/
export enum AppRoute {
- appDataPath = 'appDataPath',
openExternalUrl = 'openExternalUrl',
openAppDirectory = 'openAppDirectory',
openFileExplore = 'openFileExplorer',
@@ -12,6 +11,7 @@ export enum AppRoute {
updateAppConfiguration = 'updateAppConfiguration',
relaunch = 'relaunch',
joinPath = 'joinPath',
+ isSubdirectory = 'isSubdirectory',
baseName = 'baseName',
startServer = 'startServer',
stopServer = 'stopServer',
@@ -61,7 +61,9 @@ export enum FileManagerRoute {
syncFile = 'syncFile',
getJanDataFolderPath = 'getJanDataFolderPath',
getResourcePath = 'getResourcePath',
+ getUserHomePath = 'getUserHomePath',
fileStat = 'fileStat',
+ writeBlob = 'writeBlob',
}
export type ApiFunction = (...args: any[]) => any
diff --git a/core/src/core.ts b/core/src/core.ts
index aa545e10e..8831c6001 100644
--- a/core/src/core.ts
+++ b/core/src/core.ts
@@ -22,7 +22,11 @@ const executeOnMain: (extension: string, method: string, ...args: any[]) => Prom
* @param {object} network - Optional object to specify proxy/whether to ignore SSL certificates.
* @returns {Promise} A promise that resolves when the file is downloaded.
*/
-const downloadFile: (url: string, fileName: string, network?: { proxy?: string, ignoreSSL?: boolean }) => Promise<any> = (url, fileName, network) => {
+const downloadFile: (
+ url: string,
+ fileName: string,
+ network?: { proxy?: string; ignoreSSL?: boolean }
+) => Promise<any> = (url, fileName, network) => {
return global.core?.api?.downloadFile(url, fileName, network)
}
@@ -79,6 +83,12 @@ const openExternalUrl: (url: string) => Promise<any> = (url) =>
*/
const getResourcePath: () => Promise<string> = () => global.core.api?.getResourcePath()
+/**
+ * Gets the user's home path.
+ * @returns return user's home path
+ */
+const getUserHomePath = (): Promise<string> => global.core.api?.getUserHomePath()
+
/**
* Log to file from browser processes.
*
@@ -87,6 +97,17 @@ const getResourcePath: () => Promise = () => global.core.api?.getResourc
const log: (message: string, fileName?: string) => void = (message, fileName) =>
global.core.api?.log(message, fileName)
+/**
+ * Check whether the path is a subdirectory of another path.
+ *
+ * @param from - The path to check.
+ * @param to - The path to check against.
+ *
+ * @returns {Promise<boolean>} - A promise that resolves with a boolean indicating whether the path is a subdirectory.
+ */
+const isSubdirectory: (from: string, to: string) => Promise<boolean> = (from: string, to: string) =>
+ global.core.api?.isSubdirectory(from, to)
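+// e.g. isSubdirectory('/home/user/jan', '/home/user/jan/models') resolves to true,
+// while isSubdirectory('/home/user/jan', '/etc') resolves to false (paths illustrative).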
+
/**
* Register extension point function type definition
*/
@@ -94,7 +115,7 @@ export type RegisterExtensionPoint = (
extensionName: string,
extensionId: string,
method: Function,
- priority?: number,
+ priority?: number
) => void
/**
@@ -111,5 +132,7 @@ export {
openExternalUrl,
baseName,
log,
+ isSubdirectory,
+ getUserHomePath,
FileStat,
}
diff --git a/core/src/fs.ts b/core/src/fs.ts
index ea636977a..0e570d1f5 100644
--- a/core/src/fs.ts
+++ b/core/src/fs.ts
@@ -1,4 +1,4 @@
-import { FileStat } from "./types"
+import { FileStat } from './types'
/**
* Writes data to a file at the specified path.
@@ -6,6 +6,15 @@ import { FileStat } from "./types"
*/
const writeFileSync = (...args: any[]) => global.core.api?.writeFileSync(...args)
+/**
+ * Writes blob data to a file at the specified path.
+ * @param path - The path to the file.
+ * @param data - The blob data, encoded as a base64 string.
+ * @returns {Promise<any>} A promise that resolves when the data is written.
+ */
+const writeBlob: (path: string, data: string) => Promise<any> = (path, data) =>
+ global.core.api?.writeBlob(path, data)
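+// Usage sketch (path is illustrative): persist base64 data received from the renderer:
+//   await writeBlob('file://threads/some-thread/image.png', base64String)
+// The main-process handler decodes the base64 payload and resolves the path
+// against the Jan data folder.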
+
/**
* Reads the contents of a file at the specified path.
* @returns {Promise<any>} A Promise that resolves with the contents of the file.
@@ -60,7 +69,6 @@ const syncFile: (src: string, dest: string) => Promise<any> = (src, dest) =>
*/
const copyFileSync = (...args: any[]) => global.core.api?.copyFileSync(...args)
-
/**
* Gets the file's stats.
*
@@ -70,7 +78,6 @@ const copyFileSync = (...args: any[]) => global.core.api?.copyFileSync(...args)
const fileStat: (path: string) => Promise<FileStat> = (path) =>
global.core.api?.fileStat(path)
-
// TODO: Export `dummy` fs functions automatically
// Currently adding these manually
export const fs = {
@@ -84,5 +91,6 @@ export const fs = {
appendFileSync,
copyFileSync,
syncFile,
- fileStat
+ fileStat,
+ writeBlob,
}
diff --git a/core/src/node/api/common/builder.ts b/core/src/node/api/common/builder.ts
index 14946f415..5c99cf4d8 100644
--- a/core/src/node/api/common/builder.ts
+++ b/core/src/node/api/common/builder.ts
@@ -2,7 +2,8 @@ import fs from 'fs'
import { JanApiRouteConfiguration, RouteConfiguration } from './configuration'
import { join } from 'path'
import { ContentType, MessageStatus, Model, ThreadMessage } from './../../../index'
-import { getJanDataFolderPath } from '../../utils'
+import { getEngineConfiguration, getJanDataFolderPath } from '../../utils'
+import { DEFAULT_CHAT_COMPLETION_URL } from './consts'
export const getBuilder = async (configuration: RouteConfiguration) => {
const directoryPath = join(getJanDataFolderPath(), configuration.dirName)
@@ -265,19 +266,22 @@ export const downloadModel = async (
const modelBinaryPath = join(directoryPath, modelId)
const request = require('request')
- const rq = request({ url: model.source_url, strictSSL, proxy })
const progress = require('request-progress')
- progress(rq, {})
- .on('progress', function (state: any) {
- console.log('progress', JSON.stringify(state, null, 2))
- })
- .on('error', function (err: Error) {
- console.error('error', err)
- })
- .on('end', function () {
- console.log('end')
- })
- .pipe(fs.createWriteStream(modelBinaryPath))
+
+ for (const source of model.sources) {
+ const rq = request({ url: source, strictSSL, proxy })
+ progress(rq, {})
+ .on('progress', function (state: any) {
+ console.debug('progress', JSON.stringify(state, null, 2))
+ })
+ .on('error', function (err: Error) {
+ console.error('error', err)
+ })
+ .on('end', function () {
+ console.debug('end')
+ })
+ .pipe(fs.createWriteStream(modelBinaryPath))
+ }
return {
message: `Starting download ${modelId}`,
@@ -306,7 +310,7 @@ export const chatCompletions = async (request: any, reply: any) => {
const engineConfiguration = await getEngineConfiguration(requestedModel.engine)
let apiKey: string | undefined = undefined
- let apiUrl: string = 'http://127.0.0.1:3928/inferences/llamacpp/chat_completion' // default nitro url
+ let apiUrl: string = DEFAULT_CHAT_COMPLETION_URL
if (engineConfiguration) {
apiKey = engineConfiguration.api_key
@@ -317,7 +321,7 @@ export const chatCompletions = async (request: any, reply: any) => {
'Content-Type': 'text/event-stream',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive',
- "Access-Control-Allow-Origin": "*"
+ 'Access-Control-Allow-Origin': '*',
})
const headers: Record<string, any> = {
@@ -343,13 +347,3 @@ export const chatCompletions = async (request: any, reply: any) => {
response.body.pipe(reply.raw)
}
}
-
-const getEngineConfiguration = async (engineId: string) => {
- if (engineId !== 'openai') {
- return undefined
- }
- const directoryPath = join(getJanDataFolderPath(), 'engines')
- const filePath = join(directoryPath, `${engineId}.json`)
- const data = await fs.readFileSync(filePath, 'utf-8')
- return JSON.parse(data)
-}
diff --git a/core/src/node/api/common/consts.ts b/core/src/node/api/common/consts.ts
new file mode 100644
index 000000000..bc3cfe300
--- /dev/null
+++ b/core/src/node/api/common/consts.ts
@@ -0,0 +1,19 @@
+// The PORT to use for the Nitro subprocess
+export const NITRO_DEFAULT_PORT = 3928
+
+// The HOST address to use for the Nitro subprocess
+export const LOCAL_HOST = '127.0.0.1'
+
+export const SUPPORTED_MODEL_FORMAT = '.gguf'
+
+// The URL for the Nitro subprocess
+const NITRO_HTTP_SERVER_URL = `http://${LOCAL_HOST}:${NITRO_DEFAULT_PORT}`
+// The URL for the Nitro subprocess to load a model
+export const NITRO_HTTP_LOAD_MODEL_URL = `${NITRO_HTTP_SERVER_URL}/inferences/llamacpp/loadmodel`
+// The URL for the Nitro subprocess to validate a model
+export const NITRO_HTTP_VALIDATE_MODEL_URL = `${NITRO_HTTP_SERVER_URL}/inferences/llamacpp/modelstatus`
+
+// The URL for the Nitro subprocess to kill itself
+export const NITRO_HTTP_KILL_URL = `${NITRO_HTTP_SERVER_URL}/processmanager/destroy`
+
+export const DEFAULT_CHAT_COMPLETION_URL = `http://${LOCAL_HOST}:${NITRO_DEFAULT_PORT}/inferences/llamacpp/chat_completion` // default nitro url
diff --git a/core/src/node/api/common/startStopModel.ts b/core/src/node/api/common/startStopModel.ts
new file mode 100644
index 000000000..0d4934e1c
--- /dev/null
+++ b/core/src/node/api/common/startStopModel.ts
@@ -0,0 +1,351 @@
+import fs from 'fs'
+import { join } from 'path'
+import { getJanDataFolderPath, getJanExtensionsPath, getSystemResourceInfo } from '../../utils'
+import { logServer } from '../../log'
+import { ChildProcessWithoutNullStreams, spawn } from 'child_process'
+import { Model, ModelSettingParams, PromptTemplate } from '../../../types'
+import {
+ LOCAL_HOST,
+ NITRO_DEFAULT_PORT,
+ NITRO_HTTP_KILL_URL,
+ NITRO_HTTP_LOAD_MODEL_URL,
+ NITRO_HTTP_VALIDATE_MODEL_URL,
+ SUPPORTED_MODEL_FORMAT,
+} from './consts'
+
+// The subprocess instance for Nitro
+let subprocess: ChildProcessWithoutNullStreams | undefined = undefined
+
+// TODO: move this to core type
+interface NitroModelSettings extends ModelSettingParams {
+ llama_model_path: string
+ cpu_threads: number
+}
+
+export const startModel = async (modelId: string, settingParams?: ModelSettingParams) => {
+ try {
+ await runModel(modelId, settingParams)
+
+ return {
+ message: `Model ${modelId} started`,
+ }
+ } catch (e) {
+ return {
+ error: e,
+ }
+ }
+}
+
+const runModel = async (modelId: string, settingParams?: ModelSettingParams): Promise<void> => {
+ const janDataFolderPath = getJanDataFolderPath()
+ const modelFolderFullPath = join(janDataFolderPath, 'models', modelId)
+
+ if (!fs.existsSync(modelFolderFullPath)) {
+ throw `Model not found: ${modelId}`
+ }
+
+ const files: string[] = fs.readdirSync(modelFolderFullPath)
+
+ // Look for GGUF model file
+ const ggufBinFile = files.find((file) => file.toLowerCase().includes(SUPPORTED_MODEL_FORMAT))
+
+ const modelMetadataPath = join(modelFolderFullPath, 'model.json')
+ const modelMetadata: Model = JSON.parse(fs.readFileSync(modelMetadataPath, 'utf-8'))
+
+ if (!ggufBinFile) {
+ throw 'No GGUF model file found'
+ }
+ const modelBinaryPath = join(modelFolderFullPath, ggufBinFile)
+
+ const nitroResourceProbe = await getSystemResourceInfo()
+ const nitroModelSettings: NitroModelSettings = {
+ ...modelMetadata.settings,
+ ...settingParams,
+ llama_model_path: modelBinaryPath,
+ // This is critical and requires real CPU physical core count (or performance core)
+ cpu_threads: Math.max(1, nitroResourceProbe.numCpuPhysicalCore),
+ ...(modelMetadata.settings.mmproj && {
+ mmproj: join(modelFolderFullPath, modelMetadata.settings.mmproj),
+ }),
+ }
+
+ logServer(`[NITRO]::Debug: Nitro model settings: ${JSON.stringify(nitroModelSettings)}`)
+
+ // Convert settings.prompt_template to system_prompt, user_prompt, ai_prompt
+ if (modelMetadata.settings.prompt_template) {
+ const promptTemplate = modelMetadata.settings.prompt_template
+ const prompt = promptTemplateConverter(promptTemplate)
+ if (prompt?.error) {
+ return Promise.reject(prompt.error)
+ }
+ nitroModelSettings.system_prompt = prompt.system_prompt
+ nitroModelSettings.user_prompt = prompt.user_prompt
+ nitroModelSettings.ai_prompt = prompt.ai_prompt
+ }
+
+ await runNitroAndLoadModel(modelId, nitroModelSettings)
+}
+
+// TODO: move to util
+const promptTemplateConverter = (promptTemplate: string): PromptTemplate => {
+ // Split the string using the markers
+ const systemMarker = '{system_message}'
+ const promptMarker = '{prompt}'
+
+ if (promptTemplate.includes(systemMarker) && promptTemplate.includes(promptMarker)) {
+ // Find the indices of the markers
+ const systemIndex = promptTemplate.indexOf(systemMarker)
+ const promptIndex = promptTemplate.indexOf(promptMarker)
+
+ // Extract the parts of the string
+ const system_prompt = promptTemplate.substring(0, systemIndex)
+ const user_prompt = promptTemplate.substring(systemIndex + systemMarker.length, promptIndex)
+ const ai_prompt = promptTemplate.substring(promptIndex + promptMarker.length)
+
+ // Return the split parts
+ return { system_prompt, user_prompt, ai_prompt }
+ } else if (promptTemplate.includes(promptMarker)) {
+ // Extract the parts of the string for the case where only promptMarker is present
+ const promptIndex = promptTemplate.indexOf(promptMarker)
+ const user_prompt = promptTemplate.substring(0, promptIndex)
+ const ai_prompt = promptTemplate.substring(promptIndex + promptMarker.length)
+
+ // Return the split parts
+ return { user_prompt, ai_prompt }
+ }
+
+ // Return an error if none of the conditions are met
+ return { error: 'Cannot split prompt template' }
+}
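+// Worked example: the template '{system_message}\n### Instruction:\n{prompt}\n### Response:'
+// splits into { system_prompt: '', user_prompt: '\n### Instruction:\n', ai_prompt: '\n### Response:' }.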
+
+const runNitroAndLoadModel = async (modelId: string, modelSettings: NitroModelSettings) => {
+  // Stop any existing Nitro process and wait until its port is free before spawning a new one
+ const tcpPortUsed = require('tcp-port-used')
+
+ await stopModel(modelId)
+ await tcpPortUsed.waitUntilFree(NITRO_DEFAULT_PORT, 300, 5000)
+
+ /**
+ * There is a problem with Windows process manager
+   * Should wait for a while to make sure the port is free and the subprocess is killed
+ * The tested threshold is 500ms
+ **/
+ if (process.platform === 'win32') {
+ await new Promise((resolve) => setTimeout(resolve, 500))
+ }
+
+ await spawnNitroProcess()
+ await loadLLMModel(modelSettings)
+ await validateModelStatus()
+}
+
+const spawnNitroProcess = async (): Promise<void> => {
+ logServer(`[NITRO]::Debug: Spawning Nitro subprocess...`)
+
+ let binaryFolder = join(
+ getJanExtensionsPath(),
+ '@janhq',
+ 'inference-nitro-extension',
+ 'dist',
+ 'bin'
+ )
+
+ let executableOptions = executableNitroFile()
+ const tcpPortUsed = require('tcp-port-used')
+
+ const args: string[] = ['1', LOCAL_HOST, NITRO_DEFAULT_PORT.toString()]
+ // Execute the binary
+ logServer(
+ `[NITRO]::Debug: Spawn nitro at path: ${executableOptions.executablePath}, and args: ${args}`
+ )
+ subprocess = spawn(
+ executableOptions.executablePath,
+    args,
+ {
+ cwd: binaryFolder,
+ env: {
+ ...process.env,
+ CUDA_VISIBLE_DEVICES: executableOptions.cudaVisibleDevices,
+ },
+ }
+ )
+
+ // Handle subprocess output
+ subprocess.stdout.on('data', (data: any) => {
+ logServer(`[NITRO]::Debug: ${data}`)
+ })
+
+ subprocess.stderr.on('data', (data: any) => {
+ logServer(`[NITRO]::Error: ${data}`)
+ })
+
+ subprocess.on('close', (code: any) => {
+ logServer(`[NITRO]::Debug: Nitro exited with code: ${code}`)
+ subprocess = undefined
+ })
+
+  // Resolve only once the port is in use, so callers don't race the model-load request
+  return tcpPortUsed.waitUntilUsed(NITRO_DEFAULT_PORT, 300, 30000).then(() => {
+    logServer(`[NITRO]::Debug: Nitro is ready`)
+  })
+}
+
+type NitroExecutableOptions = {
+ executablePath: string
+ cudaVisibleDevices: string
+}
+
+const executableNitroFile = (): NitroExecutableOptions => {
+ const nvidiaInfoFilePath = join(getJanDataFolderPath(), 'settings', 'settings.json')
+ let binaryFolder = join(
+ getJanExtensionsPath(),
+ '@janhq',
+ 'inference-nitro-extension',
+ 'dist',
+ 'bin'
+ )
+
+ let cudaVisibleDevices = ''
+ let binaryName = 'nitro'
+ /**
+ * The binary folder is different for each platform.
+ */
+ if (process.platform === 'win32') {
+ /**
+ * For Windows: win-cpu, win-cuda-11-7, win-cuda-12-0
+ */
+ let nvidiaInfo = JSON.parse(fs.readFileSync(nvidiaInfoFilePath, 'utf-8'))
+ if (nvidiaInfo['run_mode'] === 'cpu') {
+ binaryFolder = join(binaryFolder, 'win-cpu')
+ } else {
+ if (nvidiaInfo['cuda'].version === '12') {
+ binaryFolder = join(binaryFolder, 'win-cuda-12-0')
+ } else {
+ binaryFolder = join(binaryFolder, 'win-cuda-11-7')
+ }
+ cudaVisibleDevices = nvidiaInfo['gpu_highest_vram']
+ }
+ binaryName = 'nitro.exe'
+ } else if (process.platform === 'darwin') {
+ /**
+   * For macOS: mac-arm64 (Apple Silicon), mac-x64 (Intel)
+ */
+ if (process.arch === 'arm64') {
+ binaryFolder = join(binaryFolder, 'mac-arm64')
+ } else {
+ binaryFolder = join(binaryFolder, 'mac-x64')
+ }
+ } else {
+ /**
+ * For Linux: linux-cpu, linux-cuda-11-7, linux-cuda-12-0
+ */
+ let nvidiaInfo = JSON.parse(fs.readFileSync(nvidiaInfoFilePath, 'utf-8'))
+ if (nvidiaInfo['run_mode'] === 'cpu') {
+ binaryFolder = join(binaryFolder, 'linux-cpu')
+ } else {
+ if (nvidiaInfo['cuda'].version === '12') {
+ binaryFolder = join(binaryFolder, 'linux-cuda-12-0')
+ } else {
+ binaryFolder = join(binaryFolder, 'linux-cuda-11-7')
+ }
+ cudaVisibleDevices = nvidiaInfo['gpu_highest_vram']
+ }
+ }
+
+ return {
+ executablePath: join(binaryFolder, binaryName),
+ cudaVisibleDevices,
+ }
+}
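+// Illustrative shape of the settings.json consumed above (only the fields read here; values assumed):
+// { "run_mode": "gpu", "cuda": { "version": "12" }, "gpu_highest_vram": "0" }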
+
+const validateModelStatus = async (): Promise<void> => {
+ // Send a GET request to the validation URL.
+  // Retry the request up to 5 times if it fails, with a delay of 500 milliseconds between retries.
+ const fetchRT = require('fetch-retry')
+ const fetchRetry = fetchRT(fetch)
+
+ return fetchRetry(NITRO_HTTP_VALIDATE_MODEL_URL, {
+ method: 'GET',
+ headers: {
+ 'Content-Type': 'application/json',
+ },
+ retries: 5,
+ retryDelay: 500,
+ }).then(async (res: Response) => {
+ logServer(`[NITRO]::Debug: Validate model state success with response ${JSON.stringify(res)}`)
+ // If the response is OK, check model_loaded status.
+ if (res.ok) {
+ const body = await res.json()
+ // If the model is loaded, return an empty object.
+ // Otherwise, return an object with an error message.
+ if (body.model_loaded) {
+ return Promise.resolve()
+ }
+ }
+ return Promise.reject('Validate model status failed')
+ })
+}
+
+const loadLLMModel = async (settings: NitroModelSettings): Promise<Response> => {
+ logServer(`[NITRO]::Debug: Loading model with params ${JSON.stringify(settings)}`)
+ const fetchRT = require('fetch-retry')
+ const fetchRetry = fetchRT(fetch)
+
+ return fetchRetry(NITRO_HTTP_LOAD_MODEL_URL, {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ },
+ body: JSON.stringify(settings),
+ retries: 3,
+ retryDelay: 500,
+ })
+ .then((res: any) => {
+ logServer(`[NITRO]::Debug: Load model success with response ${JSON.stringify(res)}`)
+ return Promise.resolve(res)
+ })
+ .catch((err: any) => {
+ logServer(`[NITRO]::Error: Load model failed with error ${err}`)
+ return Promise.reject(err)
+ })
+}
+
+/**
+ * Stop model and kill nitro process.
+ */
+export const stopModel = async (_modelId: string) => {
+ if (!subprocess) {
+ return {
+ error: "Model isn't running",
+ }
+ }
+ return new Promise((resolve, reject) => {
+ const controller = new AbortController()
+ setTimeout(() => {
+ controller.abort()
+ reject({
+        error: 'Failed to stop model: Timed out',
+ })
+ }, 5000)
+ const tcpPortUsed = require('tcp-port-used')
+ logServer(`[NITRO]::Debug: Request to kill Nitro`)
+
+ fetch(NITRO_HTTP_KILL_URL, {
+ method: 'DELETE',
+ signal: controller.signal,
+ })
+ .then(() => {
+ subprocess?.kill()
+ subprocess = undefined
+ })
+ .catch(() => {
+        // Ignore errors from the kill request; Nitro may already have exited, and we still wait for the port below
+ })
+ .then(() => tcpPortUsed.waitUntilFree(NITRO_DEFAULT_PORT, 300, 5000))
+ .then(() => logServer(`[NITRO]::Debug: Nitro process is terminated`))
+ .then(() =>
+ resolve({
+ message: 'Model stopped',
+ })
+ )
+ })
+}
diff --git a/core/src/node/api/routes/common.ts b/core/src/node/api/routes/common.ts
index a6c65a382..27385e561 100644
--- a/core/src/node/api/routes/common.ts
+++ b/core/src/node/api/routes/common.ts
@@ -10,6 +10,8 @@ import {
} from '../common/builder'
import { JanApiRouteConfiguration } from '../common/configuration'
+import { startModel, stopModel } from '../common/startStopModel'
+import { ModelSettingParams } from '../../../types'
export const commonRouter = async (app: HttpServer) => {
// Common Routes
@@ -17,19 +19,33 @@ export const commonRouter = async (app: HttpServer) => {
app.get(`/${key}`, async (_request) => getBuilder(JanApiRouteConfiguration[key]))
app.get(`/${key}/:id`, async (request: any) =>
- retrieveBuilder(JanApiRouteConfiguration[key], request.params.id),
+ retrieveBuilder(JanApiRouteConfiguration[key], request.params.id)
)
app.delete(`/${key}/:id`, async (request: any) =>
- deleteBuilder(JanApiRouteConfiguration[key], request.params.id),
+ deleteBuilder(JanApiRouteConfiguration[key], request.params.id)
)
})
// Download Model Routes
app.get(`/models/download/:modelId`, async (request: any) =>
- downloadModel(request.params.modelId, { ignoreSSL: request.query.ignoreSSL === 'true', proxy: request.query.proxy }),
+ downloadModel(request.params.modelId, {
+ ignoreSSL: request.query.ignoreSSL === 'true',
+ proxy: request.query.proxy,
+ })
)
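+  // Model start/stop routes, e.g. PUT /models/llama-2-7b/start with an optional
+  // JSON body of ModelSettingParams (the model id shown is illustrative)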
+ app.put(`/models/:modelId/start`, async (request: any) => {
+ let settingParams: ModelSettingParams | undefined = undefined
+ if (Object.keys(request.body).length !== 0) {
+ settingParams = JSON.parse(request.body) as ModelSettingParams
+ }
+
+ return startModel(request.params.modelId, settingParams)
+ })
+
+ app.put(`/models/:modelId/stop`, async (request: any) => stopModel(request.params.modelId))
+
// Chat Completion Routes
app.post(`/chat/completions`, async (request: any, reply: any) => chatCompletions(request, reply))
diff --git a/core/src/node/api/routes/fileManager.ts b/core/src/node/api/routes/fileManager.ts
index 159c23a0c..66056444e 100644
--- a/core/src/node/api/routes/fileManager.ts
+++ b/core/src/node/api/routes/fileManager.ts
@@ -8,5 +8,7 @@ export const fsRouter = async (app: HttpServer) => {
app.post(`/app/${FileManagerRoute.getResourcePath}`, async (request: any, reply: any) => {})
+ app.post(`/app/${FileManagerRoute.getUserHomePath}`, async (request: any, reply: any) => {})
+
app.post(`/app/${FileManagerRoute.fileStat}`, async (request: any, reply: any) => {})
}
diff --git a/core/src/node/api/routes/fs.ts b/core/src/node/api/routes/fs.ts
index 5f511af27..c5404ccce 100644
--- a/core/src/node/api/routes/fs.ts
+++ b/core/src/node/api/routes/fs.ts
@@ -2,6 +2,7 @@ import { FileSystemRoute } from '../../../api'
import { join } from 'path'
import { HttpServer } from '../HttpServer'
import { getJanDataFolderPath } from '../../utils'
+import { normalizeFilePath } from '../../path'
export const fsRouter = async (app: HttpServer) => {
const moduleName = 'fs'
@@ -13,10 +14,10 @@ export const fsRouter = async (app: HttpServer) => {
const result = await import(moduleName).then((mdl) => {
return mdl[route](
...body.map((arg: any) =>
- typeof arg === 'string' && arg.includes('file:/')
- ? join(getJanDataFolderPath(), arg.replace('file:/', ''))
- : arg,
- ),
+ typeof arg === 'string' && (arg.startsWith(`file:/`) || arg.startsWith(`file:\\`))
+ ? join(getJanDataFolderPath(), normalizeFilePath(arg))
+ : arg
+ )
)
})
res.status(200).send(result)
diff --git a/core/src/node/utils/index.ts b/core/src/node/utils/index.ts
index 00db04c9b..4bcbf13b1 100644
--- a/core/src/node/utils/index.ts
+++ b/core/src/node/utils/index.ts
@@ -1,16 +1,18 @@
-import { AppConfiguration } from "../../types";
-import { join } from "path";
-import fs from "fs";
-import os from "os";
+import { AppConfiguration, SystemResourceInfo } from '../../types'
+import { join } from 'path'
+import fs from 'fs'
+import os from 'os'
+import { log, logServer } from '../log'
+import childProcess from 'child_process'
// TODO: move this to core
-const configurationFileName = "settings.json";
+const configurationFileName = 'settings.json'
// TODO: do no specify app name in framework module
-const defaultJanDataFolder = join(os.homedir(), "jan");
+const defaultJanDataFolder = join(os.homedir(), 'jan')
const defaultAppConfig: AppConfiguration = {
data_folder: defaultJanDataFolder,
-};
+}
/**
* Getting App Configurations.
@@ -20,39 +22,39 @@ const defaultAppConfig: AppConfiguration = {
export const getAppConfigurations = (): AppConfiguration => {
// Retrieve Application Support folder path
// Fallback to user home directory if not found
- const configurationFile = getConfigurationFilePath();
+ const configurationFile = getConfigurationFilePath()
if (!fs.existsSync(configurationFile)) {
// create default app config if we don't have one
- console.debug(`App config not found, creating default config at ${configurationFile}`);
- fs.writeFileSync(configurationFile, JSON.stringify(defaultAppConfig));
- return defaultAppConfig;
+ console.debug(`App config not found, creating default config at ${configurationFile}`)
+ fs.writeFileSync(configurationFile, JSON.stringify(defaultAppConfig))
+ return defaultAppConfig
}
try {
const appConfigurations: AppConfiguration = JSON.parse(
- fs.readFileSync(configurationFile, "utf-8"),
- );
- return appConfigurations;
+ fs.readFileSync(configurationFile, 'utf-8')
+ )
+ return appConfigurations
} catch (err) {
- console.error(`Failed to read app config, return default config instead! Err: ${err}`);
- return defaultAppConfig;
+ console.error(`Failed to read app config, return default config instead! Err: ${err}`)
+ return defaultAppConfig
}
-};
+}
const getConfigurationFilePath = () =>
join(
- global.core?.appPath() || process.env[process.platform == "win32" ? "USERPROFILE" : "HOME"],
- configurationFileName,
- );
+ global.core?.appPath() || process.env[process.platform == 'win32' ? 'USERPROFILE' : 'HOME'],
+ configurationFileName
+ )
export const updateAppConfiguration = (configuration: AppConfiguration): Promise<void> => {
- const configurationFile = getConfigurationFilePath();
- console.debug("updateAppConfiguration, configurationFile: ", configurationFile);
+ const configurationFile = getConfigurationFilePath()
+ console.debug('updateAppConfiguration, configurationFile: ', configurationFile)
- fs.writeFileSync(configurationFile, JSON.stringify(configuration));
- return Promise.resolve();
-};
+ fs.writeFileSync(configurationFile, JSON.stringify(configuration))
+ return Promise.resolve()
+}
/**
* Utility function to get server log path
@@ -60,13 +62,13 @@ export const updateAppConfiguration = (configuration: AppConfiguration): Promise
* @returns {string} The log path.
*/
export const getServerLogPath = (): string => {
- const appConfigurations = getAppConfigurations();
- const logFolderPath = join(appConfigurations.data_folder, "logs");
+ const appConfigurations = getAppConfigurations()
+ const logFolderPath = join(appConfigurations.data_folder, 'logs')
if (!fs.existsSync(logFolderPath)) {
- fs.mkdirSync(logFolderPath, { recursive: true });
+ fs.mkdirSync(logFolderPath, { recursive: true })
}
- return join(logFolderPath, "server.log");
-};
+ return join(logFolderPath, 'server.log')
+}
/**
* Utility function to get app log path
@@ -74,13 +76,13 @@ export const getServerLogPath = (): string => {
* @returns {string} The log path.
*/
export const getAppLogPath = (): string => {
- const appConfigurations = getAppConfigurations();
- const logFolderPath = join(appConfigurations.data_folder, "logs");
+ const appConfigurations = getAppConfigurations()
+ const logFolderPath = join(appConfigurations.data_folder, 'logs')
if (!fs.existsSync(logFolderPath)) {
- fs.mkdirSync(logFolderPath, { recursive: true });
+ fs.mkdirSync(logFolderPath, { recursive: true })
}
- return join(logFolderPath, "app.log");
-};
+ return join(logFolderPath, 'app.log')
+}
/**
* Utility function to get data folder path
@@ -88,9 +90,9 @@ export const getAppLogPath = (): string => {
* @returns {string} The data folder path.
*/
export const getJanDataFolderPath = (): string => {
- const appConfigurations = getAppConfigurations();
- return appConfigurations.data_folder;
-};
+ const appConfigurations = getAppConfigurations()
+ return appConfigurations.data_folder
+}
/**
* Utility function to get extension path
@@ -98,6 +100,70 @@ export const getJanDataFolderPath = (): string => {
* @returns {string} The extensions path.
*/
export const getJanExtensionsPath = (): string => {
- const appConfigurations = getAppConfigurations();
- return join(appConfigurations.data_folder, "extensions");
-};
+ const appConfigurations = getAppConfigurations()
+ return join(appConfigurations.data_folder, 'extensions')
+}
+
+/**
+ * Utility function to physical cpu count
+ *
+ * @returns {number} The physical cpu count.
+ */
+export const physicalCpuCount = async (): Promise<number> => {
+ const platform = os.platform()
+ if (platform === 'linux') {
+ const output = await exec('lscpu -p | egrep -v "^#" | sort -u -t, -k 2,4 | wc -l')
+ return parseInt(output.trim(), 10)
+ } else if (platform === 'darwin') {
+ const output = await exec('sysctl -n hw.physicalcpu_max')
+ return parseInt(output.trim(), 10)
+ } else if (platform === 'win32') {
+ const output = await exec('WMIC CPU Get NumberOfCores')
+ return output
+ .split(os.EOL)
+ .map((line: string) => parseInt(line))
+ .filter((value: number) => !isNaN(value))
+      .reduce((sum: number, number: number) => sum + number, 0)
+ } else {
+ const cores = os.cpus().filter((cpu: any, index: number) => {
+ const hasHyperthreading = cpu.model.includes('Intel')
+ const isOdd = index % 2 === 1
+ return !hasHyperthreading || isOdd
+ })
+ return cores.length
+ }
+}
+
+const exec = async (command: string): Promise<string> => {
+ return new Promise((resolve, reject) => {
+ childProcess.exec(command, { encoding: 'utf8' }, (error, stdout) => {
+ if (error) {
+ reject(error)
+ } else {
+ resolve(stdout)
+ }
+ })
+ })
+}
+
+export const getSystemResourceInfo = async (): Promise<SystemResourceInfo> => {
+ const cpu = await physicalCpuCount()
+  const message = `[NITRO]::CPU information - ${cpu}`
+ log(message)
+ logServer(message)
+
+ return {
+ numCpuPhysicalCore: cpu,
+ memAvailable: 0, // TODO: this should not be 0
+ }
+}
+
+export const getEngineConfiguration = async (engineId: string) => {
+ if (engineId !== 'openai') {
+ return undefined
+ }
+ const directoryPath = join(getJanDataFolderPath(), 'engines')
+ const filePath = join(directoryPath, `${engineId}.json`)
+ const data = fs.readFileSync(filePath, 'utf-8')
+ return JSON.parse(data)
+}
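+// Illustrative ~/jan/engines/openai.json read above (field names assumed from
+// usage; only api_key is confirmed by the calling code in builder.ts):
+// { "api_key": "sk-...", "full_url": "https://api.openai.com/v1/chat/completions" }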
diff --git a/core/src/types/assistant/assistantEntity.ts b/core/src/types/assistant/assistantEntity.ts
index 91bb2bb22..733dbea8d 100644
--- a/core/src/types/assistant/assistantEntity.ts
+++ b/core/src/types/assistant/assistantEntity.ts
@@ -2,6 +2,13 @@
* Assistant type defines the shape of an assistant object.
* @stored
*/
+
+export type AssistantTool = {
+ type: string
+ enabled: boolean
+ settings: any
+}
+
export type Assistant = {
/** Represents the avatar of the user. */
avatar: string
@@ -22,7 +29,7 @@ export type Assistant = {
/** Represents the instructions for the object. */
instructions?: string
/** Represents the tools associated with the object. */
- tools?: any
+ tools?: AssistantTool[]
/** Represents the file identifiers associated with the object. */
file_ids: string[]
/** Represents the metadata of the object. */
diff --git a/core/src/types/index.ts b/core/src/types/index.ts
index 3bdcb5421..ee6f4ef08 100644
--- a/core/src/types/index.ts
+++ b/core/src/types/index.ts
@@ -6,3 +6,4 @@ export * from './inference'
export * from './monitoring'
export * from './file'
export * from './config'
+export * from './miscellaneous'
diff --git a/core/src/types/inference/inferenceEntity.ts b/core/src/types/inference/inferenceEntity.ts
index 58b838ae7..c37e3b079 100644
--- a/core/src/types/inference/inferenceEntity.ts
+++ b/core/src/types/inference/inferenceEntity.ts
@@ -1,3 +1,5 @@
+import { ContentType, ContentValue } from '../message'
+
/**
* The role of the author of this message.
*/
@@ -13,7 +15,32 @@ export enum ChatCompletionRole {
*/
export type ChatCompletionMessage = {
/** The contents of the message. **/
- content?: string
+ content?: ChatCompletionMessageContent
/** The role of the author of this message. **/
role: ChatCompletionRole
}
+
+export type ChatCompletionMessageContent =
+ | string
+ | (ChatCompletionMessageContentText &
+ ChatCompletionMessageContentImage &
+ ChatCompletionMessageContentDoc)[]
+
+export enum ChatCompletionMessageContentType {
+ Text = 'text',
+ Image = 'image_url',
+ Doc = 'doc_url',
+}
+
+export type ChatCompletionMessageContentText = {
+ type: ChatCompletionMessageContentType
+ text: string
+}
+export type ChatCompletionMessageContentImage = {
+ type: ChatCompletionMessageContentType
+ image_url: { url: string }
+}
+export type ChatCompletionMessageContentDoc = {
+ type: ChatCompletionMessageContentType
+ doc_url: { url: string }
+}
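+// Example content value (illustrative):
+// [
+//   { type: ChatCompletionMessageContentType.Text, text: 'What is in this image?' },
+//   { type: ChatCompletionMessageContentType.Image, image_url: { url: 'data:image/png;base64,...' } },
+// ]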
diff --git a/core/src/types/message/messageEntity.ts b/core/src/types/message/messageEntity.ts
index 199743796..87e4b1997 100644
--- a/core/src/types/message/messageEntity.ts
+++ b/core/src/types/message/messageEntity.ts
@@ -1,5 +1,6 @@
import { ChatCompletionMessage, ChatCompletionRole } from '../inference'
import { ModelInfo } from '../model'
+import { Thread } from '../thread'
/**
* The `ThreadMessage` type defines the shape of a thread's message object.
@@ -35,7 +36,10 @@ export type ThreadMessage = {
export type MessageRequest = {
id?: string
- /** The thread id of the message request. **/
+ /**
+ * @deprecated Use thread object instead
+ * The thread id of the message request.
+ */
threadId: string
/**
@@ -48,6 +52,10 @@ export type MessageRequest = {
/** Settings for constructing a chat completion request **/
model?: ModelInfo
+
+  /** The thread this message belongs to. **/
+ // TODO: deprecate threadId field
+ thread?: Thread
}
/**
@@ -62,7 +70,7 @@ export enum MessageStatus {
/** Message loaded with error. **/
Error = 'error',
/** Message is cancelled streaming */
- Stopped = "stopped"
+ Stopped = 'stopped',
}
/**
@@ -71,6 +79,7 @@ export enum MessageStatus {
export enum ContentType {
Text = 'text',
Image = 'image',
+ Pdf = 'pdf',
}
/**
@@ -80,6 +89,8 @@ export enum ContentType {
export type ContentValue = {
value: string
annotations: string[]
+ name?: string
+ size?: number
}
/**
diff --git a/core/src/types/miscellaneous/index.ts b/core/src/types/miscellaneous/index.ts
new file mode 100644
index 000000000..02c973323
--- /dev/null
+++ b/core/src/types/miscellaneous/index.ts
@@ -0,0 +1,2 @@
+export * from './systemResourceInfo'
+export * from './promptTemplate'
diff --git a/core/src/types/miscellaneous/promptTemplate.ts b/core/src/types/miscellaneous/promptTemplate.ts
new file mode 100644
index 000000000..a6743c67c
--- /dev/null
+++ b/core/src/types/miscellaneous/promptTemplate.ts
@@ -0,0 +1,6 @@
+export type PromptTemplate = {
+ system_prompt?: string
+ ai_prompt?: string
+ user_prompt?: string
+ error?: string
+}
diff --git a/core/src/types/miscellaneous/systemResourceInfo.ts b/core/src/types/miscellaneous/systemResourceInfo.ts
new file mode 100644
index 000000000..1472cda47
--- /dev/null
+++ b/core/src/types/miscellaneous/systemResourceInfo.ts
@@ -0,0 +1,4 @@
+export type SystemResourceInfo = {
+ numCpuPhysicalCore: number
+ memAvailable: number
+}
diff --git a/core/src/types/model/modelEntity.ts b/core/src/types/model/modelEntity.ts
index 80adc9e96..644c34dfb 100644
--- a/core/src/types/model/modelEntity.ts
+++ b/core/src/types/model/modelEntity.ts
@@ -7,6 +7,7 @@ export type ModelInfo = {
settings: ModelSettingParams
parameters: ModelRuntimeParams
engine?: InferenceEngine
+ proxyEngine?: InferenceEngine
}
/**
@@ -18,7 +19,13 @@ export enum InferenceEngine {
nitro = 'nitro',
openai = 'openai',
triton_trtllm = 'triton_trtllm',
- hf_endpoint = 'hf_endpoint',
+
+ tool_retrieval_enabled = 'tool_retrieval_enabled',
+}
+
+export type ModelArtifact = {
+ filename: string
+ url: string
}
/**
@@ -45,7 +52,7 @@ export type Model = {
/**
* The model download source. It can be an external url or a local filepath.
*/
- source_url: string
+ sources: ModelArtifact[]
/**
* The model identifier, which can be referenced in the API endpoints.
@@ -85,6 +92,13 @@ export type Model = {
* The model engine.
*/
engine: InferenceEngine
+
+ proxyEngine?: InferenceEngine
+
+ /**
+   * Whether the model is multimodal (vision-capable) or not.
+ */
+ visionModel?: boolean
}
export type ModelMetadata = {
@@ -107,6 +121,9 @@ export type ModelSettingParams = {
system_prompt?: string
ai_prompt?: string
user_prompt?: string
+ llama_model_path?: string
+ mmproj?: string
+ cont_batching?: boolean
}
/**
@@ -122,4 +139,5 @@ export type ModelRuntimeParams = {
stop?: string[]
frequency_penalty?: number
presence_penalty?: number
+ engine?: string
}
diff --git a/core/src/types/thread/index.ts b/core/src/types/thread/index.ts
index c6ff6204a..32155e1cd 100644
--- a/core/src/types/thread/index.ts
+++ b/core/src/types/thread/index.ts
@@ -1,2 +1,3 @@
export * from './threadEntity'
export * from './threadInterface'
+export * from './threadEvent'
diff --git a/core/src/types/thread/threadEntity.ts b/core/src/types/thread/threadEntity.ts
index 4ff3aa1fc..dd88b10ec 100644
--- a/core/src/types/thread/threadEntity.ts
+++ b/core/src/types/thread/threadEntity.ts
@@ -1,3 +1,4 @@
+import { AssistantTool } from '../assistant'
import { ModelInfo } from '../model'
/**
@@ -30,6 +31,7 @@ export type ThreadAssistantInfo = {
assistant_name: string
model: ModelInfo
instructions?: string
+ tools?: AssistantTool[]
}
/**
@@ -41,5 +43,4 @@ export type ThreadState = {
waitingForResponse: boolean
error?: Error
lastMessage?: string
- isFinishInit?: boolean
}
diff --git a/core/src/types/thread/threadEvent.ts b/core/src/types/thread/threadEvent.ts
new file mode 100644
index 000000000..4b19b09c1
--- /dev/null
+++ b/core/src/types/thread/threadEvent.ts
@@ -0,0 +1,4 @@
+export enum ThreadEvent {
+ /** The `OnThreadStarted` event is emitted when a thread is started. */
+ OnThreadStarted = 'OnThreadStarted',
+}
diff --git a/docs/.env.example b/docs/.env.example
index 6755f2520..b4a7fa5f1 100644
--- a/docs/.env.example
+++ b/docs/.env.example
@@ -1,5 +1,5 @@
GTM_ID=xxxx
-POSTHOG_PROJECT_API_KEY=xxxx
-POSTHOG_APP_URL=xxxx
+UMAMI_PROJECT_API_KEY=xxxx
+UMAMI_APP_URL=xxxx
ALGOLIA_API_KEY=xxxx
ALGOLIA_APP_ID=xxxx
\ No newline at end of file
diff --git a/docs/docs/docs/03-engineering/models.md b/docs/docs/docs/03-engineering/models.md
index cb0b44f41..4e4c3c604 100644
--- a/docs/docs/docs/03-engineering/models.md
+++ b/docs/docs/docs/03-engineering/models.md
@@ -56,7 +56,6 @@ jan/ # Jan root folder
- Each `model` folder contains a `model.json` file, which is a representation of a model.
- `model.json` contains metadata and default parameters used to run a model.
-- The only required field is `source_url`.
### Example
@@ -64,36 +63,43 @@ Here's a standard example `model.json` for a GGUF model.
```js
{
-"id": "zephyr-7b", // Defaults to foldername
-"object": "model", // Defaults to "model"
-"source_url": "https://huggingface.co/TheBloke/zephyr-7B-beta-GGUF/blob/main/zephyr-7b-beta.Q4_K_M.gguf",
-"name": "Zephyr 7B", // Defaults to foldername
-"owned_by": "you", // Defaults to "you"
-"version": "1", // Defaults to 1
-"created": 1231231, // Defaults to file creation time
-"description": null, // Defaults to null
-"state": enum[null, "ready"]
-"format": "ggufv3", // Defaults to "ggufv3"
-"engine": "nitro", // engine_id specified in jan/engine folder
-"engine_parameters": { // Engine parameters inside model.json can override
- "ctx_len": 2048, // the value inside the base engine.json
+ "id": "zephyr-7b", // Defaults to foldername
+ "object": "model", // Defaults to "model"
+ "sources": [
+ {
+ "filename": "zephyr-7b-beta.Q4_K_M.gguf",
+ "url": "https://huggingface.co/TheBloke/zephyr-7B-beta-GGUF/blob/main/zephyr-7b-beta.Q4_K_M.gguf"
+ }
+ ],
+ "name": "Zephyr 7B", // Defaults to foldername
+ "owned_by": "you", // Defaults to "you"
+ "version": "1", // Defaults to 1
+ "created": 1231231, // Defaults to file creation time
+ "description": null, // Defaults to null
+ "format": "ggufv3", // Defaults to "ggufv3"
+ "engine": "nitro", // engine_id specified in jan/engine folder
+ "engine_parameters": {
+ // Engine parameters inside model.json can override
+ "ctx_len": 4096, // the value inside the base engine.json
"ngl": 100,
"embedding": true,
- "n_parallel": 4,
-},
-"model_parameters": { // Models are called parameters
+ "n_parallel": 4
+ },
+ "model_parameters": {
+    // Parameters the model is called with
"stream": true,
- "max_tokens": 2048,
- "stop": [""], // This usually can be left blank, only used with specific need from model author
+ "max_tokens": 4096,
+ "stop": [""], // This usually can be left blank, only used with specific need from model author
"frequency_penalty": 0,
"presence_penalty": 0,
"temperature": 0.7,
"top_p": 0.95
-},
-"metadata": {}, // Defaults to {}
-"assets": [ // Defaults to current dir
- "file://.../zephyr-7b-q4_k_m.bin",
-]
+ },
+ "metadata": {}, // Defaults to {}
+ "assets": [
+ // Defaults to current dir
+ "file://.../zephyr-7b-q4_k_m.bin"
+ ]
}
```
diff --git a/docs/docs/guides/04-using-models/02-import-manually.mdx b/docs/docs/guides/04-using-models/02-import-manually.mdx
index f5ecb5259..68142a8af 100644
--- a/docs/docs/guides/04-using-models/02-import-manually.mdx
+++ b/docs/docs/guides/04-using-models/02-import-manually.mdx
@@ -31,7 +31,6 @@ In this section, we will show you how to import a GGUF model from [HuggingFace](
## Manually Importing a Downloaded Model (nightly versions and v0.4.4+)
-
### 1. Create a Model Folder
Navigate to the `~/jan/models` folder. You can find this folder by going to `App Settings` > `Advanced` > `Open App Directory`.
@@ -92,7 +91,7 @@ Drag and drop your model binary into this folder, ensuring the `modelname.gguf`
#### 3. Voila
-If your model doesn't show up in the Model Selector in conversations, please restart the app.
+If your model doesn't show up in the Model Selector in conversations, please restart the app.
If that doesn't work, please feel free to join our [Discord community](https://discord.gg/Dt7MxDyNNZ) for support, updates, and discussions.
@@ -190,14 +189,18 @@ Edit `model.json` and include the following configurations:
- Ensure the filename must be `model.json`.
- Ensure the `id` property matches the folder name you created.
- Ensure the GGUF filename should match the `id` property exactly.
-- Ensure the `source_url` property is the direct binary download link ending in `.gguf`. In HuggingFace, you can find the direct links in the `Files and versions` tab.
+- Ensure the `url` property inside the `sources` array is the direct binary download link ending in `.gguf`. In HuggingFace, you can find the direct links in the `Files and versions` tab.
- Ensure you are using the correct `prompt_template`. This is usually provided in the HuggingFace model's description page.
-- Ensure the `state` property is set to `ready`.
```json title="model.json"
{
// highlight-start
- "source_url": "https://huggingface.co/janhq/trinity-v1-GGUF/resolve/main/trinity-v1.Q4_K_M.gguf",
+ "sources": [
+ {
+ "filename": "trinity-v1.Q4_K_M.gguf",
+ "url": "https://huggingface.co/janhq/trinity-v1-GGUF/resolve/main/trinity-v1.Q4_K_M.gguf"
+ }
+ ],
"id": "trinity-v1-7b",
// highlight-end
"object": "model",
@@ -208,7 +211,8 @@ Edit `model.json` and include the following configurations:
"settings": {
"ctx_len": 4096,
// highlight-next-line
- "prompt_template": "{system_message}\n### Instruction:\n{prompt}\n### Response:"
+ "prompt_template": "{system_message}\n### Instruction:\n{prompt}\n### Response:",
+ "llama_model_path": "trinity-v1.Q4_K_M.gguf"
},
"parameters": {
"max_tokens": 4096
@@ -218,9 +222,7 @@ Edit `model.json` and include the following configurations:
"tags": ["7B", "Merged"],
"size": 4370000000
},
- "engine": "nitro",
- // highlight-next-line
- "state": "ready"
+ "engine": "nitro"
}
```
diff --git a/docs/docs/guides/04-using-models/03-integrate-with-remote-server.mdx b/docs/docs/guides/04-using-models/03-integrate-with-remote-server.mdx
index 3ed15bc9f..533797fca 100644
--- a/docs/docs/guides/04-using-models/03-integrate-with-remote-server.mdx
+++ b/docs/docs/guides/04-using-models/03-integrate-with-remote-server.mdx
@@ -40,7 +40,12 @@ Navigate to the `~/jan/models` folder. Create a folder named `gpt-3.5-turbo-16k`
```json title="~/jan/models/gpt-3.5-turbo-16k/model.json"
{
- "source_url": "https://openai.com",
+ "sources": [
+ {
+ "filename": "openai",
+ "url": "https://openai.com"
+ }
+ ],
// highlight-next-line
"id": "gpt-3.5-turbo-16k",
"object": "model",
@@ -55,8 +60,7 @@ Navigate to the `~/jan/models` folder. Create a folder named `gpt-3.5-turbo-16k`
"author": "OpenAI",
"tags": ["General", "Big Context Length"]
},
- "engine": "openai",
- "state": "ready"
+ "engine": "openai"
// highlight-end
}
```
@@ -118,7 +122,12 @@ Navigate to the `~/jan/models` folder. Create a folder named `mistral-ins-7b-q4`
```json title="~/jan/models/mistral-ins-7b-q4/model.json"
{
- "source_url": "https://jan.ai",
+ "sources": [
+ {
+ "filename": "janai",
+ "url": "https://jan.ai"
+ }
+ ],
// highlight-next-line
"id": "mistral-ins-7b-q4",
"object": "model",
@@ -134,8 +143,7 @@ Navigate to the `~/jan/models` folder. Create a folder named `mistral-ins-7b-q4`
"tags": ["remote", "awesome"]
},
// highlight-start
- "engine": "openai",
- "state": "ready"
+ "engine": "openai"
// highlight-end
}
```
diff --git a/docs/docs/guides/07-integrations/02-integrate-openrouter.mdx b/docs/docs/guides/07-integrations/02-integrate-openrouter.mdx
index 8623a1a4a..e0db0e336 100644
--- a/docs/docs/guides/07-integrations/02-integrate-openrouter.mdx
+++ b/docs/docs/guides/07-integrations/02-integrate-openrouter.mdx
@@ -49,7 +49,12 @@ Navigate to the `~/jan/models` folder. Create a folder named
diff --git a/electron/handlers/app.ts b/electron/handlers/app.ts
--- a/electron/handlers/app.ts
+++ b/electron/handlers/app.ts
+  /**
+   * Checks whether the given path is a subdirectory of another path.
+   *
+   * @returns {Promise<boolean>} - A promise that resolves with the result.
+   */
+ ipcMain.handle(
+ AppRoute.isSubdirectory,
+ async (_event, from: string, to: string) => {
+ const relative = getRelative(from, to)
+ const isSubdir =
+ relative && !relative.startsWith('..') && !isAbsolute(relative)
+
+ if (isSubdir === '') return false
+ else return isSubdir
+ }
+ )
+
/**
* Retrieve basename from given path, respect to the current OS.
*/
diff --git a/electron/handlers/fileManager.ts b/electron/handlers/fileManager.ts
index f41286934..e328cb53b 100644
--- a/electron/handlers/fileManager.ts
+++ b/electron/handlers/fileManager.ts
@@ -1,4 +1,4 @@
-import { ipcMain } from 'electron'
+import { ipcMain, app } from 'electron'
// @ts-ignore
import reflect from '@alumna/reflect'
@@ -38,6 +38,10 @@ export function handleFileMangerIPCs() {
getResourcePath()
)
+ ipcMain.handle(FileManagerRoute.getUserHomePath, async (_event) =>
+ app.getPath('home')
+ )
+
// handle fs is directory here
ipcMain.handle(
FileManagerRoute.fileStat,
@@ -59,4 +63,20 @@ export function handleFileMangerIPCs() {
return fileStat
}
)
+
+ ipcMain.handle(
+ FileManagerRoute.writeBlob,
+    async (_event, path: string, data: string): Promise<void> => {
+ try {
+ const normalizedPath = normalizeFilePath(path)
+ const dataBuffer = Buffer.from(data, 'base64')
+ fs.writeFileSync(
+ join(getJanDataFolderPath(), normalizedPath),
+ dataBuffer
+ )
+ } catch (err) {
+        console.error(`writeBlob ${path} error: ${err}`)
+ }
+ }
+ )
}
diff --git a/electron/handlers/fs.ts b/electron/handlers/fs.ts
index 408a5fd10..34026b940 100644
--- a/electron/handlers/fs.ts
+++ b/electron/handlers/fs.ts
@@ -1,9 +1,9 @@
import { ipcMain } from 'electron'
-import { FileSystemRoute } from '@janhq/core'
-import { join } from 'path'
import { getJanDataFolderPath, normalizeFilePath } from '@janhq/core/node'
-
+import fs from 'fs'
+import { FileManagerRoute, FileSystemRoute } from '@janhq/core'
+import { join } from 'path'
/**
* Handles file system operations.
*/
@@ -15,7 +15,7 @@ export function handleFsIPCs() {
mdl[route](
...args.map((arg) =>
typeof arg === 'string' &&
- (arg.includes(`file:/`) || arg.includes(`file:\\`))
+ (arg.startsWith(`file:/`) || arg.startsWith(`file:\\`))
? join(getJanDataFolderPath(), normalizeFilePath(arg))
: arg
)
diff --git a/electron/main.ts b/electron/main.ts
index fb7066cd0..5d7e59c0f 100644
--- a/electron/main.ts
+++ b/electron/main.ts
@@ -28,6 +28,22 @@ import { setupCore } from './utils/setup'
app
.whenReady()
+ .then(async () => {
+ if (!app.isPackaged) {
+ // Which means you're running from source code
+ const { default: installExtension, REACT_DEVELOPER_TOOLS } = await import(
+ 'electron-devtools-installer'
+ ) // Don't use import on top level, since the installer package is dev-only
+ try {
+      const name = await installExtension(REACT_DEVELOPER_TOOLS)
+ console.log(`Added Extension: ${name}`)
+ } catch (err) {
+ console.log('An error occurred while installing devtools:')
+ console.error(err)
+ // Only log the error and don't throw it because it's not critical
+ }
+ }
+ })
.then(setupCore)
.then(createUserSpace)
.then(migrateExtensions)
diff --git a/electron/package.json b/electron/package.json
index 173e54f2b..2892fedc6 100644
--- a/electron/package.json
+++ b/electron/package.json
@@ -86,7 +86,7 @@
"request": "^2.88.2",
"request-progress": "^3.0.0",
"rimraf": "^5.0.5",
- "typescript": "^5.3.3",
+ "typescript": "^5.2.2",
"ulid": "^2.3.0",
"use-debounce": "^9.0.4"
},
@@ -99,6 +99,7 @@
"@typescript-eslint/parser": "^6.7.3",
"electron": "28.0.0",
"electron-builder": "^24.9.1",
+ "electron-devtools-installer": "^3.2.0",
"electron-playwright-helpers": "^1.6.0",
"eslint-plugin-react": "^7.33.2",
"run-script-os": "^1.1.6"
diff --git a/electron/playwright.config.ts b/electron/playwright.config.ts
index 98b2c7b45..1fa3313f2 100644
--- a/electron/playwright.config.ts
+++ b/electron/playwright.config.ts
@@ -1,9 +1,9 @@
-import { PlaywrightTestConfig } from "@playwright/test";
+import { PlaywrightTestConfig } from '@playwright/test'
const config: PlaywrightTestConfig = {
- testDir: "./tests",
+ testDir: './tests',
retries: 0,
- timeout: 120000,
-};
+ globalTimeout: 300000,
+}
-export default config;
+export default config
diff --git a/electron/tests/explore.e2e.spec.ts b/electron/tests/hub.e2e.spec.ts
similarity index 71%
rename from electron/tests/explore.e2e.spec.ts
rename to electron/tests/hub.e2e.spec.ts
index 77eb3dbda..cc72e037e 100644
--- a/electron/tests/explore.e2e.spec.ts
+++ b/electron/tests/hub.e2e.spec.ts
@@ -9,6 +9,7 @@ import {
let electronApp: ElectronApplication
let page: Page
+const TIMEOUT: number = parseInt(process.env.TEST_TIMEOUT || '300000')
test.beforeAll(async () => {
process.env.CI = 'e2e'
@@ -26,7 +27,9 @@ test.beforeAll(async () => {
})
await stubDialog(electronApp, 'showMessageBox', { response: 1 })
- page = await electronApp.firstWindow()
+ page = await electronApp.firstWindow({
+ timeout: TIMEOUT,
+ })
})
test.afterAll(async () => {
@@ -34,8 +37,12 @@ test.afterAll(async () => {
await page.close()
})
-test('explores models', async () => {
- await page.getByTestId('Hub').first().click()
- await page.getByTestId('testid-explore-models').isVisible()
- // More test cases here...
+test('explores hub', async () => {
+ test.setTimeout(TIMEOUT)
+ await page.getByTestId('Hub').first().click({
+ timeout: TIMEOUT,
+ })
+ await page.getByTestId('hub-container-test-id').isVisible({
+ timeout: TIMEOUT,
+ })
})
diff --git a/electron/tests/main.e2e.spec.ts b/electron/tests/main.e2e.spec.ts
deleted file mode 100644
index 1a5bfe696..000000000
--- a/electron/tests/main.e2e.spec.ts
+++ /dev/null
@@ -1,55 +0,0 @@
-import { _electron as electron } from 'playwright'
-import { ElectronApplication, Page, expect, test } from '@playwright/test'
-
-import {
- findLatestBuild,
- parseElectronApp,
- stubDialog,
-} from 'electron-playwright-helpers'
-
-let electronApp: ElectronApplication
-let page: Page
-
-test.beforeAll(async () => {
- process.env.CI = 'e2e'
-
- const latestBuild = findLatestBuild('dist')
- expect(latestBuild).toBeTruthy()
-
- // parse the packaged Electron app and find paths and other info
- const appInfo = parseElectronApp(latestBuild)
- expect(appInfo).toBeTruthy()
- expect(appInfo.asar).toBe(true)
- expect(appInfo.executable).toBeTruthy()
- expect(appInfo.main).toBeTruthy()
- expect(appInfo.name).toBe('jan')
- expect(appInfo.packageJson).toBeTruthy()
- expect(appInfo.packageJson.name).toBe('jan')
- expect(appInfo.platform).toBeTruthy()
- expect(appInfo.platform).toBe(process.platform)
- expect(appInfo.resourcesDir).toBeTruthy()
-
- electronApp = await electron.launch({
- args: [appInfo.main], // main file from package.json
- executablePath: appInfo.executable, // path to the Electron executable
- })
- await stubDialog(electronApp, 'showMessageBox', { response: 1 })
-
- page = await electronApp.firstWindow()
-})
-
-test.afterAll(async () => {
- await electronApp.close()
- await page.close()
-})
-
-test('renders the home page', async () => {
- expect(page).toBeDefined()
-
- // Welcome text is available
- const welcomeText = await page
- .getByTestId('testid-welcome-title')
- .first()
- .isVisible()
- expect(welcomeText).toBe(false)
-})
diff --git a/electron/tests/navigation.e2e.spec.ts b/electron/tests/navigation.e2e.spec.ts
index 2f4f7b767..5c8721c2f 100644
--- a/electron/tests/navigation.e2e.spec.ts
+++ b/electron/tests/navigation.e2e.spec.ts
@@ -9,6 +9,7 @@ import {
let electronApp: ElectronApplication
let page: Page
+const TIMEOUT: number = parseInt(process.env.TEST_TIMEOUT || '300000')
test.beforeAll(async () => {
process.env.CI = 'e2e'
@@ -26,7 +27,9 @@ test.beforeAll(async () => {
})
await stubDialog(electronApp, 'showMessageBox', { response: 1 })
- page = await electronApp.firstWindow()
+ page = await electronApp.firstWindow({
+ timeout: TIMEOUT,
+ })
})
test.afterAll(async () => {
@@ -35,20 +38,24 @@ test.afterAll(async () => {
})
test('renders left navigation panel', async () => {
- // Chat section should be there
- const chatSection = await page.getByTestId('Chat').first().isVisible()
- expect(chatSection).toBe(false)
-
- // Home actions
- /* Disable unstable feature tests
- ** const botBtn = await page.getByTestId("Bot").first().isEnabled();
- ** Enable back when it is whitelisted
- */
-
+ test.setTimeout(TIMEOUT)
const systemMonitorBtn = await page
.getByTestId('System Monitor')
.first()
- .isEnabled()
- const settingsBtn = await page.getByTestId('Settings').first().isEnabled()
+ .isEnabled({
+ timeout: TIMEOUT,
+ })
+ const settingsBtn = await page
+ .getByTestId('Thread')
+ .first()
+ .isEnabled({ timeout: TIMEOUT })
expect([systemMonitorBtn, settingsBtn].filter((e) => !e).length).toBe(0)
+ // Local API Server section should be there
+ await page.getByTestId('Local API Server').first().click({
+ timeout: TIMEOUT,
+ })
+ const localServer = await page.getByTestId('local-server-testid').first()
+ await expect(localServer).toBeVisible({
+ timeout: TIMEOUT,
+ })
})
diff --git a/electron/tests/settings.e2e.spec.ts b/electron/tests/settings.e2e.spec.ts
index 798504c70..ad2d7b4a4 100644
--- a/electron/tests/settings.e2e.spec.ts
+++ b/electron/tests/settings.e2e.spec.ts
@@ -9,6 +9,7 @@ import {
let electronApp: ElectronApplication
let page: Page
+const TIMEOUT: number = parseInt(process.env.TEST_TIMEOUT || '300000')
test.beforeAll(async () => {
process.env.CI = 'e2e'
@@ -26,7 +27,9 @@ test.beforeAll(async () => {
})
await stubDialog(electronApp, 'showMessageBox', { response: 1 })
- page = await electronApp.firstWindow()
+ page = await electronApp.firstWindow({
+ timeout: TIMEOUT,
+ })
})
test.afterAll(async () => {
@@ -35,6 +38,8 @@ test.afterAll(async () => {
})
test('shows settings', async () => {
- await page.getByTestId('Settings').first().click()
- await page.getByTestId('testid-setting-description').isVisible()
+ test.setTimeout(TIMEOUT)
+ await page.getByTestId('Settings').first().click({ timeout: TIMEOUT })
+ const settingDescription = page.getByTestId('testid-setting-description')
+ await expect(settingDescription).toBeVisible({ timeout: TIMEOUT })
})
diff --git a/electron/tests/system-monitor.e2e.spec.ts b/electron/tests/system-monitor.e2e.spec.ts
deleted file mode 100644
index 747a8ae18..000000000
--- a/electron/tests/system-monitor.e2e.spec.ts
+++ /dev/null
@@ -1,41 +0,0 @@
-import { _electron as electron } from 'playwright'
-import { ElectronApplication, Page, expect, test } from '@playwright/test'
-
-import {
- findLatestBuild,
- parseElectronApp,
- stubDialog,
-} from 'electron-playwright-helpers'
-
-let electronApp: ElectronApplication
-let page: Page
-
-test.beforeAll(async () => {
- process.env.CI = 'e2e'
-
- const latestBuild = findLatestBuild('dist')
- expect(latestBuild).toBeTruthy()
-
- // parse the packaged Electron app and find paths and other info
- const appInfo = parseElectronApp(latestBuild)
- expect(appInfo).toBeTruthy()
-
- electronApp = await electron.launch({
- args: [appInfo.main], // main file from package.json
- executablePath: appInfo.executable, // path to the Electron executable
- })
- await stubDialog(electronApp, 'showMessageBox', { response: 1 })
-
- page = await electronApp.firstWindow()
-})
-
-test.afterAll(async () => {
- await electronApp.close()
- await page.close()
-})
-
-test('shows system monitor', async () => {
- await page.getByTestId('System Monitor').first().click()
- await page.getByTestId('testid-system-monitor').isVisible()
- // More test cases here...
-})
diff --git a/extensions/assistant-extension/package.json b/extensions/assistant-extension/package.json
index 4e84aa573..84bcdf47e 100644
--- a/extensions/assistant-extension/package.json
+++ b/extensions/assistant-extension/package.json
@@ -3,26 +3,50 @@
"version": "1.0.0",
"description": "This extension enables assistants, including Jan, a default assistant that can call all downloaded models",
"main": "dist/index.js",
- "module": "dist/module.js",
+ "node": "dist/node/index.js",
"author": "Jan ",
"license": "AGPL-3.0",
"scripts": {
- "build": "tsc -b . && webpack --config webpack.config.js",
- "build:publish": "rimraf *.tgz --glob && npm run build && npm pack && cpx *.tgz ../../electron/pre-install"
+ "build": "tsc --module commonjs && rollup -c rollup.config.ts",
+ "build:publish:linux": "rimraf *.tgz --glob && npm run build && npm pack && cpx *.tgz ../../electron/pre-install",
+ "build:publish:darwin": "rimraf *.tgz --glob && npm run build && ../../.github/scripts/auto-sign.sh && npm pack && cpx *.tgz ../../electron/pre-install",
+ "build:publish:win32": "rimraf *.tgz --glob && npm run build && npm pack && cpx *.tgz ../../electron/pre-install",
+ "build:publish": "run-script-os"
},
"devDependencies": {
+ "@rollup/plugin-commonjs": "^25.0.7",
+ "@rollup/plugin-json": "^6.1.0",
+ "@rollup/plugin-node-resolve": "^15.2.3",
+ "@rollup/plugin-replace": "^5.0.5",
+ "@types/pdf-parse": "^1.1.4",
+ "cpx": "^1.5.0",
"rimraf": "^3.0.2",
- "webpack": "^5.88.2",
- "webpack-cli": "^5.1.4"
+ "rollup": "^2.38.5",
+ "rollup-plugin-define": "^1.0.1",
+ "rollup-plugin-sourcemaps": "^0.6.3",
+ "rollup-plugin-typescript2": "^0.36.0",
+ "typescript": "^5.3.3",
+ "run-script-os": "^1.1.6"
},
"dependencies": {
"@janhq/core": "file:../../core",
+ "@langchain/community": "0.0.13",
+ "hnswlib-node": "^1.4.2",
+ "langchain": "^0.0.214",
"path-browserify": "^1.0.1",
+ "pdf-parse": "^1.1.1",
"ts-loader": "^9.5.0"
},
"files": [
"dist/*",
"package.json",
"README.md"
+ ],
+ "bundleDependencies": [
+ "@janhq/core",
+ "@langchain/community",
+ "hnswlib-node",
+ "langchain",
+ "pdf-parse"
]
}
diff --git a/extensions/assistant-extension/rollup.config.ts b/extensions/assistant-extension/rollup.config.ts
new file mode 100644
index 000000000..7916ef9c8
--- /dev/null
+++ b/extensions/assistant-extension/rollup.config.ts
@@ -0,0 +1,81 @@
+import resolve from "@rollup/plugin-node-resolve";
+import commonjs from "@rollup/plugin-commonjs";
+import sourceMaps from "rollup-plugin-sourcemaps";
+import typescript from "rollup-plugin-typescript2";
+import json from "@rollup/plugin-json";
+import replace from "@rollup/plugin-replace";
+
+const packageJson = require("./package.json");
+
+export default [
+ {
+ input: `src/index.ts`,
+ output: [{ file: packageJson.main, format: "es", sourcemap: true }],
+ // List here any external modules you don't want included in the bundle (e.g. 'lodash')
+ external: [],
+ watch: {
+ include: "src/**",
+ },
+ plugins: [
+ replace({
+ NODE: JSON.stringify(`${packageJson.name}/${packageJson.node}`),
+ EXTENSION_NAME: JSON.stringify(packageJson.name),
+ VERSION: JSON.stringify(packageJson.version),
+ }),
+ // Allow json resolution
+ json(),
+ // Compile TypeScript files
+ typescript({ useTsconfigDeclarationDir: true }),
+ // Allow bundling cjs modules (unlike webpack, rollup doesn't understand cjs)
+ commonjs(),
+ // Allow node_modules resolution, so you can use 'external' to control
+ // which external modules to include in the bundle
+ // https://github.com/rollup/rollup-plugin-node-resolve#usage
+ resolve({
+ extensions: [".js", ".ts", ".svelte"],
+ }),
+
+ // Resolve source maps to the original source
+ sourceMaps(),
+ ],
+ },
+ {
+ input: `src/node/index.ts`,
+ output: [{ dir: "dist/node", format: "cjs", sourcemap: false }],
+ // List here any external modules you don't want included in the bundle (e.g. 'lodash')
+ external: [
+ "@janhq/core/node",
+ "@langchain/community",
+ "langchain",
+ "langsmith",
+ "path",
+ "hnswlib-node",
+ ],
+ watch: {
+ include: "src/node/**",
+ },
+ // inlineDynamicImports: true,
+ plugins: [
+ // Allow json resolution
+ json(),
+ // Compile TypeScript files
+ typescript({ useTsconfigDeclarationDir: true }),
+ // Allow bundling cjs modules (unlike webpack, rollup doesn't understand cjs)
+ commonjs({
+ ignoreDynamicRequires: true,
+ }),
+ // Allow node_modules resolution, so you can use 'external' to control
+ // which external modules to include in the bundle
+ // https://github.com/rollup/rollup-plugin-node-resolve#usage
+ resolve({
+ extensions: [".ts", ".js", ".json"],
+ }),
+
+ // Resolve source maps to the original source
+ // sourceMaps(),
+ ],
+ },
+];
diff --git a/extensions/assistant-extension/src/@types/global.d.ts b/extensions/assistant-extension/src/@types/global.d.ts
index 3b45ccc5a..dc11709a4 100644
--- a/extensions/assistant-extension/src/@types/global.d.ts
+++ b/extensions/assistant-extension/src/@types/global.d.ts
@@ -1 +1,3 @@
-declare const MODULE: string;
+declare const NODE: string;
+declare const EXTENSION_NAME: string;
+declare const VERSION: string;
diff --git a/extensions/assistant-extension/src/index.ts b/extensions/assistant-extension/src/index.ts
index 96de33b7b..6495ea786 100644
--- a/extensions/assistant-extension/src/index.ts
+++ b/extensions/assistant-extension/src/index.ts
@@ -1,16 +1,151 @@
-import { fs, Assistant } from "@janhq/core";
-import { AssistantExtension } from "@janhq/core";
-import { join } from "path";
+import {
+ fs,
+ Assistant,
+ MessageRequest,
+ events,
+ InferenceEngine,
+ MessageEvent,
+ InferenceEvent,
+ joinPath,
+ executeOnMain,
+ AssistantExtension,
+} from "@janhq/core";
export default class JanAssistantExtension extends AssistantExtension {
private static readonly _homeDir = "file://assistants";
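+ // Retrieval-run state: abort controller and cancellation flag used to stop
+ // in-flight requests, plus the thread whose vector memory is currently loaded.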
+ controller = new AbortController();
+ isCancelled = false;
+ retrievalThreadId: string | undefined = undefined;
+
async onLoad() {
// making the assistant directory
- if (!(await fs.existsSync(JanAssistantExtension._homeDir)))
- fs.mkdirSync(JanAssistantExtension._homeDir).then(() => {
- this.createJanAssistant();
- });
+ const assistantDirExist = await fs.existsSync(
+ JanAssistantExtension._homeDir,
+ );
+ if (
+ localStorage.getItem(`${EXTENSION_NAME}-version`) !== VERSION ||
+ !assistantDirExist
+ ) {
+ if (!assistantDirExist)
+ await fs.mkdirSync(JanAssistantExtension._homeDir);
+
+ // Write assistant metadata
+ this.createJanAssistant();
+ // Finished migration
+ localStorage.setItem(`${EXTENSION_NAME}-version`, VERSION);
+ }
+
+ // Events subscription
+ events.on(MessageEvent.OnMessageSent, (data: MessageRequest) =>
+ JanAssistantExtension.handleMessageRequest(data, this),
+ );
+
+ events.on(InferenceEvent.OnInferenceStopped, () => {
+ JanAssistantExtension.handleInferenceStopped(this);
+ });
+ }
+
+ private static async handleInferenceStopped(instance: JanAssistantExtension) {
+ instance.isCancelled = true;
+ instance.controller?.abort();
+ }
+
+ private static async handleMessageRequest(
+ data: MessageRequest,
+ instance: JanAssistantExtension,
+ ) {
+ instance.isCancelled = false;
+ instance.controller = new AbortController();
+
+ if (
+ data.model?.engine !== InferenceEngine.tool_retrieval_enabled ||
+ !data.messages ||
+ !data.thread?.assistants[0]?.tools
+ ) {
+ return;
+ }
+
+ const latestMessage = data.messages[data.messages.length - 1];
+
+ // Ingest the document if needed
+ if (
+ latestMessage &&
+ latestMessage.content &&
+ typeof latestMessage.content !== "string"
+ ) {
+ const docFile = latestMessage.content[1]?.doc_url?.url;
+ if (docFile) {
+ await executeOnMain(
+ NODE,
+ "toolRetrievalIngestNewDocument",
+ docFile,
+ data.model?.proxyEngine,
+ );
+ }
+ }
+
+ // Reload the retrieval agent when the active thread changes
+ if (instance.retrievalThreadId !== data.threadId) {
+ await executeOnMain(NODE, "toolRetrievalLoadThreadMemory", data.threadId);
+
+ instance.retrievalThreadId = data.threadId;
+
+ // Update the text splitter
+ await executeOnMain(
+ NODE,
+ "toolRetrievalUpdateTextSplitter",
+ data.thread.assistants[0].tools[0]?.settings?.chunk_size ?? 4000,
+ data.thread.assistants[0].tools[0]?.settings?.chunk_overlap ?? 200,
+ );
+ }
+
+ if (latestMessage.content) {
+ const prompt =
+ typeof latestMessage.content === "string"
+ ? latestMessage.content
+ : latestMessage.content[0].text;
+ // Retrieve the result
+ console.debug("toolRetrievalQuery", latestMessage.content);
+ const retrievalResult = await executeOnMain(
+ NODE,
+ "toolRetrievalQueryResult",
+ prompt,
+ );
+
+ // Update the message content
+ // Using the retrieval template with the result and query
+ if (data.thread?.assistants[0].tools)
+ data.messages[data.messages.length - 1].content =
+ data.thread.assistants[0].tools[0].settings?.retrieval_template
+ ?.replace("{CONTEXT}", retrievalResult)
+ .replace("{QUESTION}", prompt);
+ }
+
+ // Keep only the first (text) content item of each message
+ data.messages = data.messages.map((message) => {
+ if (
+ message.content &&
+ typeof message.content !== "string" &&
+ (message.content.length ?? 0) > 0
+ ) {
+ return {
+ ...message,
+ content: [message.content[0]],
+ };
+ }
+ return message;
+ });
+
+ // Reroute the request to the underlying inference engine (proxyEngine)
+ const output = {
+ ...data,
+ model: {
+ ...data.model,
+ engine: data.model.proxyEngine,
+ },
+ };
+ events.emit(MessageEvent.OnMessageSent, output);
}
/**
@@ -19,15 +154,21 @@ export default class JanAssistantExtension extends AssistantExtension {
onUnload(): void {}
async createAssistant(assistant: Assistant): Promise<void> {
- const assistantDir = join(JanAssistantExtension._homeDir, assistant.id);
+ const assistantDir = await joinPath([
+ JanAssistantExtension._homeDir,
+ assistant.id,
+ ]);
if (!(await fs.existsSync(assistantDir))) await fs.mkdirSync(assistantDir);
// store the assistant metadata json
- const assistantMetadataPath = join(assistantDir, "assistant.json");
+ const assistantMetadataPath = await joinPath([
+ assistantDir,
+ "assistant.json",
+ ]);
try {
await fs.writeFileSync(
assistantMetadataPath,
- JSON.stringify(assistant, null, 2)
+ JSON.stringify(assistant, null, 2),
);
} catch (err) {
console.error(err);
@@ -39,14 +180,17 @@ export default class JanAssistantExtension extends AssistantExtension {
// get all the assistant metadata json
const results: Assistant[] = [];
const allFileName: string[] = await fs.readdirSync(
- JanAssistantExtension._homeDir
+ JanAssistantExtension._homeDir,
);
for (const fileName of allFileName) {
- const filePath = join(JanAssistantExtension._homeDir, fileName);
+ const filePath = await joinPath([
+ JanAssistantExtension._homeDir,
+ fileName,
+ ]);
if (filePath.includes(".DS_Store")) continue;
const jsonFiles: string[] = (await fs.readdirSync(filePath)).filter(
- (file: string) => file === "assistant.json"
+ (file: string) => file === "assistant.json",
);
if (jsonFiles.length !== 1) {
@@ -55,8 +199,8 @@ export default class JanAssistantExtension extends AssistantExtension {
}
const content = await fs.readFileSync(
- join(filePath, jsonFiles[0]),
- "utf-8"
+ await joinPath([filePath, jsonFiles[0]]),
+ "utf-8",
);
const assistant: Assistant =
typeof content === "object" ? content : JSON.parse(content);
@@ -73,7 +217,10 @@ export default class JanAssistantExtension extends AssistantExtension {
}
// remove the directory
- const assistantDir = join(JanAssistantExtension._homeDir, assistant.id);
+ const assistantDir = await joinPath([
+ JanAssistantExtension._homeDir,
+ assistant.id,
+ ]);
await fs.rmdirSync(assistantDir);
return Promise.resolve();
}
@@ -89,7 +236,24 @@ export default class JanAssistantExtension extends AssistantExtension {
description: "A default assistant that can use all downloaded models",
model: "*",
instructions: "",
- tools: undefined,
+ tools: [
+ {
+ type: "retrieval",
+ enabled: false,
+ settings: {
+ top_k: 2,
+ chunk_size: 1024,
+ chunk_overlap: 64,
+ retrieval_template: `Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
+ ----------------
+ CONTEXT: {CONTEXT}
+ ----------------
+ QUESTION: {QUESTION}
+ ----------------
+ Helpful Answer:`,
+ },
+ },
+ ],
file_ids: [],
metadata: undefined,
};
diff --git a/extensions/assistant-extension/src/node/engine.ts b/extensions/assistant-extension/src/node/engine.ts
new file mode 100644
index 000000000..54b2a6ba1
--- /dev/null
+++ b/extensions/assistant-extension/src/node/engine.ts
@@ -0,0 +1,13 @@
+import fs from "fs";
+import path from "path";
+import { getJanDataFolderPath } from "@janhq/core/node";
+
+// Security: do not send engine settings over requests.
+// Read them from disk manually instead.
+export const readEmbeddingEngine = (engineName: string) => {
+ const engineSettings = fs.readFileSync(
+ path.join(getJanDataFolderPath(), "engines", `${engineName}.json`),
+ "utf-8",
+ );
+ return JSON.parse(engineSettings);
+};
diff --git a/extensions/assistant-extension/src/node/index.ts b/extensions/assistant-extension/src/node/index.ts
new file mode 100644
index 000000000..95a7243a4
--- /dev/null
+++ b/extensions/assistant-extension/src/node/index.ts
@@ -0,0 +1,39 @@
+import { getJanDataFolderPath, normalizeFilePath } from "@janhq/core/node";
+import { Retrieval } from "./tools/retrieval";
+import path from "path";
+
+const retrieval = new Retrieval();
+
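+// Node-side entry points, invoked from the renderer via
+// executeOnMain(NODE, "<functionName>", ...args) in src/index.ts.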
+export async function toolRetrievalUpdateTextSplitter(
+ chunkSize: number,
+ chunkOverlap: number,
+) {
+ retrieval.updateTextSplitter(chunkSize, chunkOverlap);
+ return Promise.resolve();
+}
+export async function toolRetrievalIngestNewDocument(
+ file: string,
+ engine: string,
+) {
+ const filePath = path.join(getJanDataFolderPath(), normalizeFilePath(file));
+ const threadPath = path.dirname(filePath.replace("files", ""));
+ retrieval.updateEmbeddingEngine(engine);
+ await retrieval.ingestAgentKnowledge(filePath, `${threadPath}/memory`);
+ return Promise.resolve();
+}
+
+export async function toolRetrievalLoadThreadMemory(threadId: string) {
+ try {
+ await retrieval.loadRetrievalAgent(
+ path.join(getJanDataFolderPath(), "threads", threadId, "memory"),
+ );
+ return Promise.resolve();
+ } catch (err) {
+ console.debug(err);
+ }
+}
+
+export async function toolRetrievalQueryResult(query: string) {
+ const res = await retrieval.generateResult(query);
+ return Promise.resolve(res);
+}
diff --git a/extensions/assistant-extension/src/node/tools/retrieval/index.ts b/extensions/assistant-extension/src/node/tools/retrieval/index.ts
new file mode 100644
index 000000000..cd7e9abb1
--- /dev/null
+++ b/extensions/assistant-extension/src/node/tools/retrieval/index.ts
@@ -0,0 +1,79 @@
+import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";
+import { formatDocumentsAsString } from "langchain/util/document";
+import { PDFLoader } from "langchain/document_loaders/fs/pdf";
+
+import { HNSWLib } from "langchain/vectorstores/hnswlib";
+
+import { OpenAIEmbeddings } from "langchain/embeddings/openai";
+import { readEmbeddingEngine } from "../../engine";
+
+export class Retrieval {
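+ // Wraps a LangChain retrieval pipeline: PDF ingestion via PDFLoader,
+ // per-thread vector memory persisted with HNSWLib, and similarity
+ // search used to build the retrieval context for prompts.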
+ public chunkSize: number = 100;
+ public chunkOverlap?: number = 0;
+ private retriever: any;
+
+ private embeddingModel?: OpenAIEmbeddings = undefined;
+ private textSplitter?: RecursiveCharacterTextSplitter;
+
+ constructor(chunkSize: number = 4000, chunkOverlap: number = 200) {
+ this.updateTextSplitter(chunkSize, chunkOverlap);
+ }
+
+ public updateTextSplitter(chunkSize: number, chunkOverlap: number): void {
+ this.chunkSize = chunkSize;
+ this.chunkOverlap = chunkOverlap;
+ this.textSplitter = new RecursiveCharacterTextSplitter({
+ chunkSize: chunkSize,
+ chunkOverlap: chunkOverlap,
+ });
+ }
+
+ public updateEmbeddingEngine(engine: string): void {
+ // Engine settings do not map directly onto the embedding model params,
+ // so branch per engine manually for now
+ const settings = readEmbeddingEngine(engine);
+ if (engine === "nitro") {
+ this.embeddingModel = new OpenAIEmbeddings(
+ { openAIApiKey: "nitro-embedding" },
+ { basePath: "http://127.0.0.1:3928/v1" }
+ );
+ } else {
+ // Fallback to OpenAI Settings
+ this.embeddingModel = new OpenAIEmbeddings({
+ configuration: {
+ apiKey: settings.api_key,
+ },
+ });
+ }
+ }
+
+ public ingestAgentKnowledge = async (
+ filePath: string,
+ memoryPath: string
+ ): Promise<void> => {
+ const loader = new PDFLoader(filePath, {
+ splitPages: true,
+ });
+ if (!this.embeddingModel) return Promise.reject();
+ const doc = await loader.load();
+ const docs = await this.textSplitter!.splitDocuments(doc);
+ const vectorStore = await HNSWLib.fromDocuments(docs, this.embeddingModel);
+ return vectorStore.save(memoryPath);
+ };
+
+ public loadRetrievalAgent = async (memoryPath: string): Promise<void> => {
+ if (!this.embeddingModel) return Promise.reject();
+ const vectorStore = await HNSWLib.load(memoryPath, this.embeddingModel);
+ this.retriever = vectorStore.asRetriever(2);
+ return Promise.resolve();
+ };
+
+ public generateResult = async (query: string): Promise<string> => {
+ if (!this.retriever) {
+ return Promise.resolve(" ");
+ }
+ const relevantDocs = await this.retriever.getRelevantDocuments(query);
+ const serializedDoc = formatDocumentsAsString(relevantDocs);
+ return Promise.resolve(serializedDoc);
+ };
+}
diff --git a/extensions/assistant-extension/tsconfig.json b/extensions/assistant-extension/tsconfig.json
index 2477d58ce..d3794cace 100644
--- a/extensions/assistant-extension/tsconfig.json
+++ b/extensions/assistant-extension/tsconfig.json
@@ -1,14 +1,20 @@
{
"compilerOptions": {
- "target": "es2016",
- "module": "ES6",
"moduleResolution": "node",
- "outDir": "./dist",
- "esModuleInterop": true,
- "forceConsistentCasingInFileNames": true,
- "strict": false,
+ "target": "es5",
+ "module": "ES2020",
+ "lib": ["es2015", "es2016", "es2017", "dom"],
+ "strict": true,
+ "sourceMap": true,
+ "declaration": true,
+ "allowSyntheticDefaultImports": true,
+ "experimentalDecorators": true,
+ "emitDecoratorMetadata": true,
+ "declarationDir": "dist/types",
+ "outDir": "dist",
+ "importHelpers": true,
+ "typeRoots": ["node_modules/@types"],
"skipLibCheck": true,
- "rootDir": "./src"
},
- "include": ["./src"]
+ "include": ["src"],
}
diff --git a/extensions/assistant-extension/webpack.config.js b/extensions/assistant-extension/webpack.config.js
deleted file mode 100644
index 74d16fc8e..000000000
--- a/extensions/assistant-extension/webpack.config.js
+++ /dev/null
@@ -1,38 +0,0 @@
-const path = require("path");
-const webpack = require("webpack");
-const packageJson = require("./package.json");
-
-module.exports = {
- experiments: { outputModule: true },
- entry: "./src/index.ts", // Adjust the entry point to match your project's main file
- mode: "production",
- module: {
- rules: [
- {
- test: /\.tsx?$/,
- use: "ts-loader",
- exclude: /node_modules/,
- },
- ],
- },
- output: {
- filename: "index.js", // Adjust the output file name as needed
- path: path.resolve(__dirname, "dist"),
- library: { type: "module" }, // Specify ESM output format
- },
- plugins: [
- new webpack.DefinePlugin({
- MODULE: JSON.stringify(`${packageJson.name}/${packageJson.module}`),
- }),
- ],
- resolve: {
- extensions: [".ts", ".js"],
- fallback: {
- path: require.resolve("path-browserify"),
- },
- },
- optimization: {
- minimize: false,
- },
- // Add loaders and other configuration as needed for your project
-};
diff --git a/extensions/conversational-extension/src/index.ts b/extensions/conversational-extension/src/index.ts
index 66becb748..3d28a9c1d 100644
--- a/extensions/conversational-extension/src/index.ts
+++ b/extensions/conversational-extension/src/index.ts
@@ -4,15 +4,14 @@ import {
ConversationalExtension,
Thread,
ThreadMessage,
+ events,
} from '@janhq/core'
/**
* JSONConversationalExtension is a ConversationalExtension implementation that provides
* functionality for managing threads.
*/
-export default class JSONConversationalExtension
- extends ConversationalExtension
-{
+export default class JSONConversationalExtension extends ConversationalExtension {
private static readonly _homeDir = 'file://threads'
private static readonly _threadInfoFileName = 'thread.json'
private static readonly _threadMessagesFileName = 'messages.jsonl'
@@ -119,6 +118,33 @@ export default class JSONConversationalExtension
])
if (!(await fs.existsSync(threadDirPath)))
await fs.mkdirSync(threadDirPath)
+
+ if (message.content[0]?.type === 'image') {
+ const filesPath = await joinPath([threadDirPath, 'files'])
+ if (!(await fs.existsSync(filesPath))) await fs.mkdirSync(filesPath)
+
+ const imagePath = await joinPath([filesPath, `${message.id}.png`])
+ const base64 = message.content[0].text.annotations[0]
+ await this.storeImage(base64, imagePath)
+ if ((await fs.existsSync(imagePath)) && message.content?.length) {
+ // Use file path instead of blob
+ message.content[0].text.annotations[0] = `threads/${message.thread_id}/files/${message.id}.png`
+ }
+ }
+
+ if (message.content[0]?.type === 'pdf') {
+ const filesPath = await joinPath([threadDirPath, 'files'])
+ if (!(await fs.existsSync(filesPath))) await fs.mkdirSync(filesPath)
+
+ const filePath = await joinPath([filesPath, `${message.id}.pdf`])
+ const blob = message.content[0].text.annotations[0]
+ await this.storeFile(blob, filePath)
+
+ if ((await fs.existsSync(filePath)) && message.content?.length) {
+ // Use file path instead of blob
+ message.content[0].text.annotations[0] = `threads/${message.thread_id}/files/${message.id}.pdf`
+ }
+ }
await fs.appendFileSync(threadMessagePath, JSON.stringify(message) + '\n')
Promise.resolve()
} catch (err) {
@@ -126,6 +152,25 @@ export default class JSONConversationalExtension
}
}
+ async storeImage(base64: string, filePath: string): Promise<void> {
+ const base64Data = base64.replace(/^data:image\/\w+;base64,/, '')
+
+ try {
+ await fs.writeBlob(filePath, base64Data)
+ } catch (err) {
+ console.error(err)
+ }
+ }
+
+ async storeFile(base64: string, filePath: string): Promise<void> {
+ const base64Data = base64.replace(/^data:application\/pdf;base64,/, '')
+ try {
+ await fs.writeBlob(filePath, base64Data)
+ } catch (err) {
+ console.error(err)
+ }
+ }
+
async writeMessages(
threadId: string,
messages: ThreadMessage[]
@@ -229,7 +274,11 @@ export default class JSONConversationalExtension
const messages: ThreadMessage[] = []
result.forEach((line: string) => {
- messages.push(JSON.parse(line) as ThreadMessage)
+ try {
+ messages.push(JSON.parse(line) as ThreadMessage)
+ } catch (err) {
+ console.error(err)
+ }
})
return messages
} catch (err) {
diff --git a/extensions/inference-nitro-extension/bin/version.txt b/extensions/inference-nitro-extension/bin/version.txt
index f2722b133..c2c0004f0 100644
--- a/extensions/inference-nitro-extension/bin/version.txt
+++ b/extensions/inference-nitro-extension/bin/version.txt
@@ -1 +1 @@
-0.2.12
+0.3.5
diff --git a/extensions/inference-nitro-extension/package.json b/extensions/inference-nitro-extension/package.json
index 9379e194b..8ad516ad9 100644
--- a/extensions/inference-nitro-extension/package.json
+++ b/extensions/inference-nitro-extension/package.json
@@ -35,11 +35,12 @@
"rollup-plugin-sourcemaps": "^0.6.3",
"rollup-plugin-typescript2": "^0.36.0",
"run-script-os": "^1.1.6",
- "typescript": "^5.3.3"
+ "typescript": "^5.2.2"
},
"dependencies": {
"@janhq/core": "file:../../core",
"@rollup/plugin-replace": "^5.0.5",
+ "@types/os-utils": "^0.0.4",
"fetch-retry": "^5.0.6",
"path-browserify": "^1.0.1",
"rxjs": "^7.8.1",
diff --git a/extensions/inference-nitro-extension/src/@types/global.d.ts b/extensions/inference-nitro-extension/src/@types/global.d.ts
index 5fb41f0f8..bc126337f 100644
--- a/extensions/inference-nitro-extension/src/@types/global.d.ts
+++ b/extensions/inference-nitro-extension/src/@types/global.d.ts
@@ -2,22 +2,6 @@ declare const NODE: string;
declare const INFERENCE_URL: string;
declare const TROUBLESHOOTING_URL: string;
-/**
- * The parameters for the initModel function.
- * @property settings - The settings for the machine learning model.
- * @property settings.ctx_len - The context length.
- * @property settings.ngl - The number of generated tokens.
- * @property settings.cont_batching - Whether to use continuous batching.
- * @property settings.embedding - Whether to use embedding.
- */
-interface EngineSettings {
- ctx_len: number;
- ngl: number;
- cpu_threads: number;
- cont_batching: boolean;
- embedding: boolean;
-}
-
/**
* The response from the initModel function.
* @property error - An error message if the model fails to load.
@@ -26,8 +10,3 @@ interface ModelOperationResponse {
error?: any;
modelFile?: string;
}
-
-interface ResourcesInfo {
- numCpuPhysicalCore: number;
- memAvailable: number;
-}
\ No newline at end of file
diff --git a/extensions/inference-nitro-extension/src/index.ts b/extensions/inference-nitro-extension/src/index.ts
index 735383a61..9f1f00263 100644
--- a/extensions/inference-nitro-extension/src/index.ts
+++ b/extensions/inference-nitro-extension/src/index.ts
@@ -24,6 +24,7 @@ import {
MessageEvent,
ModelEvent,
InferenceEvent,
+ ModelSettingParams,
} from "@janhq/core";
import { requestInference } from "./helpers/sse";
import { ulid } from "ulid";
@@ -45,12 +46,12 @@ export default class JanInferenceNitroExtension extends InferenceExtension {
private _currentModel: Model | undefined;
- private _engineSettings: EngineSettings = {
+ private _engineSettings: ModelSettingParams = {
ctx_len: 2048,
ngl: 100,
cpu_threads: 1,
cont_batching: false,
- embedding: false,
+ embedding: true,
};
controller = new AbortController();
@@ -83,19 +84,19 @@ export default class JanInferenceNitroExtension extends InferenceExtension {
// Events subscription
events.on(MessageEvent.OnMessageSent, (data: MessageRequest) =>
- this.onMessageRequest(data)
+ this.onMessageRequest(data),
);
events.on(ModelEvent.OnModelInit, (model: Model) =>
- this.onModelInit(model)
+ this.onModelInit(model),
);
events.on(ModelEvent.OnModelStop, (model: Model) =>
- this.onModelStop(model)
+ this.onModelStop(model),
);
events.on(InferenceEvent.OnInferenceStopped, () =>
- this.onInferenceStopped()
+ this.onInferenceStopped(),
);
// Attempt to fetch nvidia info
@@ -120,7 +121,7 @@ export default class JanInferenceNitroExtension extends InferenceExtension {
} else {
await fs.writeFileSync(
engineFile,
- JSON.stringify(this._engineSettings, null, 2)
+ JSON.stringify(this._engineSettings, null, 2),
);
}
} catch (err) {
@@ -133,6 +134,7 @@ export default class JanInferenceNitroExtension extends InferenceExtension {
const modelFullPath = await joinPath(["models", model.id]);
+ this._currentModel = model;
const nitroInitResult = await executeOnMain(NODE, "runModel", {
modelFullPath,
model,
@@ -143,12 +145,11 @@ export default class JanInferenceNitroExtension extends InferenceExtension {
return;
}
- this._currentModel = model;
events.emit(ModelEvent.OnModelReady, model);
this.getNitroProcesHealthIntervalId = setInterval(
() => this.periodicallyGetNitroHealth(),
- JanInferenceNitroExtension._intervalHealthCheck
+ JanInferenceNitroExtension._intervalHealthCheck,
);
}
@@ -225,6 +226,9 @@ export default class JanInferenceNitroExtension extends InferenceExtension {
*/
private async onMessageRequest(data: MessageRequest) {
if (data.model?.engine !== InferenceEngine.nitro || !this._currentModel) {
+ console.log(
+ `Model is not nitro or no model loaded ${data.model?.engine} ${this._currentModel}`
+ );
return;
}
diff --git a/extensions/inference-nitro-extension/src/node/index.ts b/extensions/inference-nitro-extension/src/node/index.ts
index 0a7a2e33e..7ba90b556 100644
--- a/extensions/inference-nitro-extension/src/node/index.ts
+++ b/extensions/inference-nitro-extension/src/node/index.ts
@@ -3,11 +3,19 @@ import path from "path";
import { ChildProcessWithoutNullStreams, spawn } from "child_process";
import tcpPortUsed from "tcp-port-used";
import fetchRT from "fetch-retry";
-import { log, getJanDataFolderPath } from "@janhq/core/node";
+import {
+ log,
+ getJanDataFolderPath,
+ getSystemResourceInfo,
+} from "@janhq/core/node";
import { getNitroProcessInfo, updateNvidiaInfo } from "./nvidia";
-import { Model, InferenceEngine, ModelSettingParams } from "@janhq/core";
+import {
+ Model,
+ InferenceEngine,
+ ModelSettingParams,
+ PromptTemplate,
+} from "@janhq/core";
import { executableNitroFile } from "./execute";
-import { physicalCpuCount } from "./utils";
// Polyfill fetch with retry
const fetchRetry = fetchRT(fetch);
@@ -19,25 +27,6 @@ interface ModelInitOptions {
modelFullPath: string;
model: Model;
}
-
-/**
- * The response object of Prompt Template parsing.
- */
-interface PromptTemplate {
- system_prompt?: string;
- ai_prompt?: string;
- user_prompt?: string;
- error?: string;
-}
-
-/**
- * Model setting args for Nitro model load.
- */
-interface ModelSettingArgs extends ModelSettingParams {
- llama_model_path: string;
- cpu_threads: number;
-}
-
// The PORT to use for the Nitro subprocess
const PORT = 3928;
// The HOST address to use for the Nitro subprocess
@@ -60,7 +49,7 @@ let subprocess: ChildProcessWithoutNullStreams | undefined = undefined;
// The current model file url
let currentModelFile: string = "";
// The current model settings
-let currentSettings: ModelSettingArgs | undefined = undefined;
+let currentSettings: ModelSettingParams | undefined = undefined;
/**
* Stops a Nitro subprocess.
@@ -78,7 +67,7 @@ function stopModel(): Promise<void> {
* TODO: Should pass the absolute path of the model file instead of just the name, so we can modularize module.ts into an npm package
*/
async function runModel(
- wrapper: ModelInitOptions
+ wrapper: ModelInitOptions,
): Promise<ModelOperationResponse | void> {
if (wrapper.model.engine !== InferenceEngine.nitro) {
// Not a nitro model
@@ -96,7 +85,7 @@ async function runModel(
const ggufBinFile = files.find(
(file) =>
file === path.basename(currentModelFile) ||
- file.toLowerCase().includes(SUPPORTED_MODEL_FORMAT)
+ file.toLowerCase().includes(SUPPORTED_MODEL_FORMAT),
);
if (!ggufBinFile) return Promise.reject("No GGUF model file found");
@@ -106,7 +95,7 @@ async function runModel(
if (wrapper.model.engine !== InferenceEngine.nitro) {
return Promise.reject("Not a nitro model");
} else {
- const nitroResourceProbe = await getResourcesInfo();
+ const nitroResourceProbe = await getSystemResourceInfo();
// Convert settings.prompt_template to system_prompt, user_prompt, ai_prompt
if (wrapper.model.settings.prompt_template) {
const promptTemplate = wrapper.model.settings.prompt_template;
@@ -119,13 +108,20 @@ async function runModel(
wrapper.model.settings.ai_prompt = prompt.ai_prompt;
}
+ const modelFolderPath = path.join(janRoot, "models", wrapper.model.id);
+ const modelPath = wrapper.model.settings.llama_model_path
+ ? path.join(modelFolderPath, wrapper.model.settings.llama_model_path)
+ : currentModelFile;
+
currentSettings = {
- llama_model_path: currentModelFile,
...wrapper.model.settings,
+ llama_model_path: modelPath,
// This is critical and requires real CPU physical core count (or performance core)
cpu_threads: Math.max(1, nitroResourceProbe.numCpuPhysicalCore),
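+ // Include the multimodal projector path for vision models
+ // (e.g. bakllava-1, which ships an mmproj file alongside the weights).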
+ ...(wrapper.model.settings.mmproj && {
+ mmproj: path.join(modelFolderPath, wrapper.model.settings.mmproj),
+ }),
};
- console.log(currentSettings);
return runNitroAndLoadModel();
}
}
@@ -184,10 +180,10 @@ function promptTemplateConverter(promptTemplate: string): PromptTemplate {
const system_prompt = promptTemplate.substring(0, systemIndex);
const user_prompt = promptTemplate.substring(
systemIndex + systemMarker.length,
- promptIndex
+ promptIndex,
);
const ai_prompt = promptTemplate.substring(
- promptIndex + promptMarker.length
+ promptIndex + promptMarker.length,
);
// Return the split parts
@@ -197,7 +193,7 @@ function promptTemplateConverter(promptTemplate: string): PromptTemplate {
const promptIndex = promptTemplate.indexOf(promptMarker);
const user_prompt = promptTemplate.substring(0, promptIndex);
const ai_prompt = promptTemplate.substring(
- promptIndex + promptMarker.length
+ promptIndex + promptMarker.length,
);
// Return the split parts
@@ -213,6 +209,9 @@ function promptTemplateConverter(promptTemplate: string): PromptTemplate {
* @returns A Promise that resolves when the model is loaded successfully, or rejects with an error message if the model is not found or fails to load.
*/
function loadLLMModel(settings: any): Promise<Response> {
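+ // Default ngl (number of GPU layers to offload) to 100, which effectively
+ // offloads all layers for most models, when the settings omit it.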
+ if (!settings?.ngl) {
+ settings.ngl = 100;
+ }
log(`[NITRO]::Debug: Loading model with params ${JSON.stringify(settings)}`);
return fetchRetry(NITRO_HTTP_LOAD_MODEL_URL, {
method: "POST",
@@ -226,14 +225,14 @@ function loadLLMModel(settings: any): Promise {
.then((res) => {
log(
`[NITRO]::Debug: Load model success with response ${JSON.stringify(
- res
- )}`
+ res,
+ )}`,
);
return Promise.resolve(res);
})
.catch((err) => {
log(`[NITRO]::Error: Load model failed with error ${err}`);
- return Promise.reject();
+ return Promise.reject(err);
});
}
async function validateModelStatus(): Promise<void> {
retryDelay: 500,
}).then(async (res: Response) => {
log(
- `[NITRO]::Debug: Validate model state success with response ${JSON.stringify(
- res
+ `[NITRO]::Debug: Validate model state with response ${JSON.stringify(
+ res.status
)}`
);
// If the response is OK, check model_loaded status.
@@ -265,9 +264,19 @@ async function validateModelStatus(): Promise {
// If the model is loaded, return an empty object.
// Otherwise, return an object with an error message.
if (body.model_loaded) {
+ log(
+ `[NITRO]::Debug: Validate model state success with response ${JSON.stringify(
+ body
+ )}`
+ );
return Promise.resolve();
}
}
+ log(
+ `[NITRO]::Debug: Validate model state failed with response ${JSON.stringify(
+ res.statusText
+ )}`
+ );
return Promise.reject("Validate model status failed");
});
}
@@ -308,7 +317,7 @@ function spawnNitroProcess(): Promise {
const args: string[] = ["1", LOCAL_HOST, PORT.toString()];
// Execute the binary
log(
- `[NITRO]::Debug: Spawn nitro at path: ${executableOptions.executablePath}, and args: ${args}`
+ `[NITRO]::Debug: Spawn nitro at path: ${executableOptions.executablePath}, and args: ${args}`,
);
subprocess = spawn(
executableOptions.executablePath,
@@ -319,7 +328,7 @@ function spawnNitroProcess(): Promise {
...process.env,
CUDA_VISIBLE_DEVICES: executableOptions.cudaVisibleDevices,
},
- }
+ },
);
// Handle subprocess output
@@ -344,22 +353,6 @@ function spawnNitroProcess(): Promise {
});
}
-/**
- * Get the system resources information
- * TODO: Move to Core so that it can be reused
- */
-function getResourcesInfo(): Promise<ResourcesInfo> {
- return new Promise(async (resolve) => {
- const cpu = await physicalCpuCount();
- log(`[NITRO]::CPU informations - ${cpu}`);
- const response: ResourcesInfo = {
- numCpuPhysicalCore: cpu,
- memAvailable: 0,
- };
- resolve(response);
- });
-}
-
/**
* Every module should have a dispose function
* This will be called when the extension is unloaded and should clean up any resources
diff --git a/extensions/inference-nitro-extension/src/node/utils.ts b/extensions/inference-nitro-extension/src/node/utils.ts
deleted file mode 100644
index c7ef2e9a6..000000000
--- a/extensions/inference-nitro-extension/src/node/utils.ts
+++ /dev/null
@@ -1,56 +0,0 @@
-import os from "os";
-import childProcess from "child_process";
-
-function exec(command: string): Promise<string> {
- return new Promise((resolve, reject) => {
- childProcess.exec(command, { encoding: "utf8" }, (error, stdout) => {
- if (error) {
- reject(error);
- } else {
- resolve(stdout);
- }
- });
- });
-}
-
-let amount: number;
-const platform = os.platform();
-
-export async function physicalCpuCount(): Promise<number> {
- return new Promise((resolve, reject) => {
- if (platform === "linux") {
- exec('lscpu -p | egrep -v "^#" | sort -u -t, -k 2,4 | wc -l')
- .then((output) => {
- amount = parseInt(output.trim(), 10);
- resolve(amount);
- })
- .catch(reject);
- } else if (platform === "darwin") {
- exec("sysctl -n hw.physicalcpu_max")
- .then((output) => {
- amount = parseInt(output.trim(), 10);
- resolve(amount);
- })
- .catch(reject);
- } else if (platform === "win32") {
- exec("WMIC CPU Get NumberOfCores")
- .then((output) => {
- amount = output
- .split(os.EOL)
- .map((line: string) => parseInt(line))
- .filter((value: number) => !isNaN(value))
- .reduce((sum: number, number: number) => sum + number, 1);
- resolve(amount);
- })
- .catch(reject);
- } else {
- const cores = os.cpus().filter((cpu: any, index: number) => {
- const hasHyperthreading = cpu.model.includes("Intel");
- const isOdd = index % 2 === 1;
- return !hasHyperthreading || isOdd;
- });
- amount = cores.length;
- resolve(amount);
- }
- });
-}
diff --git a/extensions/inference-openai-extension/src/index.ts b/extensions/inference-openai-extension/src/index.ts
index 0b53d7c21..fd1230bc7 100644
--- a/extensions/inference-openai-extension/src/index.ts
+++ b/extensions/inference-openai-extension/src/index.ts
@@ -15,6 +15,7 @@ import {
ThreadMessage,
events,
fs,
+ InferenceEngine,
BaseExtension,
MessageEvent,
ModelEvent,
@@ -114,7 +115,7 @@ export default class JanInferenceOpenAIExtension extends BaseExtension {
}
}
private static async handleModelInit(model: OpenAIModel) {
- if (model.engine !== "openai") {
+ if (model.engine !== InferenceEngine.openai) {
return;
} else {
JanInferenceOpenAIExtension._currentModel = model;
diff --git a/extensions/inference-openai-extension/tsconfig.json b/extensions/inference-openai-extension/tsconfig.json
index b48175a16..7bfdd9009 100644
--- a/extensions/inference-openai-extension/tsconfig.json
+++ b/extensions/inference-openai-extension/tsconfig.json
@@ -3,13 +3,12 @@
"target": "es2016",
"module": "ES6",
"moduleResolution": "node",
-
"outDir": "./dist",
"esModuleInterop": true,
"forceConsistentCasingInFileNames": true,
"strict": false,
"skipLibCheck": true,
- "rootDir": "./src"
+ "rootDir": "./src",
},
- "include": ["./src"]
+ "include": ["./src"],
}
diff --git a/extensions/inference-triton-trtllm-extension/tsconfig.json b/extensions/inference-triton-trtllm-extension/tsconfig.json
index b48175a16..7bfdd9009 100644
--- a/extensions/inference-triton-trtllm-extension/tsconfig.json
+++ b/extensions/inference-triton-trtllm-extension/tsconfig.json
@@ -3,13 +3,12 @@
"target": "es2016",
"module": "ES6",
"moduleResolution": "node",
-
"outDir": "./dist",
"esModuleInterop": true,
"forceConsistentCasingInFileNames": true,
"strict": false,
"skipLibCheck": true,
- "rootDir": "./src"
+ "rootDir": "./src",
},
- "include": ["./src"]
+ "include": ["./src"],
}
diff --git a/extensions/model-extension/package.json b/extensions/model-extension/package.json
index 376e724b4..86f177d14 100644
--- a/extensions/model-extension/package.json
+++ b/extensions/model-extension/package.json
@@ -1,6 +1,6 @@
{
"name": "@janhq/model-extension",
- "version": "1.0.22",
+ "version": "1.0.23",
"description": "Model Management Extension provides model exploration and seamless downloads",
"main": "dist/index.js",
"module": "dist/module.js",
diff --git a/extensions/model-extension/src/index.ts b/extensions/model-extension/src/index.ts
index f41999bd8..5640177a0 100644
--- a/extensions/model-extension/src/index.ts
+++ b/extensions/model-extension/src/index.ts
@@ -80,16 +80,34 @@ export default class JanModelExtension extends ModelExtension {
const modelDirPath = await joinPath([JanModelExtension._homeDir, model.id])
if (!(await fs.existsSync(modelDirPath))) await fs.mkdirSync(modelDirPath)
- // try to retrieve the download file name from the source url
- // if it fails, use the model ID as the file name
- const extractedFileName = await model.source_url.split('/').pop()
+ if (model.sources.length > 1) {
+ // path to model binaries
+ for (const source of model.sources) {
+ let path = await joinPath([modelDirPath, this.extractFileName(source.url)])
+ if (source.filename) {
+ path = await joinPath([modelDirPath, source.filename])
+ }
+
+ downloadFile(source.url, path, network)
+ }
+ } else {
+ const fileName = this.extractFileName(model.sources[0]?.url)
+ const path = await joinPath([modelDirPath, fileName])
+ downloadFile(model.sources[0]?.url, path, network)
+ }
+ }
+
+ /**
+ * Tries to retrieve the download file name from the source url, appending the supported model format extension when it is missing.
+ */
+ private extractFileName(url: string): string {
+ const extractedFileName = url.split('/').pop()
const fileName = extractedFileName
.toLowerCase()
.endsWith(JanModelExtension._supportedModelFormat)
? extractedFileName
- : model.id
- const path = await joinPath([modelDirPath, fileName])
- downloadFile(model.source_url, path, network)
+ : extractedFileName + JanModelExtension._supportedModelFormat
+ return fileName
}
/**
@@ -98,6 +116,7 @@ export default class JanModelExtension extends ModelExtension {
* @returns {Promise<void>} A promise that resolves when the download has been cancelled.
*/
async cancelModelDownload(modelId: string): Promise<void> {
+ const model = await this.getConfiguredModels()
return abortDownload(
await joinPath([JanModelExtension._homeDir, modelId, modelId])
).then(async () => {
@@ -163,15 +182,16 @@ export default class JanModelExtension extends ModelExtension {
.then((files: string[]) => {
// or model binary exists in the directory
// model binary name can match the model ID or be a .gguf file, and must not be an incomplete model file
+ // TODO: Check diff between urls, filenames
return (
files.includes(modelDir) ||
- files.some(
+ files.filter(
(file) =>
file
.toLowerCase()
.includes(JanModelExtension._supportedModelFormat) &&
!file.endsWith(JanModelExtension._incompletedModelFileName)
- )
+ )?.length >= model.sources.length
)
})
}
@@ -198,7 +218,6 @@ export default class JanModelExtension extends ModelExtension {
const readJsonPromises = allDirectories.map(async (dirName) => {
// filter out directories that don't match the selector
-
// read model.json
const jsonPath = await joinPath([
JanModelExtension._homeDir,
@@ -226,7 +245,21 @@ export default class JanModelExtension extends ModelExtension {
const modelData = results.map((result) => {
if (result.status === 'fulfilled') {
try {
- return result.value as Model
+ // This to ensure backward compatibility with `model.json` with `source_url`
+ const tmpModel =
+ typeof result.value === 'object'
+ ? result.value
+ : JSON.parse(result.value)
+ if (tmpModel['source_url'] != null) {
+ tmpModel['sources'] = [
+ {
+ filename: tmpModel.id,
+ url: tmpModel['source_url'],
+ },
+ ]
+ }
+
+ return tmpModel as Model
} catch {
console.debug(`Unable to parse model metadata: ${result.value}`)
return undefined
diff --git a/extensions/monitoring-extension/src/index.ts b/extensions/monitoring-extension/src/index.ts
index d3f20b437..9297a770f 100644
--- a/extensions/monitoring-extension/src/index.ts
+++ b/extensions/monitoring-extension/src/index.ts
@@ -1,5 +1,4 @@
-import { MonitoringExtension } from "@janhq/core";
-import { executeOnMain } from "@janhq/core";
+import { MonitoringExtension, executeOnMain } from "@janhq/core";
/**
* JanMonitoringExtension is a extension that provides system monitoring functionality.
diff --git a/models/bakllava-1/model.json b/models/bakllava-1/model.json
new file mode 100644
index 000000000..91f6f4136
--- /dev/null
+++ b/models/bakllava-1/model.json
@@ -0,0 +1,33 @@
+{
+ "sources": [
+ {
+ "filename": "ggml-model-q5_k.gguf",
+ "url": "https://huggingface.co/mys/ggml_bakllava-1/resolve/main/ggml-model-q5_k.gguf"
+ },
+ {
+ "filename": "mmproj-model-f16.gguf",
+ "url": "https://huggingface.co/mys/ggml_bakllava-1/resolve/main/mmproj-model-f16.gguf"
+ }
+ ],
+ "id": "bakllava-1",
+ "object": "model",
+ "name": "BakLlava 1",
+ "version": "1.0",
+ "description": "BakLlava 1 can bring vision understanding to Jan",
+ "format": "gguf",
+ "settings": {
+ "ctx_len": 4096,
+ "prompt_template": "\n### Instruction:\n{prompt}\n### Response:\n",
+ "llama_model_path": "ggml-model-q5_k.gguf",
+ "mmproj": "mmproj-model-f16.gguf"
+ },
+ "parameters": {
+ "max_tokens": 4096
+ },
+ "metadata": {
+ "author": "Mys",
+ "tags": ["Vision"],
+ "size": 5750000000
+ },
+ "engine": "nitro"
+}
diff --git a/models/capybara-34b/model.json b/models/capybara-34b/model.json
index ffca28c6d..af029bbb0 100644
--- a/models/capybara-34b/model.json
+++ b/models/capybara-34b/model.json
@@ -1,29 +1,34 @@
{
- "source_url": "https://huggingface.co/TheBloke/Nous-Capybara-34B-GGUF/resolve/main/nous-capybara-34b.Q5_K_M.gguf",
- "id": "capybara-34b",
- "object": "model",
- "name": "Capybara 200k 34B Q5",
- "version": "1.0",
- "description": "Nous Capybara 34B is a long context length model that supports 200K tokens.",
- "format": "gguf",
- "settings": {
- "ctx_len": 4096,
- "prompt_template": "USER:\n{prompt}\nASSISTANT:"
- },
- "parameters": {
- "temperature": 0.7,
- "top_p": 0.95,
- "stream": true,
- "max_tokens": 4096,
- "stop": [],
- "frequency_penalty": 0,
- "presence_penalty": 0
- },
- "metadata": {
- "author": "NousResearch, The Bloke",
- "tags": ["34B", "Finetuned"],
- "size": 24320000000
- },
- "engine": "nitro"
- }
-
\ No newline at end of file
+ "sources": [
+ {
+ "filename": "nous-capybara-34b.Q5_K_M.gguf",
+ "url": "https://huggingface.co/TheBloke/Nous-Capybara-34B-GGUF/resolve/main/nous-capybara-34b.Q5_K_M.gguf"
+ }
+ ],
+ "id": "capybara-34b",
+ "object": "model",
+ "name": "Capybara 200k 34B Q5",
+ "version": "1.0",
+ "description": "Nous Capybara 34B is a long context length model that supports 200K tokens.",
+ "format": "gguf",
+ "settings": {
+ "ctx_len": 4096,
+ "prompt_template": "USER:\n{prompt}\nASSISTANT:",
+ "llama_model_path": "nous-capybara-34b.Q5_K_M.gguf"
+ },
+ "parameters": {
+ "temperature": 0.7,
+ "top_p": 0.95,
+ "stream": true,
+ "max_tokens": 4096,
+ "stop": [],
+ "frequency_penalty": 0,
+ "presence_penalty": 0
+ },
+ "metadata": {
+ "author": "NousResearch, The Bloke",
+ "tags": ["34B", "Finetuned"],
+ "size": 24320000000
+ },
+ "engine": "nitro"
+}
diff --git a/models/codeninja-1.0-7b/model.json b/models/codeninja-1.0-7b/model.json
index 98fbac5df..4ffe355d1 100644
--- a/models/codeninja-1.0-7b/model.json
+++ b/models/codeninja-1.0-7b/model.json
@@ -1,29 +1,33 @@
{
- "source_url": "https://huggingface.co/beowolx/CodeNinja-1.0-OpenChat-7B-GGUF/resolve/main/codeninja-1.0-openchat-7b.Q4_K_M.gguf",
- "id": "codeninja-1.0-7b",
- "object": "model",
- "name": "CodeNinja 7B Q4",
- "version": "1.0",
- "description": "CodeNinja is good for coding tasks and can handle various languages including Python, C, C++, Rust, Java, JavaScript, and more.",
- "format": "gguf",
- "settings": {
- "ctx_len": 4096,
- "prompt_template": "GPT4 Correct User: {prompt}<|end_of_turn|>GPT4 Correct Assistant:"
- },
- "parameters": {
- "temperature": 0.7,
- "top_p": 0.95,
- "stream": true,
- "max_tokens": 4096,
- "stop": ["<|end_of_turn|>"],
- "frequency_penalty": 0,
- "presence_penalty": 0
- },
- "metadata": {
- "author": "Beowolx",
- "tags": ["7B", "Finetuned"],
- "size": 4370000000
- },
- "engine": "nitro"
- }
-
\ No newline at end of file
+ "sources": [
+ {
+ "filename": "codeninja-1.0-openchat-7b.Q4_K_M.gguf",
+ "url": "https://huggingface.co/beowolx/CodeNinja-1.0-OpenChat-7B-GGUF/resolve/main/codeninja-1.0-openchat-7b.Q4_K_M.gguf"
+ }
+ ],
+ "id": "codeninja-1.0-7b",
+ "object": "model",
+ "name": "CodeNinja 7B Q4",
+ "version": "1.0",
+ "description": "CodeNinja is good for coding tasks and can handle various languages including Python, C, C++, Rust, Java, JavaScript, and more.",
+ "format": "gguf",
+ "settings": {
+ "ctx_len": 4096,
+ "prompt_template": "GPT4 Correct User: {prompt}<|end_of_turn|>GPT4 Correct Assistant:",
+ "llama_model_path": "codeninja-1.0-openchat-7b.Q4_K_M.gguf"
+ },
+ "parameters": {
+ "temperature": 0.7,
+ "top_p": 0.95,
+ "stream": true,
+ "max_tokens": 4096,
+ "frequency_penalty": 0,
+ "presence_penalty": 0
+ },
+ "metadata": {
+ "author": "Beowolx",
+ "tags": ["7B", "Finetuned"],
+ "size": 4370000000
+ },
+ "engine": "nitro"
+}
diff --git a/models/config/default-model.json b/models/config/default-model.json
index 2263625f4..2fa2df2ee 100644
--- a/models/config/default-model.json
+++ b/models/config/default-model.json
@@ -2,7 +2,12 @@
"object": "model",
"version": 1,
"format": "gguf",
- "source_url": "N/A",
+ "sources": [
+ {
+ "url": "N/A",
+ "filename": "N/A"
+ }
+ ],
"id": "N/A",
"name": "N/A",
"created": 0,
@@ -10,7 +15,8 @@
"settings": {
"ctx_len": 4096,
"embedding": false,
- "prompt_template": "{system_message}\n### Instruction: {prompt}\n### Response:"
+ "prompt_template": "{system_message}\n### Instruction: {prompt}\n### Response:",
+ "llama_model_path": "N/A"
},
"parameters": {
"temperature": 0.7,
diff --git a/models/deepseek-coder-1.3b/model.json b/models/deepseek-coder-1.3b/model.json
index 8c454802f..365dbfd2f 100644
--- a/models/deepseek-coder-1.3b/model.json
+++ b/models/deepseek-coder-1.3b/model.json
@@ -1,29 +1,34 @@
-
{
- "source_url": "https://huggingface.co/TheBloke/deepseek-coder-1.3b-instruct-GGUF/resolve/main/deepseek-coder-1.3b-instruct.Q8_0.gguf",
- "id": "deepseek-coder-1.3b",
- "object": "model",
- "name": "Deepseek Coder 1.3B Q8",
- "version": "1.0",
- "description": "Deepseek Coder excelled in project-level code completion with advanced capabilities across multiple programming languages.",
- "format": "gguf",
- "settings": {
- "ctx_len": 4096,
- "prompt_template": "### Instruction:\n{prompt}\n### Response:"
- },
- "parameters": {
- "temperature": 0.7,
- "top_p": 0.95,
- "stream": true,
- "max_tokens": 4096,
- "stop": [],
- "frequency_penalty": 0,
- "presence_penalty": 0
- },
- "metadata": {
- "author": "Deepseek, The Bloke",
- "tags": ["Tiny", "Foundational Model"],
- "size": 1430000000
- },
- "engine": "nitro"
- }
+ "sources": [
+ {
+ "filename": "deepseek-coder-1.3b-instruct.Q8_0.gguf",
+ "url": "https://huggingface.co/TheBloke/deepseek-coder-1.3b-instruct-GGUF/resolve/main/deepseek-coder-1.3b-instruct.Q8_0.gguf"
+ }
+ ],
+ "id": "deepseek-coder-1.3b",
+ "object": "model",
+ "name": "Deepseek Coder 1.3B Q8",
+ "version": "1.0",
+ "description": "Deepseek Coder excelled in project-level code completion with advanced capabilities across multiple programming languages.",
+ "format": "gguf",
+ "settings": {
+ "ctx_len": 4096,
+ "prompt_template": "### Instruction:\n{prompt}\n### Response:",
+ "llama_model_path": "deepseek-coder-1.3b-instruct.Q8_0.gguf"
+ },
+ "parameters": {
+ "temperature": 0.7,
+ "top_p": 0.95,
+ "stream": true,
+ "max_tokens": 4096,
+ "stop": [],
+ "frequency_penalty": 0,
+ "presence_penalty": 0
+ },
+ "metadata": {
+ "author": "Deepseek, The Bloke",
+ "tags": ["Tiny", "Foundational Model"],
+ "size": 1430000000
+ },
+ "engine": "nitro"
+}
diff --git a/models/deepseek-coder-34b/model.json b/models/deepseek-coder-34b/model.json
index 905a66033..8e17b9563 100644
--- a/models/deepseek-coder-34b/model.json
+++ b/models/deepseek-coder-34b/model.json
@@ -1,29 +1,34 @@
{
- "source_url": "https://huggingface.co/TheBloke/deepseek-coder-33B-instruct-GGUF/resolve/main/deepseek-coder-33b-instruct.Q5_K_M.gguf",
- "id": "deepseek-coder-34b",
- "object": "model",
- "name": "Deepseek Coder 33B Q5",
- "version": "1.0",
- "description": "Deepseek Coder excelled in project-level code completion with advanced capabilities across multiple programming languages.",
- "format": "gguf",
- "settings": {
- "ctx_len": 4096,
- "prompt_template": "### Instruction:\n{prompt}\n### Response:"
- },
- "parameters": {
- "temperature": 0.7,
- "top_p": 0.95,
- "stream": true,
- "max_tokens": 4096,
- "stop": [],
- "frequency_penalty": 0,
- "presence_penalty": 0
- },
- "metadata": {
- "author": "Deepseek, The Bloke",
- "tags": ["34B", "Foundational Model"],
- "size": 19940000000
- },
- "engine": "nitro"
- }
-
\ No newline at end of file
+ "sources": [
+ {
+ "filename": "deepseek-coder-33b-instruct.Q5_K_M.gguf",
+ "url": "https://huggingface.co/TheBloke/deepseek-coder-33B-instruct-GGUF/resolve/main/deepseek-coder-33b-instruct.Q5_K_M.gguf"
+ }
+ ],
+ "id": "deepseek-coder-34b",
+ "object": "model",
+ "name": "Deepseek Coder 33B Q5",
+ "version": "1.0",
+ "description": "Deepseek Coder excelled in project-level code completion with advanced capabilities across multiple programming languages.",
+ "format": "gguf",
+ "settings": {
+ "ctx_len": 4096,
+ "prompt_template": "### Instruction:\n{prompt}\n### Response:",
+ "llama_model_path": "deepseek-coder-33b-instruct.Q5_K_M.gguf"
+ },
+ "parameters": {
+ "temperature": 0.7,
+ "top_p": 0.95,
+ "stream": true,
+ "max_tokens": 4096,
+ "stop": [],
+ "frequency_penalty": 0,
+ "presence_penalty": 0
+ },
+ "metadata": {
+ "author": "Deepseek, The Bloke",
+ "tags": ["34B", "Foundational Model"],
+ "size": 19940000000
+ },
+ "engine": "nitro"
+}
diff --git a/models/dolphin-2.7-mixtral-8x7b/model.json b/models/dolphin-2.7-mixtral-8x7b/model.json
index 67bc6737c..7df4fbfbd 100644
--- a/models/dolphin-2.7-mixtral-8x7b/model.json
+++ b/models/dolphin-2.7-mixtral-8x7b/model.json
@@ -1,28 +1,34 @@
{
- "source_url": "https://huggingface.co/TheBloke/dolphin-2.7-mixtral-8x7b-GGUF/resolve/main/dolphin-2.7-mixtral-8x7b.Q4_K_M.gguf",
- "id": "dolphin-2.7-mixtral-8x7b",
- "object": "model",
- "name": "Dolphin 8x7B Q4",
- "version": "1.0",
- "description": "Dolphin is an uncensored model built on Mixtral-8x7b. It is good at programming tasks.",
- "format": "gguf",
- "settings": {
- "ctx_len": 4096,
- "prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant"
- },
- "parameters": {
- "temperature": 0.7,
- "top_p": 0.95,
- "stream": true,
- "max_tokens": 4096,
- "stop": [],
- "frequency_penalty": 0,
- "presence_penalty": 0
- },
- "metadata": {
- "author": "Cognitive Computations, TheBloke",
- "tags": ["70B", "Finetuned"],
- "size": 26440000000
- },
- "engine": "nitro"
- }
+ "sources": [
+ {
+ "filename": "dolphin-2.7-mixtral-8x7b.Q4_K_M.gguf",
+ "url": "https://huggingface.co/TheBloke/dolphin-2.7-mixtral-8x7b-GGUF/resolve/main/dolphin-2.7-mixtral-8x7b.Q4_K_M.gguf"
+ }
+ ],
+ "id": "dolphin-2.7-mixtral-8x7b",
+ "object": "model",
+ "name": "Dolphin 8x7B Q4",
+ "version": "1.0",
+  "description": "Dolphin is an uncensored model built on Mixtral-8x7b. It performs well on programming tasks.",
+ "format": "gguf",
+ "settings": {
+ "ctx_len": 4096,
+ "prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant",
+ "llama_model_path": "dolphin-2.7-mixtral-8x7b.Q4_K_M.gguf"
+ },
+ "parameters": {
+ "temperature": 0.7,
+ "top_p": 0.95,
+ "stream": true,
+ "max_tokens": 4096,
+ "stop": [],
+ "frequency_penalty": 0,
+ "presence_penalty": 0
+ },
+ "metadata": {
+ "author": "Cognitive Computations, TheBloke",
+ "tags": ["70B", "Finetuned"],
+ "size": 26440000000
+ },
+ "engine": "nitro"
+}
diff --git a/models/gpt-3.5-turbo-16k-0613/model.json b/models/gpt-3.5-turbo-16k-0613/model.json
index 0df4119a5..aa57e1154 100644
--- a/models/gpt-3.5-turbo-16k-0613/model.json
+++ b/models/gpt-3.5-turbo-16k-0613/model.json
@@ -1,18 +1,20 @@
{
- "source_url": "https://openai.com",
- "id": "gpt-3.5-turbo-16k-0613",
- "object": "model",
- "name": "OpenAI GPT 3.5 Turbo 16k 0613",
- "version": "1.0",
- "description": "OpenAI GPT 3.5 Turbo 16k 0613 model is extremely good",
- "format": "api",
- "settings": {},
- "parameters": {},
- "metadata": {
- "author": "OpenAI",
- "tags": ["General", "Big Context Length"]
- },
- "engine": "openai",
- "state": "ready"
+ "sources": [
+ {
+ "url": "https://openai.com"
+ }
+ ],
+ "id": "gpt-3.5-turbo-16k-0613",
+ "object": "model",
+ "name": "OpenAI GPT 3.5 Turbo 16k 0613",
+ "version": "1.0",
+  "description": "OpenAI GPT 3.5 Turbo 16k 0613 is a hosted model with a 16k-token context window, served through the OpenAI API.",
+ "format": "api",
+ "settings": {},
+ "parameters": {},
+ "metadata": {
+ "author": "OpenAI",
+ "tags": ["General", "Big Context Length"]
+ },
+ "engine": "openai"
}
-
\ No newline at end of file
diff --git a/models/gpt-3.5-turbo/model.json b/models/gpt-3.5-turbo/model.json
index a7dbf3d4e..617f0d056 100644
--- a/models/gpt-3.5-turbo/model.json
+++ b/models/gpt-3.5-turbo/model.json
@@ -1,18 +1,20 @@
{
- "source_url": "https://openai.com",
- "id": "gpt-3.5-turbo",
- "object": "model",
- "name": "OpenAI GPT 3.5 Turbo",
- "version": "1.0",
- "description": "OpenAI GPT 3.5 Turbo model is extremely good",
- "format": "api",
- "settings": {},
- "parameters": {},
- "metadata": {
- "author": "OpenAI",
- "tags": ["General", "Big Context Length"]
- },
- "engine": "openai",
- "state": "ready"
+ "sources": [
+ {
+ "url": "https://openai.com"
+ }
+ ],
+ "id": "gpt-3.5-turbo",
+ "object": "model",
+ "name": "OpenAI GPT 3.5 Turbo",
+ "version": "1.0",
+  "description": "OpenAI GPT 3.5 Turbo is a fast, capable hosted model served through the OpenAI API.",
+ "format": "api",
+ "settings": {},
+ "parameters": {},
+ "metadata": {
+ "author": "OpenAI",
+ "tags": ["General", "Big Context Length"]
+ },
+ "engine": "openai"
}
-
\ No newline at end of file
diff --git a/models/gpt-4/model.json b/models/gpt-4/model.json
index 3f17e65cb..7aa2338e3 100644
--- a/models/gpt-4/model.json
+++ b/models/gpt-4/model.json
@@ -1,18 +1,20 @@
{
- "source_url": "https://openai.com",
- "id": "gpt-4",
- "object": "model",
- "name": "OpenAI GPT 4",
- "version": "1.0",
- "description": "OpenAI GPT 4 model is extremely good",
- "format": "api",
- "settings": {},
- "parameters": {},
- "metadata": {
- "author": "OpenAI",
- "tags": ["General", "Big Context Length"]
- },
- "engine": "openai",
- "state": "ready"
+ "sources": [
+ {
+ "url": "https://openai.com"
+ }
+ ],
+ "id": "gpt-4",
+ "object": "model",
+ "name": "OpenAI GPT 4",
+ "version": "1.0",
+  "description": "OpenAI GPT 4 is a highly capable hosted model served through the OpenAI API.",
+ "format": "api",
+ "settings": {},
+ "parameters": {},
+ "metadata": {
+ "author": "OpenAI",
+ "tags": ["General", "Big Context Length"]
+ },
+ "engine": "openai"
}
-
\ No newline at end of file
diff --git a/models/llama2-chat-70b-q4/model.json b/models/llama2-chat-70b-q4/model.json
index 2595ab677..2459e426f 100644
--- a/models/llama2-chat-70b-q4/model.json
+++ b/models/llama2-chat-70b-q4/model.json
@@ -1,29 +1,34 @@
{
- "source_url": "https://huggingface.co/TheBloke/Llama-2-70B-Chat-GGUF/resolve/main/llama-2-70b-chat.Q4_K_M.gguf",
- "id": "llama2-chat-70b-q4",
- "object": "model",
- "name": "Llama 2 Chat 70B Q4",
- "version": "1.0",
- "description": "Llama 2 Chat 7b model, specifically designed for a comprehensive understanding through training on extensive internet data.",
- "format": "gguf",
- "settings": {
- "ctx_len": 4096,
-    "prompt_template": "[INST] <<SYS>>\n{system_message}<</SYS>>\n{prompt}[/INST]"
- },
- "parameters": {
- "temperature": 0.7,
- "top_p": 0.95,
- "stream": true,
- "max_tokens": 4096,
- "stop": [],
- "frequency_penalty": 0,
- "presence_penalty": 0
- },
- "metadata": {
- "author": "MetaAI, The Bloke",
- "tags": ["70B", "Foundational Model"],
- "size": 43920000000
- },
- "engine": "nitro"
- }
-
\ No newline at end of file
+ "sources": [
+ {
+ "filename": "llama-2-70b-chat.Q4_K_M.gguf",
+ "url": "https://huggingface.co/TheBloke/Llama-2-70B-Chat-GGUF/resolve/main/llama-2-70b-chat.Q4_K_M.gguf"
+ }
+ ],
+ "id": "llama2-chat-70b-q4",
+ "object": "model",
+ "name": "Llama 2 Chat 70B Q4",
+ "version": "1.0",
+  "description": "Llama 2 Chat 70B model, trained on extensive internet data for broad language understanding.",
+ "format": "gguf",
+ "settings": {
+ "ctx_len": 4096,
+    "prompt_template": "[INST] <<SYS>>\n{system_message}<</SYS>>\n{prompt}[/INST]",
+ "llama_model_path": "llama-2-70b-chat.Q4_K_M.gguf"
+ },
+ "parameters": {
+ "temperature": 0.7,
+ "top_p": 0.95,
+ "stream": true,
+ "max_tokens": 4096,
+ "stop": [],
+ "frequency_penalty": 0,
+ "presence_penalty": 0
+ },
+ "metadata": {
+ "author": "MetaAI, The Bloke",
+ "tags": ["70B", "Foundational Model"],
+ "size": 43920000000
+ },
+ "engine": "nitro"
+}
diff --git a/models/llama2-chat-7b-q4/model.json b/models/llama2-chat-7b-q4/model.json
index 68eab3790..bc9847eef 100644
--- a/models/llama2-chat-7b-q4/model.json
+++ b/models/llama2-chat-7b-q4/model.json
@@ -1,29 +1,34 @@
{
- "source_url": "https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF/resolve/main/llama-2-7b-chat.Q4_K_M.gguf",
- "id": "llama2-chat-7b-q4",
- "object": "model",
- "name": "Llama 2 Chat 7B Q4",
- "version": "1.0",
- "description": "Llama 2 Chat 7b model, specifically designed for a comprehensive understanding through training on extensive internet data.",
- "format": "gguf",
- "settings": {
- "ctx_len": 4096,
-    "prompt_template": "[INST] <<SYS>>\n{system_message}<</SYS>>\n{prompt}[/INST]"
- },
- "parameters": {
- "temperature": 0.7,
- "top_p": 0.95,
- "stream": true,
- "max_tokens": 4096,
- "stop": [],
- "frequency_penalty": 0,
- "presence_penalty": 0
- },
- "metadata": {
- "author": "MetaAI, The Bloke",
- "tags": ["7B", "Foundational Model"],
- "size": 4080000000
- },
- "engine": "nitro"
- }
-
\ No newline at end of file
+ "sources": [
+ {
+ "filename": "llama-2-7b-chat.Q4_K_M.gguf",
+ "url": "https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF/resolve/main/llama-2-7b-chat.Q4_K_M.gguf"
+ }
+ ],
+ "id": "llama2-chat-7b-q4",
+ "object": "model",
+ "name": "Llama 2 Chat 7B Q4",
+ "version": "1.0",
+  "description": "Llama 2 Chat 7B model, trained on extensive internet data for broad language understanding.",
+ "format": "gguf",
+ "settings": {
+ "ctx_len": 4096,
+    "prompt_template": "[INST] <<SYS>>\n{system_message}<</SYS>>\n{prompt}[/INST]",
+ "llama_model_path": "llama-2-7b-chat.Q4_K_M.gguf"
+ },
+ "parameters": {
+ "temperature": 0.7,
+ "top_p": 0.95,
+ "stream": true,
+ "max_tokens": 4096,
+ "stop": [],
+ "frequency_penalty": 0,
+ "presence_penalty": 0
+ },
+ "metadata": {
+ "author": "MetaAI, The Bloke",
+ "tags": ["7B", "Foundational Model"],
+ "size": 4080000000
+ },
+ "engine": "nitro"
+}
diff --git a/models/llava-1.5-13b-q5/model.json b/models/llava-1.5-13b-q5/model.json
new file mode 100644
index 000000000..027b8398f
--- /dev/null
+++ b/models/llava-1.5-13b-q5/model.json
@@ -0,0 +1,33 @@
+{
+ "sources": [
+ {
+ "filename": "ggml-model-q5_k.gguf",
+ "url": "https://huggingface.co/mys/ggml_llava-v1.5-13b/resolve/main/ggml-model-q5_k.gguf"
+ },
+ {
+ "filename": "mmproj-model-f16.gguf",
+ "url": "https://huggingface.co/mys/ggml_llava-v1.5-13b/resolve/main/mmproj-model-f16.gguf"
+ }
+ ],
+ "id": "llava-1.5-13b-q5",
+ "object": "model",
+  "name": "LLaVA 1.5 13B Q5 K",
+  "version": "1.0",
+  "description": "LLaVA 1.5 brings vision understanding to Jan.",
+ "format": "gguf",
+ "settings": {
+ "ctx_len": 4096,
+ "prompt_template": "\n### Instruction:\n{prompt}\n### Response:\n",
+ "llama_model_path": "ggml-model-q5_k.gguf",
+ "mmproj": "mmproj-model-f16.gguf"
+ },
+ "parameters": {
+ "max_tokens": 4096
+ },
+ "metadata": {
+ "author": "Mys",
+ "tags": ["Vision"],
+ "size": 9850000000
+ },
+ "engine": "nitro"
+}
diff --git a/models/llava-1.5-7b-q5/model.json b/models/llava-1.5-7b-q5/model.json
new file mode 100644
index 000000000..658b98880
--- /dev/null
+++ b/models/llava-1.5-7b-q5/model.json
@@ -0,0 +1,33 @@
+{
+ "sources": [
+ {
+ "filename": "ggml-model-q5_k.gguf",
+ "url": "https://huggingface.co/mys/ggml_llava-v1.5-7b/resolve/main/ggml-model-q5_k.gguf"
+ },
+ {
+ "filename": "mmproj-model-f16.gguf",
+ "url": "https://huggingface.co/mys/ggml_llava-v1.5-7b/resolve/main/mmproj-model-f16.gguf"
+ }
+ ],
+ "id": "llava-1.5-7b-q5",
+ "object": "model",
+  "name": "LLaVA 1.5 7B Q5 K",
+  "version": "1.0",
+  "description": "LLaVA 1.5 brings vision understanding to Jan.",
+ "format": "gguf",
+ "settings": {
+ "ctx_len": 4096,
+ "prompt_template": "\n### Instruction:\n{prompt}\n### Response:\n",
+ "llama_model_path": "ggml-model-q5_k.gguf",
+ "mmproj": "mmproj-model-f16.gguf"
+ },
+ "parameters": {
+ "max_tokens": 4096
+ },
+ "metadata": {
+ "author": "Mys",
+ "tags": ["Vision"],
+ "size": 5400000000
+ },
+ "engine": "nitro"
+}
diff --git a/models/mistral-ins-7b-q4/model.json b/models/mistral-ins-7b-q4/model.json
index 6db1aa35b..bfdaffa90 100644
--- a/models/mistral-ins-7b-q4/model.json
+++ b/models/mistral-ins-7b-q4/model.json
@@ -1,30 +1,35 @@
{
- "source_url": "https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.2-GGUF/resolve/main/mistral-7b-instruct-v0.2.Q4_K_M.gguf",
- "id": "mistral-ins-7b-q4",
- "object": "model",
- "name": "Mistral Instruct 7B Q4",
- "version": "1.0",
- "description": "Mistral Instruct 7b model, specifically designed for a comprehensive understanding through training on extensive internet data.",
- "format": "gguf",
- "settings": {
- "ctx_len": 4096,
- "prompt_template": "[INST] {prompt} [/INST]"
- },
- "parameters": {
- "temperature": 0.7,
- "top_p": 0.95,
- "stream": true,
- "max_tokens": 4096,
- "stop": [],
- "frequency_penalty": 0,
- "presence_penalty": 0
- },
- "metadata": {
- "author": "MistralAI, The Bloke",
- "tags": ["Featured", "7B", "Foundational Model"],
- "size": 4370000000,
- "cover": "https://raw.githubusercontent.com/janhq/jan/main/models/mistral-ins-7b-q4/cover.png"
- },
- "engine": "nitro"
- }
-
\ No newline at end of file
+ "sources": [
+ {
+ "filename": "mistral-7b-instruct-v0.2.Q4_K_M.gguf",
+ "url": "https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.2-GGUF/resolve/main/mistral-7b-instruct-v0.2.Q4_K_M.gguf"
+ }
+ ],
+ "id": "mistral-ins-7b-q4",
+ "object": "model",
+ "name": "Mistral Instruct 7B Q4",
+ "version": "1.0",
+  "description": "Mistral Instruct 7B model, trained on extensive internet data for broad language understanding.",
+ "format": "gguf",
+ "settings": {
+ "ctx_len": 4096,
+ "prompt_template": "[INST] {prompt} [/INST]",
+ "llama_model_path": "mistral-7b-instruct-v0.2.Q4_K_M.gguf"
+ },
+ "parameters": {
+ "temperature": 0.7,
+ "top_p": 0.95,
+ "stream": true,
+ "max_tokens": 4096,
+ "stop": [],
+ "frequency_penalty": 0,
+ "presence_penalty": 0
+ },
+ "metadata": {
+ "author": "MistralAI, The Bloke",
+ "tags": ["Featured", "7B", "Foundational Model"],
+ "size": 4370000000,
+ "cover": "https://raw.githubusercontent.com/janhq/jan/main/models/mistral-ins-7b-q4/cover.png"
+ },
+ "engine": "nitro"
+}
diff --git a/models/mixtral-8x7b-instruct/model.json b/models/mixtral-8x7b-instruct/model.json
index 31ff2838a..e0a0ee040 100644
--- a/models/mixtral-8x7b-instruct/model.json
+++ b/models/mixtral-8x7b-instruct/model.json
@@ -1,28 +1,33 @@
{
- "source_url": "https://huggingface.co/TheBloke/Mixtral-8x7B-Instruct-v0.1-GGUF/resolve/main/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf",
- "id": "mixtral-8x7b-instruct",
- "object": "model",
- "name": "Mixtral 8x7B Instruct Q4",
- "version": "1.0",
- "description": "The Mixtral-8x7B is a pretrained generative Sparse Mixture of Experts. The Mixtral-8x7B outperforms 70B models on most benchmarks.",
- "format": "gguf",
- "settings": {
- "ctx_len": 4096,
- "prompt_template": "[INST] {prompt} [/INST]"
- },
- "parameters": {
- "temperature": 0.7,
- "top_p": 0.95,
- "stream": true,
- "max_tokens": 4096,
- "stop": [],
- "frequency_penalty": 0,
- "presence_penalty": 0
- },
- "metadata": {
- "author": "MistralAI, TheBloke",
- "tags": ["70B", "Foundational Model"],
- "size": 26440000000
- },
- "engine": "nitro"
- }
+ "sources": [
+ {
+ "filename": "mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf",
+ "url": "https://huggingface.co/TheBloke/Mixtral-8x7B-Instruct-v0.1-GGUF/resolve/main/mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf"
+ }
+ ],
+ "id": "mixtral-8x7b-instruct",
+ "object": "model",
+ "name": "Mixtral 8x7B Instruct Q4",
+ "version": "1.0",
+  "description": "Mixtral-8x7B is a pretrained generative Sparse Mixture of Experts model that outperforms 70B models on most benchmarks.",
+ "format": "gguf",
+ "settings": {
+ "ctx_len": 4096,
+ "prompt_template": "[INST] {prompt} [/INST]",
+ "llama_model_path": "mixtral-8x7b-instruct-v0.1.Q4_K_M.gguf"
+ },
+ "parameters": {
+ "temperature": 0.7,
+ "top_p": 0.95,
+ "stream": true,
+ "max_tokens": 4096,
+ "frequency_penalty": 0,
+ "presence_penalty": 0
+ },
+ "metadata": {
+ "author": "MistralAI, TheBloke",
+ "tags": ["70B", "Foundational Model"],
+ "size": 26440000000
+ },
+ "engine": "nitro"
+}
diff --git a/models/noromaid-7b/model.json b/models/noromaid-7b/model.json
index fbb7858e1..78d579a64 100644
--- a/models/noromaid-7b/model.json
+++ b/models/noromaid-7b/model.json
@@ -1,29 +1,34 @@
{
- "source_url": "https://huggingface.co/NeverSleep/Noromaid-7b-v0.1.1-GGUF/resolve/main/Noromaid-7b-v0.1.1.q5_k_m.gguf",
- "id": "noromaid-7b",
- "object": "model",
- "name": "Noromaid 7B Q5",
- "version": "1.0",
- "description": "The Noromaid 7b model is designed for role-playing with human-like behavior.",
- "format": "gguf",
- "settings": {
- "ctx_len": 4096,
- "prompt_template": "### Instruction:{prompt}\n### Response:"
- },
- "parameters": {
- "temperature": 0.7,
- "top_p": 0.95,
- "stream": true,
- "max_tokens": 4096,
- "stop": [],
- "frequency_penalty": 0,
- "presence_penalty": 0
- },
- "metadata": {
- "author": "NeverSleep",
- "tags": ["7B", "Merged"],
- "size": 4370000000
- },
- "engine": "nitro"
- }
-
\ No newline at end of file
+ "sources": [
+ {
+ "filename": "Noromaid-7b-v0.1.1.q5_k_m.gguf",
+ "url": "https://huggingface.co/NeverSleep/Noromaid-7b-v0.1.1-GGUF/resolve/main/Noromaid-7b-v0.1.1.q5_k_m.gguf"
+ }
+ ],
+ "id": "noromaid-7b",
+ "object": "model",
+ "name": "Noromaid 7B Q5",
+ "version": "1.0",
+  "description": "The Noromaid 7B model is designed for role-play with human-like behavior.",
+ "format": "gguf",
+ "settings": {
+ "ctx_len": 4096,
+ "prompt_template": "### Instruction:{prompt}\n### Response:",
+ "llama_model_path": "Noromaid-7b-v0.1.1.q5_k_m.gguf"
+ },
+ "parameters": {
+ "temperature": 0.7,
+ "top_p": 0.95,
+ "stream": true,
+ "max_tokens": 4096,
+ "stop": [],
+ "frequency_penalty": 0,
+ "presence_penalty": 0
+ },
+ "metadata": {
+ "author": "NeverSleep",
+ "tags": ["7B", "Merged"],
+ "size": 4370000000
+ },
+ "engine": "nitro"
+}
diff --git a/models/openchat-3.5-7b/model.json b/models/openchat-3.5-7b/model.json
index e4b72f9c6..294f7d269 100644
--- a/models/openchat-3.5-7b/model.json
+++ b/models/openchat-3.5-7b/model.json
@@ -1,28 +1,34 @@
{
- "source_url": "https://huggingface.co/TheBloke/openchat-3.5-1210-GGUF/resolve/main/openchat-3.5-1210.Q4_K_M.gguf",
- "id": "openchat-3.5-7b",
- "object": "model",
- "name": "Openchat-3.5 7B Q4",
- "version": "1.0",
- "description": "The performance of this open-source model surpasses that of ChatGPT-3.5 and Grok-1 across various benchmarks.",
- "format": "gguf",
- "settings": {
- "ctx_len": 4096,
- "prompt_template": "GPT4 Correct User: {prompt}<|end_of_turn|>GPT4 Correct Assistant:"
- },
- "parameters": {
- "temperature": 0.7,
- "top_p": 0.95,
- "stream": true,
- "max_tokens": 4096,
- "stop": ["<|end_of_turn|>"],
- "frequency_penalty": 0,
- "presence_penalty": 0
- },
- "metadata": {
- "author": "Openchat",
- "tags": ["Recommended", "7B", "Finetuned"],
- "size": 4370000000
- },
- "engine": "nitro"
- }
+ "sources": [
+ {
+ "filename": "openchat-3.5-1210.Q4_K_M.gguf",
+ "url": "https://huggingface.co/TheBloke/openchat-3.5-1210-GGUF/resolve/main/openchat-3.5-1210.Q4_K_M.gguf"
+ }
+ ],
+ "id": "openchat-3.5-7b",
+ "object": "model",
+ "name": "Openchat-3.5 7B Q4",
+ "version": "1.0",
+ "description": "The performance of this open-source model surpasses that of ChatGPT-3.5 and Grok-1 across various benchmarks.",
+ "format": "gguf",
+ "settings": {
+ "ctx_len": 4096,
+ "prompt_template": "GPT4 Correct User: {prompt}<|end_of_turn|>GPT4 Correct Assistant:",
+ "llama_model_path": "openchat-3.5-1210.Q4_K_M.gguf"
+ },
+ "parameters": {
+ "temperature": 0.7,
+ "top_p": 0.95,
+ "stream": true,
+ "max_tokens": 4096,
+ "stop": ["<|end_of_turn|>"],
+ "frequency_penalty": 0,
+ "presence_penalty": 0
+ },
+ "metadata": {
+ "author": "Openchat",
+ "tags": ["Recommended", "7B", "Finetuned"],
+ "size": 4370000000
+ },
+ "engine": "nitro"
+}
diff --git a/models/openhermes-neural-7b/model.json b/models/openhermes-neural-7b/model.json
index ca3e88502..87e1df143 100644
--- a/models/openhermes-neural-7b/model.json
+++ b/models/openhermes-neural-7b/model.json
@@ -1,29 +1,34 @@
{
- "source_url": "https://huggingface.co/janhq/openhermes-2.5-neural-chat-v3-3-slerp-GGUF/resolve/main/openhermes-2.5-neural-chat-v3-3-slerp.Q4_K_M.gguf",
- "id": "openhermes-neural-7b",
- "object": "model",
- "name": "OpenHermes Neural 7B Q4",
- "version": "1.0",
- "description": "OpenHermes Neural is a merged model using the TIES method. It performs well in various benchmarks.",
- "format": "gguf",
- "settings": {
- "ctx_len": 4096,
- "prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant"
- },
- "parameters": {
- "temperature": 0.7,
- "top_p": 0.95,
- "stream": true,
- "max_tokens": 4096,
- "stop": [],
- "frequency_penalty": 0,
- "presence_penalty": 0
- },
- "metadata": {
- "author": "Intel, Jan",
- "tags": ["7B", "Merged", "Featured"],
- "size": 4370000000,
- "cover": "https://raw.githubusercontent.com/janhq/jan/main/models/openhermes-neural-7b/cover.png"
- },
- "engine": "nitro"
- }
+ "sources": [
+ {
+ "filename": "openhermes-2.5-neural-chat-v3-3-slerp.Q4_K_M.gguf",
+ "url": "https://huggingface.co/janhq/openhermes-2.5-neural-chat-v3-3-slerp-GGUF/resolve/main/openhermes-2.5-neural-chat-v3-3-slerp.Q4_K_M.gguf"
+ }
+ ],
+ "id": "openhermes-neural-7b",
+ "object": "model",
+ "name": "OpenHermes Neural 7B Q4",
+ "version": "1.0",
+ "description": "OpenHermes Neural is a merged model using the TIES method. It performs well in various benchmarks.",
+ "format": "gguf",
+ "settings": {
+ "ctx_len": 4096,
+ "prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant",
+ "llama_model_path": "openhermes-2.5-neural-chat-v3-3-slerp.Q4_K_M.gguf"
+ },
+ "parameters": {
+ "temperature": 0.7,
+ "top_p": 0.95,
+ "stream": true,
+ "max_tokens": 4096,
+ "frequency_penalty": 0,
+ "presence_penalty": 0
+ },
+ "metadata": {
+ "author": "Intel, Jan",
+ "tags": ["7B", "Merged", "Featured"],
+ "size": 4370000000,
+ "cover": "https://raw.githubusercontent.com/janhq/jan/main/models/openhermes-neural-7b/cover.png"
+ },
+ "engine": "nitro"
+}
diff --git a/models/phi-2-3b/model.json b/models/phi-2-3b/model.json
index 97ff369e7..e452fdb55 100644
--- a/models/phi-2-3b/model.json
+++ b/models/phi-2-3b/model.json
@@ -1,29 +1,34 @@
{
- "source_url": "https://huggingface.co/TheBloke/phi-2-GGUF/resolve/main/phi-2.Q8_0.gguf",
- "id": "phi-2-3b",
- "object": "model",
- "name": "Phi-2 3B Q8",
- "version": "1.0",
- "description": "Phi-2 is a 2.7B model, excelling in common sense and logical reasoning benchmarks, trained with synthetic texts and filtered websites.",
- "format": "gguf",
- "settings": {
- "ctx_len": 4096,
- "prompt_template": "Intruct:\n{prompt}\nOutput:"
- },
- "parameters": {
- "temperature": 0.7,
- "top_p": 0.95,
- "stream": true,
- "max_tokens": 4096,
- "stop": [],
- "frequency_penalty": 0,
- "presence_penalty": 0
- },
- "metadata": {
- "author": "Microsoft",
- "tags": ["3B","Foundational Model"],
- "size": 2960000000
- },
- "engine": "nitro"
- }
-
\ No newline at end of file
+ "sources": [
+ {
+ "filename": "phi-2.Q8_0.gguf",
+ "url": "https://huggingface.co/TheBloke/phi-2-GGUF/resolve/main/phi-2.Q8_0.gguf"
+ }
+ ],
+ "id": "phi-2-3b",
+ "object": "model",
+ "name": "Phi-2 3B Q8",
+ "version": "1.0",
+ "description": "Phi-2 is a 2.7B model, excelling in common sense and logical reasoning benchmarks, trained with synthetic texts and filtered websites.",
+ "format": "gguf",
+ "settings": {
+ "ctx_len": 4096,
+    "prompt_template": "Instruct:\n{prompt}\nOutput:",
+ "llama_model_path": "phi-2.Q8_0.gguf"
+ },
+ "parameters": {
+ "temperature": 0.7,
+ "top_p": 0.95,
+ "stream": true,
+ "max_tokens": 4096,
+ "stop": [],
+ "frequency_penalty": 0,
+ "presence_penalty": 0
+ },
+ "metadata": {
+ "author": "Microsoft",
+ "tags": ["3B", "Foundational Model"],
+ "size": 2960000000
+ },
+ "engine": "nitro"
+}
diff --git a/models/phind-34b/model.json b/models/phind-34b/model.json
index 7fc77ed71..60309cb9b 100644
--- a/models/phind-34b/model.json
+++ b/models/phind-34b/model.json
@@ -1,29 +1,34 @@
{
- "source_url": "https://huggingface.co/TheBloke/Phind-CodeLlama-34B-v2-GGUF/resolve/main/phind-codellama-34b-v2.Q5_K_M.gguf",
- "id": "phind-34b",
- "object": "model",
- "name": "Phind 34B Q5",
- "version": "1.0",
- "description": "Phind 34B is fine-tuned on 1.5B tokens of high-quality programming data. This multi-lingual model excels in various programming languages and is designed to be steerable and user-friendly.",
- "format": "gguf",
- "settings": {
- "ctx_len": 4096,
- "prompt_template": "### System Prompt\n{system_message}\n### User Message\n{prompt}\n### Assistant"
- },
- "parameters": {
- "temperature": 0.7,
- "top_p": 0.95,
- "stream": true,
- "max_tokens": 4096,
- "stop": [],
- "frequency_penalty": 0,
- "presence_penalty": 0
- },
- "metadata": {
- "author": "Phind, The Bloke",
- "tags": ["34B", "Finetuned"],
- "size": 20220000000
- },
- "engine": "nitro"
- }
-
\ No newline at end of file
+ "sources": [
+ {
+ "filename": "phind-codellama-34b-v2.Q5_K_M.gguf",
+ "url": "https://huggingface.co/TheBloke/Phind-CodeLlama-34B-v2-GGUF/resolve/main/phind-codellama-34b-v2.Q5_K_M.gguf"
+ }
+ ],
+ "id": "phind-34b",
+ "object": "model",
+ "name": "Phind 34B Q5",
+ "version": "1.0",
+ "description": "Phind 34B is fine-tuned on 1.5B tokens of high-quality programming data. This multi-lingual model excels in various programming languages and is designed to be steerable and user-friendly.",
+ "format": "gguf",
+ "settings": {
+ "ctx_len": 4096,
+ "prompt_template": "### System Prompt\n{system_message}\n### User Message\n{prompt}\n### Assistant",
+ "llama_model_path": "phind-codellama-34b-v2.Q5_K_M.gguf"
+ },
+ "parameters": {
+ "temperature": 0.7,
+ "top_p": 0.95,
+ "stream": true,
+ "max_tokens": 4096,
+ "stop": [],
+ "frequency_penalty": 0,
+ "presence_penalty": 0
+ },
+ "metadata": {
+ "author": "Phind, The Bloke",
+ "tags": ["34B", "Finetuned"],
+ "size": 20220000000
+ },
+ "engine": "nitro"
+}
diff --git a/models/solar-10.7b-slerp/model.json b/models/solar-10.7b-slerp/model.json
index 9177fa013..8e62fa25b 100644
--- a/models/solar-10.7b-slerp/model.json
+++ b/models/solar-10.7b-slerp/model.json
@@ -1,29 +1,33 @@
{
- "source_url": "https://huggingface.co/janhq/Solar-10.7B-SLERP-GGUF/resolve/main/solar-10.7b-slerp.Q4_K_M.gguf",
- "id": "solar-10.7b-slerp",
- "object": "model",
- "name": "Solar Slerp 10.7B Q4",
- "version": "1.0",
- "description": "This model uses the Slerp merge method from SOLAR Instruct and Pandora-v1",
- "format": "gguf",
- "settings": {
- "ctx_len": 4096,
- "prompt_template": "### User: {prompt}\n### Assistant:"
- },
- "parameters": {
- "temperature": 0.7,
- "top_p": 0.95,
- "stream": true,
- "max_tokens": 4096,
- "stop": [],
- "frequency_penalty": 0,
- "presence_penalty": 0
- },
- "metadata": {
- "author": "Jan",
- "tags": ["13B","Finetuned"],
- "size": 6360000000
- },
- "engine": "nitro"
- }
-
\ No newline at end of file
+ "sources": [
+ {
+ "filename": "solar-10.7b-slerp.Q4_K_M.gguf",
+ "url": "https://huggingface.co/janhq/Solar-10.7B-SLERP-GGUF/resolve/main/solar-10.7b-slerp.Q4_K_M.gguf"
+ }
+ ],
+ "id": "solar-10.7b-slerp",
+ "object": "model",
+ "name": "Solar Slerp 10.7B Q4",
+ "version": "1.0",
+  "description": "This model merges SOLAR Instruct and Pandora-v1 using the Slerp method.",
+ "format": "gguf",
+ "settings": {
+ "ctx_len": 4096,
+ "prompt_template": "### User: {prompt}\n### Assistant:",
+ "llama_model_path": "solar-10.7b-slerp.Q4_K_M.gguf"
+ },
+ "parameters": {
+ "temperature": 0.7,
+ "top_p": 0.95,
+ "stream": true,
+ "max_tokens": 4096,
+ "frequency_penalty": 0,
+ "presence_penalty": 0
+ },
+ "metadata": {
+ "author": "Jan",
+ "tags": ["13B", "Finetuned"],
+ "size": 6360000000
+ },
+ "engine": "nitro"
+}
diff --git a/models/starling-7b/model.json b/models/starling-7b/model.json
index 1a6d7e55c..eaa540bd9 100644
--- a/models/starling-7b/model.json
+++ b/models/starling-7b/model.json
@@ -1,29 +1,34 @@
{
- "source_url": "https://huggingface.co/TheBloke/Starling-LM-7B-alpha-GGUF/resolve/main/starling-lm-7b-alpha.Q4_K_M.gguf",
- "id": "starling-7b",
- "object": "model",
- "name": "Starling alpha 7B Q4",
- "version": "1.0",
- "description": "Starling 7B, an upgrade of Openchat 3.5 using RLAIF, is really good at various benchmarks, especially with GPT-4 judging its performance.",
- "format": "gguf",
- "settings": {
- "ctx_len": 4096,
- "prompt_template": "GPT4 User: {prompt}<|end_of_turn|>GPT4 Assistant:"
- },
- "parameters": {
- "temperature": 0.7,
- "top_p": 0.95,
- "stream": true,
- "max_tokens": 4096,
- "stop": ["<|end_of_turn|>"],
- "frequency_penalty": 0,
- "presence_penalty": 0
- },
- "metadata": {
- "author": "Berkeley-nest, The Bloke",
- "tags": ["7B","Finetuned"],
- "size": 4370000000
- },
- "engine": "nitro"
- }
-
\ No newline at end of file
+ "sources": [
+ {
+ "filename": "starling-lm-7b-alpha.Q4_K_M.gguf",
+ "url": "https://huggingface.co/TheBloke/Starling-LM-7B-alpha-GGUF/resolve/main/starling-lm-7b-alpha.Q4_K_M.gguf"
+ }
+ ],
+ "id": "starling-7b",
+ "object": "model",
+ "name": "Starling alpha 7B Q4",
+ "version": "1.0",
+  "description": "Starling 7B, an upgrade of Openchat 3.5 trained with RLAIF, performs strongly across benchmarks, especially under GPT-4 evaluation.",
+ "format": "gguf",
+ "settings": {
+ "ctx_len": 4096,
+ "prompt_template": "GPT4 User: {prompt}<|end_of_turn|>GPT4 Assistant:",
+ "llama_model_path": "starling-lm-7b-alpha.Q4_K_M.gguf"
+ },
+ "parameters": {
+ "temperature": 0.7,
+ "top_p": 0.95,
+ "stream": true,
+ "max_tokens": 4096,
+ "stop": ["<|end_of_turn|>"],
+ "frequency_penalty": 0,
+ "presence_penalty": 0
+ },
+ "metadata": {
+ "author": "Berkeley-nest, The Bloke",
+ "tags": ["7B", "Finetuned"],
+ "size": 4370000000
+ },
+ "engine": "nitro"
+}
diff --git a/models/stealth-v1.2-7b/model.json b/models/stealth-v1.2-7b/model.json
index 92bfe46e1..235cbbb88 100644
--- a/models/stealth-v1.2-7b/model.json
+++ b/models/stealth-v1.2-7b/model.json
@@ -1,32 +1,33 @@
{
- "source_url": "https://huggingface.co/janhq/stealth-v1.3-GGUF/resolve/main/stealth-v1.3.Q4_K_M.gguf",
- "id": "stealth-v1.2-7b",
- "object": "model",
- "name": "Stealth 7B Q4",
- "version": "1.0",
- "description": "This is a new experimental family designed to enhance Mathematical and Logical abilities.",
- "format": "gguf",
- "settings": {
- "ctx_len": 4096,
- "prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant"
- },
- "parameters": {
- "temperature": 0.7,
- "top_p": 0.95,
- "stream": true,
- "max_tokens": 4096,
- "stop": [],
- "frequency_penalty": 0,
- "presence_penalty": 0
- },
- "metadata": {
- "author": "Jan",
- "tags": [
- "7B",
- "Finetuned",
- "Featured"
- ],
- "size": 4370000000
- },
- "engine": "nitro"
- }
\ No newline at end of file
+ "sources": [
+ {
+ "filename": "stealth-v1.3.Q4_K_M.gguf",
+ "url": "https://huggingface.co/janhq/stealth-v1.3-GGUF/resolve/main/stealth-v1.3.Q4_K_M.gguf"
+ }
+ ],
+ "id": "stealth-v1.2-7b",
+ "object": "model",
+ "name": "Stealth 7B Q4",
+ "version": "1.0",
+  "description": "This is a new experimental model family designed to enhance mathematical and logical abilities.",
+ "format": "gguf",
+ "settings": {
+ "ctx_len": 4096,
+ "prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant",
+ "llama_model_path": "stealth-v1.3.Q4_K_M.gguf"
+ },
+ "parameters": {
+ "temperature": 0.7,
+ "top_p": 0.95,
+ "stream": true,
+ "max_tokens": 4096,
+ "frequency_penalty": 0,
+ "presence_penalty": 0
+ },
+ "metadata": {
+ "author": "Jan",
+ "tags": ["7B", "Finetuned", "Featured"],
+ "size": 4370000000
+ },
+ "engine": "nitro"
+}
diff --git a/models/tinyllama-1.1b/model.json b/models/tinyllama-1.1b/model.json
index 641511569..6a9187fa5 100644
--- a/models/tinyllama-1.1b/model.json
+++ b/models/tinyllama-1.1b/model.json
@@ -1,5 +1,10 @@
{
- "source_url": "https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF/resolve/main/tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf",
+ "sources": [
+ {
+ "filename": "tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf",
+ "url": "https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF/resolve/main/tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf"
+ }
+ ],
"id": "tinyllama-1.1b",
"object": "model",
"name": "TinyLlama Chat 1.1B Q4",
@@ -7,8 +12,9 @@
"description": "TinyLlama is a tiny model with only 1.1B. It's a good model for less powerful computers.",
"format": "gguf",
"settings": {
- "ctx_len": 2048,
- "prompt_template": "<|system|>\n{system_message}<|user|>\n{prompt}<|assistant|>"
+ "ctx_len": 4096,
+ "prompt_template": "<|system|>\n{system_message}<|user|>\n{prompt}<|assistant|>",
+ "llama_model_path": "tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf"
},
"parameters": {
"temperature": 0.7,
@@ -20,9 +26,9 @@
"presence_penalty": 0
},
"metadata": {
- "author": "TinyLlama",
- "tags": ["Tiny", "Foundation Model"],
- "size": 669000000
+ "author": "TinyLlama",
+ "tags": ["Tiny", "Foundation Model"],
+ "size": 669000000
},
"engine": "nitro"
-}
\ No newline at end of file
+}
diff --git a/models/trinity-v1.2-7b/model.json b/models/trinity-v1.2-7b/model.json
index ce5e7a4cf..2dda120e6 100644
--- a/models/trinity-v1.2-7b/model.json
+++ b/models/trinity-v1.2-7b/model.json
@@ -1,29 +1,34 @@
{
- "source_url": "https://huggingface.co/janhq/trinity-v1.2-GGUF/resolve/main/trinity-v1.2.Q4_K_M.gguf",
- "id": "trinity-v1.2-7b",
- "object": "model",
- "name": "Trinity-v1.2 7B Q4",
- "version": "1.0",
- "description": "Trinity is an experimental model merge using the Slerp method. Recommended for daily assistance purposes.",
- "format": "gguf",
- "settings": {
- "ctx_len": 4096,
- "prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant"
- },
- "parameters": {
- "temperature": 0.7,
- "top_p": 0.95,
- "stream": true,
- "max_tokens": 4096,
- "stop": [],
- "frequency_penalty": 0,
- "presence_penalty": 0
- },
- "metadata": {
- "author": "Jan",
- "tags": ["7B", "Merged", "Featured"],
- "size": 4370000000,
- "cover": "https://raw.githubusercontent.com/janhq/jan/main/models/trinity-v1.2-7b/cover.png"
- },
- "engine": "nitro"
- }
\ No newline at end of file
+ "sources": [
+ {
+ "filename": "trinity-v1.2.Q4_K_M.gguf",
+ "url": "https://huggingface.co/janhq/trinity-v1.2-GGUF/resolve/main/trinity-v1.2.Q4_K_M.gguf"
+ }
+ ],
+ "id": "trinity-v1.2-7b",
+ "object": "model",
+ "name": "Trinity-v1.2 7B Q4",
+ "version": "1.0",
+ "description": "Trinity is an experimental model merge using the Slerp method. Recommended for daily assistance purposes.",
+ "format": "gguf",
+ "settings": {
+ "ctx_len": 4096,
+ "prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant",
+ "llama_model_path": "trinity-v1.2.Q4_K_M.gguf"
+ },
+ "parameters": {
+ "temperature": 0.7,
+ "top_p": 0.95,
+ "stream": true,
+ "max_tokens": 4096,
+ "frequency_penalty": 0,
+ "presence_penalty": 0
+ },
+ "metadata": {
+ "author": "Jan",
+ "tags": ["7B", "Merged", "Featured"],
+ "size": 4370000000,
+ "cover": "https://raw.githubusercontent.com/janhq/jan/main/models/trinity-v1.2-7b/cover.png"
+ },
+ "engine": "nitro"
+}
diff --git a/models/tulu-2-70b/model.json b/models/tulu-2-70b/model.json
index ae95b870d..06b96e50d 100644
--- a/models/tulu-2-70b/model.json
+++ b/models/tulu-2-70b/model.json
@@ -1,28 +1,33 @@
{
- "source_url": "https://huggingface.co/TheBloke/tulu-2-dpo-70B-GGUF/resolve/main/tulu-2-dpo-70b.Q4_K_M.gguf",
- "id": "tulu-2-70b",
- "object": "model",
- "name": "Tulu 2 70B Q4",
- "version": "1.0",
- "description": "Tulu 70B is a strong alternative to Llama 2 70b Chat to act as helpful assistants.",
- "format": "gguf",
- "settings": {
- "ctx_len": 4096,
- "prompt_template": "<|user|>\n{prompt}\n<|assistant|>"
- },
- "parameters": {
- "temperature": 0.7,
- "top_p": 0.95,
- "stream": true,
- "max_tokens": 4096,
- "stop": [],
- "frequency_penalty": 0,
- "presence_penalty": 0
- },
- "metadata": {
- "author": "Lizpreciatior, The Bloke",
- "tags": ["70B", "Finetuned"],
- "size": 41400000000
- },
- "engine": "nitro"
- }
\ No newline at end of file
+ "sources": [
+ {
+ "filename": "tulu-2-dpo-70b.Q4_K_M.gguf",
+ "url": "https://huggingface.co/TheBloke/tulu-2-dpo-70B-GGUF/resolve/main/tulu-2-dpo-70b.Q4_K_M.gguf"
+ }
+ ],
+ "id": "tulu-2-70b",
+ "object": "model",
+ "name": "Tulu 2 70B Q4",
+ "version": "1.0",
+  "description": "Tulu 2 70B is a strong alternative to Llama 2 70B Chat as a helpful assistant.",
+ "format": "gguf",
+ "settings": {
+ "ctx_len": 4096,
+ "prompt_template": "<|user|>\n{prompt}\n<|assistant|>",
+ "llama_model_path": "tulu-2-dpo-70b.Q4_K_M.gguf"
+ },
+ "parameters": {
+ "temperature": 0.7,
+ "top_p": 0.95,
+ "stream": true,
+ "max_tokens": 4096,
+ "frequency_penalty": 0,
+ "presence_penalty": 0
+ },
+ "metadata": {
+ "author": "Lizpreciatior, The Bloke",
+ "tags": ["70B", "Finetuned"],
+ "size": 41400000000
+ },
+ "engine": "nitro"
+}
diff --git a/models/wizardcoder-13b/model.json b/models/wizardcoder-13b/model.json
index 1605ed362..051c739a0 100644
--- a/models/wizardcoder-13b/model.json
+++ b/models/wizardcoder-13b/model.json
@@ -1,29 +1,34 @@
{
- "source_url": "https://huggingface.co/TheBloke/WizardCoder-Python-13B-V1.0-GGUF/resolve/main/wizardcoder-python-13b-v1.0.Q5_K_M.gguf",
- "id": "wizardcoder-13b",
- "object": "model",
- "name": "Wizard Coder Python 13B Q5",
- "version": "1.0",
- "description": "WizardCoder 13B is a Python coding model. This model demonstrate high proficiency in specific domains like coding and mathematics.",
- "format": "gguf",
- "settings": {
- "ctx_len": 4096,
- "prompt_template": "### Instruction:\n{prompt}\n### Response:"
- },
- "parameters": {
- "temperature": 0.7,
- "top_p": 0.95,
- "stream": true,
- "max_tokens": 4096,
- "stop": [],
- "frequency_penalty": 0,
- "presence_penalty": 0
- },
- "metadata": {
- "author": "WizardLM, The Bloke",
- "tags": ["Recommended", "13B", "Finetuned"],
- "size": 7870000000
- },
- "engine": "nitro"
- }
-
\ No newline at end of file
+ "sources": [
+ {
+ "filename": "wizardcoder-python-13b-v1.0.Q5_K_M.gguf",
+ "url": "https://huggingface.co/TheBloke/WizardCoder-Python-13B-V1.0-GGUF/resolve/main/wizardcoder-python-13b-v1.0.Q5_K_M.gguf"
+ }
+ ],
+ "id": "wizardcoder-13b",
+ "object": "model",
+ "name": "Wizard Coder Python 13B Q5",
+ "version": "1.0",
+  "description": "WizardCoder 13B is a Python coding model. It demonstrates high proficiency in specific domains like coding and mathematics.",
+ "format": "gguf",
+ "settings": {
+ "ctx_len": 4096,
+ "prompt_template": "### Instruction:\n{prompt}\n### Response:",
+ "llama_model_path": "wizardcoder-python-13b-v1.0.Q5_K_M.gguf"
+ },
+ "parameters": {
+ "temperature": 0.7,
+ "top_p": 0.95,
+ "stream": true,
+ "max_tokens": 4096,
+ "stop": [],
+ "frequency_penalty": 0,
+ "presence_penalty": 0
+ },
+ "metadata": {
+ "author": "WizardLM, The Bloke",
+ "tags": ["Recommended", "13B", "Finetuned"],
+ "size": 7870000000
+ },
+ "engine": "nitro"
+}
diff --git a/models/yarn-mistral-7b/model.json b/models/yarn-mistral-7b/model.json
index 2676fbb58..ee6de1319 100644
--- a/models/yarn-mistral-7b/model.json
+++ b/models/yarn-mistral-7b/model.json
@@ -1,29 +1,31 @@
{
- "source_url": "https://huggingface.co/TheBloke/Yarn-Mistral-7B-128k-GGUF/resolve/main/yarn-mistral-7b-128k.Q4_K_M.gguf",
- "id": "yarn-mistral-7b",
- "object": "model",
- "name": "Yarn Mistral 7B Q4",
- "version": "1.0",
- "description": "Yarn Mistral 7B is a language model for long context and supports a 128k token context window.",
- "format": "gguf",
- "settings": {
- "ctx_len": 4096,
- "prompt_template": "{prompt}"
- },
- "parameters": {
- "temperature": 0.7,
- "top_p": 0.95,
- "stream": true,
- "max_tokens": 4096,
- "stop": [],
- "frequency_penalty": 0,
- "presence_penalty": 0
- },
- "metadata": {
- "author": "NousResearch, The Bloke",
- "tags": ["7B","Finetuned"],
- "size": 4370000000
- },
- "engine": "nitro"
- }
-
\ No newline at end of file
+ "sources": [
+ {
+ "url": "https://huggingface.co/TheBloke/Yarn-Mistral-7B-128k-GGUF/resolve/main/yarn-mistral-7b-128k.Q4_K_M.gguf"
+ }
+ ],
+ "id": "yarn-mistral-7b",
+ "object": "model",
+ "name": "Yarn Mistral 7B Q4",
+ "version": "1.0",
+ "description": "Yarn Mistral 7B is a language model for long context and supports a 128k token context window.",
+ "format": "gguf",
+ "settings": {
+ "ctx_len": 4096,
+ "prompt_template": "{prompt}"
+ },
+ "parameters": {
+ "temperature": 0.7,
+ "top_p": 0.95,
+ "stream": true,
+ "max_tokens": 4096,
+ "frequency_penalty": 0,
+ "presence_penalty": 0
+ },
+ "metadata": {
+ "author": "NousResearch, The Bloke",
+ "tags": ["7B", "Finetuned"],
+ "size": 4370000000
+ },
+ "engine": "nitro"
+}
diff --git a/models/yi-34b/model.json b/models/yi-34b/model.json
index 8ff23aaa0..3697a9e22 100644
--- a/models/yi-34b/model.json
+++ b/models/yi-34b/model.json
@@ -1,29 +1,34 @@
{
- "source_url": "https://huggingface.co/TheBloke/Yi-34B-Chat-GGUF/resolve/main/yi-34b-chat.Q5_K_M.gguf",
- "id": "yi-34b",
- "object": "model",
- "name": "Yi 34B Q5",
- "version": "1.0",
- "description": "Yi-34B, a specialized chat model, is known for its diverse and creative responses and excels across various NLP tasks and benchmarks.",
- "format": "gguf",
- "settings": {
- "ctx_len": 4096,
- "prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant"
- },
- "parameters": {
- "temperature": 0.7,
- "top_p": 0.95,
- "stream": true,
- "max_tokens": 4096,
- "stop": [],
- "frequency_penalty": 0,
- "presence_penalty": 0
- },
- "metadata": {
- "author": "01-ai, The Bloke",
- "tags": ["34B", "Foundational Model"],
- "size": 20660000000
- },
- "engine": "nitro"
- }
-
\ No newline at end of file
+ "sources": [
+ {
+ "filename": "yi-34b-chat.Q5_K_M.gguf",
+ "url": "https://huggingface.co/TheBloke/Yi-34B-Chat-GGUF/resolve/main/yi-34b-chat.Q5_K_M.gguf"
+ }
+ ],
+ "id": "yi-34b",
+ "object": "model",
+ "name": "Yi 34B Q5",
+ "version": "1.0",
+ "description": "Yi-34B, a specialized chat model, is known for its diverse and creative responses and excels across various NLP tasks and benchmarks.",
+ "format": "gguf",
+ "settings": {
+ "ctx_len": 4096,
+ "prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant",
+ "llama_model_path": "yi-34b-chat.Q5_K_M.gguf"
+ },
+ "parameters": {
+ "temperature": 0.7,
+ "top_p": 0.95,
+ "stream": true,
+ "max_tokens": 4096,
+ "stop": [],
+ "frequency_penalty": 0,
+ "presence_penalty": 0
+ },
+ "metadata": {
+ "author": "01-ai, The Bloke",
+ "tags": ["34B", "Foundational Model"],
+ "size": 20660000000
+ },
+ "engine": "nitro"
+}
diff --git a/server/package.json b/server/package.json
index 9495a0d65..f61730da4 100644
--- a/server/package.json
+++ b/server/package.json
@@ -26,6 +26,8 @@
"dotenv": "^16.3.1",
"fastify": "^4.24.3",
"request": "^2.88.2",
+ "fetch-retry": "^5.0.6",
+ "tcp-port-used": "^1.0.2",
"request-progress": "^3.0.0"
},
"devDependencies": {
@@ -35,6 +37,7 @@
"@typescript-eslint/parser": "^6.7.3",
"eslint-plugin-react": "^7.33.2",
"run-script-os": "^1.1.6",
+ "@types/tcp-port-used": "^1.0.4",
"typescript": "^5.2.2"
}
}
diff --git a/uikit/package.json b/uikit/package.json
index 43e73dcf2..66f05840b 100644
--- a/uikit/package.json
+++ b/uikit/package.json
@@ -18,6 +18,7 @@
},
"dependencies": {
"@radix-ui/react-avatar": "^1.0.4",
+ "@radix-ui/react-checkbox": "^1.0.4",
"@radix-ui/react-context": "^1.0.1",
"@radix-ui/react-dialog": "^1.0.5",
"@radix-ui/react-icons": "^1.3.0",
diff --git a/uikit/src/button/styles.scss b/uikit/src/button/styles.scss
index 74585ed1e..003df5b4d 100644
--- a/uikit/src/button/styles.scss
+++ b/uikit/src/button/styles.scss
@@ -9,7 +9,7 @@
}
&-secondary-blue {
- @apply bg-blue-200 text-blue-600 hover:bg-blue-500/50;
+ @apply bg-blue-200 text-blue-600 hover:bg-blue-300/50 dark:hover:bg-blue-200/80;
}
&-danger {
@@ -17,7 +17,7 @@
}
&-secondary-danger {
- @apply bg-red-200 text-red-600 hover:bg-red-500/50;
+ @apply bg-red-200 text-red-600 hover:bg-red-300/50 dark:hover:bg-red-200/80;
}
&-outline {
@@ -67,14 +67,18 @@
[type='submit'] {
&.btn-primary {
@apply bg-primary hover:bg-primary/90;
+ @apply disabled:pointer-events-none disabled:bg-zinc-100 disabled:text-zinc-400;
}
&.btn-secondary {
@apply bg-secondary hover:bg-secondary/80;
+ @apply disabled:pointer-events-none disabled:bg-zinc-100 disabled:text-zinc-400;
}
&.btn-secondary-blue {
@apply bg-blue-200 text-blue-900 hover:bg-blue-200/80;
+ @apply disabled:pointer-events-none disabled:bg-zinc-100 disabled:text-zinc-400;
}
&.btn-danger {
@apply bg-danger hover:bg-danger/90;
+ @apply disabled:pointer-events-none disabled:bg-zinc-100 disabled:text-zinc-400;
}
}
diff --git a/uikit/src/checkbox/index.tsx b/uikit/src/checkbox/index.tsx
new file mode 100644
index 000000000..1e78aeafb
--- /dev/null
+++ b/uikit/src/checkbox/index.tsx
@@ -0,0 +1,29 @@
+'use client'
+
+import * as React from 'react'
+import * as CheckboxPrimitive from '@radix-ui/react-checkbox'
+import { CheckIcon } from '@radix-ui/react-icons'
+
+import { twMerge } from 'tailwind-merge'
+
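+// Checkbox built on the Radix UI primitive; visual styles come from ./styles.scss ('checkbox' and 'checkbox--icon')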
+const Checkbox = React.forwardRef<
+  React.ElementRef<typeof CheckboxPrimitive.Root>,
+  React.ComponentPropsWithoutRef<typeof CheckboxPrimitive.Root>
+>(({ className, ...props }, ref) => (
+  <CheckboxPrimitive.Root
+    ref={ref}
+    className={twMerge('checkbox', className)}
+    {...props}
+  >
+    <CheckboxPrimitive.Indicator className="flex items-center justify-center text-current">
+      <CheckIcon className="checkbox--icon" />
+    </CheckboxPrimitive.Indicator>
+  </CheckboxPrimitive.Root>
+))
+Checkbox.displayName = CheckboxPrimitive.Root.displayName
+
+export { Checkbox }
diff --git a/uikit/src/checkbox/styles.scss b/uikit/src/checkbox/styles.scss
new file mode 100644
index 000000000..33610f837
--- /dev/null
+++ b/uikit/src/checkbox/styles.scss
@@ -0,0 +1,7 @@
+.checkbox {
+ @apply border-border data-[state=checked]:bg-primary h-5 w-5 flex-shrink-0 rounded-md border data-[state=checked]:text-white;
+
+ &--icon {
+ @apply h-4 w-4;
+ }
+}
diff --git a/uikit/src/index.ts b/uikit/src/index.ts
index 3d5eaa82a..1b0a26bd1 100644
--- a/uikit/src/index.ts
+++ b/uikit/src/index.ts
@@ -12,3 +12,4 @@ export * from './command'
export * from './textarea'
export * from './select'
export * from './slider'
+export * from './checkbox'
diff --git a/uikit/src/main.scss b/uikit/src/main.scss
index 546f22811..c1326ba19 100644
--- a/uikit/src/main.scss
+++ b/uikit/src/main.scss
@@ -16,6 +16,7 @@
@import './textarea/styles.scss';
@import './select/styles.scss';
@import './slider/styles.scss';
+@import './checkbox/styles.scss';
.animate-spin {
animation: spin 1s linear infinite;
diff --git a/web/containers/CardSidebar/index.tsx b/web/containers/CardSidebar/index.tsx
index 552856921..38a8678d9 100644
--- a/web/containers/CardSidebar/index.tsx
+++ b/web/containers/CardSidebar/index.tsx
@@ -19,6 +19,7 @@ import { activeThreadAtom } from '@/helpers/atoms/Thread.atom'
interface Props {
children: ReactNode
+ rightAction?: ReactNode
title: string
asChild?: boolean
hideMoreVerticalAction?: boolean
@@ -27,6 +28,7 @@ export default function CardSidebar({
children,
title,
asChild,
+ rightAction,
hideMoreVerticalAction,
}: Props) {
const [show, setShow] = useState(true)
@@ -48,27 +50,16 @@ export default function CardSidebar({
-
{title}
-
- {!asChild && (
- <>
- {!hideMoreVerticalAction && (
-
setMore(!more)}
- >
-
-
- )}
- >
- )}
+
setShow(!show)}
- className="flex w-full flex-1 items-center space-x-2 rounded-lg bg-zinc-100 px-3 py-2 dark:bg-zinc-900"
+ onClick={() => {
+ if (!children) return
+ setShow(!show)
+ }}
+ className="flex w-full flex-1 items-center space-x-2 rounded-lg bg-zinc-100 py-2 pr-2 dark:bg-zinc-900"
>
+ {title}
+
+
+ {rightAction && rightAction}
+ {!asChild && (
+ <>
+ {!hideMoreVerticalAction && (
+
setMore(!more)}
+ >
+
+
+ )}
+ >
+ )}
{more && (
diff --git a/web/containers/Checkbox/index.tsx b/web/containers/Checkbox/index.tsx
index e8f916d98..a545771b6 100644
--- a/web/containers/Checkbox/index.tsx
+++ b/web/containers/Checkbox/index.tsx
@@ -9,54 +9,26 @@ import {
TooltipTrigger,
} from '@janhq/uikit'
-import { useAtomValue, useSetAtom } from 'jotai'
import { InfoIcon } from 'lucide-react'
-import { useActiveModel } from '@/hooks/useActiveModel'
-import useUpdateModelParameters from '@/hooks/useUpdateModelParameters'
-
-import { getConfigurationsData } from '@/utils/componentSettings'
-import { toSettingParams } from '@/utils/modelParam'
-
-import { serverEnabledAtom } from '@/helpers/atoms/LocalServer.atom'
-import {
- engineParamsUpdateAtom,
- getActiveThreadIdAtom,
- getActiveThreadModelParamsAtom,
-} from '@/helpers/atoms/Thread.atom'
-
type Props = {
name: string
title: string
+ enabled?: boolean
description: string
checked: boolean
+ onValueChanged?: (e: string | number | boolean) => void
}
-const Checkbox: React.FC<Props> = ({ name, title, checked, description }) => {
- const { updateModelParameter } = useUpdateModelParameters()
- const threadId = useAtomValue(getActiveThreadIdAtom)
-
- const activeModelParams = useAtomValue(getActiveThreadModelParamsAtom)
-
- const modelSettingParams = toSettingParams(activeModelParams)
-
- const engineParams = getConfigurationsData(modelSettingParams)
-
- const setEngineParamsUpdate = useSetAtom(engineParamsUpdateAtom)
-
- const serverEnabled = useAtomValue(serverEnabledAtom)
-
- const { stopModel } = useActiveModel()
-
+const Checkbox: React.FC<Props> = ({
+ title,
+ checked,
+ enabled = true,
+ description,
+ onValueChanged,
+}) => {
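+  // Controlled component: state changes are delegated to the parent through the optional onValueChanged callback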
const onCheckedChange = (checked: boolean) => {
- if (!threadId) return
- if (engineParams.some((x) => x.name.includes(name))) {
- setEngineParamsUpdate(true)
- stopModel()
- } else {
- setEngineParamsUpdate(false)
- }
- updateModelParameter(threadId, name, checked)
+ onValueChanged?.(checked)
}
return (
@@ -80,7 +52,7 @@ const Checkbox: React.FC<Props> = ({ name, title, checked, description }) => {
)
diff --git a/web/containers/DropdownListSidebar/index.tsx b/web/containers/DropdownListSidebar/index.tsx
index eb867f54e..140a1aba1 100644
--- a/web/containers/DropdownListSidebar/index.tsx
+++ b/web/containers/DropdownListSidebar/index.tsx
@@ -26,6 +26,8 @@ import { useMainViewState } from '@/hooks/useMainViewState'
import useRecommendedModel from '@/hooks/useRecommendedModel'
+import useUpdateModelParameters from '@/hooks/useUpdateModelParameters'
+
import { toGibibytes } from '@/utils/converter'
import ModelLabel from '../ModelLabel'
@@ -34,10 +36,8 @@ import OpenAiKeyInput from '../OpenAiKeyInput'
import { serverEnabledAtom } from '@/helpers/atoms/LocalServer.atom'
import {
- ModelParams,
activeThreadAtom,
setThreadModelParamsAtom,
- threadStatesAtom,
} from '@/helpers/atoms/Thread.atom'
export const selectedModelAtom = atom<Model | undefined>(undefined)
@@ -49,7 +49,6 @@ const DropdownListSidebar = ({
strictedThread?: boolean
}) => {
const activeThread = useAtomValue(activeThreadAtom)
- const threadStates = useAtomValue(threadStatesAtom)
const [selectedModel, setSelectedModel] = useAtom(selectedModelAtom)
const setThreadModelParams = useSetAtom(setThreadModelParamsAtom)
@@ -58,15 +57,7 @@ const DropdownListSidebar = ({
const { setMainViewState } = useMainViewState()
const [loader, setLoader] = useState(0)
const { recommendedModel, downloadedModels } = useRecommendedModel()
-
- /**
- * Default value for max_tokens and ctx_len
- * Its to avoid OOM issue since a model can set a big number for these settings
- */
- const defaultValue = (value?: number) => {
- if (value && value < 4096) return value
- return 4096
- }
+ const { updateModelParameter } = useUpdateModelParameters()
useEffect(() => {
if (!activeThread) return
@@ -78,31 +69,7 @@ const DropdownListSidebar = ({
model = recommendedModel
}
setSelectedModel(model)
- const finishInit = threadStates[activeThread.id].isFinishInit ?? true
- if (finishInit) return
- const modelParams: ModelParams = {
- ...model?.parameters,
- ...model?.settings,
- /**
- * This is to set default value for these settings instead of maximum value
- * Should only apply when model.json has these settings
- */
- ...(model?.parameters.max_tokens && {
- max_tokens: defaultValue(model?.parameters.max_tokens),
- }),
- ...(model?.settings.ctx_len && {
- ctx_len: defaultValue(model?.settings.ctx_len),
- }),
- }
- setThreadModelParams(activeThread.id, modelParams)
- }, [
- recommendedModel,
- activeThread,
- threadStates,
- downloadedModels,
- setThreadModelParams,
- setSelectedModel,
- ])
+ }, [recommendedModel, activeThread, downloadedModels, setSelectedModel])
// This is fake loader please fix this when we have realtime percentage when load model
useEffect(() => {
@@ -144,7 +111,16 @@ const DropdownListSidebar = ({
...model?.parameters,
...model?.settings,
}
+      // Update model parameter to the thread state
setThreadModelParams(activeThread.id, modelParams)
+
+ // Update model parameter to the thread file
+ if (model)
+ updateModelParameter(activeThread.id, {
+ params: modelParams,
+ modelId: model.id,
+ engine: model.engine,
+ })
}
},
[
@@ -154,6 +130,7 @@ const DropdownListSidebar = ({
setSelectedModel,
setServerEnabled,
setThreadModelParams,
+ updateModelParameter,
]
)
diff --git a/web/containers/Layout/index.tsx b/web/containers/Layout/index.tsx
index 033038bad..77a1fe971 100644
--- a/web/containers/Layout/index.tsx
+++ b/web/containers/Layout/index.tsx
@@ -12,7 +12,8 @@ import TopBar from '@/containers/Layout/TopBar'
import { MainViewState } from '@/constants/screens'
import { useMainViewState } from '@/hooks/useMainViewState'
-import { SUCCESS_SET_NEW_DESTINATION } from '@/hooks/useVaultDirectory'
+
+import { SUCCESS_SET_NEW_DESTINATION } from '@/screens/Settings/Advanced/DataFolder'
const BaseLayout = (props: PropsWithChildren) => {
const { children } = props
@@ -27,7 +28,6 @@ const BaseLayout = (props: PropsWithChildren) => {
useEffect(() => {
if (localStorage.getItem(SUCCESS_SET_NEW_DESTINATION) === 'true') {
setMainViewState(MainViewState.Settings)
- localStorage.removeItem(SUCCESS_SET_NEW_DESTINATION)
}
}, [setMainViewState])
diff --git a/web/containers/Loader/GenerateResponse.tsx b/web/containers/Loader/GenerateResponse.tsx
new file mode 100644
index 000000000..457c44987
--- /dev/null
+++ b/web/containers/Loader/GenerateResponse.tsx
@@ -0,0 +1,39 @@
+import React, { useEffect, useState } from 'react'
+
+export default function GenerateResponse() {
+ const [loader, setLoader] = useState(0)
+
+  // This is a fake loader; replace it once we can report realtime progress while loading the model
+ useEffect(() => {
+ if (loader === 24) {
+ setTimeout(() => {
+ setLoader(loader + 1)
+ }, 250)
+ } else if (loader === 50) {
+ setTimeout(() => {
+ setLoader(loader + 1)
+ }, 250)
+ } else if (loader === 78) {
+ setTimeout(() => {
+ setLoader(loader + 1)
+ }, 250)
+ } else if (loader === 85) {
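+      // hold the fake progress at 85% so it never looks finished before the response arrives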
+ setLoader(85)
+ } else {
+ setLoader(loader + 1)
+ }
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [loader])
+
+ return (
+    <div className="flex items-center space-x-3">
+      {/* fake progress bar driven by the loader state above */}
+      <div className="h-2 w-24 overflow-hidden rounded-full bg-zinc-200 dark:bg-zinc-700">
+        <div
+          className="h-full rounded-full bg-blue-500 transition-all"
+          style={{ width: `${loader}%` }}
+        />
+      </div>
+      <p>Generating response...</p>
+    </div>
+ )
+}
diff --git a/web/containers/ModelConfigInput/index.tsx b/web/containers/ModelConfigInput/index.tsx
index e409fd424..d573a0bf9 100644
--- a/web/containers/ModelConfigInput/index.tsx
+++ b/web/containers/ModelConfigInput/index.tsx
@@ -7,65 +7,26 @@ import {
TooltipTrigger,
} from '@janhq/uikit'
-import { useAtomValue, useSetAtom } from 'jotai'
-
import { InfoIcon } from 'lucide-react'
-import { useActiveModel } from '@/hooks/useActiveModel'
-import useUpdateModelParameters from '@/hooks/useUpdateModelParameters'
-
-import { getConfigurationsData } from '@/utils/componentSettings'
-
-import { toSettingParams } from '@/utils/modelParam'
-
-import { serverEnabledAtom } from '@/helpers/atoms/LocalServer.atom'
-import {
- engineParamsUpdateAtom,
- getActiveThreadIdAtom,
- getActiveThreadModelParamsAtom,
-} from '@/helpers/atoms/Thread.atom'
-
type Props = {
title: string
+ enabled?: boolean
name: string
description: string
placeholder: string
value: string
+ onValueChanged?: (e: string | number | boolean) => void
}
const ModelConfigInput: React.FC<Props> = ({
title,
- name,
+ enabled = true,
value,
description,
placeholder,
+ onValueChanged,
}) => {
- const { updateModelParameter } = useUpdateModelParameters()
- const threadId = useAtomValue(getActiveThreadIdAtom)
-
- const activeModelParams = useAtomValue(getActiveThreadModelParamsAtom)
-
- const modelSettingParams = toSettingParams(activeModelParams)
-
- const engineParams = getConfigurationsData(modelSettingParams)
-
- const setEngineParamsUpdate = useSetAtom(engineParamsUpdateAtom)
-
- const { stopModel } = useActiveModel()
-
- const serverEnabled = useAtomValue(serverEnabledAtom)
-
-  const onValueChanged = (e: React.ChangeEvent<HTMLInputElement>) => {
- if (!threadId) return
- if (engineParams.some((x) => x.name.includes(name))) {
- setEngineParamsUpdate(true)
- stopModel()
- } else {
- setEngineParamsUpdate(false)
- }
- updateModelParameter(threadId, name, e.target.value)
- }
-
return (
@@ -86,9 +47,9 @@ const ModelConfigInput: React.FC<Props> = ({
)
diff --git a/web/containers/Providers/EventHandler.tsx b/web/containers/Providers/EventHandler.tsx
index 1f9d6d7af..ec0fbfc90 100644
--- a/web/containers/Providers/EventHandler.tsx
+++ b/web/containers/Providers/EventHandler.tsx
@@ -1,5 +1,5 @@
/* eslint-disable @typescript-eslint/no-explicit-any */
-import { ReactNode, useEffect, useRef } from 'react'
+import { ReactNode, useCallback, useEffect, useRef } from 'react'
import {
events,
@@ -13,9 +13,15 @@ import {
} from '@janhq/core'
import { useAtomValue, useSetAtom } from 'jotai'
-import { activeModelAtom, stateModelAtom } from '@/hooks/useActiveModel'
+import {
+ activeModelAtom,
+ loadModelErrorAtom,
+ stateModelAtom,
+} from '@/hooks/useActiveModel'
import { useGetDownloadedModels } from '@/hooks/useGetDownloadedModels'
+import { queuedMessageAtom } from '@/hooks/useSendChatMessage'
+
import { toaster } from '../Toast'
import { extensionManager } from '@/extension'
@@ -26,6 +32,7 @@ import {
import {
updateThreadWaitingForResponseAtom,
threadsAtom,
+ isGeneratingResponseAtom,
} from '@/helpers/atoms/Thread.atom'
export default function EventHandler({ children }: { children: ReactNode }) {
@@ -34,11 +41,14 @@ export default function EventHandler({ children }: { children: ReactNode }) {
const { downloadedModels } = useGetDownloadedModels()
const setActiveModel = useSetAtom(activeModelAtom)
const setStateModel = useSetAtom(stateModelAtom)
+ const setQueuedMessage = useSetAtom(queuedMessageAtom)
+ const setLoadModelError = useSetAtom(loadModelErrorAtom)
const updateThreadWaiting = useSetAtom(updateThreadWaitingForResponseAtom)
const threads = useAtomValue(threadsAtom)
const modelsRef = useRef(downloadedModels)
const threadsRef = useRef(threads)
+ const setIsGeneratingResponse = useSetAtom(isGeneratingResponseAtom)
useEffect(() => {
threadsRef.current = threads
@@ -48,51 +58,72 @@ export default function EventHandler({ children }: { children: ReactNode }) {
modelsRef.current = downloadedModels
}, [downloadedModels])
- async function handleNewMessageResponse(message: ThreadMessage) {
- addNewMessage(message)
- }
+ const onNewMessageResponse = useCallback(
+ (message: ThreadMessage) => {
+ addNewMessage(message)
+ },
+ [addNewMessage]
+ )
- async function handleModelReady(model: Model) {
- setActiveModel(model)
- toaster({
- title: 'Success!',
- description: `Model ${model.id} has been started.`,
- })
- setStateModel(() => ({
- state: 'stop',
- loading: false,
- model: model.id,
- }))
- }
+ const onModelReady = useCallback(
+ (model: Model) => {
+ setActiveModel(model)
+ toaster({
+ title: 'Success!',
+ description: `Model ${model.id} has been started.`,
+ type: 'success',
+ })
+ setStateModel(() => ({
+ state: 'stop',
+ loading: false,
+ model: model.id,
+ }))
+ },
+ [setActiveModel, setStateModel]
+ )
- async function handleModelStopped() {
- setTimeout(async () => {
+ const onModelStopped = useCallback(() => {
+ setTimeout(() => {
setActiveModel(undefined)
setStateModel({ state: 'start', loading: false, model: '' })
}, 500)
- }
+ }, [setActiveModel, setStateModel])
- async function handleModelFail(res: any) {
- const errorMessage = `${res.error}`
- alert(errorMessage)
- setStateModel(() => ({
- state: 'start',
- loading: false,
- model: res.modelId,
- }))
- }
+ const onModelInitFailed = useCallback(
+ (res: any) => {
+ const errorMessage = `${res.error}`
+ console.error('Failed to load model: ' + errorMessage)
+ setLoadModelError(errorMessage)
+ setStateModel(() => ({
+ state: 'start',
+ loading: false,
+ model: res.modelId,
+ }))
+ setQueuedMessage(false)
+ },
+ [setStateModel, setQueuedMessage, setLoadModelError]
+ )
- async function handleMessageResponseUpdate(message: ThreadMessage) {
- updateMessage(
- message.id,
- message.thread_id,
- message.content,
- message.status
- )
- if (message.status !== MessageStatus.Pending) {
+ const onMessageResponseUpdate = useCallback(
+ (message: ThreadMessage) => {
+ updateMessage(
+ message.id,
+ message.thread_id,
+ message.content,
+ message.status
+ )
+ if (message.status === MessageStatus.Pending) {
+ if (message.content.length) {
+ updateThreadWaiting(message.thread_id, false)
+ setIsGeneratingResponse(false)
+ }
+ return
+ }
// Mark the thread as not waiting for response
updateThreadWaiting(message.thread_id, false)
+ setIsGeneratingResponse(false)
+
const thread = threadsRef.current?.find((e) => e.id == message.thread_id)
if (thread) {
const messageContent = message.content[0]?.text.value ?? ''
@@ -111,26 +142,33 @@ export default function EventHandler({ children }: { children: ReactNode }) {
.get(ExtensionTypeEnum.Conversational)
?.addNewMessage(message)
}
- }
- }
+ },
+ [updateMessage, updateThreadWaiting]
+ )
useEffect(() => {
+ console.log('Registering events')
if (window.core?.events) {
- events.on(MessageEvent.OnMessageResponse, handleNewMessageResponse)
- events.on(MessageEvent.OnMessageUpdate, handleMessageResponseUpdate)
- events.on(ModelEvent.OnModelReady, handleModelReady)
- events.on(ModelEvent.OnModelFail, handleModelFail)
- events.on(ModelEvent.OnModelStopped, handleModelStopped)
+ events.on(MessageEvent.OnMessageResponse, onNewMessageResponse)
+ events.on(MessageEvent.OnMessageUpdate, onMessageResponseUpdate)
+
+ events.on(ModelEvent.OnModelReady, onModelReady)
+ events.on(ModelEvent.OnModelFail, onModelInitFailed)
+ events.on(ModelEvent.OnModelStopped, onModelStopped)
}
- // eslint-disable-next-line react-hooks/exhaustive-deps
- }, [])
+ }, [
+ onNewMessageResponse,
+ onMessageResponseUpdate,
+ onModelReady,
+ onModelInitFailed,
+ onModelStopped,
+ ])
useEffect(() => {
return () => {
- events.off(MessageEvent.OnMessageResponse, handleNewMessageResponse)
- events.off(MessageEvent.OnMessageUpdate, handleMessageResponseUpdate)
+ events.off(MessageEvent.OnMessageResponse, onNewMessageResponse)
+ events.off(MessageEvent.OnMessageUpdate, onMessageResponseUpdate)
}
- // eslint-disable-next-line react-hooks/exhaustive-deps
- }, [])
+ }, [onNewMessageResponse, onMessageResponseUpdate])
return <>{children}</>
}
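
Converting the handlers to `useCallback` is what makes the explicit dependency arrays above safe: each `events.off` call now receives the same function reference that `events.on` registered, so re-renders cannot strand stale listeners. A condensed sketch of the pattern (the wrapper hook itself is illustrative):

```ts
import { useEffect } from 'react'
import { events, MessageEvent, ThreadMessage } from '@janhq/core'

// Sketch: subscribe with a stable handler, unsubscribe with the same
// reference on cleanup. If `handler` were redefined on every render,
// events.off would silently fail to remove the old listener.
export function useMessageResponse(handler: (m: ThreadMessage) => void) {
  useEffect(() => {
    events.on(MessageEvent.OnMessageResponse, handler)
    return () => events.off(MessageEvent.OnMessageResponse, handler)
  }, [handler])
}
```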
diff --git a/web/containers/Providers/EventListener.tsx b/web/containers/Providers/EventListener.tsx
index d91a877d6..62d4cacb6 100644
--- a/web/containers/Providers/EventListener.tsx
+++ b/web/containers/Providers/EventListener.tsx
@@ -105,11 +105,14 @@ export default function EventListenerWrapper({ children }: PropsWithChildren) {
})
}
return () => {}
- }, [])
+ }, [
+ setDownloadState,
+ setDownloadStateCancelled,
+ setDownloadStateFailed,
+ setDownloadStateSuccess,
+ setDownloadedModels,
+ setProgress,
+ ])
- return (
-
- {children}
-
- )
+  return <>{children}</>
}
diff --git a/web/containers/Providers/Jotai.tsx b/web/containers/Providers/Jotai.tsx
index 2554ce38d..103f0d9ee 100644
--- a/web/containers/Providers/Jotai.tsx
+++ b/web/containers/Providers/Jotai.tsx
@@ -9,9 +9,17 @@ type Props = {
}
export const currentPromptAtom = atom('')
+export const fileUploadAtom = atom<FileInfo[]>([])
export const appDownloadProgress = atom(-1)
export const searchAtom = atom('')
export default function JotaiWrapper({ children }: Props) {
return <Provider>{children}</Provider>
}
+
+export type FileType = 'image' | 'pdf'
+
+export type FileInfo = {
+ file: File
+ type: FileType
+}
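
`fileUploadAtom` holds at most one pending attachment and is cleared after sending (see `useSendChatMessage` below). A hedged sketch of a consumer:

```ts
import { useAtom } from 'jotai'
import { FileInfo, fileUploadAtom } from '@/containers/Providers/Jotai'

// Sketch: classify a picked file and stage it for the next message.
// The single-element array mirrors how ChatInput uses the atom.
export function useAttachFile() {
  const [fileUpload, setFileUpload] = useAtom(fileUploadAtom)

  const attach = (file: File) => {
    const type: FileInfo['type'] = file.type.startsWith('image/')
      ? 'image'
      : 'pdf'
    setFileUpload([{ file, type }])
  }

  return { fileUpload, attach, clear: () => setFileUpload([]) }
}
```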
diff --git a/web/containers/Providers/index.tsx b/web/containers/Providers/index.tsx
index f9726e43d..895c22177 100644
--- a/web/containers/Providers/index.tsx
+++ b/web/containers/Providers/index.tsx
@@ -6,8 +6,6 @@ import { Toaster } from 'react-hot-toast'
import { TooltipProvider } from '@janhq/uikit'
-import { PostHogProvider } from 'posthog-js/react'
-
import GPUDriverPrompt from '@/containers/GPUDriverPromptModal'
import EventListenerWrapper from '@/containers/Providers/EventListener'
import JotaiWrapper from '@/containers/Providers/Jotai'
@@ -21,7 +19,7 @@ import {
setupBaseExtensions,
} from '@/services/extensionService'
-import { instance } from '@/utils/posthog'
+import Umami from '@/utils/umami'
import KeyListener from './KeyListener'
@@ -70,25 +68,22 @@ const Providers = (props: PropsWithChildren) => {
}, [setupCore])
return (
-
-
-
- {setupCore && activated && (
-
-
-
-
- {children}
-
- {!isMac && }
-
-
-
-
- )}
-
-
-
+
+
+
+ {setupCore && activated && (
+
+
+
+ {children}
+ {!isMac && }
+
+
+
+
+ )}
+
+
)
}
diff --git a/web/containers/Shortcut/index.tsx b/web/containers/Shortcut/index.tsx
index 6153f48a7..dd0518b56 100644
--- a/web/containers/Shortcut/index.tsx
+++ b/web/containers/Shortcut/index.tsx
@@ -1,6 +1,6 @@
export default function ShortCut(props: { menu: string }) {
const { menu } = props
- const symbol = isMac ? '⌘' : 'Ctrl'
+ const symbol = isMac ? '⌘' : 'Ctrl + '
return (
diff --git a/web/containers/SliderRightPanel/index.tsx b/web/containers/SliderRightPanel/index.tsx
index d9ed00f83..7c017e70f 100644
--- a/web/containers/SliderRightPanel/index.tsx
+++ b/web/containers/SliderRightPanel/index.tsx
@@ -9,74 +9,36 @@ import {
TooltipPortal,
TooltipTrigger,
} from '@janhq/uikit'
-import { useAtomValue, useSetAtom } from 'jotai'
import { InfoIcon } from 'lucide-react'
-import { useActiveModel } from '@/hooks/useActiveModel'
import { useClickOutside } from '@/hooks/useClickOutside'
-import useUpdateModelParameters from '@/hooks/useUpdateModelParameters'
-
-import { getConfigurationsData } from '@/utils/componentSettings'
-import { toSettingParams } from '@/utils/modelParam'
-
-import { serverEnabledAtom } from '@/helpers/atoms/LocalServer.atom'
-import {
- engineParamsUpdateAtom,
- getActiveThreadIdAtom,
- getActiveThreadModelParamsAtom,
-} from '@/helpers/atoms/Thread.atom'
-
type Props = {
name: string
title: string
+ enabled: boolean
description: string
min: number
max: number
step: number
value: number
+ onValueChanged: (e: string | number | boolean) => void
}
const SliderRightPanel: React.FC<Props> = ({
- name,
title,
+ enabled,
min,
max,
step,
description,
value,
+ onValueChanged,
}) => {
- const { updateModelParameter } = useUpdateModelParameters()
- const threadId = useAtomValue(getActiveThreadIdAtom)
-
- const serverEnabled = useAtomValue(serverEnabledAtom)
-
- const activeModelParams = useAtomValue(getActiveThreadModelParamsAtom)
-
- const modelSettingParams = toSettingParams(activeModelParams)
-
- const engineParams = getConfigurationsData(modelSettingParams)
-
- const setEngineParamsUpdate = useSetAtom(engineParamsUpdateAtom)
-
- const { stopModel } = useActiveModel()
-
const [showTooltip, setShowTooltip] = useState({ max: false, min: false })
useClickOutside(() => setShowTooltip({ max: false, min: false }), null, [])
-
- const onValueChanged = (e: number[]) => {
- if (!threadId) return
- if (engineParams.some((x) => x.name.includes(name))) {
- setEngineParamsUpdate(true)
- stopModel()
- } else {
- setEngineParamsUpdate(false)
- }
- updateModelParameter(threadId, name, e[0])
- }
-
return (
@@ -99,11 +61,11 @@ const SliderRightPanel: React.FC<Props> = ({
<Slider
value={[value]}
onValueChange={(e) => onValueChanged?.(e[0])}
min={min}
max={max}
step={step}
- disabled={serverEnabled}
+ disabled={!enabled}
/>
{min}
@@ -118,18 +80,18 @@ const SliderRightPanel: React.FC<Props> = ({
min={min}
max={max}
value={String(value)}
- disabled={serverEnabled}
+ disabled={!enabled}
onBlur={(e) => {
if (Number(e.target.value) > Number(max)) {
- onValueChanged([Number(max)])
+ onValueChanged?.(Number(max))
setShowTooltip({ max: true, min: false })
} else if (Number(e.target.value) < Number(min)) {
- onValueChanged([Number(min)])
+ onValueChanged?.(Number(min))
setShowTooltip({ max: false, min: true })
}
}}
onChange={(e) => {
- onValueChanged([Number(e.target.value)])
+ onValueChanged?.(Number(e.target.value))
}}
/>
diff --git a/web/containers/Toast/index.tsx b/web/containers/Toast/index.tsx
index c5e5f03da..7cffa89b9 100644
--- a/web/containers/Toast/index.tsx
+++ b/web/containers/Toast/index.tsx
@@ -6,7 +6,99 @@ import { twMerge } from 'tailwind-merge'
type Props = {
title?: string
description?: string
- type?: 'default' | 'error' | 'success'
+ type?: 'default' | 'error' | 'success' | 'warning'
+}
+
+const ErrorIcon = () => {
+ return (
+
+
+
+ )
+}
+
+const WarningIcon = () => {
+ return (
+
+
+
+ )
+}
+
+const SuccessIcon = () => {
+ return (
+
+
+
+ )
+}
+
+const DefaultIcon = () => {
+ return (
+
+
+
+ )
+}
+
+const renderIcon = (type: string) => {
+ switch (type) {
+    case 'warning':
+      return <WarningIcon />
+
+    case 'error':
+      return <ErrorIcon />
+
+    case 'success':
+      return <SuccessIcon />
+
+    default:
+      return <DefaultIcon />
+  }
+}
export function toaster(props: Props) {
@@ -16,37 +108,52 @@ export function toaster(props: Props) {
return (
-
-
- {title}
-
-
- {description}
-
+
+
{renderIcon(type)}
+
+
{title}
+
{description}
+
+
toast.dismiss(t.id)}
+ />
-
toast.dismiss(t.id)}
- />
)
},
- { id: 'toast', duration: 3000 }
+ { id: 'toast', duration: 2000, position: 'top-right' }
+ )
+}
+
+export function snackbar(props: Props) {
+ const { description, type = 'default' } = props
+ return toast.custom(
+ (t) => {
+ return (
+
+
+
{renderIcon(type)}
+
{description}
+
toast.dismiss(t.id)}
+ />
+
+
+ )
+ },
+ { id: 'snackbar', duration: 2000, position: 'bottom-center' }
)
}
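
Both helpers now share `renderIcon` and differ mainly in placement and chrome: `toaster` shows a titled card at the top right, `snackbar` a single-line strip at the bottom center, each auto-dismissing after 2000 ms. Typical calls (the model id is hypothetical):

```ts
import { snackbar, toaster } from '@/containers/Toast'

toaster({
  title: 'Download Completed',
  description: 'Download tinyllama-1b completed', // hypothetical id
  type: 'success',
})

snackbar({ description: 'Thread settings saved', type: 'default' })
```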
diff --git a/web/helpers/atoms/SystemBar.atom.ts b/web/helpers/atoms/SystemBar.atom.ts
index aa5e77d58..42ef7b29f 100644
--- a/web/helpers/atoms/SystemBar.atom.ts
+++ b/web/helpers/atoms/SystemBar.atom.ts
@@ -2,5 +2,6 @@ import { atom } from 'jotai'
export const totalRamAtom = atom<number>(0)
export const usedRamAtom = atom(0)
+export const availableRamAtom = atom(0)
export const cpuUsageAtom = atom(0)
diff --git a/web/helpers/atoms/Thread.atom.ts b/web/helpers/atoms/Thread.atom.ts
index 26b1e9c59..cab286bd1 100644
--- a/web/helpers/atoms/Thread.atom.ts
+++ b/web/helpers/atoms/Thread.atom.ts
@@ -23,6 +23,7 @@ export const setActiveThreadIdAtom = atom(
export const waitingToSendMessage = atom<boolean | undefined>(undefined)
+export const isGeneratingResponseAtom = atom<boolean | undefined>(undefined)
/**
* Stores all thread states for the current user
*/
@@ -46,18 +47,6 @@ export const deleteThreadStateAtom = atom(
}
)
-export const updateThreadInitSuccessAtom = atom(
- null,
- (get, set, threadId: string) => {
- const currentState = { ...get(threadStatesAtom) }
- currentState[threadId] = {
- ...currentState[threadId],
- isFinishInit: true,
- }
- set(threadStatesAtom, currentState)
- }
-)
-
export const updateThreadWaitingForResponseAtom = atom(
null,
(get, set, threadId: string, waitingForResponse: boolean) => {
diff --git a/web/hooks/useActiveModel.ts b/web/hooks/useActiveModel.ts
index 336f0be21..54a1fdbe0 100644
--- a/web/hooks/useActiveModel.ts
+++ b/web/hooks/useActiveModel.ts
@@ -1,5 +1,5 @@
import { events, Model, ModelEvent } from '@janhq/core'
-import { atom, useAtom, useAtomValue } from 'jotai'
+import { atom, useAtom, useAtomValue, useSetAtom } from 'jotai'
import { toaster } from '@/containers/Toast'
@@ -9,6 +9,7 @@ import { LAST_USED_MODEL_ID } from './useRecommendedModel'
import { activeThreadAtom } from '@/helpers/atoms/Thread.atom'
export const activeModelAtom = atom<Model | undefined>(undefined)
+export const loadModelErrorAtom = atom<string | undefined>(undefined)
export const stateModelAtom = atom({
state: 'start',
@@ -21,6 +22,7 @@ export function useActiveModel() {
const activeThread = useAtomValue(activeThreadAtom)
const [stateModel, setStateModel] = useAtom(stateModelAtom)
const { downloadedModels } = useGetDownloadedModels()
+ const setLoadModelError = useSetAtom(loadModelErrorAtom)
const startModel = async (modelId: string) => {
if (
@@ -31,6 +33,7 @@ export function useActiveModel() {
return
}
// TODO: incase we have multiple assistants, the configuration will be from assistant
+ setLoadModelError(undefined)
setActiveModel(undefined)
@@ -42,6 +45,7 @@ export function useActiveModel() {
toaster({
title: `Model ${modelId} not found!`,
description: `Please download the model first.`,
+ type: 'warning',
})
setStateModel(() => ({
state: 'start',
diff --git a/web/hooks/useCreateNewThread.ts b/web/hooks/useCreateNewThread.ts
index e374d0cc5..ee8df22df 100644
--- a/web/hooks/useCreateNewThread.ts
+++ b/web/hooks/useCreateNewThread.ts
@@ -7,19 +7,23 @@ import {
ThreadState,
Model,
} from '@janhq/core'
-import { atom, useAtomValue, useSetAtom } from 'jotai'
+import { atom, useSetAtom } from 'jotai'
+
+import { selectedModelAtom } from '@/containers/DropdownListSidebar'
+import { fileUploadAtom } from '@/containers/Providers/Jotai'
import { generateThreadId } from '@/utils/thread'
-import useDeleteThread from './useDeleteThread'
+import useRecommendedModel from './useRecommendedModel'
+
+import useSetActiveThread from './useSetActiveThread'
import { extensionManager } from '@/extension'
import {
threadsAtom,
- setActiveThreadIdAtom,
threadStatesAtom,
updateThreadAtom,
- updateThreadInitSuccessAtom,
+ setThreadModelParamsAtom,
} from '@/helpers/atoms/Thread.atom'
const createNewThreadAtom = atom(null, (get, set, newThread: Thread) => {
@@ -30,7 +34,6 @@ const createNewThreadAtom = atom(null, (get, set, newThread: Thread) => {
hasMore: false,
waitingForResponse: false,
lastMessage: undefined,
- isFinishInit: false,
}
currentState[newThread.id] = threadState
set(threadStatesAtom, currentState)
@@ -41,45 +44,35 @@ const createNewThreadAtom = atom(null, (get, set, newThread: Thread) => {
})
export const useCreateNewThread = () => {
- const threadStates = useAtomValue(threadStatesAtom)
- const updateThreadFinishInit = useSetAtom(updateThreadInitSuccessAtom)
const createNewThread = useSetAtom(createNewThreadAtom)
- const setActiveThreadId = useSetAtom(setActiveThreadIdAtom)
+ const { setActiveThread } = useSetActiveThread()
const updateThread = useSetAtom(updateThreadAtom)
+ const setFileUpload = useSetAtom(fileUploadAtom)
+ const setSelectedModel = useSetAtom(selectedModelAtom)
+ const setThreadModelParams = useSetAtom(setThreadModelParamsAtom)
- const { deleteThread } = useDeleteThread()
+ const { recommendedModel, downloadedModels } = useRecommendedModel()
const requestCreateNewThread = async (
assistant: Assistant,
model?: Model | undefined
) => {
- // loop through threads state and filter if there's any thread that is not finish init
- let unfinishedInitThreadId: string | undefined = undefined
- for (const key in threadStates) {
- const isFinishInit = threadStates[key].isFinishInit ?? true
- if (!isFinishInit) {
- unfinishedInitThreadId = key
- break
- }
- }
+ const defaultModel = model ?? recommendedModel ?? downloadedModels[0]
- if (unfinishedInitThreadId) {
- await deleteThread(unfinishedInitThreadId)
- }
-
- const modelId = model ? model.id : '*'
const createdAt = Date.now()
const assistantInfo: ThreadAssistantInfo = {
assistant_id: assistant.id,
assistant_name: assistant.name,
+ tools: assistant.tools,
model: {
- id: modelId,
- settings: {},
- parameters: {},
- engine: undefined,
+ id: defaultModel?.id ?? '*',
+ settings: defaultModel?.settings ?? {},
+ parameters: defaultModel?.parameters ?? {},
+ engine: defaultModel?.engine,
},
instructions: assistant.instructions,
}
+
const threadId = generateThreadId(assistant.id)
const thread: Thread = {
id: threadId,
@@ -91,19 +84,27 @@ export const useCreateNewThread = () => {
}
// add the new thread on top of the thread list to the state
+    // TODO: Why do we keep a thread list and separate thread states? They should be combined.
createNewThread(thread)
- setActiveThreadId(thread.id)
+
+ setSelectedModel(defaultModel)
+ setThreadModelParams(thread.id, {
+ ...defaultModel?.settings,
+ ...defaultModel?.parameters,
+ })
+
+ // Delete the file upload state
+ setFileUpload([])
+ // Update thread metadata
+ await updateThreadMetadata(thread)
+
+ setActiveThread(thread)
}
- function updateThreadMetadata(thread: Thread) {
+ async function updateThreadMetadata(thread: Thread) {
updateThread(thread)
- const threadState = threadStates[thread.id]
- const isFinishInit = threadState?.isFinishInit ?? true
- if (!isFinishInit) {
- updateThreadFinishInit(thread.id)
- }
- extensionManager
+ await extensionManager
.get(ExtensionTypeEnum.Conversational)
?.saveThread(thread)
}
diff --git a/web/hooks/useDeleteModel.ts b/web/hooks/useDeleteModel.ts
index cd7292997..fa0cfb45e 100644
--- a/web/hooks/useDeleteModel.ts
+++ b/web/hooks/useDeleteModel.ts
@@ -19,6 +19,7 @@ export default function useDeleteModel() {
toaster({
title: 'Model Deletion Successful',
description: `The model ${model.id} has been successfully deleted.`,
+ type: 'success',
})
}
diff --git a/web/hooks/useDeleteThread.ts b/web/hooks/useDeleteThread.ts
index 00ba98b99..87cee125d 100644
--- a/web/hooks/useDeleteThread.ts
+++ b/web/hooks/useDeleteThread.ts
@@ -21,7 +21,6 @@ import {
threadsAtom,
setActiveThreadIdAtom,
deleteThreadStateAtom,
- threadStatesAtom,
updateThreadStateLastMessageAtom,
} from '@/helpers/atoms/Thread.atom'
@@ -34,7 +33,6 @@ export default function useDeleteThread() {
const deleteMessages = useSetAtom(deleteChatMessagesAtom)
const cleanMessages = useSetAtom(cleanChatMessagesAtom)
const deleteThreadState = useSetAtom(deleteThreadStateAtom)
- const threadStates = useAtomValue(threadStatesAtom)
const updateThreadLastMessage = useSetAtom(updateThreadStateLastMessageAtom)
const cleanThread = async (threadId: string) => {
@@ -74,21 +72,16 @@ export default function useDeleteThread() {
const availableThreads = threads.filter((c) => c.id !== threadId)
setThreads(availableThreads)
- const deletingThreadState = threadStates[threadId]
- const isFinishInit = deletingThreadState?.isFinishInit ?? true
-
// delete the thread state
deleteThreadState(threadId)
- if (isFinishInit) {
- deleteMessages(threadId)
- setCurrentPrompt('')
- toaster({
- title: 'Thread successfully deleted.',
- description: `Thread ${threadId} has been successfully deleted.`,
- })
- }
-
+ deleteMessages(threadId)
+ setCurrentPrompt('')
+ toaster({
+ title: 'Thread successfully deleted.',
+ description: `Thread ${threadId} has been successfully deleted.`,
+ type: 'success',
+ })
if (availableThreads.length > 0) {
setActiveThreadId(availableThreads[0].id)
} else {
diff --git a/web/hooks/useDownloadModel.ts b/web/hooks/useDownloadModel.ts
index 49dbe1d5b..528108d18 100644
--- a/web/hooks/useDownloadModel.ts
+++ b/web/hooks/useDownloadModel.ts
@@ -6,6 +6,7 @@ import {
ModelExtension,
abortDownload,
joinPath,
+ ModelArtifact,
} from '@janhq/core'
import { useSetAtom } from 'jotai'
@@ -25,6 +26,23 @@ export default function useDownloadModel() {
const addNewDownloadingModel = useSetAtom(addNewDownloadingModelAtom)
const downloadModel = async (model: Model) => {
+ const childrenDownloadProgress: DownloadState[] = []
+ model.sources.forEach((source: ModelArtifact) => {
+ childrenDownloadProgress.push({
+ modelId: source.filename,
+ time: {
+ elapsed: 0,
+ remaining: 0,
+ },
+ speed: 0,
+ percent: 0,
+ size: {
+ total: 0,
+ transferred: 0,
+ },
+ })
+ })
+
// set an initial download state
setDownloadState({
modelId: model.id,
@@ -38,6 +56,7 @@ export default function useDownloadModel() {
total: 0,
transferred: 0,
},
+ children: childrenDownloadProgress,
})
addNewDownloadingModel(model)
@@ -46,6 +65,7 @@ export default function useDownloadModel() {
.get(ExtensionTypeEnum.Model)
?.downloadModel(model, { ignoreSSL, proxy })
}
+
const abortModelDownload = async (model: Model) => {
await abortDownload(
await joinPath(['models', model.id, modelBinFileName(model)])
diff --git a/web/hooks/useDownloadState.ts b/web/hooks/useDownloadState.ts
index d39ab5e58..37f41d2a1 100644
--- a/web/hooks/useDownloadState.ts
+++ b/web/hooks/useDownloadState.ts
@@ -26,6 +26,7 @@ const setDownloadStateSuccessAtom = atom(null, (get, set, modelId: string) => {
toaster({
title: 'Download Completed',
description: `Download ${modelId} completed`,
+ type: 'success',
})
})
@@ -61,6 +62,7 @@ const setDownloadStateCancelledAtom = atom(
toaster({
title: 'Cancel Download',
description: `Model ${modelId} cancel download`,
+ type: 'warning',
})
return
diff --git a/web/hooks/useFactoryReset.ts b/web/hooks/useFactoryReset.ts
new file mode 100644
index 000000000..56994d4c4
--- /dev/null
+++ b/web/hooks/useFactoryReset.ts
@@ -0,0 +1,59 @@
+import { useEffect, useState } from 'react'
+
+import { fs, AppConfiguration, joinPath, getUserHomePath } from '@janhq/core'
+
+export default function useFactoryReset() {
+  const [defaultJanDataFolder, setDefaultJanDataFolder] = useState('')
+
+ useEffect(() => {
+ async function getDefaultJanDataFolder() {
+ const homePath = await getUserHomePath()
+ const defaultJanDataFolder = await joinPath([homePath, 'jan'])
+      setDefaultJanDataFolder(defaultJanDataFolder)
+ }
+ getDefaultJanDataFolder()
+ }, [])
+
+ const resetAll = async (keepCurrentFolder?: boolean) => {
+ // read the place of jan data folder
+ const appConfiguration: AppConfiguration | undefined =
+ await window.core?.api?.getAppConfigurations()
+
+    if (!appConfiguration) {
+      console.debug('Failed to get app configuration')
+      return
+    }
+
+    console.debug('appConfiguration: ', appConfiguration)
+    const janDataFolderPath = appConfiguration.data_folder
+
+ if (defaultJanDataFolder === janDataFolderPath) {
+ console.debug('Jan data folder is already at user home')
+ } else {
+ // if jan data folder is not at user home, we update the app configuration to point to user home
+ if (!keepCurrentFolder) {
+ const configuration: AppConfiguration = {
+ data_folder: defaultJanDataFolder,
+ }
+ await window.core?.api?.updateAppConfiguration(configuration)
+ }
+ }
+
+ const modelPath = await joinPath([janDataFolderPath, 'models'])
+ const threadPath = await joinPath([janDataFolderPath, 'threads'])
+
+ console.debug(`Removing models at ${modelPath}`)
+ await fs.rmdirSync(modelPath, { recursive: true })
+
+ console.debug(`Removing threads at ${threadPath}`)
+ await fs.rmdirSync(threadPath, { recursive: true })
+
+ // reset the localStorage
+ localStorage.clear()
+ await window.core?.api?.relaunch()
+ }
+
+ return {
+ defaultJanDataFolder,
+ resetAll,
+ }
+}
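
`resetAll` deletes the `models` and `threads` folders under the active Jan data folder, optionally repoints the configuration back at the default `~/jan`, clears `localStorage`, and relaunches. A hedged call-site sketch (the confirming wrapper is hypothetical):

```ts
import useFactoryReset from '@/hooks/useFactoryReset'

// Sketch: keepCurrentFolder=true wipes models/threads but leaves a
// custom data-folder location configured; false also resets the
// configuration to the default ~/jan before relaunching.
export function useConfirmedFactoryReset() {
  const { defaultJanDataFolder, resetAll } = useFactoryReset()

  const confirmAndReset = async (keepCurrentFolder: boolean) => {
    console.debug(`Default Jan data folder: ${defaultJanDataFolder}`)
    await resetAll(keepCurrentFolder) // destructive, then relaunch
  }

  return { confirmAndReset }
}
```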
diff --git a/web/hooks/useGetConfiguredModels.ts b/web/hooks/useGetConfiguredModels.ts
index 5662d534e..8be052ae2 100644
--- a/web/hooks/useGetConfiguredModels.ts
+++ b/web/hooks/useGetConfiguredModels.ts
@@ -1,7 +1,6 @@
-import { useEffect, useState } from 'react'
+import { useCallback, useEffect, useState } from 'react'
-import { ExtensionTypeEnum, ModelExtension } from '@janhq/core'
-import { Model } from '@janhq/core'
+import { ExtensionTypeEnum, ModelExtension, Model } from '@janhq/core'
import { extensionManager } from '@/extension/ExtensionManager'
@@ -9,23 +8,23 @@ export function useGetConfiguredModels() {
const [loading, setLoading] = useState(false)
const [models, setModels] = useState<Model[]>([])
-  const getConfiguredModels = async (): Promise<Model[]> => {
-    const models = await extensionManager
-      .get<ModelExtension>(ExtensionTypeEnum.Model)
-      ?.getConfiguredModels()
-    return models ?? []
-  }
-
- async function fetchModels() {
+ const fetchModels = useCallback(async () => {
setLoading(true)
const models = await getConfiguredModels()
setLoading(false)
setModels(models)
- }
+ }, [])
useEffect(() => {
fetchModels()
- }, [])
+ }, [fetchModels])
return { loading, models }
}
+
+const getConfiguredModels = async (): Promise<Model[]> => {
+  const models = await extensionManager
+    .get<ModelExtension>(ExtensionTypeEnum.Model)
+    ?.getConfiguredModels()
+  return models ?? []
+}
diff --git a/web/hooks/useGetDownloadedModels.ts b/web/hooks/useGetDownloadedModels.ts
index f509fce63..bba420858 100644
--- a/web/hooks/useGetDownloadedModels.ts
+++ b/web/hooks/useGetDownloadedModels.ts
@@ -6,7 +6,7 @@ import { atom, useAtom } from 'jotai'
import { extensionManager } from '@/extension/ExtensionManager'
-const downloadedModelsAtom = atom<Model[]>([])
+export const downloadedModelsAtom = atom<Model[]>([])
export function useGetDownloadedModels() {
const [downloadedModels, setDownloadedModels] = useAtom(downloadedModelsAtom)
@@ -15,7 +15,8 @@ export function useGetDownloadedModels() {
getDownloadedModels().then((downloadedModels) => {
setDownloadedModels(downloadedModels)
})
- }, [setDownloadedModels])
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [])
return { downloadedModels, setDownloadedModels }
}
diff --git a/web/hooks/useGetSystemResources.ts b/web/hooks/useGetSystemResources.ts
index 8dffa8eb4..de595ad7b 100644
--- a/web/hooks/useGetSystemResources.ts
+++ b/web/hooks/useGetSystemResources.ts
@@ -6,6 +6,7 @@ import { useSetAtom } from 'jotai'
import { extensionManager } from '@/extension/ExtensionManager'
import {
+ availableRamAtom,
cpuUsageAtom,
totalRamAtom,
usedRamAtom,
@@ -16,6 +17,7 @@ export default function useGetSystemResources() {
const [cpu, setCPU] = useState(0)
const setTotalRam = useSetAtom(totalRamAtom)
const setUsedRam = useSetAtom(usedRamAtom)
+ const setAvailableRam = useSetAtom(availableRamAtom)
const setCpuUsage = useSetAtom(cpuUsageAtom)
const getSystemResources = async () => {
@@ -40,6 +42,10 @@ export default function useGetSystemResources() {
setTotalRam(resourceInfor.mem.totalMemory)
setRam(Math.round(ram * 100))
+ if (resourceInfor.mem.totalMemory && resourceInfor.mem.usedMemory)
+ setAvailableRam(
+ resourceInfor.mem.totalMemory - resourceInfor.mem.usedMemory
+ )
setCPU(Math.round(currentLoadInfor?.cpu?.usage ?? 0))
setCpuUsage(Math.round(currentLoadInfor?.cpu?.usage ?? 0))
}
diff --git a/web/hooks/usePath.ts b/web/hooks/usePath.ts
index 70dbfa6bb..aea25bef1 100644
--- a/web/hooks/usePath.ts
+++ b/web/hooks/usePath.ts
@@ -3,23 +3,15 @@ import { useAtomValue } from 'jotai'
import { selectedModelAtom } from '@/containers/DropdownListSidebar'
-import { activeThreadAtom, threadStatesAtom } from '@/helpers/atoms/Thread.atom'
+import { activeThreadAtom } from '@/helpers/atoms/Thread.atom'
export const usePath = () => {
const activeThread = useAtomValue(activeThreadAtom)
- const threadStates = useAtomValue(threadStatesAtom)
const selectedModel = useAtomValue(selectedModelAtom)
const onReviewInFinder = async (type: string) => {
// TODO: this logic should be refactored.
- if (type !== 'Model') {
- if (!activeThread) return
- const activeThreadState = threadStates[activeThread.id]
- if (!activeThreadState.isFinishInit) {
- alert('Thread is not started yet')
- return
- }
- }
+ if (type !== 'Model' && !activeThread) return
const userSpace = await getJanDataFolderPath()
let filePath = undefined
@@ -48,14 +40,7 @@ export const usePath = () => {
const onViewJson = async (type: string) => {
// TODO: this logic should be refactored.
- if (type !== 'Model') {
- if (!activeThread) return
- const activeThreadState = threadStates[activeThread.id]
- if (!activeThreadState.isFinishInit) {
- alert('Thread is not started yet')
- return
- }
- }
+ if (type !== 'Model' && !activeThread) return
const userSpace = await getJanDataFolderPath()
let filePath = undefined
@@ -86,8 +71,32 @@ export const usePath = () => {
openFileExplorer(fullPath)
}
+  const onViewFile = async (id: string) => {
+    if (!activeThread) return
+
+    const userSpace = await getJanDataFolderPath()
+    const filePath = await joinPath([
+      'threads',
+      `${activeThread.id}/files`,
+      `${id}`,
+    ])
+    const fullPath = await joinPath([userSpace, filePath])
+    openFileExplorer(fullPath)
+  }
+
+  const onViewFileContainer = async () => {
+    if (!activeThread) return
+
+    const userSpace = await getJanDataFolderPath()
+    const filePath = await joinPath(['threads', `${activeThread.id}/files`])
+    const fullPath = await joinPath([userSpace, filePath])
+    openFileExplorer(fullPath)
+  }
+
return {
onReviewInFinder,
onViewJson,
+ onViewFile,
+ onViewFileContainer,
}
}
diff --git a/web/hooks/useRecommendedModel.ts b/web/hooks/useRecommendedModel.ts
index cc47d3fe6..427d2bf73 100644
--- a/web/hooks/useRecommendedModel.ts
+++ b/web/hooks/useRecommendedModel.ts
@@ -26,7 +26,6 @@ export default function useRecommendedModel() {
const activeModel = useAtomValue(activeModelAtom)
const [downloadedModels, setDownloadedModels] = useState<Model[]>([])
const [recommendedModel, setRecommendedModel] = useState<Model | undefined>()
- const threadStates = useAtomValue(threadStatesAtom)
const activeThread = useAtomValue(activeThreadAtom)
const getAndSortDownloadedModels = useCallback(async (): Promise<Model[]> => {
@@ -44,27 +43,11 @@ export default function useRecommendedModel() {
> => {
const models = await getAndSortDownloadedModels()
if (!activeThread) return
+ const modelId = activeThread.assistants[0]?.model.id
+ const model = models.find((model) => model.id === modelId)
- const finishInit = threadStates[activeThread.id].isFinishInit ?? true
- if (finishInit) {
- const modelId = activeThread.assistants[0]?.model.id
- const model = models.find((model) => model.id === modelId)
-
- if (model) {
- setRecommendedModel(model)
- }
-
- return
- } else {
- const modelId = activeThread.assistants[0]?.model.id
- if (modelId !== '*') {
- const model = models.find((model) => model.id === modelId)
-
- if (model) {
- setRecommendedModel(model)
- }
- return
- }
+ if (model) {
+ setRecommendedModel(model)
}
if (activeModel) {
@@ -108,6 +91,7 @@ export default function useRecommendedModel() {
console.debug(`Using last used model ${lastUsedModel.id}`)
setRecommendedModel(lastUsedModel)
+ // eslint-disable-next-line react-hooks/exhaustive-deps
}, [getAndSortDownloadedModels, activeThread])
useEffect(() => {
diff --git a/web/hooks/useSendChatMessage.ts b/web/hooks/useSendChatMessage.ts
index bf9740489..7d89764db 100644
--- a/web/hooks/useSendChatMessage.ts
+++ b/web/hooks/useSendChatMessage.ts
@@ -1,4 +1,5 @@
-import { useEffect, useRef, useState } from 'react'
+/* eslint-disable @typescript-eslint/no-explicit-any */
+import { useEffect, useRef } from 'react'
import {
ChatCompletionMessage,
@@ -13,19 +14,21 @@ import {
Model,
ConversationalExtension,
MessageEvent,
+ InferenceEngine,
+ ChatCompletionMessageContentType,
+ AssistantTool,
} from '@janhq/core'
-import { useAtom, useAtomValue, useSetAtom } from 'jotai'
+import { atom, useAtom, useAtomValue, useSetAtom } from 'jotai'
import { ulid } from 'ulid'
import { selectedModelAtom } from '@/containers/DropdownListSidebar'
-import { currentPromptAtom } from '@/containers/Providers/Jotai'
-
-import { toaster } from '@/containers/Toast'
+import { currentPromptAtom, fileUploadAtom } from '@/containers/Providers/Jotai'
+import { getBase64 } from '@/utils/base64'
import { toRuntimeParams, toSettingParams } from '@/utils/modelParam'
-import { useActiveModel } from './useActiveModel'
+import { loadModelErrorAtom, useActiveModel } from './useActiveModel'
import { extensionManager } from '@/extension/ExtensionManager'
import {
@@ -36,47 +39,53 @@ import {
activeThreadAtom,
engineParamsUpdateAtom,
getActiveThreadModelParamsAtom,
- threadStatesAtom,
+ isGeneratingResponseAtom,
updateThreadAtom,
- updateThreadInitSuccessAtom,
updateThreadWaitingForResponseAtom,
} from '@/helpers/atoms/Thread.atom'
+export const queuedMessageAtom = atom(false)
+export const reloadModelAtom = atom(false)
+
export default function useSendChatMessage() {
const activeThread = useAtomValue(activeThreadAtom)
const addNewMessage = useSetAtom(addNewMessageAtom)
const updateThread = useSetAtom(updateThreadAtom)
const updateThreadWaiting = useSetAtom(updateThreadWaitingForResponseAtom)
- const [currentPrompt, setCurrentPrompt] = useAtom(currentPromptAtom)
+ const setCurrentPrompt = useSetAtom(currentPromptAtom)
const currentMessages = useAtomValue(getCurrentChatMessagesAtom)
const { activeModel } = useActiveModel()
const selectedModel = useAtomValue(selectedModelAtom)
const { startModel } = useActiveModel()
- const [queuedMessage, setQueuedMessage] = useState(false)
+ const setQueuedMessage = useSetAtom(queuedMessageAtom)
+ const loadModelFailed = useAtomValue(loadModelErrorAtom)
const modelRef = useRef<Model | undefined>()
- const threadStates = useAtomValue(threadStatesAtom)
- const updateThreadInitSuccess = useSetAtom(updateThreadInitSuccessAtom)
+  const loadModelFailedRef = useRef<string | undefined>()
const activeModelParams = useAtomValue(getActiveThreadModelParamsAtom)
-
const engineParamsUpdate = useAtomValue(engineParamsUpdateAtom)
- const setEngineParamsUpdate = useSetAtom(engineParamsUpdateAtom)
- const [reloadModel, setReloadModel] = useState(false)
+ const setEngineParamsUpdate = useSetAtom(engineParamsUpdateAtom)
+ const setReloadModel = useSetAtom(reloadModelAtom)
+ const [fileUpload, setFileUpload] = useAtom(fileUploadAtom)
+ const setIsGeneratingResponse = useSetAtom(isGeneratingResponseAtom)
useEffect(() => {
modelRef.current = activeModel
}, [activeModel])
+ useEffect(() => {
+ loadModelFailedRef.current = loadModelFailed
+ }, [loadModelFailed])
+
const resendChatMessage = async (currentMessage: ThreadMessage) => {
if (!activeThread) {
console.error('No active thread')
return
}
-
+ setIsGeneratingResponse(true)
updateThreadWaiting(activeThread.id, true)
-
const messages: ChatCompletionMessage[] = [
activeThread.assistants[0]?.instructions,
]
@@ -113,83 +122,36 @@ export default function useSendChatMessage() {
if (activeModel?.id !== modelId) {
setQueuedMessage(true)
startModel(modelId)
- await WaitForModelStarting(modelId)
+ await waitForModelStarting(modelId)
setQueuedMessage(false)
}
events.emit(MessageEvent.OnMessageSent, messageRequest)
}
- // TODO: Refactor @louis
- const WaitForModelStarting = async (modelId: string) => {
-    return new Promise<void>((resolve) => {
- setTimeout(async () => {
- if (modelRef.current?.id !== modelId) {
- console.debug('waiting for model to start')
- await WaitForModelStarting(modelId)
- resolve()
- } else {
- resolve()
- }
- }, 200)
- })
- }
-
- const sendChatMessage = async () => {
- if (!currentPrompt || currentPrompt.trim().length === 0) return
+ const sendChatMessage = async (message: string) => {
+ if (!message || message.trim().length === 0) return
if (!activeThread) {
console.error('No active thread')
return
}
+ setIsGeneratingResponse(true)
if (engineParamsUpdate) setReloadModel(true)
- const activeThreadState = threadStates[activeThread.id]
const runtimeParams = toRuntimeParams(activeModelParams)
const settingParams = toSettingParams(activeModelParams)
- // if the thread is not initialized, we need to initialize it first
- if (
- !activeThreadState.isFinishInit ||
- activeThread.assistants[0].model.id !== selectedModel?.id
- ) {
- if (!selectedModel) {
- toaster({ title: 'Please select a model' })
- return
- }
- const assistantId = activeThread.assistants[0].assistant_id ?? ''
- const assistantName = activeThread.assistants[0].assistant_name ?? ''
- const instructions = activeThread.assistants[0].instructions ?? ''
-
- const updatedThread: Thread = {
- ...activeThread,
- assistants: [
- {
- assistant_id: assistantId,
- assistant_name: assistantName,
- instructions: instructions,
- model: {
- id: selectedModel.id,
- settings: settingParams,
- parameters: runtimeParams,
- engine: selectedModel.engine,
- },
- },
- ],
- }
- updateThreadInitSuccess(activeThread.id)
- updateThread(updatedThread)
-
- await extensionManager
- .get(ExtensionTypeEnum.Conversational)
- ?.saveThread(updatedThread)
- }
-
updateThreadWaiting(activeThread.id, true)
-
- const prompt = currentPrompt.trim()
+ const prompt = message.trim()
setCurrentPrompt('')
+    const base64Blob = fileUpload[0]
+      ? await getBase64(fileUpload[0].file)
+      : undefined
+
+ const msgId = ulid()
+
const messages: ChatCompletionMessage[] = [
activeThread.assistants[0]?.instructions,
]
@@ -210,16 +172,41 @@ export default function useSendChatMessage() {
.concat([
{
role: ChatCompletionRole.User,
- content: prompt,
+ content:
+ selectedModel && base64Blob
+ ? [
+ {
+ type: ChatCompletionMessageContentType.Text,
+ text: prompt,
+ },
+ {
+ type: ChatCompletionMessageContentType.Doc,
+ doc_url: {
+ url: `threads/${activeThread.id}/files/${msgId}.pdf`,
+ },
+ },
+ ]
+ : prompt,
} as ChatCompletionMessage,
])
)
- const msgId = ulid()
- const modelRequest = selectedModel ?? activeThread.assistants[0].model
+ let modelRequest = selectedModel ?? activeThread.assistants[0].model
if (runtimeParams.stream == null) {
runtimeParams.stream = true
}
+ // Add middleware to the model request with tool retrieval enabled
+ if (
+ activeThread.assistants[0].tools?.some(
+ (tool: AssistantTool) => tool.type === 'retrieval' && tool.enabled
+ )
+ ) {
+ modelRequest = {
+ ...modelRequest,
+ engine: InferenceEngine.tool_retrieval_enabled,
+ proxyEngine: modelRequest.engine,
+ }
+ }
const messageRequest: MessageRequest = {
id: msgId,
threadId: activeThread.id,
@@ -229,8 +216,44 @@ export default function useSendChatMessage() {
settings: settingParams,
parameters: runtimeParams,
},
+ thread: activeThread,
}
const timestamp = Date.now()
+
+ const content: any = []
+
+ if (base64Blob && fileUpload[0]?.type === 'image') {
+ content.push({
+ type: ContentType.Image,
+ text: {
+ value: prompt,
+ annotations: [base64Blob],
+ },
+ })
+ }
+
+ if (base64Blob && fileUpload[0]?.type === 'pdf') {
+ content.push({
+ type: ContentType.Pdf,
+ text: {
+ value: prompt,
+ annotations: [base64Blob],
+ name: fileUpload[0].file.name,
+ size: fileUpload[0].file.size,
+ },
+ })
+ }
+
+ if (prompt && !base64Blob) {
+ content.push({
+ type: ContentType.Text,
+ text: {
+ value: prompt,
+ annotations: [],
+ },
+ })
+ }
+
const threadMessage: ThreadMessage = {
id: msgId,
thread_id: activeThread.id,
@@ -239,18 +262,21 @@ export default function useSendChatMessage() {
created: timestamp,
updated: timestamp,
object: 'thread.message',
- content: [
- {
- type: ContentType.Text,
- text: {
- value: prompt,
- annotations: [],
- },
- },
- ],
+ content: content,
}
addNewMessage(threadMessage)
+ if (base64Blob) {
+ setFileUpload([])
+ }
+
+ const updatedThread: Thread = {
+ ...activeThread,
+ updated: timestamp,
+ }
+
+    // Bump the thread's last-updated timestamp when a message is sent
+ updateThread(updatedThread)
await extensionManager
.get(ExtensionTypeEnum.Conversational)
@@ -261,7 +287,7 @@ export default function useSendChatMessage() {
if (activeModel?.id !== modelId) {
setQueuedMessage(true)
startModel(modelId)
- await WaitForModelStarting(modelId)
+ await waitForModelStarting(modelId)
setQueuedMessage(false)
}
@@ -271,10 +297,21 @@ export default function useSendChatMessage() {
setEngineParamsUpdate(false)
}
+ const waitForModelStarting = async (modelId: string) => {
+    return new Promise<void>((resolve) => {
+ setTimeout(async () => {
+ if (modelRef.current?.id !== modelId && !loadModelFailedRef.current) {
+ await waitForModelStarting(modelId)
+ resolve()
+ } else {
+ resolve()
+ }
+ }, 200)
+ })
+ }
+
return {
- reloadModel,
sendChatMessage,
resendChatMessage,
- queuedMessage,
}
}
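
`waitForModelStarting` polls by rescheduling itself every 200 ms and resolves once the active-model ref matches or a load error has been recorded (which is why `loadModelErrorAtom` is mirrored into a ref above). An iterative sketch of the same loop:

```ts
// Sketch: flat-loop equivalent of the recursive promise chain above.
const sleep = (ms: number) => new Promise<void>((r) => setTimeout(r, ms))

async function waitForModel(
  modelId: string,
  getActiveModelId: () => string | undefined,
  getLoadError: () => string | undefined
): Promise<void> {
  while (getActiveModelId() !== modelId && !getLoadError()) {
    await sleep(200) // poll interval matches the hook above
  }
}
```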
diff --git a/web/hooks/useSetActiveThread.ts b/web/hooks/useSetActiveThread.ts
index 76a744bcd..3545d0d23 100644
--- a/web/hooks/useSetActiveThread.ts
+++ b/web/hooks/useSetActiveThread.ts
@@ -13,6 +13,7 @@ import { setConvoMessagesAtom } from '@/helpers/atoms/ChatMessage.atom'
import {
ModelParams,
getActiveThreadIdAtom,
+ isGeneratingResponseAtom,
setActiveThreadIdAtom,
setThreadModelParamsAtom,
} from '@/helpers/atoms/Thread.atom'
@@ -22,6 +23,7 @@ export default function useSetActiveThread() {
const setActiveThreadId = useSetAtom(setActiveThreadIdAtom)
const setThreadMessage = useSetAtom(setConvoMessagesAtom)
const setThreadModelParams = useSetAtom(setThreadModelParamsAtom)
+ const setIsGeneratingResponse = useSetAtom(isGeneratingResponseAtom)
const setActiveThread = async (thread: Thread) => {
if (activeThreadId === thread.id) {
@@ -29,6 +31,7 @@ export default function useSetActiveThread() {
return
}
+ setIsGeneratingResponse(false)
events.emit(InferenceEvent.OnInferenceStopped, thread.id)
// load the corresponding messages
diff --git a/web/hooks/useSettings.ts b/web/hooks/useSettings.ts
index 34d123359..168e72489 100644
--- a/web/hooks/useSettings.ts
+++ b/web/hooks/useSettings.ts
@@ -1,4 +1,4 @@
-import { useEffect, useState } from 'react'
+import { useCallback, useEffect, useState } from 'react'
import { fs, joinPath } from '@janhq/core'
import { atom, useAtom } from 'jotai'
@@ -13,6 +13,7 @@ export const useSettings = () => {
useEffect(() => {
setTimeout(() => validateSettings(), 3000)
+ // eslint-disable-next-line react-hooks/exhaustive-deps
}, [])
const validateSettings = async () => {
@@ -31,7 +32,7 @@ export const useSettings = () => {
})
}
- const readSettings = async () => {
+ const readSettings = useCallback(async () => {
if (!window?.core?.api) {
return
}
@@ -41,7 +42,8 @@ export const useSettings = () => {
return typeof settings === 'object' ? settings : JSON.parse(settings)
}
return {}
- }
+ }, [])
+
const saveSettings = async ({
runMode,
notify,
diff --git a/web/hooks/useThreads.ts b/web/hooks/useThreads.ts
index b79cfea92..b7de014cc 100644
--- a/web/hooks/useThreads.ts
+++ b/web/hooks/useThreads.ts
@@ -5,24 +5,24 @@ import {
ConversationalExtension,
} from '@janhq/core'
-import { useAtom } from 'jotai'
+import { useAtomValue, useSetAtom } from 'jotai'
import useSetActiveThread from './useSetActiveThread'
import { extensionManager } from '@/extension/ExtensionManager'
import {
ModelParams,
+ activeThreadAtom,
threadModelParamsAtom,
threadStatesAtom,
threadsAtom,
} from '@/helpers/atoms/Thread.atom'
const useThreads = () => {
- const [threadStates, setThreadStates] = useAtom(threadStatesAtom)
- const [threads, setThreads] = useAtom(threadsAtom)
- const [threadModelRuntimeParams, setThreadModelRuntimeParams] = useAtom(
- threadModelParamsAtom
- )
+ const setThreadStates = useSetAtom(threadStatesAtom)
+ const setThreads = useSetAtom(threadsAtom)
+ const setThreadModelRuntimeParams = useSetAtom(threadModelParamsAtom)
+ const activeThread = useAtomValue(activeThreadAtom)
const { setActiveThread } = useSetActiveThread()
const getThreads = async () => {
@@ -39,7 +39,6 @@ const useThreads = () => {
hasMore: false,
waitingForResponse: false,
lastMessage,
- isFinishInit: true,
}
const modelParams = thread.assistants?.[0]?.model?.parameters
@@ -51,41 +50,12 @@ const useThreads = () => {
}
})
- // allow at max 1 unfinished init thread and it should be at the top of the list
- let unfinishedThreadId: string | undefined = undefined
-    const unfinishedThreadState: Record<string, ThreadState> = {}
-
- for (const key of Object.keys(threadStates)) {
- const threadState = threadStates[key]
- if (threadState.isFinishInit === false) {
- unfinishedThreadState[key] = threadState
- unfinishedThreadId = key
- break
- }
- }
- const unfinishedThread: Thread | undefined = threads.find(
- (thread) => thread.id === unfinishedThreadId
- )
-
- let allThreads: Thread[] = [...localThreads]
- if (unfinishedThread) {
- allThreads = [unfinishedThread, ...localThreads]
- }
-
- if (unfinishedThreadId) {
- localThreadStates[unfinishedThreadId] =
- unfinishedThreadState[unfinishedThreadId]
-
- threadModelParams[unfinishedThreadId] =
- threadModelRuntimeParams[unfinishedThreadId]
- }
-
// updating app states
setThreadStates(localThreadStates)
- setThreads(allThreads)
+ setThreads(localThreads)
setThreadModelRuntimeParams(threadModelParams)
- if (allThreads.length > 0) {
- setActiveThread(allThreads[0])
+ if (localThreads.length && !activeThread) {
+ setActiveThread(localThreads[0])
}
} catch (error) {
console.error(error)
diff --git a/web/hooks/useUpdateModelParameters.ts b/web/hooks/useUpdateModelParameters.ts
index 80070ef26..694394cee 100644
--- a/web/hooks/useUpdateModelParameters.ts
+++ b/web/hooks/useUpdateModelParameters.ts
@@ -2,12 +2,15 @@
import {
ConversationalExtension,
ExtensionTypeEnum,
+ InferenceEngine,
Thread,
ThreadAssistantInfo,
} from '@janhq/core'
import { useAtomValue, useSetAtom } from 'jotai'
+import { selectedModelAtom } from '@/containers/DropdownListSidebar'
+
import { toRuntimeParams, toSettingParams } from '@/utils/modelParam'
import { extensionManager } from '@/extension'
@@ -19,16 +22,22 @@ import {
threadsAtom,
} from '@/helpers/atoms/Thread.atom'
+export type UpdateModelParameter = {
+ params?: ModelParams
+ modelId?: string
+ engine?: InferenceEngine
+}
+
export default function useUpdateModelParameters() {
const threads = useAtomValue(threadsAtom)
const setThreadModelParams = useSetAtom(setThreadModelParamsAtom)
const activeThreadState = useAtomValue(activeThreadStateAtom)
const activeModelParams = useAtomValue(getActiveThreadModelParamsAtom)
+ const selectedModel = useAtomValue(selectedModelAtom)
const updateModelParameter = async (
threadId: string,
- name: string,
- value: number | boolean | string
+ settings: UpdateModelParameter
) => {
const thread = threads.find((thread) => thread.id === threadId)
if (!thread) {
@@ -40,21 +49,18 @@ export default function useUpdateModelParameters() {
console.error('No active thread')
return
}
+
+ const params = settings.modelId
+ ? settings.params
+ : { ...activeModelParams, ...settings.params }
+
const updatedModelParams: ModelParams = {
- ...activeModelParams,
- // Explicitly set the value to an array if the name is 'stop'
- // This is because the inference engine would only accept an array for the 'stop' parameter
- [name]: name === 'stop' ? (value === '' ? [] : [value]) : value,
+ ...params,
}
// update the state
setThreadModelParams(thread.id, updatedModelParams)
- if (!activeThreadState.isFinishInit) {
- // if thread is not initialized, we don't need to update thread.json
- return
- }
-
const assistants = thread.assistants.map(
(assistant: ThreadAssistantInfo) => {
const runtimeParams = toRuntimeParams(updatedModelParams)
@@ -62,6 +68,10 @@ export default function useUpdateModelParameters() {
assistant.model.parameters = runtimeParams
assistant.model.settings = settingParams
+ if (selectedModel) {
+ assistant.model.id = settings.modelId ?? selectedModel?.id
+ assistant.model.engine = settings.engine ?? selectedModel?.engine
+ }
return assistant
}
)
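
Callers now pass one `UpdateModelParameter` object instead of a name/value pair. Without a `modelId`, the given params merge onto the active ones; with a `modelId`, they replace them wholesale (used when switching a thread's model). A hedged sketch; the thread id, model id, and parameter names are placeholders:

```ts
import { InferenceEngine } from '@janhq/core'

import useUpdateModelParameters from '@/hooks/useUpdateModelParameters'

// Sketch: the new object-style calls.
export function useExampleParamUpdates() {
  const { updateModelParameter } = useUpdateModelParameters()

  // Merge one runtime parameter onto the active set.
  const raiseTemperature = () =>
    updateModelParameter('jan_1234', { params: { temperature: 0.8 } })

  // Replace params wholesale while switching the thread's model.
  const switchModel = () =>
    updateModelParameter('jan_1234', {
      modelId: 'mistral-ins-7b-q4',
      engine: InferenceEngine.nitro,
      params: { ctx_len: 4096 },
    })

  return { raiseTemperature, switchModel }
}
```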
diff --git a/web/hooks/useVaultDirectory.ts b/web/hooks/useVaultDirectory.ts
deleted file mode 100644
index 9d7adf2ab..000000000
--- a/web/hooks/useVaultDirectory.ts
+++ /dev/null
@@ -1,87 +0,0 @@
-import { useEffect, useState } from 'react'
-
-import { fs, AppConfiguration } from '@janhq/core'
-
-export const SUCCESS_SET_NEW_DESTINATION = 'successSetNewDestination'
-
-export function useVaultDirectory() {
- const [isSameDirectory, setIsSameDirectory] = useState(false)
- const [isDirectoryConfirm, setIsDirectoryConfirm] = useState(false)
- const [isErrorSetNewDest, setIsErrorSetNewDest] = useState(false)
- const [currentPath, setCurrentPath] = useState('')
- const [newDestinationPath, setNewDestinationPath] = useState('')
-
- useEffect(() => {
- window.core?.api
- ?.getAppConfigurations()
- ?.then((appConfig: AppConfiguration) => {
- setCurrentPath(appConfig.data_folder)
- })
- }, [])
-
- const setNewDestination = async () => {
- const destFolder = await window.core?.api?.selectDirectory()
- setNewDestinationPath(destFolder)
-
- if (destFolder) {
- console.debug(`Destination folder selected: ${destFolder}`)
- try {
- const appConfiguration: AppConfiguration =
- await window.core?.api?.getAppConfigurations()
- const currentJanDataFolder = appConfiguration.data_folder
-
- if (currentJanDataFolder === destFolder) {
- console.debug(
- `Destination folder is the same as current folder. Ignore..`
- )
- setIsSameDirectory(true)
- setIsDirectoryConfirm(false)
- return
- } else {
- setIsSameDirectory(false)
- setIsDirectoryConfirm(true)
- }
- setIsErrorSetNewDest(false)
- } catch (e) {
- console.error(`Error: ${e}`)
- setIsErrorSetNewDest(true)
- }
- }
- }
-
- const applyNewDestination = async () => {
- try {
- const appConfiguration: AppConfiguration =
- await window.core?.api?.getAppConfigurations()
- const currentJanDataFolder = appConfiguration.data_folder
-
- appConfiguration.data_folder = newDestinationPath
-
- await fs.syncFile(currentJanDataFolder, newDestinationPath)
- await window.core?.api?.updateAppConfiguration(appConfiguration)
- console.debug(
- `File sync finished from ${currentPath} to ${newDestinationPath}`
- )
-
- setIsErrorSetNewDest(false)
- localStorage.setItem(SUCCESS_SET_NEW_DESTINATION, 'true')
- await window.core?.api?.relaunch()
- } catch (e) {
- console.error(`Error: ${e}`)
- setIsErrorSetNewDest(true)
- }
- }
-
- return {
- setNewDestination,
- newDestinationPath,
- applyNewDestination,
- isSameDirectory,
- setIsDirectoryConfirm,
- isDirectoryConfirm,
- setIsSameDirectory,
- currentPath,
- isErrorSetNewDest,
- setIsErrorSetNewDest,
- }
-}
diff --git a/web/next.config.js b/web/next.config.js
index 455ba70fc..a2e202c51 100644
--- a/web/next.config.js
+++ b/web/next.config.js
@@ -24,14 +24,9 @@ const nextConfig = {
config.plugins = [
...config.plugins,
new webpack.DefinePlugin({
- PLUGIN_CATALOG: JSON.stringify(
- 'https://cdn.jsdelivr.net/npm/@janhq/plugin-catalog@latest/dist/index.js'
- ),
VERSION: JSON.stringify(packageJson.version),
- ANALYTICS_ID:
- JSON.stringify(process.env.ANALYTICS_ID) ?? JSON.stringify('xxx'),
- ANALYTICS_HOST:
- JSON.stringify(process.env.ANALYTICS_HOST) ?? JSON.stringify('xxx'),
+ ANALYTICS_ID: JSON.stringify(process.env.ANALYTICS_ID),
+ ANALYTICS_HOST: JSON.stringify(process.env.ANALYTICS_HOST),
API_BASE_URL: JSON.stringify('http://localhost:1337'),
isMac: process.platform === 'darwin',
isWindows: process.platform === 'win32',
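
One caveat with dropping the fallback: `JSON.stringify(process.env.ANALYTICS_ID)` evaluates to `undefined` when the variable is unset, so DefinePlugin would inline the literal `undefined` into the bundle; the previous `?? JSON.stringify('xxx')` masked exactly that case. If unset builds should still inject a string, an explicit default (sketch) keeps the constant well-typed:

```ts
// Sketch: guarantee the injected constants are always string literals.
new webpack.DefinePlugin({
  ANALYTICS_ID: JSON.stringify(process.env.ANALYTICS_ID ?? ''),
  ANALYTICS_HOST: JSON.stringify(process.env.ANALYTICS_HOST ?? ''),
})
```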
diff --git a/web/package.json b/web/package.json
index bba3dd48b..498481aa3 100644
--- a/web/package.json
+++ b/web/package.json
@@ -8,6 +8,7 @@
"build": "next build",
"start": "next start",
"lint": "eslint .",
+ "lint:fix": "eslint . --fix",
"format": "prettier --write \"**/*.{js,jsx,ts,tsx}\"",
"compile": "tsc --noEmit -p . --pretty"
},
@@ -21,7 +22,7 @@
"class-variance-authority": "^0.7.0",
"framer-motion": "^10.16.4",
"highlight.js": "^11.9.0",
- "jotai": "^2.4.0",
+ "jotai": "^2.6.0",
"lodash": "^4.17.21",
"lucide-react": "^0.291.0",
"marked": "^9.1.2",
@@ -32,6 +33,7 @@
"posthog-js": "^1.95.1",
"react": "18.2.0",
"react-dom": "18.2.0",
+ "react-dropzone": "^14.2.3",
"react-hook-form": "^7.47.0",
"react-hot-toast": "^2.4.1",
"react-icons": "^4.12.0",
diff --git a/web/screens/Chat/AssistantSetting/index.tsx b/web/screens/Chat/AssistantSetting/index.tsx
new file mode 100644
index 000000000..df516def0
--- /dev/null
+++ b/web/screens/Chat/AssistantSetting/index.tsx
@@ -0,0 +1,78 @@
+import { useAtomValue } from 'jotai'
+
+import { useCreateNewThread } from '@/hooks/useCreateNewThread'
+
+import SettingComponentBuilder, {
+ SettingComponentData,
+} from '../ModelSetting/SettingComponent'
+
+import { activeThreadAtom } from '@/helpers/atoms/Thread.atom'
+
+const AssistantSetting = ({
+ componentData,
+}: {
+ componentData: SettingComponentData[]
+}) => {
+ const activeThread = useAtomValue(activeThreadAtom)
+ const { updateThreadMetadata } = useCreateNewThread()
+
+ return (
+
+ {activeThread && componentData && (
+        <SettingComponentBuilder
+          componentData={componentData}
+          onValueChanged={(name, value) => {
+ if (
+ activeThread.assistants[0].tools &&
+ (name === 'chunk_overlap' || name === 'chunk_size')
+ ) {
+ if (
+ activeThread.assistants[0].tools[0]?.settings.chunk_size <
+ activeThread.assistants[0].tools[0]?.settings.chunk_overlap
+ ) {
+ activeThread.assistants[0].tools[0].settings.chunk_overlap =
+ activeThread.assistants[0].tools[0].settings.chunk_size
+ }
+
+ if (
+ name === 'chunk_size' &&
+ value <
+ activeThread.assistants[0].tools[0].settings.chunk_overlap
+ ) {
+ activeThread.assistants[0].tools[0].settings.chunk_overlap =
+ value
+ } else if (
+ name === 'chunk_overlap' &&
+ value > activeThread.assistants[0].tools[0].settings.chunk_size
+ ) {
+ activeThread.assistants[0].tools[0].settings.chunk_size = value
+ }
+ }
+
+ updateThreadMetadata({
+ ...activeThread,
+ assistants: [
+ {
+ ...activeThread.assistants[0],
+ tools: [
+ {
+ type: 'retrieval',
+ enabled: true,
+ settings: {
+ ...(activeThread.assistants[0].tools &&
+ activeThread.assistants[0].tools[0]?.settings),
+ [name]: value,
+ },
+ },
+ ],
+ },
+ ],
+ })
+ }}
+ />
+ )}
+
+ )
+}
+
+export default AssistantSetting
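
The nested conditionals in `onValueChanged` above exist to maintain a single invariant: `chunk_overlap` can never exceed `chunk_size`, whichever of the two was just edited. A standalone sketch of that rule (the type and function names are illustrative):

```ts
type RetrievalSettings = { chunk_size: number; chunk_overlap: number }

// Sketch: apply one edit, then re-establish chunk_overlap <= chunk_size.
function clampChunkSettings(
  current: RetrievalSettings,
  name: 'chunk_size' | 'chunk_overlap',
  value: number
): RetrievalSettings {
  const next = { ...current, [name]: value }
  if (next.chunk_overlap > next.chunk_size) {
    next.chunk_overlap = next.chunk_size
  }
  return next
}
```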
diff --git a/web/screens/Chat/ChatBody/index.tsx b/web/screens/Chat/ChatBody/index.tsx
index f56e13845..1ce6b591f 100644
--- a/web/screens/Chat/ChatBody/index.tsx
+++ b/web/screens/Chat/ChatBody/index.tsx
@@ -10,6 +10,7 @@ import LogoMark from '@/containers/Brand/Logo/Mark'
import { MainViewState } from '@/constants/screens'
+import { loadModelErrorAtom } from '@/hooks/useActiveModel'
import { useGetDownloadedModels } from '@/hooks/useGetDownloadedModels'
import { useMainViewState } from '@/hooks/useMainViewState'
@@ -24,6 +25,7 @@ const ChatBody: React.FC = () => {
const messages = useAtomValue(getCurrentChatMessagesAtom)
const { downloadedModels } = useGetDownloadedModels()
const { setMainViewState } = useMainViewState()
+ const loadModelError = useAtomValue(loadModelErrorAtom)
if (downloadedModels.length === 0)
return (
@@ -80,9 +82,13 @@ const ChatBody: React.FC = () => {
{messages.map((message, index) => (
-
- {(message.status === MessageStatus.Error ||
- message.status === MessageStatus.Stopped) &&
+ {(message.status !== MessageStatus.Pending ||
+ message.content.length > 0) && (
+
+ )}
+ {!loadModelError &&
+ (message.status === MessageStatus.Error ||
+ message.status === MessageStatus.Stopped) &&
index === messages.length - 1 && (
)}
diff --git a/web/screens/Chat/ChatInput/index.tsx b/web/screens/Chat/ChatInput/index.tsx
new file mode 100644
index 000000000..ee1ac9a41
--- /dev/null
+++ b/web/screens/Chat/ChatInput/index.tsx
@@ -0,0 +1,268 @@
+/* eslint-disable @typescript-eslint/no-explicit-any */
+import { useContext, useEffect, useRef, useState } from 'react'
+
+import { InferenceEvent, MessageStatus, events } from '@janhq/core'
+
+import {
+ Textarea,
+ Button,
+ Tooltip,
+ TooltipArrow,
+ TooltipContent,
+ TooltipPortal,
+ TooltipTrigger,
+} from '@janhq/uikit'
+import { useAtom, useAtomValue } from 'jotai'
+import {
+ FileTextIcon,
+ ImageIcon,
+ StopCircle,
+ PaperclipIcon,
+} from 'lucide-react'
+
+import { twMerge } from 'tailwind-merge'
+
+import { currentPromptAtom, fileUploadAtom } from '@/containers/Providers/Jotai'
+
+import { FeatureToggleContext } from '@/context/FeatureToggle'
+
+import { useActiveModel } from '@/hooks/useActiveModel'
+import { useClickOutside } from '@/hooks/useClickOutside'
+
+import useSendChatMessage from '@/hooks/useSendChatMessage'
+
+import FileUploadPreview from '../FileUploadPreview'
+import ImageUploadPreview from '../ImageUploadPreview'
+
+import { getCurrentChatMessagesAtom } from '@/helpers/atoms/ChatMessage.atom'
+import {
+ activeThreadAtom,
+ getActiveThreadIdAtom,
+ waitingToSendMessage,
+} from '@/helpers/atoms/Thread.atom'
+
+const ChatInput: React.FC = () => {
+ const activeThread = useAtomValue(activeThreadAtom)
+ const { stateModel } = useActiveModel()
+ const messages = useAtomValue(getCurrentChatMessagesAtom)
+
+ const [currentPrompt, setCurrentPrompt] = useAtom(currentPromptAtom)
+ const { sendChatMessage } = useSendChatMessage()
+
+ const activeThreadId = useAtomValue(getActiveThreadIdAtom)
+ const [isWaitingToSend, setIsWaitingToSend] = useAtom(waitingToSendMessage)
+ const [fileUpload, setFileUpload] = useAtom(fileUploadAtom)
+ const textareaRef = useRef<HTMLTextAreaElement>(null)
+ const fileInputRef = useRef<HTMLInputElement>(null)
+ const imageInputRef = useRef<HTMLInputElement>(null)
+ const [showAttachmentMenus, setShowAttachmentMenus] = useState(false)
+ const { experimentalFeature } = useContext(FeatureToggleContext)
+
+ const onPromptChange = (e: React.ChangeEvent<HTMLTextAreaElement>) => {
+ setCurrentPrompt(e.target.value)
+ }
+
+ const refAttachmentMenus = useClickOutside(() => setShowAttachmentMenus(false))
+
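+ // Send a prompt that was queued while the thread was still being created.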
+ useEffect(() => {
+ if (isWaitingToSend && activeThreadId) {
+ setIsWaitingToSend(false)
+ sendChatMessage(currentPrompt)
+ }
+ }, [
+ activeThreadId,
+ isWaitingToSend,
+ currentPrompt,
+ setIsWaitingToSend,
+ sendChatMessage,
+ ])
+
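+ // Refocus the input whenever the active thread changes.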
+ useEffect(() => {
+ if (textareaRef.current) {
+ textareaRef.current.focus()
+ }
+ }, [activeThreadId])
+
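+ // Auto-grow the textarea: reset to the base height, then expand to fit the content.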
+ useEffect(() => {
+ if (textareaRef.current) {
+ textareaRef.current.style.height = '40px'
+ textareaRef.current.style.height = textareaRef.current.scrollHeight + 'px'
+ }
+ }, [currentPrompt])
+
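+ // Enter sends the prompt; Shift+Enter inserts a newline. While a response is
+ // still pending, Enter stops inference instead.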
+ const onKeyDown = async (e: React.KeyboardEvent<HTMLTextAreaElement>) => {
+ if (e.key === 'Enter' && !e.shiftKey) {
+ e.preventDefault()
+ if (messages[messages.length - 1]?.status !== MessageStatus.Pending)
+ sendChatMessage(currentPrompt)
+ else onStopInferenceClick()
+ }
+ }
+
+ const onStopInferenceClick = async () => {
+ events.emit(InferenceEvent.OnInferenceStopped, {})
+ }
+
+ /**
+ * Handles the change event of the document file input element by setting the file upload state.
+ * It is used to display the name of the selected file.
+ * @param event - The change event object.
+ */
+ const handleFileChange = (event: React.ChangeEvent<HTMLInputElement>) => {
+ const file = event.target.files?.[0]
+ if (!file) return
+ setFileUpload([{ file: file, type: 'pdf' }])
+ setCurrentPrompt('Summarize this for me')
+ }
+
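+ // Attach the selected image and pre-fill a default vision prompt.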
+ const handleImageChange = (event: React.ChangeEvent<HTMLInputElement>) => {
+ const file = event.target.files?.[0]
+ if (!file) return
+ setFileUpload([{ file: file, type: 'image' }])
+ setCurrentPrompt('What do you see in this image?')
+ }
+
+ const renderPreview = (fileUpload: any) => {
+ if (fileUpload.length > 0) {
+ if (fileUpload[0].type === 'image') {
+ return
+ } else {
+ return
+ }
+ }
+ }
+
+ return (
+
+
+ {renderPreview(fileUpload)}
+
+
+ {experimentalFeature && (
+
+
+ {
+ if (
+ fileUpload.length > 0 ||
+ (activeThread?.assistants[0].tools &&
+ !activeThread?.assistants[0].tools[0]?.enabled)
+ ) {
+ e.stopPropagation()
+ } else {
+ setShowAttachmentMenus(!showAttachmentMenus)
+ }
+ }}
+ />
+
+
+ {fileUpload.length > 0 ||
+ (activeThread?.assistants[0].tools &&
+ !activeThread?.assistants[0].tools[0]?.enabled && (
+
+ {fileUpload.length !== 0 && (
+
+ Currently, we only support one attachment at a time
+
+ )}
+ {activeThread?.assistants[0].tools &&
+ activeThread?.assistants[0].tools[0]?.enabled ===
+ false && (
+
+ Turn on Retrieval in Assistant Settings to use this
+ feature
+
+ )}
+
+
+ ))}
+
+
+ )}
+
+ {showAttachmentMenus && (
+
+
+
+
+ Image
+
+ {
+ fileInputRef.current?.click()
+ setShowAttachmentMenus(false)
+ }}
+ >
+
+ Document
+
+
+
+ )}
+
+
+
+
+
+ {messages[messages.length - 1]?.status !== MessageStatus.Pending ? (
+
sendChatMessage(currentPrompt)}
+ >
+ Send
+
+ ) : (
+
+
+
+ )}
+
+ )
+}
+
+export default ChatInput
diff --git a/web/screens/Chat/EngineSetting/index.tsx b/web/screens/Chat/EngineSetting/index.tsx
index 4394f835b..2153bcbde 100644
--- a/web/screens/Chat/EngineSetting/index.tsx
+++ b/web/screens/Chat/EngineSetting/index.tsx
@@ -6,11 +6,11 @@ import { selectedModelAtom } from '@/containers/DropdownListSidebar'
import { getConfigurationsData } from '@/utils/componentSettings'
import { toSettingParams } from '@/utils/modelParam'
-import settingComponentBuilder from '../ModelSetting/settingComponentBuilder'
+import SettingComponentBuilder from '../ModelSetting/SettingComponent'
import { getActiveThreadModelParamsAtom } from '@/helpers/atoms/Thread.atom'
-const EngineSetting = () => {
+const EngineSetting = ({ enabled = true }: { enabled?: boolean }) => {
const activeModelParams = useAtomValue(getActiveThreadModelParamsAtom)
const selectedModel = useAtomValue(selectedModelAtom)
@@ -18,13 +18,18 @@ const EngineSetting = () => {
const modelSettingParams = toSettingParams(activeModelParams)
- const componentData = getConfigurationsData(modelSettingParams, selectedModel)
-
- componentData.sort((a, b) => a.title.localeCompare(b.title))
+ const componentData = getConfigurationsData(
+ modelSettingParams,
+ selectedModel
+ ).toSorted((a, b) => a.title.localeCompare(b.title))
return (
- {settingComponentBuilder(componentData)}
+ e.name !== 'prompt_template'}
+ />
)
}
diff --git a/web/screens/Chat/ErrorMessage/index.tsx b/web/screens/Chat/ErrorMessage/index.tsx
index 8879b15be..84a89cee8 100644
--- a/web/screens/Chat/ErrorMessage/index.tsx
+++ b/web/screens/Chat/ErrorMessage/index.tsx
@@ -17,7 +17,6 @@ import {
deleteMessageAtom,
getCurrentChatMessagesAtom,
} from '@/helpers/atoms/ChatMessage.atom'
-import { totalRamAtom } from '@/helpers/atoms/SystemBar.atom'
import { activeThreadAtom } from '@/helpers/atoms/Thread.atom'
const ErrorMessage = ({ message }: { message: ThreadMessage }) => {
@@ -25,8 +24,6 @@ const ErrorMessage = ({ message }: { message: ThreadMessage }) => {
const thread = useAtomValue(activeThreadAtom)
const deleteMessage = useSetAtom(deleteMessageAtom)
const { resendChatMessage } = useSendChatMessage()
- const { activeModel } = useActiveModel()
- const totalRam = useAtomValue(totalRamAtom)
const regenerateMessage = async () => {
const lastMessageIndex = messages.length - 1
@@ -70,33 +67,26 @@ const ErrorMessage = ({ message }: { message: ThreadMessage }) => {
{message.status === MessageStatus.Error && (
- {Number(activeModel?.metadata.size) > totalRam ? (
- <>
- Oops! Model size exceeds available RAM. Consider selecting a
- smaller model or upgrading your RAM for smoother performance.
- >
- ) : (
- <>
- Apologies, something's amiss!
- Jan's in beta. Find troubleshooting guides{' '}
-
- here
- {' '}
- or reach out to us on{' '}
-
- Discord
- {' '}
- for assistance.
- >
- )}
+ <>
+ Apologies, something's amiss!
+ Jan's in beta. Find troubleshooting guides{' '}
+
+ here
+ {' '}
+ or reach out to us on{' '}
+
+ Discord
+ {' '}
+ for assistance.
+ >
)}
diff --git a/web/screens/Chat/FileUploadPreview/Icon.tsx b/web/screens/Chat/FileUploadPreview/Icon.tsx
new file mode 100644
index 000000000..fdfcf5565
--- /dev/null
+++ b/web/screens/Chat/FileUploadPreview/Icon.tsx
@@ -0,0 +1,95 @@
+import React from 'react'
+
+type Props = {
+ type: string
+}
+
+const Icon: React.FC<Props> = ({ type }) => {
+ return (
+
+
+ {type}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ )
+}
+
+export default Icon
diff --git a/web/screens/Chat/FileUploadPreview/index.tsx b/web/screens/Chat/FileUploadPreview/index.tsx
new file mode 100644
index 000000000..7e1a1bebd
--- /dev/null
+++ b/web/screens/Chat/FileUploadPreview/index.tsx
@@ -0,0 +1,47 @@
+import React from 'react'
+
+import { useAtom, useSetAtom } from 'jotai'
+
+import { XIcon } from 'lucide-react'
+
+import { currentPromptAtom, fileUploadAtom } from '@/containers/Providers/Jotai'
+
+import { toGibibytes } from '@/utils/converter'
+
+import Icon from './Icon'
+
+const FileUploadPreview: React.FC = () => {
+ const [fileUpload, setFileUpload] = useAtom(fileUploadAtom)
+ const setCurrentPrompt = useSetAtom(currentPromptAtom)
+
+ const onDeleteClick = () => {
+ setFileUpload([])
+ setCurrentPrompt('')
+ }
+
+ return (
+
+
+
+
+
+
+ {fileUpload[0].file.name.replaceAll(/[-._]/g, ' ')}
+
+
+ {toGibibytes(fileUpload[0].file.size)}
+
+
+
+
+
+
+
+
+ )
+}
+
+export default FileUploadPreview
diff --git a/web/screens/Chat/ImageUploadPreview/index.tsx b/web/screens/Chat/ImageUploadPreview/index.tsx
new file mode 100644
index 000000000..2a9c9b4ba
--- /dev/null
+++ b/web/screens/Chat/ImageUploadPreview/index.tsx
@@ -0,0 +1,54 @@
+import React, { useEffect } from 'react'
+import { useState } from 'react'
+
+import { useSetAtom } from 'jotai'
+
+import { XIcon } from 'lucide-react'
+
+import { currentPromptAtom, fileUploadAtom } from '@/containers/Providers/Jotai'
+
+import { getBase64 } from '@/utils/base64'
+
+type Props = {
+ file: File
+}
+
+const ImageUploadPreview: React.FC<Props> = ({ file }) => {
+ const [base64, setBase64] = useState<string | undefined>()
+ const setFileUpload = useSetAtom(fileUploadAtom)
+ const setCurrentPrompt = useSetAtom(currentPromptAtom)
+
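+ // Convert the selected image to a base64 data URL for the inline preview.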
+ useEffect(() => {
+ getBase64(file)
+ .then((base64) => setBase64(base64))
+ .catch((err) => console.error(err))
+ }, [file])
+
+ if (!base64) {
+ return
+ }
+
+ const onDeleteClick = () => {
+ setFileUpload([])
+ setCurrentPrompt('')
+ }
+
+ return (
+
+
+
+
+ {file.name.replaceAll(/[-._]/g, ' ')}
+
+
+
+
+
+
+ )
+}
+
+export default React.memo(ImageUploadPreview)
diff --git a/web/screens/Chat/LoadModelErrorMessage/index.tsx b/web/screens/Chat/LoadModelErrorMessage/index.tsx
new file mode 100644
index 000000000..d3c4a704d
--- /dev/null
+++ b/web/screens/Chat/LoadModelErrorMessage/index.tsx
@@ -0,0 +1,48 @@
+import { MessageStatus, ThreadMessage } from '@janhq/core'
+import { useAtomValue } from 'jotai'
+
+import { useActiveModel } from '@/hooks/useActiveModel'
+
+import { totalRamAtom } from '@/helpers/atoms/SystemBar.atom'
+
+const LoadModelErrorMessage = () => {
+ const { activeModel } = useActiveModel()
+ const availableRam = useAtomValue(totalRamAtom)
+
+ return (
+ <>
+
+
+ {Number(activeModel?.metadata.size) > availableRam ? (
+ <>
+ Oops! Model size exceeds available RAM. Consider selecting a
+ smaller model or upgrading your RAM for smoother performance.
+ >
+ ) : (
+ <>
+ Apologies, something's amiss!
+ Jan's in beta. Find troubleshooting guides{' '}
+
+ here
+ {' '}
+ or reach out to us on{' '}
+
+ Discord
+ {' '}
+ for assistance.
+ >
+ )}
+
+
+ >
+ )
+}
+export default LoadModelErrorMessage
diff --git a/web/screens/Chat/MessageQueuedBanner/index.tsx b/web/screens/Chat/MessageQueuedBanner/index.tsx
new file mode 100644
index 000000000..5847394b4
--- /dev/null
+++ b/web/screens/Chat/MessageQueuedBanner/index.tsx
@@ -0,0 +1,21 @@
+import { useAtomValue } from 'jotai'
+
+import { queuedMessageAtom } from '@/hooks/useSendChatMessage'
+
+const MessageQueuedBanner: React.FC = () => {
+ const queuedMessage = useAtomValue(queuedMessageAtom)
+
+ return (
+
+ {queuedMessage && (
+
+
+ Message queued. It will be sent once the model has started.
+
+
+ )}
+
+ )
+}
+
+export default MessageQueuedBanner
diff --git a/web/screens/Chat/MessageToolbar/index.tsx b/web/screens/Chat/MessageToolbar/index.tsx
index 183eae814..070022122 100644
--- a/web/screens/Chat/MessageToolbar/index.tsx
+++ b/web/screens/Chat/MessageToolbar/index.tsx
@@ -3,8 +3,9 @@ import {
ExtensionTypeEnum,
ThreadMessage,
ChatCompletionRole,
+ ConversationalExtension,
+ ContentType,
} from '@janhq/core'
-import { ConversationalExtension } from '@janhq/core'
import { useAtomValue, useSetAtom } from 'jotai'
import { RefreshCcw, CopyIcon, Trash2Icon, CheckIcon } from 'lucide-react'
@@ -53,7 +54,9 @@ const MessageToolbar = ({ message }: { message: ThreadMessage }) => {
{message.id === messages[messages.length - 1]?.id &&
- messages[messages.length - 1].status !== MessageStatus.Error && (
+ messages[messages.length - 1].status !== MessageStatus.Error &&
+ messages[messages.length - 1].content[0]?.type !==
+ ContentType.Pdf && (
diff --git a/web/screens/Chat/ModelSetting/SettingComponent.tsx b/web/screens/Chat/ModelSetting/SettingComponent.tsx
new file mode 100644
--- /dev/null
+++ b/web/screens/Chat/ModelSetting/SettingComponent.tsx
+const SettingComponent = ({
+ componentData,
+ selector,
+ updater,
+}: {
+ componentData: SettingComponentData[]
+ selector?: (e: SettingComponentData) => boolean
+ updater?: (
+ threadId: string,
+ name: string,
+ value: string | number | boolean | string[]
+ ) => void
+}) => {
+ const { updateModelParameter } = useUpdateModelParameters()
+
+ const threadId = useAtomValue(getActiveThreadIdAtom)
+
+ const activeModelParams = useAtomValue(getActiveThreadModelParamsAtom)
+
+ const modelSettingParams = toSettingParams(activeModelParams)
+
+ const engineParams = getConfigurationsData(modelSettingParams)
+
+ const setEngineParamsUpdate = useSetAtom(engineParamsUpdateAtom)
+
+ const { stopModel } = useActiveModel()
+
+ const onValueChanged = (
+ name: string,
+ value: string | number | boolean | string[]
+ ) => {
+ if (!threadId) return
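+ // Engine-level settings require a model reload, so flag the update and stop the running model.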
+ if (engineParams.some((x) => x.name.includes(name))) {
+ setEngineParamsUpdate(true)
+ stopModel()
+ } else {
+ setEngineParamsUpdate(false)
+ }
+ if (updater) updater(threadId, name, value)
+ else {
+ // Convert stop string to array
+ if (name === 'stop' && typeof value === 'string') {
+ value = [value]
+ }
+ updateModelParameter(threadId, {
+ params: { [name]: value },
+ })
+ }
+ }
+
+ const components = componentData
+ .filter((x) => (selector ? selector(x) : true))
+ .map((data) => {
+ switch (data.controllerType) {
+ case 'slider':
+ const { min, max, step, value } = data.controllerData as SliderData
+ return (
+
onValueChanged(data.name, value)}
+ />
+ )
+ case 'input':
+ const { placeholder, value: textValue } =
+ data.controllerData as InputData
+ return (
+ onValueChanged(data.name, value)}
+ />
+ )
+ case 'checkbox':
+ const { checked } = data.controllerData as CheckboxData
+ return (
+ onValueChanged(data.name, value)}
+ />
+ )
+ default:
+ return null
+ }
+ })
+
+ return {components}
+}
+
+export default SettingComponent
diff --git a/web/screens/Chat/ModelSetting/index.tsx b/web/screens/Chat/ModelSetting/index.tsx
index ff5d3d40f..ea95363eb 100644
--- a/web/screens/Chat/ModelSetting/index.tsx
+++ b/web/screens/Chat/ModelSetting/index.tsx
@@ -8,7 +8,7 @@ import { selectedModelAtom } from '@/containers/DropdownListSidebar'
import { getConfigurationsData } from '@/utils/componentSettings'
import { toRuntimeParams } from '@/utils/modelParam'
-import settingComponentBuilder from './settingComponentBuilder'
+import SettingComponentBuilder from './SettingComponent'
import { getActiveThreadModelParamsAtom } from '@/helpers/atoms/Thread.atom'
@@ -27,7 +27,10 @@ const ModelSetting = () => {
return (
- {settingComponentBuilder(componentData)}
+ e.name !== 'prompt_template'}
+ />
)
}
diff --git a/web/screens/Chat/ModelSetting/predefinedComponent.ts b/web/screens/Chat/ModelSetting/predefinedComponent.ts
index abcec508e..b67117184 100644
--- a/web/screens/Chat/ModelSetting/predefinedComponent.ts
+++ b/web/screens/Chat/ModelSetting/predefinedComponent.ts
@@ -1,4 +1,4 @@
-import { SettingComponentData } from './settingComponentBuilder'
+import { SettingComponentData } from './SettingComponent'
export const presetConfiguration: Record<string, SettingComponentData> = {
prompt_template: {
@@ -141,4 +141,52 @@ export const presetConfiguration: Record<string, SettingComponentData> = {
value: 1,
},
},
+ // assistant
+ chunk_size: {
+ name: 'chunk_size',
+ title: 'Chunk Size',
+ description: 'Maximum number of tokens in a chunk',
+ controllerType: 'slider',
+ controllerData: {
+ min: 128,
+ max: 2048,
+ step: 128,
+ value: 1024,
+ },
+ },
+ chunk_overlap: {
+ name: 'chunk_overlap',
+ title: 'Chunk Overlap',
+ description: 'Number of tokens overlapping between two adjacent chunks',
+ controllerType: 'slider',
+ controllerData: {
+ min: 32,
+ max: 512,
+ step: 32,
+ value: 64,
+ },
+ },
+ top_k: {
+ name: 'top_k',
+ title: 'Top K',
+ description: 'Number of top-ranked documents to retrieve',
+ controllerType: 'slider',
+ controllerData: {
+ min: 1,
+ max: 5,
+ step: 1,
+ value: 2,
+ },
+ },
+ retrieval_template: {
+ name: 'retrieval_template',
+ title: 'Retrieval Template',
+ description:
+ 'The template to use for retrieval. The following variables are available: {CONTEXT}, {QUESTION}',
+ controllerType: 'input',
+ controllerData: {
+ placeholder: 'Retrieval Template',
+ value: '',
+ },
+ },
}
diff --git a/web/screens/Chat/ModelSetting/settingComponentBuilder.tsx b/web/screens/Chat/ModelSetting/settingComponentBuilder.tsx
deleted file mode 100644
index 8ff8e7c02..000000000
--- a/web/screens/Chat/ModelSetting/settingComponentBuilder.tsx
+++ /dev/null
@@ -1,89 +0,0 @@
-/* eslint-disable no-case-declarations */
-import Checkbox from '@/containers/Checkbox'
-import ModelConfigInput from '@/containers/ModelConfigInput'
-import SliderRightPanel from '@/containers/SliderRightPanel'
-
-export type ControllerType = 'slider' | 'checkbox' | 'input'
-
-export type SettingComponentData = {
- name: string
- title: string
- description: string
- controllerType: ControllerType
- controllerData: SliderData | CheckboxData | InputData
-}
-
-export type InputData = {
- placeholder: string
- value: string
-}
-
-export type SliderData = {
- min: number
- max: number
-
- step: number
- value: number
-}
-
-type CheckboxData = {
- checked: boolean
-}
-
-const settingComponentBuilder = (
- componentData: SettingComponentData[],
- onlyPrompt?: boolean
-) => {
- const components = componentData
- .filter((x) =>
- onlyPrompt ? x.name === 'prompt_template' : x.name !== 'prompt_template'
- )
- .map((data) => {
- switch (data.controllerType) {
- case 'slider':
- const { min, max, step, value } = data.controllerData as SliderData
- return (
-
- )
- case 'input':
- const { placeholder, value: textValue } =
- data.controllerData as InputData
- return (
-
- )
- case 'checkbox':
- const { checked } = data.controllerData as CheckboxData
- return (
-
- )
- default:
- return null
- }
- })
-
- return {components}
-}
-
-export default settingComponentBuilder
diff --git a/web/screens/Chat/RequestDownloadModel/index.tsx b/web/screens/Chat/RequestDownloadModel/index.tsx
new file mode 100644
index 000000000..e62dc562d
--- /dev/null
+++ b/web/screens/Chat/RequestDownloadModel/index.tsx
@@ -0,0 +1,42 @@
+import React, { Fragment, useCallback } from 'react'
+
+import { Button } from '@janhq/uikit'
+
+import LogoMark from '@/containers/Brand/Logo/Mark'
+
+import { MainViewState } from '@/constants/screens'
+
+import { useGetDownloadedModels } from '@/hooks/useGetDownloadedModels'
+import { useMainViewState } from '@/hooks/useMainViewState'
+
+const RequestDownloadModel: React.FC = () => {
+ const { downloadedModels } = useGetDownloadedModels()
+ const { setMainViewState } = useMainViewState()
+
+ const onClick = useCallback(() => {
+ setMainViewState(MainViewState.Hub)
+ }, [setMainViewState])
+
+ return (
+
+ {downloadedModels.length === 0 && (
+
+
+ Welcome!
+
+ You need to download your first model
+
+
+ Explore The Hub
+
+
+ )}
+
+ )
+}
+
+export default React.memo(RequestDownloadModel)
diff --git a/web/screens/Chat/Sidebar/index.tsx b/web/screens/Chat/Sidebar/index.tsx
index 500787218..8088501b9 100644
--- a/web/screens/Chat/Sidebar/index.tsx
+++ b/web/screens/Chat/Sidebar/index.tsx
@@ -1,7 +1,8 @@
/* eslint-disable @typescript-eslint/no-explicit-any */
-import React from 'react'
+import React, { useContext } from 'react'
-import { Input, Textarea } from '@janhq/uikit'
+import { InferenceEngine } from '@janhq/core'
+import { Input, Textarea, Switch } from '@janhq/uikit'
import { atom, useAtomValue } from 'jotai'
@@ -10,17 +11,22 @@ import { twMerge } from 'tailwind-merge'
import LogoMark from '@/containers/Brand/Logo/Mark'
import CardSidebar from '@/containers/CardSidebar'
-import DropdownListSidebar from '@/containers/DropdownListSidebar'
+import DropdownListSidebar, {
+ selectedModelAtom,
+} from '@/containers/DropdownListSidebar'
+
+import { FeatureToggleContext } from '@/context/FeatureToggle'
import { useCreateNewThread } from '@/hooks/useCreateNewThread'
import { getConfigurationsData } from '@/utils/componentSettings'
import { toRuntimeParams, toSettingParams } from '@/utils/modelParam'
+import AssistantSetting from '../AssistantSetting'
import EngineSetting from '../EngineSetting'
import ModelSetting from '../ModelSetting'
-import settingComponentBuilder from '../ModelSetting/settingComponentBuilder'
+import SettingComponentBuilder from '../ModelSetting/SettingComponent'
import {
activeThreadAtom,
@@ -33,18 +39,24 @@ const Sidebar: React.FC = () => {
const showing = useAtomValue(showRightSideBarAtom)
const activeThread = useAtomValue(activeThreadAtom)
const activeModelParams = useAtomValue(getActiveThreadModelParamsAtom)
-
+ const selectedModel = useAtomValue(selectedModelAtom)
const { updateThreadMetadata } = useCreateNewThread()
+ const { experimentalFeature } = useContext(FeatureToggleContext)
const modelEngineParams = toSettingParams(activeModelParams)
const modelRuntimeParams = toRuntimeParams(activeModelParams)
+ const componentDataAssistantSetting = getConfigurationsData(
+ (activeThread?.assistants[0]?.tools &&
+ activeThread?.assistants[0]?.tools[0]?.settings) ??
+ {}
+ )
const componentDataEngineSetting = getConfigurationsData(modelEngineParams)
const componentDataRuntimeSetting = getConfigurationsData(modelRuntimeParams)
return (
{
}}
/>
- {/* Temporary disabled */}
- {/*
-
- Tools
-
-
-
- Retrieval
-
-
-
-
*/}
+ {experimentalFeature && (
+
+ {activeThread?.assistants[0]?.tools &&
+ componentDataAssistantSetting.length > 0 && (
+
+
{
+ if (activeThread)
+ updateThreadMetadata({
+ ...activeThread,
+ assistants: [
+ {
+ ...activeThread.assistants[0],
+ tools: [
+ {
+ type: 'retrieval',
+ enabled: e,
+ settings:
+ (activeThread.assistants[0].tools &&
+ activeThread.assistants[0]
+ .tools[0]?.settings) ??
+ {},
+ },
+ ],
+ },
+ ],
+ })
+ }}
+ />
+ }
+ >
+ {activeThread?.assistants[0]?.tools[0].enabled && (
+
+
+
+ Embedding Engine
+
+
+
+ {selectedModel?.engine ===
+ InferenceEngine.openai
+ ? 'OpenAI'
+ : 'Nitro'}
+
+
+
+
+
+ )}
+
+
+ )}
+
+ )}
- {componentDataRuntimeSetting.length !== 0 && (
+ {componentDataRuntimeSetting.length > 0 && (
@@ -159,13 +224,16 @@ const Sidebar: React.FC = () => {
- {settingComponentBuilder(componentDataEngineSetting, true)}
+ x.name === 'prompt_template'}
+ />
)}
- {componentDataEngineSetting.length !== 0 && (
+ {componentDataEngineSetting.length > 0 && (
diff --git a/web/screens/Chat/SimpleTextMessage/index.tsx b/web/screens/Chat/SimpleTextMessage/index.tsx
index 8754664aa..261bb3497 100644
--- a/web/screens/Chat/SimpleTextMessage/index.tsx
+++ b/web/screens/Chat/SimpleTextMessage/index.tsx
@@ -1,10 +1,23 @@
import React, { useEffect, useRef, useState } from 'react'
-import { ChatCompletionRole, MessageStatus, ThreadMessage } from '@janhq/core'
+import {
+ ChatCompletionRole,
+ ContentType,
+ MessageStatus,
+ ThreadMessage,
+} from '@janhq/core'
+import {
+ Tooltip,
+ TooltipArrow,
+ TooltipContent,
+ TooltipPortal,
+ TooltipTrigger,
+} from '@janhq/uikit'
import hljs from 'highlight.js'
import { useAtomValue } from 'jotai'
+import { FolderOpenIcon } from 'lucide-react'
import { Marked, Renderer } from 'marked'
import { markedHighlight } from 'marked-highlight'
@@ -13,12 +26,13 @@ import { twMerge } from 'tailwind-merge'
import LogoMark from '@/containers/Brand/Logo/Mark'
-import BubbleLoader from '@/containers/Loader/Bubble'
-
import { useClipboard } from '@/hooks/useClipboard'
+import { usePath } from '@/hooks/usePath'
+import { toGibibytes } from '@/utils/converter'
import { displayDate } from '@/utils/datetime'
+import Icon from '../FileUploadPreview/Icon'
import MessageToolbar from '../MessageToolbar'
import { getCurrentChatMessagesAtom } from '@/helpers/atoms/ChatMessage.atom'
@@ -29,6 +43,7 @@ const SimpleTextMessage: React.FC = (props) => {
text = props.content[0]?.text?.value ?? ''
}
const clipboard = useClipboard({ timeout: 1000 })
+ const { onViewFile, onViewFileContainer } = usePath()
const marked: Marked = new Marked(
markedHighlight({
@@ -77,7 +92,6 @@ const SimpleTextMessage: React.FC = (props) => {
const isUser = props.role === ChatCompletionRole.User
const isSystem = props.role === ChatCompletionRole.System
const [tokenCount, setTokenCount] = useState(0)
-
const [lastTimestamp, setLastTimestamp] = useState()
const [tokenSpeed, setTokenSpeed] = useState(0)
const messages = useAtomValue(getCurrentChatMessagesAtom)
@@ -148,6 +162,7 @@ const SimpleTextMessage: React.FC = (props) => {
)}
+
= (props) => {
- {props.status === MessageStatus.Pending &&
- (!props.content[0] || props.content[0].text.value === '') ? (
-
- ) : (
- <>
-
- >
- )}
+ <>
+ {props.content[0]?.type === ContentType.Image && (
+
+
onViewFile(`${props.id}.png`)}
+ />
+
+
+
+
+
+
+
+
+
+ Show in Finder
+
+
+
+
+
+ )}
+
+ {props.content[0]?.type === ContentType.Pdf && (
+
+
+ onViewFile(`${props.id}.${props.content[0]?.type}`)
+ }
+ />
+
+
+
+
+
+
+
+
+ Show in Finder
+
+
+
+
+
+
+
+
+
+ {props.content[0].text.name?.replaceAll(/[-._]/g, ' ')}
+
+
+ {toGibibytes(Number(props.content[0].text.size))}
+
+
+
+ )}
+
+
+ >
)
diff --git a/web/screens/Chat/ThreadList/index.tsx b/web/screens/Chat/ThreadList/index.tsx
index 0e09a20a7..b4a045b1d 100644
--- a/web/screens/Chat/ThreadList/index.tsx
+++ b/web/screens/Chat/ThreadList/index.tsx
@@ -1,4 +1,4 @@
-import { useEffect } from 'react'
+import { useEffect, useState } from 'react'
import {
Modal,
@@ -49,17 +49,19 @@ export default function ThreadList() {
const activeThread = useAtomValue(activeThreadAtom)
const { deleteThread, cleanThread } = useDeleteThread()
const { downloadedModels } = useGetDownloadedModels()
+ const [isThreadsReady, setIsThreadsReady] = useState(false)
const { activeThreadId, setActiveThread: onThreadClick } =
useSetActiveThread()
useEffect(() => {
- getThreads()
+ getThreads().then(() => setIsThreadsReady(true))
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [])
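+ // Only auto-create a default thread after the existing threads have finished loading.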
useEffect(() => {
if (
+ isThreadsReady &&
downloadedModels.length !== 0 &&
threads.length === 0 &&
assistants.length !== 0 &&
@@ -68,7 +70,7 @@ export default function ThreadList() {
requestCreateNewThread(assistants[0])
}
// eslint-disable-next-line react-hooks/exhaustive-deps
- }, [assistants, threads, downloadedModels, activeThread])
+ }, [assistants, threads, downloadedModels, activeThread, isThreadsReady])
return (
@@ -95,13 +97,10 @@ export default function ThreadList() {
}}
>
-
-
{thread.title}
-
- {thread.updated &&
- displayDate(new Date(thread.updated).getTime())}
-
-
+
+ {thread.updated && displayDate(thread.updated)}
+
+
{thread.title}
{lastMessage || 'No new message'}
@@ -160,9 +159,9 @@ export default function ThreadList() {
-
+
Delete thread
diff --git a/web/screens/Chat/index.tsx b/web/screens/Chat/index.tsx
index 684027e49..1f7896604 100644
--- a/web/screens/Chat/index.tsx
+++ b/web/screens/Chat/index.tsx
@@ -1,111 +1,149 @@
-import { ChangeEvent, Fragment, KeyboardEvent, useEffect, useRef } from 'react'
+/* eslint-disable @typescript-eslint/naming-convention */
+import React, { useContext, useEffect, useState } from 'react'
-import { InferenceEvent, MessageStatus, events } from '@janhq/core'
-import { Button, Textarea } from '@janhq/uikit'
+import { useDropzone } from 'react-dropzone'
-import { useAtom, useAtomValue } from 'jotai'
+import { useAtomValue, useSetAtom } from 'jotai'
-import { debounce } from 'lodash'
-import { StopCircle } from 'lucide-react'
+import { UploadCloudIcon } from 'lucide-react'
-import LogoMark from '@/containers/Brand/Logo/Mark'
+import { twMerge } from 'tailwind-merge'
+import GenerateResponse from '@/containers/Loader/GenerateResponse'
import ModelReload from '@/containers/Loader/ModelReload'
import ModelStart from '@/containers/Loader/ModelStart'
-import { currentPromptAtom } from '@/containers/Providers/Jotai'
+import { currentPromptAtom, fileUploadAtom } from '@/containers/Providers/Jotai'
import { showLeftSideBarAtom } from '@/containers/Providers/KeyListener'
-import { MainViewState } from '@/constants/screens'
+import { snackbar } from '@/containers/Toast'
-import { useActiveModel } from '@/hooks/useActiveModel'
+import { FeatureToggleContext } from '@/context/FeatureToggle'
-import { useGetDownloadedModels } from '@/hooks/useGetDownloadedModels'
-import { useMainViewState } from '@/hooks/useMainViewState'
-
-import useSendChatMessage from '@/hooks/useSendChatMessage'
+import { activeModelAtom, loadModelErrorAtom } from '@/hooks/useActiveModel'
+import { queuedMessageAtom, reloadModelAtom } from '@/hooks/useSendChatMessage'
import ChatBody from '@/screens/Chat/ChatBody'
import ThreadList from '@/screens/Chat/ThreadList'
+import ChatInput from './ChatInput'
+import LoadModelErrorMessage from './LoadModelErrorMessage'
+import RequestDownloadModel from './RequestDownloadModel'
import Sidebar from './Sidebar'
-import { getCurrentChatMessagesAtom } from '@/helpers/atoms/ChatMessage.atom'
-
import {
activeThreadAtom,
engineParamsUpdateAtom,
- getActiveThreadIdAtom,
- waitingToSendMessage,
+ isGeneratingResponseAtom,
} from '@/helpers/atoms/Thread.atom'
-import { activeThreadStateAtom } from '@/helpers/atoms/Thread.atom'
+const renderError = (code: string) => {
+ switch (code) {
+ case 'multiple-upload':
+ return 'Currently, we only support one attachment at a time'
-const ChatScreen = () => {
- const activeThread = useAtomValue(activeThreadAtom)
- const { downloadedModels } = useGetDownloadedModels()
- const showLeftSideBar = useAtomValue(showLeftSideBarAtom)
+ case 'retrieval-off':
+ return 'Turn on Retrieval in Assistant Settings to use this feature'
- const { activeModel, stateModel } = useActiveModel()
- const { setMainViewState } = useMainViewState()
- const messages = useAtomValue(getCurrentChatMessagesAtom)
+ case 'file-invalid-type':
+ return 'We do not support this file type'
- const [currentPrompt, setCurrentPrompt] = useAtom(currentPromptAtom)
- const activeThreadState = useAtomValue(activeThreadStateAtom)
- const { sendChatMessage, queuedMessage, reloadModel } = useSendChatMessage()
- const isWaitingForResponse = activeThreadState?.waitingForResponse ?? false
- const isDisabledChatbox =
- currentPrompt.trim().length === 0 || isWaitingForResponse
-
- const activeThreadId = useAtomValue(getActiveThreadIdAtom)
- const [isWaitingToSend, setIsWaitingToSend] = useAtom(waitingToSendMessage)
-
- const textareaRef = useRef<HTMLTextAreaElement>(null)
- const modelRef = useRef(activeModel)
- const engineParamsUpdate = useAtomValue(engineParamsUpdateAtom)
-
- useEffect(() => {
- modelRef.current = activeModel
- }, [activeModel])
-
- const onPromptChange = (e: React.ChangeEvent<HTMLTextAreaElement>) => {
- setCurrentPrompt(e.target.value)
+ default:
+ return 'Oops, something went wrong. Please try again.'
}
+}
- useEffect(() => {
- if (isWaitingToSend && activeThreadId) {
- setIsWaitingToSend(false)
- sendChatMessage()
- }
- // eslint-disable-next-line react-hooks/exhaustive-deps
- }, [waitingToSendMessage, activeThreadId])
+const ChatScreen: React.FC = () => {
+ const setCurrentPrompt = useSetAtom(currentPromptAtom)
+ const activeThread = useAtomValue(activeThreadAtom)
+ const showLeftSideBar = useAtomValue(showLeftSideBarAtom)
+ const engineParamsUpdate = useAtomValue(engineParamsUpdateAtom)
+ const [dragOver, setDragOver] = useState(false)
- useEffect(() => {
- if (textareaRef.current) {
- textareaRef.current.style.height = '40px'
- textareaRef.current.style.height = textareaRef.current.scrollHeight + 'px'
- }
- }, [currentPrompt])
+ const queuedMessage = useAtomValue(queuedMessageAtom)
+ const reloadModel = useAtomValue(reloadModelAtom)
+ const [dragRejected, setDragRejected] = useState({ code: '' })
+ const setFileUpload = useSetAtom(fileUploadAtom)
+ const { experimentalFeature } = useContext(FeatureToggleContext)
- const onKeyDown = debounce(
- async (e: React.KeyboardEvent<HTMLTextAreaElement>) => {
- if (e.key === 'Enter') {
- if (!e.shiftKey) {
- e.preventDefault()
- if (messages[messages.length - 1]?.status !== MessageStatus.Pending)
- sendChatMessage()
- else onStopInferenceClick()
- }
+ const activeModel = useAtomValue(activeModelAtom)
+
+ const isGeneratingResponse = useAtomValue(isGeneratingResponseAtom)
+ const loadModelError = useAtomValue(loadModelErrorAtom)
+
+ const { getRootProps, isDragReject } = useDropzone({
+ noClick: true,
+ multiple: false,
+ accept: {
+ 'application/pdf': ['.pdf'],
+ },
+
+ onDragOver: (e) => {
+ // Retrieval file drag and drop is experimental feature
+ if (!experimentalFeature) return
+ if (
+ e.dataTransfer.items.length === 1 &&
+ activeThread?.assistants[0].tools &&
+ activeThread?.assistants[0].tools[0]?.enabled
+ ) {
+ setDragOver(true)
+ } else if (
+ activeThread?.assistants[0].tools &&
+ !activeThread?.assistants[0].tools[0]?.enabled
+ ) {
+ setDragRejected({ code: 'retrieval-off' })
+ } else {
+ setDragRejected({ code: 'multiple-upload' })
}
},
- 50,
- { leading: false, trailing: true }
- )
+ onDragLeave: () => setDragOver(false),
+ onDrop: (files, rejectFiles) => {
+ // Retrieval file drag and drop is experimental feature
+ if (!experimentalFeature) return
+ if (
+ !files ||
+ files.length !== 1 ||
+ rejectFiles.length !== 0 ||
+ (activeThread?.assistants[0].tools &&
+ !activeThread?.assistants[0].tools[0]?.enabled)
+ )
+ return
+ const imageType = files[0]?.type.includes('image')
+ setFileUpload([{ file: files[0], type: imageType ? 'image' : 'pdf' }])
+ setDragOver(false)
+ if (imageType) {
+ setCurrentPrompt('What do you see in this image?')
+ } else {
+ setCurrentPrompt('Summarize this for me')
+ }
+ },
+ onDropRejected: (e) => {
+ if (
+ activeThread?.assistants[0].tools &&
+ !activeThread?.assistants[0].tools[0]?.enabled
+ ) {
+ setDragRejected({ code: 'retrieval-off' })
+ } else {
+ setDragRejected({ code: e[0].errors[0].code })
+ }
+ setDragOver(false)
+ },
+ })
- const onStopInferenceClick = async () => {
- events.emit(InferenceEvent.OnInferenceStopped, {})
- }
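+ // Show the drop-rejection reason in a snackbar, then clear it after 2 seconds.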
+ useEffect(() => {
+ if (dragRejected.code) {
+ snackbar({
+ description: renderError(dragRejected.code),
+ type: 'error',
+ })
+ }
+ setTimeout(() => {
+ if (dragRejected.code) {
+ setDragRejected({ code: '' })
+ }
+ }, 2000)
+ }, [dragRejected.code])
return (
@@ -116,34 +154,41 @@ const ChatScreen = () => {
) : null}
-
+
+ {dragOver && (
+
+
+
+
+
+
+
+
+ {isDragReject
+ ? 'Currently, we only support one PDF attachment at a time'
+ : 'Drop file here'}
+
+ {!isDragReject &&
(PDF)
}
+
+
+
+
+ )}
{activeThread ? (
) : (
-
- {downloadedModels.length === 0 && (
-
-
- Welcome!
-
- You need to download your first model
-
- setMainViewState(MainViewState.Hub)}
- >
- Explore The Hub
-
-
- )}
-
+
)}
{!engineParamsUpdate &&
}
@@ -167,44 +212,9 @@ const ChatScreen = () => {
)}
-
-
+ {activeModel && isGeneratingResponse &&
}
+ {loadModelError &&
}
+
diff --git a/web/screens/ExploreModels/ExploreModelItemHeader/index.tsx b/web/screens/ExploreModels/ExploreModelItemHeader/index.tsx
index b56e20404..3ffe2cbac 100644
--- a/web/screens/ExploreModels/ExploreModelItemHeader/index.tsx
+++ b/web/screens/ExploreModels/ExploreModelItemHeader/index.tsx
@@ -24,9 +24,11 @@ import { MainViewState } from '@/constants/screens'
import { useCreateNewThread } from '@/hooks/useCreateNewThread'
import useDownloadModel from '@/hooks/useDownloadModel'
+
import { useDownloadState } from '@/hooks/useDownloadState'
+
import { getAssistants } from '@/hooks/useGetAssistants'
-import { useGetDownloadedModels } from '@/hooks/useGetDownloadedModels'
+import { downloadedModelsAtom } from '@/hooks/useGetDownloadedModels'
import { useMainViewState } from '@/hooks/useMainViewState'
import { toGibibytes } from '@/utils/converter'
@@ -43,8 +45,8 @@ type Props = {
const ExploreModelItemHeader: React.FC = ({ model, onClick, open }) => {
const { downloadModel } = useDownloadModel()
- const { downloadedModels } = useGetDownloadedModels()
- const { modelDownloadStateAtom, downloadStates } = useDownloadState()
+ const downloadedModels = useAtomValue(downloadedModelsAtom)
+ const { modelDownloadStateAtom } = useDownloadState()
const { requestCreateNewThread } = useCreateNewThread()
const totalRam = useAtomValue(totalRamAtom)
const serverEnabled = useAtomValue(serverEnabledAtom)
@@ -100,9 +102,7 @@ const ExploreModelItemHeader: React.FC = ({ model, onClick, open }) => {
)}
)
- }
-
- if (downloadState != null && downloadStates.length > 0) {
+ } else if (downloadState != null) {
downloadButton =
}
diff --git a/web/screens/ExploreModels/index.tsx b/web/screens/ExploreModels/index.tsx
index d988fcafc..398b2db08 100644
--- a/web/screens/ExploreModels/index.tsx
+++ b/web/screens/ExploreModels/index.tsx
@@ -52,9 +52,12 @@ const ExploreModelsScreen = () => {
if (loading) return
return (
-
+
-
+
{
const [host, setHost] = useAtom(hostAtom)
const [port, setPort] = useAtom(portAtom)
+ const hostOptions = ['127.0.0.1', '0.0.0.0']
+
const FIRST_TIME_VISIT_API_SERVER = 'firstTimeVisitAPIServer'
const [firstTimeVisitAPIServer, setFirstTimeVisitAPIServer] =
useState<boolean>(false)
- const handleChangePort = (value: any) => {
- if (Number(value) <= 0 || Number(value) >= 65536) {
- setErrorRangePort(true)
- } else {
- setErrorRangePort(false)
- }
- setPort(value)
- }
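+ // Flag ports outside the valid range (1-65535) so the UI can show an error.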
+ const handleChangePort = useCallback(
+ (value: string) => {
+ if (Number(value) <= 0 || Number(value) >= 65536) {
+ setErrorRangePort(true)
+ } else {
+ setErrorRangePort(false)
+ }
+ setPort(value)
+ },
+ [setPort]
+ )
useEffect(() => {
- if (
- localStorage.getItem(FIRST_TIME_VISIT_API_SERVER) === null ||
- localStorage.getItem(FIRST_TIME_VISIT_API_SERVER) === 'true'
- ) {
- localStorage.setItem(FIRST_TIME_VISIT_API_SERVER, 'true')
+ if (localStorage.getItem(FIRST_TIME_VISIT_API_SERVER) == null) {
setFirstTimeVisitAPIServer(true)
}
}, [firstTimeVisitAPIServer])
useEffect(() => {
handleChangePort(port)
- }, [])
+ }, [handleChangePort, port])
return (
-
+
{/* Left SideBar */}
@@ -167,8 +167,19 @@ const LocalServerScreen = () => {
- 127.0.0.1
- 0.0.0.0
+ {hostOptions.map((option, i) => {
+ return (
+
+ {option}
+
+ )
+ })}
@@ -361,7 +372,11 @@ const LocalServerScreen = () => {
- {settingComponentBuilder(componentDataEngineSetting, true)}
+ x.name === 'prompt_template'}
+ />
@@ -371,7 +386,7 @@ const LocalServerScreen = () => {
diff --git a/web/screens/Settings/Advanced/DataFolder/ModalConfirmDestNotEmpty.tsx b/web/screens/Settings/Advanced/DataFolder/ModalConfirmDestNotEmpty.tsx
new file mode 100644
index 000000000..e4aba41cc
--- /dev/null
+++ b/web/screens/Settings/Advanced/DataFolder/ModalConfirmDestNotEmpty.tsx
@@ -0,0 +1,59 @@
+import React from 'react'
+
+import {
+ Modal,
+ ModalPortal,
+ ModalContent,
+ ModalHeader,
+ ModalTitle,
+ ModalFooter,
+ ModalClose,
+ Button,
+} from '@janhq/uikit'
+
+import { atom, useAtom } from 'jotai'
+
+export const showDestNotEmptyConfirmAtom = atom(false)
+
+type Props = {
+ onUserConfirmed: () => void
+}
+
+const ModalChangeDestNotEmpty: React.FC<Props> = ({ onUserConfirmed }) => {
+ const [show, setShow] = useAtom(showDestNotEmptyConfirmAtom)
+
+ return (
+
+
+
+
+
+
+ This folder is not empty. Are you sure you want to relocate the
+ Jan Data Folder here?
+
+
+
+
+ You may accidentally delete your other personal data when uninstalling
+ the app in the future. Are you sure you want to proceed with this
+ folder? Please review your selection carefully.
+
+
+
+ setShow(false)}>
+ Cancel
+
+
+
+ Yes, Proceed
+
+
+
+
+
+
+ )
+}
+
+export default ModalChangeDestNotEmpty
diff --git a/web/screens/Settings/Advanced/DataFolder/ModalErrorSetDestGlobal.tsx b/web/screens/Settings/Advanced/DataFolder/ModalErrorSetDestGlobal.tsx
index 3729dc0d8..84646e735 100644
--- a/web/screens/Settings/Advanced/DataFolder/ModalErrorSetDestGlobal.tsx
+++ b/web/screens/Settings/Advanced/DataFolder/ModalErrorSetDestGlobal.tsx
@@ -16,7 +16,6 @@ export const showChangeFolderErrorAtom = atom(false)
const ModalErrorSetDestGlobal = () => {
const [show, setShow] = useAtom(showChangeFolderErrorAtom)
-
return (
diff --git a/web/screens/Settings/Advanced/DataFolder/ModalSameDirectory.tsx b/web/screens/Settings/Advanced/DataFolder/ModalSameDirectory.tsx
index 8b2d90c61..1909e6428 100644
--- a/web/screens/Settings/Advanced/DataFolder/ModalSameDirectory.tsx
+++ b/web/screens/Settings/Advanced/DataFolder/ModalSameDirectory.tsx
@@ -15,7 +15,11 @@ import { atom, useAtom } from 'jotai'
export const showSamePathModalAtom = atom(false)
-const ModalSameDirectory = () => {
+type Props = {
+ onChangeFolderClick: () => void
+}
+
+const ModalSameDirectory = ({ onChangeFolderClick }: Props) => {
const [show, setShow] = useAtom(showSamePathModalAtom)
return (
@@ -34,7 +38,14 @@ const ModalSameDirectory = () => {
Cancel
- setShow(false)} autoFocus>
+ {
+ setShow(false)
+ onChangeFolderClick()
+ }}
+ autoFocus
+ >
Choose a different folder
diff --git a/web/screens/Settings/Advanced/DataFolder/index.tsx b/web/screens/Settings/Advanced/DataFolder/index.tsx
index 9a1863fa2..fe590bfaa 100644
--- a/web/screens/Settings/Advanced/DataFolder/index.tsx
+++ b/web/screens/Settings/Advanced/DataFolder/index.tsx
@@ -1,25 +1,33 @@
import { Fragment, useCallback, useEffect, useState } from 'react'
-import { fs, AppConfiguration } from '@janhq/core'
+import { fs, AppConfiguration, isSubdirectory } from '@janhq/core'
import { Button, Input } from '@janhq/uikit'
import { useSetAtom } from 'jotai'
import { PencilIcon, FolderOpenIcon } from 'lucide-react'
-import { SUCCESS_SET_NEW_DESTINATION } from '@/hooks/useVaultDirectory'
+import Loader from '@/containers/Loader'
+
+export const SUCCESS_SET_NEW_DESTINATION = 'successSetNewDestination'
import ModalChangeDirectory, {
showDirectoryConfirmModalAtom,
} from './ModalChangeDirectory'
+import ModalChangeDestNotEmpty, {
+ showDestNotEmptyConfirmAtom,
+} from './ModalConfirmDestNotEmpty'
import ModalErrorSetDestGlobal, {
showChangeFolderErrorAtom,
} from './ModalErrorSetDestGlobal'
+
import ModalSameDirectory, { showSamePathModalAtom } from './ModalSameDirectory'
const DataFolder = () => {
const [janDataFolderPath, setJanDataFolderPath] = useState('')
+ const [showLoader, setShowLoader] = useState(false)
const setShowDirectoryConfirm = useSetAtom(showDirectoryConfirmModalAtom)
const setShowSameDirectory = useSetAtom(showSamePathModalAtom)
const setShowChangeFolderError = useSetAtom(showChangeFolderErrorAtom)
+ const showDestNotEmptyConfirm = useSetAtom(showDestNotEmptyConfirmAtom)
const [destinationPath, setDestinationPath] = useState(undefined)
useEffect(() => {
@@ -39,28 +47,55 @@ const DataFolder = () => {
return
}
+ const appConfiguration: AppConfiguration =
+ await window.core?.api?.getAppConfigurations()
+ const currentJanDataFolder = appConfiguration.data_folder
+
+ if (await isSubdirectory(currentJanDataFolder, destFolder)) {
+ setShowSameDirectory(true)
+ return
+ }
+
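+ // Require explicit confirmation before moving data into a non-empty folder (.DS_Store is ignored).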
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ const newDestChildren: any[] = await fs.readdirSync(destFolder)
+ const isNotEmpty =
+ newDestChildren.filter((x) => x !== '.DS_Store').length > 0
+
+ if (isNotEmpty) {
+ showDestNotEmptyConfirm(true)
+ return
+ }
+
setDestinationPath(destFolder)
setShowDirectoryConfirm(true)
- }, [janDataFolderPath, setShowSameDirectory, setShowDirectoryConfirm])
+ }, [
+ janDataFolderPath,
+ setShowDirectoryConfirm,
+ setShowSameDirectory,
+ showDestNotEmptyConfirm,
+ ])
const onUserConfirmed = useCallback(async () => {
if (!destinationPath) return
try {
+ setShowLoader(true)
const appConfiguration: AppConfiguration =
await window.core?.api?.getAppConfigurations()
const currentJanDataFolder = appConfiguration.data_folder
appConfiguration.data_folder = destinationPath
await fs.syncFile(currentJanDataFolder, destinationPath)
await window.core?.api?.updateAppConfiguration(appConfiguration)
-
console.debug(
`File sync finished from ${currentJanDataFolder} to ${destinationPath}`
)
-
localStorage.setItem(SUCCESS_SET_NEW_DESTINATION, 'true')
+ setTimeout(() => {
+ setShowLoader(false)
+ }, 1200)
await window.core?.api?.relaunch()
} catch (e) {
console.error(`Error: ${e}`)
+ setShowLoader(false)
setShowChangeFolderError(true)
}
}, [destinationPath, setShowChangeFolderError])
@@ -88,7 +123,8 @@ const DataFolder = () => {
/>
window.core?.api?.openAppDirectory()}
/>
{
-
+
+
+ {showLoader &&
}
)
}
diff --git a/web/screens/Settings/Advanced/FactoryReset/ModalConfirmReset.tsx b/web/screens/Settings/Advanced/FactoryReset/ModalConfirmReset.tsx
new file mode 100644
index 000000000..7b2a4027a
--- /dev/null
+++ b/web/screens/Settings/Advanced/FactoryReset/ModalConfirmReset.tsx
@@ -0,0 +1,97 @@
+import React, { useCallback, useState } from 'react'
+
+import {
+ Modal,
+ ModalPortal,
+ ModalContent,
+ ModalHeader,
+ ModalTitle,
+ ModalFooter,
+ ModalClose,
+ Button,
+ Checkbox,
+ Input,
+} from '@janhq/uikit'
+import { atom, useAtom } from 'jotai'
+
+import useFactoryReset from '@/hooks/useFactoryReset'
+
+export const modalValidationAtom = atom(false)
+
+const ModalConfirmReset = () => {
+ const [modalValidation, setModalValidation] = useAtom(modalValidationAtom)
+ const { resetAll, defaultJanDataFolder } = useFactoryReset()
+ const [inputValue, setInputValue] = useState('')
+ const [currentDirectoryChecked, setCurrentDirectoryChecked] = useState(true)
+ const onFactoryResetClick = useCallback(
+ () => resetAll(currentDirectoryChecked),
+ [currentDirectoryChecked, resetAll]
+ )
+
+ return (
+
setModalValidation(false)}
+ >
+
+
+
+
+ Are you sure you want to reset to default settings?
+
+
+
+ It will reset the application to its original state, deleting all your
+ usage data, including model customizations and conversation history.
+ This action is irreversible.
+
+
+
{`To confirm, please enter the word "RESET" below:`}
+
setInputValue(e.target.value)}
+ />
+
+
+
setCurrentDirectoryChecked(Boolean(e))}
+ />
+
+
+ Keep the current app data location
+
+
+ Otherwise it will reset back to its original location at:{' '}
+ {/* TODO should be from system */}
+ {defaultJanDataFolder}
+
+
+
+
+
+ setModalValidation(false)}>
+ Cancel
+
+
+
+ Reset Now
+
+
+
+
+
+
+ )
+}
+
+export default ModalConfirmReset
diff --git a/web/screens/Settings/Advanced/FactoryReset/index.tsx b/web/screens/Settings/Advanced/FactoryReset/index.tsx
new file mode 100644
index 000000000..e7b1e2995
--- /dev/null
+++ b/web/screens/Settings/Advanced/FactoryReset/index.tsx
@@ -0,0 +1,37 @@
+import { Button } from '@janhq/uikit'
+
+import { useSetAtom } from 'jotai'
+
+import ModalValidation, { modalValidationAtom } from './ModalConfirmReset'
+
+const FactoryReset = () => {
+ const setModalValidation = useSetAtom(modalValidationAtom)
+
+ return (
+
+
+
+
+ Reset to Factory Default
+
+
+
+ Reset the application to its original state, deleting all your usage
+ data, including model customizations and conversation history. This
+ action is irreversible and recommended only if the application is in a
+ corrupted state.
+
+
+
setModalValidation(true)}
+ >
+ Reset
+
+
+
+ )
+}
+
+export default FactoryReset
diff --git a/web/screens/Settings/Advanced/index.tsx b/web/screens/Settings/Advanced/index.tsx
index e1f733699..109431515 100644
--- a/web/screens/Settings/Advanced/index.tsx
+++ b/web/screens/Settings/Advanced/index.tsx
@@ -1,4 +1,3 @@
-/* eslint-disable react-hooks/exhaustive-deps */
'use client'
import {
@@ -21,6 +20,7 @@ import { FeatureToggleContext } from '@/context/FeatureToggle'
import { useSettings } from '@/hooks/useSettings'
import DataFolder from './DataFolder'
+import FactoryReset from './FactoryReset'
const Advanced = () => {
const {
@@ -36,6 +36,7 @@ const Advanced = () => {
const { readSettings, saveSettings, validateSettings, setShowNotification } =
useSettings()
+
const onProxyChange = useCallback(
(event: ChangeEvent<HTMLInputElement>) => {
const value = event.target.value || ''
@@ -50,10 +51,12 @@ const Advanced = () => {
)
useEffect(() => {
- readSettings().then((settings) => {
+ const setUseGpuIfPossible = async () => {
+ const settings = await readSettings()
setGpuEnabled(settings.run_mode === 'gpu')
- })
- }, [])
+ }
+ setUseGpuIfPossible()
+ }, [readSettings])
const clearLogs = async () => {
if (await fs.existsSync(`file://logs`)) {
@@ -62,6 +65,7 @@ const Advanced = () => {
toaster({
title: 'Logs cleared',
description: 'All logs have been cleared.',
+ type: 'success',
})
}
@@ -96,13 +100,7 @@ const Advanced = () => {
{
- if (e === true) {
- setExperimentalFeature(true)
- } else {
- setExperimentalFeature(false)
- }
- }}
+ onCheckedChange={setExperimentalFeature}
/>
@@ -119,7 +117,7 @@ const Advanced = () => {
{
+ onCheckedChange={(e) => {
if (e === true) {
saveSettings({ runMode: 'gpu' })
setGpuEnabled(true)
@@ -137,7 +135,7 @@ const Advanced = () => {
)}
{/* Directory */}
- {experimentalFeature && }
+
{/* Proxy */}
@@ -170,43 +168,10 @@ const Advanced = () => {
certain proxies.
- {
- if (e === true) {
- setIgnoreSSL(true)
- } else {
- setIgnoreSSL(false)
- }
- }}
- />
+ setIgnoreSSL(e)} />
- {/* Open app directory */}
- {window.electronAPI && (
-
-
-
-
- Open App Directory
-
-
-
- Open the directory where your app data, like conversation history
- and model configurations, is located.
-
-
-
window.core?.api?.openAppDirectory()}
- >
- Open
-
-
- )}
-
- {/* Claer log */}
+ {/* Clear log */}
@@ -218,6 +183,9 @@ const Advanced = () => {
Clear
+
+ {/* Factory Reset */}
+
)
}
diff --git a/web/screens/Settings/index.tsx b/web/screens/Settings/index.tsx
index ea12ccc20..c70938f91 100644
--- a/web/screens/Settings/index.tsx
+++ b/web/screens/Settings/index.tsx
@@ -7,14 +7,14 @@ import { motion as m } from 'framer-motion'
import { twMerge } from 'tailwind-merge'
-import { SUCCESS_SET_NEW_DESTINATION } from '@/hooks/useVaultDirectory'
-
import Advanced from '@/screens/Settings/Advanced'
import AppearanceOptions from '@/screens/Settings/Appearance'
import ExtensionCatalog from '@/screens/Settings/CoreExtensions'
import Models from '@/screens/Settings/Models'
+import { SUCCESS_SET_NEW_DESTINATION } from './Advanced/DataFolder'
+
const SettingsScreen = () => {
const [activeStaticMenu, setActiveStaticMenu] = useState('My Models')
const [menus, setMenus] = useState<string[]>([])
@@ -49,11 +49,15 @@ const SettingsScreen = () => {
useEffect(() => {
if (localStorage.getItem(SUCCESS_SET_NEW_DESTINATION) === 'true') {
setActiveStaticMenu('Advanced Settings')
+ localStorage.removeItem(SUCCESS_SET_NEW_DESTINATION)
}
}, [])
return (
-
+
diff --git a/web/screens/SystemMonitor/index.tsx b/web/screens/SystemMonitor/index.tsx
index ed3b057a1..3bf8bb35e 100644
--- a/web/screens/SystemMonitor/index.tsx
+++ b/web/screens/SystemMonitor/index.tsx
@@ -35,7 +35,7 @@ export default function SystemMonitorScreen() {
return (
-
+
diff --git a/web/tsconfig.json b/web/tsconfig.json
index 3529c3531..26f0e8ef3 100644
--- a/web/tsconfig.json
+++ b/web/tsconfig.json
@@ -17,13 +17,13 @@
"incremental": true,
"plugins": [
{
- "name": "next"
- }
+ "name": "next",
+ },
],
"paths": {
- "@/*": ["./*"]
- }
+ "@/*": ["./*"],
+ },
},
"include": ["next-env.d.ts", "**/*.ts", "**/*.tsx", ".next/types/**/*.ts"],
- "exclude": ["node_modules", "../electron"]
+ "exclude": ["node_modules"],
}
diff --git a/web/types/downloadState.d.ts b/web/types/downloadState.d.ts
index 3c3389b4f..cca526bf1 100644
--- a/web/types/downloadState.d.ts
+++ b/web/types/downloadState.d.ts
@@ -4,6 +4,8 @@ type DownloadState = {
speed: number
percent: number
size: DownloadSize
+ isFinished?: boolean
+ children?: DownloadState[]
error?: string
}
diff --git a/web/types/index.d.ts b/web/types/index.d.ts
index 328fc8f53..833c3e2bd 100644
--- a/web/types/index.d.ts
+++ b/web/types/index.d.ts
@@ -4,7 +4,6 @@ import { APIFunctions } from '@janhq/core'
export {}
declare global {
- declare const PLUGIN_CATALOG: string
declare const VERSION: string
declare const ANALYTICS_ID: string
declare const ANALYTICS_HOST: string
diff --git a/web/utils/base64.ts b/web/utils/base64.ts
new file mode 100644
index 000000000..07d016eeb
--- /dev/null
+++ b/web/utils/base64.ts
@@ -0,0 +1,9 @@
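+// Reads a file into a base64-encoded data URL via FileReader.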
+export const getBase64 = async (file: File): Promise<string> =>
+ new Promise<string>((resolve) => {
+ const reader = new FileReader()
+ reader.readAsDataURL(file)
+ reader.onload = () => {
+ const baseURL = reader.result
+ resolve(baseURL as string)
+ }
+ })
diff --git a/web/utils/componentSettings.ts b/web/utils/componentSettings.ts
index 3536bfd99..64df09ffb 100644
--- a/web/utils/componentSettings.ts
+++ b/web/utils/componentSettings.ts
@@ -1,13 +1,10 @@
-import { Model, ModelRuntimeParams, ModelSettingParams } from '@janhq/core'
+import { Model } from '@janhq/core'
+import { SettingComponentData } from '@/screens/Chat/ModelSetting/SettingComponent'
import { presetConfiguration } from '@/screens/Chat/ModelSetting/predefinedComponent'
-import { SettingComponentData } from '@/screens/Chat/ModelSetting/settingComponentBuilder'
-
-import { ModelParams } from '@/helpers/atoms/Thread.atom'
-
export const getConfigurationsData = (
- settings: ModelSettingParams | ModelRuntimeParams,
+ settings: object,
selectedModel?: Model
) => {
const componentData: SettingComponentData[] = []
@@ -19,31 +16,35 @@ export const getConfigurationsData = (
return
}
if ('slider' === componentSetting.controllerType) {
- const value = Number(settings[key as keyof ModelParams])
+ const value = Number(settings[key as keyof typeof settings])
if ('value' in componentSetting.controllerData) {
componentSetting.controllerData.value = value
if ('max' in componentSetting.controllerData) {
switch (key) {
case 'max_tokens':
componentSetting.controllerData.max =
- selectedModel?.parameters.max_tokens || 4096
+ selectedModel?.parameters.max_tokens ||
+ componentSetting.controllerData.max ||
+ 4096
break
case 'ctx_len':
componentSetting.controllerData.max =
- selectedModel?.settings.ctx_len || 4096
+ selectedModel?.settings.ctx_len ||
+ componentSetting.controllerData.max ||
+ 4096
break
}
}
}
} else if ('input' === componentSetting.controllerType) {
- const value = settings[key as keyof ModelParams] as string
- const placeholder = settings[key as keyof ModelParams] as string
+ const value = settings[key as keyof typeof settings] as string
+ const placeholder = settings[key as keyof typeof settings] as string
if ('value' in componentSetting.controllerData)
componentSetting.controllerData.value = value
if ('placeholder' in componentSetting.controllerData)
componentSetting.controllerData.placeholder = placeholder
} else if ('checkbox' === componentSetting.controllerType) {
- const checked = settings[key as keyof ModelParams] as boolean
+ const checked = settings[key as keyof typeof settings] as boolean
if ('checked' in componentSetting.controllerData)
componentSetting.controllerData.checked = checked
diff --git a/web/utils/model.ts b/web/utils/model.ts
index 2dd6fbdd3..eab4076d8 100644
--- a/web/utils/model.ts
+++ b/web/utils/model.ts
@@ -2,7 +2,7 @@ import { Model } from '@janhq/core'
export const modelBinFileName = (model: Model) => {
const modelFormatExt = '.gguf'
- const extractedFileName = model.source_url?.split('/').pop() ?? model.id
+ const extractedFileName = model.sources[0]?.url.split('/').pop() ?? model.id
const fileName = extractedFileName.toLowerCase().endsWith(modelFormatExt)
? extractedFileName
: model.id
diff --git a/web/utils/modelParam.ts b/web/utils/modelParam.ts
index 7d559c313..4b9fe84ae 100644
--- a/web/utils/modelParam.ts
+++ b/web/utils/modelParam.ts
@@ -22,7 +22,7 @@ export const toRuntimeParams = (
for (const [key, value] of Object.entries(modelParams)) {
if (key in defaultModelParams) {
- runtimeParams[key as keyof ModelRuntimeParams] = value
+ Object.assign(runtimeParams, { ...runtimeParams, [key]: value })
}
}
@@ -40,12 +40,14 @@ export const toSettingParams = (
n_parallel: undefined,
cpu_threads: undefined,
prompt_template: undefined,
+ llama_model_path: undefined,
+ mmproj: undefined,
}
const settingParams: ModelSettingParams = {}
for (const [key, value] of Object.entries(modelParams)) {
if (key in defaultSettingParams) {
- settingParams[key as keyof ModelSettingParams] = value
+ Object.assign(settingParams, { ...settingParams, [key]: value })
}
}
diff --git a/web/utils/posthog.ts b/web/utils/posthog.ts
deleted file mode 100644
index 9bcbaa8ce..000000000
--- a/web/utils/posthog.ts
+++ /dev/null
@@ -1,50 +0,0 @@
-import posthog, { Properties } from 'posthog-js'
-
-// Initialize PostHog
-posthog.init(ANALYTICS_ID, {
- api_host: ANALYTICS_HOST,
- autocapture: false,
- capture_pageview: false,
- capture_pageleave: false,
- rageclick: false,
-})
-// Export the PostHog instance
-export const instance = posthog
-
-// Enum for Analytics Events
-export enum AnalyticsEvent {
- Ping = 'Ping',
-}
-
-// Function to determine the operating system
-function getOperatingSystem(): string {
- if (isMac) return 'MacOS'
- if (isWindows) return 'Windows'
- if (isLinux) return 'Linux'
- return 'Unknown'
-}
-
-function captureAppVersionAndOS() {
- const properties: Properties = {
- $appVersion: VERSION,
- $userOperatingSystem: getOperatingSystem(),
- // Set the following Posthog default properties to empty strings
- $initial_browser: '',
- $browser: '',
- $initial_browser_version: '',
- $browser_version: '',
- $initial_current_url: '',
- $current_url: '',
- $initial_device_type: '',
- $device_type: '',
- $initial_pathname: '',
- $pathname: '',
- $initial_referrer: '',
- $referrer: '',
- $initial_referring_domain: '',
- $referring_domain: '',
- }
- posthog.capture(AnalyticsEvent.Ping, properties)
-}
-
-captureAppVersionAndOS()
diff --git a/web/utils/umami.tsx b/web/utils/umami.tsx
new file mode 100644
index 000000000..ac9e70304
--- /dev/null
+++ b/web/utils/umami.tsx
@@ -0,0 +1,65 @@
+import { useEffect } from 'react'
+
+import Script from 'next/script'
+
+// Define the type for the umami data object
+interface UmamiData {
+ version: string
+}
+
+declare global {
+ interface Window {
+ umami:
+ | {
+ track: (event: string, data?: UmamiData) => void
+ }
+ | undefined
+ }
+}
+
+const Umami = () => {
+ const appVersion = VERSION
+ const analyticsHost = ANALYTICS_HOST
+ const analyticsId = ANALYTICS_ID
+
+ useEffect(() => {
+ if (!appVersion || !analyticsHost || !analyticsId) return
+ const ping = () => {
+ // Check if umami is defined before ping
+ if (window.umami !== null && typeof window.umami !== 'undefined') {
+ window.umami.track(appVersion, {
+ version: appVersion,
+ })
+ }
+ }
+
+ // Wait for umami to be defined before ping
+ if (window.umami !== null && typeof window.umami !== 'undefined') {
+ ping()
+ } else {
+ // Listen for umami script load event
+ document.addEventListener('umami:loaded', ping)
+ }
+
+ // Cleanup function to remove event listener if the component unmounts
+ return () => {
+ document.removeEventListener('umami:loaded', ping)
+ }
+ }, [appVersion, analyticsHost, analyticsId])
+
+ return (
+ <>
+ {appVersion && analyticsHost && analyticsId && (
+