diff --git a/.github/workflows/jan-electron-build-nightly.yml b/.github/workflows/jan-electron-build-nightly.yml
index 08b6ad476..cad2ac227 100644
--- a/.github/workflows/jan-electron-build-nightly.yml
+++ b/.github/workflows/jan-electron-build-nightly.yml
@@ -1,6 +1,12 @@
name: Jan Build Electron App Nightly or Manual
on:
+ push:
+ branches:
+ - main
+ paths-ignore:
+ - 'README.md'
+ - 'docs/**'
schedule:
- cron: '0 20 * * 1,2,3' # At 8 PM UTC on Monday, Tuesday, and Wednesday which is 3 AM UTC+7 Tuesday, Wednesday, and Thursday
workflow_dispatch:
@@ -23,12 +29,20 @@ jobs:
- name: Set public provider
id: set-public-provider
run: |
- if [ ${{ github.event == 'workflow_dispatch' }} ]; then
+ if [ "${{ github.event_name }}" == "workflow_dispatch" ]; then
echo "::set-output name=public_provider::${{ github.event.inputs.public_provider }}"
echo "::set-output name=ref::${{ github.ref }}"
else
- echo "::set-output name=public_provider::cloudflare-r2"
- echo "::set-output name=ref::refs/heads/dev"
+ if [ "${{ github.event_name }}" == "schedule" ]; then
+ echo "::set-output name=public_provider::cloudflare-r2"
+ echo "::set-output name=ref::refs/heads/dev"
+ elif [ "${{ github.event_name }}" == "push" ]; then
+ echo "::set-output name=public_provider::cloudflare-r2"
+ echo "::set-output name=ref::${{ github.ref }}"
+ else
+ echo "::set-output name=public_provider::none"
+ echo "::set-output name=ref::${{ github.ref }}"
+ fi
fi
# Job create Update app version based on latest release tag with build number and save to output
get-update-version:
@@ -73,6 +87,17 @@ jobs:
push_to_branch: dev
new_version: ${{ needs.get-update-version.outputs.new_version }}
+ noti-discord-pre-release-and-update-url-readme:
+ needs: [build-macos, build-windows-x64, build-linux-x64, get-update-version, set-public-provider]
+ secrets: inherit
+ if: github.event_name == 'push'
+ uses: ./.github/workflows/template-noti-discord-and-update-url-readme.yml
+ with:
+ ref: refs/heads/dev
+ build_reason: Pre-release
+ push_to_branch: dev
+ new_version: ${{ needs.get-update-version.outputs.new_version }}
+
noti-discord-manual-and-update-url-readme:
needs: [build-macos, build-windows-x64, build-linux-x64, get-update-version, set-public-provider]
secrets: inherit
diff --git a/.github/workflows/jan-electron-build-pre-release.yml b/.github/workflows/jan-electron-build-pre-release.yml
deleted file mode 100644
index d37cda5ab..000000000
--- a/.github/workflows/jan-electron-build-pre-release.yml
+++ /dev/null
@@ -1,52 +0,0 @@
-name: Jan Build Electron Pre Release
-
-on:
- push:
- branches:
- - main
- paths:
- - "!README.md"
-
-jobs:
-
- # Job create Update app version based on latest release tag with build number and save to output
- get-update-version:
- uses: ./.github/workflows/template-get-update-version.yml
-
- build-macos:
- uses: ./.github/workflows/template-build-macos.yml
- secrets: inherit
- needs: [get-update-version]
- with:
- ref: ${{ github.ref }}
- public_provider: cloudflare-r2
- new_version: ${{ needs.get-update-version.outputs.new_version }}
-
- build-windows-x64:
- uses: ./.github/workflows/template-build-windows-x64.yml
- secrets: inherit
- needs: [get-update-version]
- with:
- ref: ${{ github.ref }}
- public_provider: cloudflare-r2
- new_version: ${{ needs.get-update-version.outputs.new_version }}
-
- build-linux-x64:
- uses: ./.github/workflows/template-build-linux-x64.yml
- secrets: inherit
- needs: [get-update-version]
- with:
- ref: ${{ github.ref }}
- public_provider: cloudflare-r2
- new_version: ${{ needs.get-update-version.outputs.new_version }}
-
- noti-discord-nightly-and-update-url-readme:
- needs: [build-macos, build-windows-x64, build-linux-x64, get-update-version]
- secrets: inherit
- if: github.event_name == 'push' && github.ref == 'refs/heads/main'
- uses: ./.github/workflows/template-noti-discord-and-update-url-readme.yml
- with:
- ref: refs/heads/dev
- build_reason: Nightly
- push_to_branch: dev
- new_version: ${{ needs.get-update-version.outputs.new_version }}
diff --git a/.github/workflows/template-build-linux-x64.yml b/.github/workflows/template-build-linux-x64.yml
index c6d1eac97..08cb1dada 100644
--- a/.github/workflows/template-build-linux-x64.yml
+++ b/.github/workflows/template-build-linux-x64.yml
@@ -98,8 +98,8 @@ jobs:
make build-and-publish
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- ANALYTICS_ID: ${{ secrets.JAN_APP_POSTHOG_PROJECT_API_KEY }}
- ANALYTICS_HOST: ${{ secrets.JAN_APP_POSTHOG_URL }}
+ ANALYTICS_ID: ${{ secrets.JAN_APP_UMAMI_PROJECT_API_KEY }}
+ ANALYTICS_HOST: ${{ secrets.JAN_APP_UMAMI_URL }}
- name: Upload Artifact .deb file
if: inputs.public_provider != 'github'
diff --git a/.github/workflows/template-build-macos.yml b/.github/workflows/template-build-macos.yml
index bc48e6c21..0ad1d3a6a 100644
--- a/.github/workflows/template-build-macos.yml
+++ b/.github/workflows/template-build-macos.yml
@@ -137,8 +137,8 @@ jobs:
APPLE_APP_SPECIFIC_PASSWORD: ${{ secrets.APPLE_APP_SPECIFIC_PASSWORD }}
APP_PATH: "."
DEVELOPER_ID: ${{ secrets.DEVELOPER_ID }}
- ANALYTICS_ID: ${{ secrets.JAN_APP_POSTHOG_PROJECT_API_KEY }}
- ANALYTICS_HOST: ${{ secrets.JAN_APP_POSTHOG_URL }}
+ ANALYTICS_ID: ${{ secrets.JAN_APP_UMAMI_PROJECT_API_KEY }}
+ ANALYTICS_HOST: ${{ secrets.JAN_APP_UMAMI_URL }}
- name: Upload Artifact
if: inputs.public_provider != 'github'
diff --git a/.github/workflows/template-build-windows-x64.yml b/.github/workflows/template-build-windows-x64.yml
index 5d96b3f49..b81997bde 100644
--- a/.github/workflows/template-build-windows-x64.yml
+++ b/.github/workflows/template-build-windows-x64.yml
@@ -127,8 +127,8 @@ jobs:
make build-and-publish
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- ANALYTICS_ID: ${{ secrets.JAN_APP_POSTHOG_PROJECT_API_KEY }}
- ANALYTICS_HOST: ${{ secrets.JAN_APP_POSTHOG_URL }}
+ ANALYTICS_ID: ${{ secrets.JAN_APP_UMAMI_PROJECT_API_KEY }}
+ ANALYTICS_HOST: ${{ secrets.JAN_APP_UMAMI_URL }}
AZURE_KEY_VAULT_URI: ${{ secrets.AZURE_KEY_VAULT_URI }}
AZURE_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }}
AZURE_TENANT_ID: ${{ secrets.AZURE_TENANT_ID }}
diff --git a/.github/workflows/update-release-url.yml b/.github/workflows/update-release-url.yml
index 545d6542e..99a3db0e0 100644
--- a/.github/workflows/update-release-url.yml
+++ b/.github/workflows/update-release-url.yml
@@ -17,7 +17,7 @@ jobs:
with:
fetch-depth: "0"
token: ${{ secrets.PAT_SERVICE_ACCOUNT }}
- ref: main
+ ref: dev
- name: Get Latest Release
uses: pozetroninc/github-action-get-latest-release@v0.7.0
@@ -46,4 +46,4 @@ jobs:
git config --global user.name "Service Account"
git add README.md
git commit -m "Update README.md with Stable Download URLs"
- git -c http.extraheader="AUTHORIZATION: bearer ${{ secrets.PAT_SERVICE_ACCOUNT }}" push origin HEAD:main
+ git -c http.extraheader="AUTHORIZATION: bearer ${{ secrets.PAT_SERVICE_ACCOUNT }}" push origin HEAD:dev
diff --git a/README.md b/README.md
index f7ae2de86..ec35170f6 100644
--- a/README.md
+++ b/README.md
@@ -43,31 +43,31 @@ Jan is an open-source ChatGPT alternative that runs 100% offline on your compute
| Experimental (Nightly Build) |
-
+
jan.exe
|
-
+
Intel
|
-
+
M1/M2
|
-
+
jan.deb
|
-
+
jan.AppImage
diff --git a/core/src/api/index.ts b/core/src/api/index.ts
index a232c4090..0d7cc51f7 100644
--- a/core/src/api/index.ts
+++ b/core/src/api/index.ts
@@ -3,7 +3,6 @@
* @description Enum of all the routes exposed by the app
*/
export enum AppRoute {
- appDataPath = 'appDataPath',
openExternalUrl = 'openExternalUrl',
openAppDirectory = 'openAppDirectory',
openFileExplore = 'openFileExplorer',
@@ -12,6 +11,7 @@ export enum AppRoute {
updateAppConfiguration = 'updateAppConfiguration',
relaunch = 'relaunch',
joinPath = 'joinPath',
+ isSubdirectory = 'isSubdirectory',
baseName = 'baseName',
startServer = 'startServer',
stopServer = 'stopServer',
@@ -61,6 +61,7 @@ export enum FileManagerRoute {
syncFile = 'syncFile',
getJanDataFolderPath = 'getJanDataFolderPath',
getResourcePath = 'getResourcePath',
+ getUserHomePath = 'getUserHomePath',
fileStat = 'fileStat',
writeBlob = 'writeBlob',
}
diff --git a/core/src/core.ts b/core/src/core.ts
index aa545e10e..8831c6001 100644
--- a/core/src/core.ts
+++ b/core/src/core.ts
@@ -22,7 +22,11 @@ const executeOnMain: (extension: string, method: string, ...args: any[]) => Prom
* @param {object} network - Optional object to specify proxy/whether to ignore SSL certificates.
* @returns {Promise} A promise that resolves when the file is downloaded.
*/
-const downloadFile: (url: string, fileName: string, network?: { proxy?: string, ignoreSSL?: boolean }) => Promise = (url, fileName, network) => {
+const downloadFile: (
+ url: string,
+ fileName: string,
+ network?: { proxy?: string; ignoreSSL?: boolean }
+) => Promise = (url, fileName, network) => {
return global.core?.api?.downloadFile(url, fileName, network)
}
@@ -79,6 +83,12 @@ const openExternalUrl: (url: string) => Promise = (url) =>
*/
const getResourcePath: () => Promise = () => global.core.api?.getResourcePath()
+/**
+ * Gets the user's home path.
+ * @returns return user's home path
+ */
+const getUserHomePath = (): Promise => global.core.api?.getUserHomePath()
+
/**
* Log to file from browser processes.
*
@@ -87,6 +97,17 @@ const getResourcePath: () => Promise = () => global.core.api?.getResourc
const log: (message: string, fileName?: string) => void = (message, fileName) =>
global.core.api?.log(message, fileName)
+/**
+ * Check whether the path is a subdirectory of another path.
+ *
+ * @param from - The path to check.
+ * @param to - The path to check against.
+ *
+ * @returns {Promise} - A promise that resolves with a boolean indicating whether the path is a subdirectory.
+ */
+const isSubdirectory: (from: string, to: string) => Promise = (from: string, to: string) =>
+ global.core.api?.isSubdirectory(from, to)
+
/**
* Register extension point function type definition
*/
@@ -94,7 +115,7 @@ export type RegisterExtensionPoint = (
extensionName: string,
extensionId: string,
method: Function,
- priority?: number,
+ priority?: number
) => void
/**
@@ -111,5 +132,7 @@ export {
openExternalUrl,
baseName,
log,
+ isSubdirectory,
+ getUserHomePath,
FileStat,
}
diff --git a/core/src/node/api/common/builder.ts b/core/src/node/api/common/builder.ts
index a9819bce6..5c99cf4d8 100644
--- a/core/src/node/api/common/builder.ts
+++ b/core/src/node/api/common/builder.ts
@@ -2,7 +2,8 @@ import fs from 'fs'
import { JanApiRouteConfiguration, RouteConfiguration } from './configuration'
import { join } from 'path'
import { ContentType, MessageStatus, Model, ThreadMessage } from './../../../index'
-import { getJanDataFolderPath } from '../../utils'
+import { getEngineConfiguration, getJanDataFolderPath } from '../../utils'
+import { DEFAULT_CHAT_COMPLETION_URL } from './consts'
export const getBuilder = async (configuration: RouteConfiguration) => {
const directoryPath = join(getJanDataFolderPath(), configuration.dirName)
@@ -309,7 +310,7 @@ export const chatCompletions = async (request: any, reply: any) => {
const engineConfiguration = await getEngineConfiguration(requestedModel.engine)
let apiKey: string | undefined = undefined
- let apiUrl: string = 'http://127.0.0.1:3928/inferences/llamacpp/chat_completion' // default nitro url
+ let apiUrl: string = DEFAULT_CHAT_COMPLETION_URL
if (engineConfiguration) {
apiKey = engineConfiguration.api_key
@@ -320,7 +321,7 @@ export const chatCompletions = async (request: any, reply: any) => {
'Content-Type': 'text/event-stream',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive',
- "Access-Control-Allow-Origin": "*"
+ 'Access-Control-Allow-Origin': '*',
})
const headers: Record = {
@@ -346,13 +347,3 @@ export const chatCompletions = async (request: any, reply: any) => {
response.body.pipe(reply.raw)
}
}
-
-const getEngineConfiguration = async (engineId: string) => {
- if (engineId !== 'openai') {
- return undefined
- }
- const directoryPath = join(getJanDataFolderPath(), 'engines')
- const filePath = join(directoryPath, `${engineId}.json`)
- const data = await fs.readFileSync(filePath, 'utf-8')
- return JSON.parse(data)
-}
diff --git a/core/src/node/api/common/consts.ts b/core/src/node/api/common/consts.ts
new file mode 100644
index 000000000..bc3cfe300
--- /dev/null
+++ b/core/src/node/api/common/consts.ts
@@ -0,0 +1,19 @@
+// The PORT to use for the Nitro subprocess
+export const NITRO_DEFAULT_PORT = 3928
+
+// The HOST address to use for the Nitro subprocess
+export const LOCAL_HOST = '127.0.0.1'
+
+export const SUPPORTED_MODEL_FORMAT = '.gguf'
+
+// The URL for the Nitro subprocess
+const NITRO_HTTP_SERVER_URL = `http://${LOCAL_HOST}:${NITRO_DEFAULT_PORT}`
+// The URL for the Nitro subprocess to load a model
+export const NITRO_HTTP_LOAD_MODEL_URL = `${NITRO_HTTP_SERVER_URL}/inferences/llamacpp/loadmodel`
+// The URL for the Nitro subprocess to validate a model
+export const NITRO_HTTP_VALIDATE_MODEL_URL = `${NITRO_HTTP_SERVER_URL}/inferences/llamacpp/modelstatus`
+
+// The URL for the Nitro subprocess to kill itself
+export const NITRO_HTTP_KILL_URL = `${NITRO_HTTP_SERVER_URL}/processmanager/destroy`
+
+export const DEFAULT_CHAT_COMPLETION_URL = `http://${LOCAL_HOST}:${NITRO_DEFAULT_PORT}/inferences/llamacpp/chat_completion` // default nitro url
diff --git a/core/src/node/api/common/startStopModel.ts b/core/src/node/api/common/startStopModel.ts
new file mode 100644
index 000000000..0d4934e1c
--- /dev/null
+++ b/core/src/node/api/common/startStopModel.ts
@@ -0,0 +1,351 @@
+import fs from 'fs'
+import { join } from 'path'
+import { getJanDataFolderPath, getJanExtensionsPath, getSystemResourceInfo } from '../../utils'
+import { logServer } from '../../log'
+import { ChildProcessWithoutNullStreams, spawn } from 'child_process'
+import { Model, ModelSettingParams, PromptTemplate } from '../../../types'
+import {
+ LOCAL_HOST,
+ NITRO_DEFAULT_PORT,
+ NITRO_HTTP_KILL_URL,
+ NITRO_HTTP_LOAD_MODEL_URL,
+ NITRO_HTTP_VALIDATE_MODEL_URL,
+ SUPPORTED_MODEL_FORMAT,
+} from './consts'
+
+// The subprocess instance for Nitro
+let subprocess: ChildProcessWithoutNullStreams | undefined = undefined
+
+// TODO: move this to core type
+interface NitroModelSettings extends ModelSettingParams {
+ llama_model_path: string
+ cpu_threads: number
+}
+
+export const startModel = async (modelId: string, settingParams?: ModelSettingParams) => {
+ try {
+ await runModel(modelId, settingParams)
+
+ return {
+ message: `Model ${modelId} started`,
+ }
+ } catch (e) {
+ return {
+ error: e,
+ }
+ }
+}
+
+const runModel = async (modelId: string, settingParams?: ModelSettingParams): Promise => {
+ const janDataFolderPath = getJanDataFolderPath()
+ const modelFolderFullPath = join(janDataFolderPath, 'models', modelId)
+
+ if (!fs.existsSync(modelFolderFullPath)) {
+ throw `Model not found: ${modelId}`
+ }
+
+ const files: string[] = fs.readdirSync(modelFolderFullPath)
+
+ // Look for GGUF model file
+ const ggufBinFile = files.find((file) => file.toLowerCase().includes(SUPPORTED_MODEL_FORMAT))
+
+ const modelMetadataPath = join(modelFolderFullPath, 'model.json')
+ const modelMetadata: Model = JSON.parse(fs.readFileSync(modelMetadataPath, 'utf-8'))
+
+ if (!ggufBinFile) {
+ throw 'No GGUF model file found'
+ }
+ const modelBinaryPath = join(modelFolderFullPath, ggufBinFile)
+
+ const nitroResourceProbe = await getSystemResourceInfo()
+ const nitroModelSettings: NitroModelSettings = {
+ ...modelMetadata.settings,
+ ...settingParams,
+ llama_model_path: modelBinaryPath,
+ // This is critical and requires real CPU physical core count (or performance core)
+ cpu_threads: Math.max(1, nitroResourceProbe.numCpuPhysicalCore),
+ ...(modelMetadata.settings.mmproj && {
+ mmproj: join(modelFolderFullPath, modelMetadata.settings.mmproj),
+ }),
+ }
+
+ logServer(`[NITRO]::Debug: Nitro model settings: ${JSON.stringify(nitroModelSettings)}`)
+
+ // Convert settings.prompt_template to system_prompt, user_prompt, ai_prompt
+ if (modelMetadata.settings.prompt_template) {
+ const promptTemplate = modelMetadata.settings.prompt_template
+ const prompt = promptTemplateConverter(promptTemplate)
+ if (prompt?.error) {
+ return Promise.reject(prompt.error)
+ }
+ nitroModelSettings.system_prompt = prompt.system_prompt
+ nitroModelSettings.user_prompt = prompt.user_prompt
+ nitroModelSettings.ai_prompt = prompt.ai_prompt
+ }
+
+ await runNitroAndLoadModel(modelId, nitroModelSettings)
+}
+
+// TODO: move to util
+const promptTemplateConverter = (promptTemplate: string): PromptTemplate => {
+ // Split the string using the markers
+ const systemMarker = '{system_message}'
+ const promptMarker = '{prompt}'
+
+ if (promptTemplate.includes(systemMarker) && promptTemplate.includes(promptMarker)) {
+ // Find the indices of the markers
+ const systemIndex = promptTemplate.indexOf(systemMarker)
+ const promptIndex = promptTemplate.indexOf(promptMarker)
+
+ // Extract the parts of the string
+ const system_prompt = promptTemplate.substring(0, systemIndex)
+ const user_prompt = promptTemplate.substring(systemIndex + systemMarker.length, promptIndex)
+ const ai_prompt = promptTemplate.substring(promptIndex + promptMarker.length)
+
+ // Return the split parts
+ return { system_prompt, user_prompt, ai_prompt }
+ } else if (promptTemplate.includes(promptMarker)) {
+ // Extract the parts of the string for the case where only promptMarker is present
+ const promptIndex = promptTemplate.indexOf(promptMarker)
+ const user_prompt = promptTemplate.substring(0, promptIndex)
+ const ai_prompt = promptTemplate.substring(promptIndex + promptMarker.length)
+
+ // Return the split parts
+ return { user_prompt, ai_prompt }
+ }
+
+ // Return an error if none of the conditions are met
+ return { error: 'Cannot split prompt template' }
+}
+
+const runNitroAndLoadModel = async (modelId: string, modelSettings: NitroModelSettings) => {
+  // Stop any previously running Nitro instance and wait for its port to be freed before starting a new one
+ const tcpPortUsed = require('tcp-port-used')
+
+ await stopModel(modelId)
+ await tcpPortUsed.waitUntilFree(NITRO_DEFAULT_PORT, 300, 5000)
+
+ /**
+ * There is a problem with Windows process manager
+   * Should wait for a while to make sure the port is free and the subprocess is killed
+ * The tested threshold is 500ms
+ **/
+ if (process.platform === 'win32') {
+ await new Promise((resolve) => setTimeout(resolve, 500))
+ }
+
+ await spawnNitroProcess()
+ await loadLLMModel(modelSettings)
+ await validateModelStatus()
+}
+
+const spawnNitroProcess = async (): Promise => {
+ logServer(`[NITRO]::Debug: Spawning Nitro subprocess...`)
+
+ let binaryFolder = join(
+ getJanExtensionsPath(),
+ '@janhq',
+ 'inference-nitro-extension',
+ 'dist',
+ 'bin'
+ )
+
+ let executableOptions = executableNitroFile()
+ const tcpPortUsed = require('tcp-port-used')
+
+ const args: string[] = ['1', LOCAL_HOST, NITRO_DEFAULT_PORT.toString()]
+ // Execute the binary
+ logServer(
+ `[NITRO]::Debug: Spawn nitro at path: ${executableOptions.executablePath}, and args: ${args}`
+ )
+ subprocess = spawn(
+ executableOptions.executablePath,
+ ['1', LOCAL_HOST, NITRO_DEFAULT_PORT.toString()],
+ {
+ cwd: binaryFolder,
+ env: {
+ ...process.env,
+ CUDA_VISIBLE_DEVICES: executableOptions.cudaVisibleDevices,
+ },
+ }
+ )
+
+ // Handle subprocess output
+ subprocess.stdout.on('data', (data: any) => {
+ logServer(`[NITRO]::Debug: ${data}`)
+ })
+
+ subprocess.stderr.on('data', (data: any) => {
+ logServer(`[NITRO]::Error: ${data}`)
+ })
+
+ subprocess.on('close', (code: any) => {
+ logServer(`[NITRO]::Debug: Nitro exited with code: ${code}`)
+ subprocess = undefined
+ })
+
+ tcpPortUsed.waitUntilUsed(NITRO_DEFAULT_PORT, 300, 30000).then(() => {
+ logServer(`[NITRO]::Debug: Nitro is ready`)
+ })
+}
+
+type NitroExecutableOptions = {
+ executablePath: string
+ cudaVisibleDevices: string
+}
+
+const executableNitroFile = (): NitroExecutableOptions => {
+ const nvidiaInfoFilePath = join(getJanDataFolderPath(), 'settings', 'settings.json')
+ let binaryFolder = join(
+ getJanExtensionsPath(),
+ '@janhq',
+ 'inference-nitro-extension',
+ 'dist',
+ 'bin'
+ )
+
+ let cudaVisibleDevices = ''
+ let binaryName = 'nitro'
+ /**
+ * The binary folder is different for each platform.
+ */
+ if (process.platform === 'win32') {
+ /**
+ * For Windows: win-cpu, win-cuda-11-7, win-cuda-12-0
+ */
+ let nvidiaInfo = JSON.parse(fs.readFileSync(nvidiaInfoFilePath, 'utf-8'))
+ if (nvidiaInfo['run_mode'] === 'cpu') {
+ binaryFolder = join(binaryFolder, 'win-cpu')
+ } else {
+ if (nvidiaInfo['cuda'].version === '12') {
+ binaryFolder = join(binaryFolder, 'win-cuda-12-0')
+ } else {
+ binaryFolder = join(binaryFolder, 'win-cuda-11-7')
+ }
+ cudaVisibleDevices = nvidiaInfo['gpu_highest_vram']
+ }
+ binaryName = 'nitro.exe'
+ } else if (process.platform === 'darwin') {
+ /**
+   * For MacOS: mac-arm64 (Apple Silicon), mac-x64 (Intel)
+ */
+ if (process.arch === 'arm64') {
+ binaryFolder = join(binaryFolder, 'mac-arm64')
+ } else {
+ binaryFolder = join(binaryFolder, 'mac-x64')
+ }
+ } else {
+ /**
+ * For Linux: linux-cpu, linux-cuda-11-7, linux-cuda-12-0
+ */
+ let nvidiaInfo = JSON.parse(fs.readFileSync(nvidiaInfoFilePath, 'utf-8'))
+ if (nvidiaInfo['run_mode'] === 'cpu') {
+ binaryFolder = join(binaryFolder, 'linux-cpu')
+ } else {
+ if (nvidiaInfo['cuda'].version === '12') {
+ binaryFolder = join(binaryFolder, 'linux-cuda-12-0')
+ } else {
+ binaryFolder = join(binaryFolder, 'linux-cuda-11-7')
+ }
+ cudaVisibleDevices = nvidiaInfo['gpu_highest_vram']
+ }
+ }
+
+ return {
+ executablePath: join(binaryFolder, binaryName),
+ cudaVisibleDevices,
+ }
+}
+
+const validateModelStatus = async (): Promise => {
+ // Send a GET request to the validation URL.
+  // Retry the request up to 5 times if it fails, with a delay of 500 milliseconds between retries.
+ const fetchRT = require('fetch-retry')
+ const fetchRetry = fetchRT(fetch)
+
+ return fetchRetry(NITRO_HTTP_VALIDATE_MODEL_URL, {
+ method: 'GET',
+ headers: {
+ 'Content-Type': 'application/json',
+ },
+ retries: 5,
+ retryDelay: 500,
+ }).then(async (res: Response) => {
+ logServer(`[NITRO]::Debug: Validate model state success with response ${JSON.stringify(res)}`)
+ // If the response is OK, check model_loaded status.
+ if (res.ok) {
+ const body = await res.json()
+ // If the model is loaded, return an empty object.
+ // Otherwise, return an object with an error message.
+ if (body.model_loaded) {
+ return Promise.resolve()
+ }
+ }
+ return Promise.reject('Validate model status failed')
+ })
+}
+
+const loadLLMModel = async (settings: NitroModelSettings): Promise => {
+ logServer(`[NITRO]::Debug: Loading model with params ${JSON.stringify(settings)}`)
+ const fetchRT = require('fetch-retry')
+ const fetchRetry = fetchRT(fetch)
+
+ return fetchRetry(NITRO_HTTP_LOAD_MODEL_URL, {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ },
+ body: JSON.stringify(settings),
+ retries: 3,
+ retryDelay: 500,
+ })
+ .then((res: any) => {
+ logServer(`[NITRO]::Debug: Load model success with response ${JSON.stringify(res)}`)
+ return Promise.resolve(res)
+ })
+ .catch((err: any) => {
+ logServer(`[NITRO]::Error: Load model failed with error ${err}`)
+ return Promise.reject(err)
+ })
+}
+
+/**
+ * Stop model and kill nitro process.
+ */
+export const stopModel = async (_modelId: string) => {
+ if (!subprocess) {
+ return {
+ error: "Model isn't running",
+ }
+ }
+ return new Promise((resolve, reject) => {
+ const controller = new AbortController()
+ setTimeout(() => {
+ controller.abort()
+ reject({
+ error: 'Failed to stop model: Timedout',
+ })
+ }, 5000)
+ const tcpPortUsed = require('tcp-port-used')
+ logServer(`[NITRO]::Debug: Request to kill Nitro`)
+
+ fetch(NITRO_HTTP_KILL_URL, {
+ method: 'DELETE',
+ signal: controller.signal,
+ })
+ .then(() => {
+ subprocess?.kill()
+ subprocess = undefined
+ })
+ .catch(() => {
+ // don't need to do anything, we still kill the subprocess
+ })
+ .then(() => tcpPortUsed.waitUntilFree(NITRO_DEFAULT_PORT, 300, 5000))
+ .then(() => logServer(`[NITRO]::Debug: Nitro process is terminated`))
+ .then(() =>
+ resolve({
+ message: 'Model stopped',
+ })
+ )
+ })
+}
diff --git a/core/src/node/api/routes/common.ts b/core/src/node/api/routes/common.ts
index a6c65a382..27385e561 100644
--- a/core/src/node/api/routes/common.ts
+++ b/core/src/node/api/routes/common.ts
@@ -10,6 +10,8 @@ import {
} from '../common/builder'
import { JanApiRouteConfiguration } from '../common/configuration'
+import { startModel, stopModel } from '../common/startStopModel'
+import { ModelSettingParams } from '../../../types'
export const commonRouter = async (app: HttpServer) => {
// Common Routes
@@ -17,19 +19,33 @@ export const commonRouter = async (app: HttpServer) => {
app.get(`/${key}`, async (_request) => getBuilder(JanApiRouteConfiguration[key]))
app.get(`/${key}/:id`, async (request: any) =>
- retrieveBuilder(JanApiRouteConfiguration[key], request.params.id),
+ retrieveBuilder(JanApiRouteConfiguration[key], request.params.id)
)
app.delete(`/${key}/:id`, async (request: any) =>
- deleteBuilder(JanApiRouteConfiguration[key], request.params.id),
+ deleteBuilder(JanApiRouteConfiguration[key], request.params.id)
)
})
// Download Model Routes
app.get(`/models/download/:modelId`, async (request: any) =>
- downloadModel(request.params.modelId, { ignoreSSL: request.query.ignoreSSL === 'true', proxy: request.query.proxy }),
+ downloadModel(request.params.modelId, {
+ ignoreSSL: request.query.ignoreSSL === 'true',
+ proxy: request.query.proxy,
+ })
)
+ app.put(`/models/:modelId/start`, async (request: any) => {
+ let settingParams: ModelSettingParams | undefined = undefined
+ if (Object.keys(request.body).length !== 0) {
+ settingParams = JSON.parse(request.body) as ModelSettingParams
+ }
+
+ return startModel(request.params.modelId, settingParams)
+ })
+
+ app.put(`/models/:modelId/stop`, async (request: any) => stopModel(request.params.modelId))
+
// Chat Completion Routes
app.post(`/chat/completions`, async (request: any, reply: any) => chatCompletions(request, reply))
diff --git a/core/src/node/api/routes/fileManager.ts b/core/src/node/api/routes/fileManager.ts
index 159c23a0c..66056444e 100644
--- a/core/src/node/api/routes/fileManager.ts
+++ b/core/src/node/api/routes/fileManager.ts
@@ -8,5 +8,7 @@ export const fsRouter = async (app: HttpServer) => {
app.post(`/app/${FileManagerRoute.getResourcePath}`, async (request: any, reply: any) => {})
+ app.post(`/app/${FileManagerRoute.getUserHomePath}`, async (request: any, reply: any) => {})
+
app.post(`/app/${FileManagerRoute.fileStat}`, async (request: any, reply: any) => {})
}
diff --git a/core/src/node/log.ts b/core/src/node/log.ts
index 8a5155d8d..6f2c2f80f 100644
--- a/core/src/node/log.ts
+++ b/core/src/node/log.ts
@@ -2,38 +2,36 @@ import fs from 'fs'
import util from 'util'
import { getAppLogPath, getServerLogPath } from './utils'
-export const log = function (message: string) {
- const appLogPath = getAppLogPath()
+export const log = (message: string) => {
+ const path = getAppLogPath()
if (!message.startsWith('[')) {
message = `[APP]::${message}`
}
message = `${new Date().toISOString()} ${message}`
- if (fs.existsSync(appLogPath)) {
- var log_file = fs.createWriteStream(appLogPath, {
- flags: 'a',
- })
- log_file.write(util.format(message) + '\n')
- log_file.close()
- console.debug(message)
- }
+ writeLog(message, path)
}
-export const logServer = function (message: string) {
- const serverLogPath = getServerLogPath()
+export const logServer = (message: string) => {
+ const path = getServerLogPath()
if (!message.startsWith('[')) {
message = `[SERVER]::${message}`
}
message = `${new Date().toISOString()} ${message}`
+ writeLog(message, path)
+}
- if (fs.existsSync(serverLogPath)) {
- var log_file = fs.createWriteStream(serverLogPath, {
+const writeLog = (message: string, logPath: string) => {
+ if (!fs.existsSync(logPath)) {
+ fs.writeFileSync(logPath, message)
+ } else {
+ const logFile = fs.createWriteStream(logPath, {
flags: 'a',
})
- log_file.write(util.format(message) + '\n')
- log_file.close()
+ logFile.write(util.format(message) + '\n')
+ logFile.close()
console.debug(message)
}
}
diff --git a/core/src/node/utils/index.ts b/core/src/node/utils/index.ts
index 00db04c9b..4bcbf13b1 100644
--- a/core/src/node/utils/index.ts
+++ b/core/src/node/utils/index.ts
@@ -1,16 +1,18 @@
-import { AppConfiguration } from "../../types";
-import { join } from "path";
-import fs from "fs";
-import os from "os";
+import { AppConfiguration, SystemResourceInfo } from '../../types'
+import { join } from 'path'
+import fs from 'fs'
+import os from 'os'
+import { log, logServer } from '../log'
+import childProcess from 'child_process'
// TODO: move this to core
-const configurationFileName = "settings.json";
+const configurationFileName = 'settings.json'
// TODO: do no specify app name in framework module
-const defaultJanDataFolder = join(os.homedir(), "jan");
+const defaultJanDataFolder = join(os.homedir(), 'jan')
const defaultAppConfig: AppConfiguration = {
data_folder: defaultJanDataFolder,
-};
+}
/**
* Getting App Configurations.
@@ -20,39 +22,39 @@ const defaultAppConfig: AppConfiguration = {
export const getAppConfigurations = (): AppConfiguration => {
// Retrieve Application Support folder path
// Fallback to user home directory if not found
- const configurationFile = getConfigurationFilePath();
+ const configurationFile = getConfigurationFilePath()
if (!fs.existsSync(configurationFile)) {
// create default app config if we don't have one
- console.debug(`App config not found, creating default config at ${configurationFile}`);
- fs.writeFileSync(configurationFile, JSON.stringify(defaultAppConfig));
- return defaultAppConfig;
+ console.debug(`App config not found, creating default config at ${configurationFile}`)
+ fs.writeFileSync(configurationFile, JSON.stringify(defaultAppConfig))
+ return defaultAppConfig
}
try {
const appConfigurations: AppConfiguration = JSON.parse(
- fs.readFileSync(configurationFile, "utf-8"),
- );
- return appConfigurations;
+ fs.readFileSync(configurationFile, 'utf-8')
+ )
+ return appConfigurations
} catch (err) {
- console.error(`Failed to read app config, return default config instead! Err: ${err}`);
- return defaultAppConfig;
+ console.error(`Failed to read app config, return default config instead! Err: ${err}`)
+ return defaultAppConfig
}
-};
+}
const getConfigurationFilePath = () =>
join(
- global.core?.appPath() || process.env[process.platform == "win32" ? "USERPROFILE" : "HOME"],
- configurationFileName,
- );
+ global.core?.appPath() || process.env[process.platform == 'win32' ? 'USERPROFILE' : 'HOME'],
+ configurationFileName
+ )
export const updateAppConfiguration = (configuration: AppConfiguration): Promise => {
- const configurationFile = getConfigurationFilePath();
- console.debug("updateAppConfiguration, configurationFile: ", configurationFile);
+ const configurationFile = getConfigurationFilePath()
+ console.debug('updateAppConfiguration, configurationFile: ', configurationFile)
- fs.writeFileSync(configurationFile, JSON.stringify(configuration));
- return Promise.resolve();
-};
+ fs.writeFileSync(configurationFile, JSON.stringify(configuration))
+ return Promise.resolve()
+}
/**
* Utility function to get server log path
@@ -60,13 +62,13 @@ export const updateAppConfiguration = (configuration: AppConfiguration): Promise
* @returns {string} The log path.
*/
export const getServerLogPath = (): string => {
- const appConfigurations = getAppConfigurations();
- const logFolderPath = join(appConfigurations.data_folder, "logs");
+ const appConfigurations = getAppConfigurations()
+ const logFolderPath = join(appConfigurations.data_folder, 'logs')
if (!fs.existsSync(logFolderPath)) {
- fs.mkdirSync(logFolderPath, { recursive: true });
+ fs.mkdirSync(logFolderPath, { recursive: true })
}
- return join(logFolderPath, "server.log");
-};
+ return join(logFolderPath, 'server.log')
+}
/**
* Utility function to get app log path
@@ -74,13 +76,13 @@ export const getServerLogPath = (): string => {
* @returns {string} The log path.
*/
export const getAppLogPath = (): string => {
- const appConfigurations = getAppConfigurations();
- const logFolderPath = join(appConfigurations.data_folder, "logs");
+ const appConfigurations = getAppConfigurations()
+ const logFolderPath = join(appConfigurations.data_folder, 'logs')
if (!fs.existsSync(logFolderPath)) {
- fs.mkdirSync(logFolderPath, { recursive: true });
+ fs.mkdirSync(logFolderPath, { recursive: true })
}
- return join(logFolderPath, "app.log");
-};
+ return join(logFolderPath, 'app.log')
+}
/**
* Utility function to get data folder path
@@ -88,9 +90,9 @@ export const getAppLogPath = (): string => {
* @returns {string} The data folder path.
*/
export const getJanDataFolderPath = (): string => {
- const appConfigurations = getAppConfigurations();
- return appConfigurations.data_folder;
-};
+ const appConfigurations = getAppConfigurations()
+ return appConfigurations.data_folder
+}
/**
* Utility function to get extension path
@@ -98,6 +100,70 @@ export const getJanDataFolderPath = (): string => {
* @returns {string} The extensions path.
*/
export const getJanExtensionsPath = (): string => {
- const appConfigurations = getAppConfigurations();
- return join(appConfigurations.data_folder, "extensions");
-};
+ const appConfigurations = getAppConfigurations()
+ return join(appConfigurations.data_folder, 'extensions')
+}
+
+/**
+ * Utility function to physical cpu count
+ *
+ * @returns {number} The physical cpu count.
+ */
+export const physicalCpuCount = async (): Promise<number> => {
+ const platform = os.platform()
+ if (platform === 'linux') {
+ const output = await exec('lscpu -p | egrep -v "^#" | sort -u -t, -k 2,4 | wc -l')
+ return parseInt(output.trim(), 10)
+ } else if (platform === 'darwin') {
+ const output = await exec('sysctl -n hw.physicalcpu_max')
+ return parseInt(output.trim(), 10)
+ } else if (platform === 'win32') {
+ const output = await exec('WMIC CPU Get NumberOfCores')
+ return output
+ .split(os.EOL)
+ .map((line: string) => parseInt(line))
+ .filter((value: number) => !isNaN(value))
+ .reduce((sum: number, number: number) => sum + number, 1)
+ } else {
+ const cores = os.cpus().filter((cpu: any, index: number) => {
+ const hasHyperthreading = cpu.model.includes('Intel')
+ const isOdd = index % 2 === 1
+ return !hasHyperthreading || isOdd
+ })
+ return cores.length
+ }
+}
+
+const exec = async (command: string): Promise<string> => {
+ return new Promise((resolve, reject) => {
+ childProcess.exec(command, { encoding: 'utf8' }, (error, stdout) => {
+ if (error) {
+ reject(error)
+ } else {
+ resolve(stdout)
+ }
+ })
+ })
+}
+
+export const getSystemResourceInfo = async (): Promise<SystemResourceInfo> => {
+ const cpu = await physicalCpuCount()
+ const message = `[NITRO]::CPU informations - ${cpu}`
+ log(message)
+ logServer(message)
+
+ return {
+ numCpuPhysicalCore: cpu,
+ memAvailable: 0, // TODO: this should not be 0
+ }
+}
+
+export const getEngineConfiguration = async (engineId: string) => {
+ if (engineId !== 'openai') {
+ return undefined
+ }
+ const directoryPath = join(getJanDataFolderPath(), 'engines')
+ const filePath = join(directoryPath, `${engineId}.json`)
+ const data = fs.readFileSync(filePath, 'utf-8')
+ return JSON.parse(data)
+}
diff --git a/core/src/types/config/appConfigEvent.ts b/core/src/types/config/appConfigEvent.ts
new file mode 100644
index 000000000..50e33cfa2
--- /dev/null
+++ b/core/src/types/config/appConfigEvent.ts
@@ -0,0 +1,6 @@
+/**
+ * App configuration event name
+ */
+export enum AppConfigurationEventName {
+ OnConfigurationUpdate = 'OnConfigurationUpdate',
+}
diff --git a/core/src/types/config/index.ts b/core/src/types/config/index.ts
index 0fa3645aa..d2e182b99 100644
--- a/core/src/types/config/index.ts
+++ b/core/src/types/config/index.ts
@@ -1 +1,2 @@
export * from './appConfigEntity'
+export * from './appConfigEvent'
diff --git a/core/src/types/index.ts b/core/src/types/index.ts
index 3bdcb5421..ee6f4ef08 100644
--- a/core/src/types/index.ts
+++ b/core/src/types/index.ts
@@ -6,3 +6,4 @@ export * from './inference'
export * from './monitoring'
export * from './file'
export * from './config'
+export * from './miscellaneous'
diff --git a/core/src/types/miscellaneous/index.ts b/core/src/types/miscellaneous/index.ts
new file mode 100644
index 000000000..02c973323
--- /dev/null
+++ b/core/src/types/miscellaneous/index.ts
@@ -0,0 +1,2 @@
+export * from './systemResourceInfo'
+export * from './promptTemplate'
diff --git a/core/src/types/miscellaneous/promptTemplate.ts b/core/src/types/miscellaneous/promptTemplate.ts
new file mode 100644
index 000000000..a6743c67c
--- /dev/null
+++ b/core/src/types/miscellaneous/promptTemplate.ts
@@ -0,0 +1,6 @@
+export type PromptTemplate = {
+ system_prompt?: string
+ ai_prompt?: string
+ user_prompt?: string
+ error?: string
+}
diff --git a/core/src/types/miscellaneous/systemResourceInfo.ts b/core/src/types/miscellaneous/systemResourceInfo.ts
new file mode 100644
index 000000000..1472cda47
--- /dev/null
+++ b/core/src/types/miscellaneous/systemResourceInfo.ts
@@ -0,0 +1,4 @@
+export type SystemResourceInfo = {
+ numCpuPhysicalCore: number
+ memAvailable: number
+}
diff --git a/core/src/types/model/modelEntity.ts b/core/src/types/model/modelEntity.ts
index 727ff085f..644c34dfb 100644
--- a/core/src/types/model/modelEntity.ts
+++ b/core/src/types/model/modelEntity.ts
@@ -123,6 +123,7 @@ export type ModelSettingParams = {
user_prompt?: string
llama_model_path?: string
mmproj?: string
+ cont_batching?: boolean
}
/**
diff --git a/core/src/types/thread/threadEntity.ts b/core/src/types/thread/threadEntity.ts
index 37136eae6..dd88b10ec 100644
--- a/core/src/types/thread/threadEntity.ts
+++ b/core/src/types/thread/threadEntity.ts
@@ -43,5 +43,4 @@ export type ThreadState = {
waitingForResponse: boolean
error?: Error
lastMessage?: string
- isFinishInit?: boolean
}
diff --git a/docs/.env.example b/docs/.env.example
index 6755f2520..b4a7fa5f1 100644
--- a/docs/.env.example
+++ b/docs/.env.example
@@ -1,5 +1,5 @@
GTM_ID=xxxx
-POSTHOG_PROJECT_API_KEY=xxxx
-POSTHOG_APP_URL=xxxx
+UMAMI_PROJECT_API_KEY=xxxx
+UMAMI_APP_URL=xxxx
ALGOLIA_API_KEY=xxxx
ALGOLIA_APP_ID=xxxx
\ No newline at end of file
diff --git a/electron/handlers/app.ts b/electron/handlers/app.ts
index bdb70047a..c1f431ef3 100644
--- a/electron/handlers/app.ts
+++ b/electron/handlers/app.ts
@@ -1,5 +1,5 @@
import { app, ipcMain, dialog, shell } from 'electron'
-import { join, basename } from 'path'
+import { join, basename, relative as getRelative, isAbsolute } from 'path'
import { WindowManager } from './../managers/window'
import { getResourcePath } from './../utils/path'
import { AppRoute, AppConfiguration } from '@janhq/core'
@@ -50,6 +50,27 @@ export function handleAppIPCs() {
join(...paths)
)
+ /**
+ * Checks if the given path is a subdirectory of the given directory.
+ *
+ * @param _event - The IPC event object.
+ * @param from - The path to check.
+ * @param to - The directory to check against.
+ *
+ * @returns {Promise<boolean>} - A promise that resolves with the result.
+ */
+ ipcMain.handle(
+ AppRoute.isSubdirectory,
+ async (_event, from: string, to: string) => {
+ const relative = getRelative(from, to)
+ const isSubdir =
+ relative && !relative.startsWith('..') && !isAbsolute(relative)
+
+ if (isSubdir === '') return false
+ else return isSubdir
+ }
+ )
+
/**
* Retrieve basename from given path, respect to the current OS.
*/
diff --git a/electron/handlers/fileManager.ts b/electron/handlers/fileManager.ts
index 2528aef71..e328cb53b 100644
--- a/electron/handlers/fileManager.ts
+++ b/electron/handlers/fileManager.ts
@@ -1,4 +1,4 @@
-import { ipcMain } from 'electron'
+import { ipcMain, app } from 'electron'
// @ts-ignore
import reflect from '@alumna/reflect'
@@ -38,6 +38,10 @@ export function handleFileMangerIPCs() {
getResourcePath()
)
+ ipcMain.handle(FileManagerRoute.getUserHomePath, async (_event) =>
+ app.getPath('home')
+ )
+
// handle fs is directory here
ipcMain.handle(
FileManagerRoute.fileStat,
diff --git a/electron/main.ts b/electron/main.ts
index fb7066cd0..5d7e59c0f 100644
--- a/electron/main.ts
+++ b/electron/main.ts
@@ -28,6 +28,22 @@ import { setupCore } from './utils/setup'
app
.whenReady()
+ .then(async () => {
+ if (!app.isPackaged) {
+ // Which means you're running from source code
+ const { default: installExtension, REACT_DEVELOPER_TOOLS } = await import(
+ 'electron-devtools-installer'
+ ) // Don't use import on top level, since the installer package is dev-only
+ try {
+ const name = await installExtension(REACT_DEVELOPER_TOOLS)
+ console.log(`Added Extension: ${name}`)
+ } catch (err) {
+ console.log('An error occurred while installing devtools:')
+ console.error(err)
+ // Only log the error and don't throw it because it's not critical
+ }
+ }
+ })
.then(setupCore)
.then(createUserSpace)
.then(migrateExtensions)
diff --git a/electron/package.json b/electron/package.json
index 173e54f2b..2892fedc6 100644
--- a/electron/package.json
+++ b/electron/package.json
@@ -86,7 +86,7 @@
"request": "^2.88.2",
"request-progress": "^3.0.0",
"rimraf": "^5.0.5",
- "typescript": "^5.3.3",
+ "typescript": "^5.2.2",
"ulid": "^2.3.0",
"use-debounce": "^9.0.4"
},
@@ -99,6 +99,7 @@
"@typescript-eslint/parser": "^6.7.3",
"electron": "28.0.0",
"electron-builder": "^24.9.1",
+ "electron-devtools-installer": "^3.2.0",
"electron-playwright-helpers": "^1.6.0",
"eslint-plugin-react": "^7.33.2",
"run-script-os": "^1.1.6"
diff --git a/electron/playwright.config.ts b/electron/playwright.config.ts
index 98b2c7b45..1fa3313f2 100644
--- a/electron/playwright.config.ts
+++ b/electron/playwright.config.ts
@@ -1,9 +1,9 @@
-import { PlaywrightTestConfig } from "@playwright/test";
+import { PlaywrightTestConfig } from '@playwright/test'
const config: PlaywrightTestConfig = {
- testDir: "./tests",
+ testDir: './tests',
retries: 0,
- timeout: 120000,
-};
+ globalTimeout: 300000,
+}
-export default config;
+export default config
diff --git a/electron/tests/explore.e2e.spec.ts b/electron/tests/hub.e2e.spec.ts
similarity index 71%
rename from electron/tests/explore.e2e.spec.ts
rename to electron/tests/hub.e2e.spec.ts
index 77eb3dbda..cc72e037e 100644
--- a/electron/tests/explore.e2e.spec.ts
+++ b/electron/tests/hub.e2e.spec.ts
@@ -9,6 +9,7 @@ import {
let electronApp: ElectronApplication
let page: Page
+const TIMEOUT: number = parseInt(process.env.TEST_TIMEOUT || '300000')
test.beforeAll(async () => {
process.env.CI = 'e2e'
@@ -26,7 +27,9 @@ test.beforeAll(async () => {
})
await stubDialog(electronApp, 'showMessageBox', { response: 1 })
- page = await electronApp.firstWindow()
+ page = await electronApp.firstWindow({
+ timeout: TIMEOUT,
+ })
})
test.afterAll(async () => {
@@ -34,8 +37,12 @@ test.afterAll(async () => {
await page.close()
})
-test('explores models', async () => {
- await page.getByTestId('Hub').first().click()
- await page.getByTestId('testid-explore-models').isVisible()
- // More test cases here...
+test('explores hub', async () => {
+ test.setTimeout(TIMEOUT)
+ await page.getByTestId('Hub').first().click({
+ timeout: TIMEOUT,
+ })
+ await page.getByTestId('hub-container-test-id').isVisible({
+ timeout: TIMEOUT,
+ })
})
diff --git a/electron/tests/main.e2e.spec.ts b/electron/tests/main.e2e.spec.ts
deleted file mode 100644
index 1a5bfe696..000000000
--- a/electron/tests/main.e2e.spec.ts
+++ /dev/null
@@ -1,55 +0,0 @@
-import { _electron as electron } from 'playwright'
-import { ElectronApplication, Page, expect, test } from '@playwright/test'
-
-import {
- findLatestBuild,
- parseElectronApp,
- stubDialog,
-} from 'electron-playwright-helpers'
-
-let electronApp: ElectronApplication
-let page: Page
-
-test.beforeAll(async () => {
- process.env.CI = 'e2e'
-
- const latestBuild = findLatestBuild('dist')
- expect(latestBuild).toBeTruthy()
-
- // parse the packaged Electron app and find paths and other info
- const appInfo = parseElectronApp(latestBuild)
- expect(appInfo).toBeTruthy()
- expect(appInfo.asar).toBe(true)
- expect(appInfo.executable).toBeTruthy()
- expect(appInfo.main).toBeTruthy()
- expect(appInfo.name).toBe('jan')
- expect(appInfo.packageJson).toBeTruthy()
- expect(appInfo.packageJson.name).toBe('jan')
- expect(appInfo.platform).toBeTruthy()
- expect(appInfo.platform).toBe(process.platform)
- expect(appInfo.resourcesDir).toBeTruthy()
-
- electronApp = await electron.launch({
- args: [appInfo.main], // main file from package.json
- executablePath: appInfo.executable, // path to the Electron executable
- })
- await stubDialog(electronApp, 'showMessageBox', { response: 1 })
-
- page = await electronApp.firstWindow()
-})
-
-test.afterAll(async () => {
- await electronApp.close()
- await page.close()
-})
-
-test('renders the home page', async () => {
- expect(page).toBeDefined()
-
- // Welcome text is available
- const welcomeText = await page
- .getByTestId('testid-welcome-title')
- .first()
- .isVisible()
- expect(welcomeText).toBe(false)
-})
diff --git a/electron/tests/navigation.e2e.spec.ts b/electron/tests/navigation.e2e.spec.ts
index 2f4f7b767..5c8721c2f 100644
--- a/electron/tests/navigation.e2e.spec.ts
+++ b/electron/tests/navigation.e2e.spec.ts
@@ -9,6 +9,7 @@ import {
let electronApp: ElectronApplication
let page: Page
+const TIMEOUT: number = parseInt(process.env.TEST_TIMEOUT || '300000')
test.beforeAll(async () => {
process.env.CI = 'e2e'
@@ -26,7 +27,9 @@ test.beforeAll(async () => {
})
await stubDialog(electronApp, 'showMessageBox', { response: 1 })
- page = await electronApp.firstWindow()
+ page = await electronApp.firstWindow({
+ timeout: TIMEOUT,
+ })
})
test.afterAll(async () => {
@@ -35,20 +38,24 @@ test.afterAll(async () => {
})
test('renders left navigation panel', async () => {
- // Chat section should be there
- const chatSection = await page.getByTestId('Chat').first().isVisible()
- expect(chatSection).toBe(false)
-
- // Home actions
- /* Disable unstable feature tests
- ** const botBtn = await page.getByTestId("Bot").first().isEnabled();
- ** Enable back when it is whitelisted
- */
-
+ test.setTimeout(TIMEOUT)
const systemMonitorBtn = await page
.getByTestId('System Monitor')
.first()
- .isEnabled()
- const settingsBtn = await page.getByTestId('Settings').first().isEnabled()
+ .isEnabled({
+ timeout: TIMEOUT,
+ })
+ const settingsBtn = await page
+ .getByTestId('Thread')
+ .first()
+ .isEnabled({ timeout: TIMEOUT })
expect([systemMonitorBtn, settingsBtn].filter((e) => !e).length).toBe(0)
+ // Chat section should be there
+ await page.getByTestId('Local API Server').first().click({
+ timeout: TIMEOUT,
+ })
+ const localServer = await page.getByTestId('local-server-testid').first()
+ await expect(localServer).toBeVisible({
+ timeout: TIMEOUT,
+ })
})
diff --git a/electron/tests/settings.e2e.spec.ts b/electron/tests/settings.e2e.spec.ts
index 798504c70..ad2d7b4a4 100644
--- a/electron/tests/settings.e2e.spec.ts
+++ b/electron/tests/settings.e2e.spec.ts
@@ -9,6 +9,7 @@ import {
let electronApp: ElectronApplication
let page: Page
+const TIMEOUT: number = parseInt(process.env.TEST_TIMEOUT || '300000')
test.beforeAll(async () => {
process.env.CI = 'e2e'
@@ -26,7 +27,9 @@ test.beforeAll(async () => {
})
await stubDialog(electronApp, 'showMessageBox', { response: 1 })
- page = await electronApp.firstWindow()
+ page = await electronApp.firstWindow({
+ timeout: TIMEOUT,
+ })
})
test.afterAll(async () => {
@@ -35,6 +38,8 @@ test.afterAll(async () => {
})
test('shows settings', async () => {
- await page.getByTestId('Settings').first().click()
- await page.getByTestId('testid-setting-description').isVisible()
+ test.setTimeout(TIMEOUT)
+ await page.getByTestId('Settings').first().click({ timeout: TIMEOUT })
+ const settingDescription = page.getByTestId('testid-setting-description')
+ await expect(settingDescription).toBeVisible({ timeout: TIMEOUT })
})
diff --git a/electron/tests/system-monitor.e2e.spec.ts b/electron/tests/system-monitor.e2e.spec.ts
deleted file mode 100644
index 747a8ae18..000000000
--- a/electron/tests/system-monitor.e2e.spec.ts
+++ /dev/null
@@ -1,41 +0,0 @@
-import { _electron as electron } from 'playwright'
-import { ElectronApplication, Page, expect, test } from '@playwright/test'
-
-import {
- findLatestBuild,
- parseElectronApp,
- stubDialog,
-} from 'electron-playwright-helpers'
-
-let electronApp: ElectronApplication
-let page: Page
-
-test.beforeAll(async () => {
- process.env.CI = 'e2e'
-
- const latestBuild = findLatestBuild('dist')
- expect(latestBuild).toBeTruthy()
-
- // parse the packaged Electron app and find paths and other info
- const appInfo = parseElectronApp(latestBuild)
- expect(appInfo).toBeTruthy()
-
- electronApp = await electron.launch({
- args: [appInfo.main], // main file from package.json
- executablePath: appInfo.executable, // path to the Electron executable
- })
- await stubDialog(electronApp, 'showMessageBox', { response: 1 })
-
- page = await electronApp.firstWindow()
-})
-
-test.afterAll(async () => {
- await electronApp.close()
- await page.close()
-})
-
-test('shows system monitor', async () => {
- await page.getByTestId('System Monitor').first().click()
- await page.getByTestId('testid-system-monitor').isVisible()
- // More test cases here...
-})
diff --git a/extensions/assistant-extension/package.json b/extensions/assistant-extension/package.json
index f4e4dd825..84bcdf47e 100644
--- a/extensions/assistant-extension/package.json
+++ b/extensions/assistant-extension/package.json
@@ -8,7 +8,10 @@
"license": "AGPL-3.0",
"scripts": {
"build": "tsc --module commonjs && rollup -c rollup.config.ts",
- "build:publish": "rimraf *.tgz --glob && npm run build && npm pack && cpx *.tgz ../../electron/pre-install"
+ "build:publish:linux": "rimraf *.tgz --glob && npm run build && npm pack && cpx *.tgz ../../electron/pre-install",
+ "build:publish:darwin": "rimraf *.tgz --glob && npm run build && ../../.github/scripts/auto-sign.sh && npm pack && cpx *.tgz ../../electron/pre-install",
+ "build:publish:win32": "rimraf *.tgz --glob && npm run build && npm pack && cpx *.tgz ../../electron/pre-install",
+ "build:publish": "run-script-os"
},
"devDependencies": {
"@rollup/plugin-commonjs": "^25.0.7",
@@ -22,7 +25,8 @@
"rollup-plugin-define": "^1.0.1",
"rollup-plugin-sourcemaps": "^0.6.3",
"rollup-plugin-typescript2": "^0.36.0",
- "typescript": "^5.3.3"
+ "typescript": "^5.3.3",
+ "run-script-os": "^1.1.6"
},
"dependencies": {
"@janhq/core": "file:../../core",
diff --git a/extensions/assistant-extension/src/node/tools/retrieval/index.ts b/extensions/assistant-extension/src/node/tools/retrieval/index.ts
index f9d5c4029..cd7e9abb1 100644
--- a/extensions/assistant-extension/src/node/tools/retrieval/index.ts
+++ b/extensions/assistant-extension/src/node/tools/retrieval/index.ts
@@ -12,12 +12,11 @@ export class Retrieval {
public chunkOverlap?: number = 0;
private retriever: any;
- private embeddingModel: any = undefined;
+ private embeddingModel?: OpenAIEmbeddings = undefined;
private textSplitter?: RecursiveCharacterTextSplitter;
constructor(chunkSize: number = 4000, chunkOverlap: number = 200) {
this.updateTextSplitter(chunkSize, chunkOverlap);
- this.embeddingModel = new OpenAIEmbeddings({});
}
public updateTextSplitter(chunkSize: number, chunkOverlap: number): void {
@@ -36,7 +35,7 @@ export class Retrieval {
if (engine === "nitro") {
this.embeddingModel = new OpenAIEmbeddings(
{ openAIApiKey: "nitro-embedding" },
- { basePath: "http://127.0.0.1:3928/v1" },
+ { basePath: "http://127.0.0.1:3928/v1" }
);
} else {
// Fallback to OpenAI Settings
@@ -50,11 +49,12 @@ export class Retrieval {
public ingestAgentKnowledge = async (
filePath: string,
- memoryPath: string,
+ memoryPath: string
): Promise<void> => {
const loader = new PDFLoader(filePath, {
splitPages: true,
});
+ if (!this.embeddingModel) return Promise.reject();
const doc = await loader.load();
const docs = await this.textSplitter!.splitDocuments(doc);
const vectorStore = await HNSWLib.fromDocuments(docs, this.embeddingModel);
@@ -62,6 +62,7 @@ export class Retrieval {
};
public loadRetrievalAgent = async (memoryPath: string): Promise<void> => {
+ if (!this.embeddingModel) return Promise.reject();
const vectorStore = await HNSWLib.load(memoryPath, this.embeddingModel);
this.retriever = vectorStore.asRetriever(2);
return Promise.resolve();
diff --git a/extensions/conversational-extension/src/index.ts b/extensions/conversational-extension/src/index.ts
index 61f0fd0e9..3d28a9c1d 100644
--- a/extensions/conversational-extension/src/index.ts
+++ b/extensions/conversational-extension/src/index.ts
@@ -119,19 +119,20 @@ export default class JSONConversationalExtension extends ConversationalExtension
if (!(await fs.existsSync(threadDirPath)))
await fs.mkdirSync(threadDirPath)
- if (message.content[0].type === 'image') {
+ if (message.content[0]?.type === 'image') {
const filesPath = await joinPath([threadDirPath, 'files'])
if (!(await fs.existsSync(filesPath))) await fs.mkdirSync(filesPath)
const imagePath = await joinPath([filesPath, `${message.id}.png`])
const base64 = message.content[0].text.annotations[0]
await this.storeImage(base64, imagePath)
- // if (fs.existsSync(imagePath)) {
- // message.content[0].text.annotations[0] = imagePath
- // }
+ if ((await fs.existsSync(imagePath)) && message.content?.length) {
+ // Use file path instead of blob
+ message.content[0].text.annotations[0] = `threads/${message.thread_id}/files/${message.id}.png`
+ }
}
- if (message.content[0].type === 'pdf') {
+ if (message.content[0]?.type === 'pdf') {
const filesPath = await joinPath([threadDirPath, 'files'])
if (!(await fs.existsSync(filesPath))) await fs.mkdirSync(filesPath)
@@ -139,7 +140,7 @@ export default class JSONConversationalExtension extends ConversationalExtension
const blob = message.content[0].text.annotations[0]
await this.storeFile(blob, filePath)
- if (await fs.existsSync(filePath)) {
+ if ((await fs.existsSync(filePath)) && message.content?.length) {
// Use file path instead of blob
message.content[0].text.annotations[0] = `threads/${message.thread_id}/files/${message.id}.pdf`
}
diff --git a/extensions/inference-nitro-extension/bin/version.txt b/extensions/inference-nitro-extension/bin/version.txt
index 769ed6ae7..c2c0004f0 100644
--- a/extensions/inference-nitro-extension/bin/version.txt
+++ b/extensions/inference-nitro-extension/bin/version.txt
@@ -1 +1 @@
-0.2.14
+0.3.5
diff --git a/extensions/inference-nitro-extension/package.json b/extensions/inference-nitro-extension/package.json
index 44727eb70..8ad516ad9 100644
--- a/extensions/inference-nitro-extension/package.json
+++ b/extensions/inference-nitro-extension/package.json
@@ -35,7 +35,7 @@
"rollup-plugin-sourcemaps": "^0.6.3",
"rollup-plugin-typescript2": "^0.36.0",
"run-script-os": "^1.1.6",
- "typescript": "^5.3.3"
+ "typescript": "^5.2.2"
},
"dependencies": {
"@janhq/core": "file:../../core",
diff --git a/extensions/inference-nitro-extension/rollup.config.ts b/extensions/inference-nitro-extension/rollup.config.ts
index 374a054cd..77a9fb208 100644
--- a/extensions/inference-nitro-extension/rollup.config.ts
+++ b/extensions/inference-nitro-extension/rollup.config.ts
@@ -27,6 +27,9 @@ export default [
TROUBLESHOOTING_URL: JSON.stringify(
"https://jan.ai/guides/troubleshooting"
),
+ JAN_SERVER_INFERENCE_URL: JSON.stringify(
+ "http://localhost:1337/v1/chat/completions"
+ ),
}),
// Allow json resolution
json(),
diff --git a/extensions/inference-nitro-extension/src/@types/global.d.ts b/extensions/inference-nitro-extension/src/@types/global.d.ts
index 5fb41f0f8..7a4fb4805 100644
--- a/extensions/inference-nitro-extension/src/@types/global.d.ts
+++ b/extensions/inference-nitro-extension/src/@types/global.d.ts
@@ -1,22 +1,7 @@
declare const NODE: string;
declare const INFERENCE_URL: string;
declare const TROUBLESHOOTING_URL: string;
-
-/**
- * The parameters for the initModel function.
- * @property settings - The settings for the machine learning model.
- * @property settings.ctx_len - The context length.
- * @property settings.ngl - The number of generated tokens.
- * @property settings.cont_batching - Whether to use continuous batching.
- * @property settings.embedding - Whether to use embedding.
- */
-interface EngineSettings {
- ctx_len: number;
- ngl: number;
- cpu_threads: number;
- cont_batching: boolean;
- embedding: boolean;
-}
+declare const JAN_SERVER_INFERENCE_URL: string;
/**
* The response from the initModel function.
@@ -26,8 +11,3 @@ interface ModelOperationResponse {
error?: any;
modelFile?: string;
}
-
-interface ResourcesInfo {
- numCpuPhysicalCore: number;
- memAvailable: number;
-}
\ No newline at end of file
diff --git a/extensions/inference-nitro-extension/src/helpers/sse.ts b/extensions/inference-nitro-extension/src/helpers/sse.ts
index c6352383d..aab260828 100644
--- a/extensions/inference-nitro-extension/src/helpers/sse.ts
+++ b/extensions/inference-nitro-extension/src/helpers/sse.ts
@@ -6,6 +6,7 @@ import { Observable } from "rxjs";
* @returns An Observable that emits the generated response as a string.
*/
export function requestInference(
+ inferenceUrl: string,
recentMessages: any[],
model: Model,
controller?: AbortController
@@ -17,7 +18,7 @@ export function requestInference(
stream: true,
...model.parameters,
});
- fetch(INFERENCE_URL, {
+ fetch(inferenceUrl, {
method: "POST",
headers: {
"Content-Type": "application/json",
diff --git a/extensions/inference-nitro-extension/src/index.ts b/extensions/inference-nitro-extension/src/index.ts
index 0e6edb992..81a0031ac 100644
--- a/extensions/inference-nitro-extension/src/index.ts
+++ b/extensions/inference-nitro-extension/src/index.ts
@@ -24,6 +24,7 @@ import {
MessageEvent,
ModelEvent,
InferenceEvent,
+ ModelSettingParams,
} from "@janhq/core";
import { requestInference } from "./helpers/sse";
import { ulid } from "ulid";
@@ -45,7 +46,7 @@ export default class JanInferenceNitroExtension extends InferenceExtension {
private _currentModel: Model | undefined;
- private _engineSettings: EngineSettings = {
+ private _engineSettings: ModelSettingParams = {
ctx_len: 2048,
ngl: 100,
cpu_threads: 1,
@@ -67,35 +68,48 @@ export default class JanInferenceNitroExtension extends InferenceExtension {
*/
private nitroProcessInfo: any = undefined;
+ private inferenceUrl = "";
+
/**
* Subscribes to events emitted by the @janhq/core package.
*/
async onLoad() {
if (!(await fs.existsSync(JanInferenceNitroExtension._homeDir))) {
- await fs
- .mkdirSync(JanInferenceNitroExtension._homeDir)
- .catch((err: Error) => console.debug(err));
+ try {
+ await fs.mkdirSync(JanInferenceNitroExtension._homeDir);
+ } catch (e) {
+ console.debug(e);
+ }
}
+ // init inference url
+ // @ts-ignore
+ const electronApi = window?.electronAPI;
+ this.inferenceUrl = INFERENCE_URL;
+ if (!electronApi) {
+ this.inferenceUrl = JAN_SERVER_INFERENCE_URL;
+ }
+ console.debug("Inference url: ", this.inferenceUrl);
+
if (!(await fs.existsSync(JanInferenceNitroExtension._settingsDir)))
await fs.mkdirSync(JanInferenceNitroExtension._settingsDir);
this.writeDefaultEngineSettings();
// Events subscription
events.on(MessageEvent.OnMessageSent, (data: MessageRequest) =>
- this.onMessageRequest(data),
+ this.onMessageRequest(data)
);
events.on(ModelEvent.OnModelInit, (model: Model) =>
- this.onModelInit(model),
+ this.onModelInit(model)
);
events.on(ModelEvent.OnModelStop, (model: Model) =>
- this.onModelStop(model),
+ this.onModelStop(model)
);
events.on(InferenceEvent.OnInferenceStopped, () =>
- this.onInferenceStopped(),
+ this.onInferenceStopped()
);
// Attempt to fetch nvidia info
@@ -120,7 +134,7 @@ export default class JanInferenceNitroExtension extends InferenceExtension {
} else {
await fs.writeFileSync(
engineFile,
- JSON.stringify(this._engineSettings, null, 2),
+ JSON.stringify(this._engineSettings, null, 2)
);
}
} catch (err) {
@@ -133,6 +147,7 @@ export default class JanInferenceNitroExtension extends InferenceExtension {
const modelFullPath = await joinPath(["models", model.id]);
+ this._currentModel = model;
const nitroInitResult = await executeOnMain(NODE, "runModel", {
modelFullPath,
model,
@@ -143,12 +158,11 @@ export default class JanInferenceNitroExtension extends InferenceExtension {
return;
}
- this._currentModel = model;
events.emit(ModelEvent.OnModelReady, model);
this.getNitroProcesHealthIntervalId = setInterval(
() => this.periodicallyGetNitroHealth(),
- JanInferenceNitroExtension._intervalHealthCheck,
+ JanInferenceNitroExtension._intervalHealthCheck
);
}
@@ -205,7 +219,11 @@ export default class JanInferenceNitroExtension extends InferenceExtension {
return new Promise(async (resolve, reject) => {
if (!this._currentModel) return Promise.reject("No model loaded");
- requestInference(data.messages ?? [], this._currentModel).subscribe({
+ requestInference(
+ this.inferenceUrl,
+ data.messages ?? [],
+ this._currentModel
+ ).subscribe({
next: (_content: any) => {},
complete: async () => {
resolve(message);
@@ -225,6 +243,9 @@ export default class JanInferenceNitroExtension extends InferenceExtension {
*/
private async onMessageRequest(data: MessageRequest) {
if (data.model?.engine !== InferenceEngine.nitro || !this._currentModel) {
+ console.log(
+ `Model is not nitro or no model loaded ${data.model?.engine} ${this._currentModel}`
+ );
return;
}
@@ -250,7 +271,12 @@ export default class JanInferenceNitroExtension extends InferenceExtension {
...(this._currentModel || {}),
...(data.model || {}),
};
- requestInference(data.messages ?? [], model, this.controller).subscribe({
+ requestInference(
+ this.inferenceUrl,
+ data.messages ?? [],
+ model,
+ this.controller
+ ).subscribe({
next: (content: any) => {
const messageContent: ThreadContent = {
type: ContentType.Text,
diff --git a/extensions/inference-nitro-extension/src/node/index.ts b/extensions/inference-nitro-extension/src/node/index.ts
index 77060e414..7ba90b556 100644
--- a/extensions/inference-nitro-extension/src/node/index.ts
+++ b/extensions/inference-nitro-extension/src/node/index.ts
@@ -3,11 +3,19 @@ import path from "path";
import { ChildProcessWithoutNullStreams, spawn } from "child_process";
import tcpPortUsed from "tcp-port-used";
import fetchRT from "fetch-retry";
-import { log, getJanDataFolderPath } from "@janhq/core/node";
+import {
+ log,
+ getJanDataFolderPath,
+ getSystemResourceInfo,
+} from "@janhq/core/node";
import { getNitroProcessInfo, updateNvidiaInfo } from "./nvidia";
-import { Model, InferenceEngine, ModelSettingParams } from "@janhq/core";
+import {
+ Model,
+ InferenceEngine,
+ ModelSettingParams,
+ PromptTemplate,
+} from "@janhq/core";
import { executableNitroFile } from "./execute";
-import { physicalCpuCount } from "./utils";
// Polyfill fetch with retry
const fetchRetry = fetchRT(fetch);
@@ -19,25 +27,6 @@ interface ModelInitOptions {
modelFullPath: string;
model: Model;
}
-
-/**
- * The response object of Prompt Template parsing.
- */
-interface PromptTemplate {
- system_prompt?: string;
- ai_prompt?: string;
- user_prompt?: string;
- error?: string;
-}
-
-/**
- * Model setting args for Nitro model load.
- */
-interface ModelSettingArgs extends ModelSettingParams {
- llama_model_path: string;
- cpu_threads: number;
-}
-
// The PORT to use for the Nitro subprocess
const PORT = 3928;
// The HOST address to use for the Nitro subprocess
@@ -60,7 +49,7 @@ let subprocess: ChildProcessWithoutNullStreams | undefined = undefined;
// The current model file url
let currentModelFile: string = "";
// The current model settings
-let currentSettings: ModelSettingArgs | undefined = undefined;
+let currentSettings: ModelSettingParams | undefined = undefined;
/**
* Stops a Nitro subprocess.
@@ -106,7 +95,7 @@ async function runModel(
if (wrapper.model.engine !== InferenceEngine.nitro) {
return Promise.reject("Not a nitro model");
} else {
- const nitroResourceProbe = await getResourcesInfo();
+ const nitroResourceProbe = await getSystemResourceInfo();
// Convert settings.prompt_template to system_prompt, user_prompt, ai_prompt
if (wrapper.model.settings.prompt_template) {
const promptTemplate = wrapper.model.settings.prompt_template;
@@ -220,6 +209,9 @@ function promptTemplateConverter(promptTemplate: string): PromptTemplate {
* @returns A Promise that resolves when the model is loaded successfully, or rejects with an error message if the model is not found or fails to load.
*/
function loadLLMModel(settings: any): Promise {
+ if (!settings?.ngl) {
+ settings.ngl = 100;
+ }
log(`[NITRO]::Debug: Loading model with params ${JSON.stringify(settings)}`);
return fetchRetry(NITRO_HTTP_LOAD_MODEL_URL, {
method: "POST",
@@ -240,7 +232,7 @@ function loadLLMModel(settings: any): Promise {
})
.catch((err) => {
log(`[NITRO]::Error: Load model failed with error ${err}`);
- return Promise.reject();
+ return Promise.reject(err);
});
}
@@ -262,9 +254,9 @@ async function validateModelStatus(): Promise {
retryDelay: 500,
}).then(async (res: Response) => {
log(
- `[NITRO]::Debug: Validate model state success with response ${JSON.stringify(
- res,
- )}`,
+ `[NITRO]::Debug: Validate model state with response ${JSON.stringify(
+ res.status
+ )}`
);
// If the response is OK, check model_loaded status.
if (res.ok) {
@@ -272,9 +264,19 @@ async function validateModelStatus(): Promise {
// If the model is loaded, return an empty object.
// Otherwise, return an object with an error message.
if (body.model_loaded) {
+ log(
+ `[NITRO]::Debug: Validate model state success with response ${JSON.stringify(
+ body
+ )}`
+ );
return Promise.resolve();
}
}
+ log(
+ `[NITRO]::Debug: Validate model state failed with response ${JSON.stringify(
+ res.statusText
+ )}`
+ );
return Promise.reject("Validate model status failed");
});
}
@@ -351,22 +353,6 @@ function spawnNitroProcess(): Promise {
});
}
-/**
- * Get the system resources information
- * TODO: Move to Core so that it can be reused
- */
-function getResourcesInfo(): Promise {
- return new Promise(async (resolve) => {
- const cpu = await physicalCpuCount();
- log(`[NITRO]::CPU informations - ${cpu}`);
- const response: ResourcesInfo = {
- numCpuPhysicalCore: cpu,
- memAvailable: 0,
- };
- resolve(response);
- });
-}
-
/**
* Every module should have a dispose function
* This will be called when the extension is unloaded and should clean up any resources
diff --git a/extensions/inference-nitro-extension/src/node/utils.ts b/extensions/inference-nitro-extension/src/node/utils.ts
deleted file mode 100644
index c7ef2e9a6..000000000
--- a/extensions/inference-nitro-extension/src/node/utils.ts
+++ /dev/null
@@ -1,56 +0,0 @@
-import os from "os";
-import childProcess from "child_process";
-
-function exec(command: string): Promise {
- return new Promise((resolve, reject) => {
- childProcess.exec(command, { encoding: "utf8" }, (error, stdout) => {
- if (error) {
- reject(error);
- } else {
- resolve(stdout);
- }
- });
- });
-}
-
-let amount: number;
-const platform = os.platform();
-
-export async function physicalCpuCount(): Promise {
- return new Promise((resolve, reject) => {
- if (platform === "linux") {
- exec('lscpu -p | egrep -v "^#" | sort -u -t, -k 2,4 | wc -l')
- .then((output) => {
- amount = parseInt(output.trim(), 10);
- resolve(amount);
- })
- .catch(reject);
- } else if (platform === "darwin") {
- exec("sysctl -n hw.physicalcpu_max")
- .then((output) => {
- amount = parseInt(output.trim(), 10);
- resolve(amount);
- })
- .catch(reject);
- } else if (platform === "win32") {
- exec("WMIC CPU Get NumberOfCores")
- .then((output) => {
- amount = output
- .split(os.EOL)
- .map((line: string) => parseInt(line))
- .filter((value: number) => !isNaN(value))
- .reduce((sum: number, number: number) => sum + number, 1);
- resolve(amount);
- })
- .catch(reject);
- } else {
- const cores = os.cpus().filter((cpu: any, index: number) => {
- const hasHyperthreading = cpu.model.includes("Intel");
- const isOdd = index % 2 === 1;
- return !hasHyperthreading || isOdd;
- });
- amount = cores.length;
- resolve(amount);
- }
- });
-}
diff --git a/extensions/inference-openai-extension/src/index.ts b/extensions/inference-openai-extension/src/index.ts
index 44525b631..fd1230bc7 100644
--- a/extensions/inference-openai-extension/src/index.ts
+++ b/extensions/inference-openai-extension/src/index.ts
@@ -20,6 +20,8 @@ import {
MessageEvent,
ModelEvent,
InferenceEvent,
+ AppConfigurationEventName,
+ joinPath,
} from "@janhq/core";
import { requestInference } from "./helpers/sse";
import { ulid } from "ulid";
@@ -31,7 +33,7 @@ import { join } from "path";
* It also subscribes to events emitted by the @janhq/core package and handles new message requests.
*/
export default class JanInferenceOpenAIExtension extends BaseExtension {
- private static readonly _homeDir = "file://engines";
+ private static readonly _engineDir = "file://engines";
private static readonly _engineMetadataFileName = "openai.json";
private static _currentModel: OpenAIModel;
@@ -48,9 +50,9 @@ export default class JanInferenceOpenAIExtension extends BaseExtension {
* Subscribes to events emitted by the @janhq/core package.
*/
async onLoad() {
- if (!(await fs.existsSync(JanInferenceOpenAIExtension._homeDir))) {
+ if (!(await fs.existsSync(JanInferenceOpenAIExtension._engineDir))) {
await fs
- .mkdirSync(JanInferenceOpenAIExtension._homeDir)
+ .mkdirSync(JanInferenceOpenAIExtension._engineDir)
.catch((err) => console.debug(err));
}
@@ -71,6 +73,20 @@ export default class JanInferenceOpenAIExtension extends BaseExtension {
events.on(InferenceEvent.OnInferenceStopped, () => {
JanInferenceOpenAIExtension.handleInferenceStopped(this);
});
+
+ const settingsFilePath = await joinPath([
+ JanInferenceOpenAIExtension._engineDir,
+ JanInferenceOpenAIExtension._engineMetadataFileName,
+ ]);
+
+ events.on(
+ AppConfigurationEventName.OnConfigurationUpdate,
+ (settingsKey: string) => {
+ // Update settings on changes
+ if (settingsKey === settingsFilePath)
+ JanInferenceOpenAIExtension.writeDefaultEngineSettings();
+ },
+ );
}
/**
@@ -81,7 +97,7 @@ export default class JanInferenceOpenAIExtension extends BaseExtension {
static async writeDefaultEngineSettings() {
try {
const engineFile = join(
- JanInferenceOpenAIExtension._homeDir,
+ JanInferenceOpenAIExtension._engineDir,
JanInferenceOpenAIExtension._engineMetadataFileName,
);
if (await fs.existsSync(engineFile)) {
@@ -182,7 +198,7 @@ export default class JanInferenceOpenAIExtension extends BaseExtension {
},
error: async (err) => {
if (instance.isCancelled || message.content.length > 0) {
- message.status = MessageStatus.Error;
+ message.status = MessageStatus.Stopped;
events.emit(MessageEvent.OnMessageUpdate, message);
return;
}
@@ -194,7 +210,7 @@ export default class JanInferenceOpenAIExtension extends BaseExtension {
},
};
message.content = [messageContent];
- message.status = MessageStatus.Ready;
+ message.status = MessageStatus.Error;
events.emit(MessageEvent.OnMessageUpdate, message);
},
});
diff --git a/models/dolphin-phi-2/model.json b/models/dolphin-phi-2/model.json
index c7348c359..ae82041fc 100644
--- a/models/dolphin-phi-2/model.json
+++ b/models/dolphin-phi-2/model.json
@@ -13,7 +13,7 @@
"format": "gguf",
"settings": {
"ctx_len": 4096,
- "prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant\n",
+ "prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant",
"llama_model_path": "dolphin-2_6-phi-2.Q8_0.gguf"
},
"parameters": {
@@ -29,4 +29,4 @@
"size": 2960000000
},
"engine": "nitro"
- }
\ No newline at end of file
+ }
diff --git a/server/package.json b/server/package.json
index 9495a0d65..f61730da4 100644
--- a/server/package.json
+++ b/server/package.json
@@ -26,6 +26,8 @@
"dotenv": "^16.3.1",
"fastify": "^4.24.3",
"request": "^2.88.2",
+ "fetch-retry": "^5.0.6",
+ "tcp-port-used": "^1.0.2",
"request-progress": "^3.0.0"
},
"devDependencies": {
@@ -35,6 +37,7 @@
"@typescript-eslint/parser": "^6.7.3",
"eslint-plugin-react": "^7.33.2",
"run-script-os": "^1.1.6",
+ "@types/tcp-port-used": "^1.0.4",
"typescript": "^5.2.2"
}
}
diff --git a/uikit/package.json b/uikit/package.json
index 43e73dcf2..66f05840b 100644
--- a/uikit/package.json
+++ b/uikit/package.json
@@ -18,6 +18,7 @@
},
"dependencies": {
"@radix-ui/react-avatar": "^1.0.4",
+ "@radix-ui/react-checkbox": "^1.0.4",
"@radix-ui/react-context": "^1.0.1",
"@radix-ui/react-dialog": "^1.0.5",
"@radix-ui/react-icons": "^1.3.0",
diff --git a/uikit/src/button/styles.scss b/uikit/src/button/styles.scss
index 74585ed1e..003df5b4d 100644
--- a/uikit/src/button/styles.scss
+++ b/uikit/src/button/styles.scss
@@ -9,7 +9,7 @@
}
&-secondary-blue {
- @apply bg-blue-200 text-blue-600 hover:bg-blue-500/50;
+ @apply bg-blue-200 text-blue-600 hover:bg-blue-300/50 dark:hover:bg-blue-200/80;
}
&-danger {
@@ -17,7 +17,7 @@
}
&-secondary-danger {
- @apply bg-red-200 text-red-600 hover:bg-red-500/50;
+ @apply bg-red-200 text-red-600 hover:bg-red-300/50 dark:hover:bg-red-200/80;
}
&-outline {
@@ -67,14 +67,18 @@
[type='submit'] {
&.btn-primary {
@apply bg-primary hover:bg-primary/90;
+ @apply disabled:pointer-events-none disabled:bg-zinc-100 disabled:text-zinc-400;
}
&.btn-secondary {
@apply bg-secondary hover:bg-secondary/80;
+ @apply disabled:pointer-events-none disabled:bg-zinc-100 disabled:text-zinc-400;
}
&.btn-secondary-blue {
@apply bg-blue-200 text-blue-900 hover:bg-blue-200/80;
+ @apply disabled:pointer-events-none disabled:bg-zinc-100 disabled:text-zinc-400;
}
&.btn-danger {
@apply bg-danger hover:bg-danger/90;
+ @apply disabled:pointer-events-none disabled:bg-zinc-100 disabled:text-zinc-400;
}
}
diff --git a/uikit/src/checkbox/index.tsx b/uikit/src/checkbox/index.tsx
new file mode 100644
index 000000000..1e78aeafb
--- /dev/null
+++ b/uikit/src/checkbox/index.tsx
@@ -0,0 +1,29 @@
+'use client'
+
+import * as React from 'react'
+import * as CheckboxPrimitive from '@radix-ui/react-checkbox'
+import { CheckIcon } from '@radix-ui/react-icons'
+
+import { twMerge } from 'tailwind-merge'
+
+const Checkbox = React.forwardRef<
+ React.ElementRef,
+ React.ComponentPropsWithoutRef
+>(({ className, ...props }, ref) => (
+
+
+
+
+
+))
+Checkbox.displayName = CheckboxPrimitive.Root.displayName
+
+export { Checkbox }
diff --git a/uikit/src/checkbox/styles.scss b/uikit/src/checkbox/styles.scss
new file mode 100644
index 000000000..33610f837
--- /dev/null
+++ b/uikit/src/checkbox/styles.scss
@@ -0,0 +1,7 @@
+.checkbox {
+ @apply border-border data-[state=checked]:bg-primary h-5 w-5 flex-shrink-0 rounded-md border data-[state=checked]:text-white;
+
+ &--icon {
+ @apply h-4 w-4;
+ }
+}
diff --git a/uikit/src/index.ts b/uikit/src/index.ts
index 3d5eaa82a..1b0a26bd1 100644
--- a/uikit/src/index.ts
+++ b/uikit/src/index.ts
@@ -12,3 +12,4 @@ export * from './command'
export * from './textarea'
export * from './select'
export * from './slider'
+export * from './checkbox'
diff --git a/uikit/src/input/styles.scss b/uikit/src/input/styles.scss
index b78db270a..9990da8b4 100644
--- a/uikit/src/input/styles.scss
+++ b/uikit/src/input/styles.scss
@@ -1,6 +1,6 @@
.input {
@apply border-border placeholder:text-muted-foreground flex h-9 w-full rounded-lg border bg-transparent px-3 py-1 transition-colors;
- @apply disabled:cursor-not-allowed disabled:bg-zinc-100;
+ @apply disabled:cursor-not-allowed disabled:bg-zinc-100 disabled:dark:bg-zinc-800 disabled:dark:text-zinc-600;
@apply focus-within:outline-none focus-visible:outline-0 focus-visible:ring-2 focus-visible:ring-blue-500 focus-visible:ring-offset-1;
@apply file:border-0 file:bg-transparent file:font-medium;
}
diff --git a/uikit/src/main.scss b/uikit/src/main.scss
index 546f22811..c1326ba19 100644
--- a/uikit/src/main.scss
+++ b/uikit/src/main.scss
@@ -16,6 +16,7 @@
@import './textarea/styles.scss';
@import './select/styles.scss';
@import './slider/styles.scss';
+@import './checkbox/styles.scss';
.animate-spin {
animation: spin 1s linear infinite;
diff --git a/uikit/src/select/styles.scss b/uikit/src/select/styles.scss
index 665ca8cba..bc5b6c0cc 100644
--- a/uikit/src/select/styles.scss
+++ b/uikit/src/select/styles.scss
@@ -1,5 +1,6 @@
.select {
- @apply placeholder:text-muted-foreground border-border flex h-9 w-full items-center justify-between whitespace-nowrap rounded-md border bg-transparent px-3 py-2 text-sm shadow-sm disabled:cursor-not-allowed disabled:opacity-50 [&>span]:line-clamp-1;
+ @apply placeholder:text-muted-foreground border-border flex h-9 w-full items-center justify-between whitespace-nowrap rounded-md border bg-transparent px-3 py-2 text-sm shadow-sm disabled:cursor-not-allowed [&>span]:line-clamp-1;
+ @apply disabled:cursor-not-allowed disabled:bg-zinc-100 disabled:dark:bg-zinc-800 disabled:dark:text-zinc-600;
@apply focus-within:outline-none focus-visible:outline-0 focus-visible:ring-2 focus-visible:ring-blue-500 focus-visible:ring-offset-1;
&-caret {
diff --git a/web/containers/CardSidebar/index.tsx b/web/containers/CardSidebar/index.tsx
index c0dd19ba5..38a8678d9 100644
--- a/web/containers/CardSidebar/index.tsx
+++ b/web/containers/CardSidebar/index.tsx
@@ -13,6 +13,8 @@ import { useClickOutside } from '@/hooks/useClickOutside'
import { usePath } from '@/hooks/usePath'
+import { openFileTitle } from '@/utils/titleUtils'
+
import { activeThreadAtom } from '@/helpers/atoms/Thread.atom'
interface Props {
@@ -38,13 +40,6 @@ export default function CardSidebar({
useClickOutside(() => setMore(false), null, [menu, toggle])
- let openFolderTitle: string = 'Open Containing Folder'
- if (isMac) {
- openFolderTitle = 'Show in Finder'
- } else if (isWindows) {
- openFolderTitle = 'Show in File Explorer'
- }
-
return (
- {openFolderTitle}
+ {openFileTitle()}
Opens thread.json. Changes affect this thread only.
@@ -126,7 +121,7 @@ export default function CardSidebar({
) : (
- Show in Finder
+ {openFileTitle()}
)}
>
diff --git a/web/containers/DropdownListSidebar/index.tsx b/web/containers/DropdownListSidebar/index.tsx
index fdc39063a..140a1aba1 100644
--- a/web/containers/DropdownListSidebar/index.tsx
+++ b/web/containers/DropdownListSidebar/index.tsx
@@ -26,6 +26,8 @@ import { useMainViewState } from '@/hooks/useMainViewState'
import useRecommendedModel from '@/hooks/useRecommendedModel'
+import useUpdateModelParameters from '@/hooks/useUpdateModelParameters'
+
import { toGibibytes } from '@/utils/converter'
import ModelLabel from '../ModelLabel'
@@ -34,68 +36,40 @@ import OpenAiKeyInput from '../OpenAiKeyInput'
import { serverEnabledAtom } from '@/helpers/atoms/LocalServer.atom'
import {
- ModelParams,
activeThreadAtom,
- getActiveThreadIdAtom,
setThreadModelParamsAtom,
- threadStatesAtom,
} from '@/helpers/atoms/Thread.atom'
export const selectedModelAtom = atom(undefined)
-export default function DropdownListSidebar() {
- const activeThreadId = useAtomValue(getActiveThreadIdAtom)
+// TODO: Move all of the unscoped logics outside of the component
+const DropdownListSidebar = ({
+ strictedThread = true,
+}: {
+ strictedThread?: boolean
+}) => {
const activeThread = useAtomValue(activeThreadAtom)
- const threadStates = useAtomValue(threadStatesAtom)
const [selectedModel, setSelectedModel] = useAtom(selectedModelAtom)
const setThreadModelParams = useSetAtom(setThreadModelParamsAtom)
- const { activeModel, stateModel } = useActiveModel()
+
+ const { stateModel } = useActiveModel()
const [serverEnabled, setServerEnabled] = useAtom(serverEnabledAtom)
const { setMainViewState } = useMainViewState()
-
+ const [loader, setLoader] = useState(0)
const { recommendedModel, downloadedModels } = useRecommendedModel()
-
- /**
- * Default value for max_tokens and ctx_len
- * Its to avoid OOM issue since a model can set a big number for these settings
- */
- const defaultValue = (value?: number) => {
- if (value && value < 4096) return value
- return 4096
- }
+ const { updateModelParameter } = useUpdateModelParameters()
useEffect(() => {
- setSelectedModel(selectedModel || activeModel || recommendedModel)
+ if (!activeThread) return
- if (activeThread) {
- const finishInit = threadStates[activeThread.id].isFinishInit ?? true
- if (finishInit) return
- const modelParams: ModelParams = {
- ...recommendedModel?.parameters,
- ...recommendedModel?.settings,
- /**
- * This is to set default value for these settings instead of maximum value
- * Should only apply when model.json has these settings
- */
- ...(recommendedModel?.parameters.max_tokens && {
- max_tokens: defaultValue(recommendedModel?.parameters.max_tokens),
- }),
- ...(recommendedModel?.settings.ctx_len && {
- ctx_len: defaultValue(recommendedModel?.settings.ctx_len),
- }),
- }
- setThreadModelParams(activeThread.id, modelParams)
+ let model = downloadedModels.find(
+ (model) => model.id === activeThread.assistants[0].model.id
+ )
+ if (!model) {
+ model = recommendedModel
}
- // eslint-disable-next-line react-hooks/exhaustive-deps
- }, [
- recommendedModel,
- activeThread,
- setSelectedModel,
- setThreadModelParams,
- threadStates,
- ])
-
- const [loader, setLoader] = useState(0)
+ setSelectedModel(model)
+ }, [recommendedModel, activeThread, downloadedModels, setSelectedModel])
// This is fake loader please fix this when we have realtime percentage when load model
useEffect(() => {
@@ -132,25 +106,35 @@ export default function DropdownListSidebar() {
setServerEnabled(false)
}
- if (activeThreadId) {
+ if (activeThread) {
const modelParams = {
...model?.parameters,
...model?.settings,
}
- setThreadModelParams(activeThreadId, modelParams)
+ // Update model paramter to the thread state
+ setThreadModelParams(activeThread.id, modelParams)
+
+ // Update model parameter to the thread file
+ if (model)
+ updateModelParameter(activeThread.id, {
+ params: modelParams,
+ modelId: model.id,
+ engine: model.engine,
+ })
}
},
- // eslint-disable-next-line react-hooks/exhaustive-deps
[
downloadedModels,
serverEnabled,
- activeThreadId,
- activeModel,
+ activeThread,
+ setSelectedModel,
+ setServerEnabled,
setThreadModelParams,
+ updateModelParameter,
]
)
- if (!activeThread) {
+ if (strictedThread && !activeThread) {
return null
}
@@ -236,10 +220,9 @@ export default function DropdownListSidebar() {
-
+
>
)
}
+
+export default DropdownListSidebar
diff --git a/web/containers/Layout/TopBar/index.tsx b/web/containers/Layout/TopBar/index.tsx
index ab67cb3b7..f72f5f066 100644
--- a/web/containers/Layout/TopBar/index.tsx
+++ b/web/containers/Layout/TopBar/index.tsx
@@ -27,6 +27,8 @@ import { usePath } from '@/hooks/usePath'
import { showRightSideBarAtom } from '@/screens/Chat/Sidebar'
+import { openFileTitle } from '@/utils/titleUtils'
+
import { activeThreadAtom } from '@/helpers/atoms/Thread.atom'
const TopBar = () => {
@@ -120,13 +122,14 @@ const TopBar = () => {
- {activeThread && (
-
+
+ {((activeThread && mainViewState === MainViewState.Thread) ||
+ mainViewState === MainViewState.LocalServer) && (
{showing && (
@@ -161,7 +164,7 @@ const TopBar = () => {
className="text-muted-foreground"
/>
- Show in Finder
+ {openFileTitle()}
{
/>
- Show in Finder
+ {openFileTitle()}
@@ -227,8 +230,8 @@ const TopBar = () => {
/>
-
- )}
+ )}
+
)}
diff --git a/web/containers/Layout/index.tsx b/web/containers/Layout/index.tsx
index e7bde49c0..77a1fe971 100644
--- a/web/containers/Layout/index.tsx
+++ b/web/containers/Layout/index.tsx
@@ -12,7 +12,8 @@ import TopBar from '@/containers/Layout/TopBar'
import { MainViewState } from '@/constants/screens'
import { useMainViewState } from '@/hooks/useMainViewState'
-import { SUCCESS_SET_NEW_DESTINATION } from '@/hooks/useVaultDirectory'
+
+import { SUCCESS_SET_NEW_DESTINATION } from '@/screens/Settings/Advanced/DataFolder'
const BaseLayout = (props: PropsWithChildren) => {
const { children } = props
diff --git a/web/containers/OpenAiKeyInput/index.tsx b/web/containers/OpenAiKeyInput/index.tsx
index abd79e6a8..444c8074f 100644
--- a/web/containers/OpenAiKeyInput/index.tsx
+++ b/web/containers/OpenAiKeyInput/index.tsx
@@ -1,16 +1,19 @@
import React, { useEffect, useState } from 'react'
-import { InferenceEngine, Model } from '@janhq/core'
+import { InferenceEngine } from '@janhq/core'
import { Input } from '@janhq/uikit'
+import { useAtomValue } from 'jotai'
+
import { useEngineSettings } from '@/hooks/useEngineSettings'
-type Props = {
- selectedModel?: Model
- serverEnabled: boolean
-}
+import { selectedModelAtom } from '../DropdownListSidebar'
-const OpenAiKeyInput: React.FC = ({ selectedModel, serverEnabled }) => {
+import { serverEnabledAtom } from '@/helpers/atoms/LocalServer.atom'
+
+const OpenAiKeyInput: React.FC = () => {
+ const selectedModel = useAtomValue(selectedModelAtom)
+ const serverEnabled = useAtomValue(serverEnabledAtom)
const [openAISettings, setOpenAISettings] = useState<
{ api_key: string } | undefined
>(undefined)
@@ -20,8 +23,7 @@ const OpenAiKeyInput: React.FC = ({ selectedModel, serverEnabled }) => {
readOpenAISettings().then((settings) => {
setOpenAISettings(settings)
})
- // eslint-disable-next-line react-hooks/exhaustive-deps
- }, [])
+ }, [readOpenAISettings])
if (!selectedModel || selectedModel.engine !== InferenceEngine.openai) {
return null
diff --git a/web/containers/Providers/EventHandler.tsx b/web/containers/Providers/EventHandler.tsx
index 01d32b346..ec0fbfc90 100644
--- a/web/containers/Providers/EventHandler.tsx
+++ b/web/containers/Providers/EventHandler.tsx
@@ -13,20 +13,26 @@ import {
} from '@janhq/core'
import { useAtomValue, useSetAtom } from 'jotai'
-import { activeModelAtom, stateModelAtom } from '@/hooks/useActiveModel'
+import {
+ activeModelAtom,
+ loadModelErrorAtom,
+ stateModelAtom,
+} from '@/hooks/useActiveModel'
import { useGetDownloadedModels } from '@/hooks/useGetDownloadedModels'
+import { queuedMessageAtom } from '@/hooks/useSendChatMessage'
+
import { toaster } from '../Toast'
import { extensionManager } from '@/extension'
import {
addNewMessageAtom,
updateMessageAtom,
- generateResponseAtom,
} from '@/helpers/atoms/ChatMessage.atom'
import {
updateThreadWaitingForResponseAtom,
threadsAtom,
+ isGeneratingResponseAtom,
} from '@/helpers/atoms/Thread.atom'
export default function EventHandler({ children }: { children: ReactNode }) {
@@ -35,12 +41,14 @@ export default function EventHandler({ children }: { children: ReactNode }) {
const { downloadedModels } = useGetDownloadedModels()
const setActiveModel = useSetAtom(activeModelAtom)
const setStateModel = useSetAtom(stateModelAtom)
- const setGenerateResponse = useSetAtom(generateResponseAtom)
+ const setQueuedMessage = useSetAtom(queuedMessageAtom)
+ const setLoadModelError = useSetAtom(loadModelErrorAtom)
const updateThreadWaiting = useSetAtom(updateThreadWaitingForResponseAtom)
const threads = useAtomValue(threadsAtom)
const modelsRef = useRef(downloadedModels)
const threadsRef = useRef(threads)
+ const setIsGeneratingResponse = useSetAtom(isGeneratingResponseAtom)
useEffect(() => {
threadsRef.current = threads
@@ -52,7 +60,6 @@ export default function EventHandler({ children }: { children: ReactNode }) {
const onNewMessageResponse = useCallback(
(message: ThreadMessage) => {
- setGenerateResponse(false)
addNewMessage(message)
},
[addNewMessage]
@@ -64,6 +71,7 @@ export default function EventHandler({ children }: { children: ReactNode }) {
toaster({
title: 'Success!',
description: `Model ${model.id} has been started.`,
+ type: 'success',
})
setStateModel(() => ({
state: 'stop',
@@ -85,18 +93,19 @@ export default function EventHandler({ children }: { children: ReactNode }) {
(res: any) => {
const errorMessage = `${res.error}`
console.error('Failed to load model: ' + errorMessage)
+ setLoadModelError(errorMessage)
setStateModel(() => ({
state: 'start',
loading: false,
model: res.modelId,
}))
+ setQueuedMessage(false)
},
- [setStateModel]
+ [setStateModel, setQueuedMessage, setLoadModelError]
)
const onMessageResponseUpdate = useCallback(
(message: ThreadMessage) => {
- setGenerateResponse(false)
updateMessage(
message.id,
message.thread_id,
@@ -104,11 +113,17 @@ export default function EventHandler({ children }: { children: ReactNode }) {
message.status
)
if (message.status === MessageStatus.Pending) {
+ if (message.content.length) {
+ updateThreadWaiting(message.thread_id, false)
+ setIsGeneratingResponse(false)
+ }
return
}
// Mark the thread as not waiting for response
updateThreadWaiting(message.thread_id, false)
+ setIsGeneratingResponse(false)
+
const thread = threadsRef.current?.find((e) => e.id == message.thread_id)
if (thread) {
const messageContent = message.content[0]?.text.value ?? ''
diff --git a/web/containers/Providers/index.tsx b/web/containers/Providers/index.tsx
index f9726e43d..895c22177 100644
--- a/web/containers/Providers/index.tsx
+++ b/web/containers/Providers/index.tsx
@@ -6,8 +6,6 @@ import { Toaster } from 'react-hot-toast'
import { TooltipProvider } from '@janhq/uikit'
-import { PostHogProvider } from 'posthog-js/react'
-
import GPUDriverPrompt from '@/containers/GPUDriverPromptModal'
import EventListenerWrapper from '@/containers/Providers/EventListener'
import JotaiWrapper from '@/containers/Providers/Jotai'
@@ -21,7 +19,7 @@ import {
setupBaseExtensions,
} from '@/services/extensionService'
-import { instance } from '@/utils/posthog'
+import Umami from '@/utils/umami'
import KeyListener from './KeyListener'
@@ -70,25 +68,22 @@ const Providers = (props: PropsWithChildren) => {
}, [setupCore])
return (
-
-
-
- {setupCore && activated && (
-
-
-
-
- {children}
-
- {!isMac && }
-
-
-
-
- )}
-
-
-
+
+
+
+ {setupCore && activated && (
+
+
+
+ {children}
+ {!isMac && }
+
+
+
+
+ )}
+
+
)
}
diff --git a/web/containers/Toast/index.tsx b/web/containers/Toast/index.tsx
index c5e5f03da..eae340fee 100644
--- a/web/containers/Toast/index.tsx
+++ b/web/containers/Toast/index.tsx
@@ -6,7 +6,99 @@ import { twMerge } from 'tailwind-merge'
type Props = {
title?: string
description?: string
- type?: 'default' | 'error' | 'success'
+ type?: 'default' | 'error' | 'success' | 'warning'
+}
+
+const ErrorIcon = () => {
+ return (
+
+ )
+}
+
+const WarningIcon = () => {
+ return (
+
+ )
+}
+
+const SuccessIcon = () => {
+ return (
+
+ )
+}
+
+const DefaultIcon = () => {
+ return (
+
+ )
+}
+
+const renderIcon = (type: string) => {
+ switch (type) {
+ case 'warning':
+ return
+
+ case 'error':
+ return
+
+ case 'success':
+ return
+
+ default:
+ return
+ }
}
export function toaster(props: Props) {
@@ -16,37 +108,52 @@ export function toaster(props: Props) {
return (
-
-
- {title}
-
-
- {description}
-
+
+ {renderIcon(type)}
+
+ {title}
+ {description}
+
+ toast.dismiss(t.id)}
+ />
- toast.dismiss(t.id)}
- />
)
},
- { id: 'toast', duration: 3000 }
+ { id: 'toast', duration: 2000, position: 'top-right' }
+ )
+}
+
+export function snackbar(props: Props) {
+ const { description, type = 'default' } = props
+ return toast.custom(
+ (t) => {
+ return (
+
+
+ {renderIcon(type)}
+ {description}
+ toast.dismiss(t.id)}
+ />
+
+
+ )
+ },
+ { id: 'snackbar', duration: 2000, position: 'bottom-center' }
)
}
diff --git a/web/helpers/atoms/ChatMessage.atom.ts b/web/helpers/atoms/ChatMessage.atom.ts
index 0d9211649..b11e8f3be 100644
--- a/web/helpers/atoms/ChatMessage.atom.ts
+++ b/web/helpers/atoms/ChatMessage.atom.ts
@@ -14,8 +14,6 @@ import {
/**
* Stores all chat messages for all threads
*/
-export const generateResponseAtom = atom (false)
-
export const chatMessages = atom>({})
/**
diff --git a/web/helpers/atoms/SystemBar.atom.ts b/web/helpers/atoms/SystemBar.atom.ts
index aa5e77d58..42ef7b29f 100644
--- a/web/helpers/atoms/SystemBar.atom.ts
+++ b/web/helpers/atoms/SystemBar.atom.ts
@@ -2,5 +2,6 @@ import { atom } from 'jotai'
export const totalRamAtom = atom(0)
export const usedRamAtom = atom(0)
+export const availableRamAtom = atom(0)
export const cpuUsageAtom = atom(0)
diff --git a/web/helpers/atoms/Thread.atom.ts b/web/helpers/atoms/Thread.atom.ts
index 26b1e9c59..cab286bd1 100644
--- a/web/helpers/atoms/Thread.atom.ts
+++ b/web/helpers/atoms/Thread.atom.ts
@@ -23,6 +23,7 @@ export const setActiveThreadIdAtom = atom(
export const waitingToSendMessage = atom(undefined)
+export const isGeneratingResponseAtom = atom(undefined)
/**
* Stores all thread states for the current user
*/
@@ -46,18 +47,6 @@ export const deleteThreadStateAtom = atom(
}
)
-export const updateThreadInitSuccessAtom = atom(
- null,
- (get, set, threadId: string) => {
- const currentState = { ...get(threadStatesAtom) }
- currentState[threadId] = {
- ...currentState[threadId],
- isFinishInit: true,
- }
- set(threadStatesAtom, currentState)
- }
-)
-
export const updateThreadWaitingForResponseAtom = atom(
null,
(get, set, threadId: string, waitingForResponse: boolean) => {
diff --git a/web/hooks/useActiveModel.ts b/web/hooks/useActiveModel.ts
index 336f0be21..54a1fdbe0 100644
--- a/web/hooks/useActiveModel.ts
+++ b/web/hooks/useActiveModel.ts
@@ -1,5 +1,5 @@
import { events, Model, ModelEvent } from '@janhq/core'
-import { atom, useAtom, useAtomValue } from 'jotai'
+import { atom, useAtom, useAtomValue, useSetAtom } from 'jotai'
import { toaster } from '@/containers/Toast'
@@ -9,6 +9,7 @@ import { LAST_USED_MODEL_ID } from './useRecommendedModel'
import { activeThreadAtom } from '@/helpers/atoms/Thread.atom'
export const activeModelAtom = atom(undefined)
+export const loadModelErrorAtom = atom(undefined)
export const stateModelAtom = atom({
state: 'start',
@@ -21,6 +22,7 @@ export function useActiveModel() {
const activeThread = useAtomValue(activeThreadAtom)
const [stateModel, setStateModel] = useAtom(stateModelAtom)
const { downloadedModels } = useGetDownloadedModels()
+ const setLoadModelError = useSetAtom(loadModelErrorAtom)
const startModel = async (modelId: string) => {
if (
@@ -31,6 +33,7 @@ export function useActiveModel() {
return
}
// TODO: incase we have multiple assistants, the configuration will be from assistant
+ setLoadModelError(undefined)
setActiveModel(undefined)
@@ -42,6 +45,7 @@ export function useActiveModel() {
toaster({
title: `Model ${modelId} not found!`,
description: `Please download the model first.`,
+ type: 'warning',
})
setStateModel(() => ({
state: 'start',
diff --git a/web/hooks/useCreateNewThread.ts b/web/hooks/useCreateNewThread.ts
index d9451a46c..ee8df22df 100644
--- a/web/hooks/useCreateNewThread.ts
+++ b/web/hooks/useCreateNewThread.ts
@@ -7,21 +7,23 @@ import {
ThreadState,
Model,
} from '@janhq/core'
-import { atom, useAtom, useAtomValue, useSetAtom } from 'jotai'
+import { atom, useSetAtom } from 'jotai'
+import { selectedModelAtom } from '@/containers/DropdownListSidebar'
import { fileUploadAtom } from '@/containers/Providers/Jotai'
import { generateThreadId } from '@/utils/thread'
-import useDeleteThread from './useDeleteThread'
+import useRecommendedModel from './useRecommendedModel'
+
+import useSetActiveThread from './useSetActiveThread'
import { extensionManager } from '@/extension'
import {
threadsAtom,
- setActiveThreadIdAtom,
threadStatesAtom,
updateThreadAtom,
- updateThreadInitSuccessAtom,
+ setThreadModelParamsAtom,
} from '@/helpers/atoms/Thread.atom'
const createNewThreadAtom = atom(null, (get, set, newThread: Thread) => {
@@ -32,7 +34,6 @@ const createNewThreadAtom = atom(null, (get, set, newThread: Thread) => {
hasMore: false,
waitingForResponse: false,
lastMessage: undefined,
- isFinishInit: false,
}
currentState[newThread.id] = threadState
set(threadStatesAtom, currentState)
@@ -43,46 +44,35 @@ const createNewThreadAtom = atom(null, (get, set, newThread: Thread) => {
})
export const useCreateNewThread = () => {
- const threadStates = useAtomValue(threadStatesAtom)
- const updateThreadFinishInit = useSetAtom(updateThreadInitSuccessAtom)
const createNewThread = useSetAtom(createNewThreadAtom)
- const setActiveThreadId = useSetAtom(setActiveThreadIdAtom)
+ const { setActiveThread } = useSetActiveThread()
const updateThread = useSetAtom(updateThreadAtom)
- const [fileUpload, setFileUpload] = useAtom(fileUploadAtom)
- const { deleteThread } = useDeleteThread()
+ const setFileUpload = useSetAtom(fileUploadAtom)
+ const setSelectedModel = useSetAtom(selectedModelAtom)
+ const setThreadModelParams = useSetAtom(setThreadModelParamsAtom)
+
+ const { recommendedModel, downloadedModels } = useRecommendedModel()
const requestCreateNewThread = async (
assistant: Assistant,
model?: Model | undefined
) => {
- // loop through threads state and filter if there's any thread that is not finish init
- let unfinishedInitThreadId: string | undefined = undefined
- for (const key in threadStates) {
- const isFinishInit = threadStates[key].isFinishInit ?? true
- if (!isFinishInit) {
- unfinishedInitThreadId = key
- break
- }
- }
+ const defaultModel = model ?? recommendedModel ?? downloadedModels[0]
- if (unfinishedInitThreadId) {
- await deleteThread(unfinishedInitThreadId)
- }
-
- const modelId = model ? model.id : '*'
const createdAt = Date.now()
const assistantInfo: ThreadAssistantInfo = {
assistant_id: assistant.id,
assistant_name: assistant.name,
tools: assistant.tools,
model: {
- id: modelId,
- settings: {},
- parameters: {},
- engine: undefined,
+ id: defaultModel?.id ?? '*',
+ settings: defaultModel?.settings ?? {},
+ parameters: defaultModel?.parameters ?? {},
+ engine: defaultModel?.engine,
},
instructions: assistant.instructions,
}
+
const threadId = generateThreadId(assistant.id)
const thread: Thread = {
id: threadId,
@@ -94,22 +84,27 @@ export const useCreateNewThread = () => {
}
// add the new thread on top of the thread list to the state
+ //TODO: Why do we have thread list then thread states? Should combine them
createNewThread(thread)
- setActiveThreadId(thread.id)
+
+ setSelectedModel(defaultModel)
+ setThreadModelParams(thread.id, {
+ ...defaultModel?.settings,
+ ...defaultModel?.parameters,
+ })
// Delete the file upload state
setFileUpload([])
+ // Update thread metadata
+ await updateThreadMetadata(thread)
+
+ setActiveThread(thread)
}
- function updateThreadMetadata(thread: Thread) {
+ async function updateThreadMetadata(thread: Thread) {
updateThread(thread)
- const threadState = threadStates[thread.id]
- const isFinishInit = threadState?.isFinishInit ?? true
- if (!isFinishInit) {
- updateThreadFinishInit(thread.id)
- }
- extensionManager
+ await extensionManager
.get(ExtensionTypeEnum.Conversational)
?.saveThread(thread)
}
diff --git a/web/hooks/useDeleteModel.ts b/web/hooks/useDeleteModel.ts
index cd7292997..fa0cfb45e 100644
--- a/web/hooks/useDeleteModel.ts
+++ b/web/hooks/useDeleteModel.ts
@@ -19,6 +19,7 @@ export default function useDeleteModel() {
toaster({
title: 'Model Deletion Successful',
description: `The model ${model.id} has been successfully deleted.`,
+ type: 'success',
})
}
diff --git a/web/hooks/useDeleteThread.ts b/web/hooks/useDeleteThread.ts
index 84dd8a468..87cee125d 100644
--- a/web/hooks/useDeleteThread.ts
+++ b/web/hooks/useDeleteThread.ts
@@ -21,7 +21,6 @@ import {
threadsAtom,
setActiveThreadIdAtom,
deleteThreadStateAtom,
- threadStatesAtom,
updateThreadStateLastMessageAtom,
} from '@/helpers/atoms/Thread.atom'
@@ -34,7 +33,6 @@ export default function useDeleteThread() {
const deleteMessages = useSetAtom(deleteChatMessagesAtom)
const cleanMessages = useSetAtom(cleanChatMessagesAtom)
const deleteThreadState = useSetAtom(deleteThreadStateAtom)
- const threadStates = useAtomValue(threadStatesAtom)
const updateThreadLastMessage = useSetAtom(updateThreadStateLastMessageAtom)
const cleanThread = async (threadId: string) => {
@@ -49,6 +47,14 @@ export default function useDeleteThread() {
threadId,
messages.filter((msg) => msg.role === ChatCompletionRole.System)
)
+
+ thread.metadata = {
+ ...thread.metadata,
+ lastMessage: undefined,
+ }
+ await extensionManager
+ .get(ExtensionTypeEnum.Conversational)
+ ?.saveThread(thread)
updateThreadLastMessage(threadId, undefined)
}
}
@@ -66,21 +72,16 @@ export default function useDeleteThread() {
const availableThreads = threads.filter((c) => c.id !== threadId)
setThreads(availableThreads)
- const deletingThreadState = threadStates[threadId]
- const isFinishInit = deletingThreadState?.isFinishInit ?? true
-
// delete the thread state
deleteThreadState(threadId)
- if (isFinishInit) {
- deleteMessages(threadId)
- setCurrentPrompt('')
- toaster({
- title: 'Thread successfully deleted.',
- description: `Thread ${threadId} has been successfully deleted.`,
- })
- }
-
+ deleteMessages(threadId)
+ setCurrentPrompt('')
+ toaster({
+ title: 'Thread successfully deleted.',
+ description: `Thread ${threadId} has been successfully deleted.`,
+ type: 'success',
+ })
if (availableThreads.length > 0) {
setActiveThreadId(availableThreads[0].id)
} else {
diff --git a/web/hooks/useDownloadState.ts b/web/hooks/useDownloadState.ts
index d39ab5e58..37f41d2a1 100644
--- a/web/hooks/useDownloadState.ts
+++ b/web/hooks/useDownloadState.ts
@@ -26,6 +26,7 @@ const setDownloadStateSuccessAtom = atom(null, (get, set, modelId: string) => {
toaster({
title: 'Download Completed',
description: `Download ${modelId} completed`,
+ type: 'success',
})
})
@@ -61,6 +62,7 @@ const setDownloadStateCancelledAtom = atom(
toaster({
title: 'Cancel Download',
description: `Model ${modelId} cancel download`,
+ type: 'warning',
})
return
diff --git a/web/hooks/useEngineSettings.ts b/web/hooks/useEngineSettings.ts
index 258a89aa4..4a17f91df 100644
--- a/web/hooks/useEngineSettings.ts
+++ b/web/hooks/useEngineSettings.ts
@@ -1,7 +1,9 @@
-import { fs, joinPath } from '@janhq/core'
+import { useCallback } from 'react'
+
+import { fs, joinPath, events, AppConfigurationEventName } from '@janhq/core'
export const useEngineSettings = () => {
- const readOpenAISettings = async () => {
+ const readOpenAISettings = useCallback(async () => {
if (
!(await fs.existsSync(await joinPath(['file://engines', 'openai.json'])))
)
@@ -14,17 +16,24 @@ export const useEngineSettings = () => {
return typeof settings === 'object' ? settings : JSON.parse(settings)
}
return {}
- }
+ }, [])
+
const saveOpenAISettings = async ({
apiKey,
}: {
apiKey: string | undefined
}) => {
const settings = await readOpenAISettings()
+ const settingFilePath = await joinPath(['file://engines', 'openai.json'])
+
settings.api_key = apiKey
- await fs.writeFileSync(
- await joinPath(['file://engines', 'openai.json']),
- JSON.stringify(settings)
+
+ await fs.writeFileSync(settingFilePath, JSON.stringify(settings))
+
+ // Sec: Don't attach the settings data to the event
+ events.emit(
+ AppConfigurationEventName.OnConfigurationUpdate,
+ settingFilePath
)
}
return { readOpenAISettings, saveOpenAISettings }
diff --git a/web/hooks/useFactoryReset.ts b/web/hooks/useFactoryReset.ts
new file mode 100644
index 000000000..56994d4c4
--- /dev/null
+++ b/web/hooks/useFactoryReset.ts
@@ -0,0 +1,59 @@
+import { useEffect, useState } from 'react'
+
+import { fs, AppConfiguration, joinPath, getUserHomePath } from '@janhq/core'
+
+export default function useFactoryReset() {
+ const [defaultJanDataFolder, setdefaultJanDataFolder] = useState('')
+
+ useEffect(() => {
+ async function getDefaultJanDataFolder() {
+ const homePath = await getUserHomePath()
+ const defaultJanDataFolder = await joinPath([homePath, 'jan'])
+ setdefaultJanDataFolder(defaultJanDataFolder)
+ }
+ getDefaultJanDataFolder()
+ }, [])
+
+ const resetAll = async (keepCurrentFolder?: boolean) => {
+ // read the place of jan data folder
+ const appConfiguration: AppConfiguration | undefined =
+ await window.core?.api?.getAppConfigurations()
+
+ if (!appConfiguration) {
+ console.debug('Failed to get app configuration')
+ }
+
+ console.debug('appConfiguration: ', appConfiguration)
+ const janDataFolderPath = appConfiguration!.data_folder
+
+ if (defaultJanDataFolder === janDataFolderPath) {
+ console.debug('Jan data folder is already at user home')
+ } else {
+ // if jan data folder is not at user home, we update the app configuration to point to user home
+ if (!keepCurrentFolder) {
+ const configuration: AppConfiguration = {
+ data_folder: defaultJanDataFolder,
+ }
+ await window.core?.api?.updateAppConfiguration(configuration)
+ }
+ }
+
+ const modelPath = await joinPath([janDataFolderPath, 'models'])
+ const threadPath = await joinPath([janDataFolderPath, 'threads'])
+
+ console.debug(`Removing models at ${modelPath}`)
+ await fs.rmdirSync(modelPath, { recursive: true })
+
+ console.debug(`Removing threads at ${threadPath}`)
+ await fs.rmdirSync(threadPath, { recursive: true })
+
+ // reset the localStorage
+ localStorage.clear()
+ await window.core?.api?.relaunch()
+ }
+
+ return {
+ defaultJanDataFolder,
+ resetAll,
+ }
+}
diff --git a/web/hooks/useGetSystemResources.ts b/web/hooks/useGetSystemResources.ts
index 8dffa8eb4..de595ad7b 100644
--- a/web/hooks/useGetSystemResources.ts
+++ b/web/hooks/useGetSystemResources.ts
@@ -6,6 +6,7 @@ import { useSetAtom } from 'jotai'
import { extensionManager } from '@/extension/ExtensionManager'
import {
+ availableRamAtom,
cpuUsageAtom,
totalRamAtom,
usedRamAtom,
@@ -16,6 +17,7 @@ export default function useGetSystemResources() {
const [cpu, setCPU] = useState(0)
const setTotalRam = useSetAtom(totalRamAtom)
const setUsedRam = useSetAtom(usedRamAtom)
+ const setAvailableRam = useSetAtom(availableRamAtom)
const setCpuUsage = useSetAtom(cpuUsageAtom)
const getSystemResources = async () => {
@@ -40,6 +42,10 @@ export default function useGetSystemResources() {
setTotalRam(resourceInfor.mem.totalMemory)
setRam(Math.round(ram * 100))
+ if (resourceInfor.mem.totalMemory && resourceInfor.mem.usedMemory)
+ setAvailableRam(
+ resourceInfor.mem.totalMemory - resourceInfor.mem.usedMemory
+ )
setCPU(Math.round(currentLoadInfor?.cpu?.usage ?? 0))
setCpuUsage(Math.round(currentLoadInfor?.cpu?.usage ?? 0))
}
diff --git a/web/hooks/usePath.ts b/web/hooks/usePath.ts
index 88abae92c..aea25bef1 100644
--- a/web/hooks/usePath.ts
+++ b/web/hooks/usePath.ts
@@ -3,28 +3,23 @@ import { useAtomValue } from 'jotai'
import { selectedModelAtom } from '@/containers/DropdownListSidebar'
-import { activeThreadAtom, threadStatesAtom } from '@/helpers/atoms/Thread.atom'
+import { activeThreadAtom } from '@/helpers/atoms/Thread.atom'
export const usePath = () => {
const activeThread = useAtomValue(activeThreadAtom)
- const threadStates = useAtomValue(threadStatesAtom)
const selectedModel = useAtomValue(selectedModelAtom)
const onReviewInFinder = async (type: string) => {
- if (!activeThread) return
- const activeThreadState = threadStates[activeThread.id]
- if (!activeThreadState.isFinishInit) {
- alert('Thread is not started yet')
- return
- }
+ // TODO: this logic should be refactored.
+ if (type !== 'Model' && !activeThread) return
const userSpace = await getJanDataFolderPath()
let filePath = undefined
- const assistantId = activeThread.assistants[0]?.assistant_id
+ const assistantId = activeThread?.assistants[0]?.assistant_id
switch (type) {
case 'Engine':
case 'Thread':
- filePath = await joinPath(['threads', activeThread.id])
+ filePath = await joinPath(['threads', activeThread?.id ?? ''])
break
case 'Model':
if (!selectedModel) return
@@ -44,20 +39,20 @@ export const usePath = () => {
}
const onViewJson = async (type: string) => {
- if (!activeThread) return
- const activeThreadState = threadStates[activeThread.id]
- if (!activeThreadState.isFinishInit) {
- alert('Thread is not started yet')
- return
- }
+ // TODO: this logic should be refactored.
+ if (type !== 'Model' && !activeThread) return
const userSpace = await getJanDataFolderPath()
let filePath = undefined
- const assistantId = activeThread.assistants[0]?.assistant_id
+ const assistantId = activeThread?.assistants[0]?.assistant_id
switch (type) {
case 'Engine':
case 'Thread':
- filePath = await joinPath(['threads', activeThread.id, 'thread.json'])
+ filePath = await joinPath([
+ 'threads',
+ activeThread?.id ?? '',
+ 'thread.json',
+ ])
break
case 'Model':
if (!selectedModel) return
@@ -78,11 +73,6 @@ export const usePath = () => {
const onViewFile = async (id: string) => {
if (!activeThread) return
- const activeThreadState = threadStates[activeThread.id]
- if (!activeThreadState.isFinishInit) {
- alert('Thread is not started yet')
- return
- }
const userSpace = await getJanDataFolderPath()
let filePath = undefined
@@ -92,9 +82,21 @@ export const usePath = () => {
openFileExplorer(fullPath)
}
+ const onViewFileContainer = async () => {
+ if (!activeThread) return
+
+ const userSpace = await getJanDataFolderPath()
+ let filePath = undefined
+ filePath = await joinPath(['threads', `${activeThread.id}/files`])
+ if (!filePath) return
+ const fullPath = await joinPath([userSpace, filePath])
+ openFileExplorer(fullPath)
+ }
+
return {
onReviewInFinder,
onViewJson,
onViewFile,
+ onViewFileContainer,
}
}
diff --git a/web/hooks/useRecommendedModel.ts b/web/hooks/useRecommendedModel.ts
index 2ee4c1a7f..427d2bf73 100644
--- a/web/hooks/useRecommendedModel.ts
+++ b/web/hooks/useRecommendedModel.ts
@@ -26,7 +26,6 @@ export default function useRecommendedModel() {
const activeModel = useAtomValue(activeModelAtom)
const [downloadedModels, setDownloadedModels] = useState([])
const [recommendedModel, setRecommendedModel] = useState()
- const threadStates = useAtomValue(threadStatesAtom)
const activeThread = useAtomValue(activeThreadAtom)
const getAndSortDownloadedModels = useCallback(async (): Promise => {
@@ -43,30 +42,12 @@ export default function useRecommendedModel() {
Model | undefined
> => {
const models = await getAndSortDownloadedModels()
- if (!activeThread) {
- return
- }
+ if (!activeThread) return
+ const modelId = activeThread.assistants[0]?.model.id
+ const model = models.find((model) => model.id === modelId)
- const finishInit = threadStates[activeThread.id].isFinishInit ?? true
- if (finishInit) {
- const modelId = activeThread.assistants[0]?.model.id
- const model = models.find((model) => model.id === modelId)
-
- if (model) {
- setRecommendedModel(model)
- }
-
- return
- } else {
- const modelId = activeThread.assistants[0]?.model.id
- if (modelId !== '*') {
- const model = models.find((model) => model.id === modelId)
-
- if (model) {
- setRecommendedModel(model)
- }
- return
- }
+ if (model) {
+ setRecommendedModel(model)
}
if (activeModel) {
diff --git a/web/hooks/useSendChatMessage.ts b/web/hooks/useSendChatMessage.ts
index c8a32564b..7d89764db 100644
--- a/web/hooks/useSendChatMessage.ts
+++ b/web/hooks/useSendChatMessage.ts
@@ -1,5 +1,5 @@
/* eslint-disable @typescript-eslint/no-explicit-any */
-import { useEffect, useRef, useState } from 'react'
+import { useEffect, useRef } from 'react'
import {
ChatCompletionMessage,
@@ -18,73 +18,74 @@ import {
ChatCompletionMessageContentType,
AssistantTool,
} from '@janhq/core'
-import { useAtom, useAtomValue, useSetAtom } from 'jotai'
+import { atom, useAtom, useAtomValue, useSetAtom } from 'jotai'
import { ulid } from 'ulid'
import { selectedModelAtom } from '@/containers/DropdownListSidebar'
import { currentPromptAtom, fileUploadAtom } from '@/containers/Providers/Jotai'
-import { toaster } from '@/containers/Toast'
-
import { getBase64 } from '@/utils/base64'
import { toRuntimeParams, toSettingParams } from '@/utils/modelParam'
-import { useActiveModel } from './useActiveModel'
+import { loadModelErrorAtom, useActiveModel } from './useActiveModel'
import { extensionManager } from '@/extension/ExtensionManager'
import {
addNewMessageAtom,
- generateResponseAtom,
getCurrentChatMessagesAtom,
} from '@/helpers/atoms/ChatMessage.atom'
import {
activeThreadAtom,
engineParamsUpdateAtom,
getActiveThreadModelParamsAtom,
- threadStatesAtom,
+ isGeneratingResponseAtom,
updateThreadAtom,
- updateThreadInitSuccessAtom,
updateThreadWaitingForResponseAtom,
} from '@/helpers/atoms/Thread.atom'
+export const queuedMessageAtom = atom(false)
+export const reloadModelAtom = atom(false)
+
export default function useSendChatMessage() {
const activeThread = useAtomValue(activeThreadAtom)
const addNewMessage = useSetAtom(addNewMessageAtom)
const updateThread = useSetAtom(updateThreadAtom)
const updateThreadWaiting = useSetAtom(updateThreadWaitingForResponseAtom)
- const [currentPrompt, setCurrentPrompt] = useAtom(currentPromptAtom)
- const setGenerateResponse = useSetAtom(generateResponseAtom)
+ const setCurrentPrompt = useSetAtom(currentPromptAtom)
const currentMessages = useAtomValue(getCurrentChatMessagesAtom)
const { activeModel } = useActiveModel()
const selectedModel = useAtomValue(selectedModelAtom)
const { startModel } = useActiveModel()
- const [queuedMessage, setQueuedMessage] = useState(false)
+ const setQueuedMessage = useSetAtom(queuedMessageAtom)
+ const loadModelFailed = useAtomValue(loadModelErrorAtom)
const modelRef = useRef()
- const threadStates = useAtomValue(threadStatesAtom)
- const updateThreadInitSuccess = useSetAtom(updateThreadInitSuccessAtom)
+ const loadModelFailedRef = useRef()
const activeModelParams = useAtomValue(getActiveThreadModelParamsAtom)
-
const engineParamsUpdate = useAtomValue(engineParamsUpdateAtom)
- const setEngineParamsUpdate = useSetAtom(engineParamsUpdateAtom)
- const [reloadModel, setReloadModel] = useState(false)
+ const setEngineParamsUpdate = useSetAtom(engineParamsUpdateAtom)
+ const setReloadModel = useSetAtom(reloadModelAtom)
const [fileUpload, setFileUpload] = useAtom(fileUploadAtom)
+ const setIsGeneratingResponse = useSetAtom(isGeneratingResponseAtom)
useEffect(() => {
modelRef.current = activeModel
}, [activeModel])
+ useEffect(() => {
+ loadModelFailedRef.current = loadModelFailed
+ }, [loadModelFailed])
+
const resendChatMessage = async (currentMessage: ThreadMessage) => {
if (!activeThread) {
console.error('No active thread')
return
}
-
+ setIsGeneratingResponse(true)
updateThreadWaiting(activeThread.id, true)
-
const messages: ChatCompletionMessage[] = [
activeThread.assistants[0]?.instructions,
]
@@ -121,85 +122,28 @@ export default function useSendChatMessage() {
if (activeModel?.id !== modelId) {
setQueuedMessage(true)
startModel(modelId)
- await WaitForModelStarting(modelId)
+ await waitForModelStarting(modelId)
setQueuedMessage(false)
}
events.emit(MessageEvent.OnMessageSent, messageRequest)
}
- // TODO: Refactor @louis
- const WaitForModelStarting = async (modelId: string) => {
- return new Promise((resolve) => {
- setTimeout(async () => {
- if (modelRef.current?.id !== modelId) {
- console.debug('waiting for model to start')
- await WaitForModelStarting(modelId)
- resolve()
- } else {
- resolve()
- }
- }, 200)
- })
- }
-
- const sendChatMessage = async () => {
- setGenerateResponse(true)
-
- if (!currentPrompt || currentPrompt.trim().length === 0) return
+ const sendChatMessage = async (message: string) => {
+ if (!message || message.trim().length === 0) return
if (!activeThread) {
console.error('No active thread')
return
}
+ setIsGeneratingResponse(true)
if (engineParamsUpdate) setReloadModel(true)
- const activeThreadState = threadStates[activeThread.id]
const runtimeParams = toRuntimeParams(activeModelParams)
const settingParams = toSettingParams(activeModelParams)
- // if the thread is not initialized, we need to initialize it first
- if (
- !activeThreadState.isFinishInit ||
- activeThread.assistants[0].model.id !== selectedModel?.id
- ) {
- if (!selectedModel) {
- toaster({ title: 'Please select a model' })
- return
- }
- const assistantId = activeThread.assistants[0].assistant_id ?? ''
- const assistantName = activeThread.assistants[0].assistant_name ?? ''
- const instructions = activeThread.assistants[0].instructions ?? ''
- const tools = activeThread.assistants[0].tools ?? []
-
- const updatedThread: Thread = {
- ...activeThread,
- assistants: [
- {
- assistant_id: assistantId,
- assistant_name: assistantName,
- instructions: instructions,
- tools: tools,
- model: {
- id: selectedModel.id,
- settings: settingParams,
- parameters: runtimeParams,
- engine: selectedModel.engine,
- },
- },
- ],
- }
- updateThreadInitSuccess(activeThread.id)
- updateThread(updatedThread)
-
- await extensionManager
- .get(ExtensionTypeEnum.Conversational)
- ?.saveThread(updatedThread)
- }
-
updateThreadWaiting(activeThread.id, true)
-
- const prompt = currentPrompt.trim()
+ const prompt = message.trim()
setCurrentPrompt('')
const base64Blob = fileUpload[0]
@@ -326,6 +270,14 @@ export default function useSendChatMessage() {
setFileUpload([])
}
+ const updatedThread: Thread = {
+ ...activeThread,
+ updated: timestamp,
+ }
+
+ // change last update thread when send message
+ updateThread(updatedThread)
+
await extensionManager
.get(ExtensionTypeEnum.Conversational)
?.addNewMessage(threadMessage)
@@ -335,7 +287,7 @@ export default function useSendChatMessage() {
if (activeModel?.id !== modelId) {
setQueuedMessage(true)
startModel(modelId)
- await WaitForModelStarting(modelId)
+ await waitForModelStarting(modelId)
setQueuedMessage(false)
}
@@ -345,10 +297,21 @@ export default function useSendChatMessage() {
setEngineParamsUpdate(false)
}
+ const waitForModelStarting = async (modelId: string) => {
+ return new Promise((resolve) => {
+ setTimeout(async () => {
+ if (modelRef.current?.id !== modelId && !loadModelFailedRef.current) {
+ await waitForModelStarting(modelId)
+ resolve()
+ } else {
+ resolve()
+ }
+ }, 200)
+ })
+ }
+
return {
- reloadModel,
sendChatMessage,
resendChatMessage,
- queuedMessage,
}
}
diff --git a/web/hooks/useSetActiveThread.ts b/web/hooks/useSetActiveThread.ts
index 035f0551a..3545d0d23 100644
--- a/web/hooks/useSetActiveThread.ts
+++ b/web/hooks/useSetActiveThread.ts
@@ -1,5 +1,3 @@
-import { useEffect } from 'react'
-
import {
InferenceEvent,
ExtensionTypeEnum,
@@ -15,6 +13,7 @@ import { setConvoMessagesAtom } from '@/helpers/atoms/ChatMessage.atom'
import {
ModelParams,
getActiveThreadIdAtom,
+ isGeneratingResponseAtom,
setActiveThreadIdAtom,
setThreadModelParamsAtom,
} from '@/helpers/atoms/Thread.atom'
@@ -24,6 +23,7 @@ export default function useSetActiveThread() {
const setActiveThreadId = useSetAtom(setActiveThreadIdAtom)
const setThreadMessage = useSetAtom(setConvoMessagesAtom)
const setThreadModelParams = useSetAtom(setThreadModelParamsAtom)
+ const setIsGeneratingResponse = useSetAtom(isGeneratingResponseAtom)
const setActiveThread = async (thread: Thread) => {
if (activeThreadId === thread.id) {
@@ -31,6 +31,7 @@ export default function useSetActiveThread() {
return
}
+ setIsGeneratingResponse(false)
events.emit(InferenceEvent.OnInferenceStopped, thread.id)
// load the corresponding messages
diff --git a/web/hooks/useSettings.ts b/web/hooks/useSettings.ts
index ef4e08480..168e72489 100644
--- a/web/hooks/useSettings.ts
+++ b/web/hooks/useSettings.ts
@@ -1,4 +1,4 @@
-import { useEffect, useState } from 'react'
+import { useCallback, useEffect, useState } from 'react'
import { fs, joinPath } from '@janhq/core'
import { atom, useAtom } from 'jotai'
@@ -32,7 +32,7 @@ export const useSettings = () => {
})
}
- const readSettings = async () => {
+ const readSettings = useCallback(async () => {
if (!window?.core?.api) {
return
}
@@ -42,7 +42,8 @@ export const useSettings = () => {
return typeof settings === 'object' ? settings : JSON.parse(settings)
}
return {}
- }
+ }, [])
+
const saveSettings = async ({
runMode,
notify,
diff --git a/web/hooks/useThreads.ts b/web/hooks/useThreads.ts
index b79cfea92..b7de014cc 100644
--- a/web/hooks/useThreads.ts
+++ b/web/hooks/useThreads.ts
@@ -5,24 +5,24 @@ import {
ConversationalExtension,
} from '@janhq/core'
-import { useAtom } from 'jotai'
+import { useAtomValue, useSetAtom } from 'jotai'
import useSetActiveThread from './useSetActiveThread'
import { extensionManager } from '@/extension/ExtensionManager'
import {
ModelParams,
+ activeThreadAtom,
threadModelParamsAtom,
threadStatesAtom,
threadsAtom,
} from '@/helpers/atoms/Thread.atom'
const useThreads = () => {
- const [threadStates, setThreadStates] = useAtom(threadStatesAtom)
- const [threads, setThreads] = useAtom(threadsAtom)
- const [threadModelRuntimeParams, setThreadModelRuntimeParams] = useAtom(
- threadModelParamsAtom
- )
+ const setThreadStates = useSetAtom(threadStatesAtom)
+ const setThreads = useSetAtom(threadsAtom)
+ const setThreadModelRuntimeParams = useSetAtom(threadModelParamsAtom)
+ const activeThread = useAtomValue(activeThreadAtom)
const { setActiveThread } = useSetActiveThread()
const getThreads = async () => {
@@ -39,7 +39,6 @@ const useThreads = () => {
hasMore: false,
waitingForResponse: false,
lastMessage,
- isFinishInit: true,
}
const modelParams = thread.assistants?.[0]?.model?.parameters
@@ -51,41 +50,12 @@ const useThreads = () => {
}
})
- // allow at max 1 unfinished init thread and it should be at the top of the list
- let unfinishedThreadId: string | undefined = undefined
- const unfinishedThreadState: Record = {}
-
- for (const key of Object.keys(threadStates)) {
- const threadState = threadStates[key]
- if (threadState.isFinishInit === false) {
- unfinishedThreadState[key] = threadState
- unfinishedThreadId = key
- break
- }
- }
- const unfinishedThread: Thread | undefined = threads.find(
- (thread) => thread.id === unfinishedThreadId
- )
-
- let allThreads: Thread[] = [...localThreads]
- if (unfinishedThread) {
- allThreads = [unfinishedThread, ...localThreads]
- }
-
- if (unfinishedThreadId) {
- localThreadStates[unfinishedThreadId] =
- unfinishedThreadState[unfinishedThreadId]
-
- threadModelParams[unfinishedThreadId] =
- threadModelRuntimeParams[unfinishedThreadId]
- }
-
// updating app states
setThreadStates(localThreadStates)
- setThreads(allThreads)
+ setThreads(localThreads)
setThreadModelRuntimeParams(threadModelParams)
- if (allThreads.length > 0) {
- setActiveThread(allThreads[0])
+ if (localThreads.length && !activeThread) {
+ setActiveThread(localThreads[0])
}
} catch (error) {
console.error(error)
diff --git a/web/hooks/useUpdateModelParameters.ts b/web/hooks/useUpdateModelParameters.ts
index 80070ef26..694394cee 100644
--- a/web/hooks/useUpdateModelParameters.ts
+++ b/web/hooks/useUpdateModelParameters.ts
@@ -2,12 +2,15 @@
import {
ConversationalExtension,
ExtensionTypeEnum,
+ InferenceEngine,
Thread,
ThreadAssistantInfo,
} from '@janhq/core'
import { useAtomValue, useSetAtom } from 'jotai'
+import { selectedModelAtom } from '@/containers/DropdownListSidebar'
+
import { toRuntimeParams, toSettingParams } from '@/utils/modelParam'
import { extensionManager } from '@/extension'
@@ -19,16 +22,22 @@ import {
threadsAtom,
} from '@/helpers/atoms/Thread.atom'
+export type UpdateModelParameter = {
+ params?: ModelParams
+ modelId?: string
+ engine?: InferenceEngine
+}
+
export default function useUpdateModelParameters() {
const threads = useAtomValue(threadsAtom)
const setThreadModelParams = useSetAtom(setThreadModelParamsAtom)
const activeThreadState = useAtomValue(activeThreadStateAtom)
const activeModelParams = useAtomValue(getActiveThreadModelParamsAtom)
+ const selectedModel = useAtomValue(selectedModelAtom)
const updateModelParameter = async (
threadId: string,
- name: string,
- value: number | boolean | string
+ settings: UpdateModelParameter
) => {
const thread = threads.find((thread) => thread.id === threadId)
if (!thread) {
@@ -40,21 +49,18 @@ export default function useUpdateModelParameters() {
console.error('No active thread')
return
}
+
+ const params = settings.modelId
+ ? settings.params
+ : { ...activeModelParams, ...settings.params }
+
const updatedModelParams: ModelParams = {
- ...activeModelParams,
- // Explicitly set the value to an array if the name is 'stop'
- // This is because the inference engine would only accept an array for the 'stop' parameter
- [name]: name === 'stop' ? (value === '' ? [] : [value]) : value,
+ ...params,
}
// update the state
setThreadModelParams(thread.id, updatedModelParams)
- if (!activeThreadState.isFinishInit) {
- // if thread is not initialized, we don't need to update thread.json
- return
- }
-
const assistants = thread.assistants.map(
(assistant: ThreadAssistantInfo) => {
const runtimeParams = toRuntimeParams(updatedModelParams)
@@ -62,6 +68,10 @@ export default function useUpdateModelParameters() {
assistant.model.parameters = runtimeParams
assistant.model.settings = settingParams
+ if (selectedModel) {
+ assistant.model.id = settings.modelId ?? selectedModel?.id
+ assistant.model.engine = settings.engine ?? selectedModel?.engine
+ }
return assistant
}
)
diff --git a/web/hooks/useVaultDirectory.ts b/web/hooks/useVaultDirectory.ts
deleted file mode 100644
index 3aa7383c9..000000000
--- a/web/hooks/useVaultDirectory.ts
+++ /dev/null
@@ -1,105 +0,0 @@
-import { useEffect } from 'react'
-
-import { fs, AppConfiguration } from '@janhq/core'
-
-import { atom, useAtom } from 'jotai'
-
-import { useMainViewState } from './useMainViewState'
-
-const isSameDirectoryAtom = atom(false)
-const isDirectoryConfirmAtom = atom(false)
-const isErrorSetNewDestAtom = atom(false)
-const currentPathAtom = atom('')
-const newDestinationPathAtom = atom('')
-
-export const SUCCESS_SET_NEW_DESTINATION = 'successSetNewDestination'
-
-export function useVaultDirectory() {
- const [isSameDirectory, setIsSameDirectory] = useAtom(isSameDirectoryAtom)
- const { setMainViewState } = useMainViewState()
- const [isDirectoryConfirm, setIsDirectoryConfirm] = useAtom(
- isDirectoryConfirmAtom
- )
- const [isErrorSetNewDest, setIsErrorSetNewDest] = useAtom(
- isErrorSetNewDestAtom
- )
- const [currentPath, setCurrentPath] = useAtom(currentPathAtom)
- const [newDestinationPath, setNewDestinationPath] = useAtom(
- newDestinationPathAtom
- )
-
- useEffect(() => {
- window.core?.api
- ?.getAppConfigurations()
- ?.then((appConfig: AppConfiguration) => {
- setCurrentPath(appConfig.data_folder)
- })
- // eslint-disable-next-line react-hooks/exhaustive-deps
- }, [])
-
- const setNewDestination = async () => {
- const destFolder = await window.core?.api?.selectDirectory()
- setNewDestinationPath(destFolder)
-
- if (destFolder) {
- console.debug(`Destination folder selected: ${destFolder}`)
- try {
- const appConfiguration: AppConfiguration =
- await window.core?.api?.getAppConfigurations()
- const currentJanDataFolder = appConfiguration.data_folder
-
- if (currentJanDataFolder === destFolder) {
- console.debug(
- `Destination folder is the same as current folder. Ignore..`
- )
- setIsSameDirectory(true)
- setIsDirectoryConfirm(false)
- return
- } else {
- setIsSameDirectory(false)
- setIsDirectoryConfirm(true)
- }
- setIsErrorSetNewDest(false)
- } catch (e) {
- console.error(`Error: ${e}`)
- setIsErrorSetNewDest(true)
- }
- }
- }
-
- const applyNewDestination = async () => {
- try {
- const appConfiguration: AppConfiguration =
- await window.core?.api?.getAppConfigurations()
- const currentJanDataFolder = appConfiguration.data_folder
-
- appConfiguration.data_folder = newDestinationPath
-
- await fs.syncFile(currentJanDataFolder, newDestinationPath)
- await window.core?.api?.updateAppConfiguration(appConfiguration)
- console.debug(
- `File sync finished from ${currentPath} to ${newDestinationPath}`
- )
-
- setIsErrorSetNewDest(false)
- localStorage.setItem(SUCCESS_SET_NEW_DESTINATION, 'true')
- await window.core?.api?.relaunch()
- } catch (e) {
- console.error(`Error: ${e}`)
- setIsErrorSetNewDest(true)
- }
- }
-
- return {
- setNewDestination,
- newDestinationPath,
- applyNewDestination,
- isSameDirectory,
- setIsDirectoryConfirm,
- isDirectoryConfirm,
- setIsSameDirectory,
- currentPath,
- isErrorSetNewDest,
- setIsErrorSetNewDest,
- }
-}
diff --git a/web/next.config.js b/web/next.config.js
index 0b6a8bc92..a2e202c51 100644
--- a/web/next.config.js
+++ b/web/next.config.js
@@ -25,10 +25,8 @@ const nextConfig = {
...config.plugins,
new webpack.DefinePlugin({
VERSION: JSON.stringify(packageJson.version),
- ANALYTICS_ID:
- JSON.stringify(process.env.ANALYTICS_ID) ?? JSON.stringify('xxx'),
- ANALYTICS_HOST:
- JSON.stringify(process.env.ANALYTICS_HOST) ?? JSON.stringify('xxx'),
+ ANALYTICS_ID: JSON.stringify(process.env.ANALYTICS_ID),
+ ANALYTICS_HOST: JSON.stringify(process.env.ANALYTICS_HOST),
API_BASE_URL: JSON.stringify('http://localhost:1337'),
isMac: process.platform === 'darwin',
isWindows: process.platform === 'win32',
diff --git a/web/screens/Chat/AssistantSetting/index.tsx b/web/screens/Chat/AssistantSetting/index.tsx
index b97c39e67..df516def0 100644
--- a/web/screens/Chat/AssistantSetting/index.tsx
+++ b/web/screens/Chat/AssistantSetting/index.tsx
@@ -57,7 +57,7 @@ const AssistantSetting = ({
tools: [
{
type: 'retrieval',
- enabled: false,
+ enabled: true,
settings: {
...(activeThread.assistants[0].tools &&
activeThread.assistants[0].tools[0]?.settings),
diff --git a/web/screens/Chat/ChatBody/index.tsx b/web/screens/Chat/ChatBody/index.tsx
index 9f629e627..1ce6b591f 100644
--- a/web/screens/Chat/ChatBody/index.tsx
+++ b/web/screens/Chat/ChatBody/index.tsx
@@ -8,11 +8,9 @@ import { useAtomValue } from 'jotai'
import LogoMark from '@/containers/Brand/Logo/Mark'
-import GenerateResponse from '@/containers/Loader/GenerateResponse'
-
import { MainViewState } from '@/constants/screens'
-import { activeModelAtom } from '@/hooks/useActiveModel'
+import { loadModelErrorAtom } from '@/hooks/useActiveModel'
import { useGetDownloadedModels } from '@/hooks/useGetDownloadedModels'
import { useMainViewState } from '@/hooks/useMainViewState'
@@ -21,17 +19,13 @@ import ChatItem from '../ChatItem'
import ErrorMessage from '../ErrorMessage'
-import {
- generateResponseAtom,
- getCurrentChatMessagesAtom,
-} from '@/helpers/atoms/ChatMessage.atom'
+import { getCurrentChatMessagesAtom } from '@/helpers/atoms/ChatMessage.atom'
const ChatBody: React.FC = () => {
const messages = useAtomValue(getCurrentChatMessagesAtom)
- const activeModel = useAtomValue(activeModelAtom)
const { downloadedModels } = useGetDownloadedModels()
const { setMainViewState } = useMainViewState()
- const generateResponse = useAtomValue(generateResponseAtom)
+ const loadModelError = useAtomValue(loadModelErrorAtom)
if (downloadedModels.length === 0)
return (
@@ -92,22 +86,14 @@ const ChatBody: React.FC = () => {
message.content.length > 0) && (
)}
- {(message.status === MessageStatus.Error ||
- message.status === MessageStatus.Stopped) &&
+ {!loadModelError &&
+ (message.status === MessageStatus.Error ||
+ message.status === MessageStatus.Stopped) &&
index === messages.length - 1 && (
)}
))}
-
- {activeModel &&
- (generateResponse ||
- (messages.length &&
- messages[messages.length - 1].status ===
- MessageStatus.Pending &&
- !messages[messages.length - 1].content.length)) && (
-
- )}
)}
diff --git a/web/screens/Chat/ChatInput/index.tsx b/web/screens/Chat/ChatInput/index.tsx
index b960bdc57..ee1ac9a41 100644
--- a/web/screens/Chat/ChatInput/index.tsx
+++ b/web/screens/Chat/ChatInput/index.tsx
@@ -1,5 +1,5 @@
/* eslint-disable @typescript-eslint/no-explicit-any */
-import { useEffect, useRef, useState } from 'react'
+import { useContext, useEffect, useRef, useState } from 'react'
import { InferenceEvent, MessageStatus, events } from '@janhq/core'
@@ -24,6 +24,8 @@ import { twMerge } from 'tailwind-merge'
import { currentPromptAtom, fileUploadAtom } from '@/containers/Providers/Jotai'
+import { FeatureToggleContext } from '@/context/FeatureToggle'
+
import { useActiveModel } from '@/hooks/useActiveModel'
import { useClickOutside } from '@/hooks/useClickOutside'
@@ -53,7 +55,8 @@ const ChatInput: React.FC = () => {
const textareaRef = useRef(null)
const fileInputRef = useRef(null)
const imageInputRef = useRef(null)
- const [ShowAttacmentMenus, setShowAttacmentMenus] = useState(false)
+ const [showAttacmentMenus, setShowAttacmentMenus] = useState(false)
+ const { experimentalFeature } = useContext(FeatureToggleContext)
const onPromptChange = (e: React.ChangeEvent) => {
setCurrentPrompt(e.target.value)
@@ -64,30 +67,35 @@ const ChatInput: React.FC = () => {
useEffect(() => {
if (isWaitingToSend && activeThreadId) {
setIsWaitingToSend(false)
- sendChatMessage()
+ sendChatMessage(currentPrompt)
}
+ }, [
+ activeThreadId,
+ isWaitingToSend,
+ currentPrompt,
+ setIsWaitingToSend,
+ sendChatMessage,
+ ])
+
+ useEffect(() => {
if (textareaRef.current) {
textareaRef.current.focus()
}
- // eslint-disable-next-line react-hooks/exhaustive-deps
- }, [waitingToSendMessage, activeThreadId])
+ }, [activeThreadId])
useEffect(() => {
if (textareaRef.current) {
textareaRef.current.style.height = '40px'
textareaRef.current.style.height = textareaRef.current.scrollHeight + 'px'
- textareaRef.current.focus()
}
}, [currentPrompt])
const onKeyDown = async (e: React.KeyboardEvent) => {
- if (e.key === 'Enter') {
- if (!e.shiftKey) {
- e.preventDefault()
- if (messages[messages.length - 1]?.status !== MessageStatus.Pending)
- sendChatMessage()
- else onStopInferenceClick()
- }
+ if (e.key === 'Enter' && !e.shiftKey) {
+ e.preventDefault()
+ if (messages[messages.length - 1]?.status !== MessageStatus.Pending)
+ sendChatMessage(currentPrompt)
+ else onStopInferenceClick()
}
}
@@ -142,50 +150,52 @@ const ChatInput: React.FC = () => {
value={currentPrompt}
onChange={onPromptChange}
/>
-
-
-
- {
- if (
- fileUpload.length > 0 ||
- (activeThread?.assistants[0].tools &&
- !activeThread?.assistants[0].tools[0]?.enabled)
- ) {
- e.stopPropagation()
- } else {
- setShowAttacmentMenus(!ShowAttacmentMenus)
- }
- }}
- />
-
-
- {fileUpload.length > 0 ||
- (activeThread?.assistants[0].tools &&
- !activeThread?.assistants[0].tools[0]?.enabled && (
-
- {fileUpload.length !== 0 && (
-
- Currently, we only support 1 attachment at the same time
-
- )}
- {activeThread?.assistants[0].tools &&
- activeThread?.assistants[0].tools[0]?.enabled ===
- false && (
+ {experimentalFeature && (
+
+
+ {
+ if (
+ fileUpload.length > 0 ||
+ (activeThread?.assistants[0].tools &&
+ !activeThread?.assistants[0].tools[0]?.enabled)
+ ) {
+ e.stopPropagation()
+ } else {
+ setShowAttacmentMenus(!showAttacmentMenus)
+ }
+ }}
+ />
+
+
+ {fileUpload.length > 0 ||
+ (activeThread?.assistants[0].tools &&
+ !activeThread?.assistants[0].tools[0]?.enabled && (
+
+ {fileUpload.length !== 0 && (
- Turn on Retrieval in Assistant Settings to use this
- feature
+ Currently, we only support 1 attachment at the same
+ time
)}
-
-
- ))}
-
-
+ {activeThread?.assistants[0].tools &&
+ activeThread?.assistants[0].tools[0]?.enabled ===
+ false && (
+
+ Turn on Retrieval in Assistant Settings to use this
+ feature
+
+ )}
+
+
+ ))}
+
+
+ )}
- {ShowAttacmentMenus && (
+ {showAttacmentMenus && (
{
}
themes="primary"
className="min-w-[100px]"
- onClick={sendChatMessage}
+ onClick={() => sendChatMessage(currentPrompt)}
>
Send
diff --git a/web/screens/Chat/ErrorMessage/index.tsx b/web/screens/Chat/ErrorMessage/index.tsx
index 8879b15be..84a89cee8 100644
--- a/web/screens/Chat/ErrorMessage/index.tsx
+++ b/web/screens/Chat/ErrorMessage/index.tsx
@@ -17,7 +17,6 @@ import {
deleteMessageAtom,
getCurrentChatMessagesAtom,
} from '@/helpers/atoms/ChatMessage.atom'
-import { totalRamAtom } from '@/helpers/atoms/SystemBar.atom'
import { activeThreadAtom } from '@/helpers/atoms/Thread.atom'
const ErrorMessage = ({ message }: { message: ThreadMessage }) => {
@@ -25,8 +24,6 @@ const ErrorMessage = ({ message }: { message: ThreadMessage }) => {
const thread = useAtomValue(activeThreadAtom)
const deleteMessage = useSetAtom(deleteMessageAtom)
const { resendChatMessage } = useSendChatMessage()
- const { activeModel } = useActiveModel()
- const totalRam = useAtomValue(totalRamAtom)
const regenerateMessage = async () => {
const lastMessageIndex = messages.length - 1
@@ -70,33 +67,26 @@ const ErrorMessage = ({ message }: { message: ThreadMessage }) => {
{message.status === MessageStatus.Error && (
- {Number(activeModel?.metadata.size) > totalRam ? (
- <>
- Oops! Model size exceeds available RAM. Consider selecting a
- smaller model or upgrading your RAM for smoother performance.
- >
- ) : (
- <>
- Apologies, something's amiss!
- Jan's in beta. Find troubleshooting guides{' '}
-
- here
- {' '}
- or reach out to us on{' '}
-
- Discord
- {' '}
- for assistance.
- >
- )}
+ <>
+ Apologies, something's amiss!
+ Jan's in beta. Find troubleshooting guides{' '}
+
+ here
+ {' '}
+ or reach out to us on{' '}
+
+ Discord
+ {' '}
+ for assistance.
+ >
)}
diff --git a/web/screens/Chat/LoadModelErrorMessage/index.tsx b/web/screens/Chat/LoadModelErrorMessage/index.tsx
new file mode 100644
index 000000000..d3c4a704d
--- /dev/null
+++ b/web/screens/Chat/LoadModelErrorMessage/index.tsx
@@ -0,0 +1,48 @@
+import { MessageStatus, ThreadMessage } from '@janhq/core'
+import { useAtomValue } from 'jotai'
+
+import { useActiveModel } from '@/hooks/useActiveModel'
+
+import { totalRamAtom } from '@/helpers/atoms/SystemBar.atom'
+
+const LoadModelErrorMessage = () => {
+ const { activeModel } = useActiveModel()
+ const availableRam = useAtomValue(totalRamAtom)
+
+ return (
+ <>
+
+
+ {Number(activeModel?.metadata.size) > availableRam ? (
+ <>
+ Oops! Model size exceeds available RAM. Consider selecting a
+ smaller model or upgrading your RAM for smoother performance.
+ >
+ ) : (
+ <>
+ Apologies, something's amiss!
+ Jan's in beta. Find troubleshooting guides{' '}
+
+ here
+ {' '}
+ or reach out to us on{' '}
+
+ Discord
+ {' '}
+ for assistance.
+ >
+ )}
+
+
+ >
+ )
+}
+export default LoadModelErrorMessage
diff --git a/web/screens/Chat/MessageQueuedBanner/index.tsx b/web/screens/Chat/MessageQueuedBanner/index.tsx
index df9aa5a21..5847394b4 100644
--- a/web/screens/Chat/MessageQueuedBanner/index.tsx
+++ b/web/screens/Chat/MessageQueuedBanner/index.tsx
@@ -1,7 +1,9 @@
-import useSendChatMessage from '@/hooks/useSendChatMessage'
+import { useAtomValue } from 'jotai'
+
+import { queuedMessageAtom } from '@/hooks/useSendChatMessage'
const MessageQueuedBanner: React.FC = () => {
- const { queuedMessage } = useSendChatMessage()
+ const queuedMessage = useAtomValue(queuedMessageAtom)
return (
diff --git a/web/screens/Chat/MessageToolbar/index.tsx b/web/screens/Chat/MessageToolbar/index.tsx
index dfa8d63c6..070022122 100644
--- a/web/screens/Chat/MessageToolbar/index.tsx
+++ b/web/screens/Chat/MessageToolbar/index.tsx
@@ -4,6 +4,7 @@ import {
ThreadMessage,
ChatCompletionRole,
ConversationalExtension,
+ ContentType,
} from '@janhq/core'
import { useAtomValue, useSetAtom } from 'jotai'
import { RefreshCcw, CopyIcon, Trash2Icon, CheckIcon } from 'lucide-react'
@@ -53,7 +54,9 @@ const MessageToolbar = ({ message }: { message: ThreadMessage }) => {
{message.id === messages[messages.length - 1]?.id &&
- messages[messages.length - 1].status !== MessageStatus.Error && (
+ messages[messages.length - 1].status !== MessageStatus.Error &&
+ messages[messages.length - 1].content[0]?.type !==
+ ContentType.Pdf && (
void
}) => {
const { updateModelParameter } = useUpdateModelParameters()
@@ -73,7 +73,10 @@ const SettingComponent = ({
const { stopModel } = useActiveModel()
- const onValueChanged = (name: string, value: string | number | boolean) => {
+ const onValueChanged = (
+ name: string,
+ value: string | number | boolean | string[]
+ ) => {
if (!threadId) return
if (engineParams.some((x) => x.name.includes(name))) {
setEngineParamsUpdate(true)
@@ -83,7 +86,13 @@ const SettingComponent = ({
}
if (updater) updater(threadId, name, value)
else {
- updateModelParameter(threadId, name, value)
+ // Convert stop string to array
+ if (name === 'stop' && typeof value === 'string') {
+ value = [value]
+ }
+ updateModelParameter(threadId, {
+ params: { [name]: value },
+ })
}
}
diff --git a/web/screens/Chat/Sidebar/index.tsx b/web/screens/Chat/Sidebar/index.tsx
index 763d77b1d..8088501b9 100644
--- a/web/screens/Chat/Sidebar/index.tsx
+++ b/web/screens/Chat/Sidebar/index.tsx
@@ -1,5 +1,5 @@
/* eslint-disable @typescript-eslint/no-explicit-any */
-import React from 'react'
+import React, { useContext } from 'react'
import { InferenceEngine } from '@janhq/core'
import { Input, Textarea, Switch } from '@janhq/uikit'
@@ -15,6 +15,8 @@ import DropdownListSidebar, {
selectedModelAtom,
} from '@/containers/DropdownListSidebar'
+import { FeatureToggleContext } from '@/context/FeatureToggle'
+
import { useCreateNewThread } from '@/hooks/useCreateNewThread'
import { getConfigurationsData } from '@/utils/componentSettings'
@@ -39,6 +41,7 @@ const Sidebar: React.FC = () => {
const activeModelParams = useAtomValue(getActiveThreadModelParamsAtom)
const selectedModel = useAtomValue(selectedModelAtom)
const { updateThreadMetadata } = useCreateNewThread()
+ const { experimentalFeature } = useContext(FeatureToggleContext)
const modelEngineParams = toSettingParams(activeModelParams)
const modelRuntimeParams = toRuntimeParams(activeModelParams)
@@ -131,78 +134,79 @@ const Sidebar: React.FC = () => {
}}
/>
-
-
- {activeThread?.assistants[0]?.tools &&
- componentDataAssistantSetting.length > 0 && (
-
- {
- if (activeThread)
- updateThreadMetadata({
- ...activeThread,
- assistants: [
- {
- ...activeThread.assistants[0],
- tools: [
- {
- type: 'retrieval',
- enabled: e,
- settings:
- (activeThread.assistants[0].tools &&
- activeThread.assistants[0].tools[0]
- ?.settings) ??
- {},
- },
- ],
- },
- ],
- })
- }}
- />
- }
- >
- {activeThread?.assistants[0]?.tools[0].enabled && (
-
-
-
-
-
-
-
-
+ {activeThread?.assistants[0]?.tools &&
+ componentDataAssistantSetting.length > 0 && (
+
+ {
+ if (activeThread)
+ updateThreadMetadata({
+ ...activeThread,
+ assistants: [
+ {
+ ...activeThread.assistants[0],
+ tools: [
+ {
+ type: 'retrieval',
+ enabled: e,
+ settings:
+ (activeThread.assistants[0].tools &&
+ activeThread.assistants[0]
+ .tools[0]?.settings) ??
+ {},
+ },
+ ],
+ },
+ ],
+ })
+ }}
/>
-
- )}
-
-
- )}
-
+ }
+ >
+ {activeThread?.assistants[0]?.tools[0].enabled && (
+
+
+
+
+
+
+
+
+
+ )}
+
+
+ )}
+
+ )}
-
-
-
-
+
+
{componentDataRuntimeSetting.length > 0 && (
diff --git a/web/screens/Chat/SimpleTextMessage/index.tsx b/web/screens/Chat/SimpleTextMessage/index.tsx
index feed96168..261bb3497 100644
--- a/web/screens/Chat/SimpleTextMessage/index.tsx
+++ b/web/screens/Chat/SimpleTextMessage/index.tsx
@@ -43,7 +43,7 @@ const SimpleTextMessage: React.FC = (props) => {
text = props.content[0]?.text?.value ?? ''
}
const clipboard = useClipboard({ timeout: 1000 })
- const { onViewFile } = usePath()
+ const { onViewFile, onViewFileContainer } = usePath()
const marked: Marked = new Marked(
markedHighlight({
@@ -200,13 +200,14 @@ const SimpleTextMessage: React.FC = (props) => {
className="aspect-auto h-[300px]"
alt={props.content[0]?.text.name}
src={props.content[0]?.text.annotations[0]}
+ onClick={() => onViewFile(`${props.id}.png`)}
/>
onViewFile(`${props.id}.png`)}
+ onClick={onViewFileContainer}
>
@@ -223,14 +224,17 @@ const SimpleTextMessage: React.FC = (props) => {
{props.content[0]?.type === ContentType.Pdf && (
-
+
+ onViewFile(`${props.id}.${props.content[0]?.type}`)
+ }
+ />
- onViewFile(`${props.id}.${props.content[0]?.type}`)
- }
+ onClick={onViewFileContainer}
>
diff --git a/web/screens/Chat/ThreadList/index.tsx b/web/screens/Chat/ThreadList/index.tsx
index b2e15d111..b4a045b1d 100644
--- a/web/screens/Chat/ThreadList/index.tsx
+++ b/web/screens/Chat/ThreadList/index.tsx
@@ -1,4 +1,4 @@
-import { useEffect } from 'react'
+import { useEffect, useState } from 'react'
import {
Modal,
@@ -49,17 +49,19 @@ export default function ThreadList() {
const activeThread = useAtomValue(activeThreadAtom)
const { deleteThread, cleanThread } = useDeleteThread()
const { downloadedModels } = useGetDownloadedModels()
+ const [isThreadsReady, setIsThreadsReady] = useState(false)
const { activeThreadId, setActiveThread: onThreadClick } =
useSetActiveThread()
useEffect(() => {
- getThreads()
+ getThreads().then(() => setIsThreadsReady(true))
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [])
useEffect(() => {
if (
+ isThreadsReady &&
downloadedModels.length !== 0 &&
threads.length === 0 &&
assistants.length !== 0 &&
@@ -68,7 +70,7 @@ export default function ThreadList() {
requestCreateNewThread(assistants[0])
}
// eslint-disable-next-line react-hooks/exhaustive-deps
- }, [assistants, threads, downloadedModels, activeThread])
+ }, [assistants, threads, downloadedModels, activeThread, isThreadsReady])
return (
@@ -84,7 +86,6 @@ export default function ThreadList() {
threads.map((thread, i) => {
const lastMessage =
threadStates[thread.id]?.lastMessage ?? 'No new message'
-
return (
-
- {thread.title}
-
- {thread.updated &&
- displayDate(new Date(thread.updated).getTime())}
-
-
+
+ {thread.updated && displayDate(thread.updated)}
+
+ {thread.title}
{lastMessage || 'No new message'}
@@ -161,9 +159,9 @@ export default function ThreadList() {
-
+
Delete thread
diff --git a/web/screens/Chat/index.tsx b/web/screens/Chat/index.tsx
index 6da8af13f..1f7896604 100644
--- a/web/screens/Chat/index.tsx
+++ b/web/screens/Chat/index.tsx
@@ -1,53 +1,87 @@
/* eslint-disable @typescript-eslint/naming-convention */
-import React, { useEffect, useState } from 'react'
+import React, { useContext, useEffect, useState } from 'react'
import { useDropzone } from 'react-dropzone'
import { useAtomValue, useSetAtom } from 'jotai'
-import { UploadCloudIcon, XIcon } from 'lucide-react'
+import { UploadCloudIcon } from 'lucide-react'
import { twMerge } from 'tailwind-merge'
+import GenerateResponse from '@/containers/Loader/GenerateResponse'
import ModelReload from '@/containers/Loader/ModelReload'
import ModelStart from '@/containers/Loader/ModelStart'
import { currentPromptAtom, fileUploadAtom } from '@/containers/Providers/Jotai'
import { showLeftSideBarAtom } from '@/containers/Providers/KeyListener'
-import useSendChatMessage from '@/hooks/useSendChatMessage'
+import { snackbar } from '@/containers/Toast'
+
+import { FeatureToggleContext } from '@/context/FeatureToggle'
+
+import { activeModelAtom, loadModelErrorAtom } from '@/hooks/useActiveModel'
+import { queuedMessageAtom, reloadModelAtom } from '@/hooks/useSendChatMessage'
import ChatBody from '@/screens/Chat/ChatBody'
import ThreadList from '@/screens/Chat/ThreadList'
import ChatInput from './ChatInput'
+import LoadModelErrorMessage from './LoadModelErrorMessage'
import RequestDownloadModel from './RequestDownloadModel'
import Sidebar from './Sidebar'
import {
activeThreadAtom,
engineParamsUpdateAtom,
+ isGeneratingResponseAtom,
} from '@/helpers/atoms/Thread.atom'
+const renderError = (code: string) => {
+ switch (code) {
+ case 'multiple-upload':
+ return 'Currently, we only support 1 attachment at the same time'
+
+ case 'retrieval-off':
+ return 'Turn on Retrieval in Assistant Settings to use this feature'
+
+ case 'file-invalid-type':
+ return 'We do not support this file type'
+
+ default:
+ return 'Oops, something error, please try again.'
+ }
+}
+
const ChatScreen: React.FC = () => {
const setCurrentPrompt = useSetAtom(currentPromptAtom)
const activeThread = useAtomValue(activeThreadAtom)
const showLeftSideBar = useAtomValue(showLeftSideBarAtom)
const engineParamsUpdate = useAtomValue(engineParamsUpdateAtom)
- const { queuedMessage, reloadModel } = useSendChatMessage()
const [dragOver, setDragOver] = useState(false)
+
+ const queuedMessage = useAtomValue(queuedMessageAtom)
+ const reloadModel = useAtomValue(reloadModelAtom)
const [dragRejected, setDragRejected] = useState({ code: '' })
const setFileUpload = useSetAtom(fileUploadAtom)
+ const { experimentalFeature } = useContext(FeatureToggleContext)
+
+ const activeModel = useAtomValue(activeModelAtom)
+
+ const isGeneratingResponse = useAtomValue(isGeneratingResponseAtom)
+ const loadModelError = useAtomValue(loadModelErrorAtom)
+
const { getRootProps, isDragReject } = useDropzone({
noClick: true,
multiple: false,
accept: {
- // 'image/*': ['.png', '.jpg', '.jpeg'],
'application/pdf': ['.pdf'],
},
onDragOver: (e) => {
+ // Retrieval file drag and drop is experimental feature
+ if (!experimentalFeature) return
if (
e.dataTransfer.items.length === 1 &&
activeThread?.assistants[0].tools &&
@@ -65,6 +99,8 @@ const ChatScreen: React.FC = () => {
},
onDragLeave: () => setDragOver(false),
onDrop: (files, rejectFiles) => {
+ // Retrieval file drag and drop is experimental feature
+ if (!experimentalFeature) return
if (
!files ||
files.length !== 1 ||
@@ -95,8 +131,13 @@ const ChatScreen: React.FC = () => {
},
})
- // TODO @faisal change this until we have sneakbar component
useEffect(() => {
+ if (dragRejected.code) {
+ snackbar({
+ description: renderError(dragRejected.code),
+ type: 'error',
+ })
+ }
setTimeout(() => {
if (dragRejected.code) {
setDragRejected({ code: '' })
@@ -104,22 +145,6 @@ const ChatScreen: React.FC = () => {
}, 2000)
}, [dragRejected.code])
- const renderError = (code: string) => {
- switch (code) {
- case 'multiple-upload':
- return 'Currently, we only support 1 attachment at the same time'
-
- case 'retrieval-off':
- return 'Turn on Retrieval in Assistant Settings to use this feature'
-
- case 'file-invalid-type':
- return 'We do not support this file type'
-
- default:
- return 'Oops, something error, please try again.'
- }
- }
-
return (
{/* Left side bar */}
@@ -133,33 +158,6 @@ const ChatScreen: React.FC = () => {
className="relative flex h-full w-full flex-col overflow-auto bg-background outline-none"
{...getRootProps()}
>
- {dragRejected.code !== '' && (
-
-
-
- {renderError(dragRejected.code)}
- setDragRejected({ code: '' })}
- />
-
-
- )}
-
{dragOver && (
{
)}
+
+ {activeModel && isGeneratingResponse && }
+ {loadModelError && }
+
{/* Right side bar */}
{activeThread && }
diff --git a/web/screens/ExploreModels/index.tsx b/web/screens/ExploreModels/index.tsx
index d988fcafc..398b2db08 100644
--- a/web/screens/ExploreModels/index.tsx
+++ b/web/screens/ExploreModels/index.tsx
@@ -52,9 +52,12 @@ const ExploreModelsScreen = () => {
if (loading) return
return (
-
+
-
+
![]() {
const { getServerLog } = useServerLog()
+ const serverEnabled = useAtomValue(serverEnabledAtom)
const [logs, setLogs] = useState([])
useEffect(() => {
getServerLog().then((log) => {
- if (typeof log?.split === 'function') setLogs(log.split(/\r?\n|\r|\n/g))
+ if (typeof log?.split === 'function') {
+ setLogs(log.split(/\r?\n|\r|\n/g))
+ }
})
// eslint-disable-next-line react-hooks/exhaustive-deps
- }, [logs])
+ }, [logs, serverEnabled])
return (
diff --git a/web/screens/LocalServer/index.tsx b/web/screens/LocalServer/index.tsx
index 1a954c692..b96f4c228 100644
--- a/web/screens/LocalServer/index.tsx
+++ b/web/screens/LocalServer/index.tsx
@@ -1,7 +1,6 @@
-/* eslint-disable @typescript-eslint/no-explicit-any */
'use client'
-import React, { useEffect, useState } from 'react'
+import React, { useCallback, useEffect, useState } from 'react'
import ScrollToBottom from 'react-scroll-to-bottom'
@@ -29,6 +28,7 @@ import { ExternalLinkIcon, InfoIcon } from 'lucide-react'
import { twMerge } from 'tailwind-merge'
import CardSidebar from '@/containers/CardSidebar'
+
import DropdownListSidebar, {
selectedModelAtom,
} from '@/containers/DropdownListSidebar'
@@ -58,7 +58,7 @@ const portAtom = atom('1337')
const LocalServerScreen = () => {
const [errorRangePort, setErrorRangePort] = useState(false)
const [serverEnabled, setServerEnabled] = useAtom(serverEnabledAtom)
- const showing = useAtomValue(showRightSideBarAtom)
+ const showRightSideBar = useAtomValue(showRightSideBarAtom)
const activeModelParams = useAtomValue(getActiveThreadModelParamsAtom)
const modelEngineParams = toSettingParams(activeModelParams)
@@ -66,43 +66,44 @@ const LocalServerScreen = () => {
const { openServerLog, clearServerLog } = useServerLog()
const { startModel, stateModel } = useActiveModel()
- const [selectedModel] = useAtom(selectedModelAtom)
+ const selectedModel = useAtomValue(selectedModelAtom)
const [isCorsEnabled, setIsCorsEnabled] = useAtom(corsEnabledAtom)
const [isVerboseEnabled, setIsVerboseEnabled] = useAtom(verboseEnabledAtom)
const [host, setHost] = useAtom(hostAtom)
const [port, setPort] = useAtom(portAtom)
+ const hostOptions = ['127.0.0.1', '0.0.0.0']
+
const FIRST_TIME_VISIT_API_SERVER = 'firstTimeVisitAPIServer'
const [firstTimeVisitAPIServer, setFirstTimeVisitAPIServer] =
useState (false)
- const handleChangePort = (value: any) => {
- if (Number(value) <= 0 || Number(value) >= 65536) {
- setErrorRangePort(true)
- } else {
- setErrorRangePort(false)
- }
- setPort(value)
- }
+ const handleChangePort = useCallback(
+ (value: string) => {
+ if (Number(value) <= 0 || Number(value) >= 65536) {
+ setErrorRangePort(true)
+ } else {
+ setErrorRangePort(false)
+ }
+ setPort(value)
+ },
+ [setPort]
+ )
useEffect(() => {
- if (
- localStorage.getItem(FIRST_TIME_VISIT_API_SERVER) === null ||
- localStorage.getItem(FIRST_TIME_VISIT_API_SERVER) === 'true'
- ) {
- localStorage.setItem(FIRST_TIME_VISIT_API_SERVER, 'true')
+ if (localStorage.getItem(FIRST_TIME_VISIT_API_SERVER) == null) {
setFirstTimeVisitAPIServer(true)
}
}, [firstTimeVisitAPIServer])
useEffect(() => {
handleChangePort(port)
- }, [])
+ }, [handleChangePort, port])
return (
-
+
{/* Left SideBar */}
@@ -116,7 +117,7 @@ const LocalServerScreen = () => {
|