fix(dolphin phi2): resolved conflict

hahuyhoang411 2024-02-19 09:56:44 +07:00
commit f7a3c921fd
114 changed files with 2183 additions and 1365 deletions

View File

@ -1,6 +1,12 @@
name: Jan Build Electron App Nightly or Manual
on:
push:
branches:
- main
paths-ignore:
- 'README.md'
- 'docs/**'
schedule:
- cron: '0 20 * * 1,2,3' # At 8 PM UTC on Monday, Tuesday, and Wednesday, which is 3 AM UTC+7 on Tuesday, Wednesday, and Thursday
workflow_dispatch:
@ -23,12 +29,20 @@ jobs:
- name: Set public provider
id: set-public-provider
run: |
if [ ${{ github.event == 'workflow_dispatch' }} ]; then
if [ "${{ github.event_name }}" == "workflow_dispatch" ]; then
echo "::set-output name=public_provider::${{ github.event.inputs.public_provider }}"
echo "::set-output name=ref::${{ github.ref }}"
else
echo "::set-output name=public_provider::cloudflare-r2"
echo "::set-output name=ref::refs/heads/dev"
if [ "${{ github.event_name }}" == "schedule" ]; then
echo "::set-output name=public_provider::cloudflare-r2"
echo "::set-output name=ref::refs/heads/dev"
elif [ "${{ github.event_name }}" == "push" ]; then
echo "::set-output name=public_provider::cloudflare-r2"
echo "::set-output name=ref::${{ github.ref }}"
else
echo "::set-output name=public_provider::none"
echo "::set-output name=ref::${{ github.ref }}"
fi
fi
# Job: create the update app version based on the latest release tag with build number, and save it to output
get-update-version:
@ -73,6 +87,17 @@ jobs:
push_to_branch: dev
new_version: ${{ needs.get-update-version.outputs.new_version }}
noti-discord-pre-release-and-update-url-readme:
needs: [build-macos, build-windows-x64, build-linux-x64, get-update-version, set-public-provider]
secrets: inherit
if: github.event_name == 'push'
uses: ./.github/workflows/template-noti-discord-and-update-url-readme.yml
with:
ref: refs/heads/dev
build_reason: Pre-release
push_to_branch: dev
new_version: ${{ needs.get-update-version.outputs.new_version }}
noti-discord-manual-and-update-url-readme:
needs: [build-macos, build-windows-x64, build-linux-x64, get-update-version, set-public-provider]
secrets: inherit

View File

@ -1,52 +0,0 @@
name: Jan Build Electron Pre Release
on:
push:
branches:
- main
paths:
- "!README.md"
jobs:
# Job: create the update app version based on the latest release tag with build number, and save it to output
get-update-version:
uses: ./.github/workflows/template-get-update-version.yml
build-macos:
uses: ./.github/workflows/template-build-macos.yml
secrets: inherit
needs: [get-update-version]
with:
ref: ${{ github.ref }}
public_provider: cloudflare-r2
new_version: ${{ needs.get-update-version.outputs.new_version }}
build-windows-x64:
uses: ./.github/workflows/template-build-windows-x64.yml
secrets: inherit
needs: [get-update-version]
with:
ref: ${{ github.ref }}
public_provider: cloudflare-r2
new_version: ${{ needs.get-update-version.outputs.new_version }}
build-linux-x64:
uses: ./.github/workflows/template-build-linux-x64.yml
secrets: inherit
needs: [get-update-version]
with:
ref: ${{ github.ref }}
public_provider: cloudflare-r2
new_version: ${{ needs.get-update-version.outputs.new_version }}
noti-discord-nightly-and-update-url-readme:
needs: [build-macos, build-windows-x64, build-linux-x64, get-update-version]
secrets: inherit
if: github.event_name == 'push' && github.ref == 'refs/heads/main'
uses: ./.github/workflows/template-noti-discord-and-update-url-readme.yml
with:
ref: refs/heads/dev
build_reason: Nightly
push_to_branch: dev
new_version: ${{ needs.get-update-version.outputs.new_version }}

View File

@ -98,8 +98,8 @@ jobs:
make build-and-publish
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
ANALYTICS_ID: ${{ secrets.JAN_APP_POSTHOG_PROJECT_API_KEY }}
ANALYTICS_HOST: ${{ secrets.JAN_APP_POSTHOG_URL }}
ANALYTICS_ID: ${{ secrets.JAN_APP_UMAMI_PROJECT_API_KEY }}
ANALYTICS_HOST: ${{ secrets.JAN_APP_UMAMI_URL }}
- name: Upload Artifact .deb file
if: inputs.public_provider != 'github'

View File

@ -137,8 +137,8 @@ jobs:
APPLE_APP_SPECIFIC_PASSWORD: ${{ secrets.APPLE_APP_SPECIFIC_PASSWORD }}
APP_PATH: "."
DEVELOPER_ID: ${{ secrets.DEVELOPER_ID }}
ANALYTICS_ID: ${{ secrets.JAN_APP_POSTHOG_PROJECT_API_KEY }}
ANALYTICS_HOST: ${{ secrets.JAN_APP_POSTHOG_URL }}
ANALYTICS_ID: ${{ secrets.JAN_APP_UMAMI_PROJECT_API_KEY }}
ANALYTICS_HOST: ${{ secrets.JAN_APP_UMAMI_URL }}
- name: Upload Artifact
if: inputs.public_provider != 'github'

View File

@ -127,8 +127,8 @@ jobs:
make build-and-publish
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
ANALYTICS_ID: ${{ secrets.JAN_APP_POSTHOG_PROJECT_API_KEY }}
ANALYTICS_HOST: ${{ secrets.JAN_APP_POSTHOG_URL }}
ANALYTICS_ID: ${{ secrets.JAN_APP_UMAMI_PROJECT_API_KEY }}
ANALYTICS_HOST: ${{ secrets.JAN_APP_UMAMI_URL }}
AZURE_KEY_VAULT_URI: ${{ secrets.AZURE_KEY_VAULT_URI }}
AZURE_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }}
AZURE_TENANT_ID: ${{ secrets.AZURE_TENANT_ID }}

View File

@ -17,7 +17,7 @@ jobs:
with:
fetch-depth: "0"
token: ${{ secrets.PAT_SERVICE_ACCOUNT }}
ref: main
ref: dev
- name: Get Latest Release
uses: pozetroninc/github-action-get-latest-release@v0.7.0
@ -46,4 +46,4 @@ jobs:
git config --global user.name "Service Account"
git add README.md
git commit -m "Update README.md with Stable Download URLs"
git -c http.extraheader="AUTHORIZATION: bearer ${{ secrets.PAT_SERVICE_ACCOUNT }}" push origin HEAD:main
git -c http.extraheader="AUTHORIZATION: bearer ${{ secrets.PAT_SERVICE_ACCOUNT }}" push origin HEAD:dev

View File

@ -43,31 +43,31 @@ Jan is an open-source ChatGPT alternative that runs 100% offline on your compute
<tr style="text-align:center">
<td style="text-align:center"><b>Stable (Recommended)</b></td>
<td style="text-align:center">
<a href='https://github.com/janhq/jan/releases/download/v0.4.4/jan-win-x64-0.4.4.exe'>
<a href='https://github.com/janhq/jan/releases/download/v0.4.5/jan-win-x64-0.4.5.exe'>
<img src='./docs/static/img/windows.png' style="height:14px; width: 14px" />
<b>jan.exe</b>
</a>
</td>
<td style="text-align:center">
<a href='https://github.com/janhq/jan/releases/download/v0.4.4/jan-mac-x64-0.4.4.dmg'>
<a href='https://github.com/janhq/jan/releases/download/v0.4.5/jan-mac-x64-0.4.5.dmg'>
<img src='./docs/static/img/mac.png' style="height:15px; width: 15px" />
<b>Intel</b>
</a>
</td>
<td style="text-align:center">
<a href='https://github.com/janhq/jan/releases/download/v0.4.4/jan-mac-arm64-0.4.4.dmg'>
<a href='https://github.com/janhq/jan/releases/download/v0.4.5/jan-mac-arm64-0.4.5.dmg'>
<img src='./docs/static/img/mac.png' style="height:15px; width: 15px" />
<b>M1/M2</b>
</a>
</td>
<td style="text-align:center">
<a href='https://github.com/janhq/jan/releases/download/v0.4.4/jan-linux-amd64-0.4.4.deb'>
<a href='https://github.com/janhq/jan/releases/download/v0.4.5/jan-linux-amd64-0.4.5.deb'>
<img src='./docs/static/img/linux.png' style="height:14px; width: 14px" />
<b>jan.deb</b>
</a>
</td>
<td style="text-align:center">
<a href='https://github.com/janhq/jan/releases/download/v0.4.4/jan-linux-x86_64-0.4.4.AppImage'>
<a href='https://github.com/janhq/jan/releases/download/v0.4.5/jan-linux-x86_64-0.4.5.AppImage'>
<img src='./docs/static/img/linux.png' style="height:14px; width: 14px" />
<b>jan.AppImage</b>
</a>
@ -76,31 +76,31 @@ Jan is an open-source ChatGPT alternative that runs 100% offline on your compute
<tr style="text-align:center">
<td style="text-align:center"><b>Experimental (Nightly Build)</b></td>
<td style="text-align:center">
<a href='https://delta.jan.ai/latest/jan-win-x64-0.4.4-194.exe'>
<a href='https://delta.jan.ai/latest/jan-win-x64-0.4.5-224.exe'>
<img src='./docs/static/img/windows.png' style="height:14px; width: 14px" />
<b>jan.exe</b>
</a>
</td>
<td style="text-align:center">
<a href='https://delta.jan.ai/latest/jan-mac-x64-0.4.4-194.dmg'>
<a href='https://delta.jan.ai/latest/jan-mac-x64-0.4.5-224.dmg'>
<img src='./docs/static/img/mac.png' style="height:15px; width: 15px" />
<b>Intel</b>
</a>
</td>
<td style="text-align:center">
<a href='https://delta.jan.ai/latest/jan-mac-arm64-0.4.4-194.dmg'>
<a href='https://delta.jan.ai/latest/jan-mac-arm64-0.4.5-224.dmg'>
<img src='./docs/static/img/mac.png' style="height:15px; width: 15px" />
<b>M1/M2</b>
</a>
</td>
<td style="text-align:center">
<a href='https://delta.jan.ai/latest/jan-linux-amd64-0.4.4-194.deb'>
<a href='https://delta.jan.ai/latest/jan-linux-amd64-0.4.5-224.deb'>
<img src='./docs/static/img/linux.png' style="height:14px; width: 14px" />
<b>jan.deb</b>
</a>
</td>
<td style="text-align:center">
<a href='https://delta.jan.ai/latest/jan-linux-x86_64-0.4.4-194.AppImage'>
<a href='https://delta.jan.ai/latest/jan-linux-x86_64-0.4.5-224.AppImage'>
<img src='./docs/static/img/linux.png' style="height:14px; width: 14px" />
<b>jan.AppImage</b>
</a>

View File

@ -3,7 +3,6 @@
* @description Enum of all the routes exposed by the app
*/
export enum AppRoute {
appDataPath = 'appDataPath',
openExternalUrl = 'openExternalUrl',
openAppDirectory = 'openAppDirectory',
openFileExplore = 'openFileExplorer',
@ -12,6 +11,7 @@ export enum AppRoute {
updateAppConfiguration = 'updateAppConfiguration',
relaunch = 'relaunch',
joinPath = 'joinPath',
isSubdirectory = 'isSubdirectory',
baseName = 'baseName',
startServer = 'startServer',
stopServer = 'stopServer',
@ -61,6 +61,7 @@ export enum FileManagerRoute {
syncFile = 'syncFile',
getJanDataFolderPath = 'getJanDataFolderPath',
getResourcePath = 'getResourcePath',
getUserHomePath = 'getUserHomePath',
fileStat = 'fileStat',
writeBlob = 'writeBlob',
}

View File

@ -22,7 +22,11 @@ const executeOnMain: (extension: string, method: string, ...args: any[]) => Prom
* @param {object} network - Optional object to specify proxy/whether to ignore SSL certificates.
* @returns {Promise<any>} A promise that resolves when the file is downloaded.
*/
const downloadFile: (url: string, fileName: string, network?: { proxy?: string, ignoreSSL?: boolean }) => Promise<any> = (url, fileName, network) => {
const downloadFile: (
url: string,
fileName: string,
network?: { proxy?: string; ignoreSSL?: boolean }
) => Promise<any> = (url, fileName, network) => {
return global.core?.api?.downloadFile(url, fileName, network)
}
@ -79,6 +83,12 @@ const openExternalUrl: (url: string) => Promise<any> = (url) =>
*/
const getResourcePath: () => Promise<string> = () => global.core.api?.getResourcePath()
/**
* Gets the user's home path.
* @returns {Promise<string>} A promise that resolves with the user's home path.
*/
const getUserHomePath = (): Promise<string> => global.core.api?.getUserHomePath()
/**
* Log to file from browser processes.
*
@ -87,6 +97,17 @@ const getResourcePath: () => Promise<string> = () => global.core.api?.getResourc
const log: (message: string, fileName?: string) => void = (message, fileName) =>
global.core.api?.log(message, fileName)
/**
* Check whether the path is a subdirectory of another path.
*
* @param from - The path to check.
* @param to - The path to check against.
*
* @returns {Promise<boolean>} - A promise that resolves with a boolean indicating whether the path is a subdirectory.
*/
const isSubdirectory: (from: string, to: string) => Promise<boolean> = (from: string, to: string) =>
global.core.api?.isSubdirectory(from, to)
/**
* Register extension point function type definition
*/
@ -94,7 +115,7 @@ export type RegisterExtensionPoint = (
extensionName: string,
extensionId: string,
method: Function,
priority?: number,
priority?: number
) => void
/**
@ -111,5 +132,7 @@ export {
openExternalUrl,
baseName,
log,
isSubdirectory,
getUserHomePath,
FileStat,
}
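The two additions compose naturally from the renderer side. A minimal sketch, assuming the @janhq/core browser entry point re-exports both helpers as shown above (isInsideHome is a hypothetical wrapper):

```typescript
import { getUserHomePath, isSubdirectory } from '@janhq/core'

// Hypothetical guard: accept a data-folder candidate only if it lives
// under the user's home directory.
const isInsideHome = async (candidate: string): Promise<boolean> => {
  const home = await getUserHomePath()
  // true when `candidate` resolves to a location under `home`
  return isSubdirectory(home, candidate)
}
```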

View File

@ -2,7 +2,8 @@ import fs from 'fs'
import { JanApiRouteConfiguration, RouteConfiguration } from './configuration'
import { join } from 'path'
import { ContentType, MessageStatus, Model, ThreadMessage } from './../../../index'
import { getJanDataFolderPath } from '../../utils'
import { getEngineConfiguration, getJanDataFolderPath } from '../../utils'
import { DEFAULT_CHAT_COMPLETION_URL } from './consts'
export const getBuilder = async (configuration: RouteConfiguration) => {
const directoryPath = join(getJanDataFolderPath(), configuration.dirName)
@ -309,7 +310,7 @@ export const chatCompletions = async (request: any, reply: any) => {
const engineConfiguration = await getEngineConfiguration(requestedModel.engine)
let apiKey: string | undefined = undefined
let apiUrl: string = 'http://127.0.0.1:3928/inferences/llamacpp/chat_completion' // default nitro url
let apiUrl: string = DEFAULT_CHAT_COMPLETION_URL
if (engineConfiguration) {
apiKey = engineConfiguration.api_key
@ -320,7 +321,7 @@ export const chatCompletions = async (request: any, reply: any) => {
'Content-Type': 'text/event-stream',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive',
"Access-Control-Allow-Origin": "*"
'Access-Control-Allow-Origin': '*',
})
const headers: Record<string, any> = {
@ -346,13 +347,3 @@ export const chatCompletions = async (request: any, reply: any) => {
response.body.pipe(reply.raw)
}
}
const getEngineConfiguration = async (engineId: string) => {
if (engineId !== 'openai') {
return undefined
}
const directoryPath = join(getJanDataFolderPath(), 'engines')
const filePath = join(directoryPath, `${engineId}.json`)
const data = await fs.readFileSync(filePath, 'utf-8')
return JSON.parse(data)
}

View File

@ -0,0 +1,19 @@
// The PORT to use for the Nitro subprocess
export const NITRO_DEFAULT_PORT = 3928
// The HOST address to use for the Nitro subprocess
export const LOCAL_HOST = '127.0.0.1'
export const SUPPORTED_MODEL_FORMAT = '.gguf'
// The URL for the Nitro subprocess
const NITRO_HTTP_SERVER_URL = `http://${LOCAL_HOST}:${NITRO_DEFAULT_PORT}`
// The URL for the Nitro subprocess to load a model
export const NITRO_HTTP_LOAD_MODEL_URL = `${NITRO_HTTP_SERVER_URL}/inferences/llamacpp/loadmodel`
// The URL for the Nitro subprocess to validate a model
export const NITRO_HTTP_VALIDATE_MODEL_URL = `${NITRO_HTTP_SERVER_URL}/inferences/llamacpp/modelstatus`
// The URL for the Nitro subprocess to kill itself
export const NITRO_HTTP_KILL_URL = `${NITRO_HTTP_SERVER_URL}/processmanager/destroy`
export const DEFAULT_CHAT_COMPLETION_URL = `http://${LOCAL_HOST}:${NITRO_DEFAULT_PORT}/inferences/llamacpp/chat_completion` // default nitro url
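Once a model is loaded, the new constant can be exercised directly. A minimal smoke-test sketch, assuming Nitro accepts an OpenAI-style request body at this endpoint (smokeTest is hypothetical):

```typescript
import { DEFAULT_CHAT_COMPLETION_URL } from './consts'

// Hypothetical smoke test against a locally running Nitro instance.
const smokeTest = async () => {
  const res = await fetch(DEFAULT_CHAT_COMPLETION_URL, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      messages: [{ role: 'user', content: 'Hello' }],
      stream: false, // assumption: a non-streaming request works for a quick check
    }),
  })
  console.debug(res.status)
}
```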

View File

@ -0,0 +1,351 @@
import fs from 'fs'
import { join } from 'path'
import { getJanDataFolderPath, getJanExtensionsPath, getSystemResourceInfo } from '../../utils'
import { logServer } from '../../log'
import { ChildProcessWithoutNullStreams, spawn } from 'child_process'
import { Model, ModelSettingParams, PromptTemplate } from '../../../types'
import {
LOCAL_HOST,
NITRO_DEFAULT_PORT,
NITRO_HTTP_KILL_URL,
NITRO_HTTP_LOAD_MODEL_URL,
NITRO_HTTP_VALIDATE_MODEL_URL,
SUPPORTED_MODEL_FORMAT,
} from './consts'
// The subprocess instance for Nitro
let subprocess: ChildProcessWithoutNullStreams | undefined = undefined
// TODO: move this to core type
interface NitroModelSettings extends ModelSettingParams {
llama_model_path: string
cpu_threads: number
}
export const startModel = async (modelId: string, settingParams?: ModelSettingParams) => {
try {
await runModel(modelId, settingParams)
return {
message: `Model ${modelId} started`,
}
} catch (e) {
return {
error: e,
}
}
}
const runModel = async (modelId: string, settingParams?: ModelSettingParams): Promise<void> => {
const janDataFolderPath = getJanDataFolderPath()
const modelFolderFullPath = join(janDataFolderPath, 'models', modelId)
if (!fs.existsSync(modelFolderFullPath)) {
throw `Model not found: ${modelId}`
}
const files: string[] = fs.readdirSync(modelFolderFullPath)
// Look for GGUF model file
const ggufBinFile = files.find((file) => file.toLowerCase().includes(SUPPORTED_MODEL_FORMAT))
const modelMetadataPath = join(modelFolderFullPath, 'model.json')
const modelMetadata: Model = JSON.parse(fs.readFileSync(modelMetadataPath, 'utf-8'))
if (!ggufBinFile) {
throw 'No GGUF model file found'
}
const modelBinaryPath = join(modelFolderFullPath, ggufBinFile)
const nitroResourceProbe = await getSystemResourceInfo()
const nitroModelSettings: NitroModelSettings = {
...modelMetadata.settings,
...settingParams,
llama_model_path: modelBinaryPath,
// This is critical and requires the real physical CPU core count (or performance-core count)
cpu_threads: Math.max(1, nitroResourceProbe.numCpuPhysicalCore),
...(modelMetadata.settings.mmproj && {
mmproj: join(modelFolderFullPath, modelMetadata.settings.mmproj),
}),
}
logServer(`[NITRO]::Debug: Nitro model settings: ${JSON.stringify(nitroModelSettings)}`)
// Convert settings.prompt_template to system_prompt, user_prompt, ai_prompt
if (modelMetadata.settings.prompt_template) {
const promptTemplate = modelMetadata.settings.prompt_template
const prompt = promptTemplateConverter(promptTemplate)
if (prompt?.error) {
return Promise.reject(prompt.error)
}
nitroModelSettings.system_prompt = prompt.system_prompt
nitroModelSettings.user_prompt = prompt.user_prompt
nitroModelSettings.ai_prompt = prompt.ai_prompt
}
await runNitroAndLoadModel(modelId, nitroModelSettings)
}
// TODO: move to util
const promptTemplateConverter = (promptTemplate: string): PromptTemplate => {
// Split the string using the markers
const systemMarker = '{system_message}'
const promptMarker = '{prompt}'
if (promptTemplate.includes(systemMarker) && promptTemplate.includes(promptMarker)) {
// Find the indices of the markers
const systemIndex = promptTemplate.indexOf(systemMarker)
const promptIndex = promptTemplate.indexOf(promptMarker)
// Extract the parts of the string
const system_prompt = promptTemplate.substring(0, systemIndex)
const user_prompt = promptTemplate.substring(systemIndex + systemMarker.length, promptIndex)
const ai_prompt = promptTemplate.substring(promptIndex + promptMarker.length)
// Return the split parts
return { system_prompt, user_prompt, ai_prompt }
} else if (promptTemplate.includes(promptMarker)) {
// Extract the parts of the string for the case where only promptMarker is present
const promptIndex = promptTemplate.indexOf(promptMarker)
const user_prompt = promptTemplate.substring(0, promptIndex)
const ai_prompt = promptTemplate.substring(promptIndex + promptMarker.length)
// Return the split parts
return { user_prompt, ai_prompt }
}
// Return an error if none of the conditions are met
return { error: 'Cannot split prompt template' }
}
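For illustration, a hypothetical ChatML-style template splits cleanly into the three parts:

```typescript
// Hypothetical template string containing both markers.
const parts = promptTemplateConverter(
  '<|system|>{system_message}<|user|>{prompt}<|assistant|>'
)
// parts.system_prompt === '<|system|>'
// parts.user_prompt   === '<|user|>'
// parts.ai_prompt     === '<|assistant|>'
```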
const runNitroAndLoadModel = async (modelId: string, modelSettings: NitroModelSettings) => {
// Gather system information for CPU physical cores and memory
const tcpPortUsed = require('tcp-port-used')
await stopModel(modelId)
await tcpPortUsed.waitUntilFree(NITRO_DEFAULT_PORT, 300, 5000)
/**
* There is a problem with the Windows process manager.
* We should wait a while to make sure the port is free and the subprocess is killed.
* The tested threshold is 500ms
**/
if (process.platform === 'win32') {
await new Promise((resolve) => setTimeout(resolve, 500))
}
await spawnNitroProcess()
await loadLLMModel(modelSettings)
await validateModelStatus()
}
const spawnNitroProcess = async (): Promise<void> => {
logServer(`[NITRO]::Debug: Spawning Nitro subprocess...`)
let binaryFolder = join(
getJanExtensionsPath(),
'@janhq',
'inference-nitro-extension',
'dist',
'bin'
)
let executableOptions = executableNitroFile()
const tcpPortUsed = require('tcp-port-used')
const args: string[] = ['1', LOCAL_HOST, NITRO_DEFAULT_PORT.toString()]
// Execute the binary
logServer(
`[NITRO]::Debug: Spawn nitro at path: ${executableOptions.executablePath}, and args: ${args}`
)
subprocess = spawn(
executableOptions.executablePath,
['1', LOCAL_HOST, NITRO_DEFAULT_PORT.toString()],
{
cwd: binaryFolder,
env: {
...process.env,
CUDA_VISIBLE_DEVICES: executableOptions.cudaVisibleDevices,
},
}
)
// Handle subprocess output
subprocess.stdout.on('data', (data: any) => {
logServer(`[NITRO]::Debug: ${data}`)
})
subprocess.stderr.on('data', (data: any) => {
logServer(`[NITRO]::Error: ${data}`)
})
subprocess.on('close', (code: any) => {
logServer(`[NITRO]::Debug: Nitro exited with code: ${code}`)
subprocess = undefined
})
tcpPortUsed.waitUntilUsed(NITRO_DEFAULT_PORT, 300, 30000).then(() => {
logServer(`[NITRO]::Debug: Nitro is ready`)
})
}
type NitroExecutableOptions = {
executablePath: string
cudaVisibleDevices: string
}
const executableNitroFile = (): NitroExecutableOptions => {
const nvidiaInfoFilePath = join(getJanDataFolderPath(), 'settings', 'settings.json')
let binaryFolder = join(
getJanExtensionsPath(),
'@janhq',
'inference-nitro-extension',
'dist',
'bin'
)
let cudaVisibleDevices = ''
let binaryName = 'nitro'
/**
* The binary folder is different for each platform.
*/
if (process.platform === 'win32') {
/**
* For Windows: win-cpu, win-cuda-11-7, win-cuda-12-0
*/
let nvidiaInfo = JSON.parse(fs.readFileSync(nvidiaInfoFilePath, 'utf-8'))
if (nvidiaInfo['run_mode'] === 'cpu') {
binaryFolder = join(binaryFolder, 'win-cpu')
} else {
if (nvidiaInfo['cuda'].version === '12') {
binaryFolder = join(binaryFolder, 'win-cuda-12-0')
} else {
binaryFolder = join(binaryFolder, 'win-cuda-11-7')
}
cudaVisibleDevices = nvidiaInfo['gpu_highest_vram']
}
binaryName = 'nitro.exe'
} else if (process.platform === 'darwin') {
/**
* For macOS: mac-arm64 (Apple Silicon), mac-x64 (Intel)
*/
if (process.arch === 'arm64') {
binaryFolder = join(binaryFolder, 'mac-arm64')
} else {
binaryFolder = join(binaryFolder, 'mac-x64')
}
} else {
/**
* For Linux: linux-cpu, linux-cuda-11-7, linux-cuda-12-0
*/
let nvidiaInfo = JSON.parse(fs.readFileSync(nvidiaInfoFilePath, 'utf-8'))
if (nvidiaInfo['run_mode'] === 'cpu') {
binaryFolder = join(binaryFolder, 'linux-cpu')
} else {
if (nvidiaInfo['cuda'].version === '12') {
binaryFolder = join(binaryFolder, 'linux-cuda-12-0')
} else {
binaryFolder = join(binaryFolder, 'linux-cuda-11-7')
}
cudaVisibleDevices = nvidiaInfo['gpu_highest_vram']
}
}
return {
executablePath: join(binaryFolder, binaryName),
cudaVisibleDevices,
}
}
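The branching above implies a rough shape for the settings.json it reads. A hypothetical type sketch inferred from those reads, not an authoritative schema:

```typescript
// Inferred from the fields executableNitroFile reads (hypothetical type).
type NvidiaSettings = {
  run_mode: 'cpu' | string // any non-'cpu' value selects a CUDA build
  cuda?: { version: string } // '12' selects cuda-12-0, anything else cuda-11-7
  gpu_highest_vram?: string // forwarded as CUDA_VISIBLE_DEVICES
}
```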
const validateModelStatus = async (): Promise<void> => {
// Send a GET request to the validation URL.
// Retry the request up to 3 times if it fails, with a delay of 500 milliseconds between retries.
const fetchRT = require('fetch-retry')
const fetchRetry = fetchRT(fetch)
return fetchRetry(NITRO_HTTP_VALIDATE_MODEL_URL, {
method: 'GET',
headers: {
'Content-Type': 'application/json',
},
retries: 5,
retryDelay: 500,
}).then(async (res: Response) => {
logServer(`[NITRO]::Debug: Validate model state success with response ${JSON.stringify(res)}`)
// If the response is OK, check model_loaded status.
if (res.ok) {
const body = await res.json()
// If the model is loaded, return an empty object.
// Otherwise, return an object with an error message.
if (body.model_loaded) {
return Promise.resolve()
}
}
return Promise.reject('Validate model status failed')
})
}
const loadLLMModel = async (settings: NitroModelSettings): Promise<Response> => {
logServer(`[NITRO]::Debug: Loading model with params ${JSON.stringify(settings)}`)
const fetchRT = require('fetch-retry')
const fetchRetry = fetchRT(fetch)
return fetchRetry(NITRO_HTTP_LOAD_MODEL_URL, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify(settings),
retries: 3,
retryDelay: 500,
})
.then((res: any) => {
logServer(`[NITRO]::Debug: Load model success with response ${JSON.stringify(res)}`)
return Promise.resolve(res)
})
.catch((err: any) => {
logServer(`[NITRO]::Error: Load model failed with error ${err}`)
return Promise.reject(err)
})
}
/**
* Stop model and kill nitro process.
*/
export const stopModel = async (_modelId: string) => {
if (!subprocess) {
return {
error: "Model isn't running",
}
}
return new Promise((resolve, reject) => {
const controller = new AbortController()
setTimeout(() => {
controller.abort()
reject({
error: 'Failed to stop model: Timed out',
})
}, 5000)
const tcpPortUsed = require('tcp-port-used')
logServer(`[NITRO]::Debug: Request to kill Nitro`)
fetch(NITRO_HTTP_KILL_URL, {
method: 'DELETE',
signal: controller.signal,
})
.then(() => {
subprocess?.kill()
subprocess = undefined
})
.catch(() => {
// don't need to do anything, we still kill the subprocess
})
.then(() => tcpPortUsed.waitUntilFree(NITRO_DEFAULT_PORT, 300, 5000))
.then(() => logServer(`[NITRO]::Debug: Nitro process is terminated`))
.then(() =>
resolve({
message: 'Model stopped',
})
)
})
}

View File

@ -10,6 +10,8 @@ import {
} from '../common/builder'
import { JanApiRouteConfiguration } from '../common/configuration'
import { startModel, stopModel } from '../common/startStopModel'
import { ModelSettingParams } from '../../../types'
export const commonRouter = async (app: HttpServer) => {
// Common Routes
@ -17,19 +19,33 @@ export const commonRouter = async (app: HttpServer) => {
app.get(`/${key}`, async (_request) => getBuilder(JanApiRouteConfiguration[key]))
app.get(`/${key}/:id`, async (request: any) =>
retrieveBuilder(JanApiRouteConfiguration[key], request.params.id),
retrieveBuilder(JanApiRouteConfiguration[key], request.params.id)
)
app.delete(`/${key}/:id`, async (request: any) =>
deleteBuilder(JanApiRouteConfiguration[key], request.params.id),
deleteBuilder(JanApiRouteConfiguration[key], request.params.id)
)
})
// Download Model Routes
app.get(`/models/download/:modelId`, async (request: any) =>
downloadModel(request.params.modelId, { ignoreSSL: request.query.ignoreSSL === 'true', proxy: request.query.proxy }),
downloadModel(request.params.modelId, {
ignoreSSL: request.query.ignoreSSL === 'true',
proxy: request.query.proxy,
})
)
app.put(`/models/:modelId/start`, async (request: any) => {
let settingParams: ModelSettingParams | undefined = undefined
if (Object.keys(request.body).length !== 0) {
settingParams = JSON.parse(request.body) as ModelSettingParams
}
return startModel(request.params.modelId, settingParams)
})
app.put(`/models/:modelId/stop`, async (request: any) => stopModel(request.params.modelId))
// Chat Completion Routes
app.post(`/chat/completions`, async (request: any, reply: any) => chatCompletions(request, reply))
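With the start/stop routes registered above, a client can drive the model lifecycle over HTTP. A sketch assuming the router is mounted under /v1 on port 1337, as the JAN_SERVER_INFERENCE_URL introduced later in this commit suggests (restartModel is hypothetical):

```typescript
const BASE = 'http://localhost:1337/v1' // assumption: mount point and port

const restartModel = async (modelId: string) => {
  // The start handler JSON.parses the raw body, so send a JSON string.
  await fetch(`${BASE}/models/${modelId}/start`, {
    method: 'PUT',
    body: JSON.stringify({ ctx_len: 2048 }), // ModelSettingParams override
  })
  await fetch(`${BASE}/models/${modelId}/stop`, { method: 'PUT' })
}
```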

View File

@ -8,5 +8,7 @@ export const fsRouter = async (app: HttpServer) => {
app.post(`/app/${FileManagerRoute.getResourcePath}`, async (request: any, reply: any) => {})
app.post(`/app/${FileManagerRoute.getUserHomePath}`, async (request: any, reply: any) => {})
app.post(`/app/${FileManagerRoute.fileStat}`, async (request: any, reply: any) => {})
}

View File

@ -2,38 +2,36 @@ import fs from 'fs'
import util from 'util'
import { getAppLogPath, getServerLogPath } from './utils'
export const log = function (message: string) {
const appLogPath = getAppLogPath()
export const log = (message: string) => {
const path = getAppLogPath()
if (!message.startsWith('[')) {
message = `[APP]::${message}`
}
message = `${new Date().toISOString()} ${message}`
if (fs.existsSync(appLogPath)) {
var log_file = fs.createWriteStream(appLogPath, {
flags: 'a',
})
log_file.write(util.format(message) + '\n')
log_file.close()
console.debug(message)
}
writeLog(message, path)
}
export const logServer = function (message: string) {
const serverLogPath = getServerLogPath()
export const logServer = (message: string) => {
const path = getServerLogPath()
if (!message.startsWith('[')) {
message = `[SERVER]::${message}`
}
message = `${new Date().toISOString()} ${message}`
writeLog(message, path)
}
if (fs.existsSync(serverLogPath)) {
var log_file = fs.createWriteStream(serverLogPath, {
const writeLog = (message: string, logPath: string) => {
if (!fs.existsSync(logPath)) {
fs.writeFileSync(logPath, message)
} else {
const logFile = fs.createWriteStream(logPath, {
flags: 'a',
})
log_file.write(util.format(message) + '\n')
log_file.close()
logFile.write(util.format(message) + '\n')
logFile.close()
console.debug(message)
}
}

View File

@ -1,16 +1,18 @@
import { AppConfiguration } from "../../types";
import { join } from "path";
import fs from "fs";
import os from "os";
import { AppConfiguration, SystemResourceInfo } from '../../types'
import { join } from 'path'
import fs from 'fs'
import os from 'os'
import { log, logServer } from '../log'
import childProcess from 'child_process'
// TODO: move this to core
const configurationFileName = "settings.json";
const configurationFileName = 'settings.json'
// TODO: do no specify app name in framework module
const defaultJanDataFolder = join(os.homedir(), "jan");
const defaultJanDataFolder = join(os.homedir(), 'jan')
const defaultAppConfig: AppConfiguration = {
data_folder: defaultJanDataFolder,
};
}
/**
* Getting App Configurations.
@ -20,39 +22,39 @@ const defaultAppConfig: AppConfiguration = {
export const getAppConfigurations = (): AppConfiguration => {
// Retrieve Application Support folder path
// Fallback to user home directory if not found
const configurationFile = getConfigurationFilePath();
const configurationFile = getConfigurationFilePath()
if (!fs.existsSync(configurationFile)) {
// create default app config if we don't have one
console.debug(`App config not found, creating default config at ${configurationFile}`);
fs.writeFileSync(configurationFile, JSON.stringify(defaultAppConfig));
return defaultAppConfig;
console.debug(`App config not found, creating default config at ${configurationFile}`)
fs.writeFileSync(configurationFile, JSON.stringify(defaultAppConfig))
return defaultAppConfig
}
try {
const appConfigurations: AppConfiguration = JSON.parse(
fs.readFileSync(configurationFile, "utf-8"),
);
return appConfigurations;
fs.readFileSync(configurationFile, 'utf-8')
)
return appConfigurations
} catch (err) {
console.error(`Failed to read app config, return default config instead! Err: ${err}`);
return defaultAppConfig;
console.error(`Failed to read app config, return default config instead! Err: ${err}`)
return defaultAppConfig
}
};
}
const getConfigurationFilePath = () =>
join(
global.core?.appPath() || process.env[process.platform == "win32" ? "USERPROFILE" : "HOME"],
configurationFileName,
);
global.core?.appPath() || process.env[process.platform == 'win32' ? 'USERPROFILE' : 'HOME'],
configurationFileName
)
export const updateAppConfiguration = (configuration: AppConfiguration): Promise<void> => {
const configurationFile = getConfigurationFilePath();
console.debug("updateAppConfiguration, configurationFile: ", configurationFile);
const configurationFile = getConfigurationFilePath()
console.debug('updateAppConfiguration, configurationFile: ', configurationFile)
fs.writeFileSync(configurationFile, JSON.stringify(configuration));
return Promise.resolve();
};
fs.writeFileSync(configurationFile, JSON.stringify(configuration))
return Promise.resolve()
}
/**
* Utility function to get server log path
@ -60,13 +62,13 @@ export const updateAppConfiguration = (configuration: AppConfiguration): Promise
* @returns {string} The log path.
*/
export const getServerLogPath = (): string => {
const appConfigurations = getAppConfigurations();
const logFolderPath = join(appConfigurations.data_folder, "logs");
const appConfigurations = getAppConfigurations()
const logFolderPath = join(appConfigurations.data_folder, 'logs')
if (!fs.existsSync(logFolderPath)) {
fs.mkdirSync(logFolderPath, { recursive: true });
fs.mkdirSync(logFolderPath, { recursive: true })
}
return join(logFolderPath, "server.log");
};
return join(logFolderPath, 'server.log')
}
/**
* Utility function to get app log path
@ -74,13 +76,13 @@ export const getServerLogPath = (): string => {
* @returns {string} The log path.
*/
export const getAppLogPath = (): string => {
const appConfigurations = getAppConfigurations();
const logFolderPath = join(appConfigurations.data_folder, "logs");
const appConfigurations = getAppConfigurations()
const logFolderPath = join(appConfigurations.data_folder, 'logs')
if (!fs.existsSync(logFolderPath)) {
fs.mkdirSync(logFolderPath, { recursive: true });
fs.mkdirSync(logFolderPath, { recursive: true })
}
return join(logFolderPath, "app.log");
};
return join(logFolderPath, 'app.log')
}
/**
* Utility function to get data folder path
@ -88,9 +90,9 @@ export const getAppLogPath = (): string => {
* @returns {string} The data folder path.
*/
export const getJanDataFolderPath = (): string => {
const appConfigurations = getAppConfigurations();
return appConfigurations.data_folder;
};
const appConfigurations = getAppConfigurations()
return appConfigurations.data_folder
}
/**
* Utility function to get extension path
@ -98,6 +100,70 @@ export const getJanDataFolderPath = (): string => {
* @returns {string} The extensions path.
*/
export const getJanExtensionsPath = (): string => {
const appConfigurations = getAppConfigurations();
return join(appConfigurations.data_folder, "extensions");
};
const appConfigurations = getAppConfigurations()
return join(appConfigurations.data_folder, 'extensions')
}
/**
* Utility function to get the physical CPU count
*
* @returns {Promise<number>} The physical CPU count.
*/
export const physicalCpuCount = async (): Promise<number> => {
const platform = os.platform()
if (platform === 'linux') {
const output = await exec('lscpu -p | egrep -v "^#" | sort -u -t, -k 2,4 | wc -l')
return parseInt(output.trim(), 10)
} else if (platform === 'darwin') {
const output = await exec('sysctl -n hw.physicalcpu_max')
return parseInt(output.trim(), 10)
} else if (platform === 'win32') {
const output = await exec('WMIC CPU Get NumberOfCores')
return output
.split(os.EOL)
.map((line: string) => parseInt(line))
.filter((value: number) => !isNaN(value))
.reduce((sum: number, number: number) => sum + number, 1)
} else {
const cores = os.cpus().filter((cpu: any, index: number) => {
const hasHyperthreading = cpu.model.includes('Intel')
const isOdd = index % 2 === 1
return !hasHyperthreading || isOdd
})
return cores.length
}
}
const exec = async (command: string): Promise<string> => {
return new Promise((resolve, reject) => {
childProcess.exec(command, { encoding: 'utf8' }, (error, stdout) => {
if (error) {
reject(error)
} else {
resolve(stdout)
}
})
})
}
export const getSystemResourceInfo = async (): Promise<SystemResourceInfo> => {
const cpu = await physicalCpuCount()
const message = `[NITRO]::CPU information - ${cpu}`
log(message)
logServer(message)
return {
numCpuPhysicalCore: cpu,
memAvailable: 0, // TODO: this should not be 0
}
}
export const getEngineConfiguration = async (engineId: string) => {
if (engineId !== 'openai') {
return undefined
}
const directoryPath = join(getJanDataFolderPath(), 'engines')
const filePath = join(directoryPath, `${engineId}.json`)
const data = fs.readFileSync(filePath, 'utf-8')
return JSON.parse(data)
}
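Extensions can now reuse this probe instead of shipping their own copy. A minimal sketch, using the @janhq/core/node entry point referenced later in this diff (threads is a hypothetical helper):

```typescript
import { getSystemResourceInfo } from '@janhq/core/node'

const threads = async (): Promise<number> => {
  const info = await getSystemResourceInfo()
  // Mirrors the cpu_threads derivation used when loading a model.
  return Math.max(1, info.numCpuPhysicalCore)
}
```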

View File

@ -0,0 +1,6 @@
/**
* App configuration event name
*/
export enum AppConfigurationEventName {
OnConfigurationUpdate = 'OnConfigurationUpdate',
}

View File

@ -1 +1,2 @@
export * from './appConfigEntity'
export * from './appConfigEvent'

View File

@ -6,3 +6,4 @@ export * from './inference'
export * from './monitoring'
export * from './file'
export * from './config'
export * from './miscellaneous'

View File

@ -0,0 +1,2 @@
export * from './systemResourceInfo'
export * from './promptTemplate'

View File

@ -0,0 +1,6 @@
export type PromptTemplate = {
system_prompt?: string
ai_prompt?: string
user_prompt?: string
error?: string
}

View File

@ -0,0 +1,4 @@
export type SystemResourceInfo = {
numCpuPhysicalCore: number
memAvailable: number
}

View File

@ -123,6 +123,7 @@ export type ModelSettingParams = {
user_prompt?: string
llama_model_path?: string
mmproj?: string
cont_batching?: boolean
}
/**

View File

@ -43,5 +43,4 @@ export type ThreadState = {
waitingForResponse: boolean
error?: Error
lastMessage?: string
isFinishInit?: boolean
}

View File

@ -1,5 +1,5 @@
GTM_ID=xxxx
POSTHOG_PROJECT_API_KEY=xxxx
POSTHOG_APP_URL=xxxx
UMAMI_PROJECT_API_KEY=xxxx
UMAMI_APP_URL=xxxx
ALGOLIA_API_KEY=xxxx
ALGOLIA_APP_ID=xxxx

View File

@ -1,5 +1,5 @@
import { app, ipcMain, dialog, shell } from 'electron'
import { join, basename } from 'path'
import { join, basename, relative as getRelative, isAbsolute } from 'path'
import { WindowManager } from './../managers/window'
import { getResourcePath } from './../utils/path'
import { AppRoute, AppConfiguration } from '@janhq/core'
@ -50,6 +50,27 @@ export function handleAppIPCs() {
join(...paths)
)
/**
* Checks if the given path is a subdirectory of the given directory.
*
* @param _event - The IPC event object.
* @param from - The path to check.
* @param to - The directory to check against.
*
* @returns {Promise<boolean>} - A promise that resolves with the result.
*/
ipcMain.handle(
AppRoute.isSubdirectory,
async (_event, from: string, to: string) => {
const relative = getRelative(from, to)
const isSubdir =
relative && !relative.startsWith('..') && !isAbsolute(relative)
if (isSubdir === '') return false
else return isSubdir
}
)
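Note that when the check passes, this handler returns the truthy relative-path string rather than true, despite the documented Promise<boolean>. A strictly boolean variant would look like the following sketch, which is not what this commit ships:

```typescript
import { relative, isAbsolute } from 'path'

// Sketch of a boolean-returning subdirectory check.
const isSubdirectoryStrict = (from: string, to: string): boolean => {
  const rel = relative(from, to)
  // Non-empty, does not escape upward, and stays on the same root/drive.
  return rel !== '' && !rel.startsWith('..') && !isAbsolute(rel)
}
```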
/**
* Retrieve basename from given path, respect to the current OS.
*/

View File

@ -1,4 +1,4 @@
import { ipcMain } from 'electron'
import { ipcMain, app } from 'electron'
// @ts-ignore
import reflect from '@alumna/reflect'
@ -38,6 +38,10 @@ export function handleFileMangerIPCs() {
getResourcePath()
)
ipcMain.handle(FileManagerRoute.getUserHomePath, async (_event) =>
app.getPath('home')
)
// handle fs is directory here
ipcMain.handle(
FileManagerRoute.fileStat,

View File

@ -28,6 +28,22 @@ import { setupCore } from './utils/setup'
app
.whenReady()
.then(async () => {
if (!app.isPackaged) {
// Which means you're running from source code
const { default: installExtension, REACT_DEVELOPER_TOOLS } = await import(
'electron-devtools-installer'
) // Don't import at the top level, since the installer package is dev-only
try {
const name = installExtension(REACT_DEVELOPER_TOOLS)
console.log(`Added Extension: ${name}`)
} catch (err) {
console.log('An error occurred while installing devtools:')
console.error(err)
// Only log the error and don't throw it because it's not critical
}
}
})
.then(setupCore)
.then(createUserSpace)
.then(migrateExtensions)

View File

@ -86,7 +86,7 @@
"request": "^2.88.2",
"request-progress": "^3.0.0",
"rimraf": "^5.0.5",
"typescript": "^5.3.3",
"typescript": "^5.2.2",
"ulid": "^2.3.0",
"use-debounce": "^9.0.4"
},
@ -99,6 +99,7 @@
"@typescript-eslint/parser": "^6.7.3",
"electron": "28.0.0",
"electron-builder": "^24.9.1",
"electron-devtools-installer": "^3.2.0",
"electron-playwright-helpers": "^1.6.0",
"eslint-plugin-react": "^7.33.2",
"run-script-os": "^1.1.6"

View File

@ -1,9 +1,9 @@
import { PlaywrightTestConfig } from "@playwright/test";
import { PlaywrightTestConfig } from '@playwright/test'
const config: PlaywrightTestConfig = {
testDir: "./tests",
testDir: './tests',
retries: 0,
timeout: 120000,
};
globalTimeout: 300000,
}
export default config;
export default config

View File

@ -9,6 +9,7 @@ import {
let electronApp: ElectronApplication
let page: Page
const TIMEOUT: number = parseInt(process.env.TEST_TIMEOUT || '300000')
test.beforeAll(async () => {
process.env.CI = 'e2e'
@ -26,7 +27,9 @@ test.beforeAll(async () => {
})
await stubDialog(electronApp, 'showMessageBox', { response: 1 })
page = await electronApp.firstWindow()
page = await electronApp.firstWindow({
timeout: TIMEOUT,
})
})
test.afterAll(async () => {
@ -34,8 +37,12 @@ test.afterAll(async () => {
await page.close()
})
test('explores models', async () => {
await page.getByTestId('Hub').first().click()
await page.getByTestId('testid-explore-models').isVisible()
// More test cases here...
test('explores hub', async () => {
test.setTimeout(TIMEOUT)
await page.getByTestId('Hub').first().click({
timeout: TIMEOUT,
})
await page.getByTestId('hub-container-test-id').isVisible({
timeout: TIMEOUT,
})
})

View File

@ -1,55 +0,0 @@
import { _electron as electron } from 'playwright'
import { ElectronApplication, Page, expect, test } from '@playwright/test'
import {
findLatestBuild,
parseElectronApp,
stubDialog,
} from 'electron-playwright-helpers'
let electronApp: ElectronApplication
let page: Page
test.beforeAll(async () => {
process.env.CI = 'e2e'
const latestBuild = findLatestBuild('dist')
expect(latestBuild).toBeTruthy()
// parse the packaged Electron app and find paths and other info
const appInfo = parseElectronApp(latestBuild)
expect(appInfo).toBeTruthy()
expect(appInfo.asar).toBe(true)
expect(appInfo.executable).toBeTruthy()
expect(appInfo.main).toBeTruthy()
expect(appInfo.name).toBe('jan')
expect(appInfo.packageJson).toBeTruthy()
expect(appInfo.packageJson.name).toBe('jan')
expect(appInfo.platform).toBeTruthy()
expect(appInfo.platform).toBe(process.platform)
expect(appInfo.resourcesDir).toBeTruthy()
electronApp = await electron.launch({
args: [appInfo.main], // main file from package.json
executablePath: appInfo.executable, // path to the Electron executable
})
await stubDialog(electronApp, 'showMessageBox', { response: 1 })
page = await electronApp.firstWindow()
})
test.afterAll(async () => {
await electronApp.close()
await page.close()
})
test('renders the home page', async () => {
expect(page).toBeDefined()
// Welcome text is available
const welcomeText = await page
.getByTestId('testid-welcome-title')
.first()
.isVisible()
expect(welcomeText).toBe(false)
})

View File

@ -9,6 +9,7 @@ import {
let electronApp: ElectronApplication
let page: Page
const TIMEOUT: number = parseInt(process.env.TEST_TIMEOUT || '300000')
test.beforeAll(async () => {
process.env.CI = 'e2e'
@ -26,7 +27,9 @@ test.beforeAll(async () => {
})
await stubDialog(electronApp, 'showMessageBox', { response: 1 })
page = await electronApp.firstWindow()
page = await electronApp.firstWindow({
timeout: TIMEOUT,
})
})
test.afterAll(async () => {
@ -35,20 +38,24 @@ test.afterAll(async () => {
})
test('renders left navigation panel', async () => {
// Chat section should be there
const chatSection = await page.getByTestId('Chat').first().isVisible()
expect(chatSection).toBe(false)
// Home actions
/* Disable unstable feature tests
** const botBtn = await page.getByTestId("Bot").first().isEnabled();
** Enable back when it is whitelisted
*/
test.setTimeout(TIMEOUT)
const systemMonitorBtn = await page
.getByTestId('System Monitor')
.first()
.isEnabled()
const settingsBtn = await page.getByTestId('Settings').first().isEnabled()
.isEnabled({
timeout: TIMEOUT,
})
const settingsBtn = await page
.getByTestId('Thread')
.first()
.isEnabled({ timeout: TIMEOUT })
expect([systemMonitorBtn, settingsBtn].filter((e) => !e).length).toBe(0)
// Chat section should be there
await page.getByTestId('Local API Server').first().click({
timeout: TIMEOUT,
})
const localServer = await page.getByTestId('local-server-testid').first()
await expect(localServer).toBeVisible({
timeout: TIMEOUT,
})
})

View File

@ -9,6 +9,7 @@ import {
let electronApp: ElectronApplication
let page: Page
const TIMEOUT: number = parseInt(process.env.TEST_TIMEOUT || '300000')
test.beforeAll(async () => {
process.env.CI = 'e2e'
@ -26,7 +27,9 @@ test.beforeAll(async () => {
})
await stubDialog(electronApp, 'showMessageBox', { response: 1 })
page = await electronApp.firstWindow()
page = await electronApp.firstWindow({
timeout: TIMEOUT,
})
})
test.afterAll(async () => {
@ -35,6 +38,8 @@ test.afterAll(async () => {
})
test('shows settings', async () => {
await page.getByTestId('Settings').first().click()
await page.getByTestId('testid-setting-description').isVisible()
test.setTimeout(TIMEOUT)
await page.getByTestId('Settings').first().click({ timeout: TIMEOUT })
const settingDescription = page.getByTestId('testid-setting-description')
await expect(settingDescription).toBeVisible({ timeout: TIMEOUT })
})

View File

@ -1,41 +0,0 @@
import { _electron as electron } from 'playwright'
import { ElectronApplication, Page, expect, test } from '@playwright/test'
import {
findLatestBuild,
parseElectronApp,
stubDialog,
} from 'electron-playwright-helpers'
let electronApp: ElectronApplication
let page: Page
test.beforeAll(async () => {
process.env.CI = 'e2e'
const latestBuild = findLatestBuild('dist')
expect(latestBuild).toBeTruthy()
// parse the packaged Electron app and find paths and other info
const appInfo = parseElectronApp(latestBuild)
expect(appInfo).toBeTruthy()
electronApp = await electron.launch({
args: [appInfo.main], // main file from package.json
executablePath: appInfo.executable, // path to the Electron executable
})
await stubDialog(electronApp, 'showMessageBox', { response: 1 })
page = await electronApp.firstWindow()
})
test.afterAll(async () => {
await electronApp.close()
await page.close()
})
test('shows system monitor', async () => {
await page.getByTestId('System Monitor').first().click()
await page.getByTestId('testid-system-monitor').isVisible()
// More test cases here...
})

View File

@ -8,7 +8,10 @@
"license": "AGPL-3.0",
"scripts": {
"build": "tsc --module commonjs && rollup -c rollup.config.ts",
"build:publish": "rimraf *.tgz --glob && npm run build && npm pack && cpx *.tgz ../../electron/pre-install"
"build:publish:linux": "rimraf *.tgz --glob && npm run build && npm pack && cpx *.tgz ../../electron/pre-install",
"build:publish:darwin": "rimraf *.tgz --glob && npm run build && ../../.github/scripts/auto-sign.sh && npm pack && cpx *.tgz ../../electron/pre-install",
"build:publish:win32": "rimraf *.tgz --glob && npm run build && npm pack && cpx *.tgz ../../electron/pre-install",
"build:publish": "run-script-os"
},
"devDependencies": {
"@rollup/plugin-commonjs": "^25.0.7",
@ -22,7 +25,8 @@
"rollup-plugin-define": "^1.0.1",
"rollup-plugin-sourcemaps": "^0.6.3",
"rollup-plugin-typescript2": "^0.36.0",
"typescript": "^5.3.3"
"typescript": "^5.3.3",
"run-script-os": "^1.1.6"
},
"dependencies": {
"@janhq/core": "file:../../core",

View File

@ -12,12 +12,11 @@ export class Retrieval {
public chunkOverlap?: number = 0;
private retriever: any;
private embeddingModel: any = undefined;
private embeddingModel?: OpenAIEmbeddings = undefined;
private textSplitter?: RecursiveCharacterTextSplitter;
constructor(chunkSize: number = 4000, chunkOverlap: number = 200) {
this.updateTextSplitter(chunkSize, chunkOverlap);
this.embeddingModel = new OpenAIEmbeddings({});
}
public updateTextSplitter(chunkSize: number, chunkOverlap: number): void {
@ -36,7 +35,7 @@ export class Retrieval {
if (engine === "nitro") {
this.embeddingModel = new OpenAIEmbeddings(
{ openAIApiKey: "nitro-embedding" },
{ basePath: "http://127.0.0.1:3928/v1" },
{ basePath: "http://127.0.0.1:3928/v1" }
);
} else {
// Fallback to OpenAI Settings
@ -50,11 +49,12 @@ export class Retrieval {
public ingestAgentKnowledge = async (
filePath: string,
memoryPath: string,
memoryPath: string
): Promise<any> => {
const loader = new PDFLoader(filePath, {
splitPages: true,
});
if (!this.embeddingModel) return Promise.reject();
const doc = await loader.load();
const docs = await this.textSplitter!.splitDocuments(doc);
const vectorStore = await HNSWLib.fromDocuments(docs, this.embeddingModel);
@ -62,6 +62,7 @@ export class Retrieval {
};
public loadRetrievalAgent = async (memoryPath: string): Promise<void> => {
if (!this.embeddingModel) return Promise.reject();
const vectorStore = await HNSWLib.load(memoryPath, this.embeddingModel);
this.retriever = vectorStore.asRetriever(2);
return Promise.resolve();

View File

@ -119,19 +119,20 @@ export default class JSONConversationalExtension extends ConversationalExtension
if (!(await fs.existsSync(threadDirPath)))
await fs.mkdirSync(threadDirPath)
if (message.content[0].type === 'image') {
if (message.content[0]?.type === 'image') {
const filesPath = await joinPath([threadDirPath, 'files'])
if (!(await fs.existsSync(filesPath))) await fs.mkdirSync(filesPath)
const imagePath = await joinPath([filesPath, `${message.id}.png`])
const base64 = message.content[0].text.annotations[0]
await this.storeImage(base64, imagePath)
// if (fs.existsSync(imagePath)) {
// message.content[0].text.annotations[0] = imagePath
// }
if ((await fs.existsSync(imagePath)) && message.content?.length) {
// Use file path instead of blob
message.content[0].text.annotations[0] = `threads/${message.thread_id}/files/${message.id}.png`
}
}
if (message.content[0].type === 'pdf') {
if (message.content[0]?.type === 'pdf') {
const filesPath = await joinPath([threadDirPath, 'files'])
if (!(await fs.existsSync(filesPath))) await fs.mkdirSync(filesPath)
@ -139,7 +140,7 @@ export default class JSONConversationalExtension extends ConversationalExtension
const blob = message.content[0].text.annotations[0]
await this.storeFile(blob, filePath)
if (await fs.existsSync(filePath)) {
if ((await fs.existsSync(filePath)) && message.content?.length) {
// Use file path instead of blob
message.content[0].text.annotations[0] = `threads/${message.thread_id}/files/${message.id}.pdf`
}

View File

@ -1 +1 @@
0.2.14
0.3.5

View File

@ -35,7 +35,7 @@
"rollup-plugin-sourcemaps": "^0.6.3",
"rollup-plugin-typescript2": "^0.36.0",
"run-script-os": "^1.1.6",
"typescript": "^5.3.3"
"typescript": "^5.2.2"
},
"dependencies": {
"@janhq/core": "file:../../core",

View File

@ -27,6 +27,9 @@ export default [
TROUBLESHOOTING_URL: JSON.stringify(
"https://jan.ai/guides/troubleshooting"
),
JAN_SERVER_INFERENCE_URL: JSON.stringify(
"http://localhost:1337/v1/chat/completions"
),
}),
// Allow json resolution
json(),
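rollup-plugin-define substitutes these identifiers with string literals at bundle time, which is why the extension source can reference them as bare globals. A minimal consuming sketch (the declare mirrors the typings file later in this diff):

```typescript
// Ambient declaration matching the identifier injected above.
declare const JAN_SERVER_INFERENCE_URL: string

// After bundling, the identifier is replaced by the literal URL string.
console.debug('Server inference URL:', JAN_SERVER_INFERENCE_URL)
```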

View File

@ -1,22 +1,7 @@
declare const NODE: string;
declare const INFERENCE_URL: string;
declare const TROUBLESHOOTING_URL: string;
/**
* The parameters for the initModel function.
* @property settings - The settings for the machine learning model.
* @property settings.ctx_len - The context length.
* @property settings.ngl - The number of generated tokens.
* @property settings.cont_batching - Whether to use continuous batching.
* @property settings.embedding - Whether to use embedding.
*/
interface EngineSettings {
ctx_len: number;
ngl: number;
cpu_threads: number;
cont_batching: boolean;
embedding: boolean;
}
declare const JAN_SERVER_INFERENCE_URL: string;
/**
* The response from the initModel function.
@ -26,8 +11,3 @@ interface ModelOperationResponse {
error?: any;
modelFile?: string;
}
interface ResourcesInfo {
numCpuPhysicalCore: number;
memAvailable: number;
}

View File

@ -6,6 +6,7 @@ import { Observable } from "rxjs";
* @returns An Observable that emits the generated response as a string.
*/
export function requestInference(
inferenceUrl: string,
recentMessages: any[],
model: Model,
controller?: AbortController
@ -17,7 +18,7 @@ export function requestInference(
stream: true,
...model.parameters,
});
fetch(INFERENCE_URL, {
fetch(inferenceUrl, {
method: "POST",
headers: {
"Content-Type": "application/json",

View File

@ -24,6 +24,7 @@ import {
MessageEvent,
ModelEvent,
InferenceEvent,
ModelSettingParams,
} from "@janhq/core";
import { requestInference } from "./helpers/sse";
import { ulid } from "ulid";
@ -45,7 +46,7 @@ export default class JanInferenceNitroExtension extends InferenceExtension {
private _currentModel: Model | undefined;
private _engineSettings: EngineSettings = {
private _engineSettings: ModelSettingParams = {
ctx_len: 2048,
ngl: 100,
cpu_threads: 1,
@ -67,35 +68,48 @@ export default class JanInferenceNitroExtension extends InferenceExtension {
*/
private nitroProcessInfo: any = undefined;
private inferenceUrl = "";
/**
* Subscribes to events emitted by the @janhq/core package.
*/
async onLoad() {
if (!(await fs.existsSync(JanInferenceNitroExtension._homeDir))) {
await fs
.mkdirSync(JanInferenceNitroExtension._homeDir)
.catch((err: Error) => console.debug(err));
try {
await fs.mkdirSync(JanInferenceNitroExtension._homeDir);
} catch (e) {
console.debug(e);
}
}
// init inference url
// @ts-ignore
const electronApi = window?.electronAPI;
this.inferenceUrl = INFERENCE_URL;
if (!electronApi) {
this.inferenceUrl = JAN_SERVER_INFERENCE_URL;
}
console.debug("Inference url: ", this.inferenceUrl);
if (!(await fs.existsSync(JanInferenceNitroExtension._settingsDir)))
await fs.mkdirSync(JanInferenceNitroExtension._settingsDir);
this.writeDefaultEngineSettings();
// Events subscription
events.on(MessageEvent.OnMessageSent, (data: MessageRequest) =>
this.onMessageRequest(data),
this.onMessageRequest(data)
);
events.on(ModelEvent.OnModelInit, (model: Model) =>
this.onModelInit(model),
this.onModelInit(model)
);
events.on(ModelEvent.OnModelStop, (model: Model) =>
this.onModelStop(model),
this.onModelStop(model)
);
events.on(InferenceEvent.OnInferenceStopped, () =>
this.onInferenceStopped(),
this.onInferenceStopped()
);
// Attempt to fetch nvidia info
@ -120,7 +134,7 @@ export default class JanInferenceNitroExtension extends InferenceExtension {
} else {
await fs.writeFileSync(
engineFile,
JSON.stringify(this._engineSettings, null, 2),
JSON.stringify(this._engineSettings, null, 2)
);
}
} catch (err) {
@ -133,6 +147,7 @@ export default class JanInferenceNitroExtension extends InferenceExtension {
const modelFullPath = await joinPath(["models", model.id]);
this._currentModel = model;
const nitroInitResult = await executeOnMain(NODE, "runModel", {
modelFullPath,
model,
@ -143,12 +158,11 @@ export default class JanInferenceNitroExtension extends InferenceExtension {
return;
}
this._currentModel = model;
events.emit(ModelEvent.OnModelReady, model);
this.getNitroProcesHealthIntervalId = setInterval(
() => this.periodicallyGetNitroHealth(),
JanInferenceNitroExtension._intervalHealthCheck,
JanInferenceNitroExtension._intervalHealthCheck
);
}
@ -205,7 +219,11 @@ export default class JanInferenceNitroExtension extends InferenceExtension {
return new Promise(async (resolve, reject) => {
if (!this._currentModel) return Promise.reject("No model loaded");
requestInference(data.messages ?? [], this._currentModel).subscribe({
requestInference(
this.inferenceUrl,
data.messages ?? [],
this._currentModel
).subscribe({
next: (_content: any) => {},
complete: async () => {
resolve(message);
@ -225,6 +243,9 @@ export default class JanInferenceNitroExtension extends InferenceExtension {
*/
private async onMessageRequest(data: MessageRequest) {
if (data.model?.engine !== InferenceEngine.nitro || !this._currentModel) {
console.log(
`Model is not nitro or no model loaded ${data.model?.engine} ${this._currentModel}`
);
return;
}
@ -250,7 +271,12 @@ export default class JanInferenceNitroExtension extends InferenceExtension {
...(this._currentModel || {}),
...(data.model || {}),
};
requestInference(data.messages ?? [], model, this.controller).subscribe({
requestInference(
this.inferenceUrl,
data.messages ?? [],
model,
this.controller
).subscribe({
next: (content: any) => {
const messageContent: ThreadContent = {
type: ContentType.Text,

View File

@ -3,11 +3,19 @@ import path from "path";
import { ChildProcessWithoutNullStreams, spawn } from "child_process";
import tcpPortUsed from "tcp-port-used";
import fetchRT from "fetch-retry";
import { log, getJanDataFolderPath } from "@janhq/core/node";
import {
log,
getJanDataFolderPath,
getSystemResourceInfo,
} from "@janhq/core/node";
import { getNitroProcessInfo, updateNvidiaInfo } from "./nvidia";
import { Model, InferenceEngine, ModelSettingParams } from "@janhq/core";
import {
Model,
InferenceEngine,
ModelSettingParams,
PromptTemplate,
} from "@janhq/core";
import { executableNitroFile } from "./execute";
import { physicalCpuCount } from "./utils";
// Polyfill fetch with retry
const fetchRetry = fetchRT(fetch);
@ -19,25 +27,6 @@ interface ModelInitOptions {
modelFullPath: string;
model: Model;
}
/**
* The response object of Prompt Template parsing.
*/
interface PromptTemplate {
system_prompt?: string;
ai_prompt?: string;
user_prompt?: string;
error?: string;
}
/**
* Model setting args for Nitro model load.
*/
interface ModelSettingArgs extends ModelSettingParams {
llama_model_path: string;
cpu_threads: number;
}
// The PORT to use for the Nitro subprocess
const PORT = 3928;
// The HOST address to use for the Nitro subprocess
@ -60,7 +49,7 @@ let subprocess: ChildProcessWithoutNullStreams | undefined = undefined;
// The current model file url
let currentModelFile: string = "";
// The current model settings
let currentSettings: ModelSettingArgs | undefined = undefined;
let currentSettings: ModelSettingParams | undefined = undefined;
/**
* Stops a Nitro subprocess.
@ -106,7 +95,7 @@ async function runModel(
if (wrapper.model.engine !== InferenceEngine.nitro) {
return Promise.reject("Not a nitro model");
} else {
const nitroResourceProbe = await getResourcesInfo();
const nitroResourceProbe = await getSystemResourceInfo();
// Convert settings.prompt_template to system_prompt, user_prompt, ai_prompt
if (wrapper.model.settings.prompt_template) {
const promptTemplate = wrapper.model.settings.prompt_template;
@ -220,6 +209,9 @@ function promptTemplateConverter(promptTemplate: string): PromptTemplate {
* @returns A Promise that resolves when the model is loaded successfully, or rejects with an error message if the model is not found or fails to load.
*/
function loadLLMModel(settings: any): Promise<Response> {
if (!settings?.ngl) {
settings.ngl = 100;
}
log(`[NITRO]::Debug: Loading model with params ${JSON.stringify(settings)}`);
return fetchRetry(NITRO_HTTP_LOAD_MODEL_URL, {
method: "POST",
@ -240,7 +232,7 @@ function loadLLMModel(settings: any): Promise<Response> {
})
.catch((err) => {
log(`[NITRO]::Error: Load model failed with error ${err}`);
return Promise.reject();
return Promise.reject(err);
});
}
@@ -262,9 +254,9 @@ async function validateModelStatus(): Promise<void> {
retryDelay: 500,
}).then(async (res: Response) => {
log(
`[NITRO]::Debug: Validate model state success with response ${JSON.stringify(
res,
)}`,
`[NITRO]::Debug: Validate model state with response ${JSON.stringify(
res.status
)}`
);
// If the response is OK, check model_loaded status.
if (res.ok) {
@@ -272,9 +264,19 @@ async function validateModelStatus(): Promise<void> {
// If the model is loaded, return an empty object.
// Otherwise, return an object with an error message.
if (body.model_loaded) {
log(
`[NITRO]::Debug: Validate model state success with response ${JSON.stringify(
body
)}`
);
return Promise.resolve();
}
}
log(
`[NITRO]::Debug: Validate model state failed with response ${JSON.stringify(
res.statusText
)}`
);
return Promise.reject("Validate model status failed");
});
}
@@ -351,22 +353,6 @@ function spawnNitroProcess(): Promise<any> {
});
}
/**
* Get the system resources information
* TODO: Move to Core so that it can be reused
*/
function getResourcesInfo(): Promise<ResourcesInfo> {
return new Promise(async (resolve) => {
const cpu = await physicalCpuCount();
log(`[NITRO]::CPU informations - ${cpu}`);
const response: ResourcesInfo = {
numCpuPhysicalCore: cpu,
memAvailable: 0,
};
resolve(response);
});
}
/**
* Every module should have a dispose function
* This will be called when the extension is unloaded and should clean up any resources

View File

@@ -1,56 +0,0 @@
import os from "os";
import childProcess from "child_process";
function exec(command: string): Promise<string> {
return new Promise((resolve, reject) => {
childProcess.exec(command, { encoding: "utf8" }, (error, stdout) => {
if (error) {
reject(error);
} else {
resolve(stdout);
}
});
});
}
let amount: number;
const platform = os.platform();
export async function physicalCpuCount(): Promise<number> {
return new Promise((resolve, reject) => {
if (platform === "linux") {
exec('lscpu -p | egrep -v "^#" | sort -u -t, -k 2,4 | wc -l')
.then((output) => {
amount = parseInt(output.trim(), 10);
resolve(amount);
})
.catch(reject);
} else if (platform === "darwin") {
exec("sysctl -n hw.physicalcpu_max")
.then((output) => {
amount = parseInt(output.trim(), 10);
resolve(amount);
})
.catch(reject);
} else if (platform === "win32") {
exec("WMIC CPU Get NumberOfCores")
.then((output) => {
amount = output
.split(os.EOL)
.map((line: string) => parseInt(line))
.filter((value: number) => !isNaN(value))
.reduce((sum: number, number: number) => sum + number, 1);
resolve(amount);
})
.catch(reject);
} else {
const cores = os.cpus().filter((cpu: any, index: number) => {
const hasHyperthreading = cpu.model.includes("Intel");
const isOdd = index % 2 === 1;
return !hasHyperthreading || isOdd;
});
amount = cores.length;
resolve(amount);
}
});
}
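This platform-specific probe is deleted in favor of getSystemResourceInfo from @janhq/core/node (see the import change above). A minimal sketch of the replacement call, assuming only that the returned object carries the same kind of CPU/memory data the local helper computed:

import { log, getSystemResourceInfo } from "@janhq/core/node";

async function probeResources(): Promise<void> {
  const info = await getSystemResourceInfo();
  // Field names are core's contract; the removed local version returned
  // { numCpuPhysicalCore, memAvailable }.
  log(`[NITRO]::Debug: system resources ${JSON.stringify(info)}`);
}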

View File

@@ -20,6 +20,8 @@ import {
MessageEvent,
ModelEvent,
InferenceEvent,
AppConfigurationEventName,
joinPath,
} from "@janhq/core";
import { requestInference } from "./helpers/sse";
import { ulid } from "ulid";
@@ -31,7 +33,7 @@ import { join } from "path";
* It also subscribes to events emitted by the @janhq/core package and handles new message requests.
*/
export default class JanInferenceOpenAIExtension extends BaseExtension {
private static readonly _homeDir = "file://engines";
private static readonly _engineDir = "file://engines";
private static readonly _engineMetadataFileName = "openai.json";
private static _currentModel: OpenAIModel;
@@ -48,9 +50,9 @@ export default class JanInferenceOpenAIExtension extends BaseExtension {
* Subscribes to events emitted by the @janhq/core package.
*/
async onLoad() {
if (!(await fs.existsSync(JanInferenceOpenAIExtension._homeDir))) {
if (!(await fs.existsSync(JanInferenceOpenAIExtension._engineDir))) {
await fs
.mkdirSync(JanInferenceOpenAIExtension._homeDir)
.mkdirSync(JanInferenceOpenAIExtension._engineDir)
.catch((err) => console.debug(err));
}
@@ -71,6 +73,20 @@ export default class JanInferenceOpenAIExtension extends BaseExtension {
events.on(InferenceEvent.OnInferenceStopped, () => {
JanInferenceOpenAIExtension.handleInferenceStopped(this);
});
const settingsFilePath = await joinPath([
JanInferenceOpenAIExtension._engineDir,
JanInferenceOpenAIExtension._engineMetadataFileName,
]);
events.on(
AppConfigurationEventName.OnConfigurationUpdate,
(settingsKey: string) => {
// Update settings on changes
if (settingsKey === settingsFilePath)
JanInferenceOpenAIExtension.writeDefaultEngineSettings();
},
);
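    // Note: the matching emitter lands in useEngineSettings later in this
    // commit; saveOpenAISettings writes file://engines/openai.json and emits
    // OnConfigurationUpdate with that same path, so the equality check above
    // is enough to detect a settings change.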
}
/**
@@ -81,7 +97,7 @@ export default class JanInferenceOpenAIExtension extends BaseExtension {
static async writeDefaultEngineSettings() {
try {
const engineFile = join(
JanInferenceOpenAIExtension._homeDir,
JanInferenceOpenAIExtension._engineDir,
JanInferenceOpenAIExtension._engineMetadataFileName,
);
if (await fs.existsSync(engineFile)) {
@@ -182,7 +198,7 @@ export default class JanInferenceOpenAIExtension extends BaseExtension {
},
error: async (err) => {
if (instance.isCancelled || message.content.length > 0) {
message.status = MessageStatus.Error;
message.status = MessageStatus.Stopped;
events.emit(MessageEvent.OnMessageUpdate, message);
return;
}
@@ -194,7 +210,7 @@ export default class JanInferenceOpenAIExtension extends BaseExtension {
},
};
message.content = [messageContent];
message.status = MessageStatus.Ready;
message.status = MessageStatus.Error;
events.emit(MessageEvent.OnMessageUpdate, message);
},
});

View File

@@ -13,7 +13,7 @@
"format": "gguf",
"settings": {
"ctx_len": 4096,
"prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant\n",
"prompt_template": "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant",
"llama_model_path": "dolphin-2_6-phi-2.Q8_0.gguf"
},
"parameters": {
@@ -29,4 +29,4 @@
"size": 2960000000
},
"engine": "nitro"
}
}
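The only substantive change in this model.json: the trailing \n after <|im_start|>assistant is dropped (and a missing end-of-file newline added), which, given the placeholder splitting sketched earlier, presumably stops the converted ai_prompt from opening the assistant turn with an extra blank line.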

View File

@@ -26,6 +26,8 @@
"dotenv": "^16.3.1",
"fastify": "^4.24.3",
"request": "^2.88.2",
"fetch-retry": "^5.0.6",
"tcp-port-used": "^1.0.2",
"request-progress": "^3.0.0"
},
"devDependencies": {
@@ -35,6 +37,7 @@
"@typescript-eslint/parser": "^6.7.3",
"eslint-plugin-react": "^7.33.2",
"run-script-os": "^1.1.6",
"@types/tcp-port-used": "^1.0.4",
"typescript": "^5.2.2"
}
}

View File

@@ -18,6 +18,7 @@
},
"dependencies": {
"@radix-ui/react-avatar": "^1.0.4",
"@radix-ui/react-checkbox": "^1.0.4",
"@radix-ui/react-context": "^1.0.1",
"@radix-ui/react-dialog": "^1.0.5",
"@radix-ui/react-icons": "^1.3.0",

View File

@@ -9,7 +9,7 @@
}
&-secondary-blue {
@apply bg-blue-200 text-blue-600 hover:bg-blue-500/50;
@apply bg-blue-200 text-blue-600 hover:bg-blue-300/50 dark:hover:bg-blue-200/80;
}
&-danger {
@@ -17,7 +17,7 @@
}
&-secondary-danger {
@apply bg-red-200 text-red-600 hover:bg-red-500/50;
@apply bg-red-200 text-red-600 hover:bg-red-300/50 dark:hover:bg-red-200/80;
}
&-outline {
@@ -67,14 +67,18 @@
[type='submit'] {
&.btn-primary {
@apply bg-primary hover:bg-primary/90;
@apply disabled:pointer-events-none disabled:bg-zinc-100 disabled:text-zinc-400;
}
&.btn-secondary {
@apply bg-secondary hover:bg-secondary/80;
@apply disabled:pointer-events-none disabled:bg-zinc-100 disabled:text-zinc-400;
}
&.btn-secondary-blue {
@apply bg-blue-200 text-blue-900 hover:bg-blue-200/80;
@apply disabled:pointer-events-none disabled:bg-zinc-100 disabled:text-zinc-400;
}
&.btn-danger {
@apply bg-danger hover:bg-danger/90;
@apply disabled:pointer-events-none disabled:bg-zinc-100 disabled:text-zinc-400;
}
}

View File

@@ -0,0 +1,29 @@
'use client'
import * as React from 'react'
import * as CheckboxPrimitive from '@radix-ui/react-checkbox'
import { CheckIcon } from '@radix-ui/react-icons'
import { twMerge } from 'tailwind-merge'
const Checkbox = React.forwardRef<
React.ElementRef<typeof CheckboxPrimitive.Root>,
React.ComponentPropsWithoutRef<typeof CheckboxPrimitive.Root>
>(({ className, ...props }, ref) => (
<CheckboxPrimitive.Root
ref={ref}
className={twMerge('checkbox', className)}
{...props}
>
<CheckboxPrimitive.Indicator
className={twMerge(
'flex flex-shrink-0 items-center justify-center text-current'
)}
>
<CheckIcon className="checkbox--icon" />
</CheckboxPrimitive.Indicator>
</CheckboxPrimitive.Root>
))
Checkbox.displayName = CheckboxPrimitive.Root.displayName
export { Checkbox }
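A brief usage sketch; the component forwards the standard Radix props, so checked/onCheckedChange are assumed to behave exactly as in @radix-ui/react-checkbox (state names are illustrative):

import * as React from 'react'
import { Checkbox } from '@janhq/uikit'

const ExampleRow = () => {
  const [checked, setChecked] = React.useState(false)
  return (
    <Checkbox
      checked={checked}
      // Radix reports boolean | 'indeterminate', hence the strict comparison
      onCheckedChange={(value) => setChecked(value === true)}
    />
  )
}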

View File

@@ -0,0 +1,7 @@
.checkbox {
@apply border-border data-[state=checked]:bg-primary h-5 w-5 flex-shrink-0 rounded-md border data-[state=checked]:text-white;
&--icon {
@apply h-4 w-4;
}
}

View File

@@ -12,3 +12,4 @@ export * from './command'
export * from './textarea'
export * from './select'
export * from './slider'
export * from './checkbox'

View File

@@ -1,6 +1,6 @@
.input {
@apply border-border placeholder:text-muted-foreground flex h-9 w-full rounded-lg border bg-transparent px-3 py-1 transition-colors;
@apply disabled:cursor-not-allowed disabled:bg-zinc-100;
@apply disabled:cursor-not-allowed disabled:bg-zinc-100 disabled:dark:bg-zinc-800 disabled:dark:text-zinc-600;
@apply focus-within:outline-none focus-visible:outline-0 focus-visible:ring-2 focus-visible:ring-blue-500 focus-visible:ring-offset-1;
@apply file:border-0 file:bg-transparent file:font-medium;
}

View File

@@ -16,6 +16,7 @@
@import './textarea/styles.scss';
@import './select/styles.scss';
@import './slider/styles.scss';
@import './checkbox/styles.scss';
.animate-spin {
animation: spin 1s linear infinite;

View File

@@ -1,5 +1,6 @@
.select {
@apply placeholder:text-muted-foreground border-border flex h-9 w-full items-center justify-between whitespace-nowrap rounded-md border bg-transparent px-3 py-2 text-sm shadow-sm disabled:cursor-not-allowed disabled:opacity-50 [&>span]:line-clamp-1;
@apply placeholder:text-muted-foreground border-border flex h-9 w-full items-center justify-between whitespace-nowrap rounded-md border bg-transparent px-3 py-2 text-sm shadow-sm disabled:cursor-not-allowed [&>span]:line-clamp-1;
@apply disabled:cursor-not-allowed disabled:bg-zinc-100 disabled:dark:bg-zinc-800 disabled:dark:text-zinc-600;
@apply focus-within:outline-none focus-visible:outline-0 focus-visible:ring-2 focus-visible:ring-blue-500 focus-visible:ring-offset-1;
&-caret {

View File

@@ -13,6 +13,8 @@ import { useClickOutside } from '@/hooks/useClickOutside'
import { usePath } from '@/hooks/usePath'
import { openFileTitle } from '@/utils/titleUtils'
import { activeThreadAtom } from '@/helpers/atoms/Thread.atom'
interface Props {
@@ -38,13 +40,6 @@ export default function CardSidebar({
useClickOutside(() => setMore(false), null, [menu, toggle])
let openFolderTitle: string = 'Open Containing Folder'
if (isMac) {
openFolderTitle = 'Show in Finder'
} else if (isWindows) {
openFolderTitle = 'Show in File Explorer'
}
return (
<div
className={twMerge(
@@ -118,7 +113,7 @@ export default function CardSidebar({
{title === 'Model' ? (
<div className="flex flex-col">
<span className="font-medium text-black dark:text-muted-foreground">
{openFolderTitle}
{openFileTitle()}
</span>
<span className="mt-1 text-muted-foreground">
Opens thread.json. Changes affect this thread only.
@@ -126,7 +121,7 @@ export default function CardSidebar({
</div>
) : (
<span className="text-bold text-black dark:text-muted-foreground">
Show in Finder
{openFileTitle()}
</span>
)}
</>

View File

@@ -26,6 +26,8 @@ import { useMainViewState } from '@/hooks/useMainViewState'
import useRecommendedModel from '@/hooks/useRecommendedModel'
import useUpdateModelParameters from '@/hooks/useUpdateModelParameters'
import { toGibibytes } from '@/utils/converter'
import ModelLabel from '../ModelLabel'
@@ -34,68 +36,40 @@ import OpenAiKeyInput from '../OpenAiKeyInput'
import { serverEnabledAtom } from '@/helpers/atoms/LocalServer.atom'
import {
ModelParams,
activeThreadAtom,
getActiveThreadIdAtom,
setThreadModelParamsAtom,
threadStatesAtom,
} from '@/helpers/atoms/Thread.atom'
export const selectedModelAtom = atom<Model | undefined>(undefined)
export default function DropdownListSidebar() {
const activeThreadId = useAtomValue(getActiveThreadIdAtom)
// TODO: Move all of the unscoped logics outside of the component
const DropdownListSidebar = ({
strictedThread = true,
}: {
strictedThread?: boolean
}) => {
const activeThread = useAtomValue(activeThreadAtom)
const threadStates = useAtomValue(threadStatesAtom)
const [selectedModel, setSelectedModel] = useAtom(selectedModelAtom)
const setThreadModelParams = useSetAtom(setThreadModelParamsAtom)
const { activeModel, stateModel } = useActiveModel()
const { stateModel } = useActiveModel()
const [serverEnabled, setServerEnabled] = useAtom(serverEnabledAtom)
const { setMainViewState } = useMainViewState()
const [loader, setLoader] = useState(0)
const { recommendedModel, downloadedModels } = useRecommendedModel()
/**
* Default value for max_tokens and ctx_len
* It's to avoid an OOM issue, since a model can set a big number for these settings
*/
const defaultValue = (value?: number) => {
if (value && value < 4096) return value
return 4096
}
const { updateModelParameter } = useUpdateModelParameters()
useEffect(() => {
setSelectedModel(selectedModel || activeModel || recommendedModel)
if (!activeThread) return
if (activeThread) {
const finishInit = threadStates[activeThread.id].isFinishInit ?? true
if (finishInit) return
const modelParams: ModelParams = {
...recommendedModel?.parameters,
...recommendedModel?.settings,
/**
* This sets a default value for these settings instead of the maximum value
* It should only apply when model.json has these settings
*/
...(recommendedModel?.parameters.max_tokens && {
max_tokens: defaultValue(recommendedModel?.parameters.max_tokens),
}),
...(recommendedModel?.settings.ctx_len && {
ctx_len: defaultValue(recommendedModel?.settings.ctx_len),
}),
}
setThreadModelParams(activeThread.id, modelParams)
let model = downloadedModels.find(
(model) => model.id === activeThread.assistants[0].model.id
)
if (!model) {
model = recommendedModel
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [
recommendedModel,
activeThread,
setSelectedModel,
setThreadModelParams,
threadStates,
])
const [loader, setLoader] = useState(0)
setSelectedModel(model)
}, [recommendedModel, activeThread, downloadedModels, setSelectedModel])
// This is a fake loader; fix this once we have a realtime percentage for model loading
useEffect(() => {
@@ -132,25 +106,35 @@ export default function DropdownListSidebar() {
setServerEnabled(false)
}
if (activeThreadId) {
if (activeThread) {
const modelParams = {
...model?.parameters,
...model?.settings,
}
setThreadModelParams(activeThreadId, modelParams)
// Update model parameters in the thread state
setThreadModelParams(activeThread.id, modelParams)
// Update model parameters in the thread file
if (model)
updateModelParameter(activeThread.id, {
params: modelParams,
modelId: model.id,
engine: model.engine,
})
}
},
// eslint-disable-next-line react-hooks/exhaustive-deps
[
downloadedModels,
serverEnabled,
activeThreadId,
activeModel,
activeThread,
setSelectedModel,
setServerEnabled,
setThreadModelParams,
updateModelParameter,
]
)
if (!activeThread) {
if (strictedThread && !activeThread) {
return null
}
@@ -236,10 +220,9 @@ export default function DropdownListSidebar() {
</Select>
</div>
<OpenAiKeyInput
selectedModel={selectedModel}
serverEnabled={serverEnabled}
/>
<OpenAiKeyInput />
</>
)
}
export default DropdownListSidebar
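The new strictedThread flag relaxes the active-thread requirement; a sketch of the two call shapes (the relaxed form is presumably for the local-server screen):

// default: render nothing until a thread is active
<DropdownListSidebar />

// assumed consumer of the relaxed mode: render even with no active thread
<DropdownListSidebar strictedThread={false} />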

View File

@@ -27,6 +27,8 @@ import { usePath } from '@/hooks/usePath'
import { showRightSideBarAtom } from '@/screens/Chat/Sidebar'
import { openFileTitle } from '@/utils/titleUtils'
import { activeThreadAtom } from '@/helpers/atoms/Thread.atom'
const TopBar = () => {
@@ -120,13 +122,14 @@ const TopBar = () => {
</span>
</div>
</div>
{activeThread && (
<div
className={twMerge(
'absolute right-0 h-full w-80',
showing && 'border-l border-border'
)}
>
<div
className={twMerge(
'absolute right-0 h-full w-80',
showing && 'border-l border-border'
)}
>
{((activeThread && mainViewState === MainViewState.Thread) ||
mainViewState === MainViewState.LocalServer) && (
<div className="flex h-full w-52 items-center justify-between px-4">
{showing && (
<div className="relative flex h-full items-center">
@@ -161,7 +164,7 @@ const TopBar = () => {
className="text-muted-foreground"
/>
<span className="font-medium text-black dark:text-muted-foreground">
Show in Finder
{openFileTitle()}
</span>
</div>
<div
@@ -206,7 +209,7 @@ const TopBar = () => {
/>
<div className="flex flex-col">
<span className="font-medium text-black dark:text-muted-foreground">
Show in Finder
{openFileTitle()}
</span>
</div>
</div>
@@ -227,8 +230,8 @@ const TopBar = () => {
/>
</div>
</div>
</div>
)}
)}
</div>
</div>
)}
<CommandSearch />

View File

@@ -12,7 +12,8 @@ import TopBar from '@/containers/Layout/TopBar'
import { MainViewState } from '@/constants/screens'
import { useMainViewState } from '@/hooks/useMainViewState'
import { SUCCESS_SET_NEW_DESTINATION } from '@/hooks/useVaultDirectory'
import { SUCCESS_SET_NEW_DESTINATION } from '@/screens/Settings/Advanced/DataFolder'
const BaseLayout = (props: PropsWithChildren) => {
const { children } = props

View File

@@ -1,16 +1,19 @@
import React, { useEffect, useState } from 'react'
import { InferenceEngine, Model } from '@janhq/core'
import { InferenceEngine } from '@janhq/core'
import { Input } from '@janhq/uikit'
import { useAtomValue } from 'jotai'
import { useEngineSettings } from '@/hooks/useEngineSettings'
type Props = {
selectedModel?: Model
serverEnabled: boolean
}
import { selectedModelAtom } from '../DropdownListSidebar'
const OpenAiKeyInput: React.FC<Props> = ({ selectedModel, serverEnabled }) => {
import { serverEnabledAtom } from '@/helpers/atoms/LocalServer.atom'
const OpenAiKeyInput: React.FC = () => {
const selectedModel = useAtomValue(selectedModelAtom)
const serverEnabled = useAtomValue(serverEnabledAtom)
const [openAISettings, setOpenAISettings] = useState<
{ api_key: string } | undefined
>(undefined)
@@ -20,8 +23,7 @@ const OpenAiKeyInput: React.FC<Props> = ({ selectedModel, serverEnabled }) => {
readOpenAISettings().then((settings) => {
setOpenAISettings(settings)
})
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [])
}, [readOpenAISettings])
if (!selectedModel || selectedModel.engine !== InferenceEngine.openai) {
return null

View File

@@ -13,20 +13,26 @@ import {
} from '@janhq/core'
import { useAtomValue, useSetAtom } from 'jotai'
import { activeModelAtom, stateModelAtom } from '@/hooks/useActiveModel'
import {
activeModelAtom,
loadModelErrorAtom,
stateModelAtom,
} from '@/hooks/useActiveModel'
import { useGetDownloadedModels } from '@/hooks/useGetDownloadedModels'
import { queuedMessageAtom } from '@/hooks/useSendChatMessage'
import { toaster } from '../Toast'
import { extensionManager } from '@/extension'
import {
addNewMessageAtom,
updateMessageAtom,
generateResponseAtom,
} from '@/helpers/atoms/ChatMessage.atom'
import {
updateThreadWaitingForResponseAtom,
threadsAtom,
isGeneratingResponseAtom,
} from '@/helpers/atoms/Thread.atom'
export default function EventHandler({ children }: { children: ReactNode }) {
@@ -35,12 +41,14 @@ export default function EventHandler({ children }: { children: ReactNode }) {
const { downloadedModels } = useGetDownloadedModels()
const setActiveModel = useSetAtom(activeModelAtom)
const setStateModel = useSetAtom(stateModelAtom)
const setGenerateResponse = useSetAtom(generateResponseAtom)
const setQueuedMessage = useSetAtom(queuedMessageAtom)
const setLoadModelError = useSetAtom(loadModelErrorAtom)
const updateThreadWaiting = useSetAtom(updateThreadWaitingForResponseAtom)
const threads = useAtomValue(threadsAtom)
const modelsRef = useRef(downloadedModels)
const threadsRef = useRef(threads)
const setIsGeneratingResponse = useSetAtom(isGeneratingResponseAtom)
useEffect(() => {
threadsRef.current = threads
@@ -52,7 +60,6 @@ export default function EventHandler({ children }: { children: ReactNode }) {
const onNewMessageResponse = useCallback(
(message: ThreadMessage) => {
setGenerateResponse(false)
addNewMessage(message)
},
[addNewMessage]
@@ -64,6 +71,7 @@ export default function EventHandler({ children }: { children: ReactNode }) {
toaster({
title: 'Success!',
description: `Model ${model.id} has been started.`,
type: 'success',
})
setStateModel(() => ({
state: 'stop',
@@ -85,18 +93,19 @@ export default function EventHandler({ children }: { children: ReactNode }) {
(res: any) => {
const errorMessage = `${res.error}`
console.error('Failed to load model: ' + errorMessage)
setLoadModelError(errorMessage)
setStateModel(() => ({
state: 'start',
loading: false,
model: res.modelId,
}))
setQueuedMessage(false)
},
[setStateModel]
[setStateModel, setQueuedMessage, setLoadModelError]
)
const onMessageResponseUpdate = useCallback(
(message: ThreadMessage) => {
setGenerateResponse(false)
updateMessage(
message.id,
message.thread_id,
@@ -104,11 +113,17 @@ export default function EventHandler({ children }: { children: ReactNode }) {
message.status
)
if (message.status === MessageStatus.Pending) {
if (message.content.length) {
updateThreadWaiting(message.thread_id, false)
setIsGeneratingResponse(false)
}
return
}
// Mark the thread as not waiting for response
updateThreadWaiting(message.thread_id, false)
setIsGeneratingResponse(false)
const thread = threadsRef.current?.find((e) => e.id == message.thread_id)
if (thread) {
const messageContent = message.content[0]?.text.value ?? ''

View File

@@ -6,8 +6,6 @@ import { Toaster } from 'react-hot-toast'
import { TooltipProvider } from '@janhq/uikit'
import { PostHogProvider } from 'posthog-js/react'
import GPUDriverPrompt from '@/containers/GPUDriverPromptModal'
import EventListenerWrapper from '@/containers/Providers/EventListener'
import JotaiWrapper from '@/containers/Providers/Jotai'
@@ -21,7 +19,7 @@ import {
setupBaseExtensions,
} from '@/services/extensionService'
import { instance } from '@/utils/posthog'
import Umami from '@/utils/umami'
import KeyListener from './KeyListener'
@@ -70,25 +68,22 @@ const Providers = (props: PropsWithChildren) => {
}, [setupCore])
return (
<PostHogProvider client={instance}>
<JotaiWrapper>
<ThemeWrapper>
{setupCore && activated && (
<KeyListener>
<FeatureToggleWrapper>
<EventListenerWrapper>
<TooltipProvider delayDuration={0}>
{children}
</TooltipProvider>
{!isMac && <GPUDriverPrompt />}
</EventListenerWrapper>
<Toaster position="top-right" />
</FeatureToggleWrapper>
</KeyListener>
)}
</ThemeWrapper>
</JotaiWrapper>
</PostHogProvider>
<JotaiWrapper>
<ThemeWrapper>
<Umami />
{setupCore && activated && (
<KeyListener>
<FeatureToggleWrapper>
<EventListenerWrapper>
<TooltipProvider delayDuration={0}>{children}</TooltipProvider>
{!isMac && <GPUDriverPrompt />}
</EventListenerWrapper>
<Toaster />
</FeatureToggleWrapper>
</KeyListener>
)}
</ThemeWrapper>
</JotaiWrapper>
)
}

View File

@@ -6,7 +6,99 @@ import { twMerge } from 'tailwind-merge'
type Props = {
title?: string
description?: string
type?: 'default' | 'error' | 'success'
type?: 'default' | 'error' | 'success' | 'warning'
}
const ErrorIcon = () => {
return (
<svg
width="20"
height="20"
viewBox="0 0 20 20"
fill="none"
xmlns="http://www.w3.org/2000/svg"
>
<path
fillRule="evenodd"
clipRule="evenodd"
d="M20 10C20 15.5228 15.5228 20 10 20H0.993697C0.110179 20 -0.332289 18.9229 0.292453 18.2929L2.2495 16.3195C0.843343 14.597 1.21409e-08 12.397 1.21409e-08 10C1.21409e-08 4.47715 4.47715 0 10 0C15.5228 0 20 4.47715 20 10ZM13.2071 6.79289C13.5976 7.18342 13.5976 7.81658 13.2071 8.20711L11.4142 10L13.2071 11.7929C13.5976 12.1834 13.5976 12.8166 13.2071 13.2071C12.8166 13.5976 12.1834 13.5976 11.7929 13.2071L10 11.4142L8.20711 13.2071C7.81658 13.5976 7.18342 13.5976 6.79289 13.2071C6.40237 12.8166 6.40237 12.1834 6.79289 11.7929L8.58579 10L6.79289 8.20711C6.40237 7.81658 6.40237 7.18342 6.79289 6.79289C7.18342 6.40237 7.81658 6.40237 8.20711 6.79289L10 8.58579L11.7929 6.79289C12.1834 6.40237 12.8166 6.40237 13.2071 6.79289Z"
fill="#EA2E4E"
/>
</svg>
)
}
const WarningIcon = () => {
return (
<svg
width="20"
height="20"
viewBox="0 0 20 20"
fill="none"
xmlns="http://www.w3.org/2000/svg"
>
<path
fillRule="evenodd"
clipRule="evenodd"
d="M20 10C20 15.5228 15.5228 20 10 20H0.993697C0.110179 20 -0.332289 18.9229 0.292453 18.2929L2.2495 16.3195C0.843343 14.597 1.21409e-08 12.397 1.21409e-08 10C1.21409e-08 4.47715 4.47715 0 10 0C15.5228 0 20 4.47715 20 10ZM10.99 6C10.99 5.44772 10.5446 5 9.99502 5C9.44549 5 9 5.44772 9 6V10C9 10.5523 9.44549 11 9.99502 11C10.5446 11 10.99 10.5523 10.99 10V6ZM9.99502 13C9.44549 13 9 13.4477 9 14C9 14.5523 9.44549 15 9.99502 15H10.005C10.5545 15 11 14.5523 11 14C11 13.4477 10.5545 13 10.005 13H9.99502Z"
fill="#FACC15"
/>
</svg>
)
}
const SuccessIcon = () => {
return (
<svg
width="20"
height="20"
viewBox="0 0 20 20"
fill="none"
xmlns="http://www.w3.org/2000/svg"
>
<path
fillRule="evenodd"
clipRule="evenodd"
d="M20 10C20 15.5228 15.5228 20 10 20H0.993697C0.110179 20 -0.332289 18.9229 0.292453 18.2929L2.2495 16.3195C0.843343 14.597 1.21409e-08 12.397 1.21409e-08 10C1.21409e-08 4.47715 4.47715 0 10 0C15.5228 0 20 4.47715 20 10ZM13.7071 8.70711C14.0976 8.31658 14.0976 7.68342 13.7071 7.29289C13.3166 6.90237 12.6834 6.90237 12.2929 7.29289L9 10.5858L7.70711 9.2929C7.31658 8.90237 6.68342 8.90237 6.29289 9.2929C5.90237 9.68342 5.90237 10.3166 6.29289 10.7071L8.29289 12.7071C8.48043 12.8946 8.73478 13 9 13C9.26522 13 9.51957 12.8946 9.70711 12.7071L13.7071 8.70711Z"
fill="#34D399"
/>
</svg>
)
}
const DefaultIcon = () => {
return (
<svg
width="20"
height="20"
viewBox="0 0 20 20"
fill="none"
xmlns="http://www.w3.org/2000/svg"
>
<path
fillRule="evenodd"
clipRule="evenodd"
d="M10 20C15.5228 20 20 15.5228 20 10C20 4.47715 15.5228 0 10 0C4.47715 0 2.11188e-08 4.47715 2.11188e-08 10C2.11188e-08 12.397 0.843343 14.597 2.2495 16.3195L0.292453 18.2929C-0.332289 18.9229 0.110179 20 0.993697 20H10ZM5.5 8C5.5 7.44772 5.94772 7 6.5 7H13.5C14.0523 7 14.5 7.44772 14.5 8C14.5 8.55229 14.0523 9 13.5 9H6.5C5.94772 9 5.5 8.55229 5.5 8ZM6.5 11C5.94772 11 5.5 11.4477 5.5 12C5.5 12.5523 5.94772 13 6.5 13H9.5C10.0523 13 10.5 12.5523 10.5 12C10.5 11.4477 10.0523 11 9.5 11H6.5Z"
fill="#60A5FA"
/>
</svg>
)
}
const renderIcon = (type: string) => {
switch (type) {
case 'warning':
return <WarningIcon />
case 'error':
return <ErrorIcon />
case 'success':
return <SuccessIcon />
default:
return <DefaultIcon />
}
}
export function toaster(props: Props) {
@@ -16,37 +108,52 @@ export function toaster(props: Props) {
return (
<div
className={twMerge(
'unset-drag relative flex min-w-[200px] max-w-[350px] gap-x-4 rounded-lg border border-border bg-background px-4 py-3',
t.visible ? 'animate-enter' : 'animate-leave',
type === 'success' && 'bg-primary text-primary-foreground'
'unset-drag dark:bg-zinc-white relative flex animate-enter items-center gap-x-4 rounded-lg bg-foreground px-4 py-2 text-white dark:border dark:border-border',
t.visible ? 'animate-enter' : 'animate-leave'
)}
>
<div>
<h1
className={twMerge(
'font-medium',
type === 'success' && 'font-medium text-primary-foreground'
)}
>
{title}
</h1>
<p
className={twMerge(
'mt-1 text-muted-foreground',
type === 'success' && 'text-primary-foreground/80'
)}
>
{description}
</p>
<div className="flex items-start gap-x-3 dark:text-black">
<div className="mt-1">{renderIcon(type)}</div>
<div className="pr-4">
<h1 className="font-bold">{title}</h1>
<p>{description}</p>
</div>
<XIcon
size={24}
className="absolute right-2 top-2 w-4 cursor-pointer dark:text-black"
onClick={() => toast.dismiss(t.id)}
/>
</div>
<XIcon
size={24}
className="absolute right-2 top-2 w-4 cursor-pointer text-muted-foreground"
onClick={() => toast.dismiss(t.id)}
/>
</div>
)
},
{ id: 'toast', duration: 3000 }
{ id: 'toast', duration: 2000, position: 'top-right' }
)
}
export function snackbar(props: Props) {
const { description, type = 'default' } = props
return toast.custom(
(t) => {
return (
<div
className={twMerge(
'unset-drag dark:bg-zinc-white relative bottom-2 flex animate-enter items-center gap-x-4 rounded-lg bg-foreground px-4 py-2 text-white dark:border dark:border-border',
t.visible ? 'animate-enter' : 'animate-leave'
)}
>
<div className="flex items-start gap-x-3 dark:text-black">
<div>{renderIcon(type)}</div>
<p className="pr-4">{description}</p>
<XIcon
size={24}
className="absolute right-2 top-1/2 w-4 -translate-y-1/2 cursor-pointer dark:text-black"
onClick={() => toast.dismiss(t.id)}
/>
</div>
</div>
)
},
{ id: 'snackbar', duration: 2000, position: 'bottom-center' }
)
}
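Both helpers render through react-hot-toast's custom renderer and share renderIcon; they differ in id, duration, position, and whether a title row is shown. The toaster call below appears later in this commit; the snackbar arguments are illustrative:

toaster({
  title: 'Download Completed',
  description: `Download ${modelId} completed`, // modelId in scope at the call site
  type: 'success',
})

snackbar({ description: 'Model stopped.', type: 'warning' })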

View File

@@ -14,8 +14,6 @@ import {
/**
* Stores all chat messages for all threads
*/
export const generateResponseAtom = atom<boolean>(false)
export const chatMessages = atom<Record<string, ThreadMessage[]>>({})
/**

View File

@@ -2,5 +2,6 @@ import { atom } from 'jotai'
export const totalRamAtom = atom<number>(0)
export const usedRamAtom = atom<number>(0)
export const availableRamAtom = atom<number>(0)
export const cpuUsageAtom = atom<number>(0)

View File

@@ -23,6 +23,7 @@ export const setActiveThreadIdAtom = atom(
export const waitingToSendMessage = atom<boolean | undefined>(undefined)
export const isGeneratingResponseAtom = atom<boolean | undefined>(undefined)
/**
* Stores all thread states for the current user
*/
@@ -46,18 +47,6 @@ export const deleteThreadStateAtom = atom(
}
)
export const updateThreadInitSuccessAtom = atom(
null,
(get, set, threadId: string) => {
const currentState = { ...get(threadStatesAtom) }
currentState[threadId] = {
...currentState[threadId],
isFinishInit: true,
}
set(threadStatesAtom, currentState)
}
)
export const updateThreadWaitingForResponseAtom = atom(
null,
(get, set, threadId: string, waitingForResponse: boolean) => {

View File

@@ -1,5 +1,5 @@
import { events, Model, ModelEvent } from '@janhq/core'
import { atom, useAtom, useAtomValue } from 'jotai'
import { atom, useAtom, useAtomValue, useSetAtom } from 'jotai'
import { toaster } from '@/containers/Toast'
@@ -9,6 +9,7 @@ import { LAST_USED_MODEL_ID } from './useRecommendedModel'
import { activeThreadAtom } from '@/helpers/atoms/Thread.atom'
export const activeModelAtom = atom<Model | undefined>(undefined)
export const loadModelErrorAtom = atom<string | undefined>(undefined)
export const stateModelAtom = atom({
state: 'start',
@@ -21,6 +22,7 @@ export function useActiveModel() {
const activeThread = useAtomValue(activeThreadAtom)
const [stateModel, setStateModel] = useAtom(stateModelAtom)
const { downloadedModels } = useGetDownloadedModels()
const setLoadModelError = useSetAtom(loadModelErrorAtom)
const startModel = async (modelId: string) => {
if (
@@ -31,6 +33,7 @@
return
}
// TODO: in case we have multiple assistants, the configuration will come from the assistant
setLoadModelError(undefined)
setActiveModel(undefined)
@@ -42,6 +45,7 @@
toaster({
title: `Model ${modelId} not found!`,
description: `Please download the model first.`,
type: 'warning',
})
setStateModel(() => ({
state: 'start',

View File

@@ -7,21 +7,23 @@ import {
ThreadState,
Model,
} from '@janhq/core'
import { atom, useAtom, useAtomValue, useSetAtom } from 'jotai'
import { atom, useSetAtom } from 'jotai'
import { selectedModelAtom } from '@/containers/DropdownListSidebar'
import { fileUploadAtom } from '@/containers/Providers/Jotai'
import { generateThreadId } from '@/utils/thread'
import useDeleteThread from './useDeleteThread'
import useRecommendedModel from './useRecommendedModel'
import useSetActiveThread from './useSetActiveThread'
import { extensionManager } from '@/extension'
import {
threadsAtom,
setActiveThreadIdAtom,
threadStatesAtom,
updateThreadAtom,
updateThreadInitSuccessAtom,
setThreadModelParamsAtom,
} from '@/helpers/atoms/Thread.atom'
const createNewThreadAtom = atom(null, (get, set, newThread: Thread) => {
@@ -32,7 +34,6 @@ const createNewThreadAtom = atom(null, (get, set, newThread: Thread) => {
hasMore: false,
waitingForResponse: false,
lastMessage: undefined,
isFinishInit: false,
}
currentState[newThread.id] = threadState
set(threadStatesAtom, currentState)
@@ -43,46 +44,35 @@
})
export const useCreateNewThread = () => {
const threadStates = useAtomValue(threadStatesAtom)
const updateThreadFinishInit = useSetAtom(updateThreadInitSuccessAtom)
const createNewThread = useSetAtom(createNewThreadAtom)
const setActiveThreadId = useSetAtom(setActiveThreadIdAtom)
const { setActiveThread } = useSetActiveThread()
const updateThread = useSetAtom(updateThreadAtom)
const [fileUpload, setFileUpload] = useAtom(fileUploadAtom)
const { deleteThread } = useDeleteThread()
const setFileUpload = useSetAtom(fileUploadAtom)
const setSelectedModel = useSetAtom(selectedModelAtom)
const setThreadModelParams = useSetAtom(setThreadModelParamsAtom)
const { recommendedModel, downloadedModels } = useRecommendedModel()
const requestCreateNewThread = async (
assistant: Assistant,
model?: Model | undefined
) => {
// loop through the thread states and find any thread that has not finished initializing
let unfinishedInitThreadId: string | undefined = undefined
for (const key in threadStates) {
const isFinishInit = threadStates[key].isFinishInit ?? true
if (!isFinishInit) {
unfinishedInitThreadId = key
break
}
}
const defaultModel = model ?? recommendedModel ?? downloadedModels[0]
if (unfinishedInitThreadId) {
await deleteThread(unfinishedInitThreadId)
}
const modelId = model ? model.id : '*'
const createdAt = Date.now()
const assistantInfo: ThreadAssistantInfo = {
assistant_id: assistant.id,
assistant_name: assistant.name,
tools: assistant.tools,
model: {
id: modelId,
settings: {},
parameters: {},
engine: undefined,
id: defaultModel?.id ?? '*',
settings: defaultModel?.settings ?? {},
parameters: defaultModel?.parameters ?? {},
engine: defaultModel?.engine,
},
instructions: assistant.instructions,
}
const threadId = generateThreadId(assistant.id)
const thread: Thread = {
id: threadId,
@@ -94,22 +84,27 @@ }
}
// add the new thread on top of the thread list to the state
// TODO: Why do we have a thread list and separate thread states? They should be combined
createNewThread(thread)
setActiveThreadId(thread.id)
setSelectedModel(defaultModel)
setThreadModelParams(thread.id, {
...defaultModel?.settings,
...defaultModel?.parameters,
})
// Delete the file upload state
setFileUpload([])
// Update thread metadata
await updateThreadMetadata(thread)
setActiveThread(thread)
}
function updateThreadMetadata(thread: Thread) {
async function updateThreadMetadata(thread: Thread) {
updateThread(thread)
const threadState = threadStates[thread.id]
const isFinishInit = threadState?.isFinishInit ?? true
if (!isFinishInit) {
updateThreadFinishInit(thread.id)
}
extensionManager
await extensionManager
.get<ConversationalExtension>(ExtensionTypeEnum.Conversational)
?.saveThread(thread)
}
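With isFinishInit gone, a new thread is now created fully initialized against a concrete model. A usage sketch (assistant and selectedModel come from the caller's scope):

const { requestCreateNewThread } = useCreateNewThread()

// model resolution: model ?? recommendedModel ?? downloadedModels[0]
await requestCreateNewThread(assistant)

// or pin a specific model for the new thread
await requestCreateNewThread(assistant, selectedModel)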

View File

@@ -19,6 +19,7 @@ export default function useDeleteModel() {
toaster({
title: 'Model Deletion Successful',
description: `The model ${model.id} has been successfully deleted.`,
type: 'success',
})
}

View File

@@ -21,7 +21,6 @@ import {
threadsAtom,
setActiveThreadIdAtom,
deleteThreadStateAtom,
threadStatesAtom,
updateThreadStateLastMessageAtom,
} from '@/helpers/atoms/Thread.atom'
@@ -34,7 +33,6 @@ export default function useDeleteThread() {
const deleteMessages = useSetAtom(deleteChatMessagesAtom)
const cleanMessages = useSetAtom(cleanChatMessagesAtom)
const deleteThreadState = useSetAtom(deleteThreadStateAtom)
const threadStates = useAtomValue(threadStatesAtom)
const updateThreadLastMessage = useSetAtom(updateThreadStateLastMessageAtom)
const cleanThread = async (threadId: string) => {
@@ -49,6 +47,14 @@
threadId,
messages.filter((msg) => msg.role === ChatCompletionRole.System)
)
thread.metadata = {
...thread.metadata,
lastMessage: undefined,
}
await extensionManager
.get<ConversationalExtension>(ExtensionTypeEnum.Conversational)
?.saveThread(thread)
updateThreadLastMessage(threadId, undefined)
}
}
@@ -66,21 +72,16 @@ export default function useDeleteThread() {
const availableThreads = threads.filter((c) => c.id !== threadId)
setThreads(availableThreads)
const deletingThreadState = threadStates[threadId]
const isFinishInit = deletingThreadState?.isFinishInit ?? true
// delete the thread state
deleteThreadState(threadId)
if (isFinishInit) {
deleteMessages(threadId)
setCurrentPrompt('')
toaster({
title: 'Thread successfully deleted.',
description: `Thread ${threadId} has been successfully deleted.`,
})
}
deleteMessages(threadId)
setCurrentPrompt('')
toaster({
title: 'Thread successfully deleted.',
description: `Thread ${threadId} has been successfully deleted.`,
type: 'success',
})
if (availableThreads.length > 0) {
setActiveThreadId(availableThreads[0].id)
} else {

View File

@@ -26,6 +26,7 @@ const setDownloadStateSuccessAtom = atom(null, (get, set, modelId: string) => {
toaster({
title: 'Download Completed',
description: `Download ${modelId} completed`,
type: 'success',
})
})
@@ -61,6 +62,7 @@ const setDownloadStateCancelledAtom = atom(
toaster({
title: 'Cancel Download',
description: `Model ${modelId} cancel download`,
type: 'warning',
})
return

View File

@@ -1,7 +1,9 @@
import { fs, joinPath } from '@janhq/core'
import { useCallback } from 'react'
import { fs, joinPath, events, AppConfigurationEventName } from '@janhq/core'
export const useEngineSettings = () => {
const readOpenAISettings = async () => {
const readOpenAISettings = useCallback(async () => {
if (
!(await fs.existsSync(await joinPath(['file://engines', 'openai.json'])))
)
@@ -14,17 +16,24 @@ export const useEngineSettings = () => {
return typeof settings === 'object' ? settings : JSON.parse(settings)
}
return {}
}
}, [])
const saveOpenAISettings = async ({
apiKey,
}: {
apiKey: string | undefined
}) => {
const settings = await readOpenAISettings()
const settingFilePath = await joinPath(['file://engines', 'openai.json'])
settings.api_key = apiKey
await fs.writeFileSync(
await joinPath(['file://engines', 'openai.json']),
JSON.stringify(settings)
await fs.writeFileSync(settingFilePath, JSON.stringify(settings))
// Sec: Don't attach the settings data to the event
events.emit(
AppConfigurationEventName.OnConfigurationUpdate,
settingFilePath
)
}
return { readOpenAISettings, saveOpenAISettings }
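A sketch of the round trip this enables (the listener side is the OpenAI extension shown earlier in this commit):

const { saveOpenAISettings } = useEngineSettings()

// 1. writes { ..., api_key } to file://engines/openai.json
// 2. emits AppConfigurationEventName.OnConfigurationUpdate with the file path
//    (only the path, per the security note above)
// 3. the extension's listener compares paths and re-reads its settings
await saveOpenAISettings({ apiKey: '<your-key>' })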

View File

@@ -0,0 +1,59 @@
import { useEffect, useState } from 'react'
import { fs, AppConfiguration, joinPath, getUserHomePath } from '@janhq/core'
export default function useFactoryReset() {
const [defaultJanDataFolder, setdefaultJanDataFolder] = useState('')
useEffect(() => {
async function getDefaultJanDataFolder() {
const homePath = await getUserHomePath()
const defaultJanDataFolder = await joinPath([homePath, 'jan'])
setdefaultJanDataFolder(defaultJanDataFolder)
}
getDefaultJanDataFolder()
}, [])
const resetAll = async (keepCurrentFolder?: boolean) => {
// read the location of the jan data folder
const appConfiguration: AppConfiguration | undefined =
await window.core?.api?.getAppConfigurations()
if (!appConfiguration) {
console.debug('Failed to get app configuration')
}
console.debug('appConfiguration: ', appConfiguration)
const janDataFolderPath = appConfiguration!.data_folder
if (defaultJanDataFolder === janDataFolderPath) {
console.debug('Jan data folder is already at user home')
} else {
// if jan data folder is not at user home, we update the app configuration to point to user home
if (!keepCurrentFolder) {
const configuration: AppConfiguration = {
data_folder: defaultJanDataFolder,
}
await window.core?.api?.updateAppConfiguration(configuration)
}
}
const modelPath = await joinPath([janDataFolderPath, 'models'])
const threadPath = await joinPath([janDataFolderPath, 'threads'])
console.debug(`Removing models at ${modelPath}`)
await fs.rmdirSync(modelPath, { recursive: true })
console.debug(`Removing threads at ${threadPath}`)
await fs.rmdirSync(threadPath, { recursive: true })
// reset the localStorage
localStorage.clear()
await window.core?.api?.relaunch()
}
return {
defaultJanDataFolder,
resetAll,
}
}
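A usage sketch of the new hook; the optional boolean keeps the current data folder instead of resetting it to the default under the user's home:

const { resetAll, defaultJanDataFolder } = useFactoryReset()

// wipes models/ and threads/, clears localStorage, relaunches,
// and moves the data folder back to the default (~/jan)
await resetAll()

// same wipe, but keeps the data folder where it currently is
await resetAll(true)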

View File

@@ -6,6 +6,7 @@ import { useSetAtom } from 'jotai'
import { extensionManager } from '@/extension/ExtensionManager'
import {
availableRamAtom,
cpuUsageAtom,
totalRamAtom,
usedRamAtom,
@@ -16,6 +17,7 @@ export default function useGetSystemResources() {
const [cpu, setCPU] = useState<number>(0)
const setTotalRam = useSetAtom(totalRamAtom)
const setUsedRam = useSetAtom(usedRamAtom)
const setAvailableRam = useSetAtom(availableRamAtom)
const setCpuUsage = useSetAtom(cpuUsageAtom)
const getSystemResources = async () => {
@@ -40,6 +42,10 @@ export default function useGetSystemResources() {
setTotalRam(resourceInfor.mem.totalMemory)
setRam(Math.round(ram * 100))
if (resourceInfor.mem.totalMemory && resourceInfor.mem.usedMemory)
setAvailableRam(
resourceInfor.mem.totalMemory - resourceInfor.mem.usedMemory
)
setCPU(Math.round(currentLoadInfor?.cpu?.usage ?? 0))
setCpuUsage(Math.round(currentLoadInfor?.cpu?.usage ?? 0))
}

View File

@@ -3,28 +3,23 @@ import { useAtomValue } from 'jotai'
import { selectedModelAtom } from '@/containers/DropdownListSidebar'
import { activeThreadAtom, threadStatesAtom } from '@/helpers/atoms/Thread.atom'
import { activeThreadAtom } from '@/helpers/atoms/Thread.atom'
export const usePath = () => {
const activeThread = useAtomValue(activeThreadAtom)
const threadStates = useAtomValue(threadStatesAtom)
const selectedModel = useAtomValue(selectedModelAtom)
const onReviewInFinder = async (type: string) => {
if (!activeThread) return
const activeThreadState = threadStates[activeThread.id]
if (!activeThreadState.isFinishInit) {
alert('Thread is not started yet')
return
}
// TODO: this logic should be refactored.
if (type !== 'Model' && !activeThread) return
const userSpace = await getJanDataFolderPath()
let filePath = undefined
const assistantId = activeThread.assistants[0]?.assistant_id
const assistantId = activeThread?.assistants[0]?.assistant_id
switch (type) {
case 'Engine':
case 'Thread':
filePath = await joinPath(['threads', activeThread.id])
filePath = await joinPath(['threads', activeThread?.id ?? ''])
break
case 'Model':
if (!selectedModel) return
@@ -44,20 +39,20 @@
}
const onViewJson = async (type: string) => {
if (!activeThread) return
const activeThreadState = threadStates[activeThread.id]
if (!activeThreadState.isFinishInit) {
alert('Thread is not started yet')
return
}
// TODO: this logic should be refactored.
if (type !== 'Model' && !activeThread) return
const userSpace = await getJanDataFolderPath()
let filePath = undefined
const assistantId = activeThread.assistants[0]?.assistant_id
const assistantId = activeThread?.assistants[0]?.assistant_id
switch (type) {
case 'Engine':
case 'Thread':
filePath = await joinPath(['threads', activeThread.id, 'thread.json'])
filePath = await joinPath([
'threads',
activeThread?.id ?? '',
'thread.json',
])
break
case 'Model':
if (!selectedModel) return
@@ -78,11 +73,6 @@ export const usePath = () => {
const onViewFile = async (id: string) => {
if (!activeThread) return
const activeThreadState = threadStates[activeThread.id]
if (!activeThreadState.isFinishInit) {
alert('Thread is not started yet')
return
}
const userSpace = await getJanDataFolderPath()
let filePath = undefined
@@ -92,9 +82,21 @@
openFileExplorer(fullPath)
}
const onViewFileContainer = async () => {
if (!activeThread) return
const userSpace = await getJanDataFolderPath()
let filePath = undefined
filePath = await joinPath(['threads', `${activeThread.id}/files`])
if (!filePath) return
const fullPath = await joinPath([userSpace, filePath])
openFileExplorer(fullPath)
}
return {
onReviewInFinder,
onViewJson,
onViewFile,
onViewFileContainer,
}
}

View File

@@ -26,7 +26,6 @@ export default function useRecommendedModel() {
const activeModel = useAtomValue(activeModelAtom)
const [downloadedModels, setDownloadedModels] = useState<Model[]>([])
const [recommendedModel, setRecommendedModel] = useState<Model | undefined>()
const threadStates = useAtomValue(threadStatesAtom)
const activeThread = useAtomValue(activeThreadAtom)
const getAndSortDownloadedModels = useCallback(async (): Promise<Model[]> => {
@@ -43,30 +42,12 @@
Model | undefined
> => {
const models = await getAndSortDownloadedModels()
if (!activeThread) {
return
}
if (!activeThread) return
const modelId = activeThread.assistants[0]?.model.id
const model = models.find((model) => model.id === modelId)
const finishInit = threadStates[activeThread.id].isFinishInit ?? true
if (finishInit) {
const modelId = activeThread.assistants[0]?.model.id
const model = models.find((model) => model.id === modelId)
if (model) {
setRecommendedModel(model)
}
return
} else {
const modelId = activeThread.assistants[0]?.model.id
if (modelId !== '*') {
const model = models.find((model) => model.id === modelId)
if (model) {
setRecommendedModel(model)
}
return
}
if (model) {
setRecommendedModel(model)
}
if (activeModel) {

View File

@@ -1,5 +1,5 @@
/* eslint-disable @typescript-eslint/no-explicit-any */
import { useEffect, useRef, useState } from 'react'
import { useEffect, useRef } from 'react'
import {
ChatCompletionMessage,
@@ -18,73 +18,74 @@
ChatCompletionMessageContentType,
AssistantTool,
} from '@janhq/core'
import { useAtom, useAtomValue, useSetAtom } from 'jotai'
import { atom, useAtom, useAtomValue, useSetAtom } from 'jotai'
import { ulid } from 'ulid'
import { selectedModelAtom } from '@/containers/DropdownListSidebar'
import { currentPromptAtom, fileUploadAtom } from '@/containers/Providers/Jotai'
import { toaster } from '@/containers/Toast'
import { getBase64 } from '@/utils/base64'
import { toRuntimeParams, toSettingParams } from '@/utils/modelParam'
import { useActiveModel } from './useActiveModel'
import { loadModelErrorAtom, useActiveModel } from './useActiveModel'
import { extensionManager } from '@/extension/ExtensionManager'
import {
addNewMessageAtom,
generateResponseAtom,
getCurrentChatMessagesAtom,
} from '@/helpers/atoms/ChatMessage.atom'
import {
activeThreadAtom,
engineParamsUpdateAtom,
getActiveThreadModelParamsAtom,
threadStatesAtom,
isGeneratingResponseAtom,
updateThreadAtom,
updateThreadInitSuccessAtom,
updateThreadWaitingForResponseAtom,
} from '@/helpers/atoms/Thread.atom'
export const queuedMessageAtom = atom(false)
export const reloadModelAtom = atom(false)
export default function useSendChatMessage() {
const activeThread = useAtomValue(activeThreadAtom)
const addNewMessage = useSetAtom(addNewMessageAtom)
const updateThread = useSetAtom(updateThreadAtom)
const updateThreadWaiting = useSetAtom(updateThreadWaitingForResponseAtom)
const [currentPrompt, setCurrentPrompt] = useAtom(currentPromptAtom)
const setGenerateResponse = useSetAtom(generateResponseAtom)
const setCurrentPrompt = useSetAtom(currentPromptAtom)
const currentMessages = useAtomValue(getCurrentChatMessagesAtom)
const { activeModel } = useActiveModel()
const selectedModel = useAtomValue(selectedModelAtom)
const { startModel } = useActiveModel()
const [queuedMessage, setQueuedMessage] = useState(false)
const setQueuedMessage = useSetAtom(queuedMessageAtom)
const loadModelFailed = useAtomValue(loadModelErrorAtom)
const modelRef = useRef<Model | undefined>()
const threadStates = useAtomValue(threadStatesAtom)
const updateThreadInitSuccess = useSetAtom(updateThreadInitSuccessAtom)
const loadModelFailedRef = useRef<string | undefined>()
const activeModelParams = useAtomValue(getActiveThreadModelParamsAtom)
const engineParamsUpdate = useAtomValue(engineParamsUpdateAtom)
const setEngineParamsUpdate = useSetAtom(engineParamsUpdateAtom)
const [reloadModel, setReloadModel] = useState(false)
const setEngineParamsUpdate = useSetAtom(engineParamsUpdateAtom)
const setReloadModel = useSetAtom(reloadModelAtom)
const [fileUpload, setFileUpload] = useAtom(fileUploadAtom)
const setIsGeneratingResponse = useSetAtom(isGeneratingResponseAtom)
useEffect(() => {
modelRef.current = activeModel
}, [activeModel])
useEffect(() => {
loadModelFailedRef.current = loadModelFailed
}, [loadModelFailed])
const resendChatMessage = async (currentMessage: ThreadMessage) => {
if (!activeThread) {
console.error('No active thread')
return
}
setIsGeneratingResponse(true)
updateThreadWaiting(activeThread.id, true)
const messages: ChatCompletionMessage[] = [
activeThread.assistants[0]?.instructions,
]
@@ -121,85 +122,28 @@ export default function useSendChatMessage() {
if (activeModel?.id !== modelId) {
setQueuedMessage(true)
startModel(modelId)
await WaitForModelStarting(modelId)
await waitForModelStarting(modelId)
setQueuedMessage(false)
}
events.emit(MessageEvent.OnMessageSent, messageRequest)
}
// TODO: Refactor @louis
const WaitForModelStarting = async (modelId: string) => {
return new Promise<void>((resolve) => {
setTimeout(async () => {
if (modelRef.current?.id !== modelId) {
console.debug('waiting for model to start')
await WaitForModelStarting(modelId)
resolve()
} else {
resolve()
}
}, 200)
})
}
const sendChatMessage = async () => {
setGenerateResponse(true)
if (!currentPrompt || currentPrompt.trim().length === 0) return
const sendChatMessage = async (message: string) => {
if (!message || message.trim().length === 0) return
if (!activeThread) {
console.error('No active thread')
return
}
setIsGeneratingResponse(true)
if (engineParamsUpdate) setReloadModel(true)
const activeThreadState = threadStates[activeThread.id]
const runtimeParams = toRuntimeParams(activeModelParams)
const settingParams = toSettingParams(activeModelParams)
// if the thread is not initialized, we need to initialize it first
if (
!activeThreadState.isFinishInit ||
activeThread.assistants[0].model.id !== selectedModel?.id
) {
if (!selectedModel) {
toaster({ title: 'Please select a model' })
return
}
const assistantId = activeThread.assistants[0].assistant_id ?? ''
const assistantName = activeThread.assistants[0].assistant_name ?? ''
const instructions = activeThread.assistants[0].instructions ?? ''
const tools = activeThread.assistants[0].tools ?? []
const updatedThread: Thread = {
...activeThread,
assistants: [
{
assistant_id: assistantId,
assistant_name: assistantName,
instructions: instructions,
tools: tools,
model: {
id: selectedModel.id,
settings: settingParams,
parameters: runtimeParams,
engine: selectedModel.engine,
},
},
],
}
updateThreadInitSuccess(activeThread.id)
updateThread(updatedThread)
await extensionManager
.get<ConversationalExtension>(ExtensionTypeEnum.Conversational)
?.saveThread(updatedThread)
}
updateThreadWaiting(activeThread.id, true)
const prompt = currentPrompt.trim()
const prompt = message.trim()
setCurrentPrompt('')
const base64Blob = fileUpload[0]
@@ -326,6 +270,14 @@ export default function useSendChatMessage() {
setFileUpload([])
}
const updatedThread: Thread = {
...activeThread,
updated: timestamp,
}
// bump the thread's last-updated timestamp when a message is sent
updateThread(updatedThread)
await extensionManager
.get<ConversationalExtension>(ExtensionTypeEnum.Conversational)
?.addNewMessage(threadMessage)
@@ -335,7 +287,7 @@
if (activeModel?.id !== modelId) {
setQueuedMessage(true)
startModel(modelId)
await WaitForModelStarting(modelId)
await waitForModelStarting(modelId)
setQueuedMessage(false)
}
@@ -345,10 +297,21 @@
setEngineParamsUpdate(false)
}
const waitForModelStarting = async (modelId: string) => {
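    // Poll every 200 ms until the active model becomes `modelId`, bailing out
    // early when loadModelFailedRef reports a load error so sending can unblock.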
return new Promise<void>((resolve) => {
setTimeout(async () => {
if (modelRef.current?.id !== modelId && !loadModelFailedRef.current) {
await waitForModelStarting(modelId)
resolve()
} else {
resolve()
}
}, 200)
})
}
return {
reloadModel,
sendChatMessage,
resendChatMessage,
queuedMessage,
}
}
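The prompt is now passed into sendChatMessage instead of being read from currentPromptAtom inside the hook, and queuedMessage/reloadModel are shared atoms rather than per-hook state. A sketch of the updated call site (names assumed):

const { sendChatMessage } = useSendChatMessage()

const onSend = async (prompt: string) => {
  await sendChatMessage(prompt) // trims, clears the prompt atom, emits OnMessageSent
}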

View File

@@ -1,5 +1,3 @@
import { useEffect } from 'react'
import {
InferenceEvent,
ExtensionTypeEnum,
@@ -15,6 +13,7 @@ import { setConvoMessagesAtom } from '@/helpers/atoms/ChatMessage.atom'
import {
ModelParams,
getActiveThreadIdAtom,
isGeneratingResponseAtom,
setActiveThreadIdAtom,
setThreadModelParamsAtom,
} from '@/helpers/atoms/Thread.atom'
@@ -24,6 +23,7 @@ export default function useSetActiveThread() {
const setActiveThreadId = useSetAtom(setActiveThreadIdAtom)
const setThreadMessage = useSetAtom(setConvoMessagesAtom)
const setThreadModelParams = useSetAtom(setThreadModelParamsAtom)
const setIsGeneratingResponse = useSetAtom(isGeneratingResponseAtom)
const setActiveThread = async (thread: Thread) => {
if (activeThreadId === thread.id) {
@@ -31,6 +31,7 @@
return
}
setIsGeneratingResponse(false)
events.emit(InferenceEvent.OnInferenceStopped, thread.id)
// load the corresponding messages

View File

@@ -1,4 +1,4 @@
import { useEffect, useState } from 'react'
import { useCallback, useEffect, useState } from 'react'
import { fs, joinPath } from '@janhq/core'
import { atom, useAtom } from 'jotai'
@@ -32,7 +32,7 @@ export const useSettings = () => {
})
}
const readSettings = async () => {
const readSettings = useCallback(async () => {
if (!window?.core?.api) {
return
}
@@ -42,7 +42,8 @@
return typeof settings === 'object' ? settings : JSON.parse(settings)
}
return {}
}
}, [])
const saveSettings = async ({
runMode,
notify,

View File

@@ -5,24 +5,24 @@ import {
ConversationalExtension,
} from '@janhq/core'
import { useAtom } from 'jotai'
import { useAtomValue, useSetAtom } from 'jotai'
import useSetActiveThread from './useSetActiveThread'
import { extensionManager } from '@/extension/ExtensionManager'
import {
ModelParams,
activeThreadAtom,
threadModelParamsAtom,
threadStatesAtom,
threadsAtom,
} from '@/helpers/atoms/Thread.atom'
const useThreads = () => {
const [threadStates, setThreadStates] = useAtom(threadStatesAtom)
const [threads, setThreads] = useAtom(threadsAtom)
const [threadModelRuntimeParams, setThreadModelRuntimeParams] = useAtom(
threadModelParamsAtom
)
const setThreadStates = useSetAtom(threadStatesAtom)
const setThreads = useSetAtom(threadsAtom)
const setThreadModelRuntimeParams = useSetAtom(threadModelParamsAtom)
const activeThread = useAtomValue(activeThreadAtom)
const { setActiveThread } = useSetActiveThread()
const getThreads = async () => {
@@ -39,7 +39,6 @@ const useThreads = () => {
hasMore: false,
waitingForResponse: false,
lastMessage,
isFinishInit: true,
}
const modelParams = thread.assistants?.[0]?.model?.parameters
@@ -51,41 +50,12 @@ }
}
})
// allow at most 1 unfinished-init thread, and it should be at the top of the list
let unfinishedThreadId: string | undefined = undefined
const unfinishedThreadState: Record<string, ThreadState> = {}
for (const key of Object.keys(threadStates)) {
const threadState = threadStates[key]
if (threadState.isFinishInit === false) {
unfinishedThreadState[key] = threadState
unfinishedThreadId = key
break
}
}
const unfinishedThread: Thread | undefined = threads.find(
(thread) => thread.id === unfinishedThreadId
)
let allThreads: Thread[] = [...localThreads]
if (unfinishedThread) {
allThreads = [unfinishedThread, ...localThreads]
}
if (unfinishedThreadId) {
localThreadStates[unfinishedThreadId] =
unfinishedThreadState[unfinishedThreadId]
threadModelParams[unfinishedThreadId] =
threadModelRuntimeParams[unfinishedThreadId]
}
// updating app states
setThreadStates(localThreadStates)
setThreads(allThreads)
setThreads(localThreads)
setThreadModelRuntimeParams(threadModelParams)
if (allThreads.length > 0) {
setActiveThread(allThreads[0])
if (localThreads.length && !activeThread) {
setActiveThread(localThreads[0])
}
} catch (error) {
console.error(error)

View File

@@ -2,12 +2,15 @@
import {
ConversationalExtension,
ExtensionTypeEnum,
InferenceEngine,
Thread,
ThreadAssistantInfo,
} from '@janhq/core'
import { useAtomValue, useSetAtom } from 'jotai'
import { selectedModelAtom } from '@/containers/DropdownListSidebar'
import { toRuntimeParams, toSettingParams } from '@/utils/modelParam'
import { extensionManager } from '@/extension'
@@ -19,16 +22,22 @@
threadsAtom,
} from '@/helpers/atoms/Thread.atom'
export type UpdateModelParameter = {
params?: ModelParams
modelId?: string
engine?: InferenceEngine
}
export default function useUpdateModelParameters() {
const threads = useAtomValue(threadsAtom)
const setThreadModelParams = useSetAtom(setThreadModelParamsAtom)
const activeThreadState = useAtomValue(activeThreadStateAtom)
const activeModelParams = useAtomValue(getActiveThreadModelParamsAtom)
const selectedModel = useAtomValue(selectedModelAtom)
const updateModelParameter = async (
threadId: string,
name: string,
value: number | boolean | string
settings: UpdateModelParameter
) => {
const thread = threads.find((thread) => thread.id === threadId)
if (!thread) {
@@ -40,21 +49,18 @@ export default function useUpdateModelParameters() {
console.error('No active thread')
return
}
const params = settings.modelId
? settings.params
: { ...activeModelParams, ...settings.params }
const updatedModelParams: ModelParams = {
...activeModelParams,
// Explicitly set the value to an array if the name is 'stop'
// This is because the inference engine would only accept an array for the 'stop' parameter
[name]: name === 'stop' ? (value === '' ? [] : [value]) : value,
...params,
}
// update the state
setThreadModelParams(thread.id, updatedModelParams)
if (!activeThreadState.isFinishInit) {
// if thread is not initialized, we don't need to update thread.json
return
}
const assistants = thread.assistants.map(
(assistant: ThreadAssistantInfo) => {
const runtimeParams = toRuntimeParams(updatedModelParams)
@ -62,6 +68,10 @@ export default function useUpdateModelParameters() {
assistant.model.parameters = runtimeParams
assistant.model.settings = settingParams
if (selectedModel) {
assistant.model.id = settings.modelId ?? selectedModel?.id
assistant.model.engine = settings.engine ?? selectedModel?.engine
}
return assistant
}
)
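  // A minimal usage sketch of the new object signature (the model id below is
  // a hypothetical placeholder, not taken from this diff; InferenceEngine.openai
  // is the enum member shown elsewhere in this change):
  //   updateModelParameter(thread.id, {
  //     params: { temperature: 0.7, stop: ['</s>'] },
  //     modelId: 'my-model-id',
  //     engine: InferenceEngine.openai,
  //   })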

View File

@ -1,105 +0,0 @@
import { useEffect } from 'react'
import { fs, AppConfiguration } from '@janhq/core'
import { atom, useAtom } from 'jotai'
import { useMainViewState } from './useMainViewState'
const isSameDirectoryAtom = atom(false)
const isDirectoryConfirmAtom = atom(false)
const isErrorSetNewDestAtom = atom(false)
const currentPathAtom = atom('')
const newDestinationPathAtom = atom('')
export const SUCCESS_SET_NEW_DESTINATION = 'successSetNewDestination'
export function useVaultDirectory() {
const [isSameDirectory, setIsSameDirectory] = useAtom(isSameDirectoryAtom)
const { setMainViewState } = useMainViewState()
const [isDirectoryConfirm, setIsDirectoryConfirm] = useAtom(
isDirectoryConfirmAtom
)
const [isErrorSetNewDest, setIsErrorSetNewDest] = useAtom(
isErrorSetNewDestAtom
)
const [currentPath, setCurrentPath] = useAtom(currentPathAtom)
const [newDestinationPath, setNewDestinationPath] = useAtom(
newDestinationPathAtom
)
useEffect(() => {
window.core?.api
?.getAppConfigurations()
?.then((appConfig: AppConfiguration) => {
setCurrentPath(appConfig.data_folder)
})
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [])
const setNewDestination = async () => {
const destFolder = await window.core?.api?.selectDirectory()
setNewDestinationPath(destFolder)
if (destFolder) {
console.debug(`Destination folder selected: ${destFolder}`)
try {
const appConfiguration: AppConfiguration =
await window.core?.api?.getAppConfigurations()
const currentJanDataFolder = appConfiguration.data_folder
if (currentJanDataFolder === destFolder) {
console.debug(
            `Destination folder is the same as the current folder. Ignoring.`
)
setIsSameDirectory(true)
setIsDirectoryConfirm(false)
return
} else {
setIsSameDirectory(false)
setIsDirectoryConfirm(true)
}
setIsErrorSetNewDest(false)
} catch (e) {
console.error(`Error: ${e}`)
setIsErrorSetNewDest(true)
}
}
}
const applyNewDestination = async () => {
try {
const appConfiguration: AppConfiguration =
await window.core?.api?.getAppConfigurations()
const currentJanDataFolder = appConfiguration.data_folder
appConfiguration.data_folder = newDestinationPath
await fs.syncFile(currentJanDataFolder, newDestinationPath)
await window.core?.api?.updateAppConfiguration(appConfiguration)
console.debug(
`File sync finished from ${currentPath} to ${newDestinationPath}`
)
setIsErrorSetNewDest(false)
localStorage.setItem(SUCCESS_SET_NEW_DESTINATION, 'true')
await window.core?.api?.relaunch()
} catch (e) {
console.error(`Error: ${e}`)
setIsErrorSetNewDest(true)
}
}
return {
setNewDestination,
newDestinationPath,
applyNewDestination,
isSameDirectory,
setIsDirectoryConfirm,
isDirectoryConfirm,
setIsSameDirectory,
currentPath,
isErrorSetNewDest,
setIsErrorSetNewDest,
}
}

View File

@ -25,10 +25,8 @@ const nextConfig = {
...config.plugins,
new webpack.DefinePlugin({
VERSION: JSON.stringify(packageJson.version),
ANALYTICS_ID:
JSON.stringify(process.env.ANALYTICS_ID) ?? JSON.stringify('xxx'),
ANALYTICS_HOST:
JSON.stringify(process.env.ANALYTICS_HOST) ?? JSON.stringify('xxx'),
ANALYTICS_ID: JSON.stringify(process.env.ANALYTICS_ID),
ANALYTICS_HOST: JSON.stringify(process.env.ANALYTICS_HOST),
API_BASE_URL: JSON.stringify('http://localhost:1337'),
isMac: process.platform === 'darwin',
isWindows: process.platform === 'win32',
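        // Note: with the 'xxx' fallback removed, ANALYTICS_ID and ANALYTICS_HOST
        // resolve to undefined at build time when the env vars are unset.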

View File

@ -57,7 +57,7 @@ const AssistantSetting = ({
tools: [
{
type: 'retrieval',
enabled: false,
enabled: true,
settings: {
...(activeThread.assistants[0].tools &&
activeThread.assistants[0].tools[0]?.settings),

View File

@ -8,11 +8,9 @@ import { useAtomValue } from 'jotai'
import LogoMark from '@/containers/Brand/Logo/Mark'
import GenerateResponse from '@/containers/Loader/GenerateResponse'
import { MainViewState } from '@/constants/screens'
import { activeModelAtom } from '@/hooks/useActiveModel'
import { loadModelErrorAtom } from '@/hooks/useActiveModel'
import { useGetDownloadedModels } from '@/hooks/useGetDownloadedModels'
import { useMainViewState } from '@/hooks/useMainViewState'
@ -21,17 +19,13 @@ import ChatItem from '../ChatItem'
import ErrorMessage from '../ErrorMessage'
import {
generateResponseAtom,
getCurrentChatMessagesAtom,
} from '@/helpers/atoms/ChatMessage.atom'
import { getCurrentChatMessagesAtom } from '@/helpers/atoms/ChatMessage.atom'
const ChatBody: React.FC = () => {
const messages = useAtomValue(getCurrentChatMessagesAtom)
const activeModel = useAtomValue(activeModelAtom)
const { downloadedModels } = useGetDownloadedModels()
const { setMainViewState } = useMainViewState()
const generateResponse = useAtomValue(generateResponseAtom)
const loadModelError = useAtomValue(loadModelErrorAtom)
if (downloadedModels.length === 0)
return (
@ -92,22 +86,14 @@ const ChatBody: React.FC = () => {
message.content.length > 0) && (
<ChatItem {...message} key={message.id} />
)}
{(message.status === MessageStatus.Error ||
message.status === MessageStatus.Stopped) &&
{!loadModelError &&
(message.status === MessageStatus.Error ||
message.status === MessageStatus.Stopped) &&
index === messages.length - 1 && (
<ErrorMessage message={message} />
)}
</div>
))}
{activeModel &&
(generateResponse ||
(messages.length &&
messages[messages.length - 1].status ===
MessageStatus.Pending &&
!messages[messages.length - 1].content.length)) && (
<GenerateResponse />
)}
</ScrollToBottom>
)}
</Fragment>

View File

@ -1,5 +1,5 @@
/* eslint-disable @typescript-eslint/no-explicit-any */
import { useEffect, useRef, useState } from 'react'
import { useContext, useEffect, useRef, useState } from 'react'
import { InferenceEvent, MessageStatus, events } from '@janhq/core'
@ -24,6 +24,8 @@ import { twMerge } from 'tailwind-merge'
import { currentPromptAtom, fileUploadAtom } from '@/containers/Providers/Jotai'
import { FeatureToggleContext } from '@/context/FeatureToggle'
import { useActiveModel } from '@/hooks/useActiveModel'
import { useClickOutside } from '@/hooks/useClickOutside'
@ -53,7 +55,8 @@ const ChatInput: React.FC = () => {
const textareaRef = useRef<HTMLTextAreaElement>(null)
const fileInputRef = useRef<HTMLInputElement>(null)
const imageInputRef = useRef<HTMLInputElement>(null)
const [ShowAttacmentMenus, setShowAttacmentMenus] = useState(false)
const [showAttacmentMenus, setShowAttacmentMenus] = useState(false)
const { experimentalFeature } = useContext(FeatureToggleContext)
const onPromptChange = (e: React.ChangeEvent<HTMLTextAreaElement>) => {
setCurrentPrompt(e.target.value)
@ -64,30 +67,35 @@ const ChatInput: React.FC = () => {
useEffect(() => {
if (isWaitingToSend && activeThreadId) {
setIsWaitingToSend(false)
sendChatMessage()
sendChatMessage(currentPrompt)
}
}, [
activeThreadId,
isWaitingToSend,
currentPrompt,
setIsWaitingToSend,
sendChatMessage,
])
useEffect(() => {
if (textareaRef.current) {
textareaRef.current.focus()
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [waitingToSendMessage, activeThreadId])
}, [activeThreadId])
useEffect(() => {
if (textareaRef.current) {
textareaRef.current.style.height = '40px'
textareaRef.current.style.height = textareaRef.current.scrollHeight + 'px'
textareaRef.current.focus()
}
}, [currentPrompt])
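  // The effect above implements auto-grow: the height resets to the 40px
  // baseline, then expands to scrollHeight as the prompt changes.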
const onKeyDown = async (e: React.KeyboardEvent<HTMLTextAreaElement>) => {
if (e.key === 'Enter') {
if (!e.shiftKey) {
e.preventDefault()
if (messages[messages.length - 1]?.status !== MessageStatus.Pending)
sendChatMessage()
else onStopInferenceClick()
}
if (e.key === 'Enter' && !e.shiftKey) {
e.preventDefault()
if (messages[messages.length - 1]?.status !== MessageStatus.Pending)
sendChatMessage(currentPrompt)
else onStopInferenceClick()
}
}
@ -142,50 +150,52 @@ const ChatInput: React.FC = () => {
value={currentPrompt}
onChange={onPromptChange}
/>
<Tooltip>
<TooltipTrigger asChild>
<PaperclipIcon
size={20}
className="absolute bottom-2 right-4 cursor-pointer text-muted-foreground"
onClick={(e) => {
if (
fileUpload.length > 0 ||
(activeThread?.assistants[0].tools &&
!activeThread?.assistants[0].tools[0]?.enabled)
) {
e.stopPropagation()
} else {
setShowAttacmentMenus(!ShowAttacmentMenus)
}
}}
/>
</TooltipTrigger>
<TooltipPortal>
{fileUpload.length > 0 ||
(activeThread?.assistants[0].tools &&
!activeThread?.assistants[0].tools[0]?.enabled && (
<TooltipContent side="top" className="max-w-[154px] px-3">
{fileUpload.length !== 0 && (
<span>
                  Currently, we only support 1 attachment at a time
</span>
)}
{activeThread?.assistants[0].tools &&
activeThread?.assistants[0].tools[0]?.enabled ===
false && (
{experimentalFeature && (
<Tooltip>
<TooltipTrigger asChild>
<PaperclipIcon
size={20}
className="absolute bottom-2 right-4 cursor-pointer text-muted-foreground"
onClick={(e) => {
if (
fileUpload.length > 0 ||
(activeThread?.assistants[0].tools &&
!activeThread?.assistants[0].tools[0]?.enabled)
) {
e.stopPropagation()
} else {
setShowAttacmentMenus(!showAttacmentMenus)
}
}}
/>
</TooltipTrigger>
<TooltipPortal>
{fileUpload.length > 0 ||
(activeThread?.assistants[0].tools &&
!activeThread?.assistants[0].tools[0]?.enabled && (
<TooltipContent side="top" className="max-w-[154px] px-3">
{fileUpload.length !== 0 && (
<span>
Turn on Retrieval in Assistant Settings to use this
feature
                      Currently, we only support 1 attachment at a time
</span>
)}
<TooltipArrow />
</TooltipContent>
))}
</TooltipPortal>
</Tooltip>
{activeThread?.assistants[0].tools &&
activeThread?.assistants[0].tools[0]?.enabled ===
false && (
<span>
Turn on Retrieval in Assistant Settings to use this
feature
</span>
)}
<TooltipArrow />
</TooltipContent>
))}
</TooltipPortal>
</Tooltip>
)}
{ShowAttacmentMenus && (
{showAttacmentMenus && (
<div
ref={refAttachmentMenus}
className="absolute bottom-10 right-0 w-36 cursor-pointer rounded-lg border border-border bg-background py-1 shadow"
@ -237,7 +247,7 @@ const ChatInput: React.FC = () => {
}
themes="primary"
className="min-w-[100px]"
onClick={sendChatMessage}
onClick={() => sendChatMessage(currentPrompt)}
>
Send
</Button>

View File

@ -17,7 +17,6 @@ import {
deleteMessageAtom,
getCurrentChatMessagesAtom,
} from '@/helpers/atoms/ChatMessage.atom'
import { totalRamAtom } from '@/helpers/atoms/SystemBar.atom'
import { activeThreadAtom } from '@/helpers/atoms/Thread.atom'
const ErrorMessage = ({ message }: { message: ThreadMessage }) => {
@ -25,8 +24,6 @@ const ErrorMessage = ({ message }: { message: ThreadMessage }) => {
const thread = useAtomValue(activeThreadAtom)
const deleteMessage = useSetAtom(deleteMessageAtom)
const { resendChatMessage } = useSendChatMessage()
const { activeModel } = useActiveModel()
const totalRam = useAtomValue(totalRamAtom)
const regenerateMessage = async () => {
const lastMessageIndex = messages.length - 1
@ -70,33 +67,26 @@ const ErrorMessage = ({ message }: { message: ThreadMessage }) => {
{message.status === MessageStatus.Error && (
<div key={message.id} className="mt-10 flex flex-col items-center">
<span className="mb-3 text-center text-sm font-medium text-gray-500">
{Number(activeModel?.metadata.size) > totalRam ? (
<>
Oops! Model size exceeds available RAM. Consider selecting a
smaller model or upgrading your RAM for smoother performance.
</>
) : (
<>
<p>Apologies, something&apos;s amiss!</p>
Jan&apos;s in beta. Find troubleshooting guides{' '}
<a
href="https://jan.ai/guides/troubleshooting"
target="_blank"
className="text-blue-600 hover:underline dark:text-blue-300"
>
here
</a>{' '}
or reach out to us on{' '}
<a
href="https://discord.gg/AsJ8krTT3N"
target="_blank"
className="text-blue-600 hover:underline dark:text-blue-300"
>
Discord
</a>{' '}
for assistance.
</>
)}
<>
<p>Apologies, something&apos;s amiss!</p>
Jan&apos;s in beta. Find troubleshooting guides{' '}
<a
href="https://jan.ai/guides/troubleshooting"
target="_blank"
className="text-blue-600 hover:underline dark:text-blue-300"
>
here
</a>{' '}
or reach out to us on{' '}
<a
href="https://discord.gg/AsJ8krTT3N"
target="_blank"
className="text-blue-600 hover:underline dark:text-blue-300"
>
Discord
</a>{' '}
for assistance.
</>
</span>
</div>
)}

View File

@ -0,0 +1,48 @@
import { MessageStatus, ThreadMessage } from '@janhq/core'
import { useAtomValue } from 'jotai'
import { useActiveModel } from '@/hooks/useActiveModel'
import { totalRamAtom } from '@/helpers/atoms/SystemBar.atom'
const LoadModelErrorMessage = () => {
const { activeModel } = useActiveModel()
const availableRam = useAtomValue(totalRamAtom)
return (
<>
<div className="mt-10 flex flex-col items-center">
<span className="mb-3 text-center text-sm font-medium text-gray-500">
{Number(activeModel?.metadata.size) > availableRam ? (
<>
Oops! Model size exceeds available RAM. Consider selecting a
smaller model or upgrading your RAM for smoother performance.
</>
) : (
<>
<p>Apologies, something&apos;s amiss!</p>
Jan&apos;s in beta. Find troubleshooting guides{' '}
<a
href="https://jan.ai/guides/troubleshooting"
target="_blank"
className="text-blue-600 hover:underline dark:text-blue-300"
>
here
</a>{' '}
or reach out to us on{' '}
<a
href="https://discord.gg/AsJ8krTT3N"
target="_blank"
className="text-blue-600 hover:underline dark:text-blue-300"
>
Discord
</a>{' '}
for assistance.
</>
)}
</span>
</div>
</>
)
}
export default LoadModelErrorMessage
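// Rendered by ChatScreen when loadModelErrorAtom is set (see the ChatScreen
// diff further down); the RAM message appears only when the model size
// exceeds the reported total RAM.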

View File

@ -1,7 +1,9 @@
import useSendChatMessage from '@/hooks/useSendChatMessage'
import { useAtomValue } from 'jotai'
import { queuedMessageAtom } from '@/hooks/useSendChatMessage'
const MessageQueuedBanner: React.FC = () => {
const { queuedMessage } = useSendChatMessage()
const queuedMessage = useAtomValue(queuedMessageAtom)
return (
<div>

View File

@ -4,6 +4,7 @@ import {
ThreadMessage,
ChatCompletionRole,
ConversationalExtension,
ContentType,
} from '@janhq/core'
import { useAtomValue, useSetAtom } from 'jotai'
import { RefreshCcw, CopyIcon, Trash2Icon, CheckIcon } from 'lucide-react'
@ -53,7 +54,9 @@ const MessageToolbar = ({ message }: { message: ThreadMessage }) => {
<div className={twMerge('flex flex-row items-center')}>
<div className="flex overflow-hidden rounded-md border border-border bg-background/20">
{message.id === messages[messages.length - 1]?.id &&
messages[messages.length - 1].status !== MessageStatus.Error && (
messages[messages.length - 1].status !== MessageStatus.Error &&
messages[messages.length - 1].content[0]?.type !==
ContentType.Pdf && (
<div
className="cursor-pointer border-r border-border px-2 py-2 hover:bg-background/80"
onClick={onRegenerateClick}

View File

@ -56,7 +56,7 @@ const SettingComponent = ({
updater?: (
threadId: string,
name: string,
value: string | number | boolean
value: string | number | boolean | string[]
) => void
}) => {
const { updateModelParameter } = useUpdateModelParameters()
@ -73,7 +73,10 @@ const SettingComponent = ({
const { stopModel } = useActiveModel()
const onValueChanged = (name: string, value: string | number | boolean) => {
const onValueChanged = (
name: string,
value: string | number | boolean | string[]
) => {
if (!threadId) return
if (engineParams.some((x) => x.name.includes(name))) {
setEngineParamsUpdate(true)
@ -83,7 +86,13 @@ const SettingComponent = ({
}
if (updater) updater(threadId, name, value)
else {
updateModelParameter(threadId, name, value)
// Convert stop string to array
if (name === 'stop' && typeof value === 'string') {
value = [value]
}
updateModelParameter(threadId, {
params: { [name]: value },
})
}
}

View File

@ -1,5 +1,5 @@
/* eslint-disable @typescript-eslint/no-explicit-any */
import React from 'react'
import React, { useContext } from 'react'
import { InferenceEngine } from '@janhq/core'
import { Input, Textarea, Switch } from '@janhq/uikit'
@ -15,6 +15,8 @@ import DropdownListSidebar, {
selectedModelAtom,
} from '@/containers/DropdownListSidebar'
import { FeatureToggleContext } from '@/context/FeatureToggle'
import { useCreateNewThread } from '@/hooks/useCreateNewThread'
import { getConfigurationsData } from '@/utils/componentSettings'
@ -39,6 +41,7 @@ const Sidebar: React.FC = () => {
const activeModelParams = useAtomValue(getActiveThreadModelParamsAtom)
const selectedModel = useAtomValue(selectedModelAtom)
const { updateThreadMetadata } = useCreateNewThread()
const { experimentalFeature } = useContext(FeatureToggleContext)
const modelEngineParams = toSettingParams(activeModelParams)
const modelRuntimeParams = toRuntimeParams(activeModelParams)
@ -131,78 +134,79 @@ const Sidebar: React.FC = () => {
}}
/>
</div>
<div>
{activeThread?.assistants[0]?.tools &&
componentDataAssistantSetting.length > 0 && (
<div className="mt-2">
<CardSidebar
title="Retrieval"
asChild
rightAction={
<Switch
name="retrieval"
className="mr-2"
checked={activeThread?.assistants[0].tools[0].enabled}
onCheckedChange={(e) => {
if (activeThread)
updateThreadMetadata({
...activeThread,
assistants: [
{
...activeThread.assistants[0],
tools: [
{
type: 'retrieval',
enabled: e,
settings:
(activeThread.assistants[0].tools &&
activeThread.assistants[0].tools[0]
?.settings) ??
{},
},
],
},
],
})
}}
/>
}
>
{activeThread?.assistants[0]?.tools[0].enabled && (
<div className="px-2 py-4">
<div className="mb-4">
<label
id="tool-title"
className="mb-2 inline-block font-bold text-zinc-500 dark:text-gray-300"
>
Embedding Engine
</label>
<div className="flex items-center justify-between">
<label className="font-medium text-zinc-500 dark:text-gray-300">
{selectedModel?.engine ===
InferenceEngine.openai
? 'OpenAI'
: 'Nitro'}
</label>
</div>
</div>
<AssistantSetting
componentData={componentDataAssistantSetting}
{experimentalFeature && (
<div>
{activeThread?.assistants[0]?.tools &&
componentDataAssistantSetting.length > 0 && (
<div className="mt-2">
<CardSidebar
title="Retrieval"
asChild
rightAction={
<Switch
name="retrieval"
className="mr-2"
checked={
activeThread?.assistants[0].tools[0].enabled
}
onCheckedChange={(e) => {
if (activeThread)
updateThreadMetadata({
...activeThread,
assistants: [
{
...activeThread.assistants[0],
tools: [
{
type: 'retrieval',
enabled: e,
settings:
(activeThread.assistants[0].tools &&
activeThread.assistants[0]
.tools[0]?.settings) ??
{},
},
],
},
],
})
}}
/>
</div>
)}
</CardSidebar>
</div>
)}
</div>
}
>
{activeThread?.assistants[0]?.tools[0].enabled && (
<div className="px-2 py-4">
<div className="mb-4">
<label
id="tool-title"
className="mb-2 inline-block font-bold text-zinc-500 dark:text-gray-300"
>
Embedding Engine
</label>
<div className="flex items-center justify-between">
<label className="font-medium text-zinc-500 dark:text-gray-300">
{selectedModel?.engine ===
InferenceEngine.openai
? 'OpenAI'
: 'Nitro'}
</label>
</div>
</div>
<AssistantSetting
componentData={componentDataAssistantSetting}
/>
</div>
)}
</CardSidebar>
</div>
)}
</div>
)}
</div>
</CardSidebar>
<CardSidebar title="Model">
<div className="px-2">
<div className="mt-4">
<DropdownListSidebar />
</div>
<div className="px-2 pt-4">
<DropdownListSidebar />
{componentDataRuntimeSetting.length > 0 && (
<div className="mt-6">

View File

@ -43,7 +43,7 @@ const SimpleTextMessage: React.FC<ThreadMessage> = (props) => {
text = props.content[0]?.text?.value ?? ''
}
const clipboard = useClipboard({ timeout: 1000 })
const { onViewFile } = usePath()
const { onViewFile, onViewFileContainer } = usePath()
const marked: Marked = new Marked(
markedHighlight({
@ -200,13 +200,14 @@ const SimpleTextMessage: React.FC<ThreadMessage> = (props) => {
className="aspect-auto h-[300px]"
alt={props.content[0]?.text.name}
src={props.content[0]?.text.annotations[0]}
onClick={() => onViewFile(`${props.id}.png`)}
/>
<div className="absolute left-0 top-0 z-20 hidden h-full w-full bg-black/20 group-hover/image:inline-block" />
<Tooltip>
<TooltipTrigger asChild>
<div
className="absolute right-2 top-2 z-20 hidden h-8 w-8 cursor-pointer items-center justify-center rounded-md bg-background group-hover/image:flex"
onClick={() => onViewFile(`${props.id}.png`)}
onClick={onViewFileContainer}
>
<FolderOpenIcon size={20} />
</div>
@ -223,14 +224,17 @@ const SimpleTextMessage: React.FC<ThreadMessage> = (props) => {
{props.content[0]?.type === ContentType.Pdf && (
<div className="group/file relative mb-2 inline-flex w-60 cursor-pointer gap-x-3 overflow-hidden rounded-lg bg-secondary p-4">
<div className="absolute left-0 top-0 z-20 hidden h-full w-full bg-black/20 backdrop-blur-sm group-hover/file:inline-block" />
<div
className="absolute left-0 top-0 z-20 hidden h-full w-full bg-black/20 backdrop-blur-sm group-hover/file:inline-block"
onClick={() =>
onViewFile(`${props.id}.${props.content[0]?.type}`)
}
/>
<Tooltip>
<TooltipTrigger asChild>
<div
className="absolute right-2 top-2 z-20 hidden h-8 w-8 cursor-pointer items-center justify-center rounded-md bg-background group-hover/file:flex"
onClick={() =>
onViewFile(`${props.id}.${props.content[0]?.type}`)
}
onClick={onViewFileContainer}
>
<FolderOpenIcon size={20} />
</div>

View File

@ -1,4 +1,4 @@
import { useEffect } from 'react'
import { useEffect, useState } from 'react'
import {
Modal,
@ -49,17 +49,19 @@ export default function ThreadList() {
const activeThread = useAtomValue(activeThreadAtom)
const { deleteThread, cleanThread } = useDeleteThread()
const { downloadedModels } = useGetDownloadedModels()
const [isThreadsReady, setIsThreadsReady] = useState(false)
const { activeThreadId, setActiveThread: onThreadClick } =
useSetActiveThread()
useEffect(() => {
getThreads()
getThreads().then(() => setIsThreadsReady(true))
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [])
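  // isThreadsReady gates the effect below so a default thread isn't
  // auto-created before the existing threads have finished loading.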
useEffect(() => {
if (
isThreadsReady &&
downloadedModels.length !== 0 &&
threads.length === 0 &&
assistants.length !== 0 &&
@ -68,7 +70,7 @@ export default function ThreadList() {
requestCreateNewThread(assistants[0])
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [assistants, threads, downloadedModels, activeThread])
}, [assistants, threads, downloadedModels, activeThread, isThreadsReady])
return (
<div className="px-3 py-4">
@ -84,7 +86,6 @@ export default function ThreadList() {
threads.map((thread, i) => {
const lastMessage =
threadStates[thread.id]?.lastMessage ?? 'No new message'
return (
<div
key={i}
@ -96,13 +97,10 @@ export default function ThreadList() {
}}
>
<div className="relative z-10 p-4 py-4">
<div className="flex justify-between">
<h2 className="line-clamp-1 font-bold">{thread.title}</h2>
<p className="mb-1 line-clamp-1 text-xs leading-5 text-muted-foreground">
{thread.updated &&
displayDate(new Date(thread.updated).getTime())}
</p>
</div>
<p className="line-clamp-1 text-xs leading-5 text-muted-foreground">
{thread.updated && displayDate(thread.updated)}
</p>
<h2 className="line-clamp-1 font-bold">{thread.title}</h2>
<p className="mt-1 line-clamp-1 text-xs text-gray-700 group-hover/message:max-w-[160px] dark:text-gray-300">
{lastMessage || 'No new message'}
</p>
@ -161,9 +159,9 @@ export default function ThreadList() {
<div className="flex cursor-pointer items-center space-x-2 px-4 py-2 hover:bg-secondary">
<Trash2Icon
size={16}
className="text-muted-foreground"
className="text-red-600 dark:text-red-300"
/>
<span className="text-bold text-black dark:text-muted-foreground">
<span className="text-bold text-red-600 dark:text-red-300">
Delete thread
</span>
</div>

View File

@ -1,53 +1,87 @@
/* eslint-disable @typescript-eslint/naming-convention */
import React, { useEffect, useState } from 'react'
import React, { useContext, useEffect, useState } from 'react'
import { useDropzone } from 'react-dropzone'
import { useAtomValue, useSetAtom } from 'jotai'
import { UploadCloudIcon, XIcon } from 'lucide-react'
import { UploadCloudIcon } from 'lucide-react'
import { twMerge } from 'tailwind-merge'
import GenerateResponse from '@/containers/Loader/GenerateResponse'
import ModelReload from '@/containers/Loader/ModelReload'
import ModelStart from '@/containers/Loader/ModelStart'
import { currentPromptAtom, fileUploadAtom } from '@/containers/Providers/Jotai'
import { showLeftSideBarAtom } from '@/containers/Providers/KeyListener'
import useSendChatMessage from '@/hooks/useSendChatMessage'
import { snackbar } from '@/containers/Toast'
import { FeatureToggleContext } from '@/context/FeatureToggle'
import { activeModelAtom, loadModelErrorAtom } from '@/hooks/useActiveModel'
import { queuedMessageAtom, reloadModelAtom } from '@/hooks/useSendChatMessage'
import ChatBody from '@/screens/Chat/ChatBody'
import ThreadList from '@/screens/Chat/ThreadList'
import ChatInput from './ChatInput'
import LoadModelErrorMessage from './LoadModelErrorMessage'
import RequestDownloadModel from './RequestDownloadModel'
import Sidebar from './Sidebar'
import {
activeThreadAtom,
engineParamsUpdateAtom,
isGeneratingResponseAtom,
} from '@/helpers/atoms/Thread.atom'
const renderError = (code: string) => {
switch (code) {
case 'multiple-upload':
      return 'Currently, we only support 1 attachment at a time'
case 'retrieval-off':
return 'Turn on Retrieval in Assistant Settings to use this feature'
case 'file-invalid-type':
return 'We do not support this file type'
default:
      return 'Oops, something went wrong, please try again.'
}
}
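// Hoisted to module scope (it previously lived inside ChatScreen, see the
// removed block below) so the helper isn't re-created on every render.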
const ChatScreen: React.FC = () => {
const setCurrentPrompt = useSetAtom(currentPromptAtom)
const activeThread = useAtomValue(activeThreadAtom)
const showLeftSideBar = useAtomValue(showLeftSideBarAtom)
const engineParamsUpdate = useAtomValue(engineParamsUpdateAtom)
const { queuedMessage, reloadModel } = useSendChatMessage()
const [dragOver, setDragOver] = useState(false)
const queuedMessage = useAtomValue(queuedMessageAtom)
const reloadModel = useAtomValue(reloadModelAtom)
const [dragRejected, setDragRejected] = useState({ code: '' })
const setFileUpload = useSetAtom(fileUploadAtom)
const { experimentalFeature } = useContext(FeatureToggleContext)
const activeModel = useAtomValue(activeModelAtom)
const isGeneratingResponse = useAtomValue(isGeneratingResponseAtom)
const loadModelError = useAtomValue(loadModelErrorAtom)
const { getRootProps, isDragReject } = useDropzone({
noClick: true,
multiple: false,
accept: {
// 'image/*': ['.png', '.jpg', '.jpeg'],
'application/pdf': ['.pdf'],
},
onDragOver: (e) => {
// Retrieval file drag and drop is experimental feature
if (!experimentalFeature) return
if (
e.dataTransfer.items.length === 1 &&
activeThread?.assistants[0].tools &&
@ -65,6 +99,8 @@ const ChatScreen: React.FC = () => {
},
onDragLeave: () => setDragOver(false),
onDrop: (files, rejectFiles) => {
// Retrieval file drag and drop is experimental feature
if (!experimentalFeature) return
if (
!files ||
files.length !== 1 ||
@ -95,8 +131,13 @@ const ChatScreen: React.FC = () => {
},
})
  // TODO @faisal change this until we have snackbar component
useEffect(() => {
if (dragRejected.code) {
snackbar({
description: renderError(dragRejected.code),
type: 'error',
})
}
setTimeout(() => {
if (dragRejected.code) {
setDragRejected({ code: '' })
@ -104,22 +145,6 @@ const ChatScreen: React.FC = () => {
}, 2000)
}, [dragRejected.code])
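  // The 2s timeout clears the rejection code so a repeated invalid drop can
  // raise the snackbar again.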
const renderError = (code: string) => {
switch (code) {
case 'multiple-upload':
        return 'Currently, we only support 1 attachment at a time'
case 'retrieval-off':
return 'Turn on Retrieval in Assistant Settings to use this feature'
case 'file-invalid-type':
return 'We do not support this file type'
default:
        return 'Oops, something went wrong, please try again.'
}
}
return (
<div className="flex h-full w-full">
{/* Left side bar */}
@ -133,33 +158,6 @@ const ChatScreen: React.FC = () => {
className="relative flex h-full w-full flex-col overflow-auto bg-background outline-none"
{...getRootProps()}
>
{dragRejected.code !== '' && (
<div className="absolute bottom-3 left-1/2 z-50 inline-flex w-full -translate-x-1/2 justify-center px-16">
<div className="flex items-start justify-between gap-x-4 rounded-lg bg-foreground px-4 py-2 text-white dark:border dark:border-border dark:bg-zinc-900">
<svg
width="20"
height="20"
viewBox="0 0 20 20"
fill="none"
xmlns="http://www.w3.org/2000/svg"
>
<path
fillRule="evenodd"
clipRule="evenodd"
d="M20 10C20 15.5228 15.5228 20 10 20H0.993697C0.110179 20 -0.332289 18.9229 0.292453 18.2929L2.2495 16.3195C0.843343 14.597 1.21409e-08 12.397 1.21409e-08 10C1.21409e-08 4.47715 4.47715 0 10 0C15.5228 0 20 4.47715 20 10ZM13.2071 6.79289C13.5976 7.18342 13.5976 7.81658 13.2071 8.20711L11.4142 10L13.2071 11.7929C13.5976 12.1834 13.5976 12.8166 13.2071 13.2071C12.8166 13.5976 12.1834 13.5976 11.7929 13.2071L10 11.4142L8.20711 13.2071C7.81658 13.5976 7.18342 13.5976 6.79289 13.2071C6.40237 12.8166 6.40237 12.1834 6.79289 11.7929L8.58579 10L6.79289 8.20711C6.40237 7.81658 6.40237 7.18342 6.79289 6.79289C7.18342 6.40237 7.81658 6.40237 8.20711 6.79289L10 8.58579L11.7929 6.79289C12.1834 6.40237 12.8166 6.40237 13.2071 6.79289Z"
fill="#F87171"
/>
</svg>
<p>{renderError(dragRejected.code)}</p>
<XIcon
size={24}
className="cursor-pointer"
onClick={() => setDragRejected({ code: '' })}
/>
</div>
</div>
)}
{dragOver && (
<div className="absolute z-50 mx-auto h-full w-full bg-background/50 p-8 backdrop-blur-lg">
<div
@ -213,9 +211,13 @@ const ChatScreen: React.FC = () => {
</span>
</div>
)}
{activeModel && isGeneratingResponse && <GenerateResponse />}
{loadModelError && <LoadModelErrorMessage />}
<ChatInput />
</div>
</div>
{/* Right side bar */}
{activeThread && <Sidebar />}
</div>

View File

@ -52,9 +52,12 @@ const ExploreModelsScreen = () => {
if (loading) return <Loader description="loading ..." />
return (
<div className="flex h-full w-full overflow-y-auto bg-background">
<div
className="flex h-full w-full overflow-y-auto bg-background"
data-testid="hub-container-test-id"
>
<div className="h-full w-full p-4">
<div className="h-full" data-test-id="testid-explore-models">
<div className="h-full">
<ScrollArea>
<div className="relative">
<img

View File

@ -3,19 +3,26 @@ import { useEffect, useState } from 'react'
import React from 'react'
import { useAtomValue } from 'jotai'
import { useServerLog } from '@/hooks/useServerLog'
import { serverEnabledAtom } from '@/helpers/atoms/LocalServer.atom'
const Logs = () => {
const { getServerLog } = useServerLog()
const serverEnabled = useAtomValue(serverEnabledAtom)
const [logs, setLogs] = useState([])
useEffect(() => {
getServerLog().then((log) => {
if (typeof log?.split === 'function') setLogs(log.split(/\r?\n|\r|\n/g))
if (typeof log?.split === 'function') {
setLogs(log.split(/\r?\n|\r|\n/g))
}
})
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [logs])
}, [logs, serverEnabled])
return (
<div className="overflow-hidden">

View File

@ -1,7 +1,6 @@
/* eslint-disable @typescript-eslint/no-explicit-any */
'use client'
import React, { useEffect, useState } from 'react'
import React, { useCallback, useEffect, useState } from 'react'
import ScrollToBottom from 'react-scroll-to-bottom'
@ -29,6 +28,7 @@ import { ExternalLinkIcon, InfoIcon } from 'lucide-react'
import { twMerge } from 'tailwind-merge'
import CardSidebar from '@/containers/CardSidebar'
import DropdownListSidebar, {
selectedModelAtom,
} from '@/containers/DropdownListSidebar'
@ -58,7 +58,7 @@ const portAtom = atom('1337')
const LocalServerScreen = () => {
const [errorRangePort, setErrorRangePort] = useState(false)
const [serverEnabled, setServerEnabled] = useAtom(serverEnabledAtom)
const showing = useAtomValue(showRightSideBarAtom)
const showRightSideBar = useAtomValue(showRightSideBarAtom)
const activeModelParams = useAtomValue(getActiveThreadModelParamsAtom)
const modelEngineParams = toSettingParams(activeModelParams)
@ -66,43 +66,44 @@ const LocalServerScreen = () => {
const { openServerLog, clearServerLog } = useServerLog()
const { startModel, stateModel } = useActiveModel()
const [selectedModel] = useAtom(selectedModelAtom)
const selectedModel = useAtomValue(selectedModelAtom)
const [isCorsEnabled, setIsCorsEnabled] = useAtom(corsEnabledAtom)
const [isVerboseEnabled, setIsVerboseEnabled] = useAtom(verboseEnabledAtom)
const [host, setHost] = useAtom(hostAtom)
const [port, setPort] = useAtom(portAtom)
const hostOptions = ['127.0.0.1', '0.0.0.0']
const FIRST_TIME_VISIT_API_SERVER = 'firstTimeVisitAPIServer'
const [firstTimeVisitAPIServer, setFirstTimeVisitAPIServer] =
useState<boolean>(false)
const handleChangePort = (value: any) => {
if (Number(value) <= 0 || Number(value) >= 65536) {
setErrorRangePort(true)
} else {
setErrorRangePort(false)
}
setPort(value)
}
const handleChangePort = useCallback(
(value: string) => {
if (Number(value) <= 0 || Number(value) >= 65536) {
setErrorRangePort(true)
} else {
setErrorRangePort(false)
}
setPort(value)
},
[setPort]
)
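  // Wrapped in useCallback so the reference stays stable and the effect below
  // can safely list it as a dependency without re-running every render.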
useEffect(() => {
if (
localStorage.getItem(FIRST_TIME_VISIT_API_SERVER) === null ||
localStorage.getItem(FIRST_TIME_VISIT_API_SERVER) === 'true'
) {
localStorage.setItem(FIRST_TIME_VISIT_API_SERVER, 'true')
if (localStorage.getItem(FIRST_TIME_VISIT_API_SERVER) == null) {
setFirstTimeVisitAPIServer(true)
}
}, [firstTimeVisitAPIServer])
useEffect(() => {
handleChangePort(port)
}, [])
}, [handleChangePort, port])
return (
<div className="flex h-full w-full">
<div className="flex h-full w-full" data-testid="local-server-testid">
{/* Left SideBar */}
<div className="flex h-full w-60 flex-shrink-0 flex-col overflow-y-auto border-r border-border">
<div className="p-4">
@ -116,7 +117,7 @@ const LocalServerScreen = () => {
<Button
block
themes={serverEnabled ? 'danger' : 'primary'}
disabled={stateModel.loading || errorRangePort}
disabled={stateModel.loading || errorRangePort || !selectedModel}
onClick={() => {
if (serverEnabled) {
window.core?.api?.stopServer()
@ -166,8 +167,19 @@ const LocalServerScreen = () => {
<SelectValue />
</SelectTrigger>
<SelectContent>
<SelectItem value="127.0.0.1">127.0.0.1</SelectItem>
<SelectItem value="0.0.0.0">0.0.0.0</SelectItem>
{hostOptions.map((option, i) => {
return (
<SelectItem
key={i}
value={option}
className={twMerge(
host === option && 'bg-secondary'
)}
>
{option}
</SelectItem>
)
})}
</SelectContent>
</Select>
@ -176,6 +188,7 @@ const LocalServerScreen = () => {
'w-[70px] flex-shrink-0',
errorRangePort && 'border-danger'
)}
type="number"
value={port}
onChange={(e) => {
handleChangePort(e.target.value)
@ -275,7 +288,7 @@ const LocalServerScreen = () => {
{/* Middle Bar */}
<ScrollToBottom className="relative flex h-full w-full flex-col overflow-auto bg-background">
<div className="sticky top-0 flex items-center justify-between bg-zinc-100 px-4 py-2 dark:bg-secondary/30">
<div className="sticky top-0 flex items-center justify-between bg-zinc-100 px-4 py-2 dark:bg-zinc-600">
<h2 className="font-bold">Server Logs</h2>
<div className="space-x-2">
<Button
@ -345,15 +358,13 @@ const LocalServerScreen = () => {
<div
className={twMerge(
'h-full flex-shrink-0 overflow-x-hidden border-l border-border bg-background transition-all duration-100 dark:bg-background/20',
showing
showRightSideBar
? 'w-80 translate-x-0 opacity-100'
: 'w-0 translate-x-full opacity-0'
)}
>
<div className="px-4">
<div className="mt-4">
<DropdownListSidebar />
</div>
<div className="px-4 pt-4">
<DropdownListSidebar strictedThread={false} />
{componentDataEngineSetting.filter(
(x) => x.name === 'prompt_template'

Some files were not shown because too many files have changed in this diff Show More