diff --git a/.github/ISSUE_TEMPLATE/roadmap.md b/.github/ISSUE_TEMPLATE/roadmap.md new file mode 100644 index 000000000..dbb0dfdd5 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/roadmap.md @@ -0,0 +1,26 @@ +## Goal + +## Tasklist + +### Frontend +- [ ] link to janhq/jan epics + +**Bugs** +- [ ] link to bugs + +### Backend +- [ ] link to janhq/cortex.cpp epics + +**Bugs** +- [ ] link to bug issues + +### Infra +- [ ] link to infra issues + +### Administrative / Management +- [ ] link to infra issues + +### Marketing + +------- +## Resources diff --git a/core/src/browser/core.test.ts b/core/src/browser/core.test.ts index f38cc0b40..720ea9dcf 100644 --- a/core/src/browser/core.test.ts +++ b/core/src/browser/core.test.ts @@ -3,7 +3,6 @@ import { joinPath } from './core' import { openFileExplorer } from './core' import { getJanDataFolderPath } from './core' import { abortDownload } from './core' -import { getFileSize } from './core' import { executeOnMain } from './core' describe('test core apis', () => { @@ -66,18 +65,6 @@ describe('test core apis', () => { expect(result).toBe('aborted') }) - it('should get file size', async () => { - const url = 'http://example.com/file' - globalThis.core = { - api: { - getFileSize: jest.fn().mockResolvedValue(1024), - }, - } - const result = await getFileSize(url) - expect(globalThis.core.api.getFileSize).toHaveBeenCalledWith(url) - expect(result).toBe(1024) - }) - it('should execute function on main process', async () => { const extension = 'testExtension' const method = 'testMethod' diff --git a/core/src/browser/core.ts b/core/src/browser/core.ts index b19e0b339..7058fc172 100644 --- a/core/src/browser/core.ts +++ b/core/src/browser/core.ts @@ -28,15 +28,6 @@ const downloadFile: (downloadRequest: DownloadRequest, network?: NetworkConfig) network ) => globalThis.core?.api?.downloadFile(downloadRequest, network) -/** - * Get unit in bytes for a remote file. - * - * @param url - The url of the file. - * @returns {Promise} - A promise that resolves with the file size. - */ -const getFileSize: (url: string) => Promise = (url: string) => - globalThis.core.api?.getFileSize(url) - /** * Aborts the download of a specific file. * @param {string} fileName - The name of the file whose download is to be aborted. 
@@ -167,7 +158,6 @@ export { getUserHomePath, systemInformation, showToast, - getFileSize, dirName, FileStat, } diff --git a/core/src/node/api/processors/download.test.ts b/core/src/node/api/processors/download.test.ts index 21d94165d..c4b171a7d 100644 --- a/core/src/node/api/processors/download.test.ts +++ b/core/src/node/api/processors/download.test.ts @@ -23,6 +23,11 @@ jest.mock('fs', () => ({ createWriteStream: jest.fn(), })) +const requestMock = jest.fn((options, callback) => { + callback(new Error('Test error'), null) +}) +jest.mock('request', () => requestMock) + jest.mock('request-progress', () => { return jest.fn().mockImplementation(() => { return { @@ -54,18 +59,6 @@ describe('Downloader', () => { beforeEach(() => { jest.resetAllMocks() }) - it('should handle getFileSize errors correctly', async () => { - const observer = jest.fn() - const url = 'http://example.com/file' - - const downloader = new Downloader(observer) - const requestMock = jest.fn((options, callback) => { - callback(new Error('Test error'), null) - }) - jest.mock('request', () => requestMock) - - await expect(downloader.getFileSize(observer, url)).rejects.toThrow('Test error') - }) it('should pause download correctly', () => { const observer = jest.fn() diff --git a/core/src/node/api/processors/download.ts b/core/src/node/api/processors/download.ts index ebeb7c299..709ad9687 100644 --- a/core/src/node/api/processors/download.ts +++ b/core/src/node/api/processors/download.ts @@ -135,25 +135,4 @@ export class Downloader implements Processor { pauseDownload(_observer: any, fileName: any) { DownloadManager.instance.networkRequests[fileName]?.pause() } - - async getFileSize(_observer: any, url: string): Promise { - return new Promise((resolve, reject) => { - const request = require('request') - request( - { - url, - method: 'HEAD', - }, - function (err: any, response: any) { - if (err) { - console.error('Getting file size failed:', err) - reject(err) - } else { - const size: number = response.headers['content-length'] ?? 
-1 - resolve(size) - } - } - ) - }) - } } diff --git a/core/src/node/api/restful/common.ts b/core/src/node/api/restful/common.ts index 39f7b8d8b..989104e03 100644 --- a/core/src/node/api/restful/common.ts +++ b/core/src/node/api/restful/common.ts @@ -1,7 +1,6 @@ import { HttpServer } from '../HttpServer' import { chatCompletions, - deleteBuilder, downloadModel, getBuilder, retrieveBuilder, @@ -14,8 +13,6 @@ import { } from './helper/builder' import { JanApiRouteConfiguration } from './helper/configuration' -import { startModel, stopModel } from './helper/startStopModel' -import { ModelSettingParams } from '../../../types' export const commonRouter = async (app: HttpServer) => { const normalizeData = (data: any) => { @@ -28,19 +25,25 @@ export const commonRouter = async (app: HttpServer) => { // Read & Delete :: Threads | Models | Assistants Object.keys(JanApiRouteConfiguration).forEach((key) => { app.get(`/${key}`, async (_req, _res) => { - if (key === 'models') { + if (key.includes('models')) { return models(_req, _res) } return getBuilder(JanApiRouteConfiguration[key]).then(normalizeData) }) - app.get(`/${key}/:id`, async (request: any) => - retrieveBuilder(JanApiRouteConfiguration[key], request.params.id) - ) + app.get(`/${key}/:id`, async (_req: any, _res: any) => { + if (key.includes('models')) { + return models(_req, _res) + } + return retrieveBuilder(JanApiRouteConfiguration[key], _req.params.id) + }) - app.delete(`/${key}/:id`, async (request: any) => - deleteBuilder(JanApiRouteConfiguration[key], request.params.id) - ) + app.delete(`/${key}/:id`, async (_req: any, _res: any) => { + if (key.includes('models')) { + return models(_req, _res) + } + return retrieveBuilder(JanApiRouteConfiguration[key], _req.params.id) + }) }) // Threads @@ -70,16 +73,9 @@ export const commonRouter = async (app: HttpServer) => { }) ) - app.put(`/models/:modelId/start`, async (request: any) => { - let settingParams: ModelSettingParams | undefined = undefined - if (Object.keys(request.body).length !== 0) { - settingParams = JSON.parse(request.body) as ModelSettingParams - } + app.post(`/models/start`, async (request: any, reply: any) => models(request, reply)) - return startModel(request.params.modelId, settingParams) - }) - - app.put(`/models/:modelId/stop`, async (request: any) => stopModel(request.params.modelId)) + app.post(`/models/stop`, async (request: any, reply: any) => models(request, reply)) // Chat Completion app.post(`/chat/completions`, async (request: any, reply: any) => chatCompletions(request, reply)) diff --git a/core/src/node/api/restful/helper/builder.test.ts b/core/src/node/api/restful/helper/builder.test.ts index f21257098..cfaee6007 100644 --- a/core/src/node/api/restful/helper/builder.test.ts +++ b/core/src/node/api/restful/helper/builder.test.ts @@ -1,17 +1,7 @@ -import { - existsSync, - readdirSync, - readFileSync, - writeFileSync, - mkdirSync, - appendFileSync, - rmdirSync, -} from 'fs' -import { join } from 'path' +import { existsSync, readdirSync, readFileSync, writeFileSync, mkdirSync, appendFileSync } from 'fs' import { getBuilder, retrieveBuilder, - deleteBuilder, getMessages, retrieveMessage, createThread, @@ -82,34 +72,6 @@ describe('builder helper functions', () => { }) }) - describe('deleteBuilder', () => { - it('should return a message if trying to delete Jan assistant', async () => { - const result = await deleteBuilder({ ...mockConfiguration, dirName: 'assistants' }, 'jan') - expect(result).toEqual({ message: 'Cannot delete Jan assistant' }) - }) - - it('should 
return a message if data is not found', async () => { - ;(existsSync as jest.Mock).mockReturnValue(true) - ;(readdirSync as jest.Mock).mockReturnValue(['file1']) - ;(readFileSync as jest.Mock).mockReturnValue(JSON.stringify({ id: 'model1' })) - - const result = await deleteBuilder(mockConfiguration, 'nonexistentId') - expect(result).toEqual({ message: 'Not found' }) - }) - - it('should delete the directory and return success message', async () => { - ;(existsSync as jest.Mock).mockReturnValue(true) - ;(readdirSync as jest.Mock).mockReturnValue(['file1']) - ;(readFileSync as jest.Mock).mockReturnValue(JSON.stringify({ id: 'model1' })) - - const result = await deleteBuilder(mockConfiguration, 'model1') - expect(rmdirSync).toHaveBeenCalledWith(join('/mock/path', 'mockDir', 'model1'), { - recursive: true, - }) - expect(result).toEqual({ id: 'model1', object: 'mockObject', deleted: true }) - }) - }) - describe('getMessages', () => { it('should return an empty array if message file does not exist', async () => { ;(existsSync as jest.Mock).mockReturnValue(false) diff --git a/core/src/node/api/restful/helper/builder.ts b/core/src/node/api/restful/helper/builder.ts index c3493a8be..e081708cf 100644 --- a/core/src/node/api/restful/helper/builder.ts +++ b/core/src/node/api/restful/helper/builder.ts @@ -73,34 +73,6 @@ export const retrieveBuilder = async (configuration: RouteConfiguration, id: str return filteredData } -export const deleteBuilder = async (configuration: RouteConfiguration, id: string) => { - if (configuration.dirName === 'assistants' && id === 'jan') { - return { - message: 'Cannot delete Jan assistant', - } - } - - const directoryPath = join(getJanDataFolderPath(), configuration.dirName) - try { - const data = await retrieveBuilder(configuration, id) - if (!data) { - return { - message: 'Not found', - } - } - - const objectPath = join(directoryPath, id) - rmdirSync(objectPath, { recursive: true }) - return { - id: id, - object: configuration.delete.object, - deleted: true, - } - } catch (ex) { - console.error(ex) - } -} - export const getMessages = async (threadId: string): Promise => { const threadDirPath = join(getJanDataFolderPath(), 'threads', threadId) const messageFile = 'messages.jsonl' @@ -308,7 +280,7 @@ export const models = async (request: any, reply: any) => { 'Content-Type': 'application/json', } - const response = await fetch(`${CORTEX_API_URL}/models`, { + const response = await fetch(`${CORTEX_API_URL}/models${request.url.split('/models')[1] ?? 
""}`, { method: request.method, headers: headers, body: JSON.stringify(request.body), diff --git a/core/src/node/api/restful/helper/startStopModel.test.ts b/core/src/node/api/restful/helper/startStopModel.test.ts deleted file mode 100644 index 7c1a56cf1..000000000 --- a/core/src/node/api/restful/helper/startStopModel.test.ts +++ /dev/null @@ -1,10 +0,0 @@ -import { startModel } from './startStopModel' - -describe('startModel', () => { - it('test_startModel_error', async () => { - const modelId = 'testModelId' - const settingParams = undefined - - expect(startModel(modelId, settingParams)).resolves.toThrow() - }) -}) diff --git a/core/src/node/api/restful/helper/startStopModel.ts b/core/src/node/api/restful/helper/startStopModel.ts deleted file mode 100644 index 2e9db6d15..000000000 --- a/core/src/node/api/restful/helper/startStopModel.ts +++ /dev/null @@ -1,25 +0,0 @@ -import { ModelSettingParams } from '../../../../types' -import { CORTEX_DEFAULT_PORT, LOCAL_HOST } from './consts' - -/** - * Start a model - * @param modelId - * @param settingParams - * @returns - */ -export const startModel = async (modelId: string, settingParams?: ModelSettingParams) => { - return fetch(`http://${LOCAL_HOST}:${CORTEX_DEFAULT_PORT}/v1/models/start`, { - method: 'POST', - body: JSON.stringify({ model: modelId, ...settingParams }), - }) -} - -/* - * Stop model. - */ -export const stopModel = async (modelId: string) => { - return fetch(`http://${LOCAL_HOST}:${CORTEX_DEFAULT_PORT}/v1/models/stop`, { - method: 'POST', - body: JSON.stringify({ model: modelId }), - }) -} diff --git a/core/src/types/api/index.ts b/core/src/types/api/index.ts index 093314a15..dc9afcb00 100644 --- a/core/src/types/api/index.ts +++ b/core/src/types/api/index.ts @@ -27,6 +27,7 @@ export enum NativeRoute { quickAskSizeUpdated = 'quickAskSizeUpdated', ackDeepLink = 'ackDeepLink', + factoryReset = 'factoryReset' } /** @@ -65,7 +66,6 @@ export enum DownloadRoute { pauseDownload = 'pauseDownload', resumeDownload = 'resumeDownload', getDownloadProgress = 'getDownloadProgress', - getFileSize = 'getFileSize', } export enum DownloadEvent { diff --git a/core/src/types/setting/settingComponent.ts b/core/src/types/setting/settingComponent.ts index 2eae4e16f..2474f6bd4 100644 --- a/core/src/types/setting/settingComponent.ts +++ b/core/src/types/setting/settingComponent.ts @@ -12,7 +12,7 @@ export type SettingComponentProps = { export type ConfigType = 'runtime' | 'setting' -export type ControllerType = 'slider' | 'checkbox' | 'input' +export type ControllerType = 'slider' | 'checkbox' | 'input' | 'tag' export type InputType = 'password' | 'text' | 'email' | 'number' | 'tel' | 'url' @@ -22,7 +22,7 @@ export type InputAction = InputActionsTuple[number] export type InputComponentProps = { placeholder: string - value: string + value: string | string[] type?: InputType textAlign?: 'left' | 'right' inputActions?: InputAction[] diff --git a/docs/src/pages/docs/_meta.json b/docs/src/pages/docs/_meta.json index 231f6a763..8ed88963c 100644 --- a/docs/src/pages/docs/_meta.json +++ b/docs/src/pages/docs/_meta.json @@ -13,6 +13,7 @@ }, "desktop": "Desktop", "data-folder": "Jan Data Folder", + "privacy": "Privacy", "user-guides": { "title": "BASIC USAGE", "type": "separator" diff --git a/docs/src/pages/docs/privacy.mdx b/docs/src/pages/docs/privacy.mdx new file mode 100644 index 000000000..d3be5b6de --- /dev/null +++ b/docs/src/pages/docs/privacy.mdx @@ -0,0 +1,63 @@ +--- +title: Jan Privacy +description: Jan is an app that allows you to own your AI. 
We prioritize your control over your data and explain what data we collect and why. +keywords: + [ + Jan AI, + Jan, + ChatGPT alternative, + local AI, + private AI, + conversational AI, + OpenAI platform alternative, + no-subscription fee, + large language model, + about Jan, + desktop application, + thinking machine, + jan vision, + ] +--- + +# Privacy + +Jan is an app that allows you to own your AI. We prioritize your control over your data and explain what data we collect and why. + +- Jan can't see your chats with AI +- You're free to opt out + +## Why and what we track + +To build a reliable, user-friendly AI that you own, we need to understand how Jan is used. We collect two types of data: performance data and usage data. + +### Performance data +When Jan crashes, we collect technical details about what went wrong, along with basic information about the hardware you’re using: + +- Specific AI model in use during the crash +- Hardware: `CPU`, `GPU`, `RAM` +- Logs: `Date/Time`, `OS & version`, `app version`, `error codes & messages`. + +### Usage data + +We track data like how often the app is opened to check: + +- **Active Users**: How many people use Jan daily to measure engagement +- **Retention Rates**: To understand if users are finding value in Jan over time + +Usage data is tied to a randomly generated telemetry ID. None of our usage data can be linked to your personal identity. + +## What we **don’t** track: +- Your conversations with Jan. Those stay on your device. +- Your files. We don’t scan, upload, or even look at them. +- Anything tied to your identity. + +## Using Cloud Models + +Jan allows you to connect to cloud model APIs. If you choose to use cloud-based models (e.g., GPT or Claude models), the API provider handling the model will have access to your messages as part of processing the request. Again, Jan doesn't see or store these messages - they go directly to the provider. Remember: with local models, everything stays on your device, so no one - not even us - can see your messages. + +## Where we store & process data +We use [PostHog](https://posthog.com/eu) EU for analytics, ensuring all data is processed within the European Union. This setup complies with GDPR and other strict privacy regulations. PostHog lets us self-host and securely manage the data we collect. Read more [on PostHog's GDPR doc](https://posthog.com/docs/privacy/gdpr-compliance). + +For a detailed breakdown of the analytics data we collect, you can check out our analytics repo. If you have any questions or concerns, feel free to reach out to us at hi@jan.ai. 
\ No newline at end of file diff --git a/docs/src/pages/integrations/coding/_assets/tabby-answer-engine.png b/docs/src/pages/integrations/coding/_assets/tabby-answer-engine.png new file mode 100644 index 000000000..420b609fc Binary files /dev/null and b/docs/src/pages/integrations/coding/_assets/tabby-answer-engine.png differ diff --git a/docs/src/pages/integrations/coding/_assets/tabby-chat-sidebar.png b/docs/src/pages/integrations/coding/_assets/tabby-chat-sidebar.png new file mode 100644 index 000000000..50cfbd226 Binary files /dev/null and b/docs/src/pages/integrations/coding/_assets/tabby-chat-sidebar.png differ diff --git a/docs/src/pages/integrations/coding/tabby.mdx b/docs/src/pages/integrations/coding/tabby.mdx new file mode 100644 index 000000000..6647b4eb4 --- /dev/null +++ b/docs/src/pages/integrations/coding/tabby.mdx @@ -0,0 +1,104 @@ +--- +title: Tabby +description: A step-by-step guide on integrating Jan with Tabby and VSCode, JetBrains, or other IDEs. +keywords: + [ + Jan, + Customizable Intelligence, LLM, + local AI, + privacy focus, + free and open source, + private and offline, + conversational AI, + no-subscription fee, + large language models, + Tabby integration, + VSCode integration, + JetBrains integration, + ] +--- + +import { Tabs, Steps } from 'nextra/components' + +# Tabby + +## Integrate Jan with Tabby and Your Favorite IDEs + +[Tabby](https://www.tabbyml.com/) is an open-source, self-hosted AI coding assistant. +With Tabby, teams can easily set up their own LLM-powered code completion server. + +Tabby provides integrations with VSCode, JetBrains, and other IDEs to help developers code more efficiently, +and it can be used with various LLM services, including Jan. + +To integrate Jan with Tabby, follow these steps: + + + +### Step 1: Enable the Jan API Server + +To set up Tabby with Jan's Local Server, you must activate the Jan API Server with your chosen model. + +1. Click the `Local API Server` (`<>`) button above the Settings. Jan will direct you to the **Local API Server** section. +2. Configure the server, including the **IP Port**, **Cross-Origin Resource Sharing (CORS)**, and **Verbose Server Logs**. +3. Press the **Start Server** button. + +### Step 2: Find the Model ID and Ensure the Model is Activated + +1. Go to `Settings` > `My Models`. +2. Models are listed with their **Model ID** beneath their names. +3. Click the **three dots (⋮)** button next to the model. +4. Select **Start Model** to activate the model. + +### Step 3: Install the Tabby Server + +Use the following documentation to install the Tabby server: +- [Docker](https://tabby.tabbyml.com/docs/quick-start/installation/docker/) +- [Apple Silicon](https://tabby.tabbyml.com/docs/quick-start/installation/apple/) +- [Linux](https://tabby.tabbyml.com/docs/quick-start/installation/linux/) +- [Windows](https://tabby.tabbyml.com/docs/quick-start/installation/windows/) + +Then, follow the steps to connect Jan with the Tabby server: +[Connect Jan with Tabby](https://tabby.tabbyml.com/docs/references/models-http-api/jan.ai/). + +For example, to connect Jan with Tabby, save the following configuration under `~/.tabby/config.toml`: + +```toml title="~/.tabby/config.toml" +# Chat model +[model.chat.http] +kind = "openai/chat" +model_name = "model_id" +api_endpoint = "http://localhost:1337/v1" +api_key = "" +``` + +The Jan completion and embedding APIs are currently under construction. +Once they are complete, you will also be able to connect Jan with Tabby for completion and embedding tasks. 
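Before moving on, it can help to confirm that the server from Step 1 is reachable and that you copied the right Model ID. Below is a minimal sketch in TypeScript (Node 18+ for the global `fetch`), assuming the default Jan endpoint `http://localhost:1337/v1` used in the config above; it is an illustration, not part of the official Tabby setup:

```typescript
// Sanity check: list the models Jan's Local API Server exposes.
// Assumes the default endpoint http://localhost:1337/v1 from the config above;
// adjust the host/port if you changed them in Jan's server settings.
const JAN_API = 'http://localhost:1337/v1'

async function listJanModels(): Promise<void> {
  const res = await fetch(`${JAN_API}/models`)
  if (!res.ok) {
    throw new Error(`Jan server not reachable: ${res.status} ${res.statusText}`)
  }
  // Jan's server follows the OpenAI-compatible response shape: { data: [{ id, ... }] }
  const body = (await res.json()) as { data?: Array<{ id: string }> }
  for (const model of body.data ?? []) {
    console.log(model.id) // candidate values for model_name in ~/.tabby/config.toml
  }
}

listJanModels().catch(console.error)
```

If this prints the ID of the model you started in Step 2, that same string is the value to use for `model_name`.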
+ +### Step 4: Install Tabby on Your Favorite IDEs + +Refer to the following documentation to install the Tabby extension on your favorite IDEs: +- [Visual Studio Code](https://tabby.tabbyml.com/docs/extensions/installation/vscode/) +- [JetBrains IntelliJ Platform](https://tabby.tabbyml.com/docs/extensions/installation/intellij/) +- [VIM / NeoVIM](https://tabby.tabbyml.com/docs/extensions/installation/vim/) + + + +## How to Use Tabby with Jan Integration + +### Answer Engine: Chat with Your Code and Documentation + +Tabby offers an [Answer Engine](https://tabby.tabbyml.com/docs/administration/answer-engine/) on the homepage, +which can leverage the Jan LLM and related contexts like code, documentation, and web pages to answer user questions. + +Simply open the Tabby homepage at [localhost:8080](http://localhost:8080) and ask your questions. + +![Answer Engine](./_assets/tabby-answer-engine.png) + +### IDE Chat Sidebar + +After installing the Tabby extension on your preferred IDEs, you can engage in a conversation with Jan to: + +1. Discuss your code, receive suggestions, and seek assistance. +2. Request Jan to inline edit your code, and then review and accept the proposed changes. + +![Chat Sidebar](./_assets/tabby-chat-sidebar.png) \ No newline at end of file diff --git a/electron/handlers/native.ts b/electron/handlers/native.ts index 869b9fd58..813231bd4 100644 --- a/electron/handlers/native.ts +++ b/electron/handlers/native.ts @@ -12,6 +12,9 @@ import { } from '@janhq/core/node' import { SelectFileOption } from '@janhq/core' import { menu } from '../utils/menu' +import { migrate } from '../utils/migration' +import { createUserSpace } from '../utils/path' +import { setupExtensions } from '../utils/extension' const isMac = process.platform === 'darwin' @@ -33,14 +36,28 @@ export function handleAppIPCs() { nativeTheme.themeSource = 'light' }) + /** + * Handles the "setCloseApp" IPC message by closing the main application window. + * This effectively closes the application if no other windows are open. + */ ipcMain.handle(NativeRoute.setCloseApp, () => { windowManager.mainWindow?.close() }) + /** + * Handles the "setMinimizeApp" IPC message by minimizing the main application window. + * The window will be minimized to the system's taskbar or dock. + */ ipcMain.handle(NativeRoute.setMinimizeApp, () => { windowManager.mainWindow?.minimize() }) + /** + * Handles the "setMaximizeApp" IPC message. It toggles the maximization state of the main window. + * If the window is currently maximized, it will be un-maximized (restored to its previous size). + * If the window is not maximized, it will be maximized to fill the screen. + * @param _event - The IPC event object. + */ ipcMain.handle(NativeRoute.setMaximizeApp, async (_event) => { if (windowManager.mainWindow?.isMaximized()) { windowManager.mainWindow.unmaximize() @@ -104,6 +121,11 @@ export function handleAppIPCs() { } }) + /** + * Handles the "selectDirectory" IPC message to open a dialog for selecting a directory. + * If no main window is found, logs an error and exits. + * @returns {string} The path of the selected directory, or nothing if canceled. + */ ipcMain.handle(NativeRoute.selectDirectory, async () => { const mainWindow = windowManager.mainWindow if (!mainWindow) { @@ -122,6 +144,14 @@ export function handleAppIPCs() { } }) + /** + * Handles the "selectFiles" IPC message to open a dialog for selecting files. + * Allows options for setting the dialog title, button label, and selection properties. 
+ * Logs an error if no main window is found. + * @param _event - The IPC event object. + * @param option - Options for customizing file selection dialog. + * @returns {string[]} An array of selected file paths, or nothing if canceled. + */ ipcMain.handle( NativeRoute.selectFiles, async (_event, option?: SelectFileOption) => { @@ -156,11 +186,20 @@ export function handleAppIPCs() { } ) + /** + * Handles the "hideQuickAskWindow" IPC message to hide the quick ask window. + * @returns A promise that resolves when the window is hidden. + */ ipcMain.handle( NativeRoute.hideQuickAskWindow, async (): Promise => windowManager.hideQuickAskWindow() ) + /** + * Handles the "sendQuickAskInput" IPC message to send user input to the main window. + * @param _event - The IPC event object. + * @param input - User input string to be sent. + */ ipcMain.handle( NativeRoute.sendQuickAskInput, async (_event, input: string): Promise => { @@ -171,6 +210,12 @@ export function handleAppIPCs() { } ) + /** + * Handles the "showOpenMenu" IPC message to show the context menu at given coordinates. + * Only applicable on non-Mac platforms. + * @param e - The event object. + * @param args - Contains coordinates where the menu should appear. + */ ipcMain.handle(NativeRoute.showOpenMenu, function (e, args) { if (!isMac && windowManager.mainWindow) { menu.popup({ @@ -181,23 +226,55 @@ export function handleAppIPCs() { } }) + /** + * Handles the "hideMainWindow" IPC message to hide the main application window. + * @returns A promise that resolves when the window is hidden. + */ ipcMain.handle( NativeRoute.hideMainWindow, async (): Promise => windowManager.hideMainWindow() ) + /** + * Handles the "showMainWindow" IPC message to show the main application window. + * @returns A promise that resolves when the window is shown. + */ ipcMain.handle( NativeRoute.showMainWindow, async (): Promise => windowManager.showMainWindow() ) + /** + * Handles the "quickAskSizeUpdated" IPC message to update the size of the quick ask window. + * Resizes window by the given height offset. + * @param _event - The IPC event object. + * @param heightOffset - The amount of height to increase. + * @returns A promise that resolves when the window is resized. + */ ipcMain.handle( NativeRoute.quickAskSizeUpdated, async (_event, heightOffset: number): Promise => windowManager.expandQuickAskWindow(heightOffset) ) + /** + * Handles the "ackDeepLink" IPC message to acknowledge a deep link. + * Triggers handling of deep link in the application. + * @param _event - The IPC event object. + * @returns A promise that resolves when the deep link is acknowledged. + */ ipcMain.handle(NativeRoute.ackDeepLink, async (_event): Promise => { windowManager.ackDeepLink() }) + + /** + * Handles the "factoryReset" IPC message to reset the application to its initial state. + * Clears loaded modules, recreates user space, runs migrations, and sets up extensions. + * @param _event - The IPC event object. + * @returns A promise that resolves after the reset operations are complete. 
+ */ + ipcMain.handle(NativeRoute.factoryReset, async (_event): Promise => { + ModuleManager.instance.clearImportedModules() + return createUserSpace().then(migrate).then(setupExtensions) + }) } diff --git a/electron/tests/e2e/thread.e2e.spec.ts b/electron/tests/e2e/thread.e2e.spec.ts index 5d7328053..dfd131988 100644 --- a/electron/tests/e2e/thread.e2e.spec.ts +++ b/electron/tests/e2e/thread.e2e.spec.ts @@ -25,7 +25,7 @@ test('Select GPT model from Hub and Chat with Invalid API Key', async ({ { timeout: TIMEOUT } ) - const APIKeyError = page.getByTestId('invalid-API-key-error') + const APIKeyError = page.getByTestId('passthrough-error-message') await expect(APIKeyError).toBeVisible({ timeout: TIMEOUT, }) diff --git a/electron/utils/migration.ts b/electron/utils/migration.ts index 80851f9de..505de0f7b 100644 --- a/electron/utils/migration.ts +++ b/electron/utils/migration.ts @@ -3,7 +3,6 @@ import { app } from 'electron' import { join } from 'path' import { rmdirSync, - readFileSync, existsSync, mkdirSync, readdirSync, diff --git a/extensions/inference-anthropic-extension/package.json b/extensions/inference-anthropic-extension/package.json index 19c0df5e8..8115ba2df 100644 --- a/extensions/inference-anthropic-extension/package.json +++ b/extensions/inference-anthropic-extension/package.json @@ -1,7 +1,7 @@ { "name": "@janhq/inference-anthropic-extension", "productName": "Anthropic Inference Engine", - "version": "1.0.2", + "version": "1.0.3", "description": "This extension enables Anthropic chat completion API calls", "main": "dist/index.js", "module": "dist/module.js", diff --git a/extensions/inference-anthropic-extension/resources/models.json b/extensions/inference-anthropic-extension/resources/models.json index 1462837ac..59e41245b 100644 --- a/extensions/inference-anthropic-extension/resources/models.json +++ b/extensions/inference-anthropic-extension/resources/models.json @@ -5,9 +5,9 @@ "url": "https://www.anthropic.com/" } ], - "id": "claude-3-opus-20240229", + "id": "claude-3-opus-latest", "object": "model", - "name": "Claude 3 Opus", + "name": "Claude 3 Opus Latest", "version": "1.0", "description": "Claude 3 Opus is a powerful model suitable for highly complex tasks.", "format": "api", "settings": {}, "parameters": { "max_tokens": 4096, "temperature": 0.7, "stream": false }, "metadata": { "author": "Anthropic", "tags": ["General", "Big Context Length"] }, "engine": "anthropic" }, { "sources": [ { "url": "https://www.anthropic.com/" } ], - "id": "claude-3-sonnet-20240229", + "id": "claude-3-5-haiku-latest", "object": "model", - "name": "Claude 3 Sonnet", + "name": "Claude 3.5 Haiku Latest", "version": "1.0", - "description": "Claude 3 Sonnet is an ideal model balance of intelligence and speed for enterprise workloads.", + "description": "Claude 3.5 Haiku is the fastest model, providing near-instant responsiveness.", "format": "api", "settings": {}, "parameters": { - "max_tokens": 4096, + "max_tokens": 8192, "temperature": 0.7, "stream": false }, "metadata": { "author": "Anthropic", "tags": ["General", "Big Context Length"] }, "engine": "anthropic" }, { "sources": [ { "url": "https://www.anthropic.com/" } ], - "id": "claude-3-haiku-20240307", + "id": "claude-3-5-sonnet-latest", "object": "model", - "name": "Claude 3 Haiku", - "version": "1.0", - "description": "Claude 3 Haiku is the fastest model provides near-instant responsiveness.", - "format": "api", - "settings": {}, - "parameters": { - "max_tokens": 4096, - "temperature": 0.7, - "stream": false - }, - "metadata": { - "author": "Anthropic", - "tags": ["General", "Big Context Length"] - }, - "engine": "anthropic" - }, - { - "sources": [ - { - "url": "https://www.anthropic.com/" - } - ], - "id": "claude-3-5-sonnet-20240620", - "object": "model", - "name": "Claude 3.5 Sonnet", + "name": "Claude 3.5 Sonnet Latest", "version": 
"1.0", "description": "Claude 3.5 Sonnet raises the industry bar for intelligence, outperforming competitor models and Claude 3 Opus on a wide range of evaluations, with the speed and cost of our mid-tier model, Claude 3 Sonnet.", "format": "api", "settings": {}, "parameters": { - "max_tokens": 4096, + "max_tokens": 8192, "temperature": 0.7, "stream": true }, diff --git a/extensions/inference-cortex-extension/bin/version.txt b/extensions/inference-cortex-extension/bin/version.txt index 52da54083..a6a3a43c3 100644 --- a/extensions/inference-cortex-extension/bin/version.txt +++ b/extensions/inference-cortex-extension/bin/version.txt @@ -1 +1 @@ -1.0.3-rc5 \ No newline at end of file +1.0.4 \ No newline at end of file diff --git a/extensions/inference-cortex-extension/download.bat b/extensions/inference-cortex-extension/download.bat index 1ab14a03c..7d9a9213a 100644 --- a/extensions/inference-cortex-extension/download.bat +++ b/extensions/inference-cortex-extension/download.bat @@ -2,12 +2,11 @@ set BIN_PATH=./bin set SHARED_PATH=./../../electron/shared set /p CORTEX_VERSION=<./bin/version.txt -set ENGINE_VERSION=0.1.39 +set ENGINE_VERSION=0.1.40 @REM Download cortex.llamacpp binaries -set VERSION=v0.1.39 -set DOWNLOAD_URL=https://github.com/janhq/cortex.llamacpp/releases/download/%VERSION%/cortex.llamacpp-0.1.39-windows-amd64 -set CUDA_DOWNLOAD_URL=https://github.com/janhq/cortex.llamacpp/releases/download/%VERSION% +set DOWNLOAD_URL=https://github.com/janhq/cortex.llamacpp/releases/download/v%ENGINE_VERSION%/cortex.llamacpp-%ENGINE_VERSION%-windows-amd64 +set CUDA_DOWNLOAD_URL=https://github.com/janhq/cortex.llamacpp/releases/download/v%ENGINE_VERSION% set SUBFOLDERS=windows-amd64-noavx-cuda-12-0 windows-amd64-noavx-cuda-11-7 windows-amd64-avx2-cuda-12-0 windows-amd64-avx2-cuda-11-7 windows-amd64-noavx windows-amd64-avx windows-amd64-avx2 windows-amd64-avx512 windows-amd64-vulkan call .\node_modules\.bin\download -e --strip 1 -o %BIN_PATH% https://github.com/janhq/cortex.cpp/releases/download/v%CORTEX_VERSION%/cortex-%CORTEX_VERSION%-windows-amd64.tar.gz diff --git a/extensions/inference-cortex-extension/download.sh b/extensions/inference-cortex-extension/download.sh index aa09d6749..f62e5961b 100755 --- a/extensions/inference-cortex-extension/download.sh +++ b/extensions/inference-cortex-extension/download.sh @@ -2,7 +2,7 @@ # Read CORTEX_VERSION CORTEX_VERSION=$(cat ./bin/version.txt) -ENGINE_VERSION=0.1.39 +ENGINE_VERSION=0.1.40 CORTEX_RELEASE_URL="https://github.com/janhq/cortex.cpp/releases/download" ENGINE_DOWNLOAD_URL="https://github.com/janhq/cortex.llamacpp/releases/download/v${ENGINE_VERSION}/cortex.llamacpp-${ENGINE_VERSION}" CUDA_DOWNLOAD_URL="https://github.com/janhq/cortex.llamacpp/releases/download/v${ENGINE_VERSION}" @@ -42,8 +42,8 @@ elif [ "$OS_TYPE" == "Darwin" ]; then chmod +x "./bin/cortex-server" # Download engines for macOS - download "${ENGINE_DOWNLOAD_URL}-mac-arm64.tar.gz" -e --strip 1 -o "${SHARED_PATH}/engines/cortex.llamacpp/mac-arm64/v0.1.39" - download "${ENGINE_DOWNLOAD_URL}-mac-amd64.tar.gz" -e --strip 1 -o "${SHARED_PATH}/engines/cortex.llamacpp/mac-amd64/v0.1.39" + download "${ENGINE_DOWNLOAD_URL}-mac-arm64.tar.gz" -e --strip 1 -o "${SHARED_PATH}/engines/cortex.llamacpp/mac-arm64/v${ENGINE_VERSION}" + download "${ENGINE_DOWNLOAD_URL}-mac-amd64.tar.gz" -e --strip 1 -o "${SHARED_PATH}/engines/cortex.llamacpp/mac-amd64/v${ENGINE_VERSION}" else echo "Unsupported operating system: $OS_TYPE" diff --git a/extensions/inference-cortex-extension/package.json 
b/extensions/inference-cortex-extension/package.json index c6d3f70b6..b0f20bae6 100644 --- a/extensions/inference-cortex-extension/package.json +++ b/extensions/inference-cortex-extension/package.json @@ -1,7 +1,7 @@ { "name": "@janhq/inference-cortex-extension", "productName": "Cortex Inference Engine", - "version": "1.0.22", + "version": "1.0.23", "description": "This extension embeds cortex.cpp, a lightweight inference engine written in C++. See https://jan.ai.\nAdditional dependencies could be installed to run without Cuda Toolkit installation.", "main": "dist/index.js", "node": "dist/node/index.cjs.js", diff --git a/extensions/inference-cortex-extension/resources/default_settings.json b/extensions/inference-cortex-extension/resources/default_settings.json index 09d014a12..1e5ec8db6 100644 --- a/extensions/inference-cortex-extension/resources/default_settings.json +++ b/extensions/inference-cortex-extension/resources/default_settings.json @@ -1,33 +1,59 @@ [ { - "key": "test", - "title": "Test", - "description": "Test", - "controllerType": "input", - "controllerProps": { - "placeholder": "Test", - "value": "" - } - }, - { - "key": "embedding", - "title": "Embedding", - "description": "Whether to enable embedding.", + "key": "cont_batching", + "title": "Continuous batching", + "description": "Whether to enable continuous batching", "controllerType": "checkbox", "controllerProps": { "value": true } }, { - "key": "ctx_len", - "title": "Context Length", - "description": "The context length for model operations varies; the maximum depends on the specific model used.", - "controllerType": "slider", + "key": "n_parallel", + "title": "Parallel operations", + "description": "The number of parallel operations", + "controllerType": "input", "controllerProps": { - "min": 0, - "max": 4096, - "step": 128, - "value": 2048 + "value": "4", + "placeholder": "4" + } + }, + { + "key": "flash_attn", + "title": "Flash Attention enabled", + "description": "Whether to enable Flash Attention (default: true)", + "controllerType": "checkbox", + "controllerProps": { + "value": true + } + }, + { + "key": "caching_enabled", + "title": "Caching enabled", + "description": "Whether to enable prompt caching", + "controllerType": "checkbox", + "controllerProps": { + "value": true + } + }, + { + "key": "cache_type", + "title": "KV Cache Type", + "description": "KV cache type: f16, q8_0, or q4_0; default is f16 (changing this could break the model).", + "controllerType": "input", + "controllerProps": { + "placeholder": "f16", + "value": "f16" + } + }, + { + "key": "use_mmap", + "title": "mmap enabled", + "description": "Whether to enable mmap (default: true)", + "controllerType": "checkbox", + "controllerProps": { + "value": true } } ] diff --git a/extensions/inference-cortex-extension/rollup.config.ts b/extensions/inference-cortex-extension/rollup.config.ts index 00fae78ba..8fa61e91d 100644 --- a/extensions/inference-cortex-extension/rollup.config.ts +++ b/extensions/inference-cortex-extension/rollup.config.ts @@ -117,10 +117,10 @@ export default [ qwen2572bJson, ]), NODE: JSON.stringify(`${packageJson.name}/${packageJson.node}`), - DEFAULT_SETTINGS: JSON.stringify(defaultSettingJson), + SETTINGS: JSON.stringify(defaultSettingJson), CORTEX_API_URL: JSON.stringify('http://127.0.0.1:39291'), CORTEX_SOCKET_URL: JSON.stringify('ws://127.0.0.1:39291'), - CORTEX_ENGINE_VERSION: JSON.stringify('v0.1.39'), + CORTEX_ENGINE_VERSION: JSON.stringify('v0.1.40'), }), // Allow json resolution json(), diff --git 
a/extensions/inference-cortex-extension/src/@types/global.d.ts b/extensions/inference-cortex-extension/src/@types/global.d.ts index 381a80f5e..139d836a5 100644 --- a/extensions/inference-cortex-extension/src/@types/global.d.ts +++ b/extensions/inference-cortex-extension/src/@types/global.d.ts @@ -2,7 +2,7 @@ declare const NODE: string declare const CORTEX_API_URL: string declare const CORTEX_SOCKET_URL: string declare const CORTEX_ENGINE_VERSION: string -declare const DEFAULT_SETTINGS: Array +declare const SETTINGS: Array declare const MODELS: Array /** diff --git a/extensions/inference-cortex-extension/src/index.ts b/extensions/inference-cortex-extension/src/index.ts index 531b407f2..4e9ffd55a 100644 --- a/extensions/inference-cortex-extension/src/index.ts +++ b/extensions/inference-cortex-extension/src/index.ts @@ -20,6 +20,7 @@ import { ModelEvent, SystemInformation, dirName, + AppConfigurationEventName, } from '@janhq/core' import PQueue from 'p-queue' import ky from 'ky' @@ -35,6 +36,15 @@ enum DownloadTypes { DownloadStarted = 'onFileDownloadStarted', } +export enum Settings { + n_parallel = 'n_parallel', + cont_batching = 'cont_batching', + caching_enabled = 'caching_enabled', + flash_attn = 'flash_attn', + cache_type = 'cache_type', + use_mmap = 'use_mmap', +} + /** * A class that implements the InferenceExtension interface from the @janhq/core package. * The class provides methods for initializing and stopping a model, and for making inference requests. @@ -49,6 +59,14 @@ export default class JanInferenceCortexExtension extends LocalOAIEngine { shouldReconnect = true + /** Default Engine model load settings */ + n_parallel: number = 4 + cont_batching: boolean = true + caching_enabled: boolean = true + flash_attn: boolean = true + use_mmap: boolean = true + cache_type: string = 'f16' + /** * The URL for making inference requests. */ @@ -59,6 +77,8 @@ export default class JanInferenceCortexExtension extends LocalOAIEngine { */ socket?: WebSocket = undefined + abortControllers = new Map() + /** * Subscribes to events emitted by the @janhq/core package. */ @@ -69,8 +89,25 @@ export default class JanInferenceCortexExtension extends LocalOAIEngine { super.onLoad() + // Register Settings + this.registerSettings(SETTINGS) + + this.n_parallel = + Number(await this.getSetting(Settings.n_parallel, '4')) ?? 
4 + this.cont_batching = await this.getSetting( + Settings.cont_batching, + true + ) + this.caching_enabled = await this.getSetting( + Settings.caching_enabled, + true + ) + this.flash_attn = await this.getSetting(Settings.flash_attn, true) + this.use_mmap = await this.getSetting(Settings.use_mmap, true) + this.cache_type = await this.getSetting(Settings.cache_type, 'f16') + this.queue.add(() => this.clean()) - + // Run the process watchdog const systemInfo = await systemInformation() this.queue.add(() => executeOnMain(NODE, 'run', systemInfo)) @@ -81,6 +118,15 @@ export default class JanInferenceCortexExtension extends LocalOAIEngine { window.addEventListener('beforeunload', () => { this.clean() }) + + const currentMode = systemInfo.gpuSetting?.run_mode + + events.on(AppConfigurationEventName.OnConfigurationUpdate, async () => { + const systemInfo = await systemInformation() + // Update run mode on settings update + if (systemInfo.gpuSetting?.run_mode !== currentMode) + this.queue.add(() => this.setDefaultEngine(systemInfo)) + }) } async onUnload() { @@ -91,6 +137,22 @@ export default class JanInferenceCortexExtension extends LocalOAIEngine { super.onUnload() } + onSettingUpdate(key: string, value: T): void { + if (key === Settings.n_parallel && typeof value === 'string') { + this.n_parallel = Number(value) ?? 1 + } else if (key === Settings.cont_batching && typeof value === 'boolean') { + this.cont_batching = value as boolean + } else if (key === Settings.caching_enabled && typeof value === 'boolean') { + this.caching_enabled = value as boolean + } else if (key === Settings.flash_attn && typeof value === 'boolean') { + this.flash_attn = value as boolean + } else if (key === Settings.cache_type && typeof value === 'string') { + this.cache_type = value as string + } else if (key === Settings.use_mmap && typeof value === 'boolean') { + this.use_mmap = value as boolean + } + } + override async loadModel( model: Model & { file_path?: string } ): Promise { @@ -124,6 +186,10 @@ export default class JanInferenceCortexExtension extends LocalOAIEngine { const { mmproj, ...settings } = model.settings model.settings = settings } + const controller = new AbortController() + const { signal } = controller + + this.abortControllers.set(model.id, controller) return await this.queue.add(() => ky @@ -135,13 +201,21 @@ export default class JanInferenceCortexExtension extends LocalOAIEngine { model.engine === InferenceEngine.nitro // Legacy model cache ? InferenceEngine.cortex_llamacpp : model.engine, + cont_batching: this.cont_batching, + n_parallel: this.n_parallel, + caching_enabled: this.caching_enabled, + flash_attn: this.flash_attn, + cache_type: this.cache_type, + use_mmap: this.use_mmap, }, timeout: false, + signal, }) .json() .catch(async (e) => { throw (await e.response?.json()) ?? 
e }) + .finally(() => this.abortControllers.delete(model.id)) .then() ) } @@ -152,6 +226,9 @@ export default class JanInferenceCortexExtension extends LocalOAIEngine { json: { model: model.id }, }) .json() + .finally(() => { + this.abortControllers.get(model.id)?.abort() + }) .then() } @@ -180,12 +257,20 @@ export default class JanInferenceCortexExtension extends LocalOAIEngine { 'engineVariant', systemInfo.gpuSetting ) - return ky - .post( - `${CORTEX_API_URL}/v1/engines/${InferenceEngine.cortex_llamacpp}/default?version=${CORTEX_ENGINE_VERSION}&variant=${variant}`, - { json: {} } - ) - .then(() => {}) + return ( + ky + // Fallback support for legacy API + .post( + `${CORTEX_API_URL}/v1/engines/${InferenceEngine.cortex_llamacpp}/default?version=${CORTEX_ENGINE_VERSION}&variant=${variant}`, + { + json: { + version: CORTEX_ENGINE_VERSION, + variant, + }, + } + ) + .then(() => {}) + ) } /** @@ -251,6 +336,7 @@ export default class JanInferenceCortexExtension extends LocalOAIEngine { this.socket.onclose = (event) => { console.log('WebSocket closed:', event) + events.emit(ModelEvent.OnModelStopped, {}) if (this.shouldReconnect) { console.log(`Attempting to reconnect...`) setTimeout(() => this.subscribeToEvents(), 1000) diff --git a/extensions/inference-cortex-extension/src/node/execute.test.ts b/extensions/inference-cortex-extension/src/node/execute.test.ts index 73f114ce1..1bcefce9d 100644 --- a/extensions/inference-cortex-extension/src/node/execute.test.ts +++ b/extensions/inference-cortex-extension/src/node/execute.test.ts @@ -2,6 +2,7 @@ import { describe, expect, it } from '@jest/globals' import { engineVariant, executableCortexFile } from './execute' import { GpuSetting } from '@janhq/core/node' import { cpuInfo } from 'cpu-instructions' +import { fork } from 'child_process' let testSettings: GpuSetting = { run_mode: 'cpu', @@ -31,9 +32,13 @@ let mockCpuInfo = cpuInfo.cpuInfo as jest.Mock mockCpuInfo.mockReturnValue([]) jest.mock('@janhq/core/node', () => ({ - appResourcePath: () => ".", - log: jest.fn() + appResourcePath: () => '.', + log: jest.fn(), })) +jest.mock('child_process', () => ({ + fork: jest.fn(), +})) +const mockFork = fork as jest.Mock describe('test executable cortex file', () => { afterAll(function () { @@ -43,6 +48,14 @@ describe('test executable cortex file', () => { }) it('executes on MacOS', () => { + const mockProcess = { + on: jest.fn((event, callback) => { + if (event === 'message') { + callback('noavx') + } + }), + send: jest.fn(), + } Object.defineProperty(process, 'platform', { value: 'darwin', }) @@ -51,7 +64,7 @@ describe('test executable cortex file', () => { }) expect(executableCortexFile(testSettings)).toEqual( expect.objectContaining({ - enginePath: expect.stringContaining("shared"), + enginePath: expect.stringContaining('shared'), executablePath: originalPlatform === 'darwin' ? 
expect.stringContaining(`cortex-server`) @@ -60,13 +73,35 @@ describe('test executable cortex file', () => { vkVisibleDevices: '', }) ) - expect(engineVariant(testSettings)).toEqual('mac-arm64') + + mockFork.mockReturnValue(mockProcess) + expect(engineVariant(testSettings)).resolves.toEqual('mac-arm64') + }) + + it('executes on MacOS', () => { + Object.defineProperty(process, 'platform', { + value: 'darwin', + }) + Object.defineProperty(process, 'arch', { + value: 'arm64', + }) + + const mockProcess = { + on: jest.fn((event, callback) => { + if (event === 'message') { + callback('noavx') + } + }), + send: jest.fn(), + } + mockFork.mockReturnValue(mockProcess) Object.defineProperty(process, 'arch', { value: 'x64', }) + expect(executableCortexFile(testSettings)).toEqual( expect.objectContaining({ - enginePath: expect.stringContaining("shared"), + enginePath: expect.stringContaining('shared'), executablePath: originalPlatform === 'darwin' ? expect.stringContaining(`cortex-server`) @@ -75,7 +110,7 @@ describe('test executable cortex file', () => { vkVisibleDevices: '', }) ) - expect(engineVariant(testSettings)).toEqual('mac-amd64') + expect(engineVariant(testSettings)).resolves.toEqual('mac-amd64') }) it('executes on Windows CPU', () => { @@ -86,16 +121,25 @@ describe('test executable cortex file', () => { ...testSettings, run_mode: 'cpu', } - mockCpuInfo.mockReturnValue(['avx']) + const mockProcess = { + on: jest.fn((event, callback) => { + if (event === 'message') { + callback('avx') + } + }), + send: jest.fn(), + } + mockFork.mockReturnValue(mockProcess) + expect(executableCortexFile(settings)).toEqual( expect.objectContaining({ - enginePath: expect.stringContaining("shared"), + enginePath: expect.stringContaining('shared'), executablePath: expect.stringContaining(`cortex-server.exe`), cudaVisibleDevices: '', vkVisibleDevices: '', }) ) - expect(engineVariant()).toEqual('windows-amd64-avx') + expect(engineVariant()).resolves.toEqual('windows-amd64-avx') }) it('executes on Windows Cuda 11', () => { @@ -122,16 +166,27 @@ describe('test executable cortex file', () => { }, ], } - mockCpuInfo.mockReturnValue(['avx2']) + + const mockProcess = { + on: jest.fn((event, callback) => { + if (event === 'message') { + callback('avx2') + } + }), + send: jest.fn(), + } + mockFork.mockReturnValue(mockProcess) expect(executableCortexFile(settings)).toEqual( expect.objectContaining({ - enginePath: expect.stringContaining("shared"), + enginePath: expect.stringContaining('shared'), executablePath: expect.stringContaining(`cortex-server.exe`), cudaVisibleDevices: '0', vkVisibleDevices: '0', }) ) - expect(engineVariant(settings)).toEqual('windows-amd64-avx2-cuda-11-7') + expect(engineVariant(settings)).resolves.toEqual( + 'windows-amd64-avx2-cuda-11-7' + ) }) it('executes on Windows Cuda 12', () => { @@ -158,18 +213,36 @@ describe('test executable cortex file', () => { }, ], } - mockCpuInfo.mockReturnValue(['noavx']) + mockFork.mockReturnValue({ + on: jest.fn((event, callback) => { + if (event === 'message') { + callback('noavx') + } + }), + send: jest.fn(), + }) expect(executableCortexFile(settings)).toEqual( expect.objectContaining({ - enginePath: expect.stringContaining("shared"), + enginePath: expect.stringContaining('shared'), executablePath: expect.stringContaining(`cortex-server.exe`), cudaVisibleDevices: '0', vkVisibleDevices: '0', }) ) - expect(engineVariant(settings)).toEqual('windows-amd64-noavx-cuda-12-0') - mockCpuInfo.mockReturnValue(['avx512']) - 
expect(engineVariant(settings)).toEqual('windows-amd64-avx2-cuda-12-0') + expect(engineVariant(settings)).resolves.toEqual( + 'windows-amd64-noavx-cuda-12-0' + ) + mockFork.mockReturnValue({ + on: jest.fn((event, callback) => { + if (event === 'message') { + callback('avx512') + } + }), + send: jest.fn(), + }) + expect(engineVariant(settings)).resolves.toEqual( + 'windows-amd64-avx2-cuda-12-0' + ) }) it('executes on Linux CPU', () => { @@ -180,16 +253,23 @@ describe('test executable cortex file', () => { ...testSettings, run_mode: 'cpu', } - mockCpuInfo.mockReturnValue(['noavx']) + mockFork.mockReturnValue({ + on: jest.fn((event, callback) => { + if (event === 'message') { + callback('noavx') + } + }), + send: jest.fn(), + }) expect(executableCortexFile(settings)).toEqual( expect.objectContaining({ - enginePath: expect.stringContaining("shared"), + enginePath: expect.stringContaining('shared'), executablePath: expect.stringContaining(`cortex-server`), cudaVisibleDevices: '', vkVisibleDevices: '', }) ) - expect(engineVariant()).toEqual('linux-amd64-noavx') + expect(engineVariant()).resolves.toEqual('linux-amd64-noavx') }) it('executes on Linux Cuda 11', () => { @@ -216,16 +296,25 @@ describe('test executable cortex file', () => { }, ], } - mockCpuInfo.mockReturnValue(['avx512']) + + mockFork.mockReturnValue({ + on: jest.fn((event, callback) => { + if (event === 'message') { + callback('avx512') + } + }), + send: jest.fn(), + }) + expect(executableCortexFile(settings)).toEqual( expect.objectContaining({ - enginePath: expect.stringContaining("shared"), + enginePath: expect.stringContaining('shared'), executablePath: expect.stringContaining(`cortex-server`), cudaVisibleDevices: '0', vkVisibleDevices: '0', }) ) - expect(engineVariant(settings)).toEqual('linux-amd64-avx2-cuda-11-7') + expect(engineVariant(settings)).resolves.toBe('linux-amd64-avx2-cuda-11-7') }) it('executes on Linux Cuda 12', () => { @@ -252,15 +341,25 @@ describe('test executable cortex file', () => { }, ], } + mockFork.mockReturnValue({ + on: jest.fn((event, callback) => { + if (event === 'message') { + callback('avx2') + } + }), + send: jest.fn(), + }) expect(executableCortexFile(settings)).toEqual( expect.objectContaining({ - enginePath: expect.stringContaining("shared"), + enginePath: expect.stringContaining('shared'), executablePath: expect.stringContaining(`cortex-server`), cudaVisibleDevices: '0', vkVisibleDevices: '0', }) ) - expect(engineVariant(settings)).toEqual('linux-amd64-avx2-cuda-12-0') + expect(engineVariant(settings)).resolves.toEqual( + 'linux-amd64-avx2-cuda-12-0' + ) }) // Generate test for different cpu instructions on Linux @@ -275,7 +374,14 @@ describe('test executable cortex file', () => { const cpuInstructions = ['avx512', 'avx2', 'avx', 'noavx'] cpuInstructions.forEach((instruction) => { - mockCpuInfo.mockReturnValue([instruction]) + mockFork.mockReturnValue({ + on: jest.fn((event, callback) => { + if (event === 'message') { + callback(instruction) + } + }), + send: jest.fn(), + }) expect(executableCortexFile(settings)).toEqual( expect.objectContaining({ @@ -286,7 +392,9 @@ describe('test executable cortex file', () => { vkVisibleDevices: '', }) ) - expect(engineVariant(settings)).toEqual(`linux-amd64-${instruction}`) + expect(engineVariant(settings)).resolves.toEqual( + `linux-amd64-${instruction}` + ) }) }) // Generate test for different cpu instructions on Windows @@ -300,7 +408,14 @@ describe('test executable cortex file', () => { } const cpuInstructions = ['avx512', 'avx2', 'avx', 'noavx'] 
cpuInstructions.forEach((instruction) => { - mockCpuInfo.mockReturnValue([instruction]) + mockFork.mockReturnValue({ + on: jest.fn((event, callback) => { + if (event === 'message') { + callback(instruction) + } + }), + send: jest.fn(), + }) expect(executableCortexFile(settings)).toEqual( expect.objectContaining({ enginePath: expect.stringContaining('shared'), @@ -309,7 +424,9 @@ describe('test executable cortex file', () => { vkVisibleDevices: '', }) ) - expect(engineVariant(settings)).toEqual(`windows-amd64-${instruction}`) + expect(engineVariant(settings)).resolves.toEqual( + `windows-amd64-${instruction}` + ) }) }) @@ -340,16 +457,23 @@ describe('test executable cortex file', () => { } const cpuInstructions = ['avx512', 'avx2', 'avx', 'noavx'] cpuInstructions.forEach((instruction) => { - mockCpuInfo.mockReturnValue([instruction]) + mockFork.mockReturnValue({ + on: jest.fn((event, callback) => { + if (event === 'message') { + callback(instruction) + } + }), + send: jest.fn(), + }) expect(executableCortexFile(settings)).toEqual( expect.objectContaining({ - enginePath: expect.stringContaining("shared"), + enginePath: expect.stringContaining('shared'), executablePath: expect.stringContaining(`cortex-server.exe`), cudaVisibleDevices: '0', vkVisibleDevices: '0', }) ) - expect(engineVariant(settings)).toEqual( + expect(engineVariant(settings)).resolves.toEqual( `windows-amd64-${instruction === 'avx512' || instruction === 'avx2' ? 'avx2' : 'noavx'}-cuda-12-0` ) }) @@ -382,16 +506,23 @@ describe('test executable cortex file', () => { ], } cpuInstructions.forEach((instruction) => { - mockCpuInfo.mockReturnValue([instruction]) + mockFork.mockReturnValue({ + on: jest.fn((event, callback) => { + if (event === 'message') { + callback(instruction) + } + }), + send: jest.fn(), + }) expect(executableCortexFile(settings)).toEqual( expect.objectContaining({ - enginePath: expect.stringContaining("shared"), + enginePath: expect.stringContaining('shared'), executablePath: expect.stringContaining(`cortex-server`), cudaVisibleDevices: '0', vkVisibleDevices: '0', }) ) - expect(engineVariant(settings)).toEqual( + expect(engineVariant(settings)).resolves.toEqual( `linux-amd64-${instruction === 'avx512' || instruction === 'avx2' ? 'avx2' : 'noavx'}-cuda-12-0` ) }) @@ -425,16 +556,23 @@ describe('test executable cortex file', () => { ], } cpuInstructions.forEach((instruction) => { - mockCpuInfo.mockReturnValue([instruction]) + mockFork.mockReturnValue({ + on: jest.fn((event, callback) => { + if (event === 'message') { + callback(instruction) + } + }), + send: jest.fn(), + }) expect(executableCortexFile(settings)).toEqual( expect.objectContaining({ - enginePath: expect.stringContaining("shared"), + enginePath: expect.stringContaining('shared'), executablePath: expect.stringContaining(`cortex-server`), cudaVisibleDevices: '0', vkVisibleDevices: '0', }) ) - expect(engineVariant(settings)).toEqual(`linux-amd64-vulkan`) + expect(engineVariant(settings)).resolves.toEqual(`linux-amd64-vulkan`) }) }) @@ -452,10 +590,17 @@ describe('test executable cortex file', () => { ...testSettings, run_mode: 'cpu', } - mockCpuInfo.mockReturnValue([]) + mockFork.mockReturnValue({ + on: jest.fn((event, callback) => { + if (event === 'message') { + callback('noavx') + } + }), + send: jest.fn(), + }) expect(executableCortexFile(settings)).toEqual( expect.objectContaining({ - enginePath: expect.stringContaining("shared"), + enginePath: expect.stringContaining('shared'), executablePath: originalPlatform === 'darwin' ? 
expect.stringContaining(`cortex-server`) diff --git a/extensions/inference-groq-extension/resources/models.json b/extensions/inference-groq-extension/resources/models.json index 6fce1c71b..04b60bfdd 100644 --- a/extensions/inference-groq-extension/resources/models.json +++ b/extensions/inference-groq-extension/resources/models.json @@ -61,6 +61,254 @@ }, "engine": "groq" }, + { + "sources": [ + { + "url": "https://groq.com" + } + ], + "id": "llama-3.1-70b-versatile", + "object": "model", + "name": "Groq Llama 3.1 70b Versatile", + "version": "1.1", + "description": "Groq Llama 3.1 70b Versatile with supercharged speed!", + "format": "api", + "settings": {}, + "parameters": { + "max_tokens": 8000, + "temperature": 0.7, + "top_p": 0.95, + "stream": true, + "stop": [], + "frequency_penalty": 0, + "presence_penalty": 0 + }, + "metadata": { + "author": "Meta", + "tags": [ + "General", + "Big Context Length" + ] + }, + "engine": "groq" + }, + { + "sources": [ + { + "url": "https://groq.com" + } + ], + "id": "llama-3.1-8b-instant", + "object": "model", + "name": "Groq Llama 3.1 8b Instant", + "version": "1.1", + "description": "Groq Llama 3.1 8b with supercharged speed!", + "format": "api", + "settings": {}, + "parameters": { + "max_tokens": 8000, + "temperature": 0.7, + "top_p": 0.95, + "stream": true, + "stop": [], + "frequency_penalty": 0, + "presence_penalty": 0 + }, + "metadata": { + "author": "Meta", + "tags": [ + "General", + "Big Context Length" + ] + }, + "engine": "groq" + }, + { + "sources": [ + { + "url": "https://groq.com" + } + ], + "id": "llama-3.2-11b-text-preview", + "object": "model", + "name": "Groq Llama 3.2 11b Text Preview", + "version": "1.1", + "description": "Groq Llama 3.2 11b Text Preview with supercharged speed!", + "format": "api", + "settings": {}, + "parameters": { + "max_tokens": 8192, + "temperature": 0.7, + "top_p": 0.95, + "stream": true, + "stop": [], + "frequency_penalty": 0, + "presence_penalty": 0 + }, + "metadata": { + "author": "Meta", + "tags": [ + "General", + "Big Context Length" + ] + }, + "engine": "groq" + }, + { + "sources": [ + { + "url": "https://groq.com" + } + ], + "id": "llama-3.2-11b-vision-preview", + "object": "model", + "name": "Groq Llama 3.2 11b Vision Preview", + "version": "1.1", + "description": "Groq Llama 3.2 11b Vision Preview with supercharged speed!", + "format": "api", + "settings": {}, + "parameters": { + "max_tokens": 8192, + "temperature": 0.7, + "top_p": 0.95, + "stream": true, + "stop": [], + "frequency_penalty": 0, + "presence_penalty": 0 + }, + "metadata": { + "author": "Meta", + "tags": [ + "General", + "Big Context Length" + ] + }, + "engine": "groq" + }, + { + "sources": [ + { + "url": "https://groq.com" + } + ], + "id": "llama-3.2-1b-preview", + "object": "model", + "name": "Groq Llama 3.2 1b Preview", + "version": "1.1", + "description": "Groq Llama 3.2 1b Preview with supercharged speed!", + "format": "api", + "settings": {}, + "parameters": { + "max_tokens": 8192, + "temperature": 0.7, + "top_p": 0.95, + "stream": true, + "stop": [], + "frequency_penalty": 0, + "presence_penalty": 0 + }, + "metadata": { + "author": "Meta", + "tags": [ + "General", + "Big Context Length" + ] + }, + "engine": "groq" + }, + { + "sources": [ + { + "url": "https://groq.com" + } + ], + "id": "llama-3.2-3b-preview", + "object": "model", + "name": "Groq Llama 3.2 3b Preview", + "version": "1.1", + "description": "Groq Llama 3.2 3b Preview with supercharged speed!", + "format": "api", + "settings": {}, + "parameters": { + "max_tokens": 8192, 
+ "temperature": 0.7, + "top_p": 0.95, + "stream": true, + "stop": [], + "frequency_penalty": 0, + "presence_penalty": 0 + }, + "metadata": { + "author": "Meta", + "tags": [ + "General", + "Big Context Length" + ] + }, + "engine": "groq" + }, + { + "sources": [ + { + "url": "https://groq.com" + } + ], + "id": "llama-3.2-90b-text-preview", + "object": "model", + "name": "Groq Llama 3.2 90b Text Preview", + "version": "1.1", + "description": "Groq Llama 3.2 90b Text Preview with supercharged speed!", + "format": "api", + "settings": {}, + "parameters": { + "max_tokens": 8192, + "temperature": 0.7, + "top_p": 0.95, + "stream": true, + "stop": [], + "frequency_penalty": 0, + "presence_penalty": 0 + }, + "metadata": { + "author": "Meta", + "tags": [ + "General", + "Big Context Length" + ] + }, + "engine": "groq" + }, + { + "sources": [ + { + "url": "https://groq.com" + } + ], + "id": "llama-3.2-90b-vision-preview", + "object": "model", + "name": "Groq Llama 3.2 90b Vision Preview", + "version": "1.1", + "description": "Groq Llama 3.2 90b Vision Preview with supercharged speed!", + "format": "api", + "settings": {}, + "parameters": { + "max_tokens": 8192, + "temperature": 0.7, + "top_p": 0.95, + "stream": true, + "stop": [], + "frequency_penalty": 0, + "presence_penalty": 0 + }, + "metadata": { + "author": "Meta", + "tags": [ + "General", + "Big Context Length" + ] + }, + "engine": "groq" + }, { "sources": [ { @@ -91,6 +339,36 @@ }, "engine": "groq" }, + { + "sources": [ + { + "url": "https://groq.com" + } + ], + "id": "gemma2-9b-it", + "object": "model", + "name": "Groq Gemma 9B Instruct", + "version": "1.2", + "description": "Groq Gemma 9b Instruct with supercharged speed!", + "format": "api", + "settings": {}, + "parameters": { + "max_tokens": 8192, + "temperature": 0.7, + "top_p": 0.95, + "stream": true, + "stop": [], + "frequency_penalty": 0, + "presence_penalty": 0 + }, + "metadata": { + "author": "Google", + "tags": [ + "General" + ] + }, + "engine": "groq" + }, { "sources": [ { diff --git a/extensions/monitoring-extension/src/index.ts b/extensions/monitoring-extension/src/index.ts index 1d21fde77..eca71326e 100644 --- a/extensions/monitoring-extension/src/index.ts +++ b/extensions/monitoring-extension/src/index.ts @@ -1,7 +1,9 @@ import { + AppConfigurationEventName, GpuSetting, MonitoringExtension, OperatingSystemInfo, + events, executeOnMain, } from '@janhq/core' @@ -37,6 +39,7 @@ export default class JanMonitoringExtension extends MonitoringExtension { // Attempt to fetch nvidia info await executeOnMain(NODE, 'updateNvidiaInfo') + events.emit(AppConfigurationEventName.OnConfigurationUpdate, {}) } onSettingUpdate(key: string, value: T): void { diff --git a/joi/src/core/ScrollArea/styles.scss b/joi/src/core/ScrollArea/styles.scss index 3ab0bd306..fd8a43e53 100644 --- a/joi/src/core/ScrollArea/styles.scss +++ b/joi/src/core/ScrollArea/styles.scss @@ -66,4 +66,5 @@ } ::-webkit-scrollbar-thumb { background: hsla(var(--scrollbar-thumb)); + border-radius: 20px; } diff --git a/web/app/search/layout.tsx b/web/app/search/layout.tsx index 6c491c381..8af34dc00 100644 --- a/web/app/search/layout.tsx +++ b/web/app/search/layout.tsx @@ -52,9 +52,8 @@ export default function RootLayout() { - - - + + diff --git a/web/containers/EngineSetting/index.tsx b/web/containers/EngineSetting/index.tsx index acbd507ce..0ae2929bf 100644 --- a/web/containers/EngineSetting/index.tsx +++ b/web/containers/EngineSetting/index.tsx @@ -4,7 +4,10 @@ import SettingComponentBuilder from 
'@/containers/ModelSetting/SettingComponent' type Props = { componentData: SettingComponentProps[] - onValueChanged: (key: string, value: string | number | boolean) => void + onValueChanged: ( + key: string, + value: string | number | boolean | string[] + ) => void disabled?: boolean } diff --git a/web/containers/ErrorMessage/index.tsx b/web/containers/ErrorMessage/index.tsx index be26ad44a..4c97da14b 100644 --- a/web/containers/ErrorMessage/index.tsx +++ b/web/containers/ErrorMessage/index.tsx @@ -29,7 +29,6 @@ const ErrorMessage = ({ message }: { message: ThreadMessage }) => { switch (message.error_code) { case ErrorCode.InvalidApiKey: case ErrorCode.AuthenticationError: - case ErrorCode.InvalidRequestError: return ( Invalid API key. Please check your API key from{' '} @@ -53,7 +52,7 @@ const ErrorMessage = ({ message }: { message: ThreadMessage }) => { ) default: return ( -
<div …>
+          <div …>
{message.content[0]?.text?.value && ( )} diff --git a/web/containers/Layout/index.tsx b/web/containers/Layout/index.tsx index 8a3f417f4..e787163d4 100644 --- a/web/containers/Layout/index.tsx +++ b/web/containers/Layout/index.tsx @@ -1,10 +1,8 @@ 'use client' -import { useEffect } from 'react' +import { useEffect, useMemo } from 'react' -import { motion as m } from 'framer-motion' - -import { useAtom, useAtomValue } from 'jotai' +import { useAtomValue, useSetAtom } from 'jotai' import { twMerge } from 'tailwind-merge' @@ -36,7 +34,7 @@ import { mainViewStateAtom } from '@/helpers/atoms/App.atom' import { reduceTransparentAtom } from '@/helpers/atoms/Setting.atom' const BaseLayout = () => { - const [mainViewState, setMainViewState] = useAtom(mainViewStateAtom) + const setMainViewState = useSetAtom(mainViewStateAtom) const importModelStage = useAtomValue(getImportModelStageAtom) const reduceTransparent = useAtomValue(reduceTransparentAtom) @@ -68,24 +66,7 @@ const BaseLayout = () => {

-            <m.div …>
-              …
-            </m.div>
+ {importModelStage === 'SELECTING_MODEL' && } {importModelStage === 'MODEL_SELECTED' && } diff --git a/web/containers/ListContainer/index.tsx b/web/containers/ListContainer/index.tsx index 2b720fb43..44e5b2527 100644 --- a/web/containers/ListContainer/index.tsx +++ b/web/containers/ListContainer/index.tsx @@ -1,15 +1,30 @@ -import { ReactNode, useCallback, useEffect, useRef } from 'react' +import { PropsWithChildren, useCallback, useEffect, useRef } from 'react' import { ScrollArea } from '@janhq/joi' -type Props = { - children: ReactNode -} +import { useAtomValue } from 'jotai' -const ListContainer = ({ children }: Props) => { +import { activeThreadAtom } from '@/helpers/atoms/Thread.atom' + +const ListContainer = ({ children }: PropsWithChildren) => { const listRef = useRef(null) const prevScrollTop = useRef(0) const isUserManuallyScrollingUp = useRef(false) + const activeThread = useAtomValue(activeThreadAtom) + const prevActiveThread = useRef(activeThread) + + // Handle active thread changes + useEffect(() => { + if (prevActiveThread.current?.id !== activeThread?.id) { + isUserManuallyScrollingUp.current = false + const scrollHeight = listRef.current?.scrollHeight ?? 0 + listRef.current?.scrollTo({ + top: scrollHeight, + behavior: 'instant', + }) + prevActiveThread.current = activeThread // Update the previous active thread reference + } + }, [activeThread]) const handleScroll = useCallback((event: React.UIEvent) => { const currentScrollTop = event.currentTarget.scrollTop diff --git a/web/containers/Loader/GenerateResponse.test.tsx b/web/containers/Loader/GenerateResponse.test.tsx deleted file mode 100644 index 7e3e5c3a4..000000000 --- a/web/containers/Loader/GenerateResponse.test.tsx +++ /dev/null @@ -1,75 +0,0 @@ -// GenerateResponse.test.tsx -import React from 'react'; -import { render, screen, act } from '@testing-library/react'; -import '@testing-library/jest-dom'; -import GenerateResponse from './GenerateResponse'; - -jest.useFakeTimers(); - -describe('GenerateResponse Component', () => { - it('renders initially with 1% loader width', () => { - render(); - const loader = screen.getByTestId('response-loader'); - expect(loader).toHaveStyle('width: 24%'); - }); - - it('updates loader width over time', () => { - render(); - const loader = screen.getByTestId('response-loader'); - - // Advance timers to simulate time passing - act(() => { - jest.advanceTimersByTime(1000); - }); - - expect(loader).not.toHaveStyle('width: 1%'); - expect(parseFloat(loader.style.width)).toBeGreaterThan(1); - }); - - it('pauses at specific percentages', () => { - render(); - const loader = screen.getByTestId('response-loader'); - - // Advance to 24% - act(() => { - for (let i = 0; i < 24; i++) { - jest.advanceTimersByTime(50); - } - }); - - expect(loader).toHaveStyle('width: 50%'); - - // Advance past the pause - act(() => { - jest.advanceTimersByTime(300); - }); - - expect(loader).toHaveStyle('width: 78%'); - }); - - it('stops at 85%', () => { - render(); - const loader = screen.getByTestId('response-loader'); - - // Advance to 50% - act(() => { - for (let i = 0; i < 85; i++) { - jest.advanceTimersByTime(50); - } - }); - - expect(loader).toHaveStyle('width: 50%'); - - // Check if it stays at 78% - act(() => { - jest.advanceTimersByTime(1000); - }); - - expect(loader).toHaveStyle('width: 78%'); - }); - - it('displays the correct text', () => { - render(); - expect(screen.getByText('Generating response...')).toBeInTheDocument(); - }); -}); diff --git a/web/containers/Loader/GenerateResponse.tsx 
b/web/containers/Loader/GenerateResponse.tsx index 7f23d73ea..d43c8cab9 100644 --- a/web/containers/Loader/GenerateResponse.tsx +++ b/web/containers/Loader/GenerateResponse.tsx @@ -1,5 +1,7 @@ import React, { useEffect, useState } from 'react' +import { motion as m } from 'framer-motion' + export default function GenerateResponse() { const [loader, setLoader] = useState(0) @@ -28,10 +30,17 @@ export default function GenerateResponse() { return (
-        <div … />
+        <m.div … />
         Generating response...
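The markup in the Loader hunk above was lost in extraction, but the import change makes the intent clear: the hand-rolled progress <div> is swapped for a framer-motion element so width updates animate instead of snapping. A minimal sketch of that pattern, assuming the component's existing `loader` percentage state; the class names, timing, and `data-testid` are illustrative assumptions, not the PR's actual markup:

import React, { useState } from 'react'
import { motion as m } from 'framer-motion'

export default function GenerateResponse() {
  // Percentage driven elsewhere by the component's interval logic (elided here).
  const [loader] = useState(24)

  return (
    <div className="w-full rounded bg-gray-200">
      {/* m.div tweens between width values rather than jumping */}
      <m.div
        data-testid="response-loader"
        className="h-1 rounded bg-blue-600"
        initial={{ width: 0 }}
        animate={{ width: `${loader}%` }}
        transition={{ duration: 0.25 }}
      />
      Generating response...
    </div>
  )
}

The same swap is applied to ModelStart.tsx in the next hunk.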
diff --git a/web/containers/Loader/ModelStart.tsx b/web/containers/Loader/ModelStart.tsx index 4979bbb33..cad8afab0 100644 --- a/web/containers/Loader/ModelStart.tsx +++ b/web/containers/Loader/ModelStart.tsx @@ -1,5 +1,7 @@ import React, { useEffect, useState } from 'react' +import { motion as m } from 'framer-motion' + import { useActiveModel } from '@/hooks/useActiveModel' export default function ModelStart() { @@ -37,10 +39,17 @@ export default function ModelStart() { return (
-        <div … />
+        <m.div … />
{stateModel.state === 'start' ? 'Starting' : 'Stopping'} diff --git a/web/containers/MainViewContainer/index.tsx b/web/containers/MainViewContainer/index.tsx index 4f3b4986a..ba7f87fd2 100644 --- a/web/containers/MainViewContainer/index.tsx +++ b/web/containers/MainViewContainer/index.tsx @@ -1,5 +1,10 @@ +import { memo } from 'react' + +import { motion as m } from 'framer-motion' import { useAtomValue } from 'jotai' +import { twMerge } from 'tailwind-merge' + import { MainViewState } from '@/constants/screens' import HubScreen from '@/screens/Hub' @@ -31,7 +36,26 @@ const MainViewContainer = () => { break } - return children + return ( +
<div …>
+        <m.div …>
+          {children}
+        </m.div>
+      </div>
+ ) } -export default MainViewContainer +export default memo(MainViewContainer) diff --git a/web/containers/ModelConfigInput/index.test.tsx b/web/containers/ModelConfigInput/index.test.tsx index b92bdfcb2..cf9cb9da3 100644 --- a/web/containers/ModelConfigInput/index.test.tsx +++ b/web/containers/ModelConfigInput/index.test.tsx @@ -2,7 +2,6 @@ import '@testing-library/jest-dom' import React from 'react' import { render, fireEvent } from '@testing-library/react' import ModelConfigInput from './index' -import { Tooltip } from '@janhq/joi' // Mocking the Tooltip component to simplify testing jest.mock('@janhq/joi', () => ({ diff --git a/web/containers/ModelConfigInput/index.tsx b/web/containers/ModelConfigInput/index.tsx index f0e6ea1f2..e67080df2 100644 --- a/web/containers/ModelConfigInput/index.tsx +++ b/web/containers/ModelConfigInput/index.tsx @@ -19,28 +19,30 @@ const ModelConfigInput = ({ description, placeholder, onValueChanged, -}: Props) => ( -
-    <div …>
-      <div …>
-        {title}
-        <Tooltip
-          trigger={
-            …
-          }
-          content={description}
+}: Props) => {
+  return (
+    <div …>
+      <div …>
+        {title}
+        <Tooltip
+          trigger={
+            …
+          }
+          content={description}
+        />
+
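The EngineSetting change earlier in this diff widens `onValueChanged` to also accept `string[]`, and the ModelConfigInput refactor above feeds the same callback shape, so consumers now have to narrow the union before persisting a value. A hedged sketch of such a handler; `updateEngineSetting` is a hypothetical stand-in for whatever persistence call the caller actually uses:

type SettingValue = string | number | boolean | string[]

// Hypothetical persistence helper — a stand-in, not an API from this PR.
declare function updateEngineSetting(key: string, value: string): void

const onValueChanged = (key: string, value: SettingValue) => {
  if (Array.isArray(value)) {
    // Array-valued settings (e.g. stop words) are serialized before saving.
    updateEngineSetting(key, value.join(','))
  } else {
    updateEngineSetting(key, String(value))
  }
}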