diff --git a/.github/workflows/jan-docs-new-release.yaml b/.github/workflows/jan-docs-new-release.yaml index 2acca92de..a8e94b6d7 100644 --- a/.github/workflows/jan-docs-new-release.yaml +++ b/.github/workflows/jan-docs-new-release.yaml @@ -58,6 +58,6 @@ jobs: accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }} projectName: ${{ env.CLOUDFLARE_PROJECT_NAME }} directory: ./docs/out - branch: dev + branch: main # Optional: Enable this if you want to have GitHub Deployments triggered gitHubToken: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/jan-electron-build-beta.yml b/.github/workflows/jan-electron-build-beta.yml index 4f2625266..67496b355 100644 --- a/.github/workflows/jan-electron-build-beta.yml +++ b/.github/workflows/jan-electron-build-beta.yml @@ -133,4 +133,26 @@ jobs: run: | gh release edit v${{ needs.create-draft-release.outputs.version }} --draft=false --prerelease env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + noti-discord-and-update-url-readme: + needs: [build-macos-x64, build-macos-arm64, create-draft-release, build-windows-x64, build-linux-x64, combine-beta-mac-yml] + runs-on: ubuntu-latest + steps: + - name: Set version to environment variable + run: | + echo "VERSION=${{ needs.create-draft-release.outputs.version }}" >> $GITHUB_ENV + + - name: Notify Discord + uses: Ilshidur/action-discord@master + with: + args: | + Jan-beta App version {{ VERSION }} has been released. Use the following links to download the app at a faster speed, or visit the GitHub release page for more information: + - Windows: https://delta.jan.ai/beta/jan-beta-win-x64-{{ VERSION }}.exe + - macOS Intel: https://delta.jan.ai/beta/jan-beta-mac-x64-{{ VERSION }}.dmg + - macOS Apple Silicon: https://delta.jan.ai/beta/jan-beta-mac-arm64-{{ VERSION }}.dmg + - Linux Deb: https://delta.jan.ai/beta/jan-beta-linux-amd64-{{ VERSION }}.deb + - Linux AppImage: https://delta.jan.ai/beta/jan-beta-linux-x86_64-{{ VERSION }}.AppImage + - GitHub Release URL: https://github.com/janhq/jan/releases/tag/v{{ VERSION }} + env: + DISCORD_WEBHOOK: ${{ secrets.DISCORD_WEBHOOK_JAN_BETA }} \ No newline at end of file diff --git a/JanBanner.png b/JanBanner.png new file mode 100644 index 000000000..165831871 Binary files /dev/null and b/JanBanner.png differ diff --git a/README.md b/README.md index ecede5bd9..043960537 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ -# Jan - Turn your computer into an AI computer +# Jan - Local AI Assistant -![Jan banner](https://github.com/janhq/jan/assets/89722390/35daac7d-b895-487c-a6ac-6663daaad78e) +![Jan banner](./JanBanner.png)

@@ -12,18 +12,22 @@

- Getting Started + Getting Started - Docs - Changelog - Bug reports - Discord

-> [!Warning] >**Jan is currently in Development**: Expect breaking changes and bugs! +

+⚠️ Jan is currently in Development: Expect breaking changes and bugs! +

-Jan is an open-source ChatGPT alternative that runs 100% offline on your computer. -**Jan runs on any hardware.** From PCs to multi-GPU clusters, Jan supports universal architectures: +Jan is a ChatGPT-alternative that runs 100% offline on your device. Our goal is to make it easy for a layperson to download and run LLMs and use AI with **full control** and **privacy**. + +Jan is powered by [Cortex](https://github.com/janhq/cortex.cpp), our embeddable local AI engine that runs on any hardware. +From PCs to multi-GPU clusters, Jan & Cortex support universal architectures: - [x] NVIDIA GPUs (fast) - [x] Apple M-series (fast) @@ -31,6 +35,12 @@ Jan is an open-source ChatGPT alternative that runs 100% offline on your compute - [x] Linux Debian - [x] Windows x64 +#### Features: +- [Model Library](https://jan.ai/docs/models/manage-models#add-models) with popular LLMs like Llama, Gemma, Mistral, or Qwen +- Connect to [Remote AI APIs](https://jan.ai/docs/remote-models/openai) like Groq and OpenRouter +- Local API Server with OpenAI-equivalent API +- [Extensions](https://jan.ai/docs/extensions) for customizing Jan + ## Download @@ -74,7 +84,40 @@ Jan is an open-source ChatGPT alternative that runs 100% offline on your compute - + + + + + + + +
Experimental (Nightly Build)Beta (Preview) + + + jan.exe + + + + + Intel + + + + + M1/M2/M3/M4 + + + + + jan.deb + + + + + jan.AppImage + +
Nightly Build (Experimental) @@ -108,79 +151,64 @@ Jan is an open-source ChatGPT alternative that runs 100% offline on your compute
-Download the latest version of Jan at https://jan.ai/ or visit the **[GitHub Releases](https://github.com/janhq/jan/releases)** to download any previous release. +Download the latest version of Jan at https://jan.ai/ or visit the [GitHub Releases](https://github.com/janhq/jan/releases) to download any previous release. ## Demo -![Demo](/demo.gif) +https://github.com/user-attachments/assets/c3592fa2-c504-4d9d-a885-7e00122a50f3 -_Realtime Video: Jan v0.4.3-nightly on a Mac M1, 16GB Sonoma 14_ +*Real-time Video: Jan v0.5.7 on a Mac M2, 16GB Sonoma 14.2* ## Quicklinks -#### Jan +### Jan -- [Jan website](https://jan.ai/) +- [Jan Website](https://jan.ai/) - [Jan GitHub](https://github.com/janhq/jan) -- [User Guides](https://jan.ai/guides/) -- [Developer docs](https://jan.ai/developer/) -- [API reference](https://jan.ai/api-reference/) -- [Specs](https://jan.ai/docs/) +- [Documentation](https://jan.ai/docs) +- [Jan Changelog](https://jan.ai/changelog) +- [Jan Blog](https://jan.ai/blog) -#### Nitro +### Cortex.cpp +Jan is powered by **Cortex.cpp**. It is a C++ command-line interface (CLI) designed as an alternative to [Ollama](https://ollama.com/). By default, it runs on the llama.cpp engine but also supports other engines, including ONNX and TensorRT-LLM, making it a multi-engine platform. -Nitro is a high-efficiency C++ inference engine for edge computing. It is lightweight and embeddable, and can be used on its own within your own projects. - -- [Nitro Website](https://nitro.jan.ai) -- [Nitro GitHub](https://github.com/janhq/nitro) -- [Documentation](https://nitro.jan.ai/docs) -- [API Reference](https://nitro.jan.ai/api-reference) - -## Troubleshooting - -As Jan is in development mode, you might get stuck on a broken build. - -To reset your installation: - -1. Use the following commands to remove any dangling backend processes: - - ```sh - ps aux | grep nitro - ``` - - Look for processes like "nitro" and "nitro_arm_64," and kill them one by one with: - - ```sh - kill -9 - ``` - -2. 
**Remove Jan from your Applications folder and Cache folder** - ```bash - make clean - ``` - This will remove all build artifacts and cached files: - - Delete Jan extension from your `~/jan/extensions` folder - - Delete all `node_modules` in current folder - - Clear Application cache in `~/Library/Caches/jan` +- [Cortex Website](https://cortex.so/) +- [Cortex GitHub](https://github.com/janhq/cortex.cpp) +- [Documentation](https://cortex.so/docs/) +- [Models Library](https://cortex.so/models) +- API Reference: *Under development* + ## Requirements for running Jan -- MacOS: 13 or higher -- Windows: +- **MacOS**: 13 or higher +- **Windows**: - Windows 10 or higher - To enable GPU support: - Nvidia GPU with CUDA Toolkit 11.7 or higher - Nvidia driver 470.63.01 or higher -- Linux: +- **Linux**: - glibc 2.27 or higher (check with `ldd --version`) - gcc 11, g++ 11, cpp 11 or higher, refer to this [link](https://jan.ai/guides/troubleshooting/gpu-not-used/#specific-requirements-for-linux) for more information - To enable GPU support: - Nvidia GPU with CUDA Toolkit 11.7 or higher - Nvidia driver 470.63.01 or higher +## Troubleshooting + +As Jan is in development mode, you might get stuck on some common issues: +- [Troubleshooting a broken build](https://jan.ai/docs/troubleshooting#broken-build) +- [Troubleshooting NVIDIA GPU](https://jan.ai/docs/troubleshooting#troubleshooting-nvidia-gpu) +- [Troubleshooting Something's Amiss](https://jan.ai/docs/troubleshooting#somethings-amiss) + + +If you can't find what you need in our troubleshooting guide, feel free to reach out to us for extra help: +1. Copy your [error logs & device specifications](https://jan.ai/docs/troubleshooting#how-to-get-error-logs). +2. Go to our [Discord](https://discord.com/invite/FTk2MvZwJH) & send it to the **#🆘|get-help** channel for further support. + +*Check the logs to ensure the information is what you intend to send. Note that we retain your logs for only 24 hours, so report any issues promptly.* + + ## Contributing Contributions are welcome! Please read the [CONTRIBUTING.md](CONTRIBUTING.md) file @@ -209,11 +237,7 @@ Contributions are welcome! Please read the [CONTRIBUTING.md](CONTRIBUTING.md) fi This will start the development server and open the desktop app. -3. (Optional) **Run the API server without frontend** - ```bash - yarn dev:server - ``` ### For production build @@ -225,102 +249,6 @@ make build This will build the app MacOS m1/m2 for production (with code signing already done) and put the result in `dist` folder. -### Docker mode - -- Supported OS: Linux, WSL2 Docker -- Pre-requisites: - - - Docker Engine and Docker Compose are required to run Jan in Docker mode. Follow the [instructions](https://docs.docker.com/engine/install/ubuntu/) below to get started with Docker Engine on Ubuntu. - - ```bash - curl -fsSL https://get.docker.com -o get-docker.sh - sudo sh ./get-docker.sh --dry-run - ``` - - - If you intend to run Jan in GPU mode, you need to install `nvidia-driver` and `nvidia-docker2`. Follow the instruction [here](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html) for installation. 
- -- Run Jan in Docker mode - > User can choose between `docker-compose.yml` with latest prebuilt docker image or `docker-compose-dev.yml` with local docker build - -| Docker compose Profile | Description | -| ---------------------- | -------------------------------------------- | -| `cpu-fs` | Run Jan in CPU mode with default file system | -| `cpu-s3fs` | Run Jan in CPU mode with S3 file system | -| `gpu-fs` | Run Jan in GPU mode with default file system | -| `gpu-s3fs` | Run Jan in GPU mode with S3 file system | - -| Environment Variable | Description | -| ----------------------- | ------------------------------------------------------------------------------------------------------- | -| `S3_BUCKET_NAME` | S3 bucket name - leave blank for default file system | -| `AWS_ACCESS_KEY_ID` | AWS access key ID - leave blank for default file system | -| `AWS_SECRET_ACCESS_KEY` | AWS secret access key - leave blank for default file system | -| `AWS_ENDPOINT` | AWS endpoint URL - leave blank for default file system | -| `AWS_REGION` | AWS region - leave blank for default file system | -| `API_BASE_URL` | Jan Server URL, please modify it as your public ip address or domain name default http://localhost:1377 | - -- **Option 1**: Run Jan in CPU mode - - ```bash - # cpu mode with default file system - docker compose --profile cpu-fs up -d - - # cpu mode with S3 file system - docker compose --profile cpu-s3fs up -d - ``` - -- **Option 2**: Run Jan in GPU mode - - - **Step 1**: Check CUDA compatibility with your NVIDIA driver by running `nvidia-smi` and check the CUDA version in the output - - ```bash - nvidia-smi - - # Output - +---------------------------------------------------------------------------------------+ - | NVIDIA-SMI 531.18 Driver Version: 531.18 CUDA Version: 12.1 | - |-----------------------------------------+----------------------+----------------------+ - | GPU Name TCC/WDDM | Bus-Id Disp.A | Volatile Uncorr. ECC | - | Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | - | | | MIG M. | - |=========================================+======================+======================| - | 0 NVIDIA GeForce RTX 4070 Ti WDDM | 00000000:01:00.0 On | N/A | - | 0% 44C P8 16W / 285W| 1481MiB / 12282MiB | 2% Default | - | | | N/A | - +-----------------------------------------+----------------------+----------------------+ - | 1 NVIDIA GeForce GTX 1660 Ti WDDM | 00000000:02:00.0 Off | N/A | - | 0% 49C P8 14W / 120W| 0MiB / 6144MiB | 0% Default | - | | | N/A | - +-----------------------------------------+----------------------+----------------------+ - | 2 NVIDIA GeForce GTX 1660 Ti WDDM | 00000000:05:00.0 Off | N/A | - | 29% 38C P8 11W / 120W| 0MiB / 6144MiB | 0% Default | - | | | N/A | - +-----------------------------------------+----------------------+----------------------+ - - +---------------------------------------------------------------------------------------+ - | Processes: | - | GPU GI CI PID Type Process name GPU Memory | - | ID ID Usage | - |=======================================================================================| - ``` - - - **Step 2**: Visit [NVIDIA NGC Catalog ](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/cuda/tags) and find the smallest minor version of image tag that matches your CUDA version (e.g., 12.1 -> 12.1.0) - - - **Step 3**: Update the `Dockerfile.gpu` line number 5 with the latest minor version of the image tag from step 2 (e.g. 
change `FROM nvidia/cuda:12.2.0-runtime-ubuntu22.04 AS base` to `FROM nvidia/cuda:12.1.0-runtime-ubuntu22.04 AS base`) - - **Step 4**: Run command to start Jan in GPU mode - - ```bash - # GPU mode with default file system - docker compose --profile gpu-fs up -d - - # GPU mode with S3 file system - docker compose --profile gpu-s3fs up -d - ``` - -This will start the web server and you can access Jan at `http://localhost:3000`. - -> Note: RAG feature is not supported in Docker mode with s3fs yet. - ## Acknowledgements Jan builds on top of other open-source projects: @@ -334,18 +262,18 @@ Jan builds on top of other open-source projects: - Bugs & requests: file a GitHub ticket - For discussion: join our Discord [here](https://discord.gg/FTk2MvZwJH) -- For business inquiries: email hello@jan.ai +- For business inquiries: email hello@jan.ai - For jobs: please email hr@jan.ai ## Trust & Safety -Beware of scams. +Beware of scams! -- We will never ask you for personal info -- We are a free product; there's no paid version -- We don't have a token or ICO -- We are not actively fundraising or seeking donations +- We will never request your personal information. +- Our product is completely free; no paid version exists. +- We do not have a token or ICO. +- We are a [bootstrapped company](https://en.wikipedia.org/wiki/Bootstrapping) and don't have any external investors (*yet*). We're open to exploring opportunities with strategic partners who want to tackle [our mission](https://jan.ai/about#mission) together. ## License -Jan is free and open source, under the AGPLv3 license. +Jan is free and open source, under the **AGPLv3** license. diff --git a/core/tsconfig.json b/core/tsconfig.json index 02caf65e2..b30d65851 100644 --- a/core/tsconfig.json +++ b/core/tsconfig.json @@ -1,7 +1,7 @@ { "compilerOptions": { "moduleResolution": "node", - "target": "es5", + "target": "ES2015", "module": "ES2020", "lib": ["es2015", "es2016", "es2017", "dom"], "strict": true, @@ -13,7 +13,7 @@ "declarationDir": "dist/types", "outDir": "dist/lib", "importHelpers": true, - "types": ["@types/jest"], + "types": ["@types/jest"] }, "include": ["src"], "exclude": ["**/*.test.ts"] diff --git a/docs/public/assets/images/changelog/jan-v0.5.5.jpeg b/docs/public/assets/images/changelog/jan-v0.5.5.jpeg new file mode 100644 index 000000000..a0b1c4984 Binary files /dev/null and b/docs/public/assets/images/changelog/jan-v0.5.5.jpeg differ diff --git a/docs/public/assets/images/changelog/jan-v0.5.7.gif b/docs/public/assets/images/changelog/jan-v0.5.7.gif new file mode 100644 index 000000000..cddbf62b0 Binary files /dev/null and b/docs/public/assets/images/changelog/jan-v0.5.7.gif differ diff --git a/docs/src/pages/changelog/2024-02-10-jan-is-more-stable.mdx b/docs/src/pages/changelog/2024-02-10-jan-is-more-stable.mdx new file mode 100644 index 000000000..59e78a0a7 --- /dev/null +++ b/docs/src/pages/changelog/2024-02-10-jan-is-more-stable.mdx @@ -0,0 +1,27 @@ +--- +title: "Jan is more stable 👋" +version: 0.5.5 +description: "Jan supports Llama 3.2 and Qwen 2.5" +date: 2024-10-02 +ogImage: "/assets/images/changelog/jan-v0.5.5.jpeg" +--- + +import ChangelogHeader from "@/components/Changelog/ChangelogHeader" + + + +Highlights 🎉 + +- Meta's Llama 3.2 and Alibaba's Qwen 2.5 added to the hub +- Improved starter screen +- Better local vs. 
cloud model navigation + +Fixes 💫 + +- Solved GPU acceleration for GGUF models +- Improved model caching & threading +- Resolved input & toolbar overlaps + +Update your product or download the latest: https://jan.ai + +For more details, see the [GitHub release notes](https://github.com/janhq/jan/releases/tag/v0.5.5). \ No newline at end of file diff --git a/docs/src/pages/changelog/2024-10-24-jan-stable.mdx b/docs/src/pages/changelog/2024-10-24-jan-stable.mdx new file mode 100644 index 000000000..7c24d51ee --- /dev/null +++ b/docs/src/pages/changelog/2024-10-24-jan-stable.mdx @@ -0,0 +1,26 @@ +--- +title: "Jan has Stable, Beta and Nightly versions" +version: 0.5.7 +description: "This release is mostly focused on bug fixes." +date: 2024-10-24 +ogImage: "/assets/images/changelog/jan-v0.5.7.gif" +--- + +import ChangelogHeader from "@/components/Changelog/ChangelogHeader" + + + +Highlights 🎉 + +- Jan has Stable, Beta and Nightly versions +- Saving instructions for new threads is now stable + +Fixes 💫 + +- Fixed broken links, hardware issues, and multi-modal download +- Resolved text overlap, scrolling, and multi-monitor reset problems +- Adjusted LLava model EOS token and context input + +Update your product or download the latest: https://jan.ai + +For more details, see the [GitHub release notes](https://github.com/janhq/jan/releases/tag/v0.5.7). \ No newline at end of file diff --git a/docs/src/pages/docs/_assets/jan-app.png b/docs/src/pages/docs/_assets/jan-app.png new file mode 100644 index 000000000..a45943055 Binary files /dev/null and b/docs/src/pages/docs/_assets/jan-app.png differ diff --git a/docs/src/pages/docs/_assets/jan-display.png b/docs/src/pages/docs/_assets/jan-display.png deleted file mode 100644 index 2fc3610ba..000000000 Binary files a/docs/src/pages/docs/_assets/jan-display.png and /dev/null differ diff --git a/docs/src/pages/docs/index.mdx b/docs/src/pages/docs/index.mdx index 027ad11b6..0efb1ba51 100644 --- a/docs/src/pages/docs/index.mdx +++ b/docs/src/pages/docs/index.mdx @@ -22,7 +22,7 @@ import FAQBox from '@/components/FaqBox' # Jan -![Jan's Cover Image](./_assets/jan-display.png) +![Jan's Cover Image](./_assets/jan-app.png) Jan is a ChatGPT-alternative that runs 100% offline on your [Desktop](/docs/desktop-installation). Our goal is to make it easy for a layperson[^1] to download and run LLMs and use AI with full control and [privacy](https://www.reuters.com/legal/legalindustry/privacy-paradox-with-ai-2023-10-31/). 
diff --git a/extensions/assistant-extension/tsconfig.json b/extensions/assistant-extension/tsconfig.json index e425358c3..3838d1c42 100644 --- a/extensions/assistant-extension/tsconfig.json +++ b/extensions/assistant-extension/tsconfig.json @@ -1,7 +1,7 @@ { "compilerOptions": { "moduleResolution": "node", - "target": "es5", + "target": "ES2015", "module": "ES2020", "lib": ["es2015", "es2016", "es2017", "dom"], "strict": true, diff --git a/extensions/inference-nitro-extension/tsconfig.json b/extensions/inference-nitro-extension/tsconfig.json index 19d8572b5..bdb35163a 100644 --- a/extensions/inference-nitro-extension/tsconfig.json +++ b/extensions/inference-nitro-extension/tsconfig.json @@ -1,7 +1,7 @@ { "compilerOptions": { "moduleResolution": "node", - "target": "es5", + "target": "ES2015", "module": "ES2020", "lib": ["es2015", "es2016", "es2017", "dom"], "strict": true, diff --git a/extensions/inference-openai-extension/package.json b/extensions/inference-openai-extension/package.json index cd776257c..50fe12349 100644 --- a/extensions/inference-openai-extension/package.json +++ b/extensions/inference-openai-extension/package.json @@ -1,7 +1,7 @@ { "name": "@janhq/inference-openai-extension", "productName": "OpenAI Inference Engine", - "version": "1.0.2", + "version": "1.0.3", "description": "This extension enables OpenAI chat completion API calls", "main": "dist/index.js", "module": "dist/module.js", diff --git a/extensions/inference-openai-extension/resources/models.json b/extensions/inference-openai-extension/resources/models.json index 72517d540..124e123b9 100644 --- a/extensions/inference-openai-extension/resources/models.json +++ b/extensions/inference-openai-extension/resources/models.json @@ -23,40 +23,7 @@ }, "metadata": { "author": "OpenAI", - "tags": [ - "General" - ] - }, - "engine": "openai" - }, - { - "sources": [ - { - "url": "https://openai.com" - } - ], - "id": "gpt-4-vision-preview", - "object": "model", - "name": "OpenAI GPT 4 with Vision (Preview)", - "version": "1.1", - "description": "OpenAI GPT-4 Vision model features vision understanding capabilities", - "format": "api", - "settings": { - "vision_model": true, - "textModel": false - }, - "parameters": { - "max_tokens": 4096, - "temperature": 0.7, - "top_p": 0.95, - "stream": true - }, - "metadata": { - "author": "OpenAI", - "tags": [ - "General", - "Vision" - ] + "tags": ["General"] }, "engine": "openai" }, @@ -84,9 +51,7 @@ }, "metadata": { "author": "OpenAI", - "tags": [ - "General" - ] + "tags": ["General"] }, "engine": "openai" }, @@ -114,9 +79,7 @@ }, "metadata": { "author": "OpenAI", - "tags": [ - "General" - ] + "tags": ["General"] }, "engine": "openai" }, @@ -144,9 +107,7 @@ }, "metadata": { "author": "OpenAI", - "tags": [ - "General" - ] + "tags": ["General"] }, "engine": "openai" }, @@ -174,9 +135,7 @@ }, "metadata": { "author": "OpenAI", - "tags": [ - "General" - ] + "tags": ["General"] }, "engine": "openai" } diff --git a/extensions/tensorrt-llm-extension/tsconfig.json b/extensions/tensorrt-llm-extension/tsconfig.json index be07e716c..94465ebb6 100644 --- a/extensions/tensorrt-llm-extension/tsconfig.json +++ b/extensions/tensorrt-llm-extension/tsconfig.json @@ -1,7 +1,7 @@ { "compilerOptions": { "moduleResolution": "node", - "target": "es5", + "target": "ES2015", "module": "ES2020", "lib": ["es2015", "es2016", "es2017", "dom"], "strict": true, diff --git a/joi/rollup.config.mjs b/joi/rollup.config.mjs index 333a61c5c..8f20635a5 100644 --- a/joi/rollup.config.mjs +++ b/joi/rollup.config.mjs @@ -38,7 
+38,11 @@ export default [ postcss({ plugins: [autoprefixer(), tailwindcss(tailwindConfig)], sourceMap: true, - use: ['sass'], + use: { + sass: { + silenceDeprecations: ['legacy-js-api'], + }, + }, minimize: true, extract: 'main.css', }), diff --git a/web/containers/Layout/BottomPanel/SystemMonitor/TableActiveModel/index.tsx b/web/containers/Layout/BottomPanel/SystemMonitor/TableActiveModel/index.tsx index 5e8549c7f..6ff6c894a 100644 --- a/web/containers/Layout/BottomPanel/SystemMonitor/TableActiveModel/index.tsx +++ b/web/containers/Layout/BottomPanel/SystemMonitor/TableActiveModel/index.tsx @@ -10,8 +10,6 @@ import { isLocalEngine } from '@/utils/modelEngine' import { serverEnabledAtom } from '@/helpers/atoms/LocalServer.atom' -const Column = ['Model', 'Size', ''] - const TableActiveModel = () => { const { activeModel, stateModel, stopModel } = useActiveModel() @@ -21,25 +19,11 @@ const TableActiveModel = () => {
- - - {Column.map((col, i) => { - return ( - - ) - })} - - {activeModel && isLocalEngine(activeModel.engine) ? (
- {col} -

{activeModel.name}

diff --git a/web/containers/Layout/RibbonPanel/index.tsx b/web/containers/Layout/RibbonPanel/index.tsx index 2eb1bad70..ee00b2237 100644 --- a/web/containers/Layout/RibbonPanel/index.tsx +++ b/web/containers/Layout/RibbonPanel/index.tsx @@ -95,10 +95,11 @@ export default function RibbonPanel() { return (
onMenuClick(menu.state)} > onMenuClick(menu.state)} > {menu.icon}
diff --git a/web/screens/LocalServer/LocalServerRightPanel/index.tsx b/web/screens/LocalServer/LocalServerRightPanel/index.tsx index 13e3cad57..628a61512 100644 --- a/web/screens/LocalServer/LocalServerRightPanel/index.tsx +++ b/web/screens/LocalServer/LocalServerRightPanel/index.tsx @@ -1,8 +1,8 @@ import { useCallback, useEffect, useMemo, useState } from 'react' -import { Accordion, AccordionItem } from '@janhq/joi' +import { Accordion, AccordionItem, Input, Tooltip } from '@janhq/joi' import { useAtomValue, useSetAtom } from 'jotai' -import { AlertTriangleIcon, InfoIcon } from 'lucide-react' +import { AlertTriangleIcon, CheckIcon, CopyIcon, InfoIcon } from 'lucide-react' import EngineSetting from '@/containers/EngineSetting' import { modalTroubleShootingAtom } from '@/containers/ModalTroubleShoot' @@ -12,6 +12,8 @@ import RightPanelContainer from '@/containers/RightPanelContainer' import { loadModelErrorAtom } from '@/hooks/useActiveModel' +import { useClipboard } from '@/hooks/useClipboard' + import { getConfigurationsData } from '@/utils/componentSettings' import { @@ -29,6 +31,8 @@ const LocalServerRightPanel = () => { const selectedModel = useAtomValue(selectedModelAtom) + const clipboard = useClipboard({ timeout: 1000 }) + const [currentModelSettingParams, setCurrentModelSettingParams] = useState( extractModelLoadParams(selectedModel?.settings) ) @@ -92,6 +96,35 @@ const LocalServerRightPanel = () => { +
+ + ) : ( + { + clipboard.copy(selectedModel?.id) + }} + /> + } + content="Copy Model ID" + /> + ) + } + /> +
+ {loadModelError && serverEnabled && (
{ saveSettings({ gpusInUse: updatedGpusInUse }) } + const gpuSelectionPlaceHolder = + gpuList.length > 0 ? 'Select GPU' : "You don't have any compatible GPU" + /** * Handle click outside */ @@ -315,25 +318,32 @@ const Advanced = () => {
- {gpuList.length > 0 && ( -
- -
- - } - onClick={() => setOpen(!open)} - /> +
+ +
+ + } + onClick={() => setOpen(!open)} + /> + {gpuList.length > 0 && (
{
-
+ )}
- )} + )} diff --git a/web/screens/Settings/HuggingFaceRepoDetailModal/ModelDownloadRow/index.tsx b/web/screens/Settings/HuggingFaceRepoDetailModal/ModelDownloadRow/index.tsx index c3f09f171..9c2ff14a5 100644 --- a/web/screens/Settings/HuggingFaceRepoDetailModal/ModelDownloadRow/index.tsx +++ b/web/screens/Settings/HuggingFaceRepoDetailModal/ModelDownloadRow/index.tsx @@ -117,7 +117,7 @@ const ModelDownloadRow: React.FC = ({ return (
-
+
{quantization && ( @@ -126,8 +126,7 @@ const ModelDownloadRow: React.FC = ({ )}

diff --git a/web/screens/Settings/MyModels/MyModelList/index.tsx b/web/screens/Settings/MyModels/MyModelList/index.tsx index c9ca6e867..7557e9952 100644 --- a/web/screens/Settings/MyModels/MyModelList/index.tsx +++ b/web/screens/Settings/MyModels/MyModelList/index.tsx @@ -85,7 +85,7 @@ const MyModelList = ({ model }: Props) => {
{stateModel.loading && stateModel.model?.id === model.id ? ( @@ -99,7 +99,7 @@ const MyModelList = ({ model }: Props) => { Active @@ -107,7 +107,7 @@ const MyModelList = ({ model }: Props) => { ) : ( Inactive diff --git a/web/screens/Thread/ThreadCenterPanel/ChatInput/RichTextEditor.tsx b/web/screens/Thread/ThreadCenterPanel/ChatInput/RichTextEditor.tsx index 8f296368f..88ddad485 100644 --- a/web/screens/Thread/ThreadCenterPanel/ChatInput/RichTextEditor.tsx +++ b/web/screens/Thread/ThreadCenterPanel/ChatInput/RichTextEditor.tsx @@ -5,7 +5,7 @@ import { MessageStatus } from '@janhq/core' import hljs from 'highlight.js' import { useAtom, useAtomValue } from 'jotai' -import { BaseEditor, createEditor, Editor, Element, Transforms } from 'slate' +import { BaseEditor, createEditor, Editor, Transforms } from 'slate' import { withHistory } from 'slate-history' // Import withHistory import { Editable, @@ -129,14 +129,27 @@ const RichTextEditor = ({ }) } - if (Editor.isBlock(editor, node) && node.type === 'code') { + if (Editor.isBlock(editor, node) && node.type === 'paragraph') { node.children.forEach((child: { text: any }, childIndex: number) => { const text = child.text + const { selection } = editor + + if (selection) { + const selectedNode = Editor.node(editor, selection) + + if (Editor.isBlock(editor, selectedNode[0] as CustomElement)) { + const isNodeEmpty = Editor.string(editor, selectedNode[1]) === '' + + if (isNodeEmpty) { + // Reset language when a node is cleared + currentLanguage.current = 'plaintext' + } + } + } // Match code block start and end const startMatch = text.match(/^```(\w*)$/) const endMatch = text.match(/^```$/) - const inlineMatch = text.match(/^`([^`]+)`$/) // Match inline code if (startMatch) { // If it's the start of a code block, store the language @@ -144,38 +157,6 @@ const RichTextEditor = ({ } else if (endMatch) { // Reset language when code block ends currentLanguage.current = 'plaintext' - } else if (inlineMatch) { - // Apply syntax highlighting to inline code - const codeContent = inlineMatch[1] // Get the content within the backticks - try { - hljs.highlight(codeContent, { - language: - currentLanguage.current.length > 1 - ? currentLanguage.current - : 'plaintext', - }).value - } catch (err) { - hljs.highlight(codeContent, { - language: 'javascript', - }).value - } - - // Calculate the range for the inline code - const length = codeContent.length - ranges.push({ - anchor: { - path: [...path, childIndex], - offset: inlineMatch.index + 1, - }, - focus: { - path: [...path, childIndex], - offset: inlineMatch.index + 1 + length, - }, - type: 'code', - code: true, - language: currentLanguage.current, - className: '', // Specify class name if needed - }) } else if (currentLanguage.current !== 'plaintext') { // Highlight entire code line if in a code block const leadingSpaces = text.match(/^\s*/)?.[0] ?? '' // Capture leading spaces @@ -206,7 +187,7 @@ const RichTextEditor = ({ anchor: { path: [...path, childIndex], offset: 0 }, focus: { path: [...path, childIndex], - offset: leadingSpaces.length, + offset: slateTextIndex, }, type: 'code', code: true, @@ -240,6 +221,7 @@ const RichTextEditor = ({ slateTextIndex += length }) } else { + currentLanguage.current = 'plaintext' ranges.push({ anchor: { path: [...path, childIndex], offset: 0 }, focus: { path: [...path, childIndex], offset: text.length }, @@ -301,6 +283,11 @@ const RichTextEditor = ({ textareaRef.current.style.overflow = textareaRef.current.clientHeight >= 390 ? 
'auto' : 'hidden' } + + if (currentPrompt.length === 0) { + resetEditor() + } + // eslint-disable-next-line react-hooks/exhaustive-deps }, [textareaRef.current?.clientHeight, currentPrompt, activeSettingInputBox]) const onStopInferenceClick = async () => { @@ -317,13 +304,15 @@ const RichTextEditor = ({ // Adjust the height of the textarea to its initial state if (textareaRef.current) { - textareaRef.current.style.height = '40px' // Reset to the initial height or your desired height + textareaRef.current.style.height = activeSettingInputBox + ? '100px' + : '44px' textareaRef.current.style.overflow = 'hidden' // Reset overflow style } // Ensure the editor re-renders decorations editor.onChange() - }, [editor]) + }, [activeSettingInputBox, editor]) const handleKeyDown = useCallback( (event: React.KeyboardEvent) => { @@ -334,35 +323,6 @@ const RichTextEditor = ({ resetEditor() } else onStopInferenceClick() } - - if (event.key === '`') { - // Determine whether any of the currently selected blocks are code blocks. - const [match] = Editor.nodes(editor, { - match: (n) => - Element.isElement(n) && (n as CustomElement).type === 'code', - }) - // Toggle the block type dependsing on whether there's already a match. - Transforms.setNodes( - editor, - { type: match ? 'paragraph' : 'code' }, - { match: (n) => Element.isElement(n) && Editor.isBlock(editor, n) } - ) - } - - if (event.key === 'Tab') { - const [match] = Editor.nodes(editor, { - match: (n) => { - return (n as CustomElement).type === 'code' - }, - mode: 'lowest', - }) - - if (match) { - event.preventDefault() - // Insert a tab character - Editor.insertText(editor, ' ') // Insert 2 spaces - } - } }, // eslint-disable-next-line react-hooks/exhaustive-deps [currentPrompt, editor, messages] diff --git a/web/screens/Thread/ThreadCenterPanel/ChatInput/index.tsx b/web/screens/Thread/ThreadCenterPanel/ChatInput/index.tsx index 83a68fa8a..afa84b5bf 100644 --- a/web/screens/Thread/ThreadCenterPanel/ChatInput/index.tsx +++ b/web/screens/Thread/ThreadCenterPanel/ChatInput/index.tsx @@ -125,7 +125,6 @@ const ChatInput = () => {
{renderPreview(fileUpload)} - { disabled={stateModel.loading || !activeThread} />