Merge branch 'dev' into more-groq-models

Commit: 10d4b3f4e0

.github/workflows/jan-docs-new-release.yaml (vendored): 2 lines changed
@@ -58,6 +58,6 @@ jobs:
accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
projectName: ${{ env.CLOUDFLARE_PROJECT_NAME }}
directory: ./docs/out
branch: dev
branch: main
# Optional: Enable this if you want to have GitHub Deployments triggered
gitHubToken: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/jan-electron-build-beta.yml (vendored): 24 lines changed

@@ -133,4 +133,26 @@ jobs:
run: |
gh release edit v${{ needs.create-draft-release.outputs.version }} --draft=false --prerelease
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

noti-discord-and-update-url-readme:
needs: [build-macos-x64, build-macos-arm64, create-draft-release, build-windows-x64, build-linux-x64, combine-beta-mac-yml]
runs-on: ubuntu-latest
steps:
- name: Set version to environment variable
run: |
echo "VERSION=${{ needs.create-draft-release.outputs.version }}" >> $GITHUB_ENV

- name: Notify Discord
uses: Ilshidur/action-discord@master
with:
args: |
Jan-beta App version {{ VERSION }}, has been released, use the following links to download the app with faster speed or visit the Github release page for more information:
- Windows: https://delta.jan.ai/beta/jan-beta-win-x64-{{ VERSION }}.exe
- macOS Intel: https://delta.jan.ai/beta/jan-beta-mac-x64-{{ VERSION }}.dmg
- macOS Apple Silicon: https://delta.jan.ai/beta/jan-beta-mac-arm64-{{ VERSION }}.dmg
- Linux Deb: https://delta.jan.ai/beta/jan-beta-linux-amd64-{{ VERSION }}.deb
- Linux AppImage: https://delta.jan.ai/beta/jan-beta-linux-x86_64-{{ VERSION }}.AppImage
- Github Release URL: https://github.com/janhq/jan/releases/tag/v{{ VERSION }}
env:
DISCORD_WEBHOOK: ${{ secrets.DISCORD_WEBHOOK_JAN_BETA }}
JanBanner.png (new binary file, not shown): 3.7 MiB
README.md: 254 lines changed

@@ -1,6 +1,6 @@
# Jan - Turn your computer into an AI computer
# Jan - Local AI Assistant

![Jan banner](https://github.com/janhq/jan/assets/89722390/35daac7d-b895-487c-a6ac-6663daaad78e)
![JanBanner](JanBanner.png)

<p align="center">
<!-- ALL-CONTRIBUTORS-BADGE:START - Do not remove or modify this section -->

@@ -12,18 +12,22 @@
</p>

<p align="center">
<a href="https://jan.ai/guides">Getting Started</a>
<a href="https://jan.ai/docs/quickstart">Getting Started</a>
- <a href="https://jan.ai/docs">Docs</a>
- <a href="https://github.com/janhq/jan/releases">Changelog</a>
- <a href="https://github.com/janhq/jan/issues">Bug reports</a>
- <a href="https://discord.gg/AsJ8krTT3N">Discord</a>
</p>

> [!Warning] >**Jan is currently in Development**: Expect breaking changes and bugs!
<p align="center">
⚠️ <b> Jan is currently in Development</b>: Expect breaking changes and bugs!
</p>

Jan is an open-source ChatGPT alternative that runs 100% offline on your computer.

**Jan runs on any hardware.** From PCs to multi-GPU clusters, Jan supports universal architectures:
Jan is a ChatGPT-alternative that runs 100% offline on your device. Our goal is to make it easy for a layperson to download and run LLMs and use AI with **full control** and **privacy**.

Jan is powered by [Cortex](https://github.com/janhq/cortex.cpp), our embeddable local AI engine that runs on any hardware.
From PCs to multi-GPU clusters, Jan & Cortex supports universal architectures:

- [x] NVIDIA GPUs (fast)
- [x] Apple M-series (fast)

@@ -31,6 +35,12 @@ Jan is an open-source ChatGPT alternative that runs 100% offline on your compute
- [x] Linux Debian
- [x] Windows x64

#### Features:
- [Model Library](https://jan.ai/docs/models/manage-models#add-models) with popular LLMs like Llama, Gemma, Mistral, or Qwen
- Connect to [Remote AI APIs](https://jan.ai/docs/remote-models/openai) like Groq and OpenRouter
- Local API Server with OpenAI-equivalent API
- [Extensions](https://jan.ai/docs/extensions) for customizing Jan

## Download

<table>

@@ -74,7 +84,40 @@ Jan is an open-source ChatGPT alternative that runs 100% offline on your compute
</td>
</tr>
<tr style="text-align:center">
<td style="text-align:center"><b>Experimental (Nightly Build)</b></td>
<td style="text-align:center"><b>Beta (Preview)</b></td>
<td style="text-align:center">
<a href='https://app.jan.ai/download/beta/win-x64'>
<img src='https://github.com/janhq/jan/blob/dev/docs/static/img/windows.png' style="height:14px; width: 14px" />
<b>jan.exe</b>
</a>
</td>
<td style="text-align:center">
<a href='https://app.jan.ai/download/beta/mac-x64'>
<img src='https://github.com/janhq/jan/blob/dev/docs/static/img/mac.png' style="height:15px; width: 15px" />
<b>Intel</b>
</a>
</td>
<td style="text-align:center">
<a href='https://app.jan.ai/download/beta/mac-arm64'>
<img src='https://github.com/janhq/jan/blob/dev/docs/static/img/mac.png' style="height:15px; width: 15px" />
<b>M1/M2/M3/M4</b>
</a>
</td>
<td style="text-align:center">
<a href='https://app.jan.ai/download/beta/linux-amd64-deb'>
<img src='https://github.com/janhq/jan/blob/dev/docs/static/img/linux.png' style="height:14px; width: 14px" />
<b>jan.deb</b>
</a>
</td>
<td style="text-align:center">
<a href='https://app.jan.ai/download/beta/linux-amd64-appimage'>
<img src='https://github.com/janhq/jan/blob/dev/docs/static/img/linux.png' style="height:14px; width: 14px" />
<b>jan.AppImage</b>
</a>
</td>
</tr>
<tr style="text-align:center">
<td style="text-align:center"><b>Nightly Build (Experimental)</b></td>
<td style="text-align:center">
<a href='https://app.jan.ai/download/nightly/win-x64'>
<img src='https://github.com/janhq/jan/blob/dev/docs/static/img/windows.png' style="height:14px; width: 14px" />

@@ -108,79 +151,64 @@ Jan is an open-source ChatGPT alternative that runs 100% offline on your compute
</tr>
</table>
Download the latest version of Jan at https://jan.ai/ or visit the **[GitHub Releases](https://github.com/janhq/jan/releases)** to download any previous release.
Download the latest version of Jan at https://jan.ai/ or visit the [GitHub Releases](https://github.com/janhq/jan/releases) to download any previous release.

## Demo

![Demo](/demo.gif)
https://github.com/user-attachments/assets/c3592fa2-c504-4d9d-a885-7e00122a50f3

_Realtime Video: Jan v0.4.3-nightly on a Mac M1, 16GB Sonoma 14_
*Real-time Video: Jan v0.5.7 on a Mac M2, 16GB Sonoma 14.2*

## Quicklinks

#### Jan
### Jan

- [Jan website](https://jan.ai/)
- [Jan Website](https://jan.ai/)
- [Jan GitHub](https://github.com/janhq/jan)
- [User Guides](https://jan.ai/guides/)
- [Developer docs](https://jan.ai/developer/)
- [API reference](https://jan.ai/api-reference/)
- [Specs](https://jan.ai/docs/)
- [Documentation](https://jan.ai/docs)
- [Jan Changelog](https://jan.ai/changelog)
- [Jan Blog](https://jan.ai/blog)

#### Nitro
### Cortex.cpp
Jan is powered by **Cortex.cpp**. It is a C++ command-line interface (CLI) designed as an alternative to [Ollama](https://ollama.com/). By default, it runs on the llama.cpp engine but also supports other engines, including ONNX and TensorRT-LLM, making it a multi-engine platform.

Nitro is a high-efficiency C++ inference engine for edge computing. It is lightweight and embeddable, and can be used on its own within your own projects.

- [Nitro Website](https://nitro.jan.ai)
- [Nitro GitHub](https://github.com/janhq/nitro)
- [Documentation](https://nitro.jan.ai/docs)
- [API Reference](https://nitro.jan.ai/api-reference)

## Troubleshooting

As Jan is in development mode, you might get stuck on a broken build.

To reset your installation:

1. Use the following commands to remove any dangling backend processes:

```sh
ps aux | grep nitro
```

Look for processes like "nitro" and "nitro_arm_64," and kill them one by one with:

```sh
kill -9 <PID>
```

2. **Remove Jan from your Applications folder and Cache folder**

```bash
make clean
```

This will remove all build artifacts and cached files:

- Delete Jan extension from your `~/jan/extensions` folder
- Delete all `node_modules` in current folder
- Clear Application cache in `~/Library/Caches/jan`

- [Cortex Website](https://cortex.so/)
- [Cortex GitHub](https://github.com/janhq/cortex.cpp)
- [Documentation](https://cortex.so/docs/)
- [Models Library](https://cortex.so/models)
- API Reference: *Under development*

## Requirements for running Jan

- MacOS: 13 or higher
- Windows:
- **MacOS**: 13 or higher
- **Windows**:
  - Windows 10 or higher
  - To enable GPU support:
    - Nvidia GPU with CUDA Toolkit 11.7 or higher
    - Nvidia driver 470.63.01 or higher
- Linux:
- **Linux**:
  - glibc 2.27 or higher (check with `ldd --version`)
  - gcc 11, g++ 11, cpp 11 or higher, refer to this [link](https://jan.ai/guides/troubleshooting/gpu-not-used/#specific-requirements-for-linux) for more information
  - To enable GPU support:
    - Nvidia GPU with CUDA Toolkit 11.7 or higher
    - Nvidia driver 470.63.01 or higher
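A quick way to check the Linux requirements above from a terminal; a minimal sketch, assuming the standard GNU toolchain is installed and, for GPU support, the NVIDIA driver utilities are on the PATH:

```bash
# glibc version (needs 2.27 or higher)
ldd --version | head -n 1

# gcc / g++ / cpp versions (need 11 or higher)
gcc --version | head -n 1
g++ --version | head -n 1
cpp --version | head -n 1

# NVIDIA driver version (needs 470.63.01 or higher); only relevant for GPU support
nvidia-smi --query-gpu=driver_version --format=csv,noheader
```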
## Troubleshooting

As Jan is in development mode, you might get stuck on a some common issues:
- [Troubleshooting a broken build](https://jan.ai/docs/troubleshooting#broken-build)
- [Troubleshooting NVIDIA GPU](https://jan.ai/docs/troubleshooting#troubleshooting-nvidia-gpu)
- [Troubleshooting Something's Amiss](https://jan.ai/docs/troubleshooting#somethings-amiss)

If you can't find what you need in our troubleshooting guide, feel free reach out to us for extra help:
1. Copy your [error logs & device specifications](https://jan.ai/docs/troubleshooting#how-to-get-error-logs).
2. Go to our [Discord](https://discord.com/invite/FTk2MvZwJH) & send it to **#🆘|get-help** channel for further support.

*Check the logs to ensure the information is what you intend to send. Note that we retain your logs for only 24 hours, so report any issues promptly.*

## Contributing

Contributions are welcome! Please read the [CONTRIBUTING.md](CONTRIBUTING.md) file

@@ -209,11 +237,7 @@ Contributions are welcome! Please read the [CONTRIBUTING.md](CONTRIBUTING.md) fi

This will start the development server and open the desktop app.

3. (Optional) **Run the API server without frontend**

```bash
yarn dev:server
```
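With the server running, it exposes an OpenAI-equivalent HTTP interface that standard OpenAI-style clients can point at. A minimal sketch of exercising it; the port 1337, the `/v1` paths, and the model id `llama3-8b-instruct` are assumptions for illustration, so adjust them to your local setup:

```bash
# List the models known to the local server (OpenAI-style endpoint; path assumed)
curl http://localhost:1337/v1/models

# Send a chat completion request to the OpenAI-equivalent endpoint (model id is a placeholder)
curl http://localhost:1337/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
    "model": "llama3-8b-instruct",
    "messages": [{"role": "user", "content": "Hello from the local Jan API server"}]
  }'
```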
### For production build

@@ -225,102 +249,6 @@ make build

This will build the app MacOS m1/m2 for production (with code signing already done) and put the result in `dist` folder.

### Docker mode

- Supported OS: Linux, WSL2 Docker
- Pre-requisites:
  - Docker Engine and Docker Compose are required to run Jan in Docker mode. Follow the [instructions](https://docs.docker.com/engine/install/ubuntu/) below to get started with Docker Engine on Ubuntu.

    ```bash
    curl -fsSL https://get.docker.com -o get-docker.sh
    sudo sh ./get-docker.sh --dry-run
    ```

  - If you intend to run Jan in GPU mode, you need to install `nvidia-driver` and `nvidia-docker2`. Follow the instruction [here](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html) for installation.

- Run Jan in Docker mode

  > User can choose between `docker-compose.yml` with latest prebuilt docker image or `docker-compose-dev.yml` with local docker build

| Docker compose Profile | Description                                  |
| ---------------------- | -------------------------------------------- |
| `cpu-fs`               | Run Jan in CPU mode with default file system |
| `cpu-s3fs`             | Run Jan in CPU mode with S3 file system      |
| `gpu-fs`               | Run Jan in GPU mode with default file system |
| `gpu-s3fs`             | Run Jan in GPU mode with S3 file system      |

| Environment Variable    | Description                                                                                              |
| ----------------------- | -------------------------------------------------------------------------------------------------------- |
| `S3_BUCKET_NAME`        | S3 bucket name - leave blank for default file system                                                     |
| `AWS_ACCESS_KEY_ID`     | AWS access key ID - leave blank for default file system                                                  |
| `AWS_SECRET_ACCESS_KEY` | AWS secret access key - leave blank for default file system                                              |
| `AWS_ENDPOINT`          | AWS endpoint URL - leave blank for default file system                                                   |
| `AWS_REGION`            | AWS region - leave blank for default file system                                                         |
| `API_BASE_URL`          | Jan Server URL, please modify it as your public ip address or domain name default http://localhost:1377  |
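For the S3-backed profiles, the variables above are read from the environment when Compose starts. A minimal sketch of launching the `cpu-s3fs` profile; the bucket name, credentials, endpoint, and region below are placeholders, not values from this repository:

```bash
# Placeholders: substitute your own bucket, credentials, endpoint, and region
export S3_BUCKET_NAME=my-jan-bucket
export AWS_ACCESS_KEY_ID=AKIA...
export AWS_SECRET_ACCESS_KEY=...
export AWS_ENDPOINT=https://s3.amazonaws.com
export AWS_REGION=us-east-1
export API_BASE_URL=http://localhost:1377

# CPU mode with the S3 file system profile
docker compose --profile cpu-s3fs up -d
```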
- **Option 1**: Run Jan in CPU mode

  ```bash
  # cpu mode with default file system
  docker compose --profile cpu-fs up -d

  # cpu mode with S3 file system
  docker compose --profile cpu-s3fs up -d
  ```

- **Option 2**: Run Jan in GPU mode

  - **Step 1**: Check CUDA compatibility with your NVIDIA driver by running `nvidia-smi` and check the CUDA version in the output

    ```bash
    nvidia-smi

    # Output
    +---------------------------------------------------------------------------------------+
    | NVIDIA-SMI 531.18 Driver Version: 531.18 CUDA Version: 12.1 |
    |-----------------------------------------+----------------------+----------------------+
    | GPU Name TCC/WDDM | Bus-Id Disp.A | Volatile Uncorr. ECC |
    | Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
    | | | MIG M. |
    |=========================================+======================+======================|
    | 0 NVIDIA GeForce RTX 4070 Ti WDDM | 00000000:01:00.0 On | N/A |
    | 0% 44C P8 16W / 285W| 1481MiB / 12282MiB | 2% Default |
    | | | N/A |
    +-----------------------------------------+----------------------+----------------------+
    | 1 NVIDIA GeForce GTX 1660 Ti WDDM | 00000000:02:00.0 Off | N/A |
    | 0% 49C P8 14W / 120W| 0MiB / 6144MiB | 0% Default |
    | | | N/A |
    +-----------------------------------------+----------------------+----------------------+
    | 2 NVIDIA GeForce GTX 1660 Ti WDDM | 00000000:05:00.0 Off | N/A |
    | 29% 38C P8 11W / 120W| 0MiB / 6144MiB | 0% Default |
    | | | N/A |
    +-----------------------------------------+----------------------+----------------------+

    +---------------------------------------------------------------------------------------+
    | Processes: |
    | GPU GI CI PID Type Process name GPU Memory |
    | ID ID Usage |
    |=======================================================================================|
    ```

  - **Step 2**: Visit [NVIDIA NGC Catalog](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/cuda/tags) and find the smallest minor version of image tag that matches your CUDA version (e.g., 12.1 -> 12.1.0)

  - **Step 3**: Update the `Dockerfile.gpu` line number 5 with the latest minor version of the image tag from step 2 (e.g. change `FROM nvidia/cuda:12.2.0-runtime-ubuntu22.04 AS base` to `FROM nvidia/cuda:12.1.0-runtime-ubuntu22.04 AS base`)

  - **Step 4**: Run command to start Jan in GPU mode

    ```bash
    # GPU mode with default file system
    docker compose --profile gpu-fs up -d

    # GPU mode with S3 file system
    docker compose --profile gpu-s3fs up -d
    ```

This will start the web server and you can access Jan at `http://localhost:3000`.

> Note: RAG feature is not supported in Docker mode with s3fs yet.
## Acknowledgements

Jan builds on top of other open-source projects:

@@ -334,18 +262,18 @@ Jan builds on top of other open-source projects:

- Bugs & requests: file a GitHub ticket
- For discussion: join our Discord [here](https://discord.gg/FTk2MvZwJH)
- For business inquiries: email hello@jan.ai
- For business inquiries: email hello@jan.ai
- For jobs: please email hr@jan.ai

## Trust & Safety

Beware of scams.
Beware of scams!

- We will never ask you for personal info
- We are a free product; there's no paid version
- We don't have a token or ICO
- We are not actively fundraising or seeking donations
- We will never request your personal information.
- Our product is completely free; no paid version exists.
- We do not have a token or ICO.
- We are a [bootstrapped company](https://en.wikipedia.org/wiki/Bootstrapping), and don't have any external investors (*yet*). We're open to exploring opportunities with strategic partners want to tackle [our mission](https://jan.ai/about#mission) together.

## License

Jan is free and open source, under the AGPLv3 license.
Jan is free and open source, under the **AGPLv3** license.
@@ -1,7 +1,7 @@
{
"compilerOptions": {
"moduleResolution": "node",
"target": "es5",
"target": "ES2015",
"module": "ES2020",
"lib": ["es2015", "es2016", "es2017", "dom"],
"strict": true,

@@ -13,7 +13,7 @@
"declarationDir": "dist/types",
"outDir": "dist/lib",
"importHelpers": true,
"types": ["@types/jest"],
"types": ["@types/jest"]
},
"include": ["src"],
"exclude": ["**/*.test.ts"]
docs/public/assets/images/changelog/jan-v0.5.5.jpeg (new binary file, not shown): 55 KiB

docs/public/assets/images/changelog/jan-v0.5.7.gif (new binary file, not shown): 2.9 MiB
docs/src/pages/changelog/2024-02-10-jan-is-more-stable.mdx (new file): 27 lines

@@ -0,0 +1,27 @@
---
title: "Jan is more stable 👋"
version: 0.5.5
description: "Jan supports Llama 3.2 and Qwen 2.5"
date: 2024-10-02
ogImage: "/assets/images/changelog/jan-v0.5.5.jpeg"
---

import ChangelogHeader from "@/components/Changelog/ChangelogHeader"

<ChangelogHeader title= "Jan is faster now" date="2024-09-01" ogImage= "/assets/images/changelog/jan-v0.5.5.jpeg" />

Highlights 🎉

- Meta's Llama 3.2 and Alibaba's Qwen 2.5 added to the hub
- Improved starter screen
- Better local vs. cloud model navigation

Fixes 💫

- Solved GPU acceleration for GGUF models
- Improved model caching & threading
- Resolved input & toolbar overlaps

Update your product or download the latest: https://jan.ai

For more details, see the [GitHub release notes](https://github.com/janhq/jan/releases/tag/v0.5.5).
docs/src/pages/changelog/2024-10-24-jan-stable.mdx (new file): 26 lines

@@ -0,0 +1,26 @@
---
title: "Jan has Stable, Beta and Nightly versions"
version: 0.5.7
description: "This release is mostly focused on bug fixes."
date: 2024-10-24
ogImage: "/assets/images/changelog/jan-v0.5.7.gif"
---

import ChangelogHeader from "@/components/Changelog/ChangelogHeader"

<ChangelogHeader title= "Jan is faster now" date="2024-09-01" ogImage= "/assets/images/changelog/jan-v0.5.7.gif" />

Highlights 🎉

- Jan has Stable, Beta and Nightly versions
- Saving instructions for new threads is now stable

Fixes 💫

- Fixed broken links, hardware issues, and multi-modal download
- Resolved text overlap, scrolling, and multi-monitor reset problems
- Adjusted LLava model EOS token and context input

Update your product or download the latest: https://jan.ai

For more details, see the [GitHub release notes](https://github.com/janhq/jan/releases/tag/v0.5.7).
docs/src/pages/docs/_assets/jan-app.png (new binary file, not shown): 363 KiB

Binary file not shown (before: 1.8 MiB).

@@ -22,7 +22,7 @@ import FAQBox from '@/components/FaqBox'

# Jan

![Jan's Cover Image](./_assets/jan-ai.png)
![Jan's Cover Image](./_assets/jan-app.png)

Jan is a ChatGPT-alternative that runs 100% offline on your [Desktop](/docs/desktop-installation). Our goal is to make it easy for a layperson[^1] to download and run LLMs and use AI with full control and [privacy](https://www.reuters.com/legal/legalindustry/privacy-paradox-with-ai-2023-10-31/).
@@ -1,7 +1,7 @@
{
"compilerOptions": {
"moduleResolution": "node",
"target": "es5",
"target": "ES2015",
"module": "ES2020",
"lib": ["es2015", "es2016", "es2017", "dom"],
"strict": true,

@@ -1,7 +1,7 @@
{
"compilerOptions": {
"moduleResolution": "node",
"target": "es5",
"target": "ES2015",
"module": "ES2020",
"lib": ["es2015", "es2016", "es2017", "dom"],
"strict": true,

@@ -1,7 +1,7 @@
{
"name": "@janhq/inference-openai-extension",
"productName": "OpenAI Inference Engine",
"version": "1.0.2",
"version": "1.0.3",
"description": "This extension enables OpenAI chat completion API calls",
"main": "dist/index.js",
"module": "dist/module.js",
@@ -23,40 +23,7 @@
},
"metadata": {
"author": "OpenAI",
"tags": [
"General"
]
},
"engine": "openai"
},
{
"sources": [
{
"url": "https://openai.com"
}
],
"id": "gpt-4-vision-preview",
"object": "model",
"name": "OpenAI GPT 4 with Vision (Preview)",
"version": "1.1",
"description": "OpenAI GPT-4 Vision model features vision understanding capabilities",
"format": "api",
"settings": {
"vision_model": true,
"textModel": false
},
"parameters": {
"max_tokens": 4096,
"temperature": 0.7,
"top_p": 0.95,
"stream": true
},
"metadata": {
"author": "OpenAI",
"tags": [
"General",
"Vision"
]
"tags": ["General"]
},
"engine": "openai"
},

@@ -84,9 +51,7 @@
},
"metadata": {
"author": "OpenAI",
"tags": [
"General"
]
"tags": ["General"]
},
"engine": "openai"
},

@@ -114,9 +79,7 @@
},
"metadata": {
"author": "OpenAI",
"tags": [
"General"
]
"tags": ["General"]
},
"engine": "openai"
},

@@ -144,9 +107,7 @@
},
"metadata": {
"author": "OpenAI",
"tags": [
"General"
]
"tags": ["General"]
},
"engine": "openai"
},

@@ -174,9 +135,7 @@
},
"metadata": {
"author": "OpenAI",
"tags": [
"General"
]
"tags": ["General"]
},
"engine": "openai"
}
@@ -1,7 +1,7 @@
{
"compilerOptions": {
"moduleResolution": "node",
"target": "es5",
"target": "ES2015",
"module": "ES2020",
"lib": ["es2015", "es2016", "es2017", "dom"],
"strict": true,
@@ -38,7 +38,11 @@ export default [
postcss({
plugins: [autoprefixer(), tailwindcss(tailwindConfig)],
sourceMap: true,
use: ['sass'],
use: {
sass: {
silenceDeprecations: ['legacy-js-api'],
},
},
minimize: true,
extract: 'main.css',
}),
@@ -10,8 +10,6 @@ import { isLocalEngine } from '@/utils/modelEngine'

import { serverEnabledAtom } from '@/helpers/atoms/LocalServer.atom'

const Column = ['Model', 'Size', '']

const TableActiveModel = () => {
const { activeModel, stateModel, stopModel } = useActiveModel()

@@ -21,25 +19,11 @@ const TableActiveModel = () => {
<div className="w-1/2">
<div className="overflow-hidden border-b border-[hsla(var(--app-border))]">
<table className="w-full px-8">
<thead className="w-full border-b border-[hsla(var(--app-border))] bg-[hsla(var(--tertiary-bg))]">
<tr>
{Column.map((col, i) => {
return (
<th
key={i}
className="px-4 py-2 text-left font-normal last:text-center"
>
{col}
</th>
)
})}
</tr>
</thead>
{activeModel && isLocalEngine(activeModel.engine) ? (
<tbody>
<tr>
<td
className="max-w-[200px] px-4 py-2 font-bold"
className="max-w-[200px] px-4 py-2 font-medium"
title={activeModel.name}
>
<p className="line-clamp-2">{activeModel.name}</p>

@@ -95,10 +95,11 @@ export default function RibbonPanel() {
return (
<div
className={twMerge(
'relative my-0.5 flex h-8 w-8 items-center justify-center rounded-md hover:bg-[hsla(var(--ribbon-panel-icon-hover))]',
'relative my-0.5 flex h-8 w-8 cursor-pointer items-center justify-center rounded-md hover:bg-[hsla(var(--ribbon-panel-icon-hover))]',
i === 1 && 'mb-auto'
)}
key={i}
onClick={() => onMenuClick(menu.state)}
>
<Tooltip
side="right"

@@ -112,7 +113,6 @@ export default function RibbonPanel() {
isActive &&
'z-10 text-[hsla(var(--ribbon-panel-icon-active))]'
)}
onClick={() => onMenuClick(menu.state)}
>
{menu.icon}
</div>
@@ -1,8 +1,8 @@
import { useCallback, useEffect, useMemo, useState } from 'react'

import { Accordion, AccordionItem } from '@janhq/joi'
import { Accordion, AccordionItem, Input, Tooltip } from '@janhq/joi'
import { useAtomValue, useSetAtom } from 'jotai'
import { AlertTriangleIcon, InfoIcon } from 'lucide-react'
import { AlertTriangleIcon, CheckIcon, CopyIcon, InfoIcon } from 'lucide-react'

import EngineSetting from '@/containers/EngineSetting'
import { modalTroubleShootingAtom } from '@/containers/ModalTroubleShoot'

@@ -12,6 +12,8 @@ import RightPanelContainer from '@/containers/RightPanelContainer'

import { loadModelErrorAtom } from '@/hooks/useActiveModel'

import { useClipboard } from '@/hooks/useClipboard'

import { getConfigurationsData } from '@/utils/componentSettings'

import {

@@ -29,6 +31,8 @@ const LocalServerRightPanel = () => {

const selectedModel = useAtomValue(selectedModelAtom)

const clipboard = useClipboard({ timeout: 1000 })

const [currentModelSettingParams, setCurrentModelSettingParams] = useState(
extractModelLoadParams(selectedModel?.settings)
)

@@ -92,6 +96,35 @@ const LocalServerRightPanel = () => {

<ModelDropdown strictedThread={false} disabled={serverEnabled} />

<div className="mt-2">
<Input
value={selectedModel?.id || ''}
className="cursor-pointer"
readOnly
suffixIcon={
clipboard.copied ? (
<CheckIcon
size={14}
className="text-[hsla(var(--success-bg))]"
/>
) : (
<Tooltip
trigger={
<CopyIcon
size={14}
className="text-[hsla(var(--text-secondary))]"
onClick={() => {
clipboard.copy(selectedModel?.id)
}}
/>
}
content="Copy Model ID"
/>
)
}
/>
</div>

{loadModelError && serverEnabled && (
<div className="mt-3 flex space-x-2">
<AlertTriangleIcon
@@ -211,6 +211,9 @@ const Advanced = () => {
saveSettings({ gpusInUse: updatedGpusInUse })
}

const gpuSelectionPlaceHolder =
gpuList.length > 0 ? 'Select GPU' : "You don't have any compatible GPU"

/**
* Handle click outside
*/

@@ -315,25 +318,32 @@ const Advanced = () => {
</div>
</div>

{gpuList.length > 0 && (
<div className="mt-2 flex w-full flex-col rounded-lg px-2 py-4">
<label className="mb-2 mr-2 inline-block font-medium">
Choose device(s)
</label>
<div className="relative w-full md:w-1/2" ref={setToggle}>
<Input
value={selectedGpu.join() || ''}
className="w-full cursor-pointer"
readOnly
placeholder=""
suffixIcon={
<ChevronDownIcon
size={14}
className={twMerge(open && 'rotate-180')}
/>
}
onClick={() => setOpen(!open)}
/>
<div className="mt-2 flex w-full flex-col rounded-lg px-2 py-4">
<label className="mb-2 mr-2 inline-block font-medium">
Choose device(s)
</label>
<div className="relative w-full md:w-1/2" ref={setToggle}>
<Input
value={selectedGpu.join() || ''}
className={twMerge(
'w-full cursor-pointer',
gpuList.length === 0 && 'pointer-events-none'
)}
readOnly
disabled={gpuList.length === 0}
placeholder={gpuSelectionPlaceHolder}
suffixIcon={
<ChevronDownIcon
size={14}
className={twMerge(
gpuList.length === 0 && 'pointer-events-none',
open && 'rotate-180'
)}
/>
}
onClick={() => setOpen(!open)}
/>
{gpuList.length > 0 && (
<div
className={twMerge(
'absolute right-0 top-0 z-20 mt-10 max-h-80 w-full overflow-hidden rounded-lg border border-[hsla(var(--app-border))] bg-[hsla(var(--app-bg))] shadow-sm',

@@ -391,9 +401,9 @@ const Advanced = () => {
</div>
</div>
</div>
</div>
)}
</div>
)}
</div>
</div>
)}
@@ -117,7 +117,7 @@ const ModelDownloadRow: React.FC<Props> = ({

return (
<div className="flex flex-col gap-4 rounded border border-[hsla(var(--app-border))] p-3 md:flex-row md:items-center md:justify-between xl:w-full">
<div className="flex justify-between">
<div className="flex max-w-[50%] justify-between">
<div className="flex">
{quantization && (
<Badge variant="soft" className="mr-1">

@@ -126,8 +126,7 @@ const ModelDownloadRow: React.FC<Props> = ({
)}
<h1
className={twMerge(
'mr-5 line-clamp-1 font-medium text-[hsla(var(--text-secondary))]',
quantization && 'max-w-[25ch]'
'mr-5 line-clamp-1 font-medium text-[hsla(var(--text-secondary))]'
)}
title={fileName}
>

@@ -85,7 +85,7 @@ const MyModelList = ({ model }: Props) => {
<div className="relative flex items-center gap-x-4">
{stateModel.loading && stateModel.model?.id === model.id ? (
<Badge
className="inline-flex items-center space-x-2"
className="inline-flex w-[80px] items-center space-x-2"
theme="secondary"
>
<span className="h-2 w-2 rounded-full bg-gray-500" />

@@ -99,7 +99,7 @@ const MyModelList = ({ model }: Props) => {
<Badge
theme="success"
variant="soft"
className="inline-flex items-center space-x-2"
className="inline-flex w-[80px] items-center space-x-2"
>
<span className="h-2 w-2 rounded-full bg-green-500" />
<span>Active</span>

@@ -107,7 +107,7 @@ const MyModelList = ({ model }: Props) => {
) : (
<Badge
theme="secondary"
className="inline-flex items-center space-x-2"
className="inline-flex w-[80px] items-center space-x-2"
>
<span className="h-2 w-2 rounded-full bg-gray-500" />
<span>Inactive</span>
@@ -5,7 +5,7 @@ import { MessageStatus } from '@janhq/core'
import hljs from 'highlight.js'

import { useAtom, useAtomValue } from 'jotai'
import { BaseEditor, createEditor, Editor, Element, Transforms } from 'slate'
import { BaseEditor, createEditor, Editor, Transforms } from 'slate'
import { withHistory } from 'slate-history' // Import withHistory
import {
Editable,

@@ -129,14 +129,27 @@ const RichTextEditor = ({
})
}

if (Editor.isBlock(editor, node) && node.type === 'code') {
if (Editor.isBlock(editor, node) && node.type === 'paragraph') {
node.children.forEach((child: { text: any }, childIndex: number) => {
const text = child.text
const { selection } = editor

if (selection) {
const selectedNode = Editor.node(editor, selection)

if (Editor.isBlock(editor, selectedNode[0] as CustomElement)) {
const isNodeEmpty = Editor.string(editor, selectedNode[1]) === ''

if (isNodeEmpty) {
// Reset language when a node is cleared
currentLanguage.current = 'plaintext'
}
}
}

// Match code block start and end
const startMatch = text.match(/^```(\w*)$/)
const endMatch = text.match(/^```$/)
const inlineMatch = text.match(/^`([^`]+)`$/) // Match inline code

if (startMatch) {
// If it's the start of a code block, store the language

@@ -144,38 +157,6 @@ const RichTextEditor = ({
} else if (endMatch) {
// Reset language when code block ends
currentLanguage.current = 'plaintext'
} else if (inlineMatch) {
// Apply syntax highlighting to inline code
const codeContent = inlineMatch[1] // Get the content within the backticks
try {
hljs.highlight(codeContent, {
language:
currentLanguage.current.length > 1
? currentLanguage.current
: 'plaintext',
}).value
} catch (err) {
hljs.highlight(codeContent, {
language: 'javascript',
}).value
}

// Calculate the range for the inline code
const length = codeContent.length
ranges.push({
anchor: {
path: [...path, childIndex],
offset: inlineMatch.index + 1,
},
focus: {
path: [...path, childIndex],
offset: inlineMatch.index + 1 + length,
},
type: 'code',
code: true,
language: currentLanguage.current,
className: '', // Specify class name if needed
})
} else if (currentLanguage.current !== 'plaintext') {
// Highlight entire code line if in a code block
const leadingSpaces = text.match(/^\s*/)?.[0] ?? '' // Capture leading spaces

@@ -206,7 +187,7 @@ const RichTextEditor = ({
anchor: { path: [...path, childIndex], offset: 0 },
focus: {
path: [...path, childIndex],
offset: leadingSpaces.length,
offset: slateTextIndex,
},
type: 'code',
code: true,

@@ -240,6 +221,7 @@ const RichTextEditor = ({
slateTextIndex += length
})
} else {
currentLanguage.current = 'plaintext'
ranges.push({
anchor: { path: [...path, childIndex], offset: 0 },
focus: { path: [...path, childIndex], offset: text.length },

@@ -301,6 +283,11 @@ const RichTextEditor = ({
textareaRef.current.style.overflow =
textareaRef.current.clientHeight >= 390 ? 'auto' : 'hidden'
}

if (currentPrompt.length === 0) {
resetEditor()
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [textareaRef.current?.clientHeight, currentPrompt, activeSettingInputBox])

const onStopInferenceClick = async () => {

@@ -317,13 +304,15 @@ const RichTextEditor = ({

// Adjust the height of the textarea to its initial state
if (textareaRef.current) {
textareaRef.current.style.height = '40px' // Reset to the initial height or your desired height
textareaRef.current.style.height = activeSettingInputBox
? '100px'
: '44px'
textareaRef.current.style.overflow = 'hidden' // Reset overflow style
}

// Ensure the editor re-renders decorations
editor.onChange()
}, [editor])
}, [activeSettingInputBox, editor])

const handleKeyDown = useCallback(
(event: React.KeyboardEvent) => {

@@ -334,35 +323,6 @@ const RichTextEditor = ({
resetEditor()
} else onStopInferenceClick()
}

if (event.key === '`') {
// Determine whether any of the currently selected blocks are code blocks.
const [match] = Editor.nodes(editor, {
match: (n) =>
Element.isElement(n) && (n as CustomElement).type === 'code',
})
// Toggle the block type dependsing on whether there's already a match.
Transforms.setNodes(
editor,
{ type: match ? 'paragraph' : 'code' },
{ match: (n) => Element.isElement(n) && Editor.isBlock(editor, n) }
)
}

if (event.key === 'Tab') {
const [match] = Editor.nodes(editor, {
match: (n) => {
return (n as CustomElement).type === 'code'
},
mode: 'lowest',
})

if (match) {
event.preventDefault()
// Insert a tab character
Editor.insertText(editor, ' ') // Insert 2 spaces
}
}
},
// eslint-disable-next-line react-hooks/exhaustive-deps
[currentPrompt, editor, messages]
@@ -125,7 +125,6 @@ const ChatInput = () => {
<div className="relative p-4 pb-2">
<div className="relative flex w-full flex-col">
{renderPreview(fileUpload)}

<RichTextEditor
className={twMerge(
'relative mb-1 max-h-[400px] resize-none rounded-lg border border-[hsla(var(--app-border))] p-3 pr-20',

@@ -141,7 +140,7 @@ const ChatInput = () => {
disabled={stateModel.loading || !activeThread}
/>
<TextArea
className="absolute inset-0 top-14 h-0 w-0"
className="sr-only"
data-testid="txt-input-chat"
onChange={(e) => setCurrentPrompt(e.target.value)}
/>
@@ -276,18 +276,18 @@ const SimpleTextMessage: React.FC<ThreadMessage> = (props) => {
</div>
)}

{editMessage === props.id && (
{editMessage === props.id ? (
<div>
<EditChatInput message={props} />
</div>
) : (
<div
className={twMerge(
'message max-width-[100%] flex flex-col gap-y-2 overflow-auto leading-relaxed'
)}
dangerouslySetInnerHTML={{ __html: parsedText }}
/>
)}

<div
className={twMerge(
'message max-width-[100%] flex flex-col gap-y-2 overflow-auto leading-relaxed'
)}
dangerouslySetInnerHTML={{ __html: parsedText }}
/>
</>
</div>
</div>