docs: resolve conflicts and add new changelog posts

- Resolve conflicts in deepseek-r1-locally.mdx and run-ai-models-locally.mdx
- Keep SEO-optimized content and structure
- Add new changelog posts for v0.5.13 and v0.5.14
eckartal committed on 2025-02-07 21:00:53 +07:00
commit 52b3ba40a3
154 changed files with 6713 additions and 3364 deletions

View File

@ -6,6 +6,7 @@ on:
jobs:
assign-author:
runs-on: ubuntu-latest
if: ${{ github.event.pull_request.head.repo.full_name == github.repository }}
permissions:
pull-requests: write
steps:

View File

@ -103,7 +103,7 @@ jobs:
# check public_provider is true or not
echo "public_provider is ${{ inputs.public_provider }}"
if [ "${{ inputs.public_provider }}" == "none" ]; then
make build
make build
else
make build-and-publish
fi
@ -122,8 +122,6 @@ jobs:
make build-and-publish
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
ANALYTICS_ID: ${{ secrets.JAN_APP_UMAMI_PROJECT_API_KEY }}
ANALYTICS_HOST: ${{ secrets.JAN_APP_UMAMI_URL }}
POSTHOG_KEY: ${{ secrets.POSTHOG_KEY }}
POSTHOG_HOST: ${{ secrets.POSTHOG_HOST }}

View File

@ -134,7 +134,7 @@ jobs:
# check public_provider is true or not
echo "public_provider is ${{ inputs.public_provider }}"
if [ "${{ inputs.public_provider }}" == "none" ]; then
make build
make build
else
make build-and-publish
fi
@ -168,8 +168,6 @@ jobs:
APPLE_APP_SPECIFIC_PASSWORD: ${{ secrets.APPLE_APP_SPECIFIC_PASSWORD }}
APP_PATH: '.'
DEVELOPER_ID: ${{ secrets.DEVELOPER_ID }}
ANALYTICS_ID: ${{ secrets.JAN_APP_UMAMI_PROJECT_API_KEY }}
ANALYTICS_HOST: ${{ secrets.JAN_APP_UMAMI_URL }}
POSTHOG_KEY: ${{ secrets.POSTHOG_KEY }}
POSTHOG_HOST: ${{ secrets.POSTHOG_HOST }}

View File

@ -136,7 +136,7 @@ jobs:
# check public_provider is true or not
echo "public_provider is ${{ inputs.public_provider }}"
if [ "${{ inputs.public_provider }}" == "none" ]; then
make build
make build
else
make build-and-publish
fi
@ -160,8 +160,6 @@ jobs:
make build-and-publish
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
ANALYTICS_ID: ${{ secrets.JAN_APP_UMAMI_PROJECT_API_KEY }}
ANALYTICS_HOST: ${{ secrets.JAN_APP_UMAMI_URL }}
AZURE_KEY_VAULT_URI: ${{ secrets.AZURE_KEY_VAULT_URI }}
AZURE_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }}
AZURE_TENANT_ID: ${{ secrets.AZURE_TENANT_ID }}

View File

@ -5,6 +5,7 @@ import {
EngineReleased,
EngineConfig,
DefaultEngineVariant,
Model,
} from '../../types'
import { BaseExtension, ExtensionTypeEnum } from '../extension'
@ -103,6 +104,11 @@ export abstract class EngineManagementExtension extends BaseExtension {
engineConfig?: EngineConfig
): Promise<{ messages: string }>
/**
* Add a new remote model for a specific engine
*/
abstract addRemoteModel(model: Model): Promise<void>
/**
* @returns A Promise that resolves to the list of remote models.
*/

View File

@ -1,10 +1,13 @@
import { BaseExtension, ExtensionTypeEnum } from '../extension'
import { Model, ModelInterface, OptionType } from '../../types'
import { Model, ModelInterface, ModelSource, OptionType } from '../../types'
/**
* Model extension for managing models.
*/
export abstract class ModelExtension extends BaseExtension implements ModelInterface {
export abstract class ModelExtension
extends BaseExtension
implements ModelInterface
{
/**
* Model extension type.
*/
@ -25,4 +28,16 @@ export abstract class ModelExtension extends BaseExtension implements ModelInter
abstract updateModel(modelInfo: Partial<Model>): Promise<Model>
abstract deleteModel(model: string): Promise<void>
abstract isModelLoaded(model: string): Promise<boolean>
/**
* Get model sources
*/
abstract getSources(): Promise<ModelSource[]>
/**
* Add a model source
*/
abstract addSource(source: string): Promise<void>
/**
* Delete a model source
*/
abstract deleteSource(source: string): Promise<void>
}

View File

@ -2,3 +2,4 @@ export * from './modelEntity'
export * from './modelInterface'
export * from './modelEvent'
export * from './modelImport'
export * from './modelSource'

View File

@ -1,5 +1,3 @@
import { FileMetadata } from '../file'
/**
* Represents the information about a model.
* @stored
@ -70,6 +68,11 @@ export type Model = {
*/
id: string
/**
* The model identifier, modern version of id.
*/
model?: string
/**
* Human-readable name that is used for UI.
*/

View File

@ -1,5 +1,6 @@
import { Model } from './modelEntity'
import { OptionType } from './modelImport'
import { ModelSource } from './modelSource'
/**
* Model extension for managing models.
@ -50,4 +51,17 @@ export interface ModelInterface {
name?: string,
optionType?: OptionType
): Promise<void>
/**
* Get model sources
*/
getSources(): Promise<ModelSource[]>
/**
* Add a model source
*/
addSource(source: string): Promise<void>
/**
* Delete a model source
*/
deleteSource(source: string): Promise<void>
}

View File

@ -0,0 +1,67 @@
/**
* GGUF Metadata of the model source
*/
export interface GGUF {
architecture: string
bos_token: string
chat_template: string
context_length: number
eos_token: string
total: number
}
/**
* Card Metadata of the model source
*/
export interface CardData {
license: string
pipeline_tag: string
}
/**
* Model Metadata of the model source
*/
export interface Metadata {
author: string
cardData: CardData
createdAt: string
description: string
disabled: boolean
downloads: number
gated: boolean
gguf: GGUF
id: string
inference: string
lastModified: string
likes: number
modelId: string
pipeline_tag: string
private: boolean
sha: string
siblings: Array<{
rfilename: string
size: number
}>
spaces: string[]
tags: string[]
usedStorage: number
apiKey?: string
}
/**
* Model source sibling information
*/
export interface ModelSibling {
id: string
size: number
}
/**
* Model source object
*/
export interface ModelSource {
id: string
metadata: Metadata
models: ModelSibling[]
type?: string
}
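For orientation, here is a minimal, hypothetical TypeScript sketch of how an extension consumer might use the model-source API introduced in this commit. Only the method names (`getSources`, `addSource`, `deleteSource`) and the `ModelSource` shape above come from the diff; the import path, the helper name, and the example source string are assumptions.

```ts
import { ModelExtension, ModelSource } from '@janhq/core'

// Hypothetical helper: the extension instance would normally be resolved from
// the extension manager, and the source string is a guess (a Hugging Face repo
// id or GGUF link), not something this commit prescribes.
async function listModelSources(modelExt: ModelExtension): Promise<void> {
  // Register a new source, then list everything the backend reports back.
  await modelExt.addSource('bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF')

  const sources: ModelSource[] = await modelExt.getSources()
  for (const source of sources) {
    console.log(source.id, '-', source.metadata.downloads, 'downloads')
    for (const sibling of source.models) {
      console.log('  ', sibling.id, `(${sibling.size} bytes)`)
    }
  }
}
```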

View File

@ -65,7 +65,7 @@ const DropdownDownload = ({ lastRelease }: Props) => {
const userAgent = navigator.userAgent
if (userAgent.includes('Windows')) {
// windows user
setDefaultSystem(systems[2])
setDefaultSystem(systems[1])
} else if (userAgent.includes('Linux')) {
// linux user
setDefaultSystem(systems[3])

View File

@ -23,6 +23,4 @@ Adhering to Jan's privacy preserving philosophy, our analytics philosophy is to
## What is tracked
1. By default, Github tracks downloads and device metadata for all public GitHub repositories. This helps us troubleshoot & ensure cross-platform support.
2. We use [Umami](https://umami.is/) to collect, analyze, and understand application data while maintaining visitor privacy and data ownership. We are using the Umami Cloud in Europe to ensure GDPR compliance. Please see [Umami Privacy Policy](https://umami.is/privacy) for more details.
3. We use Umami to track a single `app.opened` event without additional user metadata, in order to understand retention. In addition, we track `app.version` to understand app version usage.
4. Additionally, we plan to enable a `Settings` feature for users to turn off all tracking.
2. Additionally, we plan to enable a `Settings` feature for users to turn off all tracking.

View File

@ -0,0 +1,23 @@
---
title: "A few key issues have been solved!"
version: 0.5.13
description: "Jan v0.5.13 is here: A few key issues have been solved."
date: 2025-01-06
ogImage: "/assets/images/changelog/jan-v0-5-13.gif"
---
import ChangelogHeader from "@/components/Changelog/ChangelogHeader"
<ChangelogHeader title="Jan v0.5.13: A few key issues have been solved!" date="2025-01-06" ogImage="/assets/images/changelog/jan-v0-5-13.gif" />
👋 Jan v0.5.13 is here: A few key issues have been solved!
### Highlights 🎉
- Resolved model loading issues on macOS Intel
- Fixed app resetting max_tokens to 8192 on new threads - now uses model settings
- Fixed Vulkan settings visibility for some users
Update Jan or download the latest: https://jan.ai
For more details, see the [GitHub release notes](https://github.com/janhq/jan/releases/tag/v0.5.13).

View File

@ -0,0 +1,36 @@
---
title: "Run DeepSeek R1 Distills error-free!"
version: 0.5.14
description: "Jan v0.5.14 is out: Run DeepSeek R1 Distills error-free!"
date: 2025-01-23
ogImage: "/assets/images/changelog/jan-v0-5-14-deepseek-r1.gif"
---
import ChangelogHeader from "@/components/Changelog/ChangelogHeader"
<ChangelogHeader title="Jan v0.5.14: Run DeepSeek R1 Distills error-free!" date="2025-01-23" ogImage="/assets/images/changelog/jan-v0-5-14-deepseek-r1.gif" />
👋 Jan v0.5.14 is out: Run DeepSeek R1 Distills error-free!
You can run DeepSeek R1 distills in Jan error-free. Follow our [step-by-step guide to run DeepSeek R1 locally](/post/deepseek-r1-locally) and get this AI model running on your device in minutes.
llama.cpp version updated via Cortex—thanks to GG & llama.cpp community!
- Paste GGUF links into Jan Hub to download
- Already downloaded the model but facing issues? Update Jan.
Models:
Qwen
- DeepSeek-R1-Distill-Qwen-1.5B-GGUF: https://huggingface.co/bartowski/DeepSeek-R1-Distill-Qwen-1.5B-GGUF
- DeepSeek-R1-Distill-Qwen-7B-GGUF: https://huggingface.co/bartowski/DeepSeek-R1-Distill-Qwen-7B-GGUF
- DeepSeek-R1-Distill-Qwen-14B-GGUF: https://huggingface.co/bartowski/DeepSeek-R1-Distill-Qwen-14B-GGUF
- DeepSeek-R1-Distill-Qwen-32B-GGUF: https://huggingface.co/bartowski/DeepSeek-R1-Distill-Qwen-32B-GGUF
Llama
- DeepSeek-R1-Distill-Llama-8B-GGUF: https://huggingface.co/bartowski/DeepSeek-R1-Distill-Llama-8B-GGUF
- DeepSeek-R1-Distill-Llama-70B-GGUF: https://huggingface.co/bartowski/DeepSeek-R1-Distill-Llama-70B-GGUF
Update your Jan or [download the latest](https://jan.ai/).
For more details, see the [GitHub release notes](https://github.com/janhq/jan/releases/tag/v0.5.14).

View File

@ -14,6 +14,11 @@
"desktop": "Installation",
"data-folder": "Jan Data Folder",
"privacy": "Privacy",
"privacy-policy": {
"type": "page",
"display": "hidden",
"title": "Privacy Policy"
},
"user-guides": {
"title": "BASIC USAGE",
"type": "separator"

View File

@ -41,7 +41,7 @@ Ensure that your system meets the following requirements to use Jan effectively:
### Mac Performance Guide
<Callout type="info">
**Apple Silicon Macs** leverage Metal for GPU acceleration, providing faster performance than **Appple Intel Macs**, which rely solely on CPU processing.
**Apple Silicon Macs** leverage Metal for GPU acceleration, providing faster performance than **Apple Intel Macs**, which rely solely on CPU processing.
</Callout>
**Apple Silicon (M1, M2, M3)**
- Metal acceleration enabled by default, no configuration required

View File

@ -0,0 +1,125 @@
---
title: Jan Privacy Policy
description: Jan's data collection practices, privacy measures, and your rights. Learn how we protect your data and maintain transparency.
---
# Privacy Policy
<div className="text-sm text-gray-500 mt-2 mb-8">
Last Updated: January 16, 2025
</div>
## Introduction
We are committed to protecting your privacy and ensuring you have control over your data. This Privacy Policy outlines what information Menlo Research Pte Ltd (the "Company") collects from users of the Jan desktop app and website (the "Services"), how the Company uses that information, and the measures the Company takes to safeguard that information.
## 1. Data Collection and Consent
### Explicit Consent
The Company does not collect any data until you explicitly allow tracking.
### Tracking Preferences
Upon first launching the Jan desktop app or visiting the website, you will be prompted to set your tracking preferences. These preferences can be modified at any time via the app's Settings menu or the website's Privacy Settings.
### Legal Basis
Pursuant to the European Union's General Data Protection Regulation (EU) 2016/679 (the "GDPR"), the Company processes data based on your explicit consent (GDPR Article 6(1)(a)). This means:
- The Company only processes your data after receiving clear, affirmative consent from you.
- You may withdraw your consent at any time through the app's Settings menu or the website's Privacy Settings.
- If you withdraw your consent, the Company will stop optional data collection from the effective date of withdrawal.
- Your withdrawal of consent will not affect the lawfulness of processing before its withdrawal.
## 2. Data We Do Not Collect
Regardless of your analytics permissions, the Company does not collect the following:
- Chat History: Your conversations with the Jan app are private and inaccessible to the Company.
- Chat Settings: Your personalized settings remain solely with you.
- Language Models: The specific language models you use are not tracked.
## 3. Uses of Information
To build a reliable and user-friendly product offering, understanding how the Jan app is used is essential. If you permit tracking, the Company collects product analytics data to:
- Improve User Experience: Enhance app functionality based on usage patterns; and
- Measure Engagement: Assess active users and retention rates to ensure ongoing value.
## 4. Product Analytics
### Data Collected
When you opt-in to tracking, we collect the following anonymous data:
- Active Users: Number of daily active users to gauge engagement.
- Retention Rates: Track if users continue to find value in the Jan app over time.
### Data Anonymity
- User ID: Analytics data is tied to a randomly generated user ID, ensuring no link to your personal identity.
- Privacy Assurance: Your chat history and personal data are not tracked or linked to your usage data.
## 5. What We Do Not Track
Even with analytics permissions granted, the Company does not track the following:
- Conversations: Your interactions with the Jan app remain private.
- Files: The Company does not scan, upload, or view your files.
- Personal Identity: The Company does not collect personally identifiable information about users.
- Prompts: Your prompts and prompt templates are not monitored.
- Conversation Metrics: The Company does not track context length or conversation length.
- Model Usage: The specific models you use or their types are not tracked.
- Storage: You retain full control over storing your files and logs, and your privacy is prioritized.
## 6. Using Cloud Models
The Jan app allows you to connect to cloud-based model APIs (e.g. GPT, Claude models).
- Data Handling: The API provider processes your messages directly; the Jan app does not access or store these messages.
- Local Models: Choosing local models ensures all data remains on your device, with no external access.
## 7. Data Storage and Processing
### Analytics Provider
The Company uses PostHog EU for analytics, which ensures all data is processed within the European Union.
### Data Security
- Encryption: All data transfers are encrypted using Transport Layer Security (TLS) to ensure secure transmission.
- Storage: PostHog securely manages the data the Company collects. For more information, please refer to PostHog's GDPR documentation.
## 8. Data Retention
- Retention Period: The Company retains analytics data for up to 12 months unless otherwise required to comply with any applicable legal requirements.
- Deletion Requests: If you wish to request the deletion of your analytics data, you may do so by sending a written request to hello@jan.ai.
## 9. Your Rights and Choices
- Access and Control: You may access, modify, or delete your tracking preferences at any time through the Jan app or website settings.
- Data Requests: If you have any requests related to your data, please address them to hello@jan.ai.
## 10. Children's Privacy
Our Services are not targeted at children under the age of 13. The Company does not knowingly collect data from children under the age of 13. If the Company becomes aware that data of persons under the age of 13 has been collected without verifiable parental consent, the Company will take appropriate actions to delete this information.
## 11. Changes to the Privacy Policy
The Company reserves the right, at its sole discretion, to update this Privacy Policy at any time to reflect changes in the practices or legal requirements of the Company. The Company will use reasonable efforts to notify you of any significant changes via app notifications, the website, or email. Your continued use of the Services following such updates means you accept those changes.
## 12. Cookies and Tracking Technologies
Our website utilizes cookies to:
- Enhance user experience; and
- Measure website traffic and usage patterns.
Most browsers allow you to remove or manage cookie functions and adjust your privacy and security preferences.
For more details, please refer to our Cookie Policy.
## 13. Contact Us
For any questions or concerns about this Privacy Policy or our data practices, please contact hello@jan.ai.

View File

@ -1,5 +1,5 @@
---
title: Jan Privacy
title: Jan's Privacy Approach
description: Jan is an app that allows you to own your AI. We prioritize your control over your data and explain what data we collect and why.
keywords:
[
@ -19,45 +19,57 @@ keywords:
]
---
---
import { Callout } from 'nextra/components'
# Privacy
Jan is an app that allows you to own your AI. We prioritize your control over your data and explain what data we collect and why.
Jan is an app that allows you to own your AI. We prioritize local AI models and your control over your data. This page explains what data we collect and why. No tricks.
- Jan can't see your chats with AI
- You're free to opt out
<Callout>
For a comprehensive overview of our privacy practices, you can read our full [Privacy Policy](/docs/privacy-policy).
</Callout>
<Callout type="info">
We don't collect any data until you explicitly allow tracking.
</Callout>
You'll be asked about your tracking preferences when you first launch the app, and you can change them at any time in Settings.
Regardless of your analytics permissions, Jan will **never** access your chat history, chat settings, or the language models you have used.
## Why and what we track
To build a reliable, user-friendly AI that you own, we need to understand how Jan is used. We collect two types of data: performance data and usage data.
To build a reliable, user-friendly AI that you own, we need to understand how Jan is used. If you allow tracking, we collect product analytics data.
### Performance data
We track app crashes and collect technical details about what went wrong, along with basic information about the hardware youre using.
When Jan crashes, we collect technical details about what went wrong.
- Specific AI model in use during the crash
- Hardware: `CPU`, `GPU`, `RAM`
- Logs: `Date/Time`, `OS & version`, `app version`, `error codes & messages`.
### Usage data
### Product Analytics
We track data like how often the app is opened to check:
- **Active Users**: How many people use Jan daily to measure engagement
- **Retention Rates**: To understand if users are finding value in Jan over time
Usage data is tied to a randomly generated telemetry ID. None of our usage data can be linked to your personal identity.
Product analytics data is tied to a randomly generated user ID. None of our usage data can be linked to your personal identity. Your chat history and personal data are never tracked.
## What we **dont** track:
- Your conversations with Jan. Those stay on your device.
- Your files. We dont scan, upload, or even look at them.
- Anything tied to your identity.
## What we **don't** track
<Callout type="info">
Even if you grant analytics permissions, Jan doesn't track many of your private activities.
</Callout>
- We don't track your conversations with Jan.
- We don't scan, upload, or look at your files.
- We don't collect anything tied to your identity.
- We don't track your prompts and prompt templates.
- We don't monitor context length or conversation length.
- We don't track the models you have used or their types.
Your files and logs stay with you - you store them yourself, keeping your privacy in your own hands.
## Using Cloud Models
Jan allows you to connect to cloud model APIs. If you choose to use cloud-based models (e.g. GPT, Claude models), the API provider handling the model will have access to your messages as part of processing the request. Again, Jan doesn't see or store these messages - they go directly to the provider. Remember: with local models, everything stays on your device, so no one - not even us - can see your messages.
## Where we store & process data
We use [PostHog](https://posthog.com/eu) EU for analytics, ensuring all data is processed within the European Union. This setup complies with GDPR and other strict privacy regulations. PostHog lets us self-host and securely manage the data we collect. Read more [on PostHog's GDPR doc](https://posthog.com/docs/privacy/gdpr-compliance).
For a detailed breakdown of the analytics data we collect, you can check out our analytics repo. If you have any questions or concerns, feel free to reach out to us at hi@jan.ai.
We use [PostHog](https://posthog.com/eu) EU for analytics, ensuring all data is processed within the European Union. This setup complies with GDPR and other strict privacy regulations. PostHog lets us securely manage the data we collect. Read more [on PostHog's GDPR doc](https://posthog.com/docs/privacy/gdpr-compliance).

View File

@ -1,6 +1,6 @@
---
title: "Jan: Open source ChatGPT-alternative that runs 100% offline"
description: "Chat with AI without privact concerns. Jan is an open-source alternative to ChatGPT, running AI models locally on your device."
description: "Chat with AI without privacy concerns. Jan is an open-source alternative to ChatGPT, running AI models locally on your device."
keywords:
[
Jan,

View File

@ -1,47 +0,0 @@
---
title: Raycast
keywords:
[
Jan,
Customizable Intelligence, LLM,
local AI,
privacy focus,
free and open source,
private and offline,
conversational AI,
no-subscription fee,
large language models,
raycast integration,
Raycast,
]
description: A step-by-step guide on integrating Jan with Raycast.
---
import { Steps } from 'nextra/components'
# Raycast
## Integrate Raycast with Jan
[Raycast](https://www.raycast.com/) is a productivity tool designed for macOS that enhances workflow efficiency by providing quick access to various tasks and functionalities through a keyboard-driven interface. To integrate Raycast with Jan, follow the steps below:
<Steps>
### Step 1: Download the TinyLlama Model
1. Open Jan app.
2. Go to the **Hub** and download the TinyLlama model.
3. The model will be available at `~jan/models/tinyllama-1.1b`.
### Step 2: Clone and Run the Program
1. Clone this [GitHub repository](https://github.com/InNoobWeTrust/nitro-raycast).
2. Execute the project using the following command:
```bash title="Node.js"
npm i && npm run dev
```
### Step 3: Search for Nitro and Run the Model
Search for `Nitro` using the program, and you can use the models from Jan in RayCast.
</Steps>

View File

@ -1,5 +1,9 @@
---
<<<<<<< HEAD
title: "Run DeepSeek R1 locally on your device (Beginner-Friendly Guide)"
=======
title: "Beginner's Guide: Run DeepSeek R1 Locally"
>>>>>>> origin/dev
description: "A straightforward guide to running DeepSeek R1 locally for enhanced privacy, regardless of your background."
tags: DeepSeek, R1, local AI, Jan, GGUF, Qwen, Llama
categories: guides
@ -10,6 +14,7 @@ ogImage: assets/run-deepseek-r1-locally-in-jan.jpg
import { Callout } from 'nextra/components'
import CTABlog from '@/components/Blog/CTA'
<<<<<<< HEAD
# Run DeepSeek R1 locally on your device (Beginner-Friendly Guide)
![image](./_assets/run-deepseek-r1-locally-in-jan.jpg)
@ -19,6 +24,13 @@ DeepSeek R1 is one of the best open-source models in the market right now, and y
<Callout type="info">
New to running AI models locally? Check out our [comprehensive guide on running AI models locally](/post/run-ai-models-locally) first. It covers essential concepts that will help you better understand this DeepSeek R1 guide.
</Callout>
=======
# Beginner's Guide: Run DeepSeek R1 Locally
![image](./_assets/run-deepseek-r1-locally-in-jan.jpg)
DeepSeek R1 brings state-of-the-art AI capabilities to your local machine. With optimized versions available for different hardware configurations, you can run this powerful model directly on your laptop or desktop computer. This guide will show you how to run open-source AI models like DeepSeek, Llama, or Mistral locally on your computer, regardless of your background.
>>>>>>> origin/dev
Why use an optimized version?
- Efficient performance on standard hardware

View File

@ -16,19 +16,15 @@ Most people think running AI models locally is complicated. It's not. The real c
## Quick steps:
1. Download [Jan](https://jan.ai)
2. Pick a recommended model
3. Start chatting
Read [Quickstart](https://jan.ai/docs/quickstart) to get started. For more details, keep reading.
![Run AI models locally with Jan](./_assets/jan-local-ai.jpg)
*Jan is for running AI models locally. Download [Jan](https://jan.ai)*
2. Choose a model that fits your hardware
3. Start using AI locally!
<Callout type="info">
Benefits of running AI locally:
- **Privacy:** Your data stays on your computer
- **No internet needed:** Use AI even offline
- **No limits:** Chat as much as you want
- **Privacy:** Your data stays on your device
- **No subscription:** Pay once for hardware
- **Speed:** No internet latency
- **Reliability:** Works offline
- **Full control:** Choose which AI models to use
</Callout>
@ -36,10 +32,9 @@ Benefits of running AI locally:
[Jan](https://jan.ai) makes it straightforward to run AI models. Download Jan and you're ready to go - the setup process is streamlined and automated.
<Callout type="tip">
<Callout type="info">
What you can do with Jan:
- Download AI models with one click
- Everything is set up automatically
- Download Jan
- Find models that work on your computer
</Callout>
@ -53,9 +48,6 @@ Think of AI models like engines powering applications - some are compact and eff
When looking at AI models, you'll see names like "Llama-2-7B" or "Mistral-7B". Here's what that means:
![AI model parameters explained](./_assets/local-ai-model-parameters.jpg)
*Model sizes: Bigger models = Better results + More resources*
- The "B" means "billion parameters" (like brain cells)
- More parameters = smarter AI but needs a faster computer
- Fewer parameters = simpler AI but works on most computers
@ -71,9 +63,6 @@ Which size to choose?
Quantization is a technique that optimizes AI models to run efficiently on your computer. Think of it like an engine tuning process that balances performance with resource usage:
![AI model quantization explained](./_assets/open-source-ai-quantization.jpg)
*Quantization: Balance between size and quality*
Simple guide:
- **Q4:** Most efficient choice - good balance of speed and quality
- **Q6:** Enhanced quality with moderate resource usage
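Not part of the original guide, but as a rough back-of-envelope sketch of what these quant levels mean in practice: a quantized model's download size is roughly its parameter count times the bits per weight, divided by 8. The bits-per-weight figures below are approximate averages for common GGUF quants, and the estimate ignores runtime overhead such as context/KV-cache memory.

```ts
// Approximate size of a quantized GGUF model: parameters x bits-per-weight / 8.
// The bits-per-weight values are rough averages, not exact format constants.
const bitsPerWeight: Record<string, number> = { Q4: 4.5, Q6: 6.5, Q8: 8.5 }

function approxSizeGB(paramsBillions: number, quant: string): number {
  return (paramsBillions * 1e9 * (bitsPerWeight[quant] / 8)) / 1e9
}

console.log(approxSizeGB(7, 'Q4').toFixed(1), 'GB') // a 7B model at Q4: ~3.9 GB
console.log(approxSizeGB(7, 'Q6').toFixed(1), 'GB') // a 7B model at Q6: ~5.7 GB
```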

View File

@ -10,27 +10,20 @@ Homebrew Computer Company is committed to protecting your privacy and ensuring t
## Data Collection
Jan, Cortex, and all Homebrew Computer Company products do not collect personally identifying information. You can read about [our philosophy](/about#philosophy) here and audit our open-source codebases.
Jan, Cortex, and all Homebrew Computer Company products do not collect personally identifying information. You can read about [our philosophy](/about#philosophy) here and audit our open-source codebases.
### When you voluntarily provide data
We -do- collect personal information you voluntarily provide us, e.g., when you sign up for our newsletter, join our Discord, or contact us via email.
We -do- collect personal information you voluntarily provide us, e.g., when you sign up for our newsletter, join our Discord, or contact us via email.
### Jan
### Jan
Jan runs with privacy by default and is used 100% offline on your own computer. Your data (e.g., conversation history, usage logs) are stored locally and never leave your computer.
Jan runs with privacy by default and is used 100% offline on your own computer. Your data (e.g., conversation history, usage logs) are stored locally and never leave your computer.
<Callout type="info">
If you use a Remote AI API (e.g., OpenAI API, Groq API), your data will naturally travel to their servers. They will be subject to the privacy policy of the respective API provider.
</Callout>
Jan uses [Umami](https://umami.is/) for analytics, which is a privacy-focused, GDPR-compliant analytics tool that does not track personal information. We use this to get aggregate reports on OS and hardware types and prioritize our engineering roadmap. As per [Umami's Privacy Policy](https://umami.is/privacy), Umami uses the following data points to generate its reports:
- OS and device characteristics
- IP address
Jan does not get any of this data, and we do not track IP addresses or other identifying information. We are actively looking into more privacy-respecting ways to handle analytics, crash reports, and telemetry and would love to work with the community on this.
### Cortex
Cortex is a library that runs large language models (LLMs) locally on your computer. Cortex does not collect any personal information.

View File

@ -6,15 +6,20 @@ export function formatCompactNumber(count: number) {
export const totalDownload = (release: []) => {
if (release instanceof Array) {
const count = release
.map((version: { assets: any[] }) =>
version.assets.map((os) => os.download_count)
)
.map((version: { assets: any[]; name: string }) => {
// it will be correct since 0.5.15
const tag = version.name >= '0.5.15' && version.name.includes('0.5.15')
return version.assets
.filter((os) => !(tag && os.name.endsWith('.yml')))
.map((os) => os.download_count)
})
.map((x: any[]) => x.reduce((a: any, b: any) => a + b, 0))
.reduce((a: any, b: any) => a + b, 0)
return formatCompactNumber(count)
} else {
// return dummy avoid reate limit API when dev mode
// return dummy to avoid rate limit API when in dev mode
return formatCompactNumber(9000000)
}
}

File diff suppressed because it is too large.

View File

@ -17,7 +17,7 @@ test('explores hub', async ({ hubPage }) => {
await hubPage.navigateByMenu()
await hubPage.verifyContainerVisible()
await hubPage.scrollToBottom()
const useModelBtn = page.getByTestId(/^use-model-btn-.*/).first()
const useModelBtn = page.getByTestId(/^setup-btn/).first()
await expect(useModelBtn).toBeVisible({
timeout: TIMEOUT,

View File

@ -1,33 +1,18 @@
import { expect } from '@playwright/test'
import { page, test, TIMEOUT } from '../config/fixtures'
test('Select GPT model from Hub and Chat with Invalid API Key', async ({
hubPage,
}) => {
await hubPage.navigateByMenu()
await hubPage.verifyContainerVisible()
// Select the first GPT model
await page
.locator('[data-testid^="use-model-btn"][data-testid*="gpt"]')
.first()
.click()
await page.getByTestId('txt-input-chat').fill('dummy value')
test('show onboarding screen without any threads created or models downloaded', async () => {
await page.getByTestId('Thread').first().click({
timeout: TIMEOUT,
})
const denyButton = page.locator('[data-testid="btn-deny-product-analytics"]')
if ((await denyButton.count()) > 0) {
await denyButton.click({ force: true })
} else {
await page.getByTestId('btn-send-chat').click({ force: true })
}
await page.waitForFunction(
() => {
const loaders = document.querySelectorAll('[data-testid$="loader"]')
return !loaders.length
},
{ timeout: TIMEOUT }
)
const onboardScreen = page.getByTestId('onboard-screen')
await expect(onboardScreen).toBeVisible({
timeout: TIMEOUT,
})
})

View File

@ -63,7 +63,7 @@ export default class CortexConversationalExtension extends ConversationalExtensi
async modifyThread(thread: Thread): Promise<void> {
return this.queue
.add(() =>
ky.post(`${API_URL}/v1/threads/${thread.id}`, { json: thread })
ky.patch(`${API_URL}/v1/threads/${thread.id}`, { json: thread })
)
.then()
}
@ -101,7 +101,7 @@ export default class CortexConversationalExtension extends ConversationalExtensi
async modifyMessage(message: ThreadMessage): Promise<ThreadMessage> {
return this.queue.add(() =>
ky
.post(
.patch(
`${API_URL}/v1/threads/${message.thread_id}/messages/${message.id}`,
{
json: message,

View File

@ -13,7 +13,7 @@ export default defineConfig([
NODE: JSON.stringify(`${pkgJson.name}/${pkgJson.node}`),
API_URL: JSON.stringify('http://127.0.0.1:39291'),
SOCKET_URL: JSON.stringify('ws://127.0.0.1:39291'),
CORTEX_ENGINE_VERSION: JSON.stringify('v0.1.46'),
CORTEX_ENGINE_VERSION: JSON.stringify('v0.1.49'),
DEFAULT_REMOTE_ENGINES: JSON.stringify(engines),
DEFAULT_REMOTE_MODELS: JSON.stringify(models),
},
@ -26,7 +26,7 @@ export default defineConfig([
file: 'dist/node/index.cjs.js',
},
define: {
CORTEX_ENGINE_VERSION: JSON.stringify('v0.1.46'),
CORTEX_ENGINE_VERSION: JSON.stringify('v0.1.49'),
},
},
{

View File

@ -1 +1 @@
1.0.9-rc4
1.0.9-rc7

View File

@ -111,7 +111,7 @@ export default defineConfig([
SETTINGS: JSON.stringify(defaultSettingJson),
CORTEX_API_URL: JSON.stringify('http://127.0.0.1:39291'),
CORTEX_SOCKET_URL: JSON.stringify('ws://127.0.0.1:39291'),
CORTEX_ENGINE_VERSION: JSON.stringify('v0.1.46'),
CORTEX_ENGINE_VERSION: JSON.stringify('v0.1.49'),
},
},
{

File diff suppressed because it is too large.

View File

@ -1,5 +1,6 @@
import { defineConfig } from 'rolldown'
import settingJson from './resources/settings.json' with { type: 'json' }
import modelSources from './resources/default.json' with { type: 'json' }
export default defineConfig({
input: 'src/index.ts',
@ -12,5 +13,6 @@ export default defineConfig({
SETTINGS: JSON.stringify(settingJson),
API_URL: JSON.stringify('http://127.0.0.1:39291'),
SOCKET_URL: JSON.stringify('ws://127.0.0.1:39291'),
DEFAULT_MODEL_SOURCES: JSON.stringify(modelSources),
},
})

View File

@ -2,6 +2,7 @@ declare const NODE: string
declare const API_URL: string
declare const SOCKET_URL: string
declare const SETTINGS: SettingComponentProps[]
declare const DEFAULT_MODEL_SOURCES: any
interface Core {
api: APIFunctions

View File

@ -1,6 +1,6 @@
import PQueue from 'p-queue'
import ky from 'ky'
import { extractModelLoadParams, Model } from '@janhq/core'
import { extractModelLoadParams, Model, ModelSource } from '@janhq/core'
import { extractInferenceParams } from '@janhq/core'
/**
* cortex.cpp Model APIs interface
@ -19,9 +19,12 @@ interface ICortexAPI {
updateModel(model: object): Promise<void>
cancelModelPull(model: string): Promise<void>
configs(body: { [key: string]: any }): Promise<void>
getSources(): Promise<ModelSource[]>
addSource(source: string): Promise<void>
deleteSource(source: string): Promise<void>
}
type ModelList = {
type Data = {
data: any[]
}
@ -53,7 +56,7 @@ export class CortexAPI implements ICortexAPI {
*/
getModels(): Promise<Model[]> {
return this.queue
.add(() => ky.get(`${API_URL}/v1/models?limit=-1`).json<ModelList>())
.add(() => ky.get(`${API_URL}/v1/models?limit=-1`).json<Data>())
.then((e) =>
typeof e === 'object' ? e.data.map((e) => this.transformModel(e)) : []
)
@ -148,6 +151,47 @@ export class CortexAPI implements ICortexAPI {
.catch(() => false)
}
// BEGIN - Model Sources
/**
* Get model sources
* @param model
*/
async getSources(): Promise<ModelSource[]> {
return this.queue
.add(() => ky.get(`${API_URL}/v1/models/sources`).json<Data>())
.then((e) => (typeof e === 'object' ? (e.data as ModelSource[]) : []))
.catch(() => [])
}
/**
* Add a model source
* @param model
*/
async addSource(source: string): Promise<any> {
return this.queue.add(() =>
ky.post(`${API_URL}/v1/models/sources`, {
json: {
source,
},
})
)
}
/**
* Delete a model source
* @param model
*/
async deleteSource(source: string): Promise<any> {
return this.queue.add(() =>
ky.delete(`${API_URL}/v1/models/sources`, {
json: {
source,
},
})
)
}
// END - Model Sources
/**
* Do health check on cortex.cpp
* @returns

View File

@ -11,6 +11,7 @@ import {
events,
DownloadEvent,
OptionType,
ModelSource,
} from '@janhq/core'
import { CortexAPI } from './cortex'
import { scanModelsFolder } from './legacy/model-json'
@ -243,6 +244,35 @@ export default class JanModelExtension extends ModelExtension {
return this.cortexAPI.importModel(model, modelPath, name, option)
}
// BEGIN - Model Sources
/**
* Get model sources
* @param model
*/
async getSources(): Promise<ModelSource[]> {
const sources = await this.cortexAPI.getSources()
return sources.concat(
DEFAULT_MODEL_SOURCES.filter((e) => !sources.some((x) => x.id === e.id))
)
}
/**
* Add a model source
* @param model
*/
async addSource(source: string): Promise<any> {
return this.cortexAPI.addSource(source)
}
/**
* Delete a model source
* @param model
*/
async deleteSource(source: string): Promise<any> {
return this.cortexAPI.deleteSource(source)
}
// END - Model Sources
/**
* Check model status
* @param model

View File

@ -509,61 +509,61 @@ __metadata:
"@janhq/core@file:../../core/package.tgz::locator=%40janhq%2Fassistant-extension%40workspace%3Aassistant-extension":
version: 0.1.10
resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=7dd866&locator=%40janhq%2Fassistant-extension%40workspace%3Aassistant-extension"
resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=5eb526&locator=%40janhq%2Fassistant-extension%40workspace%3Aassistant-extension"
dependencies:
rxjs: "npm:^7.8.1"
ulidx: "npm:^2.3.0"
checksum: 10c0/da0eed6e552ce2ff6f52a087e6e221101c3d0c03d92820840ee80c3ca1a17317a66525cb5bf59b6c1e8bd2e36e54763008f97e13000ae339dac49f5682fcfa65
checksum: 10c0/e53df943c345a1496d45d86e65bf40cf0fe0dd716ac1c1753453bad6877f36035a4fb305cb5e1690c18d426609ba125d1370304c7399fd4abac760e09fef2c52
languageName: node
linkType: hard
"@janhq/core@file:../../core/package.tgz::locator=%40janhq%2Fconversational-extension%40workspace%3Aconversational-extension":
version: 0.1.10
resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=7dd866&locator=%40janhq%2Fconversational-extension%40workspace%3Aconversational-extension"
resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=5eb526&locator=%40janhq%2Fconversational-extension%40workspace%3Aconversational-extension"
dependencies:
rxjs: "npm:^7.8.1"
ulidx: "npm:^2.3.0"
checksum: 10c0/da0eed6e552ce2ff6f52a087e6e221101c3d0c03d92820840ee80c3ca1a17317a66525cb5bf59b6c1e8bd2e36e54763008f97e13000ae339dac49f5682fcfa65
checksum: 10c0/e53df943c345a1496d45d86e65bf40cf0fe0dd716ac1c1753453bad6877f36035a4fb305cb5e1690c18d426609ba125d1370304c7399fd4abac760e09fef2c52
languageName: node
linkType: hard
"@janhq/core@file:../../core/package.tgz::locator=%40janhq%2Fengine-management-extension%40workspace%3Aengine-management-extension":
version: 0.1.10
resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=7dd866&locator=%40janhq%2Fengine-management-extension%40workspace%3Aengine-management-extension"
resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=5eb526&locator=%40janhq%2Fengine-management-extension%40workspace%3Aengine-management-extension"
dependencies:
rxjs: "npm:^7.8.1"
ulidx: "npm:^2.3.0"
checksum: 10c0/da0eed6e552ce2ff6f52a087e6e221101c3d0c03d92820840ee80c3ca1a17317a66525cb5bf59b6c1e8bd2e36e54763008f97e13000ae339dac49f5682fcfa65
checksum: 10c0/e53df943c345a1496d45d86e65bf40cf0fe0dd716ac1c1753453bad6877f36035a4fb305cb5e1690c18d426609ba125d1370304c7399fd4abac760e09fef2c52
languageName: node
linkType: hard
"@janhq/core@file:../../core/package.tgz::locator=%40janhq%2Finference-cortex-extension%40workspace%3Ainference-cortex-extension":
version: 0.1.10
resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=7dd866&locator=%40janhq%2Finference-cortex-extension%40workspace%3Ainference-cortex-extension"
resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=5eb526&locator=%40janhq%2Finference-cortex-extension%40workspace%3Ainference-cortex-extension"
dependencies:
rxjs: "npm:^7.8.1"
ulidx: "npm:^2.3.0"
checksum: 10c0/da0eed6e552ce2ff6f52a087e6e221101c3d0c03d92820840ee80c3ca1a17317a66525cb5bf59b6c1e8bd2e36e54763008f97e13000ae339dac49f5682fcfa65
checksum: 10c0/e53df943c345a1496d45d86e65bf40cf0fe0dd716ac1c1753453bad6877f36035a4fb305cb5e1690c18d426609ba125d1370304c7399fd4abac760e09fef2c52
languageName: node
linkType: hard
"@janhq/core@file:../../core/package.tgz::locator=%40janhq%2Fmodel-extension%40workspace%3Amodel-extension":
version: 0.1.10
resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=7dd866&locator=%40janhq%2Fmodel-extension%40workspace%3Amodel-extension"
resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=5eb526&locator=%40janhq%2Fmodel-extension%40workspace%3Amodel-extension"
dependencies:
rxjs: "npm:^7.8.1"
ulidx: "npm:^2.3.0"
checksum: 10c0/da0eed6e552ce2ff6f52a087e6e221101c3d0c03d92820840ee80c3ca1a17317a66525cb5bf59b6c1e8bd2e36e54763008f97e13000ae339dac49f5682fcfa65
checksum: 10c0/e53df943c345a1496d45d86e65bf40cf0fe0dd716ac1c1753453bad6877f36035a4fb305cb5e1690c18d426609ba125d1370304c7399fd4abac760e09fef2c52
languageName: node
linkType: hard
"@janhq/core@file:../../core/package.tgz::locator=%40janhq%2Fmonitoring-extension%40workspace%3Amonitoring-extension":
version: 0.1.10
resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=7dd866&locator=%40janhq%2Fmonitoring-extension%40workspace%3Amonitoring-extension"
resolution: "@janhq/core@file:../../core/package.tgz#../../core/package.tgz::hash=5eb526&locator=%40janhq%2Fmonitoring-extension%40workspace%3Amonitoring-extension"
dependencies:
rxjs: "npm:^7.8.1"
ulidx: "npm:^2.3.0"
checksum: 10c0/da0eed6e552ce2ff6f52a087e6e221101c3d0c03d92820840ee80c3ca1a17317a66525cb5bf59b6c1e8bd2e36e54763008f97e13000ae339dac49f5682fcfa65
checksum: 10c0/e53df943c345a1496d45d86e65bf40cf0fe0dd716ac1c1753453bad6877f36035a4fb305cb5e1690c18d426609ba125d1370304c7399fd4abac760e09fef2c52
languageName: node
linkType: hard

View File

@ -30,6 +30,7 @@
"dependencies": {
"@radix-ui/react-accordion": "^1.1.2",
"@radix-ui/react-dialog": "^1.0.5",
"@radix-ui/react-dropdown-menu": "^2.1.4",
"@radix-ui/react-icons": "^1.3.0",
"@radix-ui/react-scroll-area": "^1.0.5",
"@radix-ui/react-select": "^2.0.0",

View File

@ -0,0 +1,45 @@
import React, { Fragment, PropsWithChildren, ReactNode } from 'react'
import * as DropdownMenu from '@radix-ui/react-dropdown-menu'
import './styles.scss'
import { twMerge } from 'tailwind-merge'
type Props = {
options?: { name: ReactNode; value: string; suffix?: ReactNode }[]
className?: string
onValueChanged?: (value: string) => void
}
const Dropdown = (props: PropsWithChildren & Props) => {
return (
<DropdownMenu.Root>
<DropdownMenu.Trigger asChild>{props.children}</DropdownMenu.Trigger>
<DropdownMenu.Portal>
<DropdownMenu.Content
className={twMerge(props.className, 'DropdownMenuContent')}
sideOffset={0}
align="end"
>
{props.options?.map((e, i) => (
<Fragment key={e.value}>
{i !== 0 && (
<DropdownMenu.Separator className="DropdownMenuSeparator" />
)}
<DropdownMenu.Item
className="DropdownMenuItem"
onClick={() => props.onValueChanged?.(e.value)}
>
{e.name}
<div />
{e.suffix}
</DropdownMenu.Item>
</Fragment>
))}
<DropdownMenu.Arrow className="DropdownMenuArrow" />
</DropdownMenu.Content>
</DropdownMenu.Portal>
</DropdownMenu.Root>
)
}
export { Dropdown }
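A brief, hypothetical usage sketch for the new `Dropdown` component: the trigger markup and option values are invented for illustration, while the props (`options`, `onValueChanged`, the optional `suffix`) and the children-as-trigger pattern come from the component above.

```tsx
import React from 'react'
import { Dropdown } from '@janhq/joi'

// Illustrative only: a plain button acts as the Radix trigger, and each option
// reports its value through onValueChanged when clicked.
const DownloadMenu = () => (
  <Dropdown
    options={[
      { name: 'Windows', value: 'win' },
      { name: 'macOS', value: 'mac', suffix: <span>.dmg</span> },
      { name: 'Linux', value: 'linux' },
    ]}
    onValueChanged={(value) => console.log('selected', value)}
  >
    <button>Download</button>
  </Dropdown>
)

export default DownloadMenu
```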

View File

@ -0,0 +1,155 @@
.DropdownMenuContent,
.DropdownMenuSubContent {
min-width: 220px;
background-color: white;
border-radius: 6px;
overflow: hidden;
padding: 0px;
box-shadow:
0px 10px 38px -10px rgba(22, 23, 24, 0.35),
0px 10px 20px -15px rgba(22, 23, 24, 0.2);
animation-duration: 400ms;
animation-timing-function: cubic-bezier(0.16, 1, 0.3, 1);
will-change: transform, opacity;
}
.DropdownMenuContent[data-side='top'],
.DropdownMenuSubContent[data-side='top'] {
animation-name: slideDownAndFade;
}
.DropdownMenuContent[data-side='right'],
.DropdownMenuSubContent[data-side='right'] {
animation-name: slideLeftAndFade;
}
.DropdownMenuContent[data-side='bottom'],
.DropdownMenuSubContent[data-side='bottom'] {
animation-name: slideUpAndFade;
}
.DropdownMenuContent[data-side='left'],
.DropdownMenuSubContent[data-side='left'] {
animation-name: slideRightAndFade;
}
.DropdownMenuItem {
padding: 14px;
cursor: pointer;
outline: none;
flex: 1;
display: flex;
justify-content: space-between; /* Distribute space between children */
align-items: center; /* Optional: Align items vertically */
gap: 16px;
border-color: hsla(var(--app-border));
}
.DropdownMenuCheckboxItem,
.DropdownMenuRadioItem,
.DropdownMenuSubTrigger {
font-size: 13px;
line-height: 1;
border-radius: 3px;
display: flex;
align-items: center;
height: 25px;
padding: 0 0;
position: relative;
padding-left: 25px;
user-select: none;
outline: none;
}
.DropdownMenuItem[data-disabled],
.DropdownMenuCheckboxItem[data-disabled],
.DropdownMenuRadioItem[data-disabled],
.DropdownMenuSubTrigger[data-disabled] {
pointer-events: none;
}
.DropdownMenuItem[data-highlighted],
.DropdownMenuCheckboxItem[data-highlighted],
.DropdownMenuRadioItem[data-highlighted],
.DropdownMenuSubTrigger[data-highlighted] {
background-color: hsla(var(--secondary-bg));
}
.DropdownMenuSeparator {
height: 1px;
width: '100%';
background-color: hsla(var(--app-border));
}
.DropdownMenuItem:hover {
background-color: hsla(var(--secondary-bg));
}
.DropdownMenuLabel {
padding-left: 25px;
font-size: 12px;
line-height: 25px;
color: var(--mauve-11);
}
.DropdownMenuItemIndicator {
position: absolute;
left: 0;
width: 25px;
display: inline-flex;
align-items: center;
justify-content: center;
}
.DropdownMenuArrow {
fill: white;
}
.RightSlot {
margin-left: auto;
padding-left: 20px;
color: var(--mauve-11);
}
[data-highlighted] > .RightSlot {
color: white;
}
[data-disabled] .RightSlot {
color: var(--mauve-8);
}
@keyframes slideUpAndFade {
from {
opacity: 0;
transform: translateY(2px);
}
to {
opacity: 1;
transform: translateY(0);
}
}
@keyframes slideRightAndFade {
from {
opacity: 0;
transform: translateX(-2px);
}
to {
opacity: 1;
transform: translateX(0);
}
}
@keyframes slideDownAndFade {
from {
opacity: 0;
transform: translateY(-2px);
}
to {
opacity: 1;
transform: translateY(0);
}
}
@keyframes slideLeftAndFade {
from {
opacity: 0;
transform: translateX(2px);
}
to {
opacity: 1;
transform: translateX(0);
}
}

View File

@ -1,11 +1,7 @@
import React, { ReactNode } from 'react'
import React from 'react'
import * as SelectPrimitive from '@radix-ui/react-select'
import {
CheckIcon,
ChevronDownIcon,
ChevronUpIcon,
} from '@radix-ui/react-icons'
import { CheckIcon, ChevronDownIcon } from '@radix-ui/react-icons'
import './styles.scss'
import { twMerge } from 'tailwind-merge'

View File

@ -15,6 +15,7 @@ jest.mock('./core/Select/styles.scss', () => ({}))
jest.mock('./core/TextArea/styles.scss', () => ({}))
jest.mock('./core/Tabs/styles.scss', () => ({}))
jest.mock('./core/Accordion/styles.scss', () => ({}))
jest.mock('./core/Dropdown/styles.scss', () => ({}))
describe('Exports', () => {
it('exports all components and hooks', () => {

View File

@ -12,6 +12,7 @@ export * from './core/Select'
export * from './core/TextArea'
export * from './core/Tabs'
export * from './core/Accordion'
export * from './core/Dropdown'
export * from './hooks/useClipboard'
export * from './hooks/usePageLeave'

View File

@ -8,8 +8,7 @@
"bg": "215, 25%, 9%, 1",
"transparent": "0, 0%, 13%, 0.3",
"border": "0, 0%, 100%, 0.1",
"link": "221, 79%, 59%, 1",
"code-block": "0, 0%, 10%, 1"
"link": "221, 79%, 59%, 1"
},
"primary": {

View File

@ -8,8 +8,7 @@
"bg": "0, 0%, 13%, 1",
"transparent": "0, 0%, 13%, 0.3",
"border": "0, 0%, 100%, 0.1",
"link": "221, 83%, 53%, 1",
"code-block": "0, 0%, 17%, 1"
"link": "221, 83%, 53%, 1"
},
"primary": {

View File

@ -8,8 +8,7 @@
"bg": "0, 0%, 100%, 1",
"transparent": "0, 0%, 100%, 0.8",
"border": "0, 0%, 0%, 0.1",
"link": "221, 83%, 53%, 1",
"code-block": "0, 0%, 17%, 1"
"link": "221, 83%, 53%, 1"
},
"primary": {

View File

@ -8,8 +8,7 @@
"bg": "211, 100%, 15%, 1",
"transparent": "221, 79%, 59%, 0.08",
"border": "0, 0%, 100%, 0.1",
"link": "142, 76%, 36%, 1",
"code-block": "222, 96%, 10%, 1"
"link": "142, 76%, 36%, 1"
},
"primary": {

View File

@ -7,9 +7,9 @@ import { twMerge } from 'tailwind-merge'
import { MainViewState } from '@/constants/screens'
import { LEFT_PANEL_WIDTH } from '../LeftPanelContainer'
import { leftPanelWidthAtom } from '../LeftPanelContainer'
import { RIGHT_PANEL_WIDTH } from '../RightPanelContainer'
import { rightPanelWidthAtom } from '../RightPanelContainer'
import {
mainViewStateAtom,
@ -28,6 +28,8 @@ const CenterPanelContainer = ({ children, isShowStarterScreen }: Props) => {
const showLeftPanel = useAtomValue(showLeftPanelAtom)
const showRightPanel = useAtomValue(showRightPanelAtom)
const mainViewState = useAtomValue(mainViewStateAtom)
const rightPanelWidth = useAtomValue(rightPanelWidthAtom)
const leftPanelWidth = useAtomValue(leftPanelWidthAtom)
return (
<div
@ -36,7 +38,7 @@ const CenterPanelContainer = ({ children, isShowStarterScreen }: Props) => {
maxWidth: matches
? '100%'
: mainViewState === MainViewState.Thread && !isShowStarterScreen
? `calc(100% - (${showRightPanel ? Number(localStorage.getItem(RIGHT_PANEL_WIDTH)) : 0}px + ${showLeftPanel ? Number(localStorage.getItem(LEFT_PANEL_WIDTH)) : 0}px))`
? `calc(100% - (${showRightPanel ? rightPanelWidth : 0}px + ${showLeftPanel ? leftPanelWidth : 0}px))`
: '100%',
}}
>

View File

@ -69,7 +69,7 @@ describe('ErrorMessage Component', () => {
render(<ErrorMessage message={message} />)
expect(screen.getByText('troubleshooting assistance')).toBeInTheDocument()
expect(screen.getByText('Troubleshooting')).toBeInTheDocument()
})
it('opens troubleshooting modal when link is clicked', () => {
@ -84,7 +84,7 @@ describe('ErrorMessage Component', () => {
render(<ErrorMessage message={message} />)
fireEvent.click(screen.getByText('troubleshooting assistance'))
fireEvent.click(screen.getByText('Troubleshooting'))
expect(mockSetModalTroubleShooting).toHaveBeenCalledWith(true)
})
})

View File

@ -1,3 +1,5 @@
import { useRef, useState } from 'react'
import {
EngineManager,
ErrorCode,
@ -7,6 +9,8 @@ import {
import { useAtomValue, useSetAtom } from 'jotai'
import { CheckIcon, ClipboardIcon, SearchCodeIcon } from 'lucide-react'
import AutoLink from '@/containers/AutoLink'
import ModalTroubleShooting, {
modalTroubleShootingAtom,
@ -24,30 +28,25 @@ const ErrorMessage = ({ message }: { message: ThreadMessage }) => {
const setMainState = useSetAtom(mainViewStateAtom)
const setSelectedSettingScreen = useSetAtom(selectedSettingAtom)
const activeAssistant = useAtomValue(activeAssistantAtom)
const defaultDesc = () => {
return (
<>
<p>
{`Something's wrong.`} Access&nbsp;
<span
className="cursor-pointer text-[hsla(var(--app-link))] underline"
onClick={() => setModalTroubleShooting(true)}
>
troubleshooting assistance
</span>
&nbsp;now.
</p>
<ModalTroubleShooting />
</>
)
}
const errorDivRef = useRef<HTMLDivElement>(null)
const [copied, setCopied] = useState(false)
const getEngine = () => {
const engineName = activeAssistant?.model?.engine
return engineName ? EngineManager.instance().get(engineName) : null
}
const handleCopy = () => {
if (errorDivRef.current) {
const errorText = errorDivRef.current.innerText
if (errorText) {
navigator.clipboard.writeText(errorText)
setCopied(true)
setTimeout(() => setCopied(false), 2000)
}
}
}
const getErrorTitle = () => {
const engine = getEngine()
@ -69,9 +68,9 @@ const ErrorMessage = ({ message }: { message: ThreadMessage }) => {
</button>{' '}
and try again.
</span>
{defaultDesc()}
</>
)
default:
return (
<p
@ -90,7 +89,6 @@ const ErrorMessage = ({ message }: { message: ThreadMessage }) => {
{message?.content[0]?.text?.value && (
<AutoLink text={message?.content[0]?.text?.value} />
)}
{defaultDesc()}
</>
)}
</p>
@ -99,15 +97,54 @@ const ErrorMessage = ({ message }: { message: ThreadMessage }) => {
}
return (
<div className="mx-auto my-6 max-w-[700px]">
{!!message.metadata?.error && (
<div
key={message.id}
className="mx-6 flex flex-col items-center space-y-2 text-center font-medium text-[hsla(var(--text-secondary))]"
>
{getErrorTitle()}
<div className="mx-auto my-6 max-w-[700px] px-4">
<div
className="mx-auto max-w-[400px] rounded-lg border border-[hsla(var(--app-border))]"
key={message.id}
>
<div className="flex justify-between border-b border-inherit px-4 py-2">
<h6 className="text-[hsla(var(--destructive-bg))]">Error</h6>
<div className="flex gap-x-4 text-xs">
<div>
<span
className="flex cursor-pointer items-center gap-x-1 text-[hsla(var(--app-link))]"
onClick={() => setModalTroubleShooting(true)}
>
<SearchCodeIcon size={14} className="text-inherit" />
Troubleshooting
</span>
<ModalTroubleShooting />
</div>
<div
className="flex cursor-pointer items-center gap-x-1 text-[hsla(var(--text-secondary))]"
onClick={handleCopy}
>
{copied ? (
<>
<CheckIcon
size={14}
className="text-[hsla(var(--success-bg))]"
/>
Copied
</>
) : (
<>
<ClipboardIcon size={14} className="text-inherit" />
Copy
</>
)}
</div>
</div>
</div>
)}
<div className="max-h-[80px] w-full overflow-x-auto p-4 py-2">
<div
className="text-xs leading-relaxed text-[hsla(var(--text-secondary))]"
ref={errorDivRef}
>
{getErrorTitle()}
</div>
</div>
</div>
</div>
)
}

View File

@ -6,7 +6,7 @@ import { useActiveModel } from '@/hooks/useActiveModel'
import { useGetEngines } from '@/hooks/useEngineManagement'
import { toGibibytes } from '@/utils/converter'
import { toGigabytes } from '@/utils/converter'
import { isLocalEngine } from '@/utils/modelEngine'
@ -34,7 +34,7 @@ const TableActiveModel = () => {
<td className="px-4 py-2">
<Badge theme="secondary">
{activeModel.metadata?.size
? toGibibytes(activeModel.metadata?.size)
? toGigabytes(activeModel.metadata?.size)
: '-'}
</Badge>
</td>

View File

@ -16,7 +16,7 @@ import useGetSystemResources from '@/hooks/useGetSystemResources'
import { usePath } from '@/hooks/usePath'
import { toGibibytes } from '@/utils/converter'
import { toGigabytes } from '@/utils/converter'
import { utilizedMemory } from '@/utils/memory'
@ -134,8 +134,8 @@ const SystemMonitor = () => {
<div className="flex items-center justify-between gap-2">
<h6 className="font-bold">Memory</h6>
<span>
{toGibibytes(usedRam, { hideUnit: true })}/
{toGibibytes(totalRam, { hideUnit: true })} GB
{toGigabytes(usedRam, { hideUnit: true })}/
{toGigabytes(totalRam, { hideUnit: true })} GB
</span>
</div>
<div className="flex items-center gap-x-4">

View File

@ -1,11 +1,9 @@
import { Fragment } from 'react'
import { Button } from '@janhq/joi'
import { Button, Tooltip } from '@janhq/joi'
import { useAtom, useAtomValue, useSetAtom } from 'jotai'
import {
PanelLeftCloseIcon,
PanelLeftOpenIcon,
PanelRightOpenIcon,
PanelRightCloseIcon,
MinusIcon,
MenuIcon,
@ -13,6 +11,9 @@ import {
PaletteIcon,
XIcon,
PenSquareIcon,
Settings2,
History,
PanelLeftOpenIcon,
} from 'lucide-react'
import { twMerge } from 'tailwind-merge'
@ -91,7 +92,14 @@ const TopPanel = () => {
</Button>
) : (
<Button theme="icon" onClick={() => setShowLeftPanel(true)}>
<PanelLeftOpenIcon size={16} />
{mainViewState === MainViewState.Thread ? (
<Tooltip
trigger={<History size={16} />}
content="Threads History"
/>
) : (
<PanelLeftOpenIcon size={16} />
)}
</Button>
)}
</Fragment>
@ -135,7 +143,10 @@ const TopPanel = () => {
}
}}
>
<PanelRightOpenIcon size={16} />
<Tooltip
trigger={<Settings2 size={16} />}
content="Thread Settings"
/>
</Button>
)}
</Fragment>

View File

@ -21,7 +21,6 @@ import { SUCCESS_SET_NEW_DESTINATION } from '@/screens/Settings/Advanced/DataFol
import CancelModelImportModal from '@/screens/Settings/CancelModelImportModal'
import ChooseWhatToImportModal from '@/screens/Settings/ChooseWhatToImportModal'
import EditModelInfoModal from '@/screens/Settings/EditModelInfoModal'
import HuggingFaceRepoDetailModal from '@/screens/Settings/HuggingFaceRepoDetailModal'
import ImportModelOptionModal from '@/screens/Settings/ImportModelOptionModal'
import ImportingModelModal from '@/screens/Settings/ImportingModelModal'
import SelectingModelModal from '@/screens/Settings/SelectingModelModal'
@ -148,7 +147,6 @@ const BaseLayout = () => {
{importModelStage === 'CONFIRM_CANCEL' && <CancelModelImportModal />}
<ChooseWhatToImportModal />
<InstallingExtensionModal />
<HuggingFaceRepoDetailModal />
{showProductAnalyticPrompt && (
<div className="fixed bottom-4 z-50 m-4 max-w-full rounded-xl border border-[hsla(var(--app-border))] bg-[hsla(var(--app-bg))] p-6 shadow-2xl sm:bottom-8 sm:right-4 sm:m-0 sm:max-w-[400px]">
<div className="mb-4 flex items-center gap-x-2">

View File

@ -7,7 +7,7 @@ import {
} from 'react'
import { ScrollArea, useClickOutside, useMediaQuery } from '@janhq/joi'
import { useAtom, useAtomValue } from 'jotai'
import { atom, useAtom, useAtomValue } from 'jotai'
import { twMerge } from 'tailwind-merge'
@ -18,13 +18,12 @@ type Props = PropsWithChildren
const DEFAULT_LEFT_PANEL_WIDTH = 200
export const LEFT_PANEL_WIDTH = 'leftPanelWidth'
export const leftPanelWidthAtom = atom(DEFAULT_LEFT_PANEL_WIDTH)
const LeftPanelContainer = ({ children }: Props) => {
const [leftPanelRef, setLeftPanelRef] = useState<HTMLDivElement | null>(null)
const [isResizing, setIsResizing] = useState(false)
const [threadLeftPanelWidth, setLeftPanelWidth] = useState(
Number(localStorage.getItem(LEFT_PANEL_WIDTH)) || DEFAULT_LEFT_PANEL_WIDTH
)
const [leftPanelWidth, setLeftPanelWidth] = useAtom(leftPanelWidthAtom)
const [showLeftPanel, setShowLeftPanel] = useAtom(showLeftPanelAtom)
const matches = useMediaQuery('(max-width: 880px)')
const reduceTransparent = useAtomValue(reduceTransparentAtom)
@ -37,10 +36,12 @@ const LeftPanelContainer = ({ children }: Props) => {
const startResizing = useCallback(() => {
setIsResizing(true)
document.body.classList.add('select-none')
}, [])
const stopResizing = useCallback(() => {
setIsResizing(false)
document.body.classList.remove('select-none')
}, [])
const resize = useCallback(
@ -69,7 +70,7 @@ const LeftPanelContainer = ({ children }: Props) => {
}
}
},
[isResizing, leftPanelRef, setShowLeftPanel]
[isResizing, leftPanelRef, setLeftPanelWidth, setShowLeftPanel]
)
useEffect(() => {
@ -83,7 +84,7 @@ const LeftPanelContainer = ({ children }: Props) => {
window.removeEventListener('mousemove', resize)
window.removeEventListener('mouseup', stopResizing)
}
}, [resize, stopResizing])
}, [resize, setLeftPanelWidth, stopResizing])
return (
<div
@ -97,7 +98,7 @@ const LeftPanelContainer = ({ children }: Props) => {
reduceTransparent &&
'left-0 border-r border-[hsla(var(--app-border))] bg-[hsla(var(--left-panel-bg))]'
)}
style={{ width: showLeftPanel ? threadLeftPanelWidth : 0 }}
style={{ width: showLeftPanel ? leftPanelWidth : 0 }}
onMouseDown={(e) => isResizing && e.stopPropagation()}
>
<ScrollArea className="h-full w-full">

View File

@ -44,11 +44,6 @@ export default function ModelReload() {
Reloading model {stateModel.model?.id}
</span>
</div>
<div className="my-4 mb-2 text-center">
<span className="text-[hsla(var(--text-secondary)]">
Model is reloading to apply new changes.
</span>
</div>
</div>
)
}

View File

@ -0,0 +1,53 @@
import { motion } from 'framer-motion'
const Spinner = ({ size = 40, strokeWidth = 4 }) => {
const radius = size / 2 - strokeWidth
const circumference = 2 * Math.PI * radius
return (
<motion.svg
width={size}
height={size}
viewBox={`0 0 ${size} ${size}`}
style={{ overflow: 'visible' }}
animate={{ rotate: 360 }}
transition={{
repeat: Infinity,
duration: 2, // Adjust for desired speed
ease: 'linear',
}}
>
{/* Static background circle */}
<circle
cx={size / 2}
cy={size / 2}
r={radius}
stroke="#e0e0e0"
strokeWidth={strokeWidth}
fill="none"
/>
{/* Smooth animated arc */}
<motion.circle
cx={size / 2}
cy={size / 2}
r={radius}
stroke="currentColor"
strokeWidth={strokeWidth}
fill="none"
strokeDasharray={circumference}
strokeDashoffset={circumference * 0.9} // Adjusted offset for smooth arc
animate={{
strokeDashoffset: [circumference, circumference * 0.1], // Continuous motion
}}
transition={{
repeat: Infinity,
duration: 1.5, // Adjust for animation speed
ease: 'easeInOut', // Smooth easing
}}
strokeLinecap="round" // For a rounded end
/>
</motion.svg>
)
}
export default Spinner
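
A minimal usage sketch for the spinner above, assuming it is exported from a Loader directory as the search-input change later in this diff suggests; the wrapper component, its import path, and its class names are illustrative.

import Spinner from '@/containers/Loader/Spinner'

// Renders a small 16px spinner next to a label, mirroring the
// size/strokeWidth values used by the model search input.
const LoadingHint = () => (
  <div className="flex items-center gap-x-2">
    <Spinner size={16} strokeWidth={2} />
    <span>Loading…</span>
  </div>
)

export default LoadingHint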

View File

@ -1,4 +1,4 @@
import { memo } from 'react'
import { memo, useEffect, useState } from 'react'
import { motion as m } from 'framer-motion'
import { useAtomValue } from 'jotai'
@ -12,10 +12,27 @@ import LocalServerScreen from '@/screens/LocalServer'
import SettingsScreen from '@/screens/Settings'
import ThreadScreen from '@/screens/Thread'
import { mainViewStateAtom } from '@/helpers/atoms/App.atom'
import {
mainViewStateAtom,
showSystemMonitorPanelAtom,
} from '@/helpers/atoms/App.atom'
const MainViewContainer = () => {
const mainViewState = useAtomValue(mainViewStateAtom)
const showSystemMonitorPanel = useAtomValue(showSystemMonitorPanelAtom)
const [height, setHeight] = useState<number>(0)
useEffect(() => {
if (showSystemMonitorPanel) {
const element = document.querySelector('.system-monitor-panel')
if (element) {
setHeight(element.clientHeight) // You can also use offsetHeight if needed
}
} else {
setHeight(0)
}
}, [showSystemMonitorPanel])
let children = null
switch (mainViewState) {
@ -37,7 +54,10 @@ const MainViewContainer = () => {
}
return (
<div className={twMerge('relative flex w-[calc(100%-48px)]')}>
<div
className={twMerge('relative flex w-[calc(100%-48px)]')}
style={{ height: `calc(100% - ${height}px)` }}
>
<div className="w-full">
<m.div
key={mainViewState}

View File

@ -1,7 +1,5 @@
import { useCallback } from 'react'
import { Model } from '@janhq/core'
import { Modal, Button, Progress, ModalClose } from '@janhq/joi'
import { useAtomValue, useSetAtom } from 'jotai'
@ -16,22 +14,22 @@ import {
import { formatDownloadPercentage } from '@/utils/converter'
type Props = {
model: Model
modelId: string
isFromList?: boolean
}
const ModalCancelDownload = ({ model, isFromList }: Props) => {
const ModalCancelDownload = ({ modelId, isFromList }: Props) => {
const { abortModelDownload } = useDownloadModel()
const removeDownloadState = useSetAtom(removeDownloadStateAtom)
const allDownloadStates = useAtomValue(modelDownloadStateAtom)
const downloadState = allDownloadStates[model.id]
const downloadState = allDownloadStates[modelId]
const cancelText = `Cancel ${formatDownloadPercentage(downloadState?.percent ?? 0)}`
const onAbortDownloadClick = useCallback(() => {
removeDownloadState(model.id)
abortModelDownload(downloadState?.modelId ?? model.id)
}, [downloadState, abortModelDownload, removeDownloadState, model])
removeDownloadState(modelId)
abortModelDownload(downloadState?.modelId ?? modelId)
}, [downloadState, abortModelDownload, removeDownloadState, modelId])
return (
<Modal
@ -42,7 +40,11 @@ const ModalCancelDownload = ({ model, isFromList }: Props) => {
{cancelText}
</Button>
) : (
<Button variant="soft">
<Button
className="text-[hsla(var(--primary-bg))]"
variant="soft"
theme="ghost"
>
<div className="flex items-center space-x-2">
<span className="inline-block">Cancel</span>
<Progress

View File

@ -0,0 +1,99 @@
import { useCallback, useMemo } from 'react'
import { Button, Tooltip } from '@janhq/joi'
import { useAtomValue, useSetAtom } from 'jotai'
import { MainViewState } from '@/constants/screens'
import { useCreateNewThread } from '@/hooks/useCreateNewThread'
import useDownloadModel from '@/hooks/useDownloadModel'
import ModalCancelDownload from '../ModalCancelDownload'
import { mainViewStateAtom } from '@/helpers/atoms/App.atom'
import { assistantsAtom } from '@/helpers/atoms/Assistant.atom'
import {
downloadedModelsAtom,
getDownloadingModelAtom,
} from '@/helpers/atoms/Model.atom'
interface Props {
id: string
theme?: 'primary' | 'ghost' | 'icon' | 'destructive' | undefined
variant?: 'solid' | 'soft' | 'outline' | undefined
}
const ModelDownloadButton = ({ id, theme, variant }: Props) => {
const { downloadModel } = useDownloadModel()
const downloadingModels = useAtomValue(getDownloadingModelAtom)
const downloadedModels = useAtomValue(downloadedModelsAtom)
const assistants = useAtomValue(assistantsAtom)
const setMainViewState = useSetAtom(mainViewStateAtom)
const { requestCreateNewThread } = useCreateNewThread()
const isDownloaded = useMemo(
() => downloadedModels.some((md) => md.id === id),
[downloadedModels, id]
)
const isDownloading = useMemo(
() => downloadingModels.some((md) => md === id),
[downloadingModels, id]
)
const onDownloadClick = useCallback(() => {
downloadModel(id)
}, [id, downloadModel])
const onUseModelClick = useCallback(async () => {
const downloadedModel = downloadedModels.find((e) => e.id === id)
if (downloadedModel)
await requestCreateNewThread(assistants[0], downloadedModel)
setMainViewState(MainViewState.Thread)
}, [
assistants,
downloadedModels,
setMainViewState,
requestCreateNewThread,
id,
])
const defaultButton = (
<Button
theme={theme ? theme : 'primary'}
variant={variant ? variant : 'solid'}
onClick={(e) => {
e.stopPropagation()
onDownloadClick()
}}
>
Download
</Button>
)
const downloadingButton = <ModalCancelDownload modelId={id} />
const downloadedButton = (
<Tooltip
trigger={
<Button
onClick={onUseModelClick}
data-testid={`use-model-btn-${id}`}
variant="outline"
theme="ghost"
className="min-w-[70px]"
>
Use
</Button>
}
content="Threads are disabled while the server is running"
/>
)
return (
<>
{isDownloading
? downloadingButton
: isDownloaded
? downloadedButton
: defaultButton}
</>
)
}
export default ModelDownloadButton
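
A hedged usage sketch for the button above; only the id, theme and variant props come from the component's Props interface, while the import path and surrounding row markup are illustrative.

import ModelDownloadButton from '@/containers/ModelDownloadButton'

// Illustrative hub row: the button resolves to Download, a cancelable
// downloading state, or a "Use" action based on the model's download state.
const HubModelRow = ({ modelId }: { modelId: string }) => (
  <div className="flex items-center justify-between">
    <span>{modelId}</span>
    <ModelDownloadButton id={modelId} theme="ghost" variant="outline" />
  </div>
)

export default HubModelRow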

View File

@ -37,7 +37,7 @@ import useRecommendedModel from '@/hooks/useRecommendedModel'
import useUpdateModelParameters from '@/hooks/useUpdateModelParameters'
import { formatDownloadPercentage, toGibibytes } from '@/utils/converter'
import { formatDownloadPercentage, toGigabytes } from '@/utils/converter'
import { manualRecommendationModel } from '@/utils/model'
import { getLogoEngine } from '@/utils/modelEngine'
@ -481,13 +481,13 @@ const ModelDropdown = ({
{model.name}
</p>
<ModelLabel
metadata={model.metadata}
size={model.metadata?.size}
compact
/>
</div>
<div className="flex items-center gap-2 text-[hsla(var(--text-tertiary))]">
<span className="font-medium">
{toGibibytes(model.metadata?.size)}
{toGigabytes(model.metadata?.size)}
</span>
{!isDownloading ? (
<DownloadCloudIcon
@ -577,14 +577,14 @@ const ModelDropdown = ({
{model.name}
</p>
<ModelLabel
metadata={model.metadata}
size={model.metadata?.size}
compact
/>
</div>
<div className="flex items-center gap-2 text-[hsla(var(--text-tertiary))]">
{!isDownloaded && (
<span className="font-medium">
{toGibibytes(model.metadata?.size)}
{toGigabytes(model.metadata?.size)}
</span>
)}
{!isDownloading && !isDownloaded ? (

View File

@ -36,46 +36,6 @@ describe('ModelLabel', () => {
jest.clearAllMocks()
})
it('renders NotEnoughMemoryLabel when minimumRamModel is greater than totalRam', async () => {
mockUseAtomValue
.mockReturnValueOnce(0)
.mockReturnValueOnce(0)
.mockReturnValueOnce(0)
mockUseActiveModel.mockReturnValue({
activeModel: { metadata: { size: 0 } },
})
mockUseSettings.mockReturnValue({ settings: { run_mode: 'cpu' } })
render(<ModelLabel {...defaultProps} />)
await waitFor(() => {
expect(screen.getByText('Not enough RAM')).toBeDefined()
})
})
it('renders SlowOnYourDeviceLabel when minimumRamModel is less than totalRam but greater than availableRam', async () => {
mockUseAtomValue
.mockReturnValueOnce(100)
.mockReturnValueOnce(50)
.mockReturnValueOnce(10)
mockUseActiveModel.mockReturnValue({
activeModel: { metadata: { size: 0 } },
})
mockUseSettings.mockReturnValue({ settings: { run_mode: 'cpu' } })
const props = {
...defaultProps,
metadata: {
...defaultProps.metadata,
size: 50,
},
}
render(<ModelLabel {...props} />)
await waitFor(() => {
expect(screen.getByText('Slow on your device')).toBeDefined()
})
})
it('renders nothing when minimumRamModel is less than availableRam', () => {
mockUseAtomValue
.mockReturnValueOnce(100)

View File

@ -1,7 +1,5 @@
import React from 'react'
import { ModelMetadata } from '@janhq/core'
import { Badge } from '@janhq/joi'
import { useAtomValue } from 'jotai'
import { useActiveModel } from '@/hooks/useActiveModel'
@ -19,18 +17,11 @@ import {
} from '@/helpers/atoms/SystemBar.atom'
type Props = {
metadata: ModelMetadata
size?: number
compact?: boolean
}
const UnsupportedModel = () => {
return (
<Badge className="space-x-1 rounded-md" theme="warning">
<span>Coming Soon</span>
</Badge>
)
}
const ModelLabel = ({ metadata, compact }: Props) => {
const ModelLabel = ({ size, compact }: Props) => {
const { activeModel } = useActiveModel()
const totalRam = useAtomValue(totalRamAtom)
const usedRam = useAtomValue(usedRamAtom)
@ -59,11 +50,7 @@ const ModelLabel = ({ metadata, compact }: Props) => {
return null
}
return metadata?.tags?.includes('Coming Soon') ? (
<UnsupportedModel />
) : (
getLabel(metadata?.size ?? 0)
)
return getLabel(size ?? 0)
}
export default React.memo(ModelLabel)

View File

@ -1,18 +1,16 @@
import React, { ChangeEvent, useCallback, useState } from 'react'
import React, { ChangeEvent, useCallback, useState, useRef } from 'react'
import { Input } from '@janhq/joi'
import { useSetAtom } from 'jotai'
import { SearchIcon } from 'lucide-react'
import { useDebouncedCallback } from 'use-debounce'
import { toaster } from '@/containers/Toast'
import { useGetHFRepoData } from '@/hooks/useGetHFRepoData'
import {
importHuggingFaceModelStageAtom,
importingHuggingFaceRepoDataAtom,
} from '@/helpers/atoms/HuggingFace.atom'
useGetModelSources,
useModelSourcesMutation,
} from '@/hooks/useModelSource'
import Spinner from '../Loader/Spinner'
type Props = {
onSearchLocal?: (searchText: string) => void
@ -20,37 +18,28 @@ type Props = {
const ModelSearch = ({ onSearchLocal }: Props) => {
const [searchText, setSearchText] = useState('')
const { getHfRepoData } = useGetHFRepoData()
const setImportingHuggingFaceRepoData = useSetAtom(
importingHuggingFaceRepoDataAtom
)
const setImportHuggingFaceModelStage = useSetAtom(
importHuggingFaceModelStageAtom
)
const [isSearching, setSearching] = useState(false)
const { mutate } = useGetModelSources()
const { addModelSource } = useModelSourcesMutation()
const inputRef = useRef<HTMLInputElement | null>(null)
const debounced = useDebouncedCallback(async () => {
if (searchText.indexOf('/') === -1) {
// If we don't find / in the text, perform a local search
onSearchLocal?.(searchText)
return
}
// Attempt to search local
onSearchLocal?.(searchText)
try {
const data = await getHfRepoData(searchText)
setImportingHuggingFaceRepoData(data)
setImportHuggingFaceModelStage('REPO_DETAIL')
} catch (err) {
let errMessage = 'Unexpected Error'
if (err instanceof Error) {
errMessage = err.message
}
toaster({
title: errMessage,
type: 'error',
setSearching(true)
// Attempt to search model source
addModelSource(searchText)
.then(() => mutate())
.then(() => onSearchLocal?.(searchText))
.catch((e) => {
console.debug(e)
})
console.error(err)
}
.finally(() => setSearching(false))
}, 300)
const onSearchChanged = useCallback(
@ -80,13 +69,24 @@ const ModelSearch = ({ onSearchLocal }: Props) => {
return (
<Input
prefixIcon={<SearchIcon size={16} />}
placeholder="Search or paste Hugging Face URL"
ref={inputRef}
prefixIcon={
isSearching ? (
<Spinner size={16} strokeWidth={2} />
) : (
<SearchIcon size={16} />
)
}
placeholder="Search or enter Hugging Face URL"
onChange={onSearchChanged}
onKeyDown={onKeyDown}
value={searchText}
clearable={searchText.length > 0}
onClear={onClear}
className="border-0 bg-[hsla(var(--app-bg))]"
onClick={() => {
onSearchLocal?.(inputRef.current?.value ?? '')
}}
/>
)
}

View File

@ -4,25 +4,21 @@ import { useSetAtom } from 'jotai'
import { useDebouncedCallback } from 'use-debounce'
import { useGetHFRepoData } from '@/hooks/useGetHFRepoData'
import { MainViewState } from '@/constants/screens'
import { useModelSourcesMutation } from '@/hooks/useModelSource'
import { loadingModalInfoAtom } from '../LoadingModal'
import { toaster } from '../Toast'
import {
importHuggingFaceModelStageAtom,
importingHuggingFaceRepoDataAtom,
} from '@/helpers/atoms/HuggingFace.atom'
import { mainViewStateAtom } from '@/helpers/atoms/App.atom'
import { modelDetailAtom } from '@/helpers/atoms/Model.atom'
const DeepLinkListener: React.FC = () => {
const { getHfRepoData } = useGetHFRepoData()
const { addModelSource } = useModelSourcesMutation()
const setLoadingInfo = useSetAtom(loadingModalInfoAtom)
const setImportingHuggingFaceRepoData = useSetAtom(
importingHuggingFaceRepoDataAtom
)
const setImportHuggingFaceModelStage = useSetAtom(
importHuggingFaceModelStageAtom
)
const setMainView = useSetAtom(mainViewStateAtom)
const setModelDetail = useSetAtom(modelDetailAtom)
const handleDeepLinkAction = useDebouncedCallback(
async (deepLinkAction: DeepLinkAction) => {
@ -38,17 +34,17 @@ const DeepLinkListener: React.FC = () => {
try {
setLoadingInfo({
title: 'Getting Hugging Face models',
title: 'Getting Hugging Face model details',
message: 'Please wait..',
})
const data = await getHfRepoData(deepLinkAction.resource)
setImportingHuggingFaceRepoData(data)
setImportHuggingFaceModelStage('REPO_DETAIL')
await addModelSource(deepLinkAction.resource)
setLoadingInfo(undefined)
setMainView(MainViewState.Hub)
setModelDetail(deepLinkAction.resource)
} catch (err) {
setLoadingInfo(undefined)
toaster({
title: 'Failed to get Hugging Face models',
title: 'Failed to get Hugging Face model details',
description: err instanceof Error ? err.message : 'Unexpected Error',
type: 'error',
})

View File

@ -4,15 +4,11 @@ import { PropsWithChildren } from 'react'
import { Toaster } from 'react-hot-toast'
import { SWRConfig } from 'swr'
import EventListener from '@/containers/Providers/EventListener'
import JotaiWrapper from '@/containers/Providers/Jotai'
import ThemeWrapper from '@/containers/Providers/Theme'
import Umami from '@/utils/umami'
import { CoreConfigurator } from './CoreConfigurator'
import DataLoader from './DataLoader'
@ -28,7 +24,6 @@ const Providers = ({ children }: PropsWithChildren) => {
<SWRConfigProvider>
<ThemeWrapper>
<JotaiWrapper>
<Umami />
<CoreConfigurator>
<>
<Responsive />

View File

@ -1,9 +1,11 @@
import '@testing-library/jest-dom'
import { waitFor } from '@testing-library/react'
import React from 'react'
import { render, fireEvent } from '@testing-library/react'
import RightPanelContainer from './index'
import { useAtom } from 'jotai'
import RightPanelContainer, { rightPanelWidthAtom } from './index'
import { showRightPanelAtom } from '@/helpers/atoms/App.atom'
import { reduceTransparentAtom } from '@/helpers/atoms/Setting.atom'
// Mocking ResizeObserver
class ResizeObserver {
@ -34,24 +36,24 @@ jest.mock('jotai', () => {
const originalModule = jest.requireActual('jotai')
return {
...originalModule,
useAtom: jest.fn(),
useAtomValue: jest.fn(),
useAtomValue: jest.fn((atom) => {
if (atom === reduceTransparentAtom) return false
if (atom === showRightPanelAtom) return true
}),
useAtom: jest.fn((atom) => {
if (atom === rightPanelWidthAtom) return [280, jest.fn()]
if (atom === showRightPanelAtom) return [true, mockSetShowRightPanel]
return [null, jest.fn()]
}),
}
})
const mockSetShowRightPanel = jest.fn()
const mockShowRightPanel = true // Change this to test the panel visibility
beforeEach(() => {
// Setting up the localStorage mock
localStorage.clear()
localStorage.setItem('rightPanelWidth', '280') // Setting a default width
// Mocking the atom behavior
;(useAtom as jest.Mock).mockImplementation(() => [
mockShowRightPanel,
mockSetShowRightPanel,
])
})
describe('RightPanelContainer', () => {
@ -66,12 +68,15 @@ describe('RightPanelContainer', () => {
expect(getByText('Child Content')).toBeInTheDocument()
})
it('initializes width from localStorage', () => {
it('initializes width from localStorage', async () => {
const { container } = render(<RightPanelContainer />)
// Check the width from localStorage is applied
const rightPanel = container.firstChild as HTMLDivElement
expect(rightPanel.style.width).toBe('280px') // Width from localStorage
// Wait for the width to be applied
await waitFor(() => {
expect(rightPanel.style.width).toBe('280px') // Correct width from localStorage
})
})
it('changes width on resizing', () => {

View File

@ -7,7 +7,7 @@ import {
} from 'react'
import { ScrollArea, useClickOutside, useMediaQuery } from '@janhq/joi'
import { useAtom, useAtomValue } from 'jotai'
import { atom, useAtom, useAtomValue } from 'jotai'
import { twMerge } from 'tailwind-merge'
@ -19,11 +19,11 @@ type Props = PropsWithChildren
const DEFAULT_RIGHT_PANEL_WIDTH = 280
export const RIGHT_PANEL_WIDTH = 'rightPanelWidth'
export const rightPanelWidthAtom = atom(DEFAULT_RIGHT_PANEL_WIDTH)
const RightPanelContainer = ({ children }: Props) => {
const [isResizing, setIsResizing] = useState(false)
const [threadRightPanelWidth, setRightPanelWidth] = useState(
Number(localStorage.getItem(RIGHT_PANEL_WIDTH)) || DEFAULT_RIGHT_PANEL_WIDTH
)
const [rightPanelWidth, setRightPanelWidth] = useAtom(rightPanelWidthAtom)
const [rightPanelRef, setRightPanelRef] = useState<HTMLDivElement | null>(
null
)
@ -40,10 +40,12 @@ const RightPanelContainer = ({ children }: Props) => {
const startResizing = useCallback(() => {
setIsResizing(true)
document.body.classList.add('select-none')
}, [])
const stopResizing = useCallback(() => {
setIsResizing(false)
document.body.classList.remove('select-none')
}, [])
const resize = useCallback(
@ -72,7 +74,7 @@ const RightPanelContainer = ({ children }: Props) => {
}
}
},
[isResizing, rightPanelRef, setShowRightPanel]
[isResizing, rightPanelRef, setRightPanelWidth, setShowRightPanel]
)
useEffect(() => {
@ -86,7 +88,7 @@ const RightPanelContainer = ({ children }: Props) => {
window.removeEventListener('mousemove', resize)
window.removeEventListener('mouseup', stopResizing)
}
}, [resize, stopResizing])
}, [resize, setRightPanelWidth, stopResizing])
return (
<div
@ -100,7 +102,7 @@ const RightPanelContainer = ({ children }: Props) => {
reduceTransparent &&
'border-l border-[hsla(var(--app-border))] bg-[hsla(var(--right-panel-bg))]'
)}
style={{ width: showRightPanel ? threadRightPanelWidth : 0 }}
style={{ width: showRightPanel ? rightPanelWidth : 0 }}
onMouseDown={(e) => isResizing && e.preventDefault()}
>
<ScrollArea className="h-full w-full">

View File

@ -30,3 +30,30 @@ export const copyOverInstructionEnabledAtom = atomWithStorage(
COPY_OVER_INSTRUCTION_ENABLED,
false
)
/**
* App Hub banner atom - stores the last selected banner - defaults to undefined
*/
const appBannerHubStorageAtom = atomWithStorage<string | undefined>(
'appBannerHub',
undefined,
undefined,
{
getOnInit: true,
}
)
/**
* App Hub banner image - retrieved from appBannerHubStorageAtom - falls back to a random banner
*/
export const getAppBannerHubAtom = atom<string>(
(get) =>
get(appBannerHubStorageAtom) ??
`./images/HubBanner/banner-${Math.floor(Math.random() * 30) + 1}.jpg`
)
/**
* Set App Hub Banner - store in appBannerHubStorageAtom
*/
export const setAppBannerHubAtom = atom(null, (get, set, banner: string) => {
set(appBannerHubStorageAtom, banner)
})
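
A small sketch of how a settings control might read and persist the Hub banner through these atoms, assuming they live in the same Setting.atom module as the copy-over-instruction atom above; the component and the banner path passed to the setter are illustrative.

import { useAtomValue, useSetAtom } from 'jotai'

import {
  getAppBannerHubAtom,
  setAppBannerHubAtom,
} from '@/helpers/atoms/Setting.atom'

// Reads the persisted (or randomly chosen) banner and lets the user pin one.
const HubBannerPicker = () => {
  const banner = useAtomValue(getAppBannerHubAtom)
  const setBanner = useSetAtom(setAppBannerHubAtom)

  return (
    <img
      src={banner}
      alt="Hub banner"
      onClick={() => setBanner('./images/HubBanner/banner-1.jpg')}
    />
  )
}

export default HubBannerPicker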

View File

@ -5,8 +5,15 @@ const EXPERIMENTAL_FEATURE = 'experimentalFeature'
const PROXY_FEATURE_ENABLED = 'proxyFeatureEnabled'
const VULKAN_ENABLED = 'vulkanEnabled'
const IGNORE_SSL = 'ignoreSSLFeature'
const VERIFY_PROXY_SSL = 'verifyProxySSL'
const VERIFY_PROXY_HOST_SSL = 'verifyProxyHostSSL'
const VERIFY_PEER_SSL = 'verifyPeerSSL'
const VERIFY_HOST_SSL = 'verifyHostSSL'
const HTTPS_PROXY_FEATURE = 'httpsProxyFeature'
const PROXY_USERNAME = 'proxyUsername'
const PROXY_PASSWORD = 'proxyPassword'
const QUICK_ASK_ENABLED = 'quickAskEnabled'
const NO_PROXY = 'noProxy'
export const janDataFolderPathAtom = atom('')
@ -27,9 +34,56 @@ export const proxyAtom = atomWithStorage(HTTPS_PROXY_FEATURE, '', undefined, {
getOnInit: true,
})
export const proxyUsernameAtom = atomWithStorage(
PROXY_USERNAME,
'',
undefined,
{ getOnInit: true }
)
export const proxyPasswordAtom = atomWithStorage(
PROXY_PASSWORD,
'',
undefined,
{ getOnInit: true }
)
export const ignoreSslAtom = atomWithStorage(IGNORE_SSL, false, undefined, {
getOnInit: true,
})
export const noProxyAtom = atomWithStorage(NO_PROXY, '', undefined, {
getOnInit: false,
})
export const verifyProxySslAtom = atomWithStorage(
VERIFY_PROXY_SSL,
false,
undefined,
{ getOnInit: true }
)
export const verifyProxyHostSslAtom = atomWithStorage(
VERIFY_PROXY_HOST_SSL,
false,
undefined,
{ getOnInit: true }
)
export const verifyPeerSslAtom = atomWithStorage(
VERIFY_PEER_SSL,
false,
undefined,
{ getOnInit: true }
)
export const verifyHostSslAtom = atomWithStorage(
VERIFY_HOST_SSL,
false,
undefined,
{ getOnInit: true }
)
export const vulkanEnabledAtom = atomWithStorage(
VULKAN_ENABLED,
false,

View File

@ -1,14 +0,0 @@
import { importHuggingFaceModelStageAtom } from './HuggingFace.atom';
import { importingHuggingFaceRepoDataAtom } from './HuggingFace.atom';
test('importHuggingFaceModelStageAtom should have initial value of NONE', () => {
const result = importHuggingFaceModelStageAtom.init;
expect(result).toBe('NONE');
});
test('importingHuggingFaceRepoDataAtom should have initial value of undefined', () => {
const result = importingHuggingFaceRepoDataAtom.init;
expect(result).toBeUndefined();
});

View File

@ -1,12 +0,0 @@
import { HuggingFaceRepoData } from '@janhq/core'
import { atom } from 'jotai'
// modals
export type ImportHuggingFaceModelStage = 'NONE' | 'REPO_DETAIL'
export const importingHuggingFaceRepoDataAtom = atom<
HuggingFaceRepoData | undefined
>(undefined)
export const importHuggingFaceModelStageAtom =
atom<ImportHuggingFaceModelStage>('NONE')

View File

@ -60,6 +60,11 @@ export const showEngineListModelAtom = atom<string[]>([
InferenceEngine.cortex_tensorrtllm,
])
/**
* Atom to store the model id shown on the current model detail page
*/
export const modelDetailAtom = atom<string | undefined>(undefined)
/// End Models Atom
/// Model Download Atom

View File

@ -0,0 +1,137 @@
import { renderHook, act } from '@testing-library/react'
import { useConfigurations } from './useConfigurations'
import { useAtomValue } from 'jotai'
import { extensionManager } from '@/extension'
// Mock dependencies
jest.mock('jotai', () => {
const originalModule = jest.requireActual('jotai')
return {
...originalModule,
useAtomValue: jest.fn(),
}
})
jest.mock('@/extension', () => ({
extensionManager: {
get: jest.fn(),
},
}))
describe('useConfigurations', () => {
beforeEach(() => {
jest.clearAllMocks()
})
it('should call configurePullOptions with correct proxy settings when proxy is enabled', () => {
// Explicitly set mock return values for each call
(useAtomValue as jest.Mock)
.mockReturnValueOnce(true) // proxyEnabled
.mockReturnValueOnce('http://proxy.example.com') // proxyUrl
.mockReturnValueOnce('') // proxyIgnoreSSL
.mockReturnValueOnce(true) // verifyProxySSL
.mockReturnValueOnce(true) // verifyProxyHostSSL
.mockReturnValueOnce(true) // verifyPeerSSL
.mockReturnValueOnce(true) // verifyHostSSL
.mockReturnValueOnce('') // noProxy
.mockReturnValueOnce('username') // proxyUsername
.mockReturnValueOnce('password') // proxyPassword
const mockConfigurePullOptions = jest.fn()
;(extensionManager.get as jest.Mock).mockReturnValue({
configurePullOptions: mockConfigurePullOptions,
})
const { result } = renderHook(() => useConfigurations())
act(() => {
result.current.configurePullOptions()
})
expect(mockConfigurePullOptions).toHaveBeenCalledWith({
proxy_username: 'username',
proxy_password: 'password',
proxy_url: 'http://proxy.example.com',
verify_proxy_ssl: true,
verify_proxy_host_ssl: true,
verify_peer_ssl: true,
verify_host_ssl: true,
no_proxy: '',
})
})
it('should call configurePullOptions with empty proxy settings when proxy is disabled', () => {
// Mock atom values
;(useAtomValue as jest.Mock)
.mockReturnValueOnce(false) // proxyEnabled
.mockReturnValueOnce('') // proxyUrl
.mockReturnValueOnce(false) // proxyIgnoreSSL
.mockReturnValueOnce('') // noProxy
.mockReturnValueOnce('') // proxyUsername
.mockReturnValueOnce('') // proxyPassword
.mockReturnValueOnce(false) // verifyProxySSL
.mockReturnValueOnce(false) // verifyProxyHostSSL
.mockReturnValueOnce(false) // verifyPeerSSL
.mockReturnValueOnce(false) // verifyHostSSL
const mockConfigurePullOptions = jest.fn()
;(extensionManager.get as jest.Mock).mockReturnValue({
configurePullOptions: mockConfigurePullOptions,
})
const { result } = renderHook(() => useConfigurations())
act(() => {
result.current.configurePullOptions()
})
expect(mockConfigurePullOptions).toHaveBeenCalledWith({
proxy_username: '',
proxy_password: '',
proxy_url: '',
verify_proxy_ssl: false,
verify_proxy_host_ssl: false,
verify_peer_ssl: false,
verify_host_ssl: false,
no_proxy: '',
})
})
it('should set all verify SSL to false when proxyIgnoreSSL is true', () => {
// Mock atom values
;(useAtomValue as jest.Mock)
.mockReturnValueOnce(true) // proxyEnabled
.mockReturnValueOnce('http://proxy.example.com') // proxyUrl
.mockReturnValueOnce(true) // proxyIgnoreSSL
.mockReturnValueOnce(true) // verifyProxySSL
.mockReturnValueOnce(true) // verifyProxyHostSSL
.mockReturnValueOnce(true) // verifyPeerSSL
.mockReturnValueOnce(true) // verifyHostSSL
.mockReturnValueOnce('') // noProxy
.mockReturnValueOnce('username') // proxyUsername
.mockReturnValueOnce('password') // proxyPassword
const mockConfigurePullOptions = jest.fn()
;(extensionManager.get as jest.Mock).mockReturnValue({
configurePullOptions: mockConfigurePullOptions,
})
const { result } = renderHook(() => useConfigurations())
act(() => {
result.current.configurePullOptions()
})
expect(mockConfigurePullOptions).toHaveBeenCalledWith({
proxy_username: 'username',
proxy_password: 'password',
proxy_url: 'http://proxy.example.com',
verify_proxy_ssl: false,
verify_proxy_host_ssl: false,
verify_peer_ssl: false,
verify_host_ssl: false,
no_proxy: '',
})
})
})

View File

@ -6,14 +6,28 @@ import { useAtomValue } from 'jotai'
import { extensionManager } from '@/extension'
import {
ignoreSslAtom,
noProxyAtom,
proxyAtom,
proxyEnabledAtom,
proxyPasswordAtom,
proxyUsernameAtom,
verifyHostSslAtom,
verifyPeerSslAtom,
verifyProxyHostSslAtom,
verifyProxySslAtom,
} from '@/helpers/atoms/AppConfig.atom'
export const useConfigurations = () => {
const proxyEnabled = useAtomValue(proxyEnabledAtom)
const proxyUrl = useAtomValue(proxyAtom)
const proxyIgnoreSSL = useAtomValue(ignoreSslAtom)
const verifyProxySSL = useAtomValue(verifyProxySslAtom)
const verifyProxyHostSSL = useAtomValue(verifyProxyHostSslAtom)
const verifyPeerSSL = useAtomValue(verifyPeerSslAtom)
const verifyHostSSL = useAtomValue(verifyHostSslAtom)
const noProxy = useAtomValue(noProxyAtom)
const proxyUsername = useAtomValue(proxyUsernameAtom)
const proxyPassword = useAtomValue(proxyPasswordAtom)
const configurePullOptions = useCallback(() => {
extensionManager
@ -21,20 +35,45 @@ export const useConfigurations = () => {
?.configurePullOptions(
proxyEnabled
? {
proxy_username: proxyUsername,
proxy_password: proxyPassword,
proxy_url: proxyUrl,
verify_peer_ssl: !proxyIgnoreSSL,
verify_proxy_ssl: proxyIgnoreSSL ? false : verifyProxySSL,
verify_proxy_host_ssl: proxyIgnoreSSL
? false
: verifyProxyHostSSL,
verify_peer_ssl: proxyIgnoreSSL ? false : verifyPeerSSL,
verify_host_ssl: proxyIgnoreSSL ? false : verifyHostSSL,
no_proxy: noProxy,
}
: {
proxy_username: '',
proxy_password: '',
proxy_url: '',
verify_proxy_ssl: false,
verify_proxy_host_ssl: false,
verify_peer_ssl: false,
verify_host_ssl: false,
no_proxy: '',
}
)
}, [proxyEnabled, proxyUrl, proxyIgnoreSSL])
}, [
proxyEnabled,
proxyUrl,
proxyIgnoreSSL,
noProxy,
proxyUsername,
proxyPassword,
verifyProxySSL,
verifyProxyHostSSL,
verifyPeerSSL,
verifyHostSSL,
])
useEffect(() => {
configurePullOptions()
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [])
}, [configurePullOptions])
return {
configurePullOptions,
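
The branching above can be read as a pure mapping: a disabled proxy blanks every option, and ignore-SSL wins over the individual verify flags. A standalone sketch of that logic follows; the type and function names are illustrative and not part of this diff.

type PullOptions = {
  proxy_username: string
  proxy_password: string
  proxy_url: string
  verify_proxy_ssl: boolean
  verify_proxy_host_ssl: boolean
  verify_peer_ssl: boolean
  verify_host_ssl: boolean
  no_proxy: string
}

type ProxySettings = {
  enabled: boolean
  url: string
  username: string
  password: string
  ignoreSSL: boolean
  verifyProxySSL: boolean
  verifyProxyHostSSL: boolean
  verifyPeerSSL: boolean
  verifyHostSSL: boolean
  noProxy: string
}

// Disabled proxy -> empty options; ignoreSSL -> all verification flags forced off.
const buildPullOptions = (s: ProxySettings): PullOptions =>
  s.enabled
    ? {
        proxy_username: s.username,
        proxy_password: s.password,
        proxy_url: s.url,
        verify_proxy_ssl: s.ignoreSSL ? false : s.verifyProxySSL,
        verify_proxy_host_ssl: s.ignoreSSL ? false : s.verifyProxyHostSSL,
        verify_peer_ssl: s.ignoreSSL ? false : s.verifyPeerSSL,
        verify_host_ssl: s.ignoreSSL ? false : s.verifyHostSSL,
        no_proxy: s.noProxy,
      }
    : {
        proxy_username: '',
        proxy_password: '',
        proxy_url: '',
        verify_proxy_ssl: false,
        verify_proxy_host_ssl: false,
        verify_peer_ssl: false,
        verify_host_ssl: false,
        no_proxy: '',
      }

export { buildPullOptions }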

View File

@ -51,11 +51,13 @@ export const useCreateNewThread = () => {
const { recommendedModel } = useRecommendedModel()
const selectedModel = useAtomValue(selectedModelAtom)
const requestCreateNewThread = async (
assistant: (ThreadAssistantInfo & { id: string; name: string }) | Assistant,
model?: Model | undefined
) => {
const defaultModel = model || recommendedModel
const defaultModel = model || selectedModel || recommendedModel
if (!model) {
// if we have model, which means user wants to create new thread from Model hub. Allow them.
@ -82,12 +84,12 @@ export const useCreateNewThread = () => {
// Default context length is 8192
const defaultContextLength = Math.min(
8192,
defaultModel?.settings.ctx_len ?? 8192
defaultModel?.settings?.ctx_len ?? 8192
)
const overriddenSettings = {
ctx_len: defaultModel?.settings.ctx_len
? Math.min(8192, defaultModel.settings.ctx_len)
ctx_len: defaultModel?.settings?.ctx_len
? Math.min(8192, defaultModel?.settings?.ctx_len)
: undefined,
}
@ -95,10 +97,10 @@ export const useCreateNewThread = () => {
const overriddenParameters = {
max_tokens: defaultContextLength
? Math.min(
defaultModel?.parameters.max_tokens ?? 8192,
defaultModel?.parameters?.max_tokens ?? 8192,
defaultContextLength
)
: defaultModel?.parameters.max_tokens,
: defaultModel?.parameters?.max_tokens,
}
const createdAt = Date.now()

View File

@ -8,12 +8,19 @@ import {
EngineConfig,
events,
EngineEvent,
Model,
ModelEvent,
ModelSource,
ModelSibling,
} from '@janhq/core'
import { useAtom } from 'jotai'
import { useAtom, useAtomValue } from 'jotai'
import { atomWithStorage } from 'jotai/utils'
import useSWR from 'swr'
import { getDescriptionByEngine, getTitleByEngine } from '@/utils/modelEngine'
import { extensionManager } from '@/extension/ExtensionManager'
import { downloadedModelsAtom } from '@/helpers/atoms/Model.atom'
export const releasedEnginesCacheAtom = atomWithStorage<{
data: EngineReleased[]
@ -385,3 +392,67 @@ export const uninstallEngine = async (
throw error
}
}
/**
* Add a new remote engine model
* @param name the remote model id to register
* @param engine the remote engine the model belongs to
* @returns a Promise that resolves once the model has been added
*/
export const addRemoteEngineModel = async (name: string, engine: string) => {
const extension = getExtension()
if (!extension) {
throw new Error('Extension is not available')
}
try {
// Call the extension's method
const response = await extension.addRemoteModel({
id: name,
model: name,
engine: engine as InferenceEngine,
} as unknown as Model)
events.emit(ModelEvent.OnModelsUpdate, { fetch: true })
return response
} catch (error) {
console.error('Failed to add remote model:', error)
throw error
}
}
/**
* Remote model sources
* @returns An object containing the remote engine model sources.
*/
export const useGetEngineModelSources = () => {
const { engines } = useGetEngines()
const downloadedModels = useAtomValue(downloadedModelsAtom)
return {
sources: Object.entries(engines ?? {})
?.filter((e) => e?.[1]?.[0]?.type === 'remote')
.map(
([key, values]) =>
({
id: key,
models: (
downloadedModels.filter((e) => e.engine === values[0]?.engine) ??
[]
).map(
(e) =>
({
id: e.id,
size: e.metadata?.size,
}) as unknown as ModelSibling
),
metadata: {
id: getTitleByEngine(key as InferenceEngine),
description: getDescriptionByEngine(key as InferenceEngine),
apiKey: values[0]?.api_key,
},
type: 'cloud',
}) as unknown as ModelSource
),
}
}
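
A brief usage sketch for addRemoteEngineModel above, e.g. from a form that registers a remote model by name; the hook file path, the handler name and the openai engine value are assumptions for illustration.

import { InferenceEngine } from '@janhq/core'

import { addRemoteEngineModel } from '@/hooks/useEngineManagement'

// Registers a remote model id with its engine; the emitted
// ModelEvent.OnModelsUpdate then refreshes the model list.
const onAddRemoteModel = async (modelName: string) => {
  try {
    await addRemoteEngineModel(modelName, InferenceEngine.openai)
  } catch (e) {
    console.debug(e)
  }
}

export { onAddRemoteModel }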

View File

@ -0,0 +1,72 @@
import { useMemo } from 'react'
import { ExtensionTypeEnum, ModelExtension } from '@janhq/core'
import useSWR from 'swr'
import { extensionManager } from '@/extension/ExtensionManager'
/**
* @returns SWR state for the model sources list (sources, error, mutate).
*/
export function useGetModelSources() {
const extension = useMemo(
() => extensionManager.get<ModelExtension>(ExtensionTypeEnum.Model),
[]
)
const {
data: sources,
error,
mutate,
} = useSWR(
extension ? 'getSources' : null,
() =>
extension?.getSources().then((e) =>
e.map((m) => ({
...m,
models: m.models.sort((a, b) => a.size - b.size),
}))
),
{
revalidateOnFocus: false,
revalidateOnReconnect: true,
}
)
return { sources, error, mutate }
}
export const useModelSourcesMutation = () => {
const extension = useMemo(
() => extensionManager.get<ModelExtension>(ExtensionTypeEnum.Model),
[]
)
/**
* Add a new model source
* @returns a Promise that resolves once the source has been added.
*/
const addModelSource = async (source: string) => {
try {
// Call the extension's method
return await extension?.addSource(source)
} catch (error) {
console.error('Failed to add model source:', error)
throw error
}
}
/**
* Delete a model source
* @returns a Promise that resolves once the source has been deleted.
*/
const deleteModelSource = async (source: string) => {
try {
// Call the extension's method
return await extension?.deleteSource(source)
} catch (error) {
console.error('Failed to delete model source:', error)
throw error
}
}
return { addModelSource, deleteModelSource }
}
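
A short sketch of how the two hooks above combine, mirroring the model search input earlier in this diff: add a source, then revalidate the SWR cache. The button component itself is illustrative; the import path is taken from the search input's imports.

import { useGetModelSources, useModelSourcesMutation } from '@/hooks/useModelSource'

// Adds a Hugging Face repo as a model source and refreshes the cached list.
const AddSourceButton = ({ repo }: { repo: string }) => {
  const { mutate } = useGetModelSources()
  const { addModelSource } = useModelSourcesMutation()

  return (
    <button onClick={() => addModelSource(repo).then(() => mutate())}>
      Add {repo}
    </button>
  )
}

export default AddSourceButton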

View File

@ -32,12 +32,15 @@ const useModels = () => {
const getData = useCallback(() => {
const getDownloadedModels = async () => {
const localModels = (await getModels()).map((e) => ({
...e,
name: ModelManager.instance().models.get(e.id)?.name ?? e.name ?? e.id,
metadata:
ModelManager.instance().models.get(e.id)?.metadata ?? e.metadata,
}))
const localModels = (await getModels())
.map((e) => ({
...e,
name:
ModelManager.instance().models.get(e.id)?.name ?? e.name ?? e.id,
metadata:
ModelManager.instance().models.get(e.id)?.metadata ?? e.metadata,
}))
.filter((e) => !('status' in e) || e.status !== 'downloadable')
const remoteModels = ModelManager.instance()
.models.values()

View File

@ -40,5 +40,5 @@ const config = {
// module.exports = createJestConfig(config)
module.exports = async () => ({
...(await createJestConfig(config)()),
transformIgnorePatterns: ['/node_modules/(?!(layerr|nanoid|@uppy|preact)/)'],
transformIgnorePatterns: ['/node_modules/(?!((.*))/)'],
})

View File

@ -48,6 +48,7 @@
"rehype-highlight": "^7.0.1",
"rehype-highlight-code-lines": "^1.0.4",
"rehype-katex": "^7.0.1",
"remark-gfm": "^4.0.0",
"remark-math": "^6.0.0",
"sass": "^1.69.4",
"slate": "latest",

File diff suppressed because one or more lines are too long

Binary image files not shown (sizes 35 KiB to 418 KiB).

Some files were not shown because too many files have changed in this diff.