Merge branch 'menloresearch:dev' into de_de-i18n

Bob Ros 2025-07-01 20:46:46 +02:00 committed by GitHub
commit f072aded62
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
21 changed files with 1428 additions and 508 deletions

@@ -0,0 +1,24 @@
#!/usr/bin/env bash
make clean
# To reproduce https://github.com/menloresearch/jan/pull/5463
TAURI_TOOLKIT_PATH="${XDG_CACHE_HOME:-$HOME/.cache}/tauri"
mkdir -p "$TAURI_TOOLKIT_PATH"
wget https://github.com/linuxdeploy/linuxdeploy/releases/download/1-alpha-20250213-2/linuxdeploy-x86_64.AppImage -O "$TAURI_TOOLKIT_PATH/linuxdeploy-x86_64.AppImage"
chmod +x "$TAURI_TOOLKIT_PATH/linuxdeploy-x86_64.AppImage"
# Bundle pre-install resources and the cortex-server/uv sidecar binaries into the Tauri config
jq '.bundle.resources = ["resources/pre-install/**/*"] | .bundle.externalBin = ["binaries/cortex-server", "resources/bin/uv"]' ./src-tauri/tauri.conf.json > /tmp/tauri.conf.json
mv /tmp/tauri.conf.json ./src-tauri/tauri.conf.json
make build-tauri
# Copy the bun runtime and engine libraries into the AppDir before repacking
cp ./src-tauri/resources/bin/bun ./src-tauri/target/release/bundle/appimage/Jan.AppDir/usr/bin/bun
mkdir -p ./src-tauri/target/release/bundle/appimage/Jan.AppDir/usr/lib/Jan/binaries/engines
cp -f ./src-tauri/binaries/deps/*.so* ./src-tauri/target/release/bundle/appimage/Jan.AppDir/usr/lib/Jan/binaries/
cp -f ./src-tauri/binaries/*.so* ./src-tauri/target/release/bundle/appimage/Jan.AppDir/usr/lib/Jan/binaries/
cp -rf ./src-tauri/binaries/engines ./src-tauri/target/release/bundle/appimage/Jan.AppDir/usr/lib/Jan/binaries/
APP_IMAGE="./src-tauri/target/release/bundle/appimage/$(ls ./src-tauri/target/release/bundle/appimage/ | grep AppImage | head -1)"
echo "$APP_IMAGE"
rm -f "$APP_IMAGE"
# Rebuild the AppImage from the patched AppDir
/opt/bin/appimagetool ./src-tauri/target/release/bundle/appimage/Jan.AppDir "$APP_IMAGE"
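
A quick way to run this end to end (a sketch: the commit does not name the script file, so `scripts/build-appimage.sh` is a hypothetical path, and `appimagetool` is assumed to live at `/opt/bin/appimagetool` as the script expects):

```bash
# Assumption: the script above is saved as scripts/build-appimage.sh
# and appimagetool is installed at /opt/bin/appimagetool.
bash scripts/build-appimage.sh
# The repacked AppImage replaces the original bundle output:
ls ./src-tauri/target/release/bundle/appimage/*.AppImage
```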

.github/ISSUE_TEMPLATE/1-bug-report.md vendored Normal file
@@ -0,0 +1,24 @@
---
name: 🐛 Bug Report
about: If something isn't working as expected 🤔
title: 'bug: '
type: Bug
---
**Version:** e.g. 0.5.x-xxx
## Describe the Bug
<!-- A clear & concise description of the bug -->
## Steps to Reproduce
1.
## Screenshots / Logs
<!-- You can find logs in: Settings -> General -> Data Folder -> App Logs -->
## Operating System
- [ ] macOS
- [ ] Windows
- [ ] Linux

@@ -0,0 +1,11 @@
---
name: 🚀 Feature Request
about: Suggest an idea for this project 😻!
title: 'idea: '
---
## Problem Statement
<!-- Describe the problem you're facing -->
## Feature Idea
<!-- Describe what you want instead. Examples are welcome! -->

.github/ISSUE_TEMPLATE/3-epic.md vendored Normal file
@@ -0,0 +1,12 @@
---
name: 🌟 Epic
about: Major building block that advances Jan's goals
title: 'epic: '
type: Epic
---
## Goal
## Tasklist
## Out of scope

.github/ISSUE_TEMPLATE/4-goal.md vendored Normal file
@@ -0,0 +1,13 @@
---
name: 🎯 Goal
about: External communication of Jan's roadmap and objectives
title: 'goal: '
type: Goal
---
## Goal
## Tasklist
## Out of scope

@@ -1,42 +0,0 @@
name: "\U0001F41B Bug Report"
description: "If something isn't working as expected \U0001F914"
title: 'bug: [DESCRIPTION]'
body:
- type: input
validations:
required: true
attributes:
label: "Jan version"
description: "**Tip:** The version is in the app's bottom right corner"
placeholder: "e.g. 0.5.x-xxx"
- type: textarea
validations:
required: true
attributes:
label: "Describe the Bug"
description: "A clear & concise description of the bug"
- type: textarea
attributes:
label: "Steps to Reproduce"
description: |
Please list out steps to reproduce the issue
placeholder: |
1. Go to '...'
2. Click on '...'
- type: textarea
attributes:
label: "Screenshots / Logs"
description: |
You can find logs in: ~/jan/logs/app.logs
- type: checkboxes
attributes:
label: "What is your OS?"
options:
- label: MacOS
- label: Windows
- label: Linux

@@ -1,7 +1,5 @@
-## To encourage contributors to use issue templates, we don't allow blank issues
 blank_issues_enabled: true
 contact_links:
-  - name: "\U0001F4AC Jan Discussions"
-    url: "https://github.com/orgs/menloresearch/discussions/categories/q-a"
-    about: "Get help, discuss features & roadmap, and share your projects"
+  - name: Jan Discussions
+    url: https://github.com/orgs/menloresearch/discussions/categories/q-a
+    about: Get help, discuss features & roadmap, and share your projects

@@ -1,20 +0,0 @@
name: "\U0001F680 Feature Request"
description: "Suggest an idea for this project \U0001F63B!"
title: 'idea: [DESCRIPTION]'
labels: 'feature request'
body:
- type: textarea
validations:
required: true
attributes:
label: "Problem Statement"
description: "Describe the problem you're facing"
placeholder: |
I'm always frustrated when ...
- type: textarea
validations:
required: true
attributes:
label: "Feature Idea"
description: "Describe what you want instead. Examples are welcome!"

@@ -1,16 +0,0 @@
---
name: Goal
about: Team-wide Quarterly Goals for Jan
title: 'goal: '
labels: ''
assignees: freelerobot
---
## Goal
## Motivation
## Key Outcomes
## Related Epics

@@ -1,21 +0,0 @@
name: "\U0001F929 Model Request"
description: "Request a new model to be compiled"
title: 'feat: [DESCRIPTION]'
labels: 'type: model request'
body:
- type: markdown
attributes:
value: "**Tip:** Download any HuggingFace model in app ([see guides](https://jan.ai/docs/models/manage-models#add-models)). Use this form for unsupported models only."
- type: textarea
validations:
required: true
attributes:
label: "Model Requests"
description: "If applicable, include the source URL, licenses, and any other relevant information"
- type: checkboxes
attributes:
label: "Which formats?"
options:
- label: GGUF (llama.cpp)
- label: TensorRT (TensorRT-LLM)
- label: ONNX (Onnx Runtime)

@@ -23,20 +23,20 @@
   },
   "devDependencies": {
     "@npmcli/arborist": "^7.1.0",
-    "@types/jest": "^29.5.14",
+    "@types/jest": "^30.0.0",
     "@types/node": "^22.10.0",
     "@types/pacote": "^11.1.7",
     "@types/request": "^2.48.12",
     "electron": "33.2.1",
     "eslint": "8.57.0",
     "eslint-plugin-jest": "^27.9.0",
-    "jest": "^29.7.0",
+    "jest": "^30.0.3",
     "jest-junit": "^16.0.0",
     "jest-runner": "^29.7.0",
     "pacote": "^21.0.0",
     "request": "^2.88.2",
     "request-progress": "^3.0.0",
-    "rimraf": "^3.0.2",
+    "rimraf": "^6.0.1",
     "rolldown": "1.0.0-beta.1",
     "ts-jest": "^29.2.5",
     "tslib": "^2.6.2",

Binary file not shown (new image, 512 KiB).

@@ -56,36 +56,37 @@ cd ~/.config/Jan/data # Default install
<Callout type="info"> <Callout type="info">
Root directory: `~/jan` Root directory: `~/jan`
</Callout> </Callout>
```sh ```sh
/assistants /assistants/
/jan /jan/
assistant.json assistant.json
/extensions /engines/
/llama.cpp/
/extensions/
extensions.json extensions.json
/@janhq /@janhq/
/extension_A /assistant-extension/
package.json /conversational-extension/
/logs /download-extension/
/app.txt /engine-management-extension/
/models /hardware-management-extension/
/model_A /inference-cortex-extension/
model.yaml /model-extension/
model_A.yaml /files/
/settings /logs/
settings.json app.log
/@janhq /models/
/extension_A_Settings /huggingface.co/
settings.json /Model_Provider_A/
/themes /Model_A
/dark-dimmed model_A.gguf
/joi-dark model_A.yaml
/joi-light /threads/
/night-blue /thread_A/
/threads messages.jsonl
/jan_thread_A thread.json
messages.jsonl
thread.json
messages.jsonl
``` ```
 ### `assistants/`
@@ -93,14 +94,28 @@ Where AI personalities live. The default one (`/assistants/jan/`):
 ```json
 {
-  "avatar": "",
+  "avatar": "👋",
   "id": "jan",
   "object": "assistant",
-  "created_at": 1715132389207,
+  "created_at": 1750945742.536,
   "name": "Jan",
-  "description": "A default assistant that can use all downloaded models",
+  "description": "Jan is a helpful AI assistant that can use tools and help complete tasks for its users.",
   "model": "*",
-  "instructions": ""
+  "instructions": "You have access to a set of tools to help you answer the user's question. You can use only one tool per message, and you'll receive the result of that tool in the user's next response. To complete a task, use tools step by step—each step should be guided by the outcome of the previous one.\nTool Usage Rules:\n1. Always provide the correct values as arguments when using tools. Do not pass variable names—use actual values instead.\n2. You may perform multiple tool steps to complete a task.\n3. Avoid repeating a tool call with exactly the same parameters to prevent infinite loops.",
+  "tools": [
+    {
+      "type": "retrieval",
+      "enabled": false,
+      "useTimeWeightedRetriever": false,
+      "settings": {
+        "top_k": 2,
+        "chunk_size": 1024,
+        "chunk_overlap": 64,
+        "retrieval_template": "Use the following pieces of context to answer the question at the end.\n----------------\nCONTEXT: {CONTEXT}\n----------------\nQUESTION: {QUESTION}\n----------------\nHelpful Answer:"
+      }
+    }
+  ],
+  "file_ids": []
 }
 ```
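
To see which variant a given install has, the default assistant config can be inspected directly; a small sketch assuming the default `~/jan` data folder shown above:

```bash
# Sketch: print the default assistant's name, model, and tool types
# from the default Jan data folder (~/jan). The ? tolerates older
# configs that lack a "tools" array.
jq '{name, model, tools: [.tools[]?.type]}' ~/jan/assistants/jan/assistant.json
```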
@@ -140,88 +155,65 @@ Debugging headquarters (`/logs/app.txt`):
 The silicon brain collection. Each model has its own `model.json`.
 <Callout type="info">
-Full parameters: [here](/docs/models/model-parameters)
+Full parameters: [here](/docs/model-parameters)
 </Callout>
-### `settings/`
-Control panel. Extension settings in `/settings/@janhq/`:
-| Parameter | Description |
-|----------------|----------------------------------------------------|
-| key | Setting identifier |
-| title | Display name |
-| description | Setting explanation |
-| controllerType | UI component type |
-| controllerProps| Component properties |
-| extensionName | Parent extension link |
-GPU settings (`settings.json`):
-| Parameter | Description |
-|----------------------|--------------------------------------------|
-| notify | Notification status |
-| run_mode | Operating mode |
-| nvidia_driver.exist | NVIDIA driver presence |
-| nvidia_driver.version| Driver version |
-| cuda.exist | CUDA availability |
-| cuda.version | CUDA version |
-| gpus[0].id | GPU identifier |
-| gpus[0].vram | GPU memory (MB) |
-| gpus[0].name | GPU model |
-| gpus[0].arch | GPU architecture |
-| gpu_highest_vram | Most capable GPU |
-| gpus_in_use | Active GPUs |
-| is_initial | First run flag |
-| vulkan | Vulkan support |
-### `themes/`
-Visual wardrobe. Each theme's `theme.json`:
-| Parameter | Description |
-|------------------|-------------------------------------------|
-| id | Theme identifier |
-| displayName | UI name |
-| reduceTransparent| Transparency control |
-| nativeTheme | OS theme sync |
-| variables | Component settings |
 ### `threads/`
 Chat archive. Each thread (`/threads/jan_unixstamp/`) contains:
 - `messages.jsonl`:
 ```json
 {
-  "id":"01J6Y6FH8PFTHQB5PNJTHEN27C",
-  "thread_id":"jan_1725437954",
-  "type":"Thread",
-  "role":"assistant",
-  "content":
-  [
-    {
-      "type": "text",
-      "text": {
-        "value": "Hello! Is there something I can help you with or would you like to chat?",
-        "annotations": []
-      }
-    }
-  ],
-  "status": "ready",
-  "created": 1725442802966,
-  "updated": 1725442802966,
-  "object": "thread.message"
-}
+  "completed_at": 0,
+  "content": [
+    {
+      "text": {
+        "annotations": [],
+        "value": "Hello! I can help you with various tasks. I can search for information on the internet, including news, videos, images, shopping, and more. I can also scrape webpages to extract specific information. Let me know what you need!"
+      },
+      "type": "text"
+    }
+  ],
+  "created_at": 1751012639307,
+  "id": "01JYR7S0JB5ZBGMJV52KWMW5VW",
+  "metadata": {
+    "assistant": {
+      "avatar": "👋",
+      "id": "jan",
+      "instructions": "You have access to a set of tools to help you answer the user's question. You can use only one tool per message, and you'll receive the result of that tool in the user's next response. To complete a task, use tools step by step—each step should be guided by the outcome of the previous one.\nTool Usage Rules:\n1. Always provide the correct values as arguments when using tools. Do not pass variable names—use actual values instead.\n2. You may perform multiple tool steps to complete a task.\n3. Avoid repeating a tool call with exactly the same parameters to prevent infinite loops.",
+      "name": "Jan",
+      "parameters": ""
+    },
+    "tokenSpeed": {
+      "lastTimestamp": 1751012637097,
+      "message": "01JYR7S0GW5M9PSHMRE7T8VQJM",
+      "tokenCount": 49,
+      "tokenSpeed": 22.653721682847895
+    }
+  },
+  "object": "thread.message",
+  "role": "assistant",
+  "status": "ready",
+  "thread_id": "8f2c9922-db49-4d1e-8620-279c05baf2d0",
+  "type": "text"
+}
 ```
 - `thread.json`:
 | Parameter | Description |
 |------------|------------------------------------------------|
+| assistants | Assistant configuration clone |
+| created | Creation timestamp |
 | id | Thread identifier |
+| metadata | Additional thread data |
+| model | Active model settings |
 | object | OpenAI compatibility marker |
 | title | Thread name |
-| assistants | Assistant configuration clone |
-| model | Active model settings |
-| metadata | Additional thread data |
+| updated | Updated timestamp |
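
Because each thread stores its messages as JSON Lines, the archive is easy to inspect from the shell; a sketch assuming the default `~/jan` data folder and the `/threads/thread_A/` layout shown earlier:

```bash
# Sketch: print role and message text for every entry in a thread,
# assuming the default data folder and a thread folder named thread_A.
jq -r '[.role, .content[0].text.value] | @tsv' ~/jan/threads/thread_A/messages.jsonl
```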
## Delete Jan Data ## Delete Jan Data
Uninstall guides: [Mac](/docs/desktop/mac#step-2-clean-up-data-optional), Uninstall guides: [Mac](/docs/desktop/mac#step-2-clean-up-data-optional),

@@ -33,7 +33,7 @@ import { Settings, EllipsisVertical, Plus, FolderOpen, Pencil } from 'lucide-rea
 Jan uses **llama.cpp** for running local AI models. You can find its settings in **Settings** (<Settings width={16} height={16} style={{display:"inline"}}/>) > **Local Engine** > **llama.cpp**:
 <br/>
-![llama.cpp](./_assets/llama.cpp-01.png)
+![llama.cpp](./_assets/llama.cpp-01-updated.png)
 <br/>
 These settings are for advanced users, you would want to check these settings when:
@@ -151,6 +151,7 @@ For detailed hardware compatibility, please visit our guide for [Mac](/docs/desk
 | **Caching** | - Enable to store recent prompts and responses<br></br>- Improves response time for repeated prompts | Enabled |
 | **KV Cache Type** | - KV cache implementation type; controls memory usage and precision trade-off<br></br>- Options:<br></br>• f16 (most stable)<br></br>• q8_0 (balanced)<br></br>• q4_0 (lowest memory) | f16 |
 | **mmap** | - Enables memory-mapped model loading<br></br>- Reduces memory usage<br></br>- Recommended for large models | Enabled |
+| **Context Shift** | - Automatically shifts the context window when the model is unable to process the entire prompt<br/> - Ensures that the most relevant information is always included <br/> - Recommended for long conversations and multiple tool calls | Disabled |
 ## Best Practices
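
For comparison, the same trade-off can be exercised against a standalone llama.cpp build; a sketch using upstream llama-server flags (the flag names come from llama.cpp itself, not Jan's UI, and the model path is a placeholder):

```bash
# Sketch: standalone llama-server run with a quantized K cache (q8_0),
# mirroring the "KV Cache Type" setting above. Model path is hypothetical.
llama-server -m ./models/example-model.gguf --cache-type-k q8_0 --ctx-size 8192
```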

@@ -38,8 +38,6 @@ These settings are available in the model settings modal:
 | **Repeat Last N** | Number of tokens to consider for repeat penalty. |
 | **Repeat Penalty** | Penalize repeating token sequences. |
 | **Presence Penalty**| Penalize alpha presence (encourages new topics). |
-| **Max Tokens** | Maximum length of the model's response. |
-| **Stop Sequences** | Tokens or phrases that will end the model's response. |
 | **Frequency Penalty** | Reduces word repetition. |
 <br/>

@@ -36,11 +36,15 @@ Follow this [guide](https://continue.dev/docs/quickstart) to install the Continu
 To set up Continue for use with Jan's Local Server, you must activate the Jan API Server with your chosen model.
-1. Press the `<>` button. Jan will take you to the **Local API Server** section.
-2. Setup the server, which includes the **IP Port**, **Cross-Origin-Resource-Sharing (CORS)** and **Verbose Server Logs**.
-3. Press the **Start Server** button
+1. Press the `⚙️ Settings` button.
+2. Locate `Local API Server`.
+3. Setup the server, which includes the **IP Port**, **Cross-Origin-Resource-Sharing (CORS)** and **Verbose Server Logs**.
+4. Include your user-defined API Key.
+5. Press the **Start Server** button
 ### Step 3: Configure Continue to Use Jan's Local Server
@@ -64,30 +68,35 @@ To set up Continue for use with Jan's Local AP
 </Tabs.Tab>
 </Tabs>
-```json title="~/.continue/config.json"
-{
-  "models": [
-    {
-      "title": "Jan",
-      "provider": "openai",
-      "model": "mistral-ins-7b-q4",
-      "apiKey": "EMPTY",
-      "apiBase": "http://localhost:1337/v1"
-    }
-  ]
-}
+```yaml title="~/.continue/config.yaml"
+name: Local Assistant
+version: 1.0.0
+schema: v1
+models:
+  - name: Jan
+    provider: openai
+    model: #MODEL_NAME (e.g. qwen3:0.6b)
+    apiKey: #YOUR_USER_DEFINED_API_KEY_HERE (e.g. hello)
+    apiBase: http://localhost:1337/v1
+context:
+  - provider: code
+  - provider: docs
+  - provider: diff
+  - provider: terminal
+  - provider: problems
+  - provider: folder
+  - provider: codebase
 ```
 2. Ensure the file has the following configurations:
    - Ensure `openai` is selected as the `provider`.
    - Match the `model` with the one enabled in the Jan API Server.
-   - Set `apiBase` to `http://localhost:1337`.
-   - Leave the `apiKey` field to `EMPTY`.
+   - Set `apiBase` to `http://localhost:1337/v1`.
 ### Step 4: Ensure the Using Model Is Activated in Jan
-1. Navigate to `Settings` > `My Models`.
-2. Click the **three dots (⋮)** button.
+1. Navigate to `Settings` > `Model Providers`.
+2. Under Llama.cpp, find the model that you would want to use.
 3. Select the **Start Model** button to activate the model.
 </Steps>
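
Once the server is up, the endpoint can be smoke-tested before wiring up Continue; a sketch assuming the default port `1337` and the example API key `hello` from the config above:

```bash
# Sketch: list models from Jan's OpenAI-compatible endpoint.
# Port 1337 and the key "hello" are the example values used in the docs above.
curl http://localhost:1337/v1/models -H "Authorization: Bearer hello"
```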

@@ -13,7 +13,7 @@
   },
   "devDependencies": {
     "cpx": "^1.5.0",
-    "rimraf": "^3.0.2",
+    "rimraf": "^6.0.1",
     "rolldown": "1.0.0-beta.1",
     "run-script-os": "^1.1.6",
     "typescript": "^5.3.3"

@@ -17,7 +17,7 @@
   },
   "devDependencies": {
     "cpx": "^1.5.0",
-    "rimraf": "^3.0.2",
+    "rimraf": "^6.0.1",
     "rolldown": "1.0.0-beta.1",
     "ts-loader": "^9.5.0",
     "typescript": "^5.7.2"

@@ -13,7 +13,7 @@
   },
   "devDependencies": {
     "cpx": "^1.5.0",
-    "rimraf": "^3.0.2",
+    "rimraf": "^6.0.1",
     "rolldown": "1.0.0-beta.1",
     "run-script-os": "^1.1.6",
     "typescript": "5.8.3",

@@ -45,7 +45,7 @@
"cpx": "^1.5.0", "cpx": "^1.5.0",
"cross-env": "^7.0.3", "cross-env": "^7.0.3",
"husky": "^9.1.5", "husky": "^9.1.5",
"jest": "^29.7.0", "jest": "^30.0.3",
"jest-environment-jsdom": "^29.7.0", "jest-environment-jsdom": "^29.7.0",
"rimraf": "^3.0.2", "rimraf": "^3.0.2",
"run-script-os": "^1.1.6", "run-script-os": "^1.1.6",

yarn.lock
File diff suppressed because it is too large (1,493 changes).