Merge branch 'dev' into current-date-instruction

Louis authored 2025-08-21 11:41:30 +07:00, committed via GitHub
commit e6587844d0
57 changed files with 2829 additions and 377 deletions

.github/workflows/autoqa-reliability.yml (new file)

@ -0,0 +1,121 @@
name: AutoQA Reliability (Manual)
on:
workflow_dispatch:
inputs:
source_type:
description: 'App source type (url)'
required: true
type: choice
options: [url]
default: url
jan_app_windows_source:
description: 'Windows installer URL path (used when source_type=url or to select artifact)'
required: true
type: string
default: 'https://catalog.jan.ai/windows/Jan_0.6.8_x64-setup.exe'
jan_app_ubuntu_source:
description: 'Ubuntu .deb URL path'
required: true
type: string
default: 'https://delta.jan.ai/nightly/Jan-nightly_0.6.4-728_amd64.deb'
jan_app_macos_source:
description: 'macOS .dmg URL path'
required: true
type: string
default: 'https://delta.jan.ai/nightly/Jan-nightly_0.6.4-728_universal.dmg'
is_nightly:
description: 'Is the app a nightly build?'
required: true
type: boolean
default: true
reliability_phase:
description: 'Reliability phase'
required: true
type: choice
options: [development, deployment]
default: development
reliability_runs:
description: 'Custom runs (0 uses phase default)'
required: true
type: number
default: 0
reliability_test_path:
description: 'Test file path (relative to autoqa working directory)'
required: true
type: string
default: 'tests/base/settings/app-data.txt'
jobs:
reliability-windows:
runs-on: windows-11-nvidia-gpu
timeout-minutes: 60
env:
DEFAULT_JAN_APP_URL: 'https://catalog.jan.ai/windows/Jan_0.6.8_x64-setup.exe'
DEFAULT_IS_NIGHTLY: 'false'
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Setup Python 3.13
uses: actions/setup-python@v4
with:
python-version: '3.13'
- name: Clean existing Jan installations
shell: powershell
run: |
.\autoqa\scripts\windows_cleanup.ps1 -IsNightly "${{ inputs.is_nightly }}"
- name: Download/Prepare Jan app
shell: powershell
run: |
.\autoqa\scripts\windows_download.ps1 `
-WorkflowInputUrl "${{ inputs.jan_app_windows_source }}" `
-WorkflowInputIsNightly "${{ inputs.is_nightly }}" `
-RepoVariableUrl "${{ vars.JAN_APP_URL }}" `
-RepoVariableIsNightly "${{ vars.IS_NIGHTLY }}" `
-DefaultUrl "$env:DEFAULT_JAN_APP_URL" `
-DefaultIsNightly "$env:DEFAULT_IS_NIGHTLY"
- name: Install Jan app
shell: powershell
run: |
.\autoqa\scripts\windows_install.ps1 -IsNightly "$env:IS_NIGHTLY"
- name: Install Python dependencies
working-directory: autoqa
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
- name: Run reliability tests
working-directory: autoqa
shell: powershell
run: |
$runs = "${{ inputs.reliability_runs }}"
$runsArg = ""
if ([int]$runs -gt 0) { $runsArg = "--reliability-runs $runs" }
python main.py --enable-reliability-test --reliability-phase "${{ inputs.reliability_phase }}" --reliability-test-path "${{ inputs.reliability_test_path }}" $runsArg
- name: Upload screen recordings
if: always()
uses: actions/upload-artifact@v4
continue-on-error: true
with:
name: reliability-recordings-${{ github.run_number }}-${{ runner.os }}
path: autoqa/recordings/
- name: Upload trajectories
if: always()
uses: actions/upload-artifact@v4
continue-on-error: true
with:
name: reliability-trajectories-${{ github.run_number }}-${{ runner.os }}
path: autoqa/trajectories/
- name: Cleanup after tests
if: always()
shell: powershell
run: |
.\autoqa\scripts\windows_post_cleanup.ps1 -IsNightly "${{ inputs.is_nightly }}"


@ -19,7 +19,7 @@ jobs:
deploy:
name: Deploy to CloudFlare Pages
env:
CLOUDFLARE_PROJECT_NAME: astro-docs
CLOUDFLARE_PROJECT_NAME: astro-docs # docs.jan.ai
runs-on: ubuntu-latest
permissions:
contents: write


@ -1,32 +1,266 @@
# Contributing to jan
# Contributing to Jan
First off, thank you for considering contributing to jan. It's people like you that make jan such an amazing project.
First off, thank you for considering contributing to Jan. It's people like you that make Jan such an amazing project.
Jan is an AI assistant that can run 100% offline on your device. Think ChatGPT, but private, local, and under your complete control. If you're thinking about contributing, you're already awesome - let's make AI accessible to everyone, one commit at a time.
## Quick Links to Component Guides
- **[Web App](./web-app/CONTRIBUTING.md)** - React UI and logic
- **[Core SDK](./core/CONTRIBUTING.md)** - TypeScript SDK and extension system
- **[Extensions](./extensions/CONTRIBUTING.md)** - Supportive modules for the frontend
- **[Tauri Backend](./src-tauri/CONTRIBUTING.md)** - Rust native integration
- **[Tauri Plugins](./src-tauri/plugins/CONTRIBUTING.md)** - Hardware and system plugins
## How Jan Actually Works
Jan is a desktop app that runs local AI models. Here's how the components actually connect:
```
┌──────────────────────────────────────────────────────────┐
│ Web App (Frontend) │
│ (web-app/) │
│ • React UI │
│ • Chat Interface │
│ • Settings Pages │
│ • Model Hub │
└────────────┬─────────────────────────────┬───────────────┘
│ │
│ imports │ imports
▼ ▼
┌──────────────────────┐ ┌──────────────────────┐
│ Core SDK │ │ Extensions │
│ (core/) │ │ (extensions/) │
│ │ │ │
│ • TypeScript APIs │◄─────│ • Assistant Mgmt │
│ • Extension System │ uses │ • Conversations │
│ • Event Bus │ │ • Downloads │
│ • Type Definitions │ │ • LlamaCPP │
└──────────┬───────────┘ └───────────┬──────────┘
│ │
│ ┌──────────────────────┐ │
│ │ Web App │ │
│ └──────────┬───────────┘ │
│ │ │
└──────────────┼───────────────┘
Tauri IPC
(invoke commands)
┌───────────────────────────────────────────────────────────┐
│ Tauri Backend (Rust) │
│ (src-tauri/) │
│ │
│ • Window Management • File System Access │
│ • Process Control • System Integration │
│ • IPC Command Handler • Security & Permissions │
└───────────────────────────┬───────────────────────────────┘
┌───────────────────────────────────────────────────────────┐
│ Tauri Plugins (Rust) │
│ (src-tauri/plugins/) │
│ │
│ ┌──────────────────┐ ┌──────────────────┐ │
│ │ Hardware Plugin │ │ LlamaCPP Plugin │ │
│ │ │ │ │ │
│ │ • CPU/GPU Info │ │ • Process Mgmt │ │
│ │ • Memory Stats │ │ • Model Loading │ │
│ │ • System Info │ │ • Inference │ │
│ └──────────────────┘ └──────────────────┘ │
└───────────────────────────────────────────────────────────┘
```
### The Communication Flow
1. **JavaScript Layer Relationships**:
- Web App imports Core SDK and Extensions as JavaScript modules
- Extensions use Core SDK for shared functionality
- All run in the browser/webview context
2. **All Three → Backend**: Through Tauri IPC
- **Web App** → Backend: `await invoke('app_command', data)`
- **Core SDK** → Backend: `await invoke('core_command', data)`
- **Extensions** → Backend: `await invoke('ext_command', data)`
- Each component can independently call backend commands
3. **Backend → Plugins**: Native Rust integration
- Backend loads plugins as Rust libraries
- Direct function calls, no IPC overhead
4. **Response Flow**:
- Plugin → Backend → IPC → Requester (Web App/Core/Extension) → UI updates
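A minimal sketch of that hop from the JavaScript side (using the `app_command` placeholder from the flow above; real command names are whatever is registered in `src-tauri`, and the payload is illustrative):
```typescript
import { invoke } from '@tauri-apps/api/core'

// Any of the three JavaScript layers (Web App, Core SDK, Extensions) can make
// this call. Tauri routes it over IPC to the matching #[tauri::command] handler
// in src-tauri and resolves the promise with the handler's return value.
const data = { threadId: 'example-thread' } // illustrative payload
const result = await invoke('app_command', data)
console.log('Backend replied:', result)
```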
### Real-World Example: Loading a Model
Here's what actually happens when you click "Download Llama 3":
1. **Web App** (`web-app/`) - User clicks download button
2. **Extension** (`extensions/download-extension`) - Handles the download logic
3. **Tauri Backend** (`src-tauri/`) - Actually downloads the file to disk
4. **Extension** (`extensions/llamacpp-extension`) - Prepares model for loading
5. **Tauri Plugin** (`src-tauri/plugins/llamacpp`) - Starts llama.cpp process
6. **Hardware Plugin** (`src-tauri/plugins/hardware`) - Detects GPU, optimizes settings
7. **Model ready!** - User can start chatting
## Project Structure
```
jan/
├── web-app/ # React frontend (what users see)
├── src-tauri/ # Rust backend (system integration)
│ ├── src/core/ # Core Tauri commands
│ └── plugins/ # Tauri plugins (hardware, llamacpp)
├── core/ # TypeScript SDK (API layer)
├── extensions/ # JavaScript extensions
│ ├── assistant-extension/
│ ├── conversational-extension/
│ ├── download-extension/
│ └── llamacpp-extension/
├── docs/ # Documentation website
├── website/ # Marketing website
├── autoqa/ # Automated testing
├── scripts/ # Build utilities
├── package.json # Root workspace configuration
├── Makefile # Build automation commands
├── mise.toml # Mise tool configuration
├── LICENSE # Apache 2.0 license
└── README.md # Project overview
```
## Development Setup
### The Scenic Route (Build from Source)
**Prerequisites:**
- Node.js ≥ 20.0.0
- Yarn ≥ 1.22.0
- Rust (for Tauri)
- Make ≥ 3.81
**Option 1: The Easy Way (Make)**
```bash
git clone https://github.com/menloresearch/jan
cd jan
make dev
```
**Option 2: The Easier Way (Mise)**
```bash
git clone https://github.com/menloresearch/jan
cd jan
# Install mise
curl https://mise.run | sh
# Let mise handle everything
mise install # installs Node.js, Rust, and other tools
mise dev # runs the full development setup
```
## How Can I Contribute?
### Reporting Bugs
- **Ensure the bug was not already reported** by searching on GitHub under [Issues](https://github.com/menloresearch/jan/issues).
- If you're unable to find an open issue addressing the problem, [open a new one](https://github.com/menloresearch/jan/issues/new).
- **Ensure the bug was not already reported** by searching on GitHub under [Issues](https://github.com/menloresearch/jan/issues)
- If you're unable to find an open issue addressing the problem, [open a new one](https://github.com/menloresearch/jan/issues/new)
- Include your system specs and error logs - it helps a ton
### Suggesting Enhancements
- Open a new issue with a clear title and description.
- Open a new issue with a clear title and description
- Explain why this enhancement would be useful
- Include mockups or examples if you can
### Your First Code Contribution
- Fork the repo.
- Create a new branch (`git checkout -b feature-name`).
- Commit your changes (`git commit -am 'Add some feature'`).
- Push to the branch (`git push origin feature-name`).
- Open a new Pull Request.
**Choose Your Adventure:**
- **Frontend UI and logic** → `web-app/`
- **Shared API declarations** → `core/`
- **Backend system integration** → `src-tauri/`
- **Business logic features** → `extensions/`
- **Dedicated backend handler** → `src-tauri/plugins/`
## Styleguides
**The Process:**
1. Fork the repo
2. Create a new branch (`git checkout -b feature-name`)
3. Make your changes (and write tests!)
4. Commit your changes (`git commit -am 'Add some feature'`)
5. Push to the branch (`git push origin feature-name`)
6. Open a new Pull Request against `dev` branch
### Git Commit Messages
## Testing
- Use the present tense ("Add feature" not "Added feature").
```bash
yarn test # All tests
cd src-tauri && cargo test # Rust tests
cd autoqa && python main.py # End-to-end tests
```
## Code Standards
### TypeScript/JavaScript
- TypeScript required (we're not animals)
- ESLint + Prettier
- Functional React components
- Proper typing (no `any` - seriously!)
### Rust
- `cargo fmt` + `cargo clippy`
- `Result<T, E>` for error handling
- Document public APIs
## Git Conventions
### Branches
- `main` - stable releases
- `dev` - development (target this for PRs)
- `feature/*` - new features
- `fix/*` - bug fixes
### Commit Messages
- Use the present tense ("Add feature" not "Added feature")
- Be descriptive but concise
- Reference issues when applicable
Examples:
```
feat: add support for Qwen models
fix: resolve memory leak in model loading
docs: update installation instructions
```
## Troubleshooting
If things go sideways:
1. **Check our [troubleshooting docs](https://jan.ai/docs/troubleshooting)**
2. **Clear everything and start fresh:** `make clean` then `make dev`
3. **Copy your error logs and system specs**
4. **Ask for help in our [Discord](https://discord.gg/FTk2MvZwJH)** `#🆘|jan-help` channel
Common issues:
- **Build failures**: Check Node.js and Rust versions
- **Extension not loading**: Verify it's properly registered
- **Model not working**: Check hardware requirements and GPU drivers
## Getting Help
- [Documentation](https://jan.ai/docs) - The manual you should read
- [Discord Community](https://discord.gg/jan) - Where the community lives
- [GitHub Issues](https://github.com/janhq/jan/issues) - Report bugs here
- [GitHub Discussions](https://github.com/janhq/jan/discussions) - Ask questions
## License
Apache 2.0 - Because sharing is caring. See [LICENSE](./LICENSE) for the legal stuff.
## Additional Notes
Thank you for contributing to jan!
We're building something pretty cool here - an AI assistant that respects your privacy and runs entirely on your machine. Every contribution, no matter how small, helps make AI more accessible to everyone.
Thanks for being part of the journey. Let's build the future of local AI together! 🚀


@ -47,6 +47,8 @@ test: lint
yarn copy:assets:tauri
yarn build:icon
cargo test --manifest-path src-tauri/Cargo.toml --no-default-features --features test-tauri -- --test-threads=1
cargo test --manifest-path src-tauri/plugins/tauri-plugin-hardware/Cargo.toml
cargo test --manifest-path src-tauri/plugins/tauri-plugin-llamacpp/Cargo.toml
# Builds and publishes the app
build-and-publish: install-and-build

core/CONTRIBUTING.md (new file)

@ -0,0 +1,71 @@
# Contributing to Jan Core
[← Back to Main Contributing Guide](../CONTRIBUTING.md)
TypeScript SDK providing extension system, APIs, and type definitions for all Jan components.
## Key Directories
- **`/src/browser`** - Core APIs (events, extensions, file system)
- **`/src/browser/extensions`** - Built-in extensions (assistant, inference, conversational)
- **`/src/types`** - TypeScript type definitions
- **`/src/test`** - Testing utilities
## Development
### Key Principles
1. **Platform Agnostic** - Works everywhere (browser, Node.js)
2. **Extension-Based** - New features = new extensions
3. **Type Everything** - TypeScript required
4. **Event-Driven** - Components communicate via events
### Building & Testing
```bash
# Build the SDK
yarn build
# Run tests
yarn test
# Watch mode
yarn test:watch
```
### Event System
```typescript
// Emit events
events.emit('model:loaded', { modelId: 'llama-3' })
// Listen for events
events.on('model:loaded', (data) => {
console.log('Model loaded:', data.modelId)
})
```
## Testing
```typescript
describe('MyFeature', () => {
it('should do something', () => {
const result = doSomething()
expect(result).toBe('expected')
})
})
```
## Best Practices
- Keep it simple
- Use TypeScript fully (no `any`)
- Write tests for critical features
- Follow existing patterns
- Export new modules in index files
## Dependencies
- **TypeScript** - Type safety
- **Rolldown** - Bundling
- **Vitest** - Testing


@ -7,6 +7,7 @@ export interface chatCompletionRequestMessage {
role: 'system' | 'user' | 'assistant' | 'tool'
content: string | null | Content[] // Content can be a string OR an array of content parts
reasoning?: string | null // Some models return reasoning in completed responses
reasoning_content?: string | null // Some models return reasoning in completed responses
name?: string
tool_calls?: any[] // Simplified tool_call_id?: string
}
@ -274,7 +275,7 @@ export abstract class AIEngine extends BaseExtension {
/**
* Check if a tool is supported by the model
* @param modelId
* @param modelId
*/
abstract isToolSupported(modelId: string): Promise<boolean>
}

extensions/CONTRIBUTING.md (new file)

@ -0,0 +1,137 @@
# Contributing to Jan Extensions
[← Back to Main Contributing Guide](../CONTRIBUTING.md)
Extensions add specific features to Jan as self-contained modules.
## Current Extensions
### `/assistant-extension`
- Assistant CRUD operations
- `src/index.ts` - Main implementation
### `/conversational-extension`
- Message handling, conversation state
- `src/index.ts` - Chat logic
### `/download-extension`
- Model downloads with progress tracking
- `src/index.ts` - Download logic
- `settings.json` - Download settings
### `/llamacpp-extension`
- Local model inference via llama.cpp
- `src/index.ts` - Entry point
- `src/backend.ts` - llama.cpp integration
- `settings.json` - Model settings
## Creating Extensions
### Setup
```bash
mkdir my-extension
cd my-extension
yarn init
```
### Structure
```
my-extension/
├── package.json
├── rolldown.config.mjs
├── src/index.ts
└── settings.json (optional)
```
### Basic Extension
```typescript
import { Extension } from '@janhq/core'
export default class MyExtension extends Extension {
async onLoad() {
// Extension initialization
}
async onUnload() {
// Cleanup
}
}
```
## Building & Testing
```bash
# Build extension
yarn build
# Run tests
yarn test
```
## Common Patterns
### Service Registration
```typescript
async onLoad() {
this.registerService('myService', {
doSomething: async () => 'result'
})
}
```
### Event Handling
```typescript
async onLoad() {
this.on('model:loaded', (model) => {
console.log('Model loaded:', model.id)
})
}
```
## Extension Lifecycle
1. **Jan starts** → Discovers extensions
2. **Loading** → Calls `onLoad()` method
3. **Active** → Extension responds to events
4. **Unloading** → Calls `onUnload()` on shutdown
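A minimal sketch of how that lifecycle maps onto an extension class (assuming the `Extension` base class and the `this.on` helper shown earlier; the event name and the timer are illustrative):
```typescript
import { Extension } from '@janhq/core'

export default class LifecycleExample extends Extension {
  private timer?: ReturnType<typeof setInterval>

  async onLoad() {
    // Steps 2-3: register listeners and start any background work here
    this.on('model:loaded', (model: { id: string }) => {
      console.log('Model loaded:', model.id)
    })
    this.timer = setInterval(() => console.log('still active'), 60_000)
  }

  async onUnload() {
    // Step 4: release everything acquired in onLoad
    if (this.timer) clearInterval(this.timer)
  }
}
```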
## Debugging Extensions
```typescript
// Check if extension loaded
console.log(window.core.extensions)
// Debug extension events
this.on('*', console.log)
// Check extension services
console.log(window.core.api)
```
## Common Issues
**Extension not loading?**
- Check package.json format: `@janhq/extension-name`
- Ensure `onLoad()` doesn't throw errors
- Verify exports in index.ts
**Events not working?**
- Check event name spelling
- Ensure listeners are set up in `onLoad()`
## Best Practices
- Keep extensions focused on one feature
- Use async/await for all operations
- Clean up resources in onUnload()
- Handle errors gracefully
- Don't depend on other extensions
## Dependencies
- **@janhq/core** - Core SDK and extension system
- **TypeScript** - Type safety
- **Rolldown** - Bundling


@ -31,6 +31,7 @@
"@janhq/tauri-plugin-hardware-api": "link:../../src-tauri/plugins/tauri-plugin-hardware",
"@janhq/tauri-plugin-llamacpp-api": "link:../../src-tauri/plugins/tauri-plugin-llamacpp",
"@tauri-apps/api": "^2.5.0",
"@tauri-apps/plugin-http": "^2.5.1",
"@tauri-apps/plugin-log": "^2.6.0",
"fetch-retry": "^5.0.6",
"ulidx": "^2.3.0"


@ -17,4 +17,7 @@ export default defineConfig({
IS_MAC: JSON.stringify(process.platform === 'darwin'),
IS_LINUX: JSON.stringify(process.platform === 'linux'),
},
inject: {
fetch: ['@tauri-apps/plugin-http', 'fetch'],
},
})


@ -41,6 +41,7 @@ type LlamacppConfig = {
auto_unload: boolean
chat_template: string
n_gpu_layers: number
offload_mmproj: boolean
override_tensor_buffer_t: string
ctx_size: number
threads: number
@ -103,12 +104,6 @@ interface DeviceList {
free: number
}
interface GgufMetadata {
version: number
tensor_count: number
metadata: Record<string, string>
}
/**
* Override the default app.log function to use Jan's logging system.
* @param args
@ -1061,13 +1056,34 @@ export default class llamacpp_extension extends AIEngine {
}
}
// TODO: check if files are valid GGUF files
// NOTE: modelPath and mmprojPath can be either relative to Jan's data folder (if they are downloaded)
// or absolute paths (if they are provided as local files)
// Validate GGUF files
const janDataFolderPath = await getJanDataFolderPath()
let size_bytes = (
await fs.fileStat(await joinPath([janDataFolderPath, modelPath]))
).size
const fullModelPath = await joinPath([janDataFolderPath, modelPath])
try {
// Validate main model file
const modelMetadata = await readGgufMetadata(fullModelPath)
logger.info(
`Model GGUF validation successful: version ${modelMetadata.version}, tensors: ${modelMetadata.tensor_count}`
)
// Validate mmproj file if present
if (mmprojPath) {
const fullMmprojPath = await joinPath([janDataFolderPath, mmprojPath])
const mmprojMetadata = await readGgufMetadata(fullMmprojPath)
logger.info(
`Mmproj GGUF validation successful: version ${mmprojMetadata.version}, tensors: ${mmprojMetadata.tensor_count}`
)
}
} catch (error) {
logger.error('GGUF validation failed:', error)
throw new Error(
`Invalid GGUF file(s): ${error.message || 'File format validation failed'}`
)
}
// Calculate file sizes
let size_bytes = (await fs.fileStat(fullModelPath)).size
if (mmprojPath) {
size_bytes += (
await fs.fileStat(await joinPath([janDataFolderPath, mmprojPath]))
@ -1203,7 +1219,7 @@ export default class llamacpp_extension extends AIEngine {
// disable llama-server webui
args.push('--no-webui')
const api_key = await this.generateApiKey(modelId, String(port))
envs["LLAMA_API_KEY"] = api_key
envs['LLAMA_API_KEY'] = api_key
// model option is required
// NOTE: model_path and mmproj_path can be either relative to Jan's data folder or absolute path
@ -1212,7 +1228,6 @@ export default class llamacpp_extension extends AIEngine {
modelConfig.model_path,
])
args.push('--jinja')
args.push('--reasoning-format', 'none')
args.push('-m', modelPath)
// For overriding tensor buffer type, useful where
// massive MOE models can be made faster by keeping attention on the GPU
@ -1222,6 +1237,10 @@ export default class llamacpp_extension extends AIEngine {
// Takes a regex with matching tensor name as input
if (cfg.override_tensor_buffer_t)
args.push('--override-tensor', cfg.override_tensor_buffer_t)
// Offload the multimodal projector model to the GPU by default. If there is not enough memory,
// turning this setting off keeps the projector model on the CPU, but image processing can
// take longer.
if (cfg.offload_mmproj === false) args.push('--no-mmproj-offload')
args.push('-a', modelId)
args.push('--port', String(port))
if (modelConfig.mmproj_path) {
@ -1288,12 +1307,15 @@ export default class llamacpp_extension extends AIEngine {
try {
// TODO: add LIBRARY_PATH
const sInfo = await invoke<SessionInfo>('plugin:llamacpp|load_llama_model', {
backendPath,
libraryPath,
args,
envs,
})
const sInfo = await invoke<SessionInfo>(
'plugin:llamacpp|load_llama_model',
{
backendPath,
libraryPath,
args,
envs,
}
)
return sInfo
} catch (error) {
logger.error('Error in load command:\n', error)
@ -1383,7 +1405,11 @@ export default class llamacpp_extension extends AIEngine {
method: 'POST',
headers,
body,
signal: abortController?.signal,
connectTimeout: 600000, // 10 minutes
signal: AbortSignal.any([
AbortSignal.timeout(600000),
abortController?.signal,
]),
})
if (!response.ok) {
const errorData = await response.json().catch(() => null)
@ -1542,6 +1568,26 @@ export default class llamacpp_extension extends AIEngine {
}
}
/**
* Check if mmproj.gguf file exists for a given model ID
* @param modelId - The model ID to check for mmproj.gguf
* @returns Promise<boolean> - true if mmproj.gguf exists, false otherwise
*/
async checkMmprojExists(modelId: string): Promise<boolean> {
try {
const mmprojPath = await joinPath([
await this.getProviderPath(),
'models',
modelId,
'mmproj.gguf',
])
return await fs.existsSync(mmprojPath)
} catch (e) {
logger.error(`Error checking mmproj.gguf for model ${modelId}:`, e)
return false
}
}
async getDevices(): Promise<DeviceList[]> {
const cfg = this.config
const [version, backend] = cfg.version_backend.split('/')

src-tauri/CONTRIBUTING.md (new file)

@ -0,0 +1,111 @@
# Contributing to Tauri Backend
[← Back to Main Contributing Guide](../CONTRIBUTING.md)
Rust backend that handles native system integration, file operations, and process management.
## Key Modules
- **`/src/core/app`** - App state and commands
- **`/src/core/downloads`** - Model download management
- **`/src/core/filesystem`** - File system operations
- **`/src/core/mcp`** - Model Context Protocol
- **`/src/core/server`** - Local API server
- **`/src/core/system`** - System information and utilities
- **`/src/core/threads`** - Conversation management
- **`/utils`** - Shared utility crate (CLI, crypto, HTTP, path utils). Used by plugins and the main backend.
- **`/plugins`** - Native Tauri plugins ([see plugins guide](./plugins/CONTRIBUTING.md))
## Development
### Adding Tauri Commands
```rust
#[tauri::command]
async fn my_command(param: String) -> Result<String, String> {
Ok(format!("Processed: {}", param))
}
// Register in lib.rs
tauri::Builder::default()
.invoke_handler(tauri::generate_handler![my_command])
```
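On the frontend, a registered command is invoked by its function name (a minimal sketch; `my_command` matches the Rust handler above, and the argument key mirrors its parameter):
```typescript
import { invoke } from '@tauri-apps/api/core'

// Tauri serializes the argument object, calls the my_command handler,
// and resolves with its Ok value (or rejects with the Err value).
const processed = await invoke<string>('my_command', { param: 'hello' })
console.log(processed) // "Processed: hello"
```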
## Building & Testing
```bash
# Development
yarn tauri dev
# Build
yarn tauri build
# Run tests
cargo test
```
### State Management
```rust
#[tauri::command]
async fn get_data(state: State<'_, AppState>) -> Result<Data, Error> {
state.get_data().await
}
```
### Error Handling
```rust
#[derive(Debug, thiserror::Error)]
pub enum AppError {
#[error("IO error: {0}")]
Io(#[from] std::io::Error),
}
```
## Debugging
```rust
// Enable debug logging
env::set_var("RUST_LOG", "debug");
// Debug print in commands
#[tauri::command]
async fn my_command() -> Result<String, String> {
println!("Command called"); // Shows in terminal
dbg!("Debug info");
Ok("result".to_string())
}
```
## Platform-Specific Notes
**Windows**: Requires Visual Studio Build Tools
**macOS**: Needs Xcode command line tools
**Linux**: May need additional system packages
```rust
#[cfg(target_os = "windows")]
use std::os::windows::process::CommandExt;
```
## Common Issues
**Build failures**: Check Rust toolchain version
**IPC errors**: Ensure command names match frontend calls
**Permission errors**: Update capabilities configuration
## Best Practices
- Always use `Result<T, E>` for fallible operations
- Validate all input from frontend
- Use async for I/O operations
- Follow Rust naming conventions
- Document public APIs
## Dependencies
- **Tauri** - Desktop app framework
- **Tokio** - Async runtime
- **Serde** - JSON serialization
- **thiserror** - Error handling

src-tauri/Cargo.lock (generated)

@ -854,8 +854,18 @@ version = "0.20.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee"
dependencies = [
"darling_core",
"darling_macro",
"darling_core 0.20.11",
"darling_macro 0.20.11",
]
[[package]]
name = "darling"
version = "0.21.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "08440b3dd222c3d0433e63e097463969485f112baff337dfdaca043a0d760570"
dependencies = [
"darling_core 0.21.2",
"darling_macro 0.21.2",
]
[[package]]
@ -872,13 +882,38 @@ dependencies = [
"syn 2.0.104",
]
[[package]]
name = "darling_core"
version = "0.21.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d25b7912bc28a04ab1b7715a68ea03aaa15662b43a1a4b2c480531fd19f8bf7e"
dependencies = [
"fnv",
"ident_case",
"proc-macro2",
"quote",
"strsim",
"syn 2.0.104",
]
[[package]]
name = "darling_macro"
version = "0.20.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead"
dependencies = [
"darling_core",
"darling_core 0.20.11",
"quote",
"syn 2.0.104",
]
[[package]]
name = "darling_macro"
version = "0.21.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ce154b9bea7fb0c8e8326e62d00354000c36e79770ff21b8c84e3aa267d9d531"
dependencies = [
"darling_core 0.21.2",
"quote",
"syn 2.0.104",
]
@ -3984,8 +4019,8 @@ dependencies = [
[[package]]
name = "rmcp"
version = "0.2.1"
source = "git+https://github.com/modelcontextprotocol/rust-sdk?rev=3196c95f1dfafbffbdcdd6d365c94969ac975e6a#3196c95f1dfafbffbdcdd6d365c94969ac975e6a"
version = "0.5.0"
source = "git+https://github.com/modelcontextprotocol/rust-sdk?rev=209dbac50f51737ad953c3a2c8e28f3619b6c277#209dbac50f51737ad953c3a2c8e28f3619b6c277"
dependencies = [
"base64 0.22.1",
"chrono",
@ -4010,10 +4045,10 @@ dependencies = [
[[package]]
name = "rmcp-macros"
version = "0.2.1"
source = "git+https://github.com/modelcontextprotocol/rust-sdk?rev=3196c95f1dfafbffbdcdd6d365c94969ac975e6a#3196c95f1dfafbffbdcdd6d365c94969ac975e6a"
version = "0.5.0"
source = "git+https://github.com/modelcontextprotocol/rust-sdk?rev=209dbac50f51737ad953c3a2c8e28f3619b6c277#209dbac50f51737ad953c3a2c8e28f3619b6c277"
dependencies = [
"darling",
"darling 0.21.2",
"proc-macro2",
"quote",
"serde_json",
@ -4408,7 +4443,7 @@ version = "3.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "de90945e6565ce0d9a25098082ed4ee4002e047cb59892c318d66821e14bb30f"
dependencies = [
"darling",
"darling 0.20.11",
"proc-macro2",
"quote",
"syn 2.0.104",
@ -6868,7 +6903,7 @@ version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a76ff259533532054cfbaefb115c613203c73707017459206380f03b3b3f266e"
dependencies = [
"darling",
"darling 0.20.11",
"proc-macro2",
"quote",
"syn 2.0.104",


@ -44,7 +44,7 @@ jan-utils = { path = "./utils" }
libloading = "0.8.7"
log = "0.4"
reqwest = { version = "0.11", features = ["json", "blocking", "stream"] }
rmcp = { git = "https://github.com/modelcontextprotocol/rust-sdk", rev = "209dbac50f51737ad953c3a2c8e28f3619b6c277", features = [
rmcp = { version = "0.6.0", features = [
"client",
"transport-sse-client",
"transport-streamable-http-client",


@ -0,0 +1,119 @@
# Contributing to Tauri Plugins
[← Back to Main Contributing Guide](../../CONTRIBUTING.md) | [← Back to Tauri Guide](../CONTRIBUTING.md)
Native Rust plugins for hardware access, process management, and system integration.
## Current Plugins
### `/tauri-plugin-hardware`
- Hardware detection (CPU, GPU, memory)
### `/tauri-plugin-llamacpp`
- llama.cpp process management and model inference
## Plugin Structure
```
tauri-plugin-name/
├── Cargo.toml
├── src/lib.rs # Plugin entry point
├── src/commands.rs # Tauri commands
├── guest-js/index.ts # JavaScript API
└── permissions/default.toml
```
## Development
### Creating Plugins
Assuming your new plugin is named `my-plugin`:
```bash
# with npx
npx @tauri-apps/cli plugin new my-plugin
# with cargo
cargo tauri plugin new my-plugin
cd tauri-plugin-my-plugin
```
### Plugin Registration
```rust
use tauri::{plugin::{Builder, TauriPlugin}, Runtime};
pub fn init<R: Runtime>() -> TauriPlugin<R> {
Builder::new("my-plugin")
.invoke_handler(tauri::generate_handler![commands::my_command])
.build()
}
```
### Commands & JavaScript API
```rust
#[tauri::command]
pub async fn my_command(param: String) -> Result<String, Error> {
Ok(format!("Result: {}", param))
}
```
```typescript
import { invoke } from '@tauri-apps/api/core'
export async function myCommand(param: string): Promise<string> {
return await invoke('plugin:my-plugin|my_command', { param })
}
```
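From the web app, the generated wrapper is consumed like any other module. A sketch, assuming the plugin's JS package is linked the same way the existing plugins are (the package name below is hypothetical):
```typescript
// Hypothetical package name; the existing plugins are linked as
// "@janhq/tauri-plugin-hardware-api" and "@janhq/tauri-plugin-llamacpp-api".
import { myCommand } from '@janhq/tauri-plugin-my-plugin-api'

const result = await myCommand('hello')
console.log(result) // "Result: hello"
```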
### Building & Testing
```bash
cargo build # Build plugin
yarn build # Build JavaScript
cargo test # Run tests
```
## Security Considerations
```toml
# permissions/default.toml - Be specific
[[permission]]
identifier = "allow-hardware-info"
description = "Read system hardware information"
# Never use wildcards in production
# ❌ identifier = "allow-*"
# ✅ identifier = "allow-specific-action"
```
## Testing Plugins
```bash
# Test plugin in isolation
cd tauri-plugin-my-plugin
cargo test
# Test with main app
cd ../../
yarn tauri dev
# Test JavaScript API
yarn build && node -e "const plugin = require('./dist-js'); console.log(plugin)"
```
## Best Practices
- Use secure permission configurations
- Validate all command inputs
- Handle platform differences properly
- Clean up resources in Drop implementations
- Test on all target platforms
## Dependencies
- **Tauri** - Plugin framework
- **Serde** - JSON serialization
- **Tokio** - Async runtime (if needed)


@ -12,7 +12,7 @@ use tokio::time::Instant;
use crate::device::{get_devices_from_backend, DeviceInfo};
use crate::error::{ErrorCode, LlamacppError, ServerError, ServerResult};
use crate::path::{validate_binary_path, validate_model_path};
use crate::path::{validate_binary_path, validate_model_path, validate_mmproj_path};
use crate::process::{
find_session_by_model_id, get_all_active_sessions, get_all_loaded_model_ids,
get_random_available_port, is_process_running_by_pid,
@ -55,6 +55,7 @@ pub async fn load_llama_model<R: Runtime>(
let port = parse_port_from_args(&args);
let model_path_pb = validate_model_path(&mut args)?;
let _mmproj_path_pb = validate_mmproj_path(&mut args)?;
let api_key: String;


@ -98,3 +98,50 @@ pub fn validate_model_path(args: &mut Vec<String>) -> ServerResult<PathBuf> {
Ok(model_path_pb)
}
/// Validate mmproj path exists and update args with platform-appropriate path format
pub fn validate_mmproj_path(args: &mut Vec<String>) -> ServerResult<Option<PathBuf>> {
let mmproj_path_index = match args.iter().position(|arg| arg == "--mmproj") {
Some(index) => index,
None => return Ok(None), // mmproj is optional
};
let mmproj_path = args.get(mmproj_path_index + 1).cloned().ok_or_else(|| {
LlamacppError::new(
ErrorCode::ModelLoadFailed,
"Mmproj path was not provided after '--mmproj' flag.".into(),
None,
)
})?;
let mmproj_path_pb = PathBuf::from(&mmproj_path);
if !mmproj_path_pb.exists() {
let err_msg = format!(
"Invalid or inaccessible mmproj path: {}",
mmproj_path_pb.display()
);
log::error!("{}", &err_msg);
return Err(LlamacppError::new(
ErrorCode::ModelFileNotFound,
"The specified mmproj file does not exist or is not accessible.".into(),
Some(err_msg),
)
.into());
}
#[cfg(windows)]
{
// use short path on Windows
if let Some(short) = get_short_path(&mmproj_path_pb) {
args[mmproj_path_index + 1] = short;
} else {
args[mmproj_path_index + 1] = mmproj_path_pb.display().to_string();
}
}
#[cfg(not(windows))]
{
args[mmproj_path_index + 1] = mmproj_path_pb.display().to_string();
}
Ok(Some(mmproj_path_pb))
}


@ -2,6 +2,7 @@ use rmcp::model::{CallToolRequestParam, CallToolResult};
use serde_json::{Map, Value};
use tauri::{AppHandle, Emitter, Runtime, State};
use tokio::time::timeout;
use tokio::sync::oneshot;
use super::{
constants::{DEFAULT_MCP_CONFIG, MCP_TOOL_CALL_TIMEOUT},
@ -179,6 +180,7 @@ pub async fn get_tools(state: State<'_, AppState>) -> Result<Vec<ToolWithServer>
/// * `state` - Application state containing MCP server connections
/// * `tool_name` - Name of the tool to call
/// * `arguments` - Optional map of argument names to values
/// * `cancellation_token` - Optional token to allow cancellation from JS side
///
/// # Returns
/// * `Result<CallToolResult, String>` - Result of the tool call if successful, or error message if failed
@ -187,13 +189,23 @@ pub async fn get_tools(state: State<'_, AppState>) -> Result<Vec<ToolWithServer>
/// 1. Locks the MCP servers mutex to access server connections
/// 2. Searches through all servers for one containing the named tool
/// 3. When found, calls the tool on that server with the provided arguments
/// 4. Returns error if no server has the requested tool
/// 4. Supports cancellation via cancellation_token
/// 5. Returns error if no server has the requested tool
#[tauri::command]
pub async fn call_tool(
state: State<'_, AppState>,
tool_name: String,
arguments: Option<Map<String, Value>>,
cancellation_token: Option<String>,
) -> Result<CallToolResult, String> {
// Set up cancellation if token is provided
let (cancel_tx, cancel_rx) = oneshot::channel::<()>();
if let Some(token) = &cancellation_token {
let mut cancellations = state.tool_call_cancellations.lock().await;
cancellations.insert(token.clone(), cancel_tx);
}
let servers = state.mcp_servers.lock().await;
// Iterate through servers and find the first one that contains the tool
@ -209,25 +221,77 @@ pub async fn call_tool(
println!("Found tool {} in server", tool_name);
// Call the tool with timeout
// Call the tool with timeout and cancellation support
let tool_call = service.call_tool(CallToolRequestParam {
name: tool_name.clone().into(),
arguments,
});
return match timeout(MCP_TOOL_CALL_TIMEOUT, tool_call).await {
Ok(result) => result.map_err(|e| e.to_string()),
Err(_) => Err(format!(
"Tool call '{}' timed out after {} seconds",
tool_name,
MCP_TOOL_CALL_TIMEOUT.as_secs()
)),
// Race between timeout, tool call, and cancellation
let result = if cancellation_token.is_some() {
tokio::select! {
result = timeout(MCP_TOOL_CALL_TIMEOUT, tool_call) => {
match result {
Ok(call_result) => call_result.map_err(|e| e.to_string()),
Err(_) => Err(format!(
"Tool call '{}' timed out after {} seconds",
tool_name,
MCP_TOOL_CALL_TIMEOUT.as_secs()
)),
}
}
_ = cancel_rx => {
Err(format!("Tool call '{}' was cancelled", tool_name))
}
}
} else {
match timeout(MCP_TOOL_CALL_TIMEOUT, tool_call).await {
Ok(call_result) => call_result.map_err(|e| e.to_string()),
Err(_) => Err(format!(
"Tool call '{}' timed out after {} seconds",
tool_name,
MCP_TOOL_CALL_TIMEOUT.as_secs()
)),
}
};
// Clean up cancellation token
if let Some(token) = &cancellation_token {
let mut cancellations = state.tool_call_cancellations.lock().await;
cancellations.remove(token);
}
return result;
}
Err(format!("Tool {} not found", tool_name))
}
/// Cancels a running tool call by its cancellation token
///
/// # Arguments
/// * `state` - Application state containing cancellation tokens
/// * `cancellation_token` - Token identifying the tool call to cancel
///
/// # Returns
/// * `Result<(), String>` - Success if token found and cancelled, error otherwise
#[tauri::command]
pub async fn cancel_tool_call(
state: State<'_, AppState>,
cancellation_token: String,
) -> Result<(), String> {
let mut cancellations = state.tool_call_cancellations.lock().await;
if let Some(cancel_tx) = cancellations.remove(&cancellation_token) {
// Send cancellation signal - ignore if receiver is already dropped
let _ = cancel_tx.send(());
println!("Tool call with token {} cancelled", cancellation_token);
Ok(())
} else {
Err(format!("Cancellation token {} not found", cancellation_token))
}
}
#[tauri::command]
pub async fn get_mcp_configs(app: AppHandle) -> Result<String, String> {
let mut path = get_jan_data_folder_path(app);


@ -7,10 +7,11 @@ use rmcp::{
ServiceExt,
};
use serde_json::Value;
use std::{collections::HashMap, env, sync::Arc, time::Duration};
use std::{collections::HashMap, env, process::Stdio, sync::Arc, time::Duration};
use tauri::{AppHandle, Emitter, Manager, Runtime, State};
use tauri_plugin_http::reqwest;
use tokio::{
io::AsyncReadExt,
process::Command,
sync::Mutex,
time::{sleep, timeout},
@ -647,23 +648,8 @@ async fn schedule_mcp_start_task<R: Runtime>(
{
cmd.creation_flags(0x08000000); // CREATE_NO_WINDOW: prevents shell window on Windows
}
let app_path_str = app_path.to_str().unwrap().to_string();
let log_file_path = format!("{}/logs/app.log", app_path_str);
match std::fs::OpenOptions::new()
.create(true)
.append(true)
.open(log_file_path)
{
Ok(file) => {
cmd.stderr(std::process::Stdio::from(file));
}
Err(err) => {
log::error!("Failed to open log file: {}", err);
}
};
cmd.kill_on_drop(true);
log::trace!("Command: {cmd:#?}");
config_params
.args
@ -678,26 +664,42 @@ async fn schedule_mcp_start_task<R: Runtime>(
}
});
let process = TokioChildProcess::new(cmd).map_err(|e| {
log::error!("Failed to run command {name}: {e}");
format!("Failed to run command {name}: {e}")
})?;
let (process, stderr) = TokioChildProcess::builder(cmd)
.stderr(Stdio::piped())
.spawn()
.map_err(|e| {
log::error!("Failed to run command {name}: {e}");
format!("Failed to run command {name}: {e}")
})?;
let service = ()
.serve(process)
.await
.map_err(|e| format!("Failed to start MCP server {name}: {e}"))?;
.map_err(|e| format!("Failed to start MCP server {name}: {e}"));
// Get peer info and clone the needed values before moving the service
let server_info = service.peer_info();
log::trace!("Connected to server: {server_info:#?}");
// Now move the service into the HashMap
servers
.lock()
.await
.insert(name.clone(), RunningServiceEnum::NoInit(service));
log::info!("Server {name} started successfully.");
match service {
Ok(server) => {
log::trace!("Connected to server: {:#?}", server.peer_info());
servers
.lock()
.await
.insert(name.clone(), RunningServiceEnum::NoInit(server));
log::info!("Server {name} started successfully.");
}
Err(_) => {
let mut buffer = String::new();
let error = match stderr
.expect("stderr must be piped")
.read_to_string(&mut buffer)
.await
{
Ok(_) => format!("Failed to start MCP server {name}: {buffer}"),
Err(_) => format!("Failed to read MCP server {name} stderr"),
};
log::error!("{error}");
return Err(error);
}
}
// Wait a short time to verify the server is stable before marking as connected
// This prevents race conditions where the server quits immediately
@ -754,7 +756,7 @@ pub fn extract_command_args(config: &Value) -> Option<McpServerConfig> {
command,
args,
envs,
headers
headers,
})
}


@ -6,7 +6,7 @@ use rmcp::{
service::RunningService,
RoleClient, ServiceError,
};
use tokio::sync::Mutex;
use tokio::sync::{Mutex, oneshot};
use tokio::task::JoinHandle;
/// Server handle type for managing the proxy server lifecycle
@ -27,6 +27,7 @@ pub struct AppState {
pub mcp_active_servers: Arc<Mutex<HashMap<String, serde_json::Value>>>,
pub mcp_successfully_connected: Arc<Mutex<HashMap<String, bool>>>,
pub server_handle: Arc<Mutex<Option<ServerHandle>>>,
pub tool_call_cancellations: Arc<Mutex<HashMap<String, oneshot::Sender<()>>>>,
}
impl RunningServiceEnum {


@ -74,6 +74,7 @@ pub fn run() {
// MCP commands
core::mcp::commands::get_tools,
core::mcp::commands::call_tool,
core::mcp::commands::cancel_tool_call,
core::mcp::commands::restart_mcp_servers,
core::mcp::commands::get_connected_servers,
core::mcp::commands::save_mcp_configs,
@ -105,6 +106,7 @@ pub fn run() {
mcp_active_servers: Arc::new(Mutex::new(HashMap::new())),
mcp_successfully_connected: Arc::new(Mutex::new(HashMap::new())),
server_handle: Arc::new(Mutex::new(None)),
tool_call_cancellations: Arc::new(Mutex::new(HashMap::new())),
})
.setup(|app| {
app.handle().plugin(


@ -35,7 +35,8 @@
"effects": ["fullScreenUI", "mica", "tabbed", "blur", "acrylic"],
"state": "active",
"radius": 8
}
},
"dragDropEnabled": false
}
],
"security": {

web-app/CONTRIBUTING.md (new file)

@ -0,0 +1,128 @@
# Contributing to Jan Web App
[← Back to Main Contributing Guide](../CONTRIBUTING.md)
React frontend using TypeScript, TanStack Router, Radix UI, and Tailwind CSS. State is managed with React state and Zustand.
## Key Directories
- **`/src/components/ui`** - UI components (buttons, dialogs, inputs)
- **`/src/containers`** - Complex feature components (ChatInput, ThreadContent)
- **`/src/hooks`** - Custom React hooks (useChat, useThreads, useAppState)
- **`/src/routes`** - TanStack Router pages
- **`/src/services`** - API layer for backend communication
- **`/src/types`** - TypeScript definitions
## Development
### Component Example
```tsx
interface Props {
title: string
onAction?: () => void
}
export const MyComponent: React.FC<Props> = ({ title, onAction }) => {
return (
<div className="flex items-center gap-2">
<h2>{title}</h2>
<Button onClick={onAction}>Action</Button>
</div>
)
}
```
### Routing
```tsx
export const Route = createFileRoute('/settings/general')({
component: GeneralSettings
})
```
### Building & Testing
```bash
# Development
yarn dev
yarn build
yarn test
```
### State Management
```tsx
// Local state
const [value, setValue] = useState<string>('')
// Global state (Zustand)
export const useAppState = create<AppState>((set) => ({
data: null,
setData: (data) => set({ data })
}))
```
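Components then read and update the store through the generated hook (a sketch, assuming the `useAppState` store defined above; the payload shape is illustrative):
```tsx
function DataStatus() {
  // Selectors keep re-renders scoped to the slices a component actually uses
  const data = useAppState((state) => state.data)
  const setData = useAppState((state) => state.setData)

  return (
    <button onClick={() => setData({ loadedAt: Date.now() })}>
      {data ? 'Refresh data' : 'Load data'}
    </button>
  )
}
```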
### Tauri Integration
```tsx
import { invoke } from '@tauri-apps/api/core'
const result = await invoke('command_name', { param: 'value' })
```
## Performance Tips
```tsx
// Use React.memo for expensive components
const ExpensiveComponent = React.memo(({ data }) => {
return <div>{processData(data)}</div>
})
// Debounce frequent updates
const debouncedValue = useDebounce(searchTerm, 300)
// Virtual scrolling for large lists
import { VariableSizeList } from 'react-window'
```
## Debugging
```typescript
// React DevTools
// Install the browser extension, then:
// - Inspect the component tree
// - Debug hooks and state
// - Profile performance

// Debug Tauri commands
console.log(await window.__TAURI__.invoke('command_name'))

// Check for console errors
// Press F12 → Console tab
```
## Accessibility Guidelines
- Use semantic HTML (`<button>`, `<nav>`, `<main>`)
- Add ARIA labels: `aria-label`, `aria-describedby`
- Ensure keyboard navigation works
- Test with screen readers
- Maintain color contrast ratios
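For example, an icon-only control needs an explicit accessible name, since it renders no visible text (a small sketch; the component and handler names are illustrative):
```tsx
function RemoveAttachmentButton({ onRemove }: { onRemove: () => void }) {
  return (
    // aria-label gives screen readers a name; the decorative glyph is hidden from them
    <button type="button" aria-label="Remove attachment" onClick={onRemove}>
      <span aria-hidden="true">×</span>
    </button>
  )
}
```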
## Best Practices
- Keep components small and focused
- Use TypeScript fully (no `any`)
- Handle loading and error states
- Follow accessibility guidelines
- Extract business logic into hooks
## Dependencies
- **React** - UI framework
- **TypeScript** - Type safety
- **TanStack Router** - Type-safe routing
- **Radix UI** - Accessible components
- **Tailwind CSS** - Utility-first styling
- **Zustand** - State management


@ -1,7 +1,7 @@
'use client'
import TextareaAutosize from 'react-textarea-autosize'
import { cn, toGigabytes } from '@/lib/utils'
import { cn } from '@/lib/utils'
import { usePrompt } from '@/hooks/usePrompt'
import { useThreads } from '@/hooks/useThreads'
import { useCallback, useEffect, useRef, useState } from 'react'
@ -14,7 +14,7 @@ import {
} from '@/components/ui/tooltip'
import { ArrowRight } from 'lucide-react'
import {
IconPaperclip,
IconPhoto,
IconWorld,
IconAtom,
IconEye,
@ -34,6 +34,7 @@ import DropdownModelProvider from '@/containers/DropdownModelProvider'
import { ModelLoader } from '@/containers/loaders/ModelLoader'
import DropdownToolsAvailable from '@/containers/DropdownToolsAvailable'
import { getConnectedServers } from '@/services/mcp'
import { checkMmprojExists } from '@/services/models'
type ChatInputProps = {
className?: string
@ -46,8 +47,13 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
const textareaRef = useRef<HTMLTextAreaElement>(null)
const [isFocused, setIsFocused] = useState(false)
const [rows, setRows] = useState(1)
const { streamingContent, abortControllers, loadingModel, tools } =
useAppState()
const {
streamingContent,
abortControllers,
loadingModel,
tools,
cancelToolCall,
} = useAppState()
const { prompt, setPrompt } = usePrompt()
const { currentThreadId } = useThreads()
const { t } = useTranslation()
@ -55,7 +61,7 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
const maxRows = 10
const { selectedModel } = useModelProvider()
const { selectedModel, selectedProvider } = useModelProvider()
const { sendMessage } = useChat()
const [message, setMessage] = useState('')
const [dropdownToolsAvailable, setDropdownToolsAvailable] = useState(false)
@ -70,6 +76,8 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
}>
>([])
const [connectedServers, setConnectedServers] = useState<string[]>([])
const [isDragOver, setIsDragOver] = useState(false)
const [hasMmproj, setHasMmproj] = useState(false)
// Check for connected MCP servers
useEffect(() => {
@ -91,6 +99,29 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
return () => clearInterval(intervalId)
}, [])
// Check for mmproj existence or vision capability when model changes
useEffect(() => {
const checkMmprojSupport = async () => {
if (selectedModel?.id) {
try {
// Only check mmproj for llamacpp provider
if (selectedProvider === 'llamacpp') {
const hasLocalMmproj = await checkMmprojExists(selectedModel.id)
setHasMmproj(hasLocalMmproj)
} else {
// For non-llamacpp providers, only check vision capability
setHasMmproj(true)
}
} catch (error) {
console.error('Error checking mmproj:', error)
setHasMmproj(false)
}
}
}
checkMmprojSupport()
}, [selectedModel?.id, selectedProvider])
// Check if there are active MCP servers
const hasActiveMCPServers = connectedServers.length > 0 || tools.length > 0
@ -99,11 +130,16 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
setMessage('Please select a model to start chatting.')
return
}
if (!prompt.trim()) {
if (!prompt.trim() && uploadedFiles.length === 0) {
return
}
setMessage('')
sendMessage(prompt)
sendMessage(
prompt,
true,
uploadedFiles.length > 0 ? uploadedFiles : undefined
)
setUploadedFiles([])
}
useEffect(() => {
@ -161,8 +197,9 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
const stopStreaming = useCallback(
(threadId: string) => {
abortControllers[threadId]?.abort()
cancelToolCall?.()
},
[abortControllers]
[abortControllers, cancelToolCall]
)
const fileInputRef = useRef<HTMLInputElement>(null)
@ -185,8 +222,6 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
return 'image/jpeg'
case 'png':
return 'image/png'
case 'pdf':
return 'application/pdf'
default:
return ''
}
@ -220,17 +255,12 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
const detectedType = file.type || getFileTypeFromExtension(file.name)
const actualType = getFileTypeFromExtension(file.name) || detectedType
// Check file type
const allowedTypes = [
'image/jpg',
'image/jpeg',
'image/png',
'application/pdf',
]
// Check file type - images only
const allowedTypes = ['image/jpg', 'image/jpeg', 'image/png']
if (!allowedTypes.includes(actualType)) {
setMessage(
`File is not supported. Only JPEG, JPG, PNG, and PDF files are allowed.`
`File attachments not supported currently. Only JPEG, JPG, and PNG files are allowed.`
)
// Reset file input to allow re-uploading
if (fileInputRef.current) {
@ -281,6 +311,104 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
}
}
const handleDragEnter = (e: React.DragEvent) => {
e.preventDefault()
e.stopPropagation()
// Only allow drag if model supports mmproj
if (hasMmproj) {
setIsDragOver(true)
}
}
const handleDragLeave = (e: React.DragEvent) => {
e.preventDefault()
e.stopPropagation()
// Only set dragOver to false if we're leaving the drop zone entirely
// In Tauri, relatedTarget can be null, so we need to handle that case
const relatedTarget = e.relatedTarget as Node | null
if (!relatedTarget || !e.currentTarget.contains(relatedTarget)) {
setIsDragOver(false)
}
}
const handleDragOver = (e: React.DragEvent) => {
e.preventDefault()
e.stopPropagation()
// Ensure drag state is maintained during drag over
if (hasMmproj) {
setIsDragOver(true)
}
}
const handleDrop = (e: React.DragEvent) => {
e.preventDefault()
e.stopPropagation()
setIsDragOver(false)
// Only allow drop if model supports mmproj
if (!hasMmproj) {
return
}
// Check if dataTransfer exists (it might not in some Tauri scenarios)
if (!e.dataTransfer) {
console.warn('No dataTransfer available in drop event')
return
}
const files = e.dataTransfer.files
if (files && files.length > 0) {
// Create a synthetic event to reuse existing file handling logic
const syntheticEvent = {
target: {
files: files,
},
} as React.ChangeEvent<HTMLInputElement>
handleFileChange(syntheticEvent)
}
}
const handlePaste = (e: React.ClipboardEvent) => {
const clipboardItems = e.clipboardData?.items
if (!clipboardItems) return
// Only allow paste if model supports mmproj
if (!hasMmproj) {
return
}
const imageItems = Array.from(clipboardItems).filter((item) =>
item.type.startsWith('image/')
)
if (imageItems.length > 0) {
e.preventDefault()
const files: File[] = []
let processedCount = 0
imageItems.forEach((item) => {
const file = item.getAsFile()
if (file) {
files.push(file)
}
processedCount++
// When all items are processed, handle the valid files
if (processedCount === imageItems.length && files.length > 0) {
const syntheticEvent = {
target: {
files: files,
},
} as unknown as React.ChangeEvent<HTMLInputElement>
handleFileChange(syntheticEvent)
}
})
}
}
return (
<div className="relative">
<div className="relative">
@ -305,8 +433,14 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
<div
className={cn(
'relative z-20 px-0 pb-10 border border-main-view-fg/5 rounded-lg text-main-view-fg bg-main-view',
isFocused && 'ring-1 ring-main-view-fg/10'
isFocused && 'ring-1 ring-main-view-fg/10',
isDragOver && 'ring-2 ring-accent border-accent'
)}
data-drop-zone={hasMmproj ? 'true' : undefined}
onDragEnter={hasMmproj ? handleDragEnter : undefined}
onDragLeave={hasMmproj ? handleDragLeave : undefined}
onDragOver={hasMmproj ? handleDragOver : undefined}
onDrop={hasMmproj ? handleDrop : undefined}
>
{uploadedFiles.length > 0 && (
<div className="flex gap-3 items-center p-2 pb-0">
@ -326,25 +460,6 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
alt={`${file.name} - ${index}`}
/>
)}
{file.type === 'application/pdf' && (
<div className="bg-main-view-fg/4 h-full rounded-lg p-2 max-w-[400px] pr-4">
<div className="flex gap-2 items-center justify-center h-full">
<div className="size-10 rounded-md bg-main-view shrink-0 flex items-center justify-center">
<span className="uppercase font-bold">
{file.name.split('.').pop()}
</span>
</div>
<div className="truncate">
<h6 className="truncate mb-0.5 text-main-view-fg/80">
{file.name}
</h6>
<p className="text-xs text-main-view-fg/70">
{toGigabytes(file.size)}
</p>
</div>
</div>
</div>
)}
<div
className="absolute -top-1 -right-2.5 bg-destructive size-5 flex rounded-full items-center justify-center cursor-pointer"
onClick={() => handleRemoveFile(index)}
@ -363,7 +478,7 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
rows={1}
maxRows={10}
value={prompt}
data-test-id={'chat-input'}
data-testid={'chat-input'}
onChange={(e) => {
setPrompt(e.target.value)
// Count the number of newlines to estimate rows
@ -372,14 +487,21 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
}}
onKeyDown={(e) => {
// e.keyCode 229 is for IME input with Safari
const isComposing = e.nativeEvent.isComposing || e.keyCode === 229;
if (e.key === 'Enter' && !e.shiftKey && prompt.trim() && !isComposing) {
const isComposing =
e.nativeEvent.isComposing || e.keyCode === 229
if (
e.key === 'Enter' &&
!e.shiftKey &&
prompt.trim() &&
!isComposing
) {
e.preventDefault()
// Submit the message when Enter is pressed without Shift
handleSendMesage(prompt)
// When Shift+Enter is pressed, a new line is added (default behavior)
}
}}
onPaste={hasMmproj ? handlePaste : undefined}
placeholder={t('common:placeholder.chatInput')}
autoFocus
spellCheck={spellCheckChatInput}
@ -400,7 +522,7 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
<div className="px-1 flex items-center gap-1">
<div
className={cn(
'px-1 flex items-center gap-1',
'px-1 flex items-center',
streamingContent && 'opacity-50 pointer-events-none'
)}
>
@ -412,19 +534,22 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
useLastUsedModel={initialMessage}
/>
)}
{/* File attachment - always available */}
<div
className="h-6 hidden p-1 items-center justify-center rounded-sm hover:bg-main-view-fg/10 transition-all duration-200 ease-in-out gap-1"
onClick={handleAttachmentClick}
>
<IconPaperclip size={18} className="text-main-view-fg/50" />
<input
type="file"
ref={fileInputRef}
className="hidden"
onChange={handleFileChange}
/>
</div>
{/* File attachment - show only for models with mmproj */}
{hasMmproj && (
<div
className="h-6 p-1 ml-1 flex items-center justify-center rounded-sm hover:bg-main-view-fg/10 transition-all duration-200 ease-in-out gap-1"
onClick={handleAttachmentClick}
>
<IconPhoto size={18} className="text-main-view-fg/50" />
<input
type="file"
ref={fileInputRef}
className="hidden"
multiple
onChange={handleFileChange}
/>
</div>
)}
{/* Microphone - always available - Temp Hide */}
{/* <div className="h-6 p-1 flex items-center justify-center rounded-sm hover:bg-main-view-fg/10 transition-all duration-200 ease-in-out gap-1">
<IconMicrophone size={18} className="text-main-view-fg/50" />
@ -568,9 +693,13 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
</Button>
) : (
<Button
variant={!prompt.trim() ? null : 'default'}
variant={
!prompt.trim() && uploadedFiles.length === 0
? null
: 'default'
}
size="icon"
disabled={!prompt.trim()}
disabled={!prompt.trim() && uploadedFiles.length === 0}
data-test-id="send-message-button"
onClick={() => handleSendMesage(prompt)}
>
@ -584,6 +713,7 @@ const ChatInput = ({ model, className, initialMessage }: ChatInputProps) => {
</div>
</div>
</div>
{message && (
<div className="bg-main-view-fg/2 -mt-0.5 mx-2 pb-2 px-3 pt-1.5 rounded-b-lg text-xs text-destructive transition-all duration-200 ease-in-out">
<div className="flex items-center gap-1 justify-between">


@ -1,4 +1,4 @@
import { useAppearance } from '@/hooks/useAppearance'
import { useAppearance, isDefaultColorAccent } from '@/hooks/useAppearance'
import { cn } from '@/lib/utils'
import { RgbaColor, RgbaColorPicker } from 'react-colorful'
import { IconColorPicker } from '@tabler/icons-react'
@ -37,10 +37,11 @@ export function ColorPickerAppAccentColor() {
<div className="flex items-center gap-1.5">
{predefineAppAccentBgColor.map((item, i) => {
const isSelected =
item.r === appAccentBgColor.r &&
(item.r === appAccentBgColor.r &&
item.g === appAccentBgColor.g &&
item.b === appAccentBgColor.b &&
item.a === appAccentBgColor.a
item.a === appAccentBgColor.a) ||
(isDefaultColorAccent(appAccentBgColor) && isDefaultColorAccent(item))
return (
<div
key={i}

View File

@ -1,4 +1,4 @@
import { useAppearance } from '@/hooks/useAppearance'
import { useAppearance, isDefaultColor } from '@/hooks/useAppearance'
import { cn } from '@/lib/utils'
import { RgbaColor, RgbaColorPicker } from 'react-colorful'
import { IconColorPicker } from '@tabler/icons-react'
@ -60,10 +60,11 @@ export function ColorPickerAppBgColor() {
<div className="flex items-center gap-1.5">
{predefineAppBgColor.map((item, i) => {
const isSelected =
item.r === appBgColor.r &&
(item.r === appBgColor.r &&
item.g === appBgColor.g &&
item.b === appBgColor.b &&
item.a === appBgColor.a
item.a === appBgColor.a) ||
(isDefaultColor(appBgColor) && isDefaultColor(item))
return (
<div
key={i}

View File

@ -1,4 +1,4 @@
import { useAppearance } from '@/hooks/useAppearance'
import { useAppearance, isDefaultColorDestructive } from '@/hooks/useAppearance'
import { cn } from '@/lib/utils'
import { RgbaColor, RgbaColorPicker } from 'react-colorful'
import { IconColorPicker } from '@tabler/icons-react'
@ -46,10 +46,11 @@ export function ColorPickerAppDestructiveColor() {
<div className="flex items-center gap-1.5">
{predefineAppDestructiveBgColor.map((item, i) => {
const isSelected =
item.r === appDestructiveBgColor.r &&
(item.r === appDestructiveBgColor.r &&
item.g === appDestructiveBgColor.g &&
item.b === appDestructiveBgColor.b &&
item.a === appDestructiveBgColor.a
item.a === appDestructiveBgColor.a) ||
(isDefaultColorDestructive(appDestructiveBgColor) && isDefaultColorDestructive(item))
return (
<div
key={i}

View File

@ -1,4 +1,4 @@
import { useAppearance } from '@/hooks/useAppearance'
import { useAppearance, isDefaultColorMainView } from '@/hooks/useAppearance'
import { cn } from '@/lib/utils'
import { RgbaColor, RgbaColorPicker } from 'react-colorful'
import { IconColorPicker } from '@tabler/icons-react'
@ -33,10 +33,11 @@ export function ColorPickerAppMainView() {
<div className="flex items-center gap-1.5">
{predefineAppMainViewBgColor.map((item, i) => {
const isSelected =
item.r === appMainViewBgColor.r &&
(item.r === appMainViewBgColor.r &&
item.g === appMainViewBgColor.g &&
item.b === appMainViewBgColor.b &&
item.a === appMainViewBgColor.a
item.a === appMainViewBgColor.a) ||
(isDefaultColorMainView(appMainViewBgColor) && isDefaultColorMainView(item))
return (
<div
key={i}

View File

@ -1,4 +1,4 @@
import { useAppearance } from '@/hooks/useAppearance'
import { useAppearance, isDefaultColorPrimary } from '@/hooks/useAppearance'
import { cn } from '@/lib/utils'
import { RgbaColor, RgbaColorPicker } from 'react-colorful'
import { IconColorPicker } from '@tabler/icons-react'
@ -42,10 +42,11 @@ export function ColorPickerAppPrimaryColor() {
<div className="flex items-center gap-1.5">
{predefineappPrimaryBgColor.map((item, i) => {
const isSelected =
item.r === appPrimaryBgColor.r &&
(item.r === appPrimaryBgColor.r &&
item.g === appPrimaryBgColor.g &&
item.b === appPrimaryBgColor.b &&
item.a === appPrimaryBgColor.a
item.a === appPrimaryBgColor.a) ||
(isDefaultColorPrimary(appPrimaryBgColor) && isDefaultColorPrimary(item))
return (
<div
key={i}

View File

@ -19,6 +19,7 @@ import { localStorageKey } from '@/constants/localStorage'
import { useTranslation } from '@/i18n/react-i18next-compat'
import { useFavoriteModel } from '@/hooks/useFavoriteModel'
import { predefinedProviders } from '@/consts/providers'
import { checkMmprojExistsAndUpdateOffloadMMprojSetting } from '@/services/models'
type DropdownModelProviderProps = {
model?: ThreadModel
@ -66,6 +67,7 @@ const DropdownModelProvider = ({
getModelBy,
selectedProvider,
selectedModel,
updateProvider,
} = useModelProvider()
const [displayModel, setDisplayModel] = useState<string>('')
const { updateCurrentThreadModel } = useThreads()
@ -79,31 +81,52 @@ const DropdownModelProvider = ({
const searchInputRef = useRef<HTMLInputElement>(null)
// Helper function to check if a model exists in providers
const checkModelExists = useCallback((providerName: string, modelId: string) => {
const provider = providers.find(
(p) => p.provider === providerName && p.active
)
return provider?.models.find((m) => m.id === modelId)
}, [providers])
const checkModelExists = useCallback(
(providerName: string, modelId: string) => {
const provider = providers.find(
(p) => p.provider === providerName && p.active
)
return provider?.models.find((m) => m.id === modelId)
},
[providers]
)
// Initialize model provider only once
useEffect(() => {
// Auto select model when existing thread is passed
if (model) {
selectModelProvider(model?.provider as string, model?.id as string)
if (!checkModelExists(model.provider, model.id)) {
selectModelProvider('', '')
}
} else if (useLastUsedModel) {
// Try to use last used model only when explicitly requested (for new chat)
const lastUsed = getLastUsedModel()
if (lastUsed && checkModelExists(lastUsed.provider, lastUsed.model)) {
selectModelProvider(lastUsed.provider, lastUsed.model)
} else {
// Fallback to default model if last used model no longer exists
selectModelProvider('', '')
const initializeModel = async () => {
// Auto select model when existing thread is passed
if (model) {
selectModelProvider(model?.provider as string, model?.id as string)
if (!checkModelExists(model.provider, model.id)) {
selectModelProvider('', '')
}
// Check mmproj existence for llamacpp models
if (model?.provider === 'llamacpp') {
await checkMmprojExistsAndUpdateOffloadMMprojSetting(
model.id as string,
updateProvider,
getProviderByName
)
}
} else if (useLastUsedModel) {
// Try to use last used model only when explicitly requested (for new chat)
const lastUsed = getLastUsedModel()
if (lastUsed && checkModelExists(lastUsed.provider, lastUsed.model)) {
selectModelProvider(lastUsed.provider, lastUsed.model)
if (lastUsed.provider === 'llamacpp') {
await checkMmprojExistsAndUpdateOffloadMMprojSetting(
lastUsed.model,
updateProvider,
getProviderByName
)
}
} else {
selectModelProvider('', '')
}
}
}
initializeModel()
}, [
model,
selectModelProvider,
@ -111,6 +134,8 @@ const DropdownModelProvider = ({
providers,
useLastUsedModel,
checkModelExists,
updateProvider,
getProviderByName,
])
// Update display model when selection changes
@ -245,7 +270,7 @@ const DropdownModelProvider = ({
}, [filteredItems, providers, searchValue, favoriteModels])
const handleSelect = useCallback(
(searchableModel: SearchableModel) => {
async (searchableModel: SearchableModel) => {
selectModelProvider(
searchableModel.provider.provider,
searchableModel.model.id
@ -254,6 +279,16 @@ const DropdownModelProvider = ({
id: searchableModel.model.id,
provider: searchableModel.provider.provider,
})
// Check mmproj existence for llamacpp models
if (searchableModel.provider.provider === 'llamacpp') {
await checkMmprojExistsAndUpdateOffloadMMprojSetting(
searchableModel.model.id,
updateProvider,
getProviderByName
)
}
// Store the selected model as last used
if (useLastUsedModel) {
setLastUsedModel(
@ -264,7 +299,13 @@ const DropdownModelProvider = ({
setSearchValue('')
setOpen(false)
},
[selectModelProvider, updateCurrentThreadModel, useLastUsedModel]
[
selectModelProvider,
updateCurrentThreadModel,
useLastUsedModel,
updateProvider,
getProviderByName,
]
)
const currentModel = selectedModel?.id

View File

@ -70,8 +70,8 @@ export function ModelSetting({
models: updatedModels,
})
// Call debounced stopModel only when updating ctx_len or ngl
if (key === 'ctx_len' || key === 'ngl' || key === 'chat_template') {
// Call debounced stopModel only when updating ctx_len, ngl, chat_template, or offload_mmproj
if (key === 'ctx_len' || key === 'ngl' || key === 'chat_template' || key === 'offload_mmproj') {
debouncedStopModel(model.id)
}
}

View File

@ -1,3 +1,4 @@
/* eslint-disable @typescript-eslint/no-explicit-any */
import { ThreadMessage } from '@janhq/core'
import { RenderMarkdown } from './RenderMarkdown'
import React, { Fragment, memo, useCallback, useMemo, useState } from 'react'
@ -144,7 +145,7 @@ export const ThreadContent = memo(
isLastMessage?: boolean
index?: number
showAssistant?: boolean
// eslint-disable-next-line @typescript-eslint/no-explicit-any
streamTools?: any
contextOverflowModal?: React.ReactNode | null
updateMessage?: (item: ThreadMessage, message: string) => void
@ -172,9 +173,12 @@ export const ThreadContent = memo(
const { reasoningSegment, textSegment } = useMemo(() => {
// Check for thinking formats
const hasThinkTag = text.includes('<think>') && !text.includes('</think>')
const hasAnalysisChannel = text.includes('<|channel|>analysis<|message|>') && !text.includes('<|start|>assistant<|channel|>final<|message|>')
if (hasThinkTag || hasAnalysisChannel) return { reasoningSegment: text, textSegment: '' }
const hasAnalysisChannel =
text.includes('<|channel|>analysis<|message|>') &&
!text.includes('<|start|>assistant<|channel|>final<|message|>')
if (hasThinkTag || hasAnalysisChannel)
return { reasoningSegment: text, textSegment: '' }
// Check for completed think tag format
const thinkMatch = text.match(/<think>([\s\S]*?)<\/think>/)
@ -187,7 +191,9 @@ export const ThreadContent = memo(
}
// Check for completed analysis channel format
const analysisMatch = text.match(/<\|channel\|>analysis<\|message\|>([\s\S]*?)<\|start\|>assistant<\|channel\|>final<\|message\|>/)
const analysisMatch = text.match(
/<\|channel\|>analysis<\|message\|>([\s\S]*?)<\|start\|>assistant<\|channel\|>final<\|message\|>/
)
if (analysisMatch?.index !== undefined) {
const splitIndex = analysisMatch.index + analysisMatch[0].length
return {
@ -213,7 +219,36 @@ export const ThreadContent = memo(
}
if (toSendMessage) {
deleteMessage(toSendMessage.thread_id, toSendMessage.id ?? '')
sendMessage(toSendMessage.content?.[0]?.text?.value || '')
// Extract text content and any attachments
const textContent =
toSendMessage.content?.find((c) => c.type === 'text')?.text?.value ||
''
const attachments = toSendMessage.content
?.filter((c) => (c.type === 'image_url' && c.image_url?.url) || false)
.map((c) => {
if (c.type === 'image_url' && c.image_url?.url) {
const url = c.image_url.url
const [mimeType, base64] = url
.replace('data:', '')
.split(';base64,')
return {
name: 'image', // We don't have the original filename
type: mimeType,
size: 0, // We don't have the original size
base64: base64,
dataUrl: url,
}
}
return null
})
.filter(Boolean) as Array<{
name: string
type: string
size: number
base64: string
dataUrl: string
}>
sendMessage(textContent, true, attachments)
}
}, [deleteMessage, getMessages, item, sendMessage])
@ -255,22 +290,68 @@ export const ThreadContent = memo(
return (
<Fragment>
{item.content?.[0]?.text && item.role === 'user' && (
{item.role === 'user' && (
<div className="w-full">
<div className="flex justify-end w-full h-full text-start break-words whitespace-normal">
<div className="bg-main-view-fg/4 relative text-main-view-fg p-2 rounded-md inline-block max-w-[80%] ">
<div className="select-text">
<RenderMarkdown
content={item.content?.[0].text.value}
components={linkComponents}
isUser
/>
{/* Render attachments above the message bubble */}
{item.content?.some(
(c) => (c.type === 'image_url' && c.image_url?.url) || false
) && (
<div className="flex justify-end w-full mb-2">
<div className="flex flex-wrap gap-2 max-w-[80%] justify-end">
{item.content
?.filter(
(c) =>
(c.type === 'image_url' && c.image_url?.url) || false
)
.map((contentPart, index) => {
// Handle images
if (
contentPart.type === 'image_url' &&
contentPart.image_url?.url
) {
return (
<div key={index} className="relative">
<img
src={contentPart.image_url.url}
alt="Uploaded attachment"
className="size-40 rounded-md object-cover border border-main-view-fg/10"
/>
</div>
)
}
return null
})}
</div>
</div>
</div>
)}
{/* Render text content in the message bubble */}
{item.content?.some((c) => c.type === 'text' && c.text?.value) && (
<div className="flex justify-end w-full h-full text-start break-words whitespace-normal">
<div className="bg-main-view-fg/4 relative text-main-view-fg p-2 rounded-md inline-block max-w-[80%] ">
<div className="select-text">
{item.content
?.filter((c) => c.type === 'text' && c.text?.value)
.map((contentPart, index) => (
<div key={index}>
<RenderMarkdown
content={contentPart.text!.value}
components={linkComponents}
isUser
/>
</div>
))}
</div>
</div>
</div>
)}
<div className="flex items-center justify-end gap-2 text-main-view-fg/60 text-xs mt-2">
<EditDialog
message={item.content?.[0]?.text.value}
message={
item.content?.find((c) => c.type === 'text')?.text?.value ||
''
}
setMessage={(message) => {
if (item.updateMessage) {
item.updateMessage(item, message)

View File

@ -73,6 +73,11 @@ vi.mock('@/services/mcp', () => ({
vi.mock('@/services/models', () => ({
stopAllModels: vi.fn(),
checkMmprojExists: vi.fn(() => Promise.resolve(true)),
}))
vi.mock('../MovingBorder', () => ({
MovingBorder: ({ children }: { children: React.ReactNode }) => <div data-testid="moving-border">{children}</div>,
}))
describe('ChatInput', () => {
@ -231,7 +236,7 @@ describe('ChatInput', () => {
const sendButton = document.querySelector('[data-test-id="send-message-button"]')
await user.click(sendButton)
expect(mockSendMessage).toHaveBeenCalledWith('Hello world')
expect(mockSendMessage).toHaveBeenCalledWith('Hello world', true, undefined)
})
it('sends message when Enter key is pressed', async () => {
@ -248,7 +253,7 @@ describe('ChatInput', () => {
const textarea = screen.getByRole('textbox')
await user.type(textarea, '{Enter}')
expect(mockSendMessage).toHaveBeenCalledWith('Hello world')
expect(mockSendMessage).toHaveBeenCalledWith('Hello world', true, undefined)
})
it('does not send message when Shift+Enter is pressed', async () => {
@ -343,9 +348,12 @@ describe('ChatInput', () => {
const user = userEvent.setup()
renderWithRouter()
// File upload is rendered as hidden input element
const fileInput = document.querySelector('input[type="file"]')
expect(fileInput).toBeInTheDocument()
// Wait for async effects to complete (mmproj check)
await waitFor(() => {
// File upload is rendered as hidden input element
const fileInput = document.querySelector('input[type="file"]')
expect(fileInput).toBeInTheDocument()
})
})
it('disables input when streaming', () => {
@ -361,7 +369,7 @@ describe('ChatInput', () => {
renderWithRouter()
})
const textarea = screen.getByRole('textbox')
const textarea = screen.getByTestId('chat-input')
expect(textarea).toBeDisabled()
})
@ -378,4 +386,28 @@ describe('ChatInput', () => {
expect(toolsIcon).toBeInTheDocument()
})
})
it('uses selectedProvider for provider checks', () => {
// Test that the component correctly uses selectedProvider instead of selectedModel.provider
vi.mocked(useModelProvider).mockReturnValue({
selectedModel: {
id: 'test-model',
capabilities: ['vision'],
},
providers: [],
getModelBy: vi.fn(),
selectModelProvider: vi.fn(),
selectedProvider: 'llamacpp',
setProviders: vi.fn(),
getProviderByName: vi.fn(),
updateProvider: vi.fn(),
addProvider: vi.fn(),
deleteProvider: vi.fn(),
deleteModel: vi.fn(),
deletedModels: [],
})
// This test ensures the component renders without errors when using selectedProvider
expect(() => renderWithRouter()).not.toThrow()
})
})

View File

@ -62,6 +62,7 @@ export default function AddEditAssistant({
const [showEmojiPicker, setShowEmojiPicker] = useState(false)
const emojiPickerRef = useRef<HTMLDivElement>(null)
const [nameError, setNameError] = useState<string | null>(null)
const [toolSteps, setToolSteps] = useState(20)
// Handle click outside emoji picker
useEffect(() => {
@ -90,6 +91,7 @@ export default function AddEditAssistant({
setName(initialData.name)
setDescription(initialData.description)
setInstructions(initialData.instructions)
setToolSteps(initialData.tool_steps ?? 20)
// Convert parameters object to arrays of keys and values
const keys = Object.keys(initialData.parameters || {})
const values = Object.values(initialData.parameters || {})
@ -120,6 +122,7 @@ export default function AddEditAssistant({
setParamsValues([''])
setParamsTypes(['string'])
setNameError(null)
setToolSteps(20)
}
const handleParameterChange = (
@ -216,6 +219,7 @@ export default function AddEditAssistant({
description,
instructions,
parameters: parameters || {},
tool_steps: toolSteps,
}
onSave(assistant)
onOpenChange(false)
@ -326,6 +330,29 @@ export default function AddEditAssistant({
</div>
</div>
<div className="space-y-2 my-4 mt-6">
<div className="flex items-center justify-between">
<label className="text-sm">{t('common:settings')}</label>
</div>
<div className="flex justify-between items-center gap-2">
<div className="w-full">
<p className="text-sm">{t('assistants:maxToolSteps')}</p>
</div>
<Input
value={toolSteps}
type="number"
min={0}
onChange={(e) => {
const newSteps = e.target.value
const stepNumber = Number(newSteps)
setToolSteps(isNaN(stepNumber) ? 20 : stepNumber)
}}
placeholder="20"
className="w-18 text-right"
/>
</div>
</div>
<div className="space-y-2 my-4">
<div className="flex items-center justify-between">
<label className="text-sm">

View File

@ -0,0 +1,123 @@
import {
Dialog,
DialogContent,
DialogDescription,
DialogFooter,
DialogHeader,
DialogTitle,
} from '@/components/ui/dialog'
import { Button } from '@/components/ui/button'
import { AlertTriangle, ChevronDown, ChevronRight } from 'lucide-react'
import { IconCopy, IconCopyCheck } from '@tabler/icons-react'
import { useTranslation } from '@/i18n/react-i18next-compat'
import { toast } from 'sonner'
import { useState } from 'react'
import { useAppState } from '@/hooks/useAppState'
export default function ErrorDialog() {
const { t } = useTranslation()
const { errorMessage, setErrorMessage } = useAppState()
const [isCopying, setIsCopying] = useState(false)
const [isDetailExpanded, setIsDetailExpanded] = useState(true)
const handleCopy = async () => {
setIsCopying(true)
try {
await navigator.clipboard.writeText(errorMessage?.message ?? '')
toast.success('Copy successful', {
id: 'copy-model',
description: 'Error information copied to clipboard',
})
} catch {
toast.error('Failed to copy', {
id: 'copy-model-error',
description: 'Failed to copy error information to clipboard',
})
} finally {
setTimeout(() => setIsCopying(false), 2000)
}
}
const handleDialogOpen = (open: boolean) => {
setErrorMessage(open ? errorMessage : undefined)
}
return (
<Dialog open={!!errorMessage} onOpenChange={handleDialogOpen}>
<DialogContent showCloseButton={false}>
<DialogHeader>
<div className="flex items-start gap-3">
<div className="shrink-0">
<AlertTriangle className="size-4 text-destructive" />
</div>
<div>
<DialogTitle>{t('common:error')}</DialogTitle>
<DialogDescription className="mt-1 text-main-view-fg/70">
{errorMessage?.title ?? 'Something went wrong'}
</DialogDescription>
</div>
</div>
</DialogHeader>
<div className="bg-main-view-fg/2 p-2 border border-main-view-fg/5 rounded-lg space-y-2">
<div>
<button
onClick={() => setIsDetailExpanded(!isDetailExpanded)}
className="flex items-center gap-1 text-sm text-main-view-fg/60 hover:text-main-view-fg/80 transition-colors cursor-pointer"
>
{isDetailExpanded ? (
<ChevronDown className="size-3" />
) : (
<ChevronRight className="size-3" />
)}
Details
</button>
{isDetailExpanded && (
<div
className="mt-2 text-sm text-main-view-fg/70 leading-relaxed max-h-[150px] overflow-y-auto break-all bg-main-view-fg/10 p-2 rounded border border-main-view-fg/5"
ref={(el) => {
if (el) {
el.scrollTop = el.scrollHeight
}
}}
>
{errorMessage?.message}
</div>
)}
</div>
<span className="text-sm text-main-view-fg/60">{errorMessage?.subtitle}</span>
</div>
<DialogFooter className="flex flex-col gap-2 sm:flex-row sm:justify-end">
<Button
variant="link"
onClick={() => handleDialogOpen(false)}
className="flex-1 text-right sm:flex-none"
>
{t('common:cancel')}
</Button>
<Button
variant="link"
onClick={() => handleCopy()}
disabled={isCopying}
autoFocus
className="flex-1 text-right sm:flex-none border border-main-view-fg/20 !px-2"
>
{isCopying ? (
<>
<IconCopyCheck className="text-accent" />
{t('common:copied')}
</>
) : (
<>
<IconCopy />
{t('common:copy')}
</>
)}
</Button>
</DialogFooter>
</DialogContent>
</Dialog>
)
}

View File

@ -4,6 +4,12 @@ import { MCPTool } from '@/types/completion'
import { useAssistant } from './useAssistant'
import { ChatCompletionMessageToolCall } from 'openai/resources'
type AppErrorMessage = {
message?: string
title?: string
subtitle: string
}
type AppState = {
streamingContent?: ThreadMessage
loadingModel?: boolean
@ -13,6 +19,8 @@ type AppState = {
tokenSpeed?: TokenSpeed
currentToolCall?: ChatCompletionMessageToolCall
showOutOfContextDialog?: boolean
errorMessage?: AppErrorMessage
cancelToolCall?: () => void
setServerStatus: (value: 'running' | 'stopped' | 'pending') => void
updateStreamingContent: (content: ThreadMessage | undefined) => void
updateCurrentToolCall: (
@ -24,6 +32,8 @@ type AppState = {
updateTokenSpeed: (message: ThreadMessage, increment?: number) => void
resetTokenSpeed: () => void
setOutOfContextDialog: (show: boolean) => void
setCancelToolCall: (cancel: (() => void) | undefined) => void
setErrorMessage: (error: AppErrorMessage | undefined) => void
}
export const useAppState = create<AppState>()((set) => ({
@ -34,6 +44,7 @@ export const useAppState = create<AppState>()((set) => ({
abortControllers: {},
tokenSpeed: undefined,
currentToolCall: undefined,
cancelToolCall: undefined,
updateStreamingContent: (content: ThreadMessage | undefined) => {
const assistants = useAssistant.getState().assistants
const currentAssistant = useAssistant.getState().currentAssistant
@ -112,4 +123,14 @@ export const useAppState = create<AppState>()((set) => ({
showOutOfContextDialog: show,
}))
},
setCancelToolCall: (cancel) => {
set(() => ({
cancelToolCall: cancel,
}))
},
setErrorMessage: (error) => {
set(() => ({
errorMessage: error,
}))
},
}))

View File

@ -204,7 +204,17 @@ export const useChat = () => {
)
const sendMessage = useCallback(
async (message: string, troubleshooting = true) => {
async (
message: string,
troubleshooting = true,
attachments?: Array<{
name: string
type: string
size: number
base64: string
dataUrl: string
}>
) => {
const activeThread = await getCurrentThread()
resetTokenSpeed()
@ -218,7 +228,7 @@ export const useChat = () => {
updateStreamingContent(emptyThreadContent)
// Do not add new message on retry
if (troubleshooting)
addMessage(newUserThreadContent(activeThread.id, message))
addMessage(newUserThreadContent(activeThread.id, message, attachments))
updateThreadTimestamp(activeThread.id)
setPrompt('')
try {
@ -232,7 +242,7 @@ export const useChat = () => {
messages,
renderInstructions(currentAssistant?.instructions)
)
if (troubleshooting) builder.addUserMessage(message)
if (troubleshooting) builder.addUserMessage(message, attachments)
let isCompleted = false
@ -245,8 +255,8 @@ export const useChat = () => {
})
: []
// TODO: Later replaced by Agent setup?
const followUpWithToolUse = true
let assistantLoopSteps = 0
while (
!isCompleted &&
!abortController.signal.aborted &&
@ -255,6 +265,7 @@ export const useChat = () => {
const modelConfig = activeProvider.models.find(
(m) => m.id === selectedModel?.id
)
assistantLoopSteps += 1
const modelSettings = modelConfig?.settings
? Object.fromEntries(
@ -499,7 +510,11 @@ export const useChat = () => {
isCompleted = !toolCalls.length
// Do not create agent loop if there is no need for it
if (!followUpWithToolUse) availableTools = []
// Check if assistant loop steps are within limits
if (assistantLoopSteps >= (currentAssistant?.tool_steps ?? 20)) {
// Stop the assistant tool call if it exceeds the maximum steps
availableTools = []
}
}
} catch (error) {
if (!abortController.signal.aborted) {

View File

@ -1,3 +1,4 @@
/* eslint-disable @typescript-eslint/no-explicit-any */
import {
ContentType,
ChatCompletionRole,
@ -31,8 +32,9 @@ import { ulid } from 'ulidx'
import { MCPTool } from '@/types/completion'
import { CompletionMessagesBuilder } from './messages'
import { ChatCompletionMessageToolCall } from 'openai/resources'
import { callTool } from '@/services/mcp'
import { callToolWithCancellation } from '@/services/mcp'
import { ExtensionManager } from './extension'
import { useAppState } from '@/hooks/useAppState'
export type ChatCompletionResponse =
| chatCompletion
@ -50,11 +52,16 @@ export type ChatCompletionResponse =
*/
export const newUserThreadContent = (
threadId: string,
content: string
): ThreadMessage => ({
type: 'text',
role: ChatCompletionRole.User,
content: [
content: string,
attachments?: Array<{
name: string
type: string
size: number
base64: string
dataUrl: string
}>
): ThreadMessage => {
const contentParts = [
{
type: ContentType.Text,
text: {
@ -62,14 +69,35 @@ export const newUserThreadContent = (
annotations: [],
},
},
],
id: ulid(),
object: 'thread.message',
thread_id: threadId,
status: MessageStatus.Ready,
created_at: 0,
completed_at: 0,
})
]
// Add attachments to content array
if (attachments) {
attachments.forEach((attachment) => {
if (attachment.type.startsWith('image/')) {
contentParts.push({
type: ContentType.Image,
image_url: {
url: `data:${attachment.type};base64,${attachment.base64}`,
detail: 'auto',
},
} as any)
}
})
}
return {
type: 'text',
role: ChatCompletionRole.User,
content: contentParts,
id: ulid(),
object: 'thread.message',
thread_id: threadId,
status: MessageStatus.Ready,
created_at: 0,
completed_at: 0,
}
}
/**
* @fileoverview Helper functions for creating thread content.
* These functions are used to create thread content objects
@ -161,13 +189,11 @@ export const sendCompletion = async (
if (
thread.model.id &&
!Object.values(models[providerName]).flat().includes(thread.model.id) &&
// eslint-disable-next-line @typescript-eslint/no-explicit-any
!tokenJS.extendedModelExist(providerName as any, thread.model.id) &&
provider.provider !== 'llamacpp'
) {
try {
tokenJS.extendModelList(
// eslint-disable-next-line @typescript-eslint/no-explicit-any
providerName as any,
thread.model.id,
// This is to inherit the model capabilities from another built-in model
@ -200,7 +226,7 @@ export const sendCompletion = async (
? await tokenJS.chat.completions.create(
{
stream: true,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
provider: providerName as any,
model: thread.model?.id,
messages,
@ -381,13 +407,17 @@ export const postMessageProcessing = async (
)
: true)
const { promise, cancel } = callToolWithCancellation({
toolName: toolCall.function.name,
arguments: toolCall.function.arguments.length
? JSON.parse(toolCall.function.arguments)
: {},
})
useAppState.getState().setCancelToolCall(cancel)
let result = approved
? await callTool({
toolName: toolCall.function.name,
arguments: toolCall.function.arguments.length
? JSON.parse(toolCall.function.arguments)
: {},
}).catch((e) => {
? await promise.catch((e) => {
console.error('Tool call failed:', e)
return {
content: [

View File

@ -1,3 +1,4 @@
/* eslint-disable @typescript-eslint/no-explicit-any */
import { ChatCompletionMessageParam } from 'token.js'
import { ChatCompletionMessageToolCall } from 'openai/resources'
import { ThreadMessage } from '@janhq/core'
@ -19,32 +20,106 @@ export class CompletionMessagesBuilder {
this.messages.push(
...messages
.filter((e) => !e.metadata?.error)
.map<ChatCompletionMessageParam>(
(msg) =>
({
.map<ChatCompletionMessageParam>((msg) => {
if (msg.role === 'assistant') {
return {
role: msg.role,
content:
msg.role === 'assistant'
? this.normalizeContent(msg.content[0]?.text?.value || '.')
: msg.content[0]?.text?.value || '.',
}) as ChatCompletionMessageParam
)
content: this.normalizeContent(
msg.content[0]?.text?.value || '.'
),
} as ChatCompletionMessageParam
} else {
// For user messages, handle multimodal content
if (msg.content.length > 1) {
// Multiple content parts (text + images + files)
const content = msg.content.map((contentPart) => {
if (contentPart.type === 'text') {
return {
type: 'text',
text: contentPart.text?.value || '',
}
} else if (contentPart.type === 'image_url') {
return {
type: 'image_url',
image_url: {
url: contentPart.image_url?.url || '',
detail: contentPart.image_url?.detail || 'auto',
},
}
} else {
return contentPart
}
})
return {
role: msg.role,
content,
} as ChatCompletionMessageParam
} else {
// Single text content
return {
role: msg.role,
content: msg.content[0]?.text?.value || '.',
} as ChatCompletionMessageParam
}
}
})
)
}
/**
* Add a user message to the messages array.
* @param content - The content of the user message.
* @param attachments - Optional attachments for the message.
*/
addUserMessage(content: string) {
addUserMessage(
content: string,
attachments?: Array<{
name: string
type: string
size: number
base64: string
dataUrl: string
}>
) {
// Ensure no consecutive user messages
if (this.messages[this.messages.length - 1]?.role === 'user') {
this.messages.pop()
}
this.messages.push({
role: 'user',
content: content,
})
// Handle multimodal content with attachments
if (attachments && attachments.length > 0) {
const messageContent: any[] = [
{
type: 'text',
text: content,
},
]
// Add attachments (images and PDFs)
attachments.forEach((attachment) => {
if (attachment.type.startsWith('image/')) {
messageContent.push({
type: 'image_url',
image_url: {
url: `data:${attachment.type};base64,${attachment.base64}`,
detail: 'auto',
},
})
}
})
this.messages.push({
role: 'user',
content: messageContent,
} as any)
} else {
// Text-only message
this.messages.push({
role: 'user',
content: content,
})
}
}
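A minimal usage sketch for the new multimodal addUserMessage signature above (not part of the commit): the import path is illustrative, the constructor arguments follow how useChat builds the class, and the attachment values are placeholders.

import { CompletionMessagesBuilder } from '@/lib/messages'

const builder = new CompletionMessagesBuilder([], 'You are a helpful assistant.')

builder.addUserMessage('What is in this screenshot?', [
  {
    name: 'screenshot.png', // placeholder filename
    type: 'image/png', // image/* MIME types take the image_url branch
    size: 48213, // bytes, informational only
    base64: '<base64-data>', // placeholder payload
    dataUrl: 'data:image/png;base64,<base64-data>',
  },
])

// The last message is now multimodal:
// [{ type: 'text', text: '...' },
//  { type: 'image_url', image_url: { url: 'data:image/png;base64,...', detail: 'auto' } }]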
/**

View File

@ -5,6 +5,7 @@ export const AppRoutes = [
'installExtensions',
'getTools',
'callTool',
'cancelToolCall',
'listThreads',
'createThread',
'modifyThread',

View File

@ -30,5 +30,6 @@
"createNew": "Neuen Assistenten anlegen",
"personality": "Persönlichkeit",
"capabilities": "Fähigkeiten",
"instructionsDateHint": "Tipp: Verwenden Sie {{current_date}}, um das heutige Datum einzufügen."
"instructionsDateHint": "Tipp: Verwenden Sie {{current_date}}, um das heutige Datum einzufügen.",
"maxToolSteps": "Maximale Werkzeugschritte"
}

View File

@ -30,5 +30,6 @@
"createNew": "Create New Assistant",
"personality": "Personality",
"capabilities": "Capabilities",
"instructionsDateHint": "Tip: Use {{current_date}} to insert todays date."
"instructionsDateHint": "Tip: Use {{current_date}} to insert todays date.",
"maxToolSteps": "Max tool steps"
}

View File

@ -30,5 +30,6 @@
"createNew": "Buat Asisten Baru",
"personality": "Kepribadian",
"capabilities": "Kemampuan",
"instructionsDateHint": "Tips: Gunakan {{current_date}} untuk menyisipkan tanggal hari ini."
"instructionsDateHint": "Tips: Gunakan {{current_date}} untuk menyisipkan tanggal hari ini.",
"maxToolSteps": "Langkah alat maksimum"
}

View File

@ -30,5 +30,6 @@
"createNew": "Tạo Trợ lý Mới",
"personality": "Tính cách",
"capabilities": "Khả năng",
"instructionsDateHint": "Mẹo: Dùng {{current_date}} để chèn ngày hôm nay."
"instructionsDateHint": "Mẹo: Dùng {{current_date}} để chèn ngày hôm nay.",
"maxToolSteps": "Bước tối đa của công cụ"
}

View File

@ -30,5 +30,6 @@
"createNew": "创建新助手",
"personality": "个性",
"capabilities": "能力",
"instructionsDateHint": "提示:使用 {{current_date}} 插入今天的日期。"
"instructionsDateHint": "提示:使用 {{current_date}} 插入今天的日期。",
"maxToolSteps": "最大工具步骤"
}

View File

@ -30,5 +30,6 @@
"createNew": "建立新助理",
"personality": "個性",
"capabilities": "能力",
"instructionsDateHint": "提示:使用 {{current_date}} 插入今天的日期。"
"instructionsDateHint": "提示:使用 {{current_date}} 插入今天的日期。",
"maxToolSteps": "最大工具步驟"
}

View File

@ -26,9 +26,10 @@ import {
ResizablePanel,
ResizableHandle,
} from '@/components/ui/resizable'
import { useCallback } from 'react'
import { useCallback, useEffect } from 'react'
import GlobalError from '@/containers/GlobalError'
import { GlobalEventHandler } from '@/providers/GlobalEventHandler'
import ErrorDialog from '@/containers/dialogs/ErrorDialog'
export const Route = createRootRoute({
component: RootLayout,
@ -65,6 +66,41 @@ const AppLayout = () => {
[setLeftPanelSize, setLeftPanel]
)
// Prevent default drag and drop behavior globally
useEffect(() => {
const preventDefaults = (e: DragEvent) => {
e.preventDefault()
e.stopPropagation()
}
const handleGlobalDrop = (e: DragEvent) => {
e.preventDefault()
e.stopPropagation()
// Only prevent if the target is not within a chat input or other valid drop zone
const target = e.target as Element
const isValidDropZone = target?.closest('[data-drop-zone="true"]') ||
target?.closest('.chat-input-drop-zone') ||
target?.closest('[data-tauri-drag-region]')
if (!isValidDropZone) {
// Prevent the file from opening in the window
return false
}
}
// Add event listeners to prevent default drag/drop behavior
window.addEventListener('dragenter', preventDefaults)
window.addEventListener('dragover', preventDefaults)
window.addEventListener('drop', handleGlobalDrop)
return () => {
window.removeEventListener('dragenter', preventDefaults)
window.removeEventListener('dragover', preventDefaults)
window.removeEventListener('drop', handleGlobalDrop)
}
}, [])
return (
<Fragment>
<AnalyticProvider />
@ -168,6 +204,7 @@ function RootLayout() {
{/* <TanStackRouterDevtools position="bottom-right" /> */}
<ToolApproval />
<LoadModelErrorDialog />
<ErrorDialog />
<OutOfContextPromiseModal />
</TranslationProvider>
</Fragment>

View File

@ -20,10 +20,17 @@ import { extractModelName, extractDescription } from '@/lib/models'
import {
IconDownload,
IconFileCode,
IconEye,
IconSearch,
IconTool,
} from '@tabler/icons-react'
import { Switch } from '@/components/ui/switch'
import {
Tooltip,
TooltipContent,
TooltipProvider,
TooltipTrigger,
} from '@/components/ui/tooltip'
import Joyride, { CallBackProps, STATUS } from 'react-joyride'
import { CustomTooltipJoyRide } from '@/containers/CustomeTooltipJoyRide'
import {
@ -146,13 +153,16 @@ function Hub() {
}
// Apply downloaded filter
if (showOnlyDownloaded) {
filtered = filtered?.filter((model) =>
model.quants.some((variant) =>
llamaProvider?.models.some(
(m: { id: string }) => m.id === variant.model_id
)
)
)
filtered = filtered
?.map((model) => ({
...model,
quants: model.quants.filter((variant) =>
llamaProvider?.models.some(
(m: { id: string }) => m.id === variant.model_id
)
),
}))
.filter((model) => model.quants.length > 0)
}
// Add HuggingFace repo at the beginning if available
if (huggingFaceRepo) {
@ -427,43 +437,44 @@ function Hub() {
const isLastStep = currentStepIndex === steps.length - 1
const renderFilter = () => {
return (
<>
<DropdownMenu>
<DropdownMenuTrigger>
<span className="flex cursor-pointer items-center gap-1 px-2 py-1 rounded-sm bg-main-view-fg/15 text-sm outline-none text-main-view-fg font-medium">
{
sortOptions.find((option) => option.value === sortSelected)
?.name
}
if (searchValue.length === 0)
return (
<>
<DropdownMenu>
<DropdownMenuTrigger>
<span className="flex cursor-pointer items-center gap-1 px-2 py-1 rounded-sm bg-main-view-fg/15 text-sm outline-none text-main-view-fg font-medium">
{
sortOptions.find((option) => option.value === sortSelected)
?.name
}
</span>
</DropdownMenuTrigger>
<DropdownMenuContent side="bottom" align="end">
{sortOptions.map((option) => (
<DropdownMenuItem
className={cn(
'cursor-pointer my-0.5',
sortSelected === option.value && 'bg-main-view-fg/5'
)}
key={option.value}
onClick={() => setSortSelected(option.value)}
>
{option.name}
</DropdownMenuItem>
))}
</DropdownMenuContent>
</DropdownMenu>
<div className="flex items-center gap-2">
<Switch
checked={showOnlyDownloaded}
onCheckedChange={setShowOnlyDownloaded}
/>
<span className="text-xs text-main-view-fg/70 font-medium whitespace-nowrap">
{t('hub:downloaded')}
</span>
</DropdownMenuTrigger>
<DropdownMenuContent side="bottom" align="end">
{sortOptions.map((option) => (
<DropdownMenuItem
className={cn(
'cursor-pointer my-0.5',
sortSelected === option.value && 'bg-main-view-fg/5'
)}
key={option.value}
onClick={() => setSortSelected(option.value)}
>
{option.name}
</DropdownMenuItem>
))}
</DropdownMenuContent>
</DropdownMenu>
<div className="flex items-center gap-2">
<Switch
checked={showOnlyDownloaded}
onCheckedChange={setShowOnlyDownloaded}
/>
<span className="text-xs text-main-view-fg/70 font-medium whitespace-nowrap">
{t('hub:downloaded')}
</span>
</div>
</>
)
</div>
</>
)
}
return (
@ -657,11 +668,41 @@ function Hub() {
</div>
{filteredModels[virtualItem.index].tools && (
<div className="flex items-center gap-1">
<IconTool
size={17}
className="text-main-view-fg/50"
title={t('hub:tools')}
/>
<TooltipProvider>
<Tooltip>
<TooltipTrigger asChild>
<div>
<IconTool
size={17}
className="text-main-view-fg/50"
/>
</div>
</TooltipTrigger>
<TooltipContent>
<p>{t('hub:tools')}</p>
</TooltipContent>
</Tooltip>
</TooltipProvider>
</div>
)}
{filteredModels[virtualItem.index].num_mmproj >
0 && (
<div className="flex items-center gap-1">
<TooltipProvider>
<Tooltip>
<TooltipTrigger asChild>
<div>
<IconEye
size={17}
className="text-main-view-fg/50"
/>
</div>
</TooltipTrigger>
<TooltipContent>
<p>{t('vision')}</p>
</TooltipContent>
</Tooltip>
</TooltipProvider>
</div>
)}
{filteredModels[virtualItem.index].quants.length >

View File

@ -11,7 +11,7 @@ export const Route = createFileRoute(route.localApiServerlogs as any)({
component: LogsViewer,
})
const SERVER_LOG_TARGET = 'app_lib::core::server'
const SERVER_LOG_TARGET = 'app_lib::core::server::proxy'
const LOG_EVENT_NAME = 'log://log'
function LogsViewer() {

View File

@ -21,6 +21,7 @@ import { useToolApproval } from '@/hooks/useToolApproval'
import { toast } from 'sonner'
import { invoke } from '@tauri-apps/api/core'
import { useTranslation } from '@/i18n/react-i18next-compat'
import { useAppState } from '@/hooks/useAppState'
// Function to mask sensitive values
const maskSensitiveValue = (value: string) => {
@ -120,6 +121,7 @@ function MCPServers() {
const [loadingServers, setLoadingServers] = useState<{
[key: string]: boolean
}>({})
const { setErrorMessage } = useAppState()
const handleOpenDialog = (serverKey?: string) => {
if (serverKey) {
@ -247,13 +249,13 @@ function MCPServers() {
getConnectedServers().then(setConnectedServers)
})
.catch((error) => {
console.log(error, 'error.mcp')
editServer(serverKey, {
...(config ?? (mcpServers[serverKey] as MCPServerConfig)),
active: false,
})
toast.error(error, {
description: t('mcp-servers:checkParams'),
setErrorMessage({
message: error,
subtitle: t('mcp-servers:checkParams'),
})
})
.finally(() => {

View File

@ -79,6 +79,7 @@ function ProviderDetail() {
const [activeModels, setActiveModels] = useState<string[]>([])
const [loadingModels, setLoadingModels] = useState<string[]>([])
const [refreshingModels, setRefreshingModels] = useState(false)
const [importingModel, setImportingModel] = useState(false)
const { providerName } = useParams({ from: Route.id })
const { getProviderByName, setProviders, updateProvider } = useModelProvider()
const provider = getProviderByName(providerName)
@ -95,6 +96,60 @@ function ProviderDetail() {
!setting.controller_props.value)
)
const handleImportModel = async () => {
if (!provider) {
return
}
setImportingModel(true)
const selectedFile = await open({
multiple: false,
directory: false,
})
// If the dialog returns a file path, extract just the file name
const fileName =
typeof selectedFile === 'string'
? selectedFile.split(/[\\/]/).pop()?.replace(/\s/g, '-')
: undefined
if (selectedFile && fileName) {
// Check if model already exists
const modelExists = provider.models.some(
(model) => model.name === fileName
)
if (modelExists) {
toast.error('Model already exists', {
description: `${fileName} already imported`,
})
setImportingModel(false)
return
}
try {
await pullModel(fileName, selectedFile)
// Refresh the provider to update the models list
await getProviders().then(setProviders)
toast.success(t('providers:import'), {
id: `import-model-${provider.provider}`,
description: t('providers:importModelSuccess', {
provider: fileName,
}),
})
} catch (error) {
console.error(t('providers:importModelError'), error)
toast.error(t('providers:importModelError'), {
description:
error instanceof Error ? error.message : 'Unknown error occurred',
})
} finally {
setImportingModel(false)
}
} else {
setImportingModel(false)
}
}
useEffect(() => {
// Initial data fetch
getActiveModels().then((models) => setActiveModels(models || []))
@ -482,52 +537,25 @@ function ProviderDetail() {
variant="link"
size="sm"
className="hover:no-underline"
onClick={async () => {
const selectedFile = await open({
multiple: false,
directory: false,
filters: [
{
name: 'GGUF',
extensions: ['gguf'],
},
],
})
// If the dialog returns a file path, extract just the file name
const fileName =
typeof selectedFile === 'string'
? selectedFile.split(/[\\/]/).pop()
: undefined
if (selectedFile && fileName) {
try {
await pullModel(fileName, selectedFile)
} catch (error) {
console.error(
t('providers:importModelError'),
error
)
} finally {
// Refresh the provider to update the models list
getProviders().then(setProviders)
toast.success(t('providers:import'), {
id: `import-model-${provider.provider}`,
description: t(
'providers:importModelSuccess',
{ provider: provider.provider }
),
})
}
}
}}
disabled={importingModel}
onClick={handleImportModel}
>
<div className="cursor-pointer flex items-center justify-center rounded hover:bg-main-view-fg/15 bg-main-view-fg/10 transition-all duration-200 ease-in-out p-1.5 py-1 gap-1 -mr-2">
<IconFolderPlus
size={18}
className="text-main-view-fg/50"
/>
{importingModel ? (
<IconLoader
size={18}
className="text-main-view-fg/50 animate-spin"
/>
) : (
<IconFolderPlus
size={18}
className="text-main-view-fg/50"
/>
)}
<span className="text-main-view-fg/70">
{t('providers:import')}
{importingModel
? 'Importing...'
: t('providers:import')}
</span>
</div>
</Button>

View File

@ -37,6 +37,8 @@ function ThreadDetail() {
const [isAtBottom, setIsAtBottom] = useState(true)
const [hasScrollbar, setHasScrollbar] = useState(false)
const lastScrollTopRef = useRef(0)
const userIntendedPositionRef = useRef<number | null>(null)
const wasStreamingRef = useRef(false)
const { currentThreadId, setCurrentThreadId } = useThreads()
const { setCurrentAssistant, assistants } = useAssistant()
const { setMessages, deleteMessage } = useMessages()
@ -112,6 +114,8 @@ function ThreadDetail() {
scrollToBottom()
setIsAtBottom(true)
setIsUserScrolling(false)
userIntendedPositionRef.current = null
wasStreamingRef.current = false
checkScrollState()
return
}
@ -123,11 +127,39 @@ function ThreadDetail() {
scrollToBottom()
setIsAtBottom(true)
setIsUserScrolling(false)
userIntendedPositionRef.current = null
wasStreamingRef.current = false
checkScrollState()
}, [threadId])
// Single useEffect for all auto-scrolling logic
useEffect(() => {
// Track streaming state changes
const isCurrentlyStreaming = !!streamingContent
const wasStreaming = wasStreamingRef.current
const justFinishedStreaming = wasStreaming && !isCurrentlyStreaming
wasStreamingRef.current = isCurrentlyStreaming
// If streaming just finished and user had an intended position, restore it
if (justFinishedStreaming && userIntendedPositionRef.current !== null) {
// Small delay to ensure DOM has updated
setTimeout(() => {
if (scrollContainerRef.current && userIntendedPositionRef.current !== null) {
scrollContainerRef.current.scrollTo({
top: userIntendedPositionRef.current,
behavior: 'smooth'
})
userIntendedPositionRef.current = null
setIsUserScrolling(false)
}
}, 100)
return
}
// Clear intended position when streaming starts fresh
if (isCurrentlyStreaming && !wasStreaming) {
userIntendedPositionRef.current = null
}
// Only auto-scroll when the user is not actively scrolling
// AND either at the bottom OR there's streaming content
if (!isUserScrolling && (streamingContent || isAtBottom) && messagesCount) {
@ -163,6 +195,11 @@ function ThreadDetail() {
// Detect if this is a user-initiated scroll
if (Math.abs(scrollTop - lastScrollTopRef.current) > 10) {
setIsUserScrolling(!isBottom)
// If user scrolls during streaming and moves away from bottom, record their intended position
if (streamingContent && !isBottom) {
userIntendedPositionRef.current = scrollTop
}
}
setIsAtBottom(isBottom)
setHasScrollbar(hasScroll)
@ -180,6 +217,11 @@ function ThreadDetail() {
// Detect if this is a user-initiated scroll
if (Math.abs(scrollTop - lastScrollTopRef.current) > 10) {
setIsUserScrolling(!isBottom)
// If user scrolls during streaming and moves away from bottom, record their intended position
if (streamingContent && !isBottom) {
userIntendedPositionRef.current = scrollTop
}
}
setIsAtBottom(isBottom)
setHasScrollbar(hasScroll)

View File

@ -290,7 +290,7 @@ describe('models service', () => {
likes: 100,
tags: ['conversational', 'pytorch'],
pipeline_tag: 'text-generation',
created_at: '2023-01-01T00:00:00Z',
createdAt: '2023-01-01T00:00:00Z',
last_modified: '2023-12-01T00:00:00Z',
private: false,
disabled: false,
@ -443,7 +443,7 @@ describe('models service', () => {
likes: 100,
tags: ['conversational'],
pipeline_tag: 'text-generation',
created_at: '2023-01-01T00:00:00Z',
createdAt: '2023-01-01T00:00:00Z',
last_modified: '2023-12-01T00:00:00Z',
private: false,
disabled: false,
@ -471,7 +471,7 @@ describe('models service', () => {
likes: 100,
tags: ['conversational'],
pipeline_tag: 'text-generation',
created_at: '2023-01-01T00:00:00Z',
createdAt: '2023-01-01T00:00:00Z',
last_modified: '2023-12-01T00:00:00Z',
private: false,
disabled: false,
@ -510,7 +510,7 @@ describe('models service', () => {
likes: 100,
tags: ['conversational'],
pipeline_tag: 'text-generation',
created_at: '2023-01-01T00:00:00Z',
createdAt: '2023-01-01T00:00:00Z',
last_modified: '2023-12-01T00:00:00Z',
private: false,
disabled: false,
@ -559,7 +559,7 @@ describe('models service', () => {
likes: 75,
tags: ['pytorch', 'transformers', 'text-generation'],
pipeline_tag: 'text-generation',
created_at: '2021-01-01T00:00:00Z',
createdAt: '2021-01-01T00:00:00Z',
last_modified: '2021-12-01T00:00:00Z',
private: false,
disabled: false,
@ -605,6 +605,8 @@ describe('models service', () => {
file_size: '4.0 GB',
},
],
num_mmproj: 0,
mmproj_models: [],
created_at: '2021-01-01T00:00:00Z',
readme:
'https://huggingface.co/microsoft/DialoGPT-medium/resolve/main/README.md',
@ -820,7 +822,7 @@ describe('models service', () => {
downloads: 0,
likes: 0,
tags: [],
created_at: '2021-01-01T00:00:00Z',
createdAt: '2021-01-01T00:00:00Z',
last_modified: '2021-12-01T00:00:00Z',
private: false,
disabled: false,

View File

@ -56,3 +56,44 @@ export const callTool = (args: {
}): Promise<{ error: string; content: { text: string }[] }> => {
return window.core?.api?.callTool(args)
}
/**
* @description Enhanced function to invoke an MCP tool with cancellation support
* @param args - Tool call arguments, optionally including a cancellationToken
* @returns Promise with tool result and cancellation function
*/
export const callToolWithCancellation = (args: {
toolName: string
arguments: object
cancellationToken?: string
}): {
promise: Promise<{ error: string; content: { text: string }[] }>
cancel: () => Promise<void>
token: string
} => {
// Generate a unique cancellation token if not provided
const token = args.cancellationToken ?? `tool_call_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`
// Create the tool call promise with cancellation token
const promise = window.core?.api?.callTool({
...args,
cancellationToken: token
})
// Create cancel function
const cancel = async () => {
await window.core?.api?.cancelToolCall({ cancellationToken: token })
}
return { promise, cancel, token }
}
/**
* @description This function cancels a running tool call
* @param cancellationToken - The token identifying the tool call to cancel
* @returns Promise that resolves once the cancellation request has been sent
*/
export const cancelToolCall = (cancellationToken: string): Promise<void> => {
return window.core?.api?.cancelToolCall({ cancellationToken })
}
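A minimal sketch of driving the cancellation helpers above from application code (not part of the commit): only the callToolWithCancellation API declared here is used, and the tool name and arguments are placeholders.

import { callToolWithCancellation } from '@/services/mcp'

async function runToolWithTimeout() {
  const { promise, cancel } = callToolWithCancellation({
    toolName: 'browser_search', // placeholder tool name
    arguments: { query: 'jan ai' }, // placeholder arguments
  })

  // Ask the backend to abort the call if it runs longer than 30 seconds.
  const timer = setTimeout(() => {
    cancel().catch((e) => console.error('Cancel failed:', e))
  }, 30000)

  try {
    return await promise
  } finally {
    clearTimeout(timer)
  }
}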

View File

@ -1,3 +1,4 @@
/* eslint-disable @typescript-eslint/no-explicit-any */
import { sanitizeModelId } from '@/lib/utils'
import {
AIEngine,
@ -27,6 +28,7 @@ export interface CatalogModel {
num_quants: number
quants: ModelQuant[]
mmproj_models?: MMProjModel[]
num_mmproj: number
created_at?: string
readme?: string
tools?: boolean
@ -44,7 +46,7 @@ export interface HuggingFaceRepo {
library_name?: string
tags: string[]
pipeline_tag?: string
created_at: string
createdAt: string
last_modified: string
private: boolean
disabled: boolean
@ -155,21 +157,30 @@ export const fetchHuggingFaceRepo = async (
export const convertHfRepoToCatalogModel = (
repo: HuggingFaceRepo
): CatalogModel => {
// Format file size helper
const formatFileSize = (size?: number) => {
if (!size) return 'Unknown size'
if (size < 1024 ** 3) return `${(size / 1024 ** 2).toFixed(1)} MB`
return `${(size / 1024 ** 3).toFixed(1)} GB`
}
// Extract GGUF files from the repository siblings
const ggufFiles =
repo.siblings?.filter((file) =>
file.rfilename.toLowerCase().endsWith('.gguf')
) || []
// Convert GGUF files to quants format
const quants = ggufFiles.map((file) => {
// Format file size
const formatFileSize = (size?: number) => {
if (!size) return 'Unknown size'
if (size < 1024 ** 3) return `${(size / 1024 ** 2).toFixed(1)} MB`
return `${(size / 1024 ** 3).toFixed(1)} GB`
}
// Separate regular GGUF files from mmproj files
const regularGgufFiles = ggufFiles.filter(
(file) => !file.rfilename.toLowerCase().includes('mmproj')
)
const mmprojFiles = ggufFiles.filter((file) =>
file.rfilename.toLowerCase().includes('mmproj')
)
// Convert regular GGUF files to quants format
const quants = regularGgufFiles.map((file) => {
// Generate model_id from filename (remove .gguf extension, case-insensitive)
const modelId = file.rfilename.replace(/\.gguf$/i, '')
@ -180,15 +191,28 @@ export const convertHfRepoToCatalogModel = (
}
})
// Convert mmproj files to mmproj_models format
const mmprojModels = mmprojFiles.map((file) => {
const modelId = file.rfilename.replace(/\.gguf$/i, '')
return {
model_id: sanitizeModelId(modelId),
path: `https://huggingface.co/${repo.modelId}/resolve/main/${file.rfilename}`,
file_size: formatFileSize(file.size),
}
})
return {
model_name: repo.modelId,
description: `**Tags**: ${repo.tags?.join(', ')}`,
developer: repo.author,
downloads: repo.downloads || 0,
created_at: repo.createdAt,
num_quants: quants.length,
quants: quants,
created_at: repo.created_at,
num_mmproj: mmprojModels.length,
mmproj_models: mmprojModels,
readme: `https://huggingface.co/${repo.modelId}/resolve/main/README.md`,
description: `**Tags**: ${repo.tags?.join(', ')}`,
}
}
@ -318,8 +342,8 @@ export const startModel = async (
/**
* Check if model support tool use capability
* Returned by backend engine
* @param modelId
* @returns
* @param modelId
* @returns
*/
export const isToolSupported = async (modelId: string): Promise<boolean> => {
const engine = getEngine()
@ -327,3 +351,137 @@ export const isToolSupported = async (modelId: string): Promise<boolean> => {
return engine.isToolSupported(modelId)
}
/**
* Checks if mmproj.gguf file exists for a given model ID in the llamacpp provider.
* Also checks if the model has offload_mmproj setting.
* If mmproj.gguf exists, adds offload_mmproj setting with value true.
* @param modelId - The model ID to check for mmproj.gguf
* @param updateProvider - Function to update the provider state
* @param getProviderByName - Function to get provider by name
* @returns Promise<{exists: boolean, settingsUpdated: boolean}> - exists: true if mmproj.gguf exists, settingsUpdated: true if settings were modified
*/
export const checkMmprojExistsAndUpdateOffloadMMprojSetting = async (
modelId: string,
updateProvider?: (providerName: string, data: Partial<ModelProvider>) => void,
getProviderByName?: (providerName: string) => ModelProvider | undefined
): Promise<{ exists: boolean; settingsUpdated: boolean }> => {
let settingsUpdated = false
try {
const engine = getEngine('llamacpp') as AIEngine & {
checkMmprojExists?: (id: string) => Promise<boolean>
}
if (engine && typeof engine.checkMmprojExists === 'function') {
const exists = await engine.checkMmprojExists(modelId)
// If we have the store functions, use them; otherwise fall back to localStorage
if (updateProvider && getProviderByName) {
const provider = getProviderByName('llamacpp')
if (provider) {
const model = provider.models.find((m) => m.id === modelId)
if (model?.settings) {
const hasOffloadMmproj = 'offload_mmproj' in model.settings
// If mmproj exists, add offload_mmproj setting (only if it doesn't exist)
if (exists && !hasOffloadMmproj) {
// Create updated models array with the new setting
const updatedModels = provider.models.map((m) => {
if (m.id === modelId) {
return {
...m,
settings: {
...m.settings,
offload_mmproj: {
key: 'offload_mmproj',
title: 'Offload MMProj',
description:
'Offload multimodal projection layers to GPU',
controller_type: 'checkbox',
controller_props: {
value: true,
},
},
},
}
}
return m
})
// Update the provider with the new models array
updateProvider('llamacpp', { models: updatedModels })
settingsUpdated = true
}
}
}
} else {
// Fall back to localStorage approach for backwards compatibility
try {
const modelProviderData = JSON.parse(
localStorage.getItem('model-provider') || '{}'
)
const llamacppProvider = modelProviderData.state?.providers?.find(
(p: any) => p.provider === 'llamacpp'
)
const model = llamacppProvider?.models?.find(
(m: any) => m.id === modelId
)
if (model?.settings) {
// If mmproj exists, add offload_mmproj setting (only if it doesn't exist)
if (exists) {
if (!model.settings.offload_mmproj) {
model.settings.offload_mmproj = {
key: 'offload_mmproj',
title: 'Offload MMProj',
description: 'Offload multimodal projection layers to GPU',
controller_type: 'checkbox',
controller_props: {
value: true,
},
}
// Save updated settings back to localStorage
localStorage.setItem(
'model-provider',
JSON.stringify(modelProviderData)
)
settingsUpdated = true
}
}
}
} catch (localStorageError) {
console.error(
`Error checking localStorage for model ${modelId}:`,
localStorageError
)
}
}
return { exists, settingsUpdated }
}
} catch (error) {
console.error(`Error checking mmproj for model ${modelId}:`, error)
}
return { exists: false, settingsUpdated }
}
/**
* Checks if mmproj.gguf file exists for a given model ID in the llamacpp provider.
* @param modelId - The model ID to check for mmproj.gguf
* @returns Promise<boolean> - true if an mmproj.gguf file exists for the model
*/
export const checkMmprojExists = async (modelId: string): Promise<boolean> => {
try {
const engine = getEngine('llamacpp') as AIEngine & {
checkMmprojExists?: (id: string) => Promise<boolean>
}
if (engine && typeof engine.checkMmprojExists === 'function') {
return await engine.checkMmprojExists(modelId)
}
} catch (error) {
console.error(`Error checking mmproj for model ${modelId}:`, error)
}
return false
}
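A hypothetical caller (not part of the commit) that re-runs the mmproj check when a llamacpp model id changes, mirroring how DropdownModelProvider invokes the helper above; the hook import path is illustrative.

import { useEffect } from 'react'
import { useModelProvider } from '@/hooks/useModelProvider'
import { checkMmprojExistsAndUpdateOffloadMMprojSetting } from '@/services/models'

function useMmprojOffloadCheck(modelId?: string) {
  const { updateProvider, getProviderByName } = useModelProvider()

  useEffect(() => {
    if (!modelId) return
    // exists/settingsUpdated are returned if a caller needs them
    checkMmprojExistsAndUpdateOffloadMMprojSetting(
      modelId,
      updateProvider,
      getProviderByName
    ).catch((e) => console.error('mmproj check failed:', e))
  }, [modelId, updateProvider, getProviderByName])
}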

View File

@ -54,6 +54,7 @@ type Assistant = {
description?: string
instructions: string
parameters: Record<string, unknown>
tool_steps?: number
}
type TokenSpeed = {

View File

@ -0,0 +1,381 @@
import { describe, it, expect, beforeEach } from 'vitest'
import {
ReasoningProcessor,
extractReasoningFromMessage,
} from '../reasoning'
import { CompletionResponseChunk } from 'token.js'
import { chatCompletionChunk, chatCompletionRequestMessage } from '@janhq/core'
describe('extractReasoningFromMessage', () => {
it('should extract reasoning from message with reasoning_content property', () => {
const message = {
role: 'assistant' as const,
content: 'Hello',
reasoning_content: 'This is my reasoning content',
}
const result = extractReasoningFromMessage(message)
expect(result).toBe('This is my reasoning content')
})
it('should extract reasoning from message with legacy reasoning property', () => {
const message = {
role: 'assistant' as const,
content: 'Hello',
reasoning: 'This is my reasoning',
}
const result = extractReasoningFromMessage(message)
expect(result).toBe('This is my reasoning')
})
it('should prefer reasoning_content over reasoning property', () => {
const message = {
role: 'assistant' as const,
content: 'Hello',
reasoning_content: 'New reasoning content',
reasoning: 'Old reasoning',
}
const result = extractReasoningFromMessage(message)
expect(result).toBe('New reasoning content')
})
it('should return null for message without reasoning', () => {
const message = {
role: 'assistant' as const,
content: 'Hello',
}
const result = extractReasoningFromMessage(message)
expect(result).toBeNull()
})
it('should return null for null/undefined message', () => {
expect(extractReasoningFromMessage(null as any)).toBeNull()
expect(extractReasoningFromMessage(undefined as any)).toBeNull()
})
})
describe('ReasoningProcessor', () => {
let processor: ReasoningProcessor
beforeEach(() => {
processor = new ReasoningProcessor()
})
describe('processReasoningChunk', () => {
it('should start reasoning with opening think tag using reasoning_content', () => {
const chunk: chatCompletionChunk = {
choices: [{
delta: {
reasoning_content: 'Let me think about this...',
},
}],
}
const result = processor.processReasoningChunk(chunk)
expect(result).toBe('<think>Let me think about this...')
expect(processor.isReasoningInProgress()).toBe(true)
})
it('should start reasoning with opening think tag using legacy reasoning', () => {
const chunk: chatCompletionChunk = {
choices: [{
delta: {
reasoning: 'Let me think about this...',
},
}],
}
const result = processor.processReasoningChunk(chunk)
expect(result).toBe('<think>Let me think about this...')
expect(processor.isReasoningInProgress()).toBe(true)
})
it('should continue reasoning without opening tag', () => {
// Start reasoning
const chunk1: chatCompletionChunk = {
choices: [{
delta: {
reasoning_content: 'First part',
},
}],
}
processor.processReasoningChunk(chunk1)
// Continue reasoning
const chunk2: chatCompletionChunk = {
choices: [{
delta: {
reasoning_content: ' second part',
},
}],
}
const result = processor.processReasoningChunk(chunk2)
expect(result).toBe(' second part')
expect(processor.isReasoningInProgress()).toBe(true)
})
it('should end reasoning when content starts', () => {
// Start reasoning
const chunk1: chatCompletionChunk = {
choices: [{
delta: {
reasoning_content: 'Thinking...',
},
}],
}
processor.processReasoningChunk(chunk1)
// End reasoning with content
const chunk2: chatCompletionChunk = {
choices: [{
delta: {
content: 'Now I respond',
},
}],
}
const result = processor.processReasoningChunk(chunk2)
expect(result).toBe('</think>')
expect(processor.isReasoningInProgress()).toBe(false)
})
it('should handle empty reasoning chunks', () => {
const chunk: chatCompletionChunk = {
choices: [{
delta: {
reasoning_content: '',
},
}],
}
const result = processor.processReasoningChunk(chunk)
expect(result).toBe('')
expect(processor.isReasoningInProgress()).toBe(false)
})
it('should handle whitespace-only reasoning', () => {
const chunk: chatCompletionChunk = {
choices: [{
delta: {
reasoning_content: ' \n ',
},
}],
}
const result = processor.processReasoningChunk(chunk)
expect(result).toBe('')
expect(processor.isReasoningInProgress()).toBe(false)
})
it('should handle non-string reasoning', () => {
const chunk: chatCompletionChunk = {
choices: [{
delta: {
reasoning_content: null as any,
},
}],
}
const result = processor.processReasoningChunk(chunk)
expect(result).toBe('')
expect(processor.isReasoningInProgress()).toBe(false)
})
it('should handle chunk without choices', () => {
const chunk: chatCompletionChunk = {
choices: undefined as any,
}
const result = processor.processReasoningChunk(chunk)
expect(result).toBe('')
expect(processor.isReasoningInProgress()).toBe(false)
})
it('should handle chunk without delta', () => {
const chunk: chatCompletionChunk = {
choices: [{
delta: undefined as any,
}],
}
const result = processor.processReasoningChunk(chunk)
expect(result).toBe('')
expect(processor.isReasoningInProgress()).toBe(false)
})
it('should handle content without active reasoning', () => {
const chunk: chatCompletionChunk = {
choices: [{
delta: {
content: 'Regular content',
},
}],
}
const result = processor.processReasoningChunk(chunk)
expect(result).toBe('')
expect(processor.isReasoningInProgress()).toBe(false)
})
})
describe('finalize', () => {
it('should close reasoning if still active', () => {
// Start reasoning
const chunk: chatCompletionChunk = {
choices: [{
delta: {
reasoning_content: 'Unfinished thinking',
},
}],
}
processor.processReasoningChunk(chunk)
const result = processor.finalize()
expect(result).toBe('</think>')
expect(processor.isReasoningInProgress()).toBe(false)
})
it('should return empty string if no active reasoning', () => {
const result = processor.finalize()
expect(result).toBe('')
expect(processor.isReasoningInProgress()).toBe(false)
})
it('should handle multiple finalize calls', () => {
// Start reasoning
const chunk: chatCompletionChunk = {
choices: [{
delta: {
reasoning_content: 'Thinking',
},
}],
}
processor.processReasoningChunk(chunk)
// First finalize
const result1 = processor.finalize()
expect(result1).toBe('</think>')
// Second finalize should return empty
const result2 = processor.finalize()
expect(result2).toBe('')
})
})
describe('isReasoningInProgress', () => {
it('should track reasoning state correctly', () => {
expect(processor.isReasoningInProgress()).toBe(false)
// Start reasoning
const chunk1: chatCompletionChunk = {
choices: [{
delta: {
reasoning_content: 'Start thinking',
},
}],
}
processor.processReasoningChunk(chunk1)
expect(processor.isReasoningInProgress()).toBe(true)
// End with content
const chunk2: chatCompletionChunk = {
choices: [{
delta: {
content: 'Response',
},
}],
}
processor.processReasoningChunk(chunk2)
expect(processor.isReasoningInProgress()).toBe(false)
})
})
describe('integration scenarios', () => {
it('should handle complete reasoning flow', () => {
const chunks: chatCompletionChunk[] = [
{
choices: [{
delta: { reasoning_content: 'Let me think' },
}],
},
{
choices: [{
delta: { reasoning_content: ' about this problem' },
}],
},
{
choices: [{
delta: { reasoning_content: ' step by step.' },
}],
},
{
choices: [{
delta: { content: 'Based on my analysis,' },
}],
},
{
choices: [{
delta: { content: ' the answer is 42.' },
}],
},
]
const results = chunks.map(chunk => processor.processReasoningChunk(chunk))
expect(results[0]).toBe('<think>Let me think')
expect(results[1]).toBe(' about this problem')
expect(results[2]).toBe(' step by step.')
expect(results[3]).toBe('</think>')
expect(results[4]).toBe('')
expect(processor.isReasoningInProgress()).toBe(false)
})
it('should handle reasoning without content', () => {
const chunk: chatCompletionChunk = {
choices: [{
delta: { reasoning_content: 'Only reasoning, no content' },
}],
}
const result1 = processor.processReasoningChunk(chunk)
expect(result1).toBe('<think>Only reasoning, no content')
const result2 = processor.finalize()
expect(result2).toBe('</think>')
})
it('should handle mixed reasoning and content chunks', () => {
// Reasoning then content then reasoning again (edge case)
const chunk1: chatCompletionChunk = {
choices: [{
delta: { reasoning_content: 'First thought' },
}],
}
const chunk2: chatCompletionChunk = {
choices: [{
delta: { content: 'Some content' },
}],
}
const chunk3: chatCompletionChunk = {
choices: [{
delta: { reasoning_content: 'Second thought' },
}],
}
const result1 = processor.processReasoningChunk(chunk1)
const result2 = processor.processReasoningChunk(chunk2)
const result3 = processor.processReasoningChunk(chunk3)
expect(result1).toBe('<think>First thought')
expect(result2).toBe('</think>')
expect(result3).toBe('<think>Second thought')
})
})
})
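The tests above pin down the streaming contract: the first reasoning delta opens a <think> span, subsequent reasoning deltas stream through unchanged, and the span is closed either when regular content arrives or when finalize() is called. A minimal sketch consistent with that contract is shown below; it is an illustration of the expected behavior under those assumptions, not the actual implementation in reasoning.ts:

// Illustrative processor satisfying the behavior asserted by the tests above.
class SketchReasoningProcessor {
  private inReasoning = false

  processReasoningChunk(chunk: {
    choices?: Array<{
      delta?: {
        reasoning_content?: string | null
        reasoning?: string | null
        content?: string | null
      }
    }>
  }): string {
    const delta = chunk.choices?.[0]?.delta
    if (!delta) return ''
    const reasoning = delta.reasoning_content ?? delta.reasoning
    if (typeof reasoning === 'string' && reasoning.trim()) {
      if (!this.inReasoning) {
        this.inReasoning = true
        return '<think>' + reasoning // first reasoning chunk opens the span
      }
      return reasoning // later reasoning chunks stream through unchanged
    }
    if (delta.content && this.inReasoning) {
      this.inReasoning = false
      return '</think>' // regular content closes an open reasoning span
    }
    return ''
  }

  finalize(): string {
    if (!this.inReasoning) return ''
    this.inReasoning = false
    return '</think>' // close a span the stream never closed itself
  }

  isReasoningInProgress(): boolean {
    return this.inReasoning
  }
}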

View File

@ -5,6 +5,11 @@ import {
chatCompletionRequestMessage,
} from '@janhq/core'
// Helper function to get reasoning content from an object
function getReasoning(obj: { reasoning_content?: string | null; reasoning?: string | null } | null | undefined): string | null {
return obj?.reasoning_content ?? obj?.reasoning ?? null
}
// Extract reasoning from a message (for completed responses)
export function extractReasoningFromMessage(
message: chatCompletionRequestMessage | ChatCompletionMessage
@ -12,7 +17,7 @@ export function extractReasoningFromMessage(
if (!message) return null
const extendedMessage = message as chatCompletionRequestMessage
return extendedMessage.reasoning || null
return getReasoning(extendedMessage)
}
// Extract reasoning from a chunk (for streaming responses)
@ -22,7 +27,7 @@ function extractReasoningFromChunk(
if (!chunk.choices?.[0]?.delta) return null
const delta = chunk.choices[0].delta as chatCompletionRequestMessage
const reasoning = delta.reasoning
const reasoning = getReasoning(delta)
// Return null for falsy values, non-strings, or whitespace-only strings
if (!reasoning || typeof reasoning !== 'string' || !reasoning.trim())