Merge remote-tracking branch 'origin/dev' into feat/retain-interruption-message

Vanalite 2025-10-03 09:47:08 +07:00
commit 4ac45aba23
22 changed files with 328 additions and 302 deletions

Binary image file added (262 KiB); contents not shown.


@ -0,0 +1,28 @@
---
title: "Jan v0.7.0: Jan Projects"
version: v0.7.0
description: "Jan v0.7.0 introduces Projects, model renaming, llama.cpp auto-tuning, model stats, and Azure support."
date: 2025-10-02
ogImage: "/assets/images/changelog/jan-release-v0.7.0.jpeg"
---
import ChangelogHeader from "@/components/Changelog/ChangelogHeader"
import { Callout } from 'nextra/components'
<ChangelogHeader title="Jan v0.7.0" date="2025-10-01" ogImage="/assets/images/changelog/jan-release-v0.7.0.jpeg" />
## Jan v0.7.0: Jan Projects
Jan v0.7.0 is live! This release focuses on helping you organize your workspace and better understand how models run.
### What's new
- **Projects**: Group related chats under one project for a cleaner workflow.
- **Rename models**: Give your models custom names for easier identification.
- **Model context stats**: See context usage when a model runs.
- **Auto-loaded cloud models**: Cloud model names now appear automatically.
---
Update your Jan app or [download the latest version](https://jan.ai/).
For the complete list of changes, see the [GitHub release notes](https://github.com/janhq/jan/releases/tag/v0.7.0).


@ -19,10 +19,7 @@ pub fn get_app_configurations<R: Runtime>(app_handle: tauri::AppHandle<R>) -> Ap
let default_data_folder = default_data_folder_path(app_handle.clone());
if !configuration_file.exists() {
log::info!(
"App config not found, creating default config at {:?}",
configuration_file
);
log::info!("App config not found, creating default config at {configuration_file:?}");
app_default_configuration.data_folder = default_data_folder;
@ -30,7 +27,7 @@ pub fn get_app_configurations<R: Runtime>(app_handle: tauri::AppHandle<R>) -> Ap
&configuration_file,
serde_json::to_string(&app_default_configuration).unwrap(),
) {
log::error!("Failed to create default config: {}", err);
log::error!("Failed to create default config: {err}");
}
return app_default_configuration;
@ -40,18 +37,12 @@ pub fn get_app_configurations<R: Runtime>(app_handle: tauri::AppHandle<R>) -> Ap
Ok(content) => match serde_json::from_str::<AppConfiguration>(&content) {
Ok(app_configurations) => app_configurations,
Err(err) => {
log::error!(
"Failed to parse app config, returning default config instead. Error: {}",
err
);
log::error!("Failed to parse app config, returning default config instead. Error: {err}");
app_default_configuration
}
},
Err(err) => {
log::error!(
"Failed to read app config, returning default config instead. Error: {}",
err
);
log::error!("Failed to read app config, returning default config instead. Error: {err}");
app_default_configuration
}
}
@ -63,10 +54,7 @@ pub fn update_app_configuration<R: Runtime>(
configuration: AppConfiguration,
) -> Result<(), String> {
let configuration_file = get_configuration_file_path(app_handle);
log::info!(
"update_app_configuration, configuration_file: {:?}",
configuration_file
);
log::info!("update_app_configuration, configuration_file: {configuration_file:?}");
fs::write(
configuration_file,
@ -95,8 +83,7 @@ pub fn get_jan_data_folder_path<R: Runtime>(app_handle: tauri::AppHandle<R>) ->
pub fn get_configuration_file_path<R: Runtime>(app_handle: tauri::AppHandle<R>) -> PathBuf {
let app_path = app_handle.path().app_data_dir().unwrap_or_else(|err| {
log::error!(
"Failed to get app data directory: {}. Using home directory instead.",
err
"Failed to get app data directory: {err}. Using home directory instead."
);
let home_dir = std::env::var(if cfg!(target_os = "windows") {
@ -130,9 +117,9 @@ pub fn get_configuration_file_path<R: Runtime>(app_handle: tauri::AppHandle<R>)
.join(package_name);
if old_data_dir.exists() {
return old_data_dir.join(CONFIGURATION_FILE_NAME);
old_data_dir.join(CONFIGURATION_FILE_NAME)
} else {
return app_path.join(CONFIGURATION_FILE_NAME);
app_path.join(CONFIGURATION_FILE_NAME)
}
}
@ -156,7 +143,7 @@ pub fn default_data_folder_path<R: Runtime>(app_handle: tauri::AppHandle<R>) ->
#[tauri::command]
pub fn get_user_home_path<R: Runtime>(app: AppHandle<R>) -> String {
return get_app_configurations(app.clone()).data_folder;
get_app_configurations(app.clone()).data_folder
}
#[tauri::command]
@ -171,16 +158,12 @@ pub fn change_app_data_folder<R: Runtime>(
// Create the new data folder if it doesn't exist
if !new_data_folder_path.exists() {
fs::create_dir_all(&new_data_folder_path)
.map_err(|e| format!("Failed to create new data folder: {}", e))?;
.map_err(|e| format!("Failed to create new data folder: {e}"))?;
}
// Copy all files from the old folder to the new one
if current_data_folder.exists() {
log::info!(
"Copying data from {:?} to {:?}",
current_data_folder,
new_data_folder_path
);
log::info!("Copying data from {current_data_folder:?} to {new_data_folder_path:?}");
// Check if this is a parent directory to avoid infinite recursion
if new_data_folder_path.starts_with(&current_data_folder) {
@ -193,7 +176,7 @@ pub fn change_app_data_folder<R: Runtime>(
&new_data_folder_path,
&[".uvx", ".npx"],
)
.map_err(|e| format!("Failed to copy data to new folder: {}", e))?;
.map_err(|e| format!("Failed to copy data to new folder: {e}"))?;
} else {
log::info!("Current data folder does not exist, nothing to copy");
}


@ -19,7 +19,7 @@ pub async fn download_files<R: Runtime>(
{
let mut download_manager = state.download_manager.lock().await;
if download_manager.cancel_tokens.contains_key(task_id) {
return Err(format!("task_id {} exists", task_id));
return Err(format!("task_id {task_id} exists"));
}
download_manager
.cancel_tokens
@ -60,9 +60,9 @@ pub async fn cancel_download_task(state: State<'_, AppState>, task_id: &str) ->
let mut download_manager = state.download_manager.lock().await;
if let Some(token) = download_manager.cancel_tokens.remove(task_id) {
token.cancel();
log::info!("Cancelled download task: {}", task_id);
log::info!("Cancelled download task: {task_id}");
Ok(())
} else {
Err(format!("No download task: {}", task_id))
Err(format!("No download task: {task_id}"))
}
}


@ -15,7 +15,7 @@ use url::Url;
// ===== UTILITY FUNCTIONS =====
pub fn err_to_string<E: std::fmt::Display>(e: E) -> String {
format!("Error: {}", e)
format!("Error: {e}")
}
@ -55,7 +55,7 @@ async fn validate_downloaded_file(
)
.unwrap();
log::info!("Starting validation for model: {}", model_id);
log::info!("Starting validation for model: {model_id}");
// Validate size if provided (fast check first)
if let Some(expected_size) = &item.size {
@ -73,8 +73,7 @@ async fn validate_downloaded_file(
actual_size
);
return Err(format!(
"Size verification failed. Expected {} bytes but got {} bytes.",
expected_size, actual_size
"Size verification failed. Expected {expected_size} bytes but got {actual_size} bytes."
));
}
@ -90,7 +89,7 @@ async fn validate_downloaded_file(
save_path.display(),
e
);
return Err(format!("Failed to verify file size: {}", e));
return Err(format!("Failed to verify file size: {e}"));
}
}
}
@ -115,9 +114,7 @@ async fn validate_downloaded_file(
computed_sha256
);
return Err(format!(
"Hash verification failed. The downloaded file is corrupted or has been tampered with."
));
return Err("Hash verification failed. The downloaded file is corrupted or has been tampered with.".to_string());
}
log::info!("Hash verification successful for {}", item.url);
@ -128,7 +125,7 @@ async fn validate_downloaded_file(
save_path.display(),
e
);
return Err(format!("Failed to verify file integrity: {}", e));
return Err(format!("Failed to verify file integrity: {e}"));
}
}
}
@ -140,14 +137,14 @@ async fn validate_downloaded_file(
pub fn validate_proxy_config(config: &ProxyConfig) -> Result<(), String> {
// Validate proxy URL format
if let Err(e) = Url::parse(&config.url) {
return Err(format!("Invalid proxy URL '{}': {}", config.url, e));
return Err(format!("Invalid proxy URL '{}': {e}", config.url));
}
// Check if proxy URL has valid scheme
let url = Url::parse(&config.url).unwrap(); // Safe to unwrap as we just validated it
match url.scheme() {
"http" | "https" | "socks4" | "socks5" => {}
scheme => return Err(format!("Unsupported proxy scheme: {}", scheme)),
scheme => return Err(format!("Unsupported proxy scheme: {scheme}")),
}
// Validate authentication credentials
@ -167,7 +164,7 @@ pub fn validate_proxy_config(config: &ProxyConfig) -> Result<(), String> {
}
// Basic validation for wildcard patterns
if entry.starts_with("*.") && entry.len() < 3 {
return Err(format!("Invalid wildcard pattern: {}", entry));
return Err(format!("Invalid wildcard pattern: {entry}"));
}
}
}
@ -214,8 +211,7 @@ pub fn should_bypass_proxy(url: &str, no_proxy: &[String]) -> bool {
}
// Simple wildcard matching
if entry.starts_with("*.") {
let domain = &entry[2..];
if let Some(domain) = entry.strip_prefix("*.") {
if host.ends_with(domain) {
return true;
}
@ -305,7 +301,7 @@ pub async fn _download_files_internal(
resume: bool,
cancel_token: CancellationToken,
) -> Result<(), String> {
log::info!("Start download task: {}", task_id);
log::info!("Start download task: {task_id}");
let header_map = _convert_headers(headers).map_err(err_to_string)?;
@ -320,9 +316,9 @@ pub async fn _download_files_internal(
}
let total_size: u64 = file_sizes.values().sum();
log::info!("Total download size: {}", total_size);
log::info!("Total download size: {total_size}");
let evt_name = format!("download-{}", task_id);
let evt_name = format!("download-{task_id}");
// Create progress tracker
let progress_tracker = ProgressTracker::new(items, file_sizes.clone());
@ -352,7 +348,7 @@ pub async fn _download_files_internal(
let cancel_token_clone = cancel_token.clone();
let evt_name_clone = evt_name.clone();
let progress_tracker_clone = progress_tracker.clone();
let file_id = format!("{}-{}", task_id, index);
let file_id = format!("{task_id}-{index}");
let file_size = file_sizes.get(&item.url).copied().unwrap_or(0);
let task = tokio::spawn(async move {
@ -377,7 +373,7 @@ pub async fn _download_files_internal(
// Wait for all downloads to complete
let mut validation_tasks = Vec::new();
for (task, item) in download_tasks.into_iter().zip(items.iter()) {
let result = task.await.map_err(|e| format!("Task join error: {}", e))?;
let result = task.await.map_err(|e| format!("Task join error: {e}"))?;
match result {
Ok(downloaded_path) => {
@ -399,7 +395,7 @@ pub async fn _download_files_internal(
for (validation_task, save_path, _item) in validation_tasks {
let validation_result = validation_task
.await
.map_err(|e| format!("Validation task join error: {}", e))?;
.map_err(|e| format!("Validation task join error: {e}"))?;
if let Err(validation_error) = validation_result {
// Clean up the file if validation fails
@ -448,7 +444,7 @@ async fn download_single_file(
if current_extension.is_empty() {
ext.to_string()
} else {
format!("{}.{}", current_extension, ext)
format!("{current_extension}.{ext}")
}
};
let tmp_save_path = save_path.with_extension(append_extension("tmp"));
@ -469,8 +465,8 @@ async fn download_single_file(
let decoded_url = url::Url::parse(&item.url)
.map(|u| u.to_string())
.unwrap_or_else(|_| item.url.clone());
log::info!("Started downloading: {}", decoded_url);
let client = _get_client_for_item(item, &header_map).map_err(err_to_string)?;
log::info!("Started downloading: {decoded_url}");
let client = _get_client_for_item(item, header_map).map_err(err_to_string)?;
let mut download_delta = 0u64;
let mut initial_progress = 0u64;
@ -503,7 +499,7 @@ async fn download_single_file(
}
Err(e) => {
// fallback to normal download
log::warn!("Failed to resume download: {}", e);
log::warn!("Failed to resume download: {e}");
should_resume = false;
_get_maybe_resume(&client, &item.url, 0).await?
}
@ -592,7 +588,7 @@ async fn download_single_file(
let decoded_url = url::Url::parse(&item.url)
.map(|u| u.to_string())
.unwrap_or_else(|_| item.url.clone());
log::info!("Finished downloading: {}", decoded_url);
log::info!("Finished downloading: {decoded_url}");
Ok(save_path.to_path_buf())
}
@ -606,7 +602,7 @@ pub async fn _get_maybe_resume(
if start_bytes > 0 {
let resp = client
.get(url)
.header("Range", format!("bytes={}-", start_bytes))
.header("Range", format!("bytes={start_bytes}-"))
.send()
.await
.map_err(err_to_string)?;


@ -13,7 +13,7 @@ pub fn get_jan_extensions_path<R: Runtime>(app_handle: tauri::AppHandle<R>) -> P
#[tauri::command]
pub fn install_extensions<R: Runtime>(app: AppHandle<R>) {
if let Err(err) = setup::install_extensions(app, true) {
log::error!("Failed to install extensions: {}", err);
log::error!("Failed to install extensions: {err}");
}
}
@ -21,7 +21,7 @@ pub fn install_extensions<R: Runtime>(app: AppHandle<R>) {
pub fn get_active_extensions<R: Runtime>(app: AppHandle<R>) -> Vec<serde_json::Value> {
let mut path = get_jan_extensions_path(app);
path.push("extensions.json");
log::info!("get jan extensions, path: {:?}", path);
log::info!("get jan extensions, path: {path:?}");
let contents = fs::read_to_string(path);
let contents: Vec<serde_json::Value> = match contents {
@ -40,14 +40,14 @@ pub fn get_active_extensions<R: Runtime>(app: AppHandle<R>) -> Vec<serde_json::V
})
.collect(),
Err(error) => {
log::error!("Failed to parse extensions.json: {}", error);
log::error!("Failed to parse extensions.json: {error}");
vec![]
}
},
Err(error) => {
log::error!("Failed to read extensions.json: {}", error);
log::error!("Failed to read extensions.json: {error}");
vec![]
}
};
return contents;
contents
}


@ -9,7 +9,7 @@ fn test_rm() {
let app = mock_app();
let path = "test_rm_dir";
fs::create_dir_all(get_jan_data_folder_path(app.handle().clone()).join(path)).unwrap();
let args = vec![format!("file://{}", path).to_string()];
let args = vec![format!("file://{path}").to_string()];
let result = rm(app.handle().clone(), args);
assert!(result.is_ok());
assert!(!get_jan_data_folder_path(app.handle().clone())
@ -21,7 +21,7 @@ fn test_rm() {
fn test_mkdir() {
let app = mock_app();
let path = "test_mkdir_dir";
let args = vec![format!("file://{}", path).to_string()];
let args = vec![format!("file://{path}").to_string()];
let result = mkdir(app.handle().clone(), args);
assert!(result.is_ok());
assert!(get_jan_data_folder_path(app.handle().clone())
@ -39,7 +39,7 @@ fn test_join_path() {
assert_eq!(
result,
get_jan_data_folder_path(app.handle().clone())
.join(&format!("test_dir{}test_file", std::path::MAIN_SEPARATOR))
.join(format!("test_dir{}test_file", std::path::MAIN_SEPARATOR))
.to_string_lossy()
.to_string()
);


@ -30,28 +30,28 @@ pub async fn activate_mcp_server<R: Runtime>(
#[tauri::command]
pub async fn deactivate_mcp_server(state: State<'_, AppState>, name: String) -> Result<(), String> {
log::info!("Deactivating MCP server: {}", name);
log::info!("Deactivating MCP server: {name}");
// First, mark server as manually deactivated to prevent restart
// Remove from active servers list to prevent restart
{
let mut active_servers = state.mcp_active_servers.lock().await;
active_servers.remove(&name);
log::info!("Removed MCP server {} from active servers list", name);
log::info!("Removed MCP server {name} from active servers list");
}
// Mark as not successfully connected to prevent restart logic
{
let mut connected = state.mcp_successfully_connected.lock().await;
connected.insert(name.clone(), false);
log::info!("Marked MCP server {} as not successfully connected", name);
log::info!("Marked MCP server {name} as not successfully connected");
}
// Reset restart count
{
let mut counts = state.mcp_restart_counts.lock().await;
counts.remove(&name);
log::info!("Reset restart count for MCP server {}", name);
log::info!("Reset restart count for MCP server {name}");
}
// Now remove and stop the server
@ -60,7 +60,7 @@ pub async fn deactivate_mcp_server(state: State<'_, AppState>, name: String) ->
let service = servers_map
.remove(&name)
.ok_or_else(|| format!("Server {} not found", name))?;
.ok_or_else(|| format!("Server {name} not found"))?;
// Release the lock before calling cancel
drop(servers_map);
@ -89,7 +89,7 @@ pub async fn restart_mcp_servers<R: Runtime>(app: AppHandle<R>, state: State<'_,
restart_active_mcp_servers(&app, servers).await?;
app.emit("mcp-update", "MCP servers updated")
.map_err(|e| format!("Failed to emit event: {}", e))?;
.map_err(|e| format!("Failed to emit event: {e}"))?;
Ok(())
}
@ -110,9 +110,7 @@ pub async fn reset_mcp_restart_count(
let old_count = *count;
*count = 0;
log::info!(
"MCP server {} restart count reset from {} to 0.",
server_name,
old_count
"MCP server {server_name} restart count reset from {old_count} to 0."
);
Ok(())
}
@ -219,7 +217,7 @@ pub async fn call_tool(
continue; // Tool not found in this server, try next
}
println!("Found tool {} in server", tool_name);
println!("Found tool {tool_name} in server");
// Call the tool with timeout and cancellation support
let tool_call = service.call_tool(CallToolRequestParam {
@ -234,22 +232,20 @@ pub async fn call_tool(
match result {
Ok(call_result) => call_result.map_err(|e| e.to_string()),
Err(_) => Err(format!(
"Tool call '{}' timed out after {} seconds",
tool_name,
"Tool call '{tool_name}' timed out after {} seconds",
MCP_TOOL_CALL_TIMEOUT.as_secs()
)),
}
}
_ = cancel_rx => {
Err(format!("Tool call '{}' was cancelled", tool_name))
Err(format!("Tool call '{tool_name}' was cancelled"))
}
}
} else {
match timeout(MCP_TOOL_CALL_TIMEOUT, tool_call).await {
Ok(call_result) => call_result.map_err(|e| e.to_string()),
Err(_) => Err(format!(
"Tool call '{}' timed out after {} seconds",
tool_name,
"Tool call '{tool_name}' timed out after {} seconds",
MCP_TOOL_CALL_TIMEOUT.as_secs()
)),
}
@ -264,7 +260,7 @@ pub async fn call_tool(
return result;
}
Err(format!("Tool {} not found", tool_name))
Err(format!("Tool {tool_name} not found"))
}
/// Cancels a running tool call by its cancellation token
@ -285,10 +281,10 @@ pub async fn cancel_tool_call(
if let Some(cancel_tx) = cancellations.remove(&cancellation_token) {
// Send cancellation signal - ignore if receiver is already dropped
let _ = cancel_tx.send(());
println!("Tool call with token {} cancelled", cancellation_token);
println!("Tool call with token {cancellation_token} cancelled");
Ok(())
} else {
Err(format!("Cancellation token {} not found", cancellation_token))
Err(format!("Cancellation token {cancellation_token} not found"))
}
}
@ -301,7 +297,7 @@ pub async fn get_mcp_configs<R: Runtime>(app: AppHandle<R>) -> Result<String, St
if !path.exists() {
log::info!("mcp_config.json not found, creating default empty config");
fs::write(&path, DEFAULT_MCP_CONFIG)
.map_err(|e| format!("Failed to create default MCP config: {}", e))?;
.map_err(|e| format!("Failed to create default MCP config: {e}"))?;
}
fs::read_to_string(path).map_err(|e| e.to_string())
@ -311,7 +307,7 @@ pub async fn get_mcp_configs<R: Runtime>(app: AppHandle<R>) -> Result<String, St
pub async fn save_mcp_configs<R: Runtime>(app: AppHandle<R>, configs: String) -> Result<(), String> {
let mut path = get_jan_data_folder_path(app);
path.push("mcp_config.json");
log::info!("save mcp configs, path: {:?}", path);
log::info!("save mcp configs, path: {path:?}");
fs::write(path, configs).map_err(|e| e.to_string())
}


@ -56,22 +56,13 @@ pub fn calculate_exponential_backoff_delay(attempt: u32) -> u64 {
let hash = hasher.finish();
// Convert hash to jitter value in range [-jitter_range, +jitter_range]
let jitter_offset = (hash % (jitter_range * 2)) as i64 - jitter_range as i64;
jitter_offset
(hash % (jitter_range * 2)) as i64 - jitter_range as i64
} else {
0
};
// Apply jitter while ensuring delay stays positive and within bounds
let final_delay = cmp::max(
100, // Minimum 100ms delay
cmp::min(
MCP_MAX_RESTART_DELAY_MS,
(capped_delay as i64 + jitter) as u64,
),
);
final_delay
((capped_delay as i64 + jitter) as u64).clamp(100, MCP_MAX_RESTART_DELAY_MS)
}
/// Runs MCP commands by reading configuration from a JSON file and initializing servers
@ -135,9 +126,7 @@ pub async fn run_mcp_commands<R: Runtime>(
// If initial startup failed, we still want to continue with other servers
if let Err(e) = &result {
log::error!(
"Initial startup failed for MCP server {}: {}",
name_clone,
e
"Initial startup failed for MCP server {name_clone}: {e}"
);
}
@ -155,25 +144,23 @@ pub async fn run_mcp_commands<R: Runtime>(
match handle.await {
Ok((name, result)) => match result {
Ok(_) => {
log::info!("MCP server {} initialized successfully", name);
log::info!("MCP server {name} initialized successfully");
successful_count += 1;
}
Err(e) => {
log::error!("MCP server {} failed to initialize: {}", name, e);
log::error!("MCP server {name} failed to initialize: {e}");
failed_count += 1;
}
},
Err(e) => {
log::error!("Failed to join startup task: {}", e);
log::error!("Failed to join startup task: {e}");
failed_count += 1;
}
}
}
log::info!(
"MCP server initialization complete: {} successful, {} failed",
successful_count,
failed_count
"MCP server initialization complete: {successful_count} successful, {failed_count} failed"
);
Ok(())
@ -184,7 +171,7 @@ pub async fn monitor_mcp_server_handle(
servers_state: SharedMcpServers,
name: String,
) -> Option<rmcp::service::QuitReason> {
log::info!("Monitoring MCP server {} health", name);
log::info!("Monitoring MCP server {name} health");
// Monitor server health with periodic checks
loop {
@ -202,17 +189,17 @@ pub async fn monitor_mcp_server_handle(
true
}
Ok(Err(e)) => {
log::warn!("MCP server {} health check failed: {}", name, e);
log::warn!("MCP server {name} health check failed: {e}");
false
}
Err(_) => {
log::warn!("MCP server {} health check timed out", name);
log::warn!("MCP server {name} health check timed out");
false
}
}
} else {
// Server was removed from HashMap (e.g., by deactivate_mcp_server)
log::info!("MCP server {} no longer in running services", name);
log::info!("MCP server {name} no longer in running services");
return Some(rmcp::service::QuitReason::Closed);
}
};
@ -220,8 +207,7 @@ pub async fn monitor_mcp_server_handle(
if !health_check_result {
// Server failed health check - remove it and return
log::error!(
"MCP server {} failed health check, removing from active servers",
name
"MCP server {name} failed health check, removing from active servers"
);
let mut servers = servers_state.lock().await;
if let Some(service) = servers.remove(&name) {
@ -262,7 +248,7 @@ pub async fn start_mcp_server_with_restart<R: Runtime>(
let max_restarts = max_restarts.unwrap_or(5);
// Try the first start attempt and return its result
log::info!("Starting MCP server {} (Initial attempt)", name);
log::info!("Starting MCP server {name} (Initial attempt)");
let first_start_result = schedule_mcp_start_task(
app.clone(),
servers_state.clone(),
@ -273,7 +259,7 @@ pub async fn start_mcp_server_with_restart<R: Runtime>(
match first_start_result {
Ok(_) => {
log::info!("MCP server {} started successfully on first attempt", name);
log::info!("MCP server {name} started successfully on first attempt");
reset_restart_count(&restart_counts, &name).await;
// Check if server was marked as successfully connected (passed verification)
@ -298,18 +284,15 @@ pub async fn start_mcp_server_with_restart<R: Runtime>(
Ok(())
} else {
// Server failed verification, don't monitor for restarts
log::error!("MCP server {} failed verification after startup", name);
log::error!("MCP server {name} failed verification after startup");
Err(format!(
"MCP server {} failed verification after startup",
name
"MCP server {name} failed verification after startup"
))
}
}
Err(e) => {
log::error!(
"Failed to start MCP server {} on first attempt: {}",
name,
e
"Failed to start MCP server {name} on first attempt: {e}"
);
Err(e)
}
@ -336,9 +319,7 @@ pub async fn start_restart_loop<R: Runtime>(
if current_restart_count > max_restarts {
log::error!(
"MCP server {} reached maximum restart attempts ({}). Giving up.",
name,
max_restarts
"MCP server {name} reached maximum restart attempts ({max_restarts}). Giving up."
);
if let Err(e) = app.emit(
"mcp_max_restarts_reached",
@ -353,19 +334,13 @@ pub async fn start_restart_loop<R: Runtime>(
}
log::info!(
"Restarting MCP server {} (Attempt {}/{})",
name,
current_restart_count,
max_restarts
"Restarting MCP server {name} (Attempt {current_restart_count}/{max_restarts})"
);
// Calculate exponential backoff delay
let delay_ms = calculate_exponential_backoff_delay(current_restart_count);
log::info!(
"Waiting {}ms before restart attempt {} for MCP server {}",
delay_ms,
current_restart_count,
name
"Waiting {delay_ms}ms before restart attempt {current_restart_count} for MCP server {name}"
);
sleep(Duration::from_millis(delay_ms)).await;
@ -380,7 +355,7 @@ pub async fn start_restart_loop<R: Runtime>(
match start_result {
Ok(_) => {
log::info!("MCP server {} restarted successfully.", name);
log::info!("MCP server {name} restarted successfully.");
// Check if server passed verification (was marked as successfully connected)
let passed_verification = {
@ -390,8 +365,7 @@ pub async fn start_restart_loop<R: Runtime>(
if !passed_verification {
log::error!(
"MCP server {} failed verification after restart - stopping permanently",
name
"MCP server {name} failed verification after restart - stopping permanently"
);
break;
}
@ -402,9 +376,7 @@ pub async fn start_restart_loop<R: Runtime>(
if let Some(count) = counts.get_mut(&name) {
if *count > 0 {
log::info!(
"MCP server {} restarted successfully, resetting restart count from {} to 0.",
name,
*count
"MCP server {name} restarted successfully, resetting restart count from {count} to 0."
);
*count = 0;
}
@ -415,7 +387,7 @@ pub async fn start_restart_loop<R: Runtime>(
let quit_reason =
monitor_mcp_server_handle(servers_state.clone(), name.clone()).await;
log::info!("MCP server {} quit with reason: {:?}", name, quit_reason);
log::info!("MCP server {name} quit with reason: {quit_reason:?}");
// Check if server was marked as successfully connected
let was_connected = {
@ -426,8 +398,7 @@ pub async fn start_restart_loop<R: Runtime>(
// Only continue restart loop if server was previously connected
if !was_connected {
log::error!(
"MCP server {} failed before establishing successful connection - stopping permanently",
name
"MCP server {name} failed before establishing successful connection - stopping permanently"
);
break;
}
@ -435,11 +406,11 @@ pub async fn start_restart_loop<R: Runtime>(
// Determine if we should restart based on quit reason
let should_restart = match quit_reason {
Some(reason) => {
log::warn!("MCP server {} terminated unexpectedly: {:?}", name, reason);
log::warn!("MCP server {name} terminated unexpectedly: {reason:?}");
true
}
None => {
log::info!("MCP server {} was manually stopped - not restarting", name);
log::info!("MCP server {name} was manually stopped - not restarting");
false
}
};
@ -450,7 +421,7 @@ pub async fn start_restart_loop<R: Runtime>(
// Continue the loop for another restart attempt
}
Err(e) => {
log::error!("Failed to restart MCP server {}: {}", name, e);
log::error!("Failed to restart MCP server {name}: {e}");
// Check if server was marked as successfully connected before
let was_connected = {
@ -461,8 +432,7 @@ pub async fn start_restart_loop<R: Runtime>(
// Only continue restart attempts if server was previously connected
if !was_connected {
log::error!(
"MCP server {} failed restart and was never successfully connected - stopping permanently",
name
"MCP server {name} failed restart and was never successfully connected - stopping permanently"
);
break;
}
@ -529,7 +499,7 @@ async fn schedule_mcp_start_task<R: Runtime>(
},
};
let client = client_info.serve(transport).await.inspect_err(|e| {
log::error!("client error: {:?}", e);
log::error!("client error: {e:?}");
});
match client {
@ -545,12 +515,12 @@ async fn schedule_mcp_start_task<R: Runtime>(
let app_state = app.state::<AppState>();
let mut connected = app_state.mcp_successfully_connected.lock().await;
connected.insert(name.clone(), true);
log::info!("Marked MCP server {} as successfully connected", name);
log::info!("Marked MCP server {name} as successfully connected");
}
}
Err(e) => {
log::error!("Failed to connect to server: {}", e);
return Err(format!("Failed to connect to server: {}", e));
log::error!("Failed to connect to server: {e}");
return Err(format!("Failed to connect to server: {e}"));
}
}
} else if config_params.transport_type.as_deref() == Some("sse") && config_params.url.is_some()
@ -587,8 +557,8 @@ async fn schedule_mcp_start_task<R: Runtime>(
)
.await
.map_err(|e| {
log::error!("transport error: {:?}", e);
format!("Failed to start SSE transport: {}", e)
log::error!("transport error: {e:?}");
format!("Failed to start SSE transport: {e}")
})?;
let client_info = ClientInfo {
@ -600,7 +570,7 @@ async fn schedule_mcp_start_task<R: Runtime>(
},
};
let client = client_info.serve(transport).await.map_err(|e| {
log::error!("client error: {:?}", e);
log::error!("client error: {e:?}");
e.to_string()
});
@ -617,12 +587,12 @@ async fn schedule_mcp_start_task<R: Runtime>(
let app_state = app.state::<AppState>();
let mut connected = app_state.mcp_successfully_connected.lock().await;
connected.insert(name.clone(), true);
log::info!("Marked MCP server {} as successfully connected", name);
log::info!("Marked MCP server {name} as successfully connected");
}
}
Err(e) => {
log::error!("Failed to connect to server: {}", e);
return Err(format!("Failed to connect to server: {}", e));
log::error!("Failed to connect to server: {e}");
return Err(format!("Failed to connect to server: {e}"));
}
}
} else {
@ -639,7 +609,7 @@ async fn schedule_mcp_start_task<R: Runtime>(
cache_dir.push(".npx");
cmd = Command::new(bun_x_path.display().to_string());
cmd.arg("x");
cmd.env("BUN_INSTALL", cache_dir.to_str().unwrap().to_string());
cmd.env("BUN_INSTALL", cache_dir.to_str().unwrap());
}
let uv_path = if cfg!(windows) {
@ -654,7 +624,7 @@ async fn schedule_mcp_start_task<R: Runtime>(
cmd = Command::new(uv_path);
cmd.arg("tool");
cmd.arg("run");
cmd.env("UV_CACHE_DIR", cache_dir.to_str().unwrap().to_string());
cmd.env("UV_CACHE_DIR", cache_dir.to_str().unwrap());
}
#[cfg(windows)]
{
@ -726,8 +696,7 @@ async fn schedule_mcp_start_task<R: Runtime>(
if !server_still_running {
return Err(format!(
"MCP server {} quit immediately after starting",
name
"MCP server {name} quit immediately after starting"
));
}
// Mark server as successfully connected (for restart policy)
@ -735,7 +704,7 @@ async fn schedule_mcp_start_task<R: Runtime>(
let app_state = app.state::<AppState>();
let mut connected = app_state.mcp_successfully_connected.lock().await;
connected.insert(name.clone(), true);
log::info!("Marked MCP server {} as successfully connected", name);
log::info!("Marked MCP server {name} as successfully connected");
}
}
Ok(())
@ -792,7 +761,7 @@ pub async fn restart_active_mcp_servers<R: Runtime>(
);
for (name, config) in active_servers.iter() {
log::info!("Restarting MCP server: {}", name);
log::info!("Restarting MCP server: {name}");
// Start server with restart monitoring - spawn async task
let app_clone = app.clone();
@ -891,9 +860,7 @@ pub async fn spawn_server_monitoring_task<R: Runtime>(
monitor_mcp_server_handle(servers_clone.clone(), name_clone.clone()).await;
log::info!(
"MCP server {} quit with reason: {:?}",
name_clone,
quit_reason
"MCP server {name_clone} quit with reason: {quit_reason:?}"
);
// Check if we should restart based on connection status and quit reason
@ -928,8 +895,7 @@ pub async fn should_restart_server(
// Only restart if server was previously connected
if !was_connected {
log::error!(
"MCP server {} failed before establishing successful connection - stopping permanently",
name
"MCP server {name} failed before establishing successful connection - stopping permanently"
);
return false;
}
@ -937,11 +903,11 @@ pub async fn should_restart_server(
// Determine if we should restart based on quit reason
match quit_reason {
Some(reason) => {
log::warn!("MCP server {} terminated unexpectedly: {:?}", name, reason);
log::warn!("MCP server {name} terminated unexpectedly: {reason:?}");
true
}
None => {
log::info!("MCP server {} was manually stopped - not restarting", name);
log::info!("MCP server {name} was manually stopped - not restarting");
false
}
}


@ -70,7 +70,7 @@ fn test_add_server_config_new_file() {
Some("mcp_config_test_new.json"),
);
assert!(result.is_ok(), "Failed to add server config: {:?}", result);
assert!(result.is_ok(), "Failed to add server config: {result:?}");
// Verify the config was added correctly
let config_content = std::fs::read_to_string(&config_path)
@ -128,7 +128,7 @@ fn test_add_server_config_existing_servers() {
Some("mcp_config_test_existing.json"),
);
assert!(result.is_ok(), "Failed to add server config: {:?}", result);
assert!(result.is_ok(), "Failed to add server config: {result:?}");
// Verify both servers exist
let config_content = std::fs::read_to_string(&config_path)


@ -67,7 +67,7 @@ async fn proxy_request(
.any(|&method| method.eq_ignore_ascii_case(requested_method));
if !method_allowed {
log::warn!("CORS preflight: Method '{}' not allowed", requested_method);
log::warn!("CORS preflight: Method '{requested_method}' not allowed");
return Ok(Response::builder()
.status(StatusCode::METHOD_NOT_ALLOWED)
.body(Body::from("Method not allowed"))
@ -80,14 +80,12 @@ async fn proxy_request(
let is_trusted = if is_whitelisted_path {
log::debug!(
"CORS preflight: Bypassing host check for whitelisted path: {}",
request_path
"CORS preflight: Bypassing host check for whitelisted path: {request_path}"
);
true
} else if !host.is_empty() {
log::debug!(
"CORS preflight: Host is '{}', trusted hosts: {:?}",
host,
"CORS preflight: Host is '{host}', trusted hosts: {:?}",
&config.trusted_hosts
);
is_valid_host(host, &config.trusted_hosts)
@ -98,9 +96,7 @@ async fn proxy_request(
if !is_trusted {
log::warn!(
"CORS preflight: Host '{}' not trusted for path '{}'",
host,
request_path
"CORS preflight: Host '{host}' not trusted for path '{request_path}'"
);
return Ok(Response::builder()
.status(StatusCode::FORBIDDEN)
@ -158,8 +154,7 @@ async fn proxy_request(
if !headers_valid {
log::warn!(
"CORS preflight: Some requested headers not allowed: {}",
requested_headers
"CORS preflight: Some requested headers not allowed: {requested_headers}"
);
return Ok(Response::builder()
.status(StatusCode::FORBIDDEN)
@ -186,9 +181,7 @@ async fn proxy_request(
}
log::debug!(
"CORS preflight response: host_trusted={}, origin='{}'",
is_trusted,
origin
"CORS preflight response: host_trusted={is_trusted}, origin='{origin}'"
);
return Ok(response.body(Body::empty()).unwrap());
}
@ -252,7 +245,7 @@ async fn proxy_request(
.unwrap());
}
} else {
log::debug!("Bypassing host validation for whitelisted path: {}", path);
log::debug!("Bypassing host validation for whitelisted path: {path}");
}
if !is_whitelisted_path && !config.proxy_api_key.is_empty() {
@ -285,8 +278,7 @@ async fn proxy_request(
}
} else if is_whitelisted_path {
log::debug!(
"Bypassing authorization check for whitelisted path: {}",
path
"Bypassing authorization check for whitelisted path: {path}"
);
}
@ -312,8 +304,7 @@ async fn proxy_request(
| (hyper::Method::POST, "/completions")
| (hyper::Method::POST, "/embeddings") => {
log::debug!(
"Handling POST request to {} requiring model lookup in body",
destination_path
"Handling POST request to {destination_path} requiring model lookup in body",
);
let body_bytes = match hyper::body::to_bytes(body).await {
Ok(bytes) => bytes,
@ -336,13 +327,12 @@ async fn proxy_request(
match serde_json::from_slice::<serde_json::Value>(&body_bytes) {
Ok(json_body) => {
if let Some(model_id) = json_body.get("model").and_then(|v| v.as_str()) {
log::debug!("Extracted model_id: {}", model_id);
log::debug!("Extracted model_id: {model_id}");
let sessions_guard = sessions.lock().await;
if sessions_guard.is_empty() {
log::warn!(
"Request for model '{}' but no models are running.",
model_id
"Request for model '{model_id}' but no models are running."
);
let mut error_response =
Response::builder().status(StatusCode::SERVICE_UNAVAILABLE);
@ -363,9 +353,9 @@ async fn proxy_request(
{
target_port = Some(session.info.port);
session_api_key = Some(session.info.api_key.clone());
log::debug!("Found session for model_id {}", model_id,);
log::debug!("Found session for model_id {model_id}");
} else {
log::warn!("No running session found for model_id: {}", model_id);
log::warn!("No running session found for model_id: {model_id}");
let mut error_response =
Response::builder().status(StatusCode::NOT_FOUND);
error_response = add_cors_headers_with_host_and_origin(
@ -376,15 +366,13 @@ async fn proxy_request(
);
return Ok(error_response
.body(Body::from(format!(
"No running session found for model '{}'",
model_id
"No running session found for model '{model_id}'"
)))
.unwrap());
}
} else {
log::warn!(
"POST body for {} is missing 'model' field or it's not a string",
destination_path
"POST body for {destination_path} is missing 'model' field or it's not a string"
);
let mut error_response =
Response::builder().status(StatusCode::BAD_REQUEST);
@ -401,9 +389,7 @@ async fn proxy_request(
}
Err(e) => {
log::warn!(
"Failed to parse POST body for {} as JSON: {}",
destination_path,
e
"Failed to parse POST body for {destination_path} as JSON: {e}"
);
let mut error_response = Response::builder().status(StatusCode::BAD_REQUEST);
error_response = add_cors_headers_with_host_and_origin(
@ -535,7 +521,7 @@ async fn proxy_request(
let is_explicitly_whitelisted_get = method == hyper::Method::GET
&& whitelisted_paths.contains(&destination_path.as_str());
if is_explicitly_whitelisted_get {
log::debug!("Handled whitelisted GET path: {}", destination_path);
log::debug!("Handled whitelisted GET path: {destination_path}");
let mut error_response = Response::builder().status(StatusCode::NOT_FOUND);
error_response = add_cors_headers_with_host_and_origin(
error_response,
@ -546,9 +532,7 @@ async fn proxy_request(
return Ok(error_response.body(Body::from("Not Found")).unwrap());
} else {
log::warn!(
"Unhandled method/path for dynamic routing: {} {}",
method,
destination_path
"Unhandled method/path for dynamic routing: {method} {destination_path}"
);
let mut error_response = Response::builder().status(StatusCode::NOT_FOUND);
error_response = add_cors_headers_with_host_and_origin(
@ -581,7 +565,7 @@ async fn proxy_request(
}
};
let upstream_url = format!("http://127.0.0.1:{}{}", port, destination_path);
let upstream_url = format!("http://127.0.0.1:{port}{destination_path}");
let mut outbound_req = client.request(method.clone(), &upstream_url);
@ -593,13 +577,14 @@ async fn proxy_request(
if let Some(key) = session_api_key {
log::debug!("Adding session Authorization header");
outbound_req = outbound_req.header("Authorization", format!("Bearer {}", key));
outbound_req = outbound_req.header("Authorization", format!("Bearer {key}"));
} else {
log::debug!("No session API key available for this request");
}
let outbound_req_with_body = if let Some(bytes) = buffered_body {
log::debug!("Sending buffered body ({} bytes)", bytes.len());
let bytes_len = bytes.len();
log::debug!("Sending buffered body ({bytes_len} bytes)");
outbound_req.body(bytes)
} else {
log::error!("Internal logic error: Request reached proxy stage without a buffered body.");
@ -618,7 +603,7 @@ async fn proxy_request(
match outbound_req_with_body.send().await {
Ok(response) => {
let status = response.status();
log::debug!("Received response with status: {}", status);
log::debug!("Received response with status: {status}");
let mut builder = Response::builder().status(status);
@ -648,7 +633,7 @@ async fn proxy_request(
}
}
Err(e) => {
log::error!("Stream error: {}", e);
log::error!("Stream error: {e}");
break;
}
}
@ -659,8 +644,8 @@ async fn proxy_request(
Ok(builder.body(body).unwrap())
}
Err(e) => {
let error_msg = format!("Proxy request to model failed: {}", e);
log::error!("{}", error_msg);
let error_msg = format!("Proxy request to model failed: {e}");
log::error!("{error_msg}");
let mut error_response = Response::builder().status(StatusCode::BAD_GATEWAY);
error_response = add_cors_headers_with_host_and_origin(
error_response,
@ -675,14 +660,12 @@ async fn proxy_request(
fn add_cors_headers_with_host_and_origin(
builder: hyper::http::response::Builder,
host: &str,
_host: &str,
origin: &str,
trusted_hosts: &[Vec<String>],
_trusted_hosts: &[Vec<String>],
) -> hyper::http::response::Builder {
let mut builder = builder;
let allow_origin_header = if !origin.is_empty() && is_valid_host(host, trusted_hosts) {
origin.to_string()
} else if !origin.is_empty() {
let allow_origin_header = if !origin.is_empty() {
origin.to_string()
} else {
"*".to_string()
@ -706,6 +689,7 @@ pub async fn is_server_running(server_handle: Arc<Mutex<Option<ServerHandle>>>)
handle_guard.is_some()
}
#[allow(clippy::too_many_arguments)]
pub async fn start_server(
server_handle: Arc<Mutex<Option<ServerHandle>>>,
sessions: Arc<Mutex<HashMap<i32, LLamaBackendSession>>>,
@ -721,9 +705,9 @@ pub async fn start_server(
return Err("Server is already running".into());
}
let addr: SocketAddr = format!("{}:{}", host, port)
let addr: SocketAddr = format!("{host}:{port}")
.parse()
.map_err(|e| format!("Invalid address: {}", e))?;
.map_err(|e| format!("Invalid address: {e}"))?;
let config = ProxyConfig {
prefix,
@ -752,15 +736,15 @@ pub async fn start_server(
let server = match Server::try_bind(&addr) {
Ok(builder) => builder.serve(make_svc),
Err(e) => {
log::error!("Failed to bind to {}: {}", addr, e);
log::error!("Failed to bind to {addr}: {e}");
return Err(Box::new(e));
}
};
log::info!("Jan API server started on http://{}", addr);
log::info!("Jan API server started on http://{addr}");
let server_task = tokio::spawn(async move {
if let Err(e) = server.await {
log::error!("Server error: {}", e);
log::error!("Server error: {e}");
return Err(Box::new(e) as Box<dyn std::error::Error + Send + Sync>);
}
Ok(())
@ -768,7 +752,7 @@ pub async fn start_server(
*handle_guard = Some(server_task);
let actual_port = addr.port();
log::info!("Jan API server started successfully on port {}", actual_port);
log::info!("Jan API server started successfully on port {actual_port}");
Ok(actual_port)
}


@ -38,7 +38,7 @@ pub fn install_extensions<R: Runtime>(app: tauri::AppHandle<R>, force: bool) ->
if std::env::var("IS_CLEAN").is_ok() {
clean_up = true;
}
log::info!("Installing extensions. Clean up: {}", clean_up);
log::info!("Installing extensions. Clean up: {clean_up}");
if !clean_up && extensions_path.exists() {
return Ok(());
}
@ -68,7 +68,7 @@ pub fn install_extensions<R: Runtime>(app: tauri::AppHandle<R>, force: bool) ->
let entry = entry.map_err(|e| e.to_string())?;
let path = entry.path();
if path.extension().map_or(false, |ext| ext == "tgz") {
if path.extension().is_some_and(|ext| ext == "tgz") {
let tar_gz = File::open(&path).map_err(|e| e.to_string())?;
let gz_decoder = GzDecoder::new(tar_gz);
let mut archive = Archive::new(gz_decoder);
@ -134,7 +134,7 @@ pub fn install_extensions<R: Runtime>(app: tauri::AppHandle<R>, force: bool) ->
extensions_list.push(new_extension);
log::info!("Installed extension to {:?}", extension_dir);
log::info!("Installed extension to {extension_dir:?}");
}
}
fs::write(
@ -154,7 +154,7 @@ pub fn migrate_mcp_servers(
let mcp_version = store
.get("mcp_version")
.and_then(|v| v.as_i64())
.unwrap_or_else(|| 0);
.unwrap_or(0);
if mcp_version < 1 {
log::info!("Migrating MCP schema version 1");
let result = add_server_config(
@ -168,7 +168,7 @@ pub fn migrate_mcp_servers(
}),
);
if let Err(e) = result {
log::error!("Failed to add server config: {}", e);
log::error!("Failed to add server config: {e}");
}
}
store.set("mcp_version", 1);
@ -212,7 +212,7 @@ pub fn setup_mcp<R: Runtime>(app: &App<R>) {
let app_handle = app.handle().clone();
tauri::async_runtime::spawn(async move {
if let Err(e) = run_mcp_commands(&app_handle, servers).await {
log::error!("Failed to run mcp commands: {}", e);
log::error!("Failed to run mcp commands: {e}");
}
app_handle
.emit("mcp-update", "MCP servers updated")
@ -258,7 +258,7 @@ pub fn setup_tray(app: &App) -> tauri::Result<TrayIcon> {
app.exit(0);
}
other => {
println!("menu item {} not handled", other);
println!("menu item {other} not handled");
}
})
.build(app)


@ -18,12 +18,12 @@ pub fn factory_reset<R: Runtime>(app_handle: tauri::AppHandle<R>, state: State<'
let windows = app_handle.webview_windows();
for (label, window) in windows.iter() {
window.close().unwrap_or_else(|_| {
log::warn!("Failed to close window: {:?}", label);
log::warn!("Failed to close window: {label:?}");
});
}
}
let data_folder = get_jan_data_folder_path(app_handle.clone());
log::info!("Factory reset, removing data folder: {:?}", data_folder);
log::info!("Factory reset, removing data folder: {data_folder:?}");
tauri::async_runtime::block_on(async {
clean_up_mcp_servers(state.clone()).await;
@ -31,7 +31,7 @@ pub fn factory_reset<R: Runtime>(app_handle: tauri::AppHandle<R>, state: State<'
if data_folder.exists() {
if let Err(e) = fs::remove_dir_all(&data_folder) {
log::error!("Failed to remove data folder: {}", e);
log::error!("Failed to remove data folder: {e}");
return;
}
}
@ -59,17 +59,17 @@ pub fn open_app_directory<R: Runtime>(app: AppHandle<R>) {
if cfg!(target_os = "windows") {
std::process::Command::new("explorer")
.arg(app_path)
.spawn()
.status()
.expect("Failed to open app directory");
} else if cfg!(target_os = "macos") {
std::process::Command::new("open")
.arg(app_path)
.spawn()
.status()
.expect("Failed to open app directory");
} else {
std::process::Command::new("xdg-open")
.arg(app_path)
.spawn()
.status()
.expect("Failed to open app directory");
}
}
@ -80,17 +80,17 @@ pub fn open_file_explorer(path: String) {
if cfg!(target_os = "windows") {
std::process::Command::new("explorer")
.arg(path)
.spawn()
.status()
.expect("Failed to open file explorer");
} else if cfg!(target_os = "macos") {
std::process::Command::new("open")
.arg(path)
.spawn()
.status()
.expect("Failed to open file explorer");
} else {
std::process::Command::new("xdg-open")
.arg(path)
.spawn()
.status()
.expect("Failed to open file explorer");
}
}
@ -102,7 +102,7 @@ pub async fn read_logs<R: Runtime>(app: AppHandle<R>) -> Result<String, String>
let content = fs::read_to_string(log_path).map_err(|e| e.to_string())?;
Ok(content)
} else {
Err(format!("Log file not found"))
Err("Log file not found".to_string())
}
}
@ -112,7 +112,7 @@ pub fn is_library_available(library: &str) -> bool {
match unsafe { libloading::Library::new(library) } {
Ok(_) => true,
Err(e) => {
log::info!("Library {} is not available: {}", library, e);
log::info!("Library {library} is not available: {e}");
false
}
}


@ -38,7 +38,7 @@ pub async fn list_threads<R: Runtime>(
match serde_json::from_str(&data) {
Ok(thread) => threads.push(thread),
Err(e) => {
println!("Failed to parse thread file: {}", e);
println!("Failed to parse thread file: {e}");
continue; // skip invalid thread files
}
}
@ -149,7 +149,7 @@ pub async fn create_message<R: Runtime>(
.map_err(|e| e.to_string())?;
let data = serde_json::to_string(&message).map_err(|e| e.to_string())?;
writeln!(file, "{}", data).map_err(|e| e.to_string())?;
writeln!(file, "{data}").map_err(|e| e.to_string())?;
// Explicitly flush to ensure data is written before returning
file.flush().map_err(|e| e.to_string())?;
@ -234,7 +234,7 @@ pub async fn get_thread_assistant<R: Runtime>(
let data = fs::read_to_string(&path).map_err(|e| e.to_string())?;
let thread: serde_json::Value = serde_json::from_str(&data).map_err(|e| e.to_string())?;
if let Some(assistants) = thread.get("assistants").and_then(|a| a.as_array()) {
if let Some(first) = assistants.get(0) {
if let Some(first) = assistants.first() {
Ok(first.clone())
} else {
Err("Assistant not found".to_string())


@ -33,7 +33,7 @@ pub fn write_messages_to_file(
let mut file = File::create(path).map_err(|e| e.to_string())?;
for msg in messages {
let data = serde_json::to_string(msg).map_err(|e| e.to_string())?;
writeln!(file, "{}", data).map_err(|e| e.to_string())?;
writeln!(file, "{data}").map_err(|e| e.to_string())?;
}
Ok(())
}


@ -16,7 +16,7 @@ fn mock_app_with_temp_data_dir() -> (tauri::App<MockRuntime>, PathBuf) {
.as_nanos();
let data_dir = std::env::current_dir()
.unwrap_or_else(|_| PathBuf::from("."))
.join(format!("test-data-{:?}-{}", unique_id, timestamp));
.join(format!("test-data-{unique_id:?}-{timestamp}"));
println!("Mock app data dir: {}", data_dir.display());
// Ensure the unique test directory exists
let _ = fs::create_dir_all(&data_dir);
@ -42,7 +42,7 @@ async fn test_create_and_list_threads() {
// List threads
let threads = list_threads(app.handle().clone()).await.unwrap();
assert!(threads.len() > 0);
assert!(!threads.is_empty());
// Clean up
let _ = fs::remove_dir_all(data_dir);
@ -88,7 +88,7 @@ async fn test_create_and_list_messages() {
let messages = list_messages(app.handle().clone(), thread_id.clone())
.await
.unwrap();
assert!(messages.len() > 0, "Expected at least one message, but got none. Thread ID: {}", thread_id);
assert!(!messages.is_empty(), "Expected at least one message, but got none. Thread ID: {thread_id}");
assert_eq!(messages[0]["role"], "user");
// Clean up


@ -151,17 +151,17 @@ pub fn run() {
.config()
.version
.clone()
.unwrap_or_else(|| "".to_string());
.unwrap_or_default();
// Migrate extensions
if let Err(e) =
setup::install_extensions(app.handle().clone(), stored_version != app_version)
{
log::error!("Failed to install extensions: {}", e);
log::error!("Failed to install extensions: {e}");
}
// Migrate MCP servers
if let Err(e) = setup::migrate_mcp_servers(app.handle().clone(), store.clone()) {
log::error!("Failed to migrate MCP servers: {}", e);
log::error!("Failed to migrate MCP servers: {e}");
}
// Store the new app version
@ -187,8 +187,8 @@ pub fn run() {
.expect("error while running tauri application");
// Handle app lifecycle events
app.run(|app, event| match event {
RunEvent::Exit => {
app.run(|app, event| {
if let RunEvent::Exit = event {
// This is called when the app is actually exiting (e.g., macOS dock quit)
// We can't prevent this, so run cleanup quickly
let app_handle = app.clone();
@ -208,6 +208,5 @@ pub fn run() {
});
});
}
_ => {}
});
}


@ -15,8 +15,7 @@ import { IconPlus } from '@tabler/icons-react'
import { useState } from 'react'
import { getProviderTitle } from '@/lib/utils'
import { useTranslation } from '@/i18n/react-i18next-compat'
import { ModelCapabilities } from '@/types/models'
import { models as providerModels } from 'token.js'
import { getModelCapabilities } from '@/lib/models'
import { toast } from 'sonner'
type DialogAddModelProps = {
@ -52,23 +51,7 @@ export const DialogAddModel = ({ provider, trigger }: DialogAddModelProps) => {
id: modelId,
model: modelId,
name: modelId,
capabilities: [
ModelCapabilities.COMPLETION,
(
providerModels[
provider.provider as unknown as keyof typeof providerModels
]?.supportsToolCalls as unknown as string[]
)?.includes(modelId)
? ModelCapabilities.TOOLS
: undefined,
(
providerModels[
provider.provider as unknown as keyof typeof providerModels
]?.supportsImages as unknown as string[]
)?.includes(modelId)
? ModelCapabilities.VISION
: undefined,
].filter(Boolean) as string[],
capabilities: getModelCapabilities(provider.provider, modelId),
version: '1.0',
}


@ -5,19 +5,30 @@ import {
removeYamlFrontMatter,
extractModelName,
extractModelRepo,
getModelCapabilities,
} from '../models'
import { ModelCapabilities } from '@/types/models'
// Mock the token.js module
vi.mock('token.js', () => ({
models: {
openai: {
models: ['gpt-3.5-turbo', 'gpt-4'],
supportsToolCalls: ['gpt-3.5-turbo', 'gpt-4'],
supportsImages: ['gpt-4-vision-preview'],
},
anthropic: {
models: ['claude-3-sonnet', 'claude-3-haiku'],
supportsToolCalls: ['claude-3-sonnet'],
supportsImages: ['claude-3-sonnet', 'claude-3-haiku'],
},
mistral: {
models: ['mistral-7b', 'mistral-8x7b'],
supportsToolCalls: ['mistral-8x7b'],
},
// Provider with no capability arrays
cohere: {
models: ['command', 'command-light'],
},
},
}))
@ -223,3 +234,74 @@ describe('extractModelRepo', () => {
)
})
})
describe('getModelCapabilities', () => {
it('returns completion capability for all models', () => {
const capabilities = getModelCapabilities('openai', 'gpt-3.5-turbo')
expect(capabilities).toContain(ModelCapabilities.COMPLETION)
})
it('includes tools capability when model supports it', () => {
const capabilities = getModelCapabilities('openai', 'gpt-3.5-turbo')
expect(capabilities).toContain(ModelCapabilities.TOOLS)
expect(capabilities).toContain(ModelCapabilities.COMPLETION)
})
it('excludes tools capability when model does not support it', () => {
const capabilities = getModelCapabilities('mistral', 'mistral-7b')
expect(capabilities).not.toContain(ModelCapabilities.TOOLS)
expect(capabilities).toContain(ModelCapabilities.COMPLETION)
})
it('includes vision capability when model supports it', () => {
const capabilities = getModelCapabilities('openai', 'gpt-4-vision-preview')
expect(capabilities).toContain(ModelCapabilities.VISION)
expect(capabilities).toContain(ModelCapabilities.COMPLETION)
})
it('excludes vision capability when model does not support it', () => {
const capabilities = getModelCapabilities('openai', 'gpt-3.5-turbo')
expect(capabilities).not.toContain(ModelCapabilities.VISION)
})
it('includes both tools and vision when model supports both', () => {
const capabilities = getModelCapabilities('anthropic', 'claude-3-sonnet')
expect(capabilities).toContain(ModelCapabilities.COMPLETION)
expect(capabilities).toContain(ModelCapabilities.TOOLS)
expect(capabilities).toContain(ModelCapabilities.VISION)
})
it('handles provider with no capability arrays gracefully', () => {
const capabilities = getModelCapabilities('cohere', 'command')
expect(capabilities).toEqual([ModelCapabilities.COMPLETION])
expect(capabilities).not.toContain(ModelCapabilities.TOOLS)
expect(capabilities).not.toContain(ModelCapabilities.VISION)
})
it('handles unknown provider gracefully', () => {
const capabilities = getModelCapabilities('openrouter', 'some-model')
expect(capabilities).toEqual([ModelCapabilities.COMPLETION])
expect(capabilities).not.toContain(ModelCapabilities.TOOLS)
expect(capabilities).not.toContain(ModelCapabilities.VISION)
})
it('handles model not in capability list', () => {
const capabilities = getModelCapabilities('anthropic', 'claude-3-haiku')
expect(capabilities).toContain(ModelCapabilities.COMPLETION)
expect(capabilities).toContain(ModelCapabilities.VISION)
expect(capabilities).not.toContain(ModelCapabilities.TOOLS)
})
it('returns only completion for provider with partial capability data', () => {
// Mistral has supportsToolCalls but no supportsImages
const capabilities = getModelCapabilities('mistral', 'mistral-7b')
expect(capabilities).toEqual([ModelCapabilities.COMPLETION])
})
it('handles model that supports tools but not vision', () => {
const capabilities = getModelCapabilities('mistral', 'mistral-8x7b')
expect(capabilities).toContain(ModelCapabilities.COMPLETION)
expect(capabilities).toContain(ModelCapabilities.TOOLS)
expect(capabilities).not.toContain(ModelCapabilities.VISION)
})
})


@ -1,4 +1,5 @@
import { models } from 'token.js'
import { ModelCapabilities } from '@/types/models'
export const defaultModel = (provider?: string) => {
if (!provider || !Object.keys(models).includes(provider)) {
@ -10,6 +11,38 @@ export const defaultModel = (provider?: string) => {
)[0]
}
/**
* Determines model capabilities based on provider configuration from token.js
* @param providerName - The provider name (e.g., 'openai', 'anthropic', 'openrouter')
* @param modelId - The model ID to check capabilities for
* @returns Array of model capabilities
*/
export const getModelCapabilities = (
providerName: string,
modelId: string
): string[] => {
const providerConfig =
models[providerName as unknown as keyof typeof models]
const supportsToolCalls = Array.isArray(
providerConfig?.supportsToolCalls as unknown
)
? (providerConfig.supportsToolCalls as unknown as string[])
: []
const supportsImages = Array.isArray(
providerConfig?.supportsImages as unknown
)
? (providerConfig.supportsImages as unknown as string[])
: []
return [
ModelCapabilities.COMPLETION,
supportsToolCalls.includes(modelId) ? ModelCapabilities.TOOLS : undefined,
supportsImages.includes(modelId) ? ModelCapabilities.VISION : undefined,
].filter(Boolean) as string[]
}
/**
* This utility is to extract cortexso model description from README.md file
* @returns


@ -10,6 +10,7 @@ import { modelSettings } from '@/lib/predefined'
import { ExtensionManager } from '@/lib/extension'
import { fetch as fetchTauri } from '@tauri-apps/plugin-http'
import { DefaultProvidersService } from './default'
import { getModelCapabilities } from '@/lib/models'
export class TauriProvidersService extends DefaultProvidersService {
fetch(): typeof fetch {
@ -26,32 +27,16 @@ export class TauriProvidersService extends DefaultProvidersService {
provider.provider as unknown as keyof typeof providerModels
].models as unknown as string[]
if (Array.isArray(builtInModels))
if (Array.isArray(builtInModels)) {
models = builtInModels.map((model) => {
const modelManifest = models.find((e) => e.id === model)
// TODO: Check chat_template for tool call support
const capabilities = [
ModelCapabilities.COMPLETION,
(
providerModels[
provider.provider as unknown as keyof typeof providerModels
]?.supportsToolCalls as unknown as string[]
)?.includes(model)
? ModelCapabilities.TOOLS
: undefined,
(
providerModels[
provider.provider as unknown as keyof typeof providerModels
]?.supportsImages as unknown as string[]
)?.includes(model)
? ModelCapabilities.VISION
: undefined,
].filter(Boolean) as string[]
return {
...(modelManifest ?? { id: model, name: model }),
capabilities,
capabilities: getModelCapabilities(provider.provider, model),
} as Model
})
}
}
return {


@ -11,6 +11,7 @@ import { ExtensionManager } from '@/lib/extension'
import type { ProvidersService } from './types'
import { PlatformFeatures } from '@/lib/platform/const'
import { PlatformFeature } from '@/lib/platform/types'
import { getModelCapabilities } from '@/lib/models'
export class WebProvidersService implements ProvidersService {
async getProviders(): Promise<ModelProvider[]> {
@ -88,19 +89,9 @@ export class WebProvidersService implements ProvidersService {
models = builtInModels.map((model) => {
const modelManifest = models.find((e) => e.id === model)
// TODO: Check chat_template for tool call support
const capabilities = [
ModelCapabilities.COMPLETION,
(
providerModels[
provider.provider as unknown as keyof typeof providerModels
]?.supportsToolCalls as unknown as string[]
)?.includes(model)
? ModelCapabilities.TOOLS
: undefined,
].filter(Boolean) as string[]
return {
...(modelManifest ?? { id: model, name: model }),
capabilities,
capabilities: getModelCapabilities(provider.provider, model),
} as Model
})
}