diff --git a/docs/public/assets/images/changelog/jan-release-v0.7.0.jpeg b/docs/public/assets/images/changelog/jan-release-v0.7.0.jpeg
new file mode 100644
index 000000000..cb0d4a3a9
Binary files /dev/null and b/docs/public/assets/images/changelog/jan-release-v0.7.0.jpeg differ
diff --git a/docs/src/pages/changelog/2025-10-02-jan-projects.mdx b/docs/src/pages/changelog/2025-10-02-jan-projects.mdx
new file mode 100644
index 000000000..63f29194d
--- /dev/null
+++ b/docs/src/pages/changelog/2025-10-02-jan-projects.mdx
@@ -0,0 +1,28 @@
+---
+title: "Jan v0.7.0: Jan Projects"
+version: v0.7.0
+description: "Jan v0.7.0 introduces Projects, model renaming, llama.cpp auto-tuning, model stats, and Azure support."
+date: 2025-10-02
+ogImage: "/assets/images/changelog/jan-release-v0.7.0.jpeg"
+---
+
+import ChangelogHeader from "@/components/Changelog/ChangelogHeader"
+import { Callout } from 'nextra/components'
+
+
+
+## Jan v0.7.0: Jan Projects
+
+Jan v0.7.0 is live! This release focuses on helping you organize your workspace and better understand how models run.
+
+### What’s new
+- **Projects**: Group related chats under one project for a cleaner workflow.
+- **Rename models**: Give your models custom names for easier identification.
+- **Model context stats**: See context usage when a model runs.
+- **Auto-loaded cloud models**: Cloud model names now appear automatically.
+
+---
+
+Update your Jan or [download the latest version](https://jan.ai/).
+
+For the complete list of changes, see the [GitHub release notes](https://github.com/janhq/jan/releases/tag/v0.7.0).
diff --git a/src-tauri/src/core/app/commands.rs b/src-tauri/src/core/app/commands.rs
index 0d9c66c12..18e746869 100644
--- a/src-tauri/src/core/app/commands.rs
+++ b/src-tauri/src/core/app/commands.rs
@@ -19,10 +19,7 @@ pub fn get_app_configurations(app_handle: tauri::AppHandle) -> Ap
let default_data_folder = default_data_folder_path(app_handle.clone());
if !configuration_file.exists() {
- log::info!(
- "App config not found, creating default config at {:?}",
- configuration_file
- );
+ log::info!("App config not found, creating default config at {configuration_file:?}");
app_default_configuration.data_folder = default_data_folder;
@@ -30,7 +27,7 @@ pub fn get_app_configurations(app_handle: tauri::AppHandle) -> Ap
&configuration_file,
serde_json::to_string(&app_default_configuration).unwrap(),
) {
- log::error!("Failed to create default config: {}", err);
+ log::error!("Failed to create default config: {err}");
}
return app_default_configuration;
@@ -40,18 +37,12 @@ pub fn get_app_configurations(app_handle: tauri::AppHandle) -> Ap
Ok(content) => match serde_json::from_str::<AppConfiguration>(&content) {
Ok(app_configurations) => app_configurations,
Err(err) => {
- log::error!(
- "Failed to parse app config, returning default config instead. Error: {}",
- err
- );
+ log::error!("Failed to parse app config, returning default config instead. Error: {err}");
app_default_configuration
}
},
Err(err) => {
- log::error!(
- "Failed to read app config, returning default config instead. Error: {}",
- err
- );
+ log::error!("Failed to read app config, returning default config instead. Error: {err}");
app_default_configuration
}
}
@@ -63,10 +54,7 @@ pub fn update_app_configuration(
configuration: AppConfiguration,
) -> Result<(), String> {
let configuration_file = get_configuration_file_path(app_handle);
- log::info!(
- "update_app_configuration, configuration_file: {:?}",
- configuration_file
- );
+ log::info!("update_app_configuration, configuration_file: {configuration_file:?}");
fs::write(
configuration_file,
@@ -95,8 +83,7 @@ pub fn get_jan_data_folder_path(app_handle: tauri::AppHandle) ->
pub fn get_configuration_file_path(app_handle: tauri::AppHandle) -> PathBuf {
let app_path = app_handle.path().app_data_dir().unwrap_or_else(|err| {
log::error!(
- "Failed to get app data directory: {}. Using home directory instead.",
- err
+ "Failed to get app data directory: {err}. Using home directory instead."
);
let home_dir = std::env::var(if cfg!(target_os = "windows") {
@@ -130,9 +117,9 @@ pub fn get_configuration_file_path(app_handle: tauri::AppHandle)
.join(package_name);
if old_data_dir.exists() {
- return old_data_dir.join(CONFIGURATION_FILE_NAME);
+ old_data_dir.join(CONFIGURATION_FILE_NAME)
} else {
- return app_path.join(CONFIGURATION_FILE_NAME);
+ app_path.join(CONFIGURATION_FILE_NAME)
}
}
@@ -156,7 +143,7 @@ pub fn default_data_folder_path(app_handle: tauri::AppHandle) ->
#[tauri::command]
pub fn get_user_home_path(app: AppHandle) -> String {
- return get_app_configurations(app.clone()).data_folder;
+ get_app_configurations(app.clone()).data_folder
}
#[tauri::command]
@@ -171,16 +158,12 @@ pub fn change_app_data_folder(
// Create the new data folder if it doesn't exist
if !new_data_folder_path.exists() {
fs::create_dir_all(&new_data_folder_path)
- .map_err(|e| format!("Failed to create new data folder: {}", e))?;
+ .map_err(|e| format!("Failed to create new data folder: {e}"))?;
}
// Copy all files from the old folder to the new one
if current_data_folder.exists() {
- log::info!(
- "Copying data from {:?} to {:?}",
- current_data_folder,
- new_data_folder_path
- );
+ log::info!("Copying data from {current_data_folder:?} to {new_data_folder_path:?}");
// Check if this is a parent directory to avoid infinite recursion
if new_data_folder_path.starts_with(&current_data_folder) {
@@ -193,7 +176,7 @@ pub fn change_app_data_folder(
&new_data_folder_path,
&[".uvx", ".npx"],
)
- .map_err(|e| format!("Failed to copy data to new folder: {}", e))?;
+ .map_err(|e| format!("Failed to copy data to new folder: {e}"))?;
} else {
log::info!("Current data folder does not exist, nothing to copy");
}
diff --git a/src-tauri/src/core/downloads/commands.rs b/src-tauri/src/core/downloads/commands.rs
index 6d50ed1a3..a24ae32f0 100644
--- a/src-tauri/src/core/downloads/commands.rs
+++ b/src-tauri/src/core/downloads/commands.rs
@@ -19,7 +19,7 @@ pub async fn download_files(
{
let mut download_manager = state.download_manager.lock().await;
if download_manager.cancel_tokens.contains_key(task_id) {
- return Err(format!("task_id {} exists", task_id));
+ return Err(format!("task_id {task_id} exists"));
}
download_manager
.cancel_tokens
@@ -60,9 +60,9 @@ pub async fn cancel_download_task(state: State<'_, AppState>, task_id: &str) ->
let mut download_manager = state.download_manager.lock().await;
if let Some(token) = download_manager.cancel_tokens.remove(task_id) {
token.cancel();
- log::info!("Cancelled download task: {}", task_id);
+ log::info!("Cancelled download task: {task_id}");
Ok(())
} else {
- Err(format!("No download task: {}", task_id))
+ Err(format!("No download task: {task_id}"))
}
}
diff --git a/src-tauri/src/core/downloads/helpers.rs b/src-tauri/src/core/downloads/helpers.rs
index d3d8f6b7c..3ce1d89fa 100644
--- a/src-tauri/src/core/downloads/helpers.rs
+++ b/src-tauri/src/core/downloads/helpers.rs
@@ -15,7 +15,7 @@ use url::Url;
// ===== UTILITY FUNCTIONS =====
pub fn err_to_string(e: E) -> String {
- format!("Error: {}", e)
+ format!("Error: {e}")
}
@@ -55,7 +55,7 @@ async fn validate_downloaded_file(
)
.unwrap();
- log::info!("Starting validation for model: {}", model_id);
+ log::info!("Starting validation for model: {model_id}");
// Validate size if provided (fast check first)
if let Some(expected_size) = &item.size {
@@ -73,8 +73,7 @@ async fn validate_downloaded_file(
actual_size
);
return Err(format!(
- "Size verification failed. Expected {} bytes but got {} bytes.",
- expected_size, actual_size
+ "Size verification failed. Expected {expected_size} bytes but got {actual_size} bytes."
));
}
@@ -90,7 +89,7 @@ async fn validate_downloaded_file(
save_path.display(),
e
);
- return Err(format!("Failed to verify file size: {}", e));
+ return Err(format!("Failed to verify file size: {e}"));
}
}
}
@@ -115,9 +114,7 @@ async fn validate_downloaded_file(
computed_sha256
);
- return Err(format!(
- "Hash verification failed. The downloaded file is corrupted or has been tampered with."
- ));
+ return Err("Hash verification failed. The downloaded file is corrupted or has been tampered with.".to_string());
}
log::info!("Hash verification successful for {}", item.url);
@@ -128,7 +125,7 @@ async fn validate_downloaded_file(
save_path.display(),
e
);
- return Err(format!("Failed to verify file integrity: {}", e));
+ return Err(format!("Failed to verify file integrity: {e}"));
}
}
}
@@ -140,14 +137,14 @@ async fn validate_downloaded_file(
pub fn validate_proxy_config(config: &ProxyConfig) -> Result<(), String> {
// Validate proxy URL format
if let Err(e) = Url::parse(&config.url) {
- return Err(format!("Invalid proxy URL '{}': {}", config.url, e));
+ return Err(format!("Invalid proxy URL '{}': {e}", config.url));
}
// Check if proxy URL has valid scheme
let url = Url::parse(&config.url).unwrap(); // Safe to unwrap as we just validated it
match url.scheme() {
"http" | "https" | "socks4" | "socks5" => {}
- scheme => return Err(format!("Unsupported proxy scheme: {}", scheme)),
+ scheme => return Err(format!("Unsupported proxy scheme: {scheme}")),
}
// Validate authentication credentials
@@ -167,7 +164,7 @@ pub fn validate_proxy_config(config: &ProxyConfig) -> Result<(), String> {
}
// Basic validation for wildcard patterns
if entry.starts_with("*.") && entry.len() < 3 {
- return Err(format!("Invalid wildcard pattern: {}", entry));
+ return Err(format!("Invalid wildcard pattern: {entry}"));
}
}
}
@@ -214,8 +211,7 @@ pub fn should_bypass_proxy(url: &str, no_proxy: &[String]) -> bool {
}
// Simple wildcard matching
- if entry.starts_with("*.") {
- let domain = &entry[2..];
+ if let Some(domain) = entry.strip_prefix("*.") {
if host.ends_with(domain) {
return true;
}
@@ -305,7 +301,7 @@ pub async fn _download_files_internal(
resume: bool,
cancel_token: CancellationToken,
) -> Result<(), String> {
- log::info!("Start download task: {}", task_id);
+ log::info!("Start download task: {task_id}");
let header_map = _convert_headers(headers).map_err(err_to_string)?;
@@ -320,9 +316,9 @@ pub async fn _download_files_internal(
}
let total_size: u64 = file_sizes.values().sum();
- log::info!("Total download size: {}", total_size);
+ log::info!("Total download size: {total_size}");
- let evt_name = format!("download-{}", task_id);
+ let evt_name = format!("download-{task_id}");
// Create progress tracker
let progress_tracker = ProgressTracker::new(items, file_sizes.clone());
@@ -352,7 +348,7 @@ pub async fn _download_files_internal(
let cancel_token_clone = cancel_token.clone();
let evt_name_clone = evt_name.clone();
let progress_tracker_clone = progress_tracker.clone();
- let file_id = format!("{}-{}", task_id, index);
+ let file_id = format!("{task_id}-{index}");
let file_size = file_sizes.get(&item.url).copied().unwrap_or(0);
let task = tokio::spawn(async move {
@@ -377,7 +373,7 @@ pub async fn _download_files_internal(
// Wait for all downloads to complete
let mut validation_tasks = Vec::new();
for (task, item) in download_tasks.into_iter().zip(items.iter()) {
- let result = task.await.map_err(|e| format!("Task join error: {}", e))?;
+ let result = task.await.map_err(|e| format!("Task join error: {e}"))?;
match result {
Ok(downloaded_path) => {
@@ -399,7 +395,7 @@ pub async fn _download_files_internal(
for (validation_task, save_path, _item) in validation_tasks {
let validation_result = validation_task
.await
- .map_err(|e| format!("Validation task join error: {}", e))?;
+ .map_err(|e| format!("Validation task join error: {e}"))?;
if let Err(validation_error) = validation_result {
// Clean up the file if validation fails
@@ -448,7 +444,7 @@ async fn download_single_file(
if current_extension.is_empty() {
ext.to_string()
} else {
- format!("{}.{}", current_extension, ext)
+ format!("{current_extension}.{ext}")
}
};
let tmp_save_path = save_path.with_extension(append_extension("tmp"));
@@ -469,8 +465,8 @@ async fn download_single_file(
let decoded_url = url::Url::parse(&item.url)
.map(|u| u.to_string())
.unwrap_or_else(|_| item.url.clone());
- log::info!("Started downloading: {}", decoded_url);
- let client = _get_client_for_item(item, &header_map).map_err(err_to_string)?;
+ log::info!("Started downloading: {decoded_url}");
+ let client = _get_client_for_item(item, header_map).map_err(err_to_string)?;
let mut download_delta = 0u64;
let mut initial_progress = 0u64;
@@ -503,7 +499,7 @@ async fn download_single_file(
}
Err(e) => {
// fallback to normal download
- log::warn!("Failed to resume download: {}", e);
+ log::warn!("Failed to resume download: {e}");
should_resume = false;
_get_maybe_resume(&client, &item.url, 0).await?
}
@@ -592,7 +588,7 @@ async fn download_single_file(
let decoded_url = url::Url::parse(&item.url)
.map(|u| u.to_string())
.unwrap_or_else(|_| item.url.clone());
- log::info!("Finished downloading: {}", decoded_url);
+ log::info!("Finished downloading: {decoded_url}");
Ok(save_path.to_path_buf())
}
@@ -606,7 +602,7 @@ pub async fn _get_maybe_resume(
if start_bytes > 0 {
let resp = client
.get(url)
- .header("Range", format!("bytes={}-", start_bytes))
+ .header("Range", format!("bytes={start_bytes}-"))
.send()
.await
.map_err(err_to_string)?;
diff --git a/src-tauri/src/core/extensions/commands.rs b/src-tauri/src/core/extensions/commands.rs
index c61d744a7..e416a03a3 100644
--- a/src-tauri/src/core/extensions/commands.rs
+++ b/src-tauri/src/core/extensions/commands.rs
@@ -13,7 +13,7 @@ pub fn get_jan_extensions_path(app_handle: tauri::AppHandle) -> P
#[tauri::command]
pub fn install_extensions(app: AppHandle) {
if let Err(err) = setup::install_extensions(app, true) {
- log::error!("Failed to install extensions: {}", err);
+ log::error!("Failed to install extensions: {err}");
}
}
@@ -30,7 +30,7 @@ pub fn get_active_extensions(app: AppHandle) -> Vec = match contents {
@@ -49,12 +49,12 @@ pub fn get_active_extensions(app: AppHandle) -> Vec {
- log::error!("Failed to parse extensions.json: {}", error);
+ log::error!("Failed to parse extensions.json: {error}");
vec![]
}
},
Err(error) => {
- log::error!("Failed to read extensions.json: {}", error);
+ log::error!("Failed to read extensions.json: {error}");
vec![]
}
};
diff --git a/src-tauri/src/core/filesystem/tests.rs b/src-tauri/src/core/filesystem/tests.rs
index b4e96e994..b89b834d6 100644
--- a/src-tauri/src/core/filesystem/tests.rs
+++ b/src-tauri/src/core/filesystem/tests.rs
@@ -9,7 +9,7 @@ fn test_rm() {
let app = mock_app();
let path = "test_rm_dir";
fs::create_dir_all(get_jan_data_folder_path(app.handle().clone()).join(path)).unwrap();
- let args = vec![format!("file://{}", path).to_string()];
+ let args = vec![format!("file://{path}").to_string()];
let result = rm(app.handle().clone(), args);
assert!(result.is_ok());
assert!(!get_jan_data_folder_path(app.handle().clone())
@@ -21,7 +21,7 @@ fn test_rm() {
fn test_mkdir() {
let app = mock_app();
let path = "test_mkdir_dir";
- let args = vec![format!("file://{}", path).to_string()];
+ let args = vec![format!("file://{path}").to_string()];
let result = mkdir(app.handle().clone(), args);
assert!(result.is_ok());
assert!(get_jan_data_folder_path(app.handle().clone())
@@ -39,7 +39,7 @@ fn test_join_path() {
assert_eq!(
result,
get_jan_data_folder_path(app.handle().clone())
- .join(&format!("test_dir{}test_file", std::path::MAIN_SEPARATOR))
+ .join(format!("test_dir{}test_file", std::path::MAIN_SEPARATOR))
.to_string_lossy()
.to_string()
);
diff --git a/src-tauri/src/core/mcp/commands.rs b/src-tauri/src/core/mcp/commands.rs
index a86db598e..6eb6dab40 100644
--- a/src-tauri/src/core/mcp/commands.rs
+++ b/src-tauri/src/core/mcp/commands.rs
@@ -30,28 +30,28 @@ pub async fn activate_mcp_server(
#[tauri::command]
pub async fn deactivate_mcp_server(state: State<'_, AppState>, name: String) -> Result<(), String> {
- log::info!("Deactivating MCP server: {}", name);
+ log::info!("Deactivating MCP server: {name}");
// First, mark server as manually deactivated to prevent restart
// Remove from active servers list to prevent restart
{
let mut active_servers = state.mcp_active_servers.lock().await;
active_servers.remove(&name);
- log::info!("Removed MCP server {} from active servers list", name);
+ log::info!("Removed MCP server {name} from active servers list");
}
// Mark as not successfully connected to prevent restart logic
{
let mut connected = state.mcp_successfully_connected.lock().await;
connected.insert(name.clone(), false);
- log::info!("Marked MCP server {} as not successfully connected", name);
+ log::info!("Marked MCP server {name} as not successfully connected");
}
// Reset restart count
{
let mut counts = state.mcp_restart_counts.lock().await;
counts.remove(&name);
- log::info!("Reset restart count for MCP server {}", name);
+ log::info!("Reset restart count for MCP server {name}");
}
// Now remove and stop the server
@@ -60,7 +60,7 @@ pub async fn deactivate_mcp_server(state: State<'_, AppState>, name: String) ->
let service = servers_map
.remove(&name)
- .ok_or_else(|| format!("Server {} not found", name))?;
+ .ok_or_else(|| format!("Server {name} not found"))?;
// Release the lock before calling cancel
drop(servers_map);
@@ -89,7 +89,7 @@ pub async fn restart_mcp_servers(app: AppHandle, state: State<'_,
restart_active_mcp_servers(&app, servers).await?;
app.emit("mcp-update", "MCP servers updated")
- .map_err(|e| format!("Failed to emit event: {}", e))?;
+ .map_err(|e| format!("Failed to emit event: {e}"))?;
Ok(())
}
@@ -110,9 +110,7 @@ pub async fn reset_mcp_restart_count(
let old_count = *count;
*count = 0;
log::info!(
- "MCP server {} restart count reset from {} to 0.",
- server_name,
- old_count
+ "MCP server {server_name} restart count reset from {old_count} to 0."
);
Ok(())
}
@@ -219,7 +217,7 @@ pub async fn call_tool(
continue; // Tool not found in this server, try next
}
- println!("Found tool {} in server", tool_name);
+ println!("Found tool {tool_name} in server");
// Call the tool with timeout and cancellation support
let tool_call = service.call_tool(CallToolRequestParam {
@@ -234,22 +232,20 @@ pub async fn call_tool(
match result {
Ok(call_result) => call_result.map_err(|e| e.to_string()),
Err(_) => Err(format!(
- "Tool call '{}' timed out after {} seconds",
- tool_name,
+ "Tool call '{tool_name}' timed out after {} seconds",
MCP_TOOL_CALL_TIMEOUT.as_secs()
)),
}
}
_ = cancel_rx => {
- Err(format!("Tool call '{}' was cancelled", tool_name))
+ Err(format!("Tool call '{tool_name}' was cancelled"))
}
}
} else {
match timeout(MCP_TOOL_CALL_TIMEOUT, tool_call).await {
Ok(call_result) => call_result.map_err(|e| e.to_string()),
Err(_) => Err(format!(
- "Tool call '{}' timed out after {} seconds",
- tool_name,
+ "Tool call '{tool_name}' timed out after {} seconds",
MCP_TOOL_CALL_TIMEOUT.as_secs()
)),
}
@@ -264,7 +260,7 @@ pub async fn call_tool(
return result;
}
- Err(format!("Tool {} not found", tool_name))
+ Err(format!("Tool {tool_name} not found"))
}
/// Cancels a running tool call by its cancellation token
@@ -285,10 +281,10 @@ pub async fn cancel_tool_call(
if let Some(cancel_tx) = cancellations.remove(&cancellation_token) {
// Send cancellation signal - ignore if receiver is already dropped
let _ = cancel_tx.send(());
- println!("Tool call with token {} cancelled", cancellation_token);
+ println!("Tool call with token {cancellation_token} cancelled");
Ok(())
} else {
- Err(format!("Cancellation token {} not found", cancellation_token))
+ Err(format!("Cancellation token {cancellation_token} not found"))
}
}
@@ -301,7 +297,7 @@ pub async fn get_mcp_configs(app: AppHandle) -> Result(app: AppHandle) -> Result(app: AppHandle, configs: String) -> Result<(), String> {
let mut path = get_jan_data_folder_path(app);
path.push("mcp_config.json");
- log::info!("save mcp configs, path: {:?}", path);
+ log::info!("save mcp configs, path: {path:?}");
fs::write(path, configs).map_err(|e| e.to_string())
}
diff --git a/src-tauri/src/core/mcp/helpers.rs b/src-tauri/src/core/mcp/helpers.rs
index 48c92ba2c..4e226a055 100644
--- a/src-tauri/src/core/mcp/helpers.rs
+++ b/src-tauri/src/core/mcp/helpers.rs
@@ -56,22 +56,13 @@ pub fn calculate_exponential_backoff_delay(attempt: u32) -> u64 {
let hash = hasher.finish();
// Convert hash to jitter value in range [-jitter_range, +jitter_range]
- let jitter_offset = (hash % (jitter_range * 2)) as i64 - jitter_range as i64;
- jitter_offset
+ (hash % (jitter_range * 2)) as i64 - jitter_range as i64
} else {
0
};
// Apply jitter while ensuring delay stays positive and within bounds
- let final_delay = cmp::max(
- 100, // Minimum 100ms delay
- cmp::min(
- MCP_MAX_RESTART_DELAY_MS,
- (capped_delay as i64 + jitter) as u64,
- ),
- );
-
- final_delay
+ ((capped_delay as i64 + jitter) as u64).clamp(100, MCP_MAX_RESTART_DELAY_MS)
}
/// Runs MCP commands by reading configuration from a JSON file and initializing servers
@@ -135,9 +126,7 @@ pub async fn run_mcp_commands(
// If initial startup failed, we still want to continue with other servers
if let Err(e) = &result {
log::error!(
- "Initial startup failed for MCP server {}: {}",
- name_clone,
- e
+ "Initial startup failed for MCP server {name_clone}: {e}"
);
}
@@ -155,25 +144,23 @@ pub async fn run_mcp_commands(
match handle.await {
Ok((name, result)) => match result {
Ok(_) => {
- log::info!("MCP server {} initialized successfully", name);
+ log::info!("MCP server {name} initialized successfully");
successful_count += 1;
}
Err(e) => {
- log::error!("MCP server {} failed to initialize: {}", name, e);
+ log::error!("MCP server {name} failed to initialize: {e}");
failed_count += 1;
}
},
Err(e) => {
- log::error!("Failed to join startup task: {}", e);
+ log::error!("Failed to join startup task: {e}");
failed_count += 1;
}
}
}
log::info!(
- "MCP server initialization complete: {} successful, {} failed",
- successful_count,
- failed_count
+ "MCP server initialization complete: {successful_count} successful, {failed_count} failed"
);
Ok(())
@@ -184,7 +171,7 @@ pub async fn monitor_mcp_server_handle(
servers_state: SharedMcpServers,
name: String,
) -> Option<rmcp::service::QuitReason> {
- log::info!("Monitoring MCP server {} health", name);
+ log::info!("Monitoring MCP server {name} health");
// Monitor server health with periodic checks
loop {
@@ -202,17 +189,17 @@ pub async fn monitor_mcp_server_handle(
true
}
Ok(Err(e)) => {
- log::warn!("MCP server {} health check failed: {}", name, e);
+ log::warn!("MCP server {name} health check failed: {e}");
false
}
Err(_) => {
- log::warn!("MCP server {} health check timed out", name);
+ log::warn!("MCP server {name} health check timed out");
false
}
}
} else {
// Server was removed from HashMap (e.g., by deactivate_mcp_server)
- log::info!("MCP server {} no longer in running services", name);
+ log::info!("MCP server {name} no longer in running services");
return Some(rmcp::service::QuitReason::Closed);
}
};
@@ -220,8 +207,7 @@ pub async fn monitor_mcp_server_handle(
if !health_check_result {
// Server failed health check - remove it and return
log::error!(
- "MCP server {} failed health check, removing from active servers",
- name
+ "MCP server {name} failed health check, removing from active servers"
);
let mut servers = servers_state.lock().await;
if let Some(service) = servers.remove(&name) {
@@ -262,7 +248,7 @@ pub async fn start_mcp_server_with_restart(
let max_restarts = max_restarts.unwrap_or(5);
// Try the first start attempt and return its result
- log::info!("Starting MCP server {} (Initial attempt)", name);
+ log::info!("Starting MCP server {name} (Initial attempt)");
let first_start_result = schedule_mcp_start_task(
app.clone(),
servers_state.clone(),
@@ -273,7 +259,7 @@ pub async fn start_mcp_server_with_restart(
match first_start_result {
Ok(_) => {
- log::info!("MCP server {} started successfully on first attempt", name);
+ log::info!("MCP server {name} started successfully on first attempt");
reset_restart_count(&restart_counts, &name).await;
// Check if server was marked as successfully connected (passed verification)
@@ -298,18 +284,15 @@ pub async fn start_mcp_server_with_restart(
Ok(())
} else {
// Server failed verification, don't monitor for restarts
- log::error!("MCP server {} failed verification after startup", name);
+ log::error!("MCP server {name} failed verification after startup");
Err(format!(
- "MCP server {} failed verification after startup",
- name
+ "MCP server {name} failed verification after startup"
))
}
}
Err(e) => {
log::error!(
- "Failed to start MCP server {} on first attempt: {}",
- name,
- e
+ "Failed to start MCP server {name} on first attempt: {e}"
);
Err(e)
}
@@ -336,9 +319,7 @@ pub async fn start_restart_loop(
if current_restart_count > max_restarts {
log::error!(
- "MCP server {} reached maximum restart attempts ({}). Giving up.",
- name,
- max_restarts
+ "MCP server {name} reached maximum restart attempts ({max_restarts}). Giving up."
);
if let Err(e) = app.emit(
"mcp_max_restarts_reached",
@@ -353,19 +334,13 @@ pub async fn start_restart_loop(
}
log::info!(
- "Restarting MCP server {} (Attempt {}/{})",
- name,
- current_restart_count,
- max_restarts
+ "Restarting MCP server {name} (Attempt {current_restart_count}/{max_restarts})"
);
// Calculate exponential backoff delay
let delay_ms = calculate_exponential_backoff_delay(current_restart_count);
log::info!(
- "Waiting {}ms before restart attempt {} for MCP server {}",
- delay_ms,
- current_restart_count,
- name
+ "Waiting {delay_ms}ms before restart attempt {current_restart_count} for MCP server {name}"
);
sleep(Duration::from_millis(delay_ms)).await;
@@ -380,7 +355,7 @@ pub async fn start_restart_loop(
match start_result {
Ok(_) => {
- log::info!("MCP server {} restarted successfully.", name);
+ log::info!("MCP server {name} restarted successfully.");
// Check if server passed verification (was marked as successfully connected)
let passed_verification = {
@@ -390,8 +365,7 @@ pub async fn start_restart_loop(
if !passed_verification {
log::error!(
- "MCP server {} failed verification after restart - stopping permanently",
- name
+ "MCP server {name} failed verification after restart - stopping permanently"
);
break;
}
@@ -402,9 +376,7 @@ pub async fn start_restart_loop(
if let Some(count) = counts.get_mut(&name) {
if *count > 0 {
log::info!(
- "MCP server {} restarted successfully, resetting restart count from {} to 0.",
- name,
- *count
+ "MCP server {name} restarted successfully, resetting restart count from {count} to 0."
);
*count = 0;
}
@@ -415,7 +387,7 @@ pub async fn start_restart_loop(
let quit_reason =
monitor_mcp_server_handle(servers_state.clone(), name.clone()).await;
- log::info!("MCP server {} quit with reason: {:?}", name, quit_reason);
+ log::info!("MCP server {name} quit with reason: {quit_reason:?}");
// Check if server was marked as successfully connected
let was_connected = {
@@ -426,8 +398,7 @@ pub async fn start_restart_loop(
// Only continue restart loop if server was previously connected
if !was_connected {
log::error!(
- "MCP server {} failed before establishing successful connection - stopping permanently",
- name
+ "MCP server {name} failed before establishing successful connection - stopping permanently"
);
break;
}
@@ -435,11 +406,11 @@ pub async fn start_restart_loop(
// Determine if we should restart based on quit reason
let should_restart = match quit_reason {
Some(reason) => {
- log::warn!("MCP server {} terminated unexpectedly: {:?}", name, reason);
+ log::warn!("MCP server {name} terminated unexpectedly: {reason:?}");
true
}
None => {
- log::info!("MCP server {} was manually stopped - not restarting", name);
+ log::info!("MCP server {name} was manually stopped - not restarting");
false
}
};
@@ -450,7 +421,7 @@ pub async fn start_restart_loop(
// Continue the loop for another restart attempt
}
Err(e) => {
- log::error!("Failed to restart MCP server {}: {}", name, e);
+ log::error!("Failed to restart MCP server {name}: {e}");
// Check if server was marked as successfully connected before
let was_connected = {
@@ -461,8 +432,7 @@ pub async fn start_restart_loop(
// Only continue restart attempts if server was previously connected
if !was_connected {
log::error!(
- "MCP server {} failed restart and was never successfully connected - stopping permanently",
- name
+ "MCP server {name} failed restart and was never successfully connected - stopping permanently"
);
break;
}
@@ -529,7 +499,7 @@ async fn schedule_mcp_start_task(
},
};
let client = client_info.serve(transport).await.inspect_err(|e| {
- log::error!("client error: {:?}", e);
+ log::error!("client error: {e:?}");
});
match client {
@@ -545,12 +515,12 @@ async fn schedule_mcp_start_task(
let app_state = app.state::<AppState>();
let mut connected = app_state.mcp_successfully_connected.lock().await;
connected.insert(name.clone(), true);
- log::info!("Marked MCP server {} as successfully connected", name);
+ log::info!("Marked MCP server {name} as successfully connected");
}
}
Err(e) => {
- log::error!("Failed to connect to server: {}", e);
- return Err(format!("Failed to connect to server: {}", e));
+ log::error!("Failed to connect to server: {e}");
+ return Err(format!("Failed to connect to server: {e}"));
}
}
} else if config_params.transport_type.as_deref() == Some("sse") && config_params.url.is_some()
@@ -587,8 +557,8 @@ async fn schedule_mcp_start_task(
)
.await
.map_err(|e| {
- log::error!("transport error: {:?}", e);
- format!("Failed to start SSE transport: {}", e)
+ log::error!("transport error: {e:?}");
+ format!("Failed to start SSE transport: {e}")
})?;
let client_info = ClientInfo {
@@ -600,7 +570,7 @@ async fn schedule_mcp_start_task(
},
};
let client = client_info.serve(transport).await.map_err(|e| {
- log::error!("client error: {:?}", e);
+ log::error!("client error: {e:?}");
e.to_string()
});
@@ -617,12 +587,12 @@ async fn schedule_mcp_start_task(
let app_state = app.state::<AppState>();
let mut connected = app_state.mcp_successfully_connected.lock().await;
connected.insert(name.clone(), true);
- log::info!("Marked MCP server {} as successfully connected", name);
+ log::info!("Marked MCP server {name} as successfully connected");
}
}
Err(e) => {
- log::error!("Failed to connect to server: {}", e);
- return Err(format!("Failed to connect to server: {}", e));
+ log::error!("Failed to connect to server: {e}");
+ return Err(format!("Failed to connect to server: {e}"));
}
}
} else {
@@ -639,7 +609,7 @@ async fn schedule_mcp_start_task(
cache_dir.push(".npx");
cmd = Command::new(bun_x_path.display().to_string());
cmd.arg("x");
- cmd.env("BUN_INSTALL", cache_dir.to_str().unwrap().to_string());
+ cmd.env("BUN_INSTALL", cache_dir.to_str().unwrap());
}
let uv_path = if cfg!(windows) {
@@ -654,7 +624,7 @@ async fn schedule_mcp_start_task(
cmd = Command::new(uv_path);
cmd.arg("tool");
cmd.arg("run");
- cmd.env("UV_CACHE_DIR", cache_dir.to_str().unwrap().to_string());
+ cmd.env("UV_CACHE_DIR", cache_dir.to_str().unwrap());
}
#[cfg(windows)]
{
@@ -726,8 +696,7 @@ async fn schedule_mcp_start_task(
if !server_still_running {
return Err(format!(
- "MCP server {} quit immediately after starting",
- name
+ "MCP server {name} quit immediately after starting"
));
}
// Mark server as successfully connected (for restart policy)
@@ -735,7 +704,7 @@ async fn schedule_mcp_start_task(
let app_state = app.state::<AppState>();
let mut connected = app_state.mcp_successfully_connected.lock().await;
connected.insert(name.clone(), true);
- log::info!("Marked MCP server {} as successfully connected", name);
+ log::info!("Marked MCP server {name} as successfully connected");
}
}
Ok(())
@@ -792,7 +761,7 @@ pub async fn restart_active_mcp_servers(
);
for (name, config) in active_servers.iter() {
- log::info!("Restarting MCP server: {}", name);
+ log::info!("Restarting MCP server: {name}");
// Start server with restart monitoring - spawn async task
let app_clone = app.clone();
@@ -891,9 +860,7 @@ pub async fn spawn_server_monitoring_task(
monitor_mcp_server_handle(servers_clone.clone(), name_clone.clone()).await;
log::info!(
- "MCP server {} quit with reason: {:?}",
- name_clone,
- quit_reason
+ "MCP server {name_clone} quit with reason: {quit_reason:?}"
);
// Check if we should restart based on connection status and quit reason
@@ -928,8 +895,7 @@ pub async fn should_restart_server(
// Only restart if server was previously connected
if !was_connected {
log::error!(
- "MCP server {} failed before establishing successful connection - stopping permanently",
- name
+ "MCP server {name} failed before establishing successful connection - stopping permanently"
);
return false;
}
@@ -937,11 +903,11 @@ pub async fn should_restart_server(
// Determine if we should restart based on quit reason
match quit_reason {
Some(reason) => {
- log::warn!("MCP server {} terminated unexpectedly: {:?}", name, reason);
+ log::warn!("MCP server {name} terminated unexpectedly: {reason:?}");
true
}
None => {
- log::info!("MCP server {} was manually stopped - not restarting", name);
+ log::info!("MCP server {name} was manually stopped - not restarting");
false
}
}
diff --git a/src-tauri/src/core/mcp/tests.rs b/src-tauri/src/core/mcp/tests.rs
index d973ce647..71967cd96 100644
--- a/src-tauri/src/core/mcp/tests.rs
+++ b/src-tauri/src/core/mcp/tests.rs
@@ -70,7 +70,7 @@ fn test_add_server_config_new_file() {
Some("mcp_config_test_new.json"),
);
- assert!(result.is_ok(), "Failed to add server config: {:?}", result);
+ assert!(result.is_ok(), "Failed to add server config: {result:?}");
// Verify the config was added correctly
let config_content = std::fs::read_to_string(&config_path)
@@ -128,7 +128,7 @@ fn test_add_server_config_existing_servers() {
Some("mcp_config_test_existing.json"),
);
- assert!(result.is_ok(), "Failed to add server config: {:?}", result);
+ assert!(result.is_ok(), "Failed to add server config: {result:?}");
// Verify both servers exist
let config_content = std::fs::read_to_string(&config_path)
diff --git a/src-tauri/src/core/server/proxy.rs b/src-tauri/src/core/server/proxy.rs
index 12398ac02..b832b03a2 100644
--- a/src-tauri/src/core/server/proxy.rs
+++ b/src-tauri/src/core/server/proxy.rs
@@ -67,7 +67,7 @@ async fn proxy_request(
.any(|&method| method.eq_ignore_ascii_case(requested_method));
if !method_allowed {
- log::warn!("CORS preflight: Method '{}' not allowed", requested_method);
+ log::warn!("CORS preflight: Method '{requested_method}' not allowed");
return Ok(Response::builder()
.status(StatusCode::METHOD_NOT_ALLOWED)
.body(Body::from("Method not allowed"))
@@ -80,14 +80,12 @@ async fn proxy_request(
let is_trusted = if is_whitelisted_path {
log::debug!(
- "CORS preflight: Bypassing host check for whitelisted path: {}",
- request_path
+ "CORS preflight: Bypassing host check for whitelisted path: {request_path}"
);
true
} else if !host.is_empty() {
log::debug!(
- "CORS preflight: Host is '{}', trusted hosts: {:?}",
- host,
+ "CORS preflight: Host is '{host}', trusted hosts: {:?}",
&config.trusted_hosts
);
is_valid_host(host, &config.trusted_hosts)
@@ -98,9 +96,7 @@ async fn proxy_request(
if !is_trusted {
log::warn!(
- "CORS preflight: Host '{}' not trusted for path '{}'",
- host,
- request_path
+ "CORS preflight: Host '{host}' not trusted for path '{request_path}'"
);
return Ok(Response::builder()
.status(StatusCode::FORBIDDEN)
@@ -158,8 +154,7 @@ async fn proxy_request(
if !headers_valid {
log::warn!(
- "CORS preflight: Some requested headers not allowed: {}",
- requested_headers
+ "CORS preflight: Some requested headers not allowed: {requested_headers}"
);
return Ok(Response::builder()
.status(StatusCode::FORBIDDEN)
@@ -186,9 +181,7 @@ async fn proxy_request(
}
log::debug!(
- "CORS preflight response: host_trusted={}, origin='{}'",
- is_trusted,
- origin
+ "CORS preflight response: host_trusted={is_trusted}, origin='{origin}'"
);
return Ok(response.body(Body::empty()).unwrap());
}
@@ -252,7 +245,7 @@ async fn proxy_request(
.unwrap());
}
} else {
- log::debug!("Bypassing host validation for whitelisted path: {}", path);
+ log::debug!("Bypassing host validation for whitelisted path: {path}");
}
if !is_whitelisted_path && !config.proxy_api_key.is_empty() {
@@ -285,8 +278,7 @@ async fn proxy_request(
}
} else if is_whitelisted_path {
log::debug!(
- "Bypassing authorization check for whitelisted path: {}",
- path
+ "Bypassing authorization check for whitelisted path: {path}"
);
}
@@ -312,8 +304,7 @@ async fn proxy_request(
| (hyper::Method::POST, "/completions")
| (hyper::Method::POST, "/embeddings") => {
log::debug!(
- "Handling POST request to {} requiring model lookup in body",
- destination_path
+ "Handling POST request to {destination_path} requiring model lookup in body",
);
let body_bytes = match hyper::body::to_bytes(body).await {
Ok(bytes) => bytes,
@@ -336,13 +327,12 @@ async fn proxy_request(
match serde_json::from_slice::<serde_json::Value>(&body_bytes) {
Ok(json_body) => {
if let Some(model_id) = json_body.get("model").and_then(|v| v.as_str()) {
- log::debug!("Extracted model_id: {}", model_id);
+ log::debug!("Extracted model_id: {model_id}");
let sessions_guard = sessions.lock().await;
if sessions_guard.is_empty() {
log::warn!(
- "Request for model '{}' but no models are running.",
- model_id
+ "Request for model '{model_id}' but no models are running."
);
let mut error_response =
Response::builder().status(StatusCode::SERVICE_UNAVAILABLE);
@@ -363,9 +353,9 @@ async fn proxy_request(
{
target_port = Some(session.info.port);
session_api_key = Some(session.info.api_key.clone());
- log::debug!("Found session for model_id {}", model_id,);
+ log::debug!("Found session for model_id {model_id}");
} else {
- log::warn!("No running session found for model_id: {}", model_id);
+ log::warn!("No running session found for model_id: {model_id}");
let mut error_response =
Response::builder().status(StatusCode::NOT_FOUND);
error_response = add_cors_headers_with_host_and_origin(
@@ -376,15 +366,13 @@ async fn proxy_request(
);
return Ok(error_response
.body(Body::from(format!(
- "No running session found for model '{}'",
- model_id
+ "No running session found for model '{model_id}'"
)))
.unwrap());
}
} else {
log::warn!(
- "POST body for {} is missing 'model' field or it's not a string",
- destination_path
+ "POST body for {destination_path} is missing 'model' field or it's not a string"
);
let mut error_response =
Response::builder().status(StatusCode::BAD_REQUEST);
@@ -401,9 +389,7 @@ async fn proxy_request(
}
Err(e) => {
log::warn!(
- "Failed to parse POST body for {} as JSON: {}",
- destination_path,
- e
+ "Failed to parse POST body for {destination_path} as JSON: {e}"
);
let mut error_response = Response::builder().status(StatusCode::BAD_REQUEST);
error_response = add_cors_headers_with_host_and_origin(
@@ -535,7 +521,7 @@ async fn proxy_request(
let is_explicitly_whitelisted_get = method == hyper::Method::GET
&& whitelisted_paths.contains(&destination_path.as_str());
if is_explicitly_whitelisted_get {
- log::debug!("Handled whitelisted GET path: {}", destination_path);
+ log::debug!("Handled whitelisted GET path: {destination_path}");
let mut error_response = Response::builder().status(StatusCode::NOT_FOUND);
error_response = add_cors_headers_with_host_and_origin(
error_response,
@@ -546,9 +532,7 @@ async fn proxy_request(
return Ok(error_response.body(Body::from("Not Found")).unwrap());
} else {
log::warn!(
- "Unhandled method/path for dynamic routing: {} {}",
- method,
- destination_path
+ "Unhandled method/path for dynamic routing: {method} {destination_path}"
);
let mut error_response = Response::builder().status(StatusCode::NOT_FOUND);
error_response = add_cors_headers_with_host_and_origin(
@@ -581,7 +565,7 @@ async fn proxy_request(
}
};
- let upstream_url = format!("http://127.0.0.1:{}{}", port, destination_path);
+ let upstream_url = format!("http://127.0.0.1:{port}{destination_path}");
let mut outbound_req = client.request(method.clone(), &upstream_url);
@@ -593,13 +577,14 @@ async fn proxy_request(
if let Some(key) = session_api_key {
log::debug!("Adding session Authorization header");
- outbound_req = outbound_req.header("Authorization", format!("Bearer {}", key));
+ outbound_req = outbound_req.header("Authorization", format!("Bearer {key}"));
} else {
log::debug!("No session API key available for this request");
}
let outbound_req_with_body = if let Some(bytes) = buffered_body {
- log::debug!("Sending buffered body ({} bytes)", bytes.len());
+ let bytes_len = bytes.len();
+ log::debug!("Sending buffered body ({bytes_len} bytes)");
outbound_req.body(bytes)
} else {
log::error!("Internal logic error: Request reached proxy stage without a buffered body.");
@@ -618,7 +603,7 @@ async fn proxy_request(
match outbound_req_with_body.send().await {
Ok(response) => {
let status = response.status();
- log::debug!("Received response with status: {}", status);
+ log::debug!("Received response with status: {status}");
let mut builder = Response::builder().status(status);
@@ -648,7 +633,7 @@ async fn proxy_request(
}
}
Err(e) => {
- log::error!("Stream error: {}", e);
+ log::error!("Stream error: {e}");
break;
}
}
@@ -659,8 +644,8 @@ async fn proxy_request(
Ok(builder.body(body).unwrap())
}
Err(e) => {
- let error_msg = format!("Proxy request to model failed: {}", e);
- log::error!("{}", error_msg);
+ let error_msg = format!("Proxy request to model failed: {e}");
+ log::error!("{error_msg}");
let mut error_response = Response::builder().status(StatusCode::BAD_GATEWAY);
error_response = add_cors_headers_with_host_and_origin(
error_response,
@@ -675,14 +660,12 @@ async fn proxy_request(
fn add_cors_headers_with_host_and_origin(
builder: hyper::http::response::Builder,
- host: &str,
+ _host: &str,
origin: &str,
- trusted_hosts: &[Vec],
+ _trusted_hosts: &[Vec],
) -> hyper::http::response::Builder {
let mut builder = builder;
- let allow_origin_header = if !origin.is_empty() && is_valid_host(host, trusted_hosts) {
- origin.to_string()
- } else if !origin.is_empty() {
+ let allow_origin_header = if !origin.is_empty() {
origin.to_string()
} else {
"*".to_string()
@@ -706,6 +689,7 @@ pub async fn is_server_running(server_handle: Arc>>)
handle_guard.is_some()
}
+#[allow(clippy::too_many_arguments)]
pub async fn start_server(
server_handle: Arc>>,
sessions: Arc>>,
@@ -721,9 +705,9 @@ pub async fn start_server(
return Err("Server is already running".into());
}
- let addr: SocketAddr = format!("{}:{}", host, port)
+ let addr: SocketAddr = format!("{host}:{port}")
.parse()
- .map_err(|e| format!("Invalid address: {}", e))?;
+ .map_err(|e| format!("Invalid address: {e}"))?;
let config = ProxyConfig {
prefix,
@@ -752,15 +736,15 @@ pub async fn start_server(
let server = match Server::try_bind(&addr) {
Ok(builder) => builder.serve(make_svc),
Err(e) => {
- log::error!("Failed to bind to {}: {}", addr, e);
+ log::error!("Failed to bind to {addr}: {e}");
return Err(Box::new(e));
}
};
- log::info!("Jan API server started on http://{}", addr);
+ log::info!("Jan API server started on http://{addr}");
let server_task = tokio::spawn(async move {
if let Err(e) = server.await {
- log::error!("Server error: {}", e);
+ log::error!("Server error: {e}");
return Err(Box::new(e) as Box);
}
Ok(())
@@ -768,7 +752,7 @@ pub async fn start_server(
*handle_guard = Some(server_task);
let actual_port = addr.port();
- log::info!("Jan API server started successfully on port {}", actual_port);
+ log::info!("Jan API server started successfully on port {actual_port}");
Ok(actual_port)
}
diff --git a/src-tauri/src/core/setup.rs b/src-tauri/src/core/setup.rs
index 4eebaedb7..6564d3609 100644
--- a/src-tauri/src/core/setup.rs
+++ b/src-tauri/src/core/setup.rs
@@ -45,7 +45,7 @@ pub fn install_extensions(app: tauri::AppHandle, force: bool) ->
if std::env::var("IS_CLEAN").is_ok() {
clean_up = true;
}
- log::info!("Installing extensions. Clean up: {}", clean_up);
+ log::info!("Installing extensions. Clean up: {clean_up}");
if !clean_up && extensions_path.exists() {
return Ok(());
}
@@ -75,7 +75,7 @@ pub fn install_extensions(app: tauri::AppHandle, force: bool) ->
let entry = entry.map_err(|e| e.to_string())?;
let path = entry.path();
- if path.extension().map_or(false, |ext| ext == "tgz") {
+ if path.extension().is_some_and(|ext| ext == "tgz") {
let tar_gz = File::open(&path).map_err(|e| e.to_string())?;
let gz_decoder = GzDecoder::new(tar_gz);
let mut archive = Archive::new(gz_decoder);
@@ -141,7 +141,7 @@ pub fn install_extensions(app: tauri::AppHandle, force: bool) ->
extensions_list.push(new_extension);
- log::info!("Installed extension to {:?}", extension_dir);
+ log::info!("Installed extension to {extension_dir:?}");
}
}
fs::write(
@@ -161,7 +161,7 @@ pub fn migrate_mcp_servers(
let mcp_version = store
.get("mcp_version")
.and_then(|v| v.as_i64())
- .unwrap_or_else(|| 0);
+ .unwrap_or(0);
if mcp_version < 1 {
log::info!("Migrating MCP schema version 1");
let result = add_server_config(
@@ -175,7 +175,7 @@ pub fn migrate_mcp_servers(
}),
);
if let Err(e) = result {
- log::error!("Failed to add server config: {}", e);
+ log::error!("Failed to add server config: {e}");
}
}
store.set("mcp_version", 1);
@@ -219,7 +219,7 @@ pub fn setup_mcp(app: &App) {
let app_handle = app.handle().clone();
tauri::async_runtime::spawn(async move {
if let Err(e) = run_mcp_commands(&app_handle, servers).await {
- log::error!("Failed to run mcp commands: {}", e);
+ log::error!("Failed to run mcp commands: {e}");
}
app_handle
.emit("mcp-update", "MCP servers updated")
@@ -265,7 +265,7 @@ pub fn setup_tray(app: &App) -> tauri::Result {
app.exit(0);
}
other => {
- println!("menu item {} not handled", other);
+ println!("menu item {other} not handled");
}
})
.build(app)
diff --git a/src-tauri/src/core/system/commands.rs b/src-tauri/src/core/system/commands.rs
index f5e9d7618..938e6f8bf 100644
--- a/src-tauri/src/core/system/commands.rs
+++ b/src-tauri/src/core/system/commands.rs
@@ -18,12 +18,12 @@ pub fn factory_reset(app_handle: tauri::AppHandle, state: State<'
let windows = app_handle.webview_windows();
for (label, window) in windows.iter() {
window.close().unwrap_or_else(|_| {
- log::warn!("Failed to close window: {:?}", label);
+ log::warn!("Failed to close window: {label:?}");
});
}
}
let data_folder = get_jan_data_folder_path(app_handle.clone());
- log::info!("Factory reset, removing data folder: {:?}", data_folder);
+ log::info!("Factory reset, removing data folder: {data_folder:?}");
tauri::async_runtime::block_on(async {
clean_up_mcp_servers(state.clone()).await;
@@ -31,7 +31,7 @@ pub fn factory_reset(app_handle: tauri::AppHandle, state: State<'
if data_folder.exists() {
if let Err(e) = fs::remove_dir_all(&data_folder) {
- log::error!("Failed to remove data folder: {}", e);
+ log::error!("Failed to remove data folder: {e}");
return;
}
}
@@ -59,17 +59,17 @@ pub fn open_app_directory(app: AppHandle) {
if cfg!(target_os = "windows") {
std::process::Command::new("explorer")
.arg(app_path)
- .spawn()
+ .status()
.expect("Failed to open app directory");
} else if cfg!(target_os = "macos") {
std::process::Command::new("open")
.arg(app_path)
- .spawn()
+ .status()
.expect("Failed to open app directory");
} else {
std::process::Command::new("xdg-open")
.arg(app_path)
- .spawn()
+ .status()
.expect("Failed to open app directory");
}
}
@@ -80,17 +80,17 @@ pub fn open_file_explorer(path: String) {
if cfg!(target_os = "windows") {
std::process::Command::new("explorer")
.arg(path)
- .spawn()
+ .status()
.expect("Failed to open file explorer");
} else if cfg!(target_os = "macos") {
std::process::Command::new("open")
.arg(path)
- .spawn()
+ .status()
.expect("Failed to open file explorer");
} else {
std::process::Command::new("xdg-open")
.arg(path)
- .spawn()
+ .status()
.expect("Failed to open file explorer");
}
}
@@ -102,7 +102,7 @@ pub async fn read_logs(app: AppHandle) -> Result
let content = fs::read_to_string(log_path).map_err(|e| e.to_string())?;
Ok(content)
} else {
- Err(format!("Log file not found"))
+ Err("Log file not found".to_string())
}
}
@@ -112,7 +112,7 @@ pub fn is_library_available(library: &str) -> bool {
match unsafe { libloading::Library::new(library) } {
Ok(_) => true,
Err(e) => {
- log::info!("Library {} is not available: {}", library, e);
+ log::info!("Library {library} is not available: {e}");
false
}
}
diff --git a/src-tauri/src/core/threads/commands.rs b/src-tauri/src/core/threads/commands.rs
index f53239206..07bf46094 100644
--- a/src-tauri/src/core/threads/commands.rs
+++ b/src-tauri/src/core/threads/commands.rs
@@ -48,7 +48,7 @@ pub async fn list_threads(
match serde_json::from_str(&data) {
Ok(thread) => threads.push(thread),
Err(e) => {
- println!("Failed to parse thread file: {}", e);
+ println!("Failed to parse thread file: {e}");
continue; // skip invalid thread files
}
}
@@ -189,7 +189,7 @@ pub async fn create_message(
.map_err(|e| e.to_string())?;
let data = serde_json::to_string(&message).map_err(|e| e.to_string())?;
- writeln!(file, "{}", data).map_err(|e| e.to_string())?;
+ writeln!(file, "{data}").map_err(|e| e.to_string())?;
// Explicitly flush to ensure data is written before returning
file.flush().map_err(|e| e.to_string())?;
@@ -292,7 +292,7 @@ pub async fn get_thread_assistant(
let data = fs::read_to_string(&path).map_err(|e| e.to_string())?;
let thread: serde_json::Value = serde_json::from_str(&data).map_err(|e| e.to_string())?;
if let Some(assistants) = thread.get("assistants").and_then(|a| a.as_array()) {
- if let Some(first) = assistants.get(0) {
+ if let Some(first) = assistants.first() {
Ok(first.clone())
} else {
Err("Assistant not found".to_string())
diff --git a/src-tauri/src/core/threads/helpers.rs b/src-tauri/src/core/threads/helpers.rs
index 06b3561d9..1710c5767 100644
--- a/src-tauri/src/core/threads/helpers.rs
+++ b/src-tauri/src/core/threads/helpers.rs
@@ -38,7 +38,7 @@ pub fn write_messages_to_file(
let mut file = File::create(path).map_err(|e| e.to_string())?;
for msg in messages {
let data = serde_json::to_string(msg).map_err(|e| e.to_string())?;
- writeln!(file, "{}", data).map_err(|e| e.to_string())?;
+ writeln!(file, "{data}").map_err(|e| e.to_string())?;
}
Ok(())
}
diff --git a/src-tauri/src/core/threads/tests.rs b/src-tauri/src/core/threads/tests.rs
index a6170dfe7..07cbba9ef 100644
--- a/src-tauri/src/core/threads/tests.rs
+++ b/src-tauri/src/core/threads/tests.rs
@@ -17,7 +17,7 @@ fn mock_app_with_temp_data_dir() -> (tauri::App, PathBuf) {
.as_nanos();
let data_dir = std::env::current_dir()
.unwrap_or_else(|_| PathBuf::from("."))
- .join(format!("test-data-{:?}-{}", unique_id, timestamp));
+ .join(format!("test-data-{unique_id:?}-{timestamp}"));
println!("Mock app data dir: {}", data_dir.display());
// Ensure the unique test directory exists
let _ = fs::create_dir_all(&data_dir);
@@ -69,7 +69,7 @@ async fn test_create_and_list_threads() {
// List threads
let threads = list_threads(app.handle().clone()).await.unwrap();
- assert!(threads.len() > 0);
+ assert!(!threads.is_empty());
// Clean up
let _ = fs::remove_dir_all(data_dir);
@@ -115,7 +115,7 @@ async fn test_create_and_list_messages() {
let messages = list_messages(app.handle().clone(), thread_id.clone())
.await
.unwrap();
- assert!(messages.len() > 0, "Expected at least one message, but got none. Thread ID: {}", thread_id);
+ assert!(!messages.is_empty(), "Expected at least one message, but got none. Thread ID: {thread_id}");
assert_eq!(messages[0]["role"], "user");
// Clean up
diff --git a/src-tauri/src/lib.rs b/src-tauri/src/lib.rs
index 1b4acbdf4..628f22b08 100644
--- a/src-tauri/src/lib.rs
+++ b/src-tauri/src/lib.rs
@@ -151,17 +151,17 @@ pub fn run() {
.config()
.version
.clone()
- .unwrap_or_else(|| "".to_string());
+ .unwrap_or_default();
// Migrate extensions
if let Err(e) =
setup::install_extensions(app.handle().clone(), stored_version != app_version)
{
- log::error!("Failed to install extensions: {}", e);
+ log::error!("Failed to install extensions: {e}");
}
// Migrate MCP servers
if let Err(e) = setup::migrate_mcp_servers(app.handle().clone(), store.clone()) {
- log::error!("Failed to migrate MCP servers: {}", e);
+ log::error!("Failed to migrate MCP servers: {e}");
}
// Store the new app version
@@ -199,8 +199,8 @@ pub fn run() {
.expect("error while running tauri application");
// Handle app lifecycle events
- app.run(|app, event| match event {
- RunEvent::Exit => {
+ app.run(|app, event| {
+ if let RunEvent::Exit = event {
// This is called when the app is actually exiting (e.g., macOS dock quit)
// We can't prevent this, so run cleanup quickly
let app_handle = app.clone();
@@ -220,6 +220,5 @@ pub fn run() {
});
});
}
- _ => {}
});
}