refactor: resolve rust analyzer warnings and improve code quality (#6696)

- Update string formatting to use modern interpolation syntax
- Simplify expressions and remove unnecessary intermediate variables
- Improve logging statements for better readability
- Clean up code across core modules (app, downloads, mcp, server, etc.)
Roushan Kumar Singh authored on 2025-10-02 13:31:06 +05:30 (committed by GitHub)
parent ab2bc11465
commit eccaa282e0
GPG Key ID: B5690EEEBB952194 (no known key found for this signature in database)
15 changed files with 177 additions and 253 deletions
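
Most of the added lines apply one mechanical pattern: positional `{}` placeholders with trailing arguments become Rust 2021 inline format arguments (the `clippy::uninlined_format_args` suggestion surfaced by rust-analyzer). A minimal before/after sketch, using made-up variable names:

```rust
fn main() {
    let task_id = "task-42";
    let attempt = 3;

    // Before: positional placeholders plus a separate argument list.
    println!("task {} failed on attempt {}", task_id, attempt);

    // After: the identifiers are captured directly in the format string.
    // The same capture works in format!, log::info!, assert!, etc.,
    // including Debug formatting such as {path:?}.
    println!("task {task_id} failed on attempt {attempt}");
}
```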


@@ -19,10 +19,7 @@ pub fn get_app_configurations<R: Runtime>(app_handle: tauri::AppHandle<R>) -> Ap
     let default_data_folder = default_data_folder_path(app_handle.clone());
     if !configuration_file.exists() {
-        log::info!(
-            "App config not found, creating default config at {:?}",
-            configuration_file
-        );
+        log::info!("App config not found, creating default config at {configuration_file:?}");
         app_default_configuration.data_folder = default_data_folder;
@@ -30,7 +27,7 @@ pub fn get_app_configurations<R: Runtime>(app_handle: tauri::AppHandle<R>) -> Ap
             &configuration_file,
             serde_json::to_string(&app_default_configuration).unwrap(),
         ) {
-            log::error!("Failed to create default config: {}", err);
+            log::error!("Failed to create default config: {err}");
         }
         return app_default_configuration;
@@ -40,18 +37,12 @@ pub fn get_app_configurations<R: Runtime>(app_handle: tauri::AppHandle<R>) -> Ap
         Ok(content) => match serde_json::from_str::<AppConfiguration>(&content) {
             Ok(app_configurations) => app_configurations,
             Err(err) => {
-                log::error!(
-                    "Failed to parse app config, returning default config instead. Error: {}",
-                    err
-                );
+                log::error!("Failed to parse app config, returning default config instead. Error: {err}");
                 app_default_configuration
             }
         },
         Err(err) => {
-            log::error!(
-                "Failed to read app config, returning default config instead. Error: {}",
-                err
-            );
+            log::error!("Failed to read app config, returning default config instead. Error: {err}");
            app_default_configuration
         }
     }
@@ -63,10 +54,7 @@ pub fn update_app_configuration<R: Runtime>(
     configuration: AppConfiguration,
 ) -> Result<(), String> {
     let configuration_file = get_configuration_file_path(app_handle);
-    log::info!(
-        "update_app_configuration, configuration_file: {:?}",
-        configuration_file
-    );
+    log::info!("update_app_configuration, configuration_file: {configuration_file:?}");
     fs::write(
         configuration_file,
@@ -95,8 +83,7 @@ pub fn get_jan_data_folder_path<R: Runtime>(app_handle: tauri::AppHandle<R>) ->
 pub fn get_configuration_file_path<R: Runtime>(app_handle: tauri::AppHandle<R>) -> PathBuf {
     let app_path = app_handle.path().app_data_dir().unwrap_or_else(|err| {
         log::error!(
-            "Failed to get app data directory: {}. Using home directory instead.",
-            err
+            "Failed to get app data directory: {err}. Using home directory instead."
         );
         let home_dir = std::env::var(if cfg!(target_os = "windows") {
@@ -130,9 +117,9 @@ pub fn get_configuration_file_path<R: Runtime>(app_handle: tauri::AppHandle<R>)
         .join(package_name);
     if old_data_dir.exists() {
-        return old_data_dir.join(CONFIGURATION_FILE_NAME);
+        old_data_dir.join(CONFIGURATION_FILE_NAME)
     } else {
-        return app_path.join(CONFIGURATION_FILE_NAME);
+        app_path.join(CONFIGURATION_FILE_NAME)
     }
 }
@@ -156,7 +143,7 @@ pub fn default_data_folder_path<R: Runtime>(app_handle: tauri::AppHandle<R>) ->
 #[tauri::command]
 pub fn get_user_home_path<R: Runtime>(app: AppHandle<R>) -> String {
-    return get_app_configurations(app.clone()).data_folder;
+    get_app_configurations(app.clone()).data_folder
 }
 #[tauri::command]
@@ -171,16 +158,12 @@ pub fn change_app_data_folder<R: Runtime>(
     // Create the new data folder if it doesn't exist
     if !new_data_folder_path.exists() {
         fs::create_dir_all(&new_data_folder_path)
-            .map_err(|e| format!("Failed to create new data folder: {}", e))?;
+            .map_err(|e| format!("Failed to create new data folder: {e}"))?;
     }
     // Copy all files from the old folder to the new one
     if current_data_folder.exists() {
-        log::info!(
-            "Copying data from {:?} to {:?}",
-            current_data_folder,
-            new_data_folder_path
-        );
+        log::info!("Copying data from {current_data_folder:?} to {new_data_folder_path:?}");
         // Check if this is a parent directory to avoid infinite recursion
         if new_data_folder_path.starts_with(&current_data_folder) {
@@ -193,7 +176,7 @@ pub fn change_app_data_folder<R: Runtime>(
             &new_data_folder_path,
             &[".uvx", ".npx"],
         )
-        .map_err(|e| format!("Failed to copy data to new folder: {}", e))?;
+        .map_err(|e| format!("Failed to copy data to new folder: {e}"))?;
     } else {
         log::info!("Current data folder does not exist, nothing to copy");
     }


@@ -19,7 +19,7 @@ pub async fn download_files<R: Runtime>(
     {
         let mut download_manager = state.download_manager.lock().await;
         if download_manager.cancel_tokens.contains_key(task_id) {
-            return Err(format!("task_id {} exists", task_id));
+            return Err(format!("task_id {task_id} exists"));
         }
         download_manager
             .cancel_tokens
@@ -60,9 +60,9 @@ pub async fn cancel_download_task(state: State<'_, AppState>, task_id: &str) ->
     let mut download_manager = state.download_manager.lock().await;
     if let Some(token) = download_manager.cancel_tokens.remove(task_id) {
         token.cancel();
-        log::info!("Cancelled download task: {}", task_id);
+        log::info!("Cancelled download task: {task_id}");
         Ok(())
     } else {
-        Err(format!("No download task: {}", task_id))
+        Err(format!("No download task: {task_id}"))
     }
 }


@@ -15,7 +15,7 @@ use url::Url;
 // ===== UTILITY FUNCTIONS =====
 pub fn err_to_string<E: std::fmt::Display>(e: E) -> String {
-    format!("Error: {}", e)
+    format!("Error: {e}")
 }
@@ -55,7 +55,7 @@ async fn validate_downloaded_file(
     )
     .unwrap();
-    log::info!("Starting validation for model: {}", model_id);
+    log::info!("Starting validation for model: {model_id}");
     // Validate size if provided (fast check first)
     if let Some(expected_size) = &item.size {
@@ -73,8 +73,7 @@ async fn validate_downloaded_file(
                 actual_size
             );
             return Err(format!(
-                "Size verification failed. Expected {} bytes but got {} bytes.",
-                expected_size, actual_size
+                "Size verification failed. Expected {expected_size} bytes but got {actual_size} bytes."
             ));
         }
@@ -90,7 +89,7 @@ async fn validate_downloaded_file(
                 save_path.display(),
                 e
             );
-            return Err(format!("Failed to verify file size: {}", e));
+            return Err(format!("Failed to verify file size: {e}"));
         }
     }
 }
@@ -115,9 +114,7 @@ async fn validate_downloaded_file(
                 computed_sha256
             );
-            return Err(format!(
-                "Hash verification failed. The downloaded file is corrupted or has been tampered with."
-            ));
+            return Err("Hash verification failed. The downloaded file is corrupted or has been tampered with.".to_string());
         }
         log::info!("Hash verification successful for {}", item.url);
@@ -128,7 +125,7 @@ async fn validate_downloaded_file(
                 save_path.display(),
                 e
             );
-            return Err(format!("Failed to verify file integrity: {}", e));
+            return Err(format!("Failed to verify file integrity: {e}"));
         }
     }
 }
@@ -140,14 +137,14 @@ async fn validate_downloaded_file(
 pub fn validate_proxy_config(config: &ProxyConfig) -> Result<(), String> {
     // Validate proxy URL format
     if let Err(e) = Url::parse(&config.url) {
-        return Err(format!("Invalid proxy URL '{}': {}", config.url, e));
+        return Err(format!("Invalid proxy URL '{}': {e}", config.url));
     }
     // Check if proxy URL has valid scheme
     let url = Url::parse(&config.url).unwrap(); // Safe to unwrap as we just validated it
     match url.scheme() {
         "http" | "https" | "socks4" | "socks5" => {}
-        scheme => return Err(format!("Unsupported proxy scheme: {}", scheme)),
+        scheme => return Err(format!("Unsupported proxy scheme: {scheme}")),
     }
     // Validate authentication credentials
@@ -167,7 +164,7 @@ pub fn validate_proxy_config(config: &ProxyConfig) -> Result<(), String> {
         }
         // Basic validation for wildcard patterns
         if entry.starts_with("*.") && entry.len() < 3 {
-            return Err(format!("Invalid wildcard pattern: {}", entry));
+            return Err(format!("Invalid wildcard pattern: {entry}"));
         }
     }
 }
@@ -214,8 +211,7 @@ pub fn should_bypass_proxy(url: &str, no_proxy: &[String]) -> bool {
         }
         // Simple wildcard matching
-        if entry.starts_with("*.") {
-            let domain = &entry[2..];
+        if let Some(domain) = entry.strip_prefix("*.") {
             if host.ends_with(domain) {
                 return true;
             }
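
The wildcard branch above also swaps manual slicing (`&entry[2..]`) for `str::strip_prefix`, the idiom clippy's `manual_strip` lint points at. A self-contained sketch of the same pattern, with made-up no-proxy entries (not the crate's actual bypass logic):

```rust
// Returns true when `host` matches a no-proxy entry such as "*.internal.test".
fn matches_no_proxy_entry(entry: &str, host: &str) -> bool {
    if let Some(domain) = entry.strip_prefix("*.") {
        // `domain` borrows everything after "*.", with no index arithmetic.
        return host.ends_with(domain);
    }
    entry == host
}

fn main() {
    assert!(matches_no_proxy_entry("*.internal.test", "api.internal.test"));
    assert!(!matches_no_proxy_entry("*.internal.test", "example.com"));
    assert!(matches_no_proxy_entry("localhost", "localhost"));
    println!("wildcard checks passed");
}
```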
@@ -305,7 +301,7 @@ pub async fn _download_files_internal(
     resume: bool,
     cancel_token: CancellationToken,
 ) -> Result<(), String> {
-    log::info!("Start download task: {}", task_id);
+    log::info!("Start download task: {task_id}");
     let header_map = _convert_headers(headers).map_err(err_to_string)?;
@@ -320,9 +316,9 @@ pub async fn _download_files_internal(
     }
     let total_size: u64 = file_sizes.values().sum();
-    log::info!("Total download size: {}", total_size);
-    let evt_name = format!("download-{}", task_id);
+    log::info!("Total download size: {total_size}");
+    let evt_name = format!("download-{task_id}");
     // Create progress tracker
     let progress_tracker = ProgressTracker::new(items, file_sizes.clone());
@@ -352,7 +348,7 @@ pub async fn _download_files_internal(
         let cancel_token_clone = cancel_token.clone();
         let evt_name_clone = evt_name.clone();
         let progress_tracker_clone = progress_tracker.clone();
-        let file_id = format!("{}-{}", task_id, index);
+        let file_id = format!("{task_id}-{index}");
         let file_size = file_sizes.get(&item.url).copied().unwrap_or(0);
         let task = tokio::spawn(async move {
@@ -377,7 +373,7 @@ pub async fn _download_files_internal(
     // Wait for all downloads to complete
     let mut validation_tasks = Vec::new();
     for (task, item) in download_tasks.into_iter().zip(items.iter()) {
-        let result = task.await.map_err(|e| format!("Task join error: {}", e))?;
+        let result = task.await.map_err(|e| format!("Task join error: {e}"))?;
         match result {
             Ok(downloaded_path) => {
@@ -399,7 +395,7 @@ pub async fn _download_files_internal(
     for (validation_task, save_path, _item) in validation_tasks {
         let validation_result = validation_task
             .await
-            .map_err(|e| format!("Validation task join error: {}", e))?;
+            .map_err(|e| format!("Validation task join error: {e}"))?;
         if let Err(validation_error) = validation_result {
             // Clean up the file if validation fails
@@ -448,7 +444,7 @@ async fn download_single_file(
         if current_extension.is_empty() {
             ext.to_string()
         } else {
-            format!("{}.{}", current_extension, ext)
+            format!("{current_extension}.{ext}")
         }
     };
     let tmp_save_path = save_path.with_extension(append_extension("tmp"));
@@ -469,8 +465,8 @@ async fn download_single_file(
     let decoded_url = url::Url::parse(&item.url)
         .map(|u| u.to_string())
         .unwrap_or_else(|_| item.url.clone());
-    log::info!("Started downloading: {}", decoded_url);
-    let client = _get_client_for_item(item, &header_map).map_err(err_to_string)?;
+    log::info!("Started downloading: {decoded_url}");
+    let client = _get_client_for_item(item, header_map).map_err(err_to_string)?;
     let mut download_delta = 0u64;
     let mut initial_progress = 0u64;
@@ -503,7 +499,7 @@ async fn download_single_file(
         }
         Err(e) => {
             // fallback to normal download
-            log::warn!("Failed to resume download: {}", e);
+            log::warn!("Failed to resume download: {e}");
             should_resume = false;
             _get_maybe_resume(&client, &item.url, 0).await?
         }
@@ -592,7 +588,7 @@ async fn download_single_file(
     let decoded_url = url::Url::parse(&item.url)
         .map(|u| u.to_string())
         .unwrap_or_else(|_| item.url.clone());
-    log::info!("Finished downloading: {}", decoded_url);
+    log::info!("Finished downloading: {decoded_url}");
     Ok(save_path.to_path_buf())
 }
@@ -606,7 +602,7 @@ pub async fn _get_maybe_resume(
     if start_bytes > 0 {
         let resp = client
             .get(url)
-            .header("Range", format!("bytes={}-", start_bytes))
+            .header("Range", format!("bytes={start_bytes}-"))
             .send()
             .await
             .map_err(err_to_string)?;


@@ -13,7 +13,7 @@ pub fn get_jan_extensions_path<R: Runtime>(app_handle: tauri::AppHandle<R>) -> P
 #[tauri::command]
 pub fn install_extensions<R: Runtime>(app: AppHandle<R>) {
     if let Err(err) = setup::install_extensions(app, true) {
-        log::error!("Failed to install extensions: {}", err);
+        log::error!("Failed to install extensions: {err}");
     }
 }
@@ -21,7 +21,7 @@ pub fn install_extensions<R: Runtime>(app: AppHandle<R>) {
 pub fn get_active_extensions<R: Runtime>(app: AppHandle<R>) -> Vec<serde_json::Value> {
     let mut path = get_jan_extensions_path(app);
     path.push("extensions.json");
-    log::info!("get jan extensions, path: {:?}", path);
+    log::info!("get jan extensions, path: {path:?}");
     let contents = fs::read_to_string(path);
     let contents: Vec<serde_json::Value> = match contents {
@@ -40,14 +40,14 @@ pub fn get_active_extensions<R: Runtime>(app: AppHandle<R>) -> Vec<serde_json::V
                 })
                 .collect(),
             Err(error) => {
-                log::error!("Failed to parse extensions.json: {}", error);
+                log::error!("Failed to parse extensions.json: {error}");
                 vec![]
             }
         },
         Err(error) => {
-            log::error!("Failed to read extensions.json: {}", error);
+            log::error!("Failed to read extensions.json: {error}");
             vec![]
         }
     };
-    return contents;
+    contents
 }


@@ -9,7 +9,7 @@ fn test_rm() {
     let app = mock_app();
     let path = "test_rm_dir";
     fs::create_dir_all(get_jan_data_folder_path(app.handle().clone()).join(path)).unwrap();
-    let args = vec![format!("file://{}", path).to_string()];
+    let args = vec![format!("file://{path}").to_string()];
     let result = rm(app.handle().clone(), args);
     assert!(result.is_ok());
     assert!(!get_jan_data_folder_path(app.handle().clone())
@@ -21,7 +21,7 @@ fn test_rm() {
 fn test_mkdir() {
     let app = mock_app();
     let path = "test_mkdir_dir";
-    let args = vec![format!("file://{}", path).to_string()];
+    let args = vec![format!("file://{path}").to_string()];
     let result = mkdir(app.handle().clone(), args);
     assert!(result.is_ok());
     assert!(get_jan_data_folder_path(app.handle().clone())
@@ -39,7 +39,7 @@ fn test_join_path() {
     assert_eq!(
         result,
         get_jan_data_folder_path(app.handle().clone())
-            .join(&format!("test_dir{}test_file", std::path::MAIN_SEPARATOR))
+            .join(format!("test_dir{}test_file", std::path::MAIN_SEPARATOR))
             .to_string_lossy()
             .to_string()
     );


@@ -30,28 +30,28 @@ pub async fn activate_mcp_server<R: Runtime>(
 #[tauri::command]
 pub async fn deactivate_mcp_server(state: State<'_, AppState>, name: String) -> Result<(), String> {
-    log::info!("Deactivating MCP server: {}", name);
+    log::info!("Deactivating MCP server: {name}");
     // First, mark server as manually deactivated to prevent restart
     // Remove from active servers list to prevent restart
     {
         let mut active_servers = state.mcp_active_servers.lock().await;
         active_servers.remove(&name);
-        log::info!("Removed MCP server {} from active servers list", name);
+        log::info!("Removed MCP server {name} from active servers list");
     }
     // Mark as not successfully connected to prevent restart logic
     {
         let mut connected = state.mcp_successfully_connected.lock().await;
         connected.insert(name.clone(), false);
-        log::info!("Marked MCP server {} as not successfully connected", name);
+        log::info!("Marked MCP server {name} as not successfully connected");
     }
     // Reset restart count
     {
         let mut counts = state.mcp_restart_counts.lock().await;
         counts.remove(&name);
-        log::info!("Reset restart count for MCP server {}", name);
+        log::info!("Reset restart count for MCP server {name}");
     }
     // Now remove and stop the server
@@ -60,7 +60,7 @@ pub async fn deactivate_mcp_server(state: State<'_, AppState>, name: String) ->
     let service = servers_map
         .remove(&name)
-        .ok_or_else(|| format!("Server {} not found", name))?;
+        .ok_or_else(|| format!("Server {name} not found"))?;
     // Release the lock before calling cancel
     drop(servers_map);
@@ -89,7 +89,7 @@ pub async fn restart_mcp_servers<R: Runtime>(app: AppHandle<R>, state: State<'_,
     restart_active_mcp_servers(&app, servers).await?;
     app.emit("mcp-update", "MCP servers updated")
-        .map_err(|e| format!("Failed to emit event: {}", e))?;
+        .map_err(|e| format!("Failed to emit event: {e}"))?;
     Ok(())
 }
@@ -110,9 +110,7 @@ pub async fn reset_mcp_restart_count(
         let old_count = *count;
         *count = 0;
         log::info!(
-            "MCP server {} restart count reset from {} to 0.",
-            server_name,
-            old_count
+            "MCP server {server_name} restart count reset from {old_count} to 0."
         );
     Ok(())
 }
@@ -219,7 +217,7 @@ pub async fn call_tool(
             continue; // Tool not found in this server, try next
         }
-        println!("Found tool {} in server", tool_name);
+        println!("Found tool {tool_name} in server");
         // Call the tool with timeout and cancellation support
         let tool_call = service.call_tool(CallToolRequestParam {
@@ -234,22 +232,20 @@ pub async fn call_tool(
                     match result {
                         Ok(call_result) => call_result.map_err(|e| e.to_string()),
                         Err(_) => Err(format!(
-                            "Tool call '{}' timed out after {} seconds",
-                            tool_name,
+                            "Tool call '{tool_name}' timed out after {} seconds",
                             MCP_TOOL_CALL_TIMEOUT.as_secs()
                         )),
                     }
                 }
                 _ = cancel_rx => {
-                    Err(format!("Tool call '{}' was cancelled", tool_name))
+                    Err(format!("Tool call '{tool_name}' was cancelled"))
                 }
             }
         } else {
             match timeout(MCP_TOOL_CALL_TIMEOUT, tool_call).await {
                 Ok(call_result) => call_result.map_err(|e| e.to_string()),
                 Err(_) => Err(format!(
-                    "Tool call '{}' timed out after {} seconds",
-                    tool_name,
+                    "Tool call '{tool_name}' timed out after {} seconds",
                     MCP_TOOL_CALL_TIMEOUT.as_secs()
                 )),
             }
@@ -264,7 +260,7 @@ pub async fn call_tool(
         return result;
     }
-    Err(format!("Tool {} not found", tool_name))
+    Err(format!("Tool {tool_name} not found"))
 }
 /// Cancels a running tool call by its cancellation token
@@ -285,10 +281,10 @@ pub async fn cancel_tool_call(
     if let Some(cancel_tx) = cancellations.remove(&cancellation_token) {
         // Send cancellation signal - ignore if receiver is already dropped
         let _ = cancel_tx.send(());
-        println!("Tool call with token {} cancelled", cancellation_token);
+        println!("Tool call with token {cancellation_token} cancelled");
         Ok(())
     } else {
-        Err(format!("Cancellation token {} not found", cancellation_token))
+        Err(format!("Cancellation token {cancellation_token} not found"))
     }
 }
@@ -301,7 +297,7 @@ pub async fn get_mcp_configs<R: Runtime>(app: AppHandle<R>) -> Result<String, St
     if !path.exists() {
         log::info!("mcp_config.json not found, creating default empty config");
         fs::write(&path, DEFAULT_MCP_CONFIG)
-            .map_err(|e| format!("Failed to create default MCP config: {}", e))?;
+            .map_err(|e| format!("Failed to create default MCP config: {e}"))?;
     }
     fs::read_to_string(path).map_err(|e| e.to_string())
@@ -311,7 +307,7 @@ pub async fn get_mcp_configs<R: Runtime>(app: AppHandle<R>) -> Result<String, St
 pub async fn save_mcp_configs<R: Runtime>(app: AppHandle<R>, configs: String) -> Result<(), String> {
     let mut path = get_jan_data_folder_path(app);
     path.push("mcp_config.json");
-    log::info!("save mcp configs, path: {:?}", path);
+    log::info!("save mcp configs, path: {path:?}");
     fs::write(path, configs).map_err(|e| e.to_string())
 }


@@ -56,22 +56,13 @@ pub fn calculate_exponential_backoff_delay(attempt: u32) -> u64 {
         let hash = hasher.finish();
         // Convert hash to jitter value in range [-jitter_range, +jitter_range]
-        let jitter_offset = (hash % (jitter_range * 2)) as i64 - jitter_range as i64;
-        jitter_offset
+        (hash % (jitter_range * 2)) as i64 - jitter_range as i64
     } else {
         0
     };
     // Apply jitter while ensuring delay stays positive and within bounds
-    let final_delay = cmp::max(
-        100, // Minimum 100ms delay
-        cmp::min(
-            MCP_MAX_RESTART_DELAY_MS,
-            (capped_delay as i64 + jitter) as u64,
-        ),
-    );
-    final_delay
+    ((capped_delay as i64 + jitter) as u64).clamp(100, MCP_MAX_RESTART_DELAY_MS)
 }
 /// Runs MCP commands by reading configuration from a JSON file and initializing servers
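
The delay calculation above collapses the nested `cmp::max`/`cmp::min` into a single `clamp` call. A standalone sketch of the same shape, with illustrative constants and a caller-supplied jitter instead of the hash-derived one (these values are assumptions, not the crate's configuration):

```rust
// Assumed values for illustration only.
const BASE_DELAY_MS: u64 = 1_000;
const MAX_RESTART_DELAY_MS: u64 = 30_000;

/// Exponential backoff with jitter, kept within [100, MAX_RESTART_DELAY_MS].
fn backoff_delay_ms(attempt: u32, jitter_ms: i64) -> u64 {
    // 1s, 2s, 4s, ... with the exponent capped so the shift cannot overflow.
    let capped_delay = BASE_DELAY_MS.saturating_mul(1u64 << attempt.min(5));
    // Equivalent to max(100, min(MAX, delay + jitter)), in one expression.
    ((capped_delay as i64 + jitter_ms).max(0) as u64).clamp(100, MAX_RESTART_DELAY_MS)
}

fn main() {
    for attempt in 0..4 {
        println!("attempt {attempt}: {} ms", backoff_delay_ms(attempt, -250));
    }
}
```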
@@ -135,9 +126,7 @@ pub async fn run_mcp_commands<R: Runtime>(
         // If initial startup failed, we still want to continue with other servers
         if let Err(e) = &result {
             log::error!(
-                "Initial startup failed for MCP server {}: {}",
-                name_clone,
-                e
+                "Initial startup failed for MCP server {name_clone}: {e}"
             );
         }
@@ -155,25 +144,23 @@ pub async fn run_mcp_commands<R: Runtime>(
         match handle.await {
             Ok((name, result)) => match result {
                 Ok(_) => {
-                    log::info!("MCP server {} initialized successfully", name);
+                    log::info!("MCP server {name} initialized successfully");
                     successful_count += 1;
                 }
                 Err(e) => {
-                    log::error!("MCP server {} failed to initialize: {}", name, e);
+                    log::error!("MCP server {name} failed to initialize: {e}");
                     failed_count += 1;
                 }
             },
             Err(e) => {
-                log::error!("Failed to join startup task: {}", e);
+                log::error!("Failed to join startup task: {e}");
                 failed_count += 1;
             }
         }
     }
     log::info!(
-        "MCP server initialization complete: {} successful, {} failed",
-        successful_count,
-        failed_count
+        "MCP server initialization complete: {successful_count} successful, {failed_count} failed"
     );
     Ok(())
@@ -184,7 +171,7 @@ pub async fn monitor_mcp_server_handle(
     servers_state: SharedMcpServers,
     name: String,
 ) -> Option<rmcp::service::QuitReason> {
-    log::info!("Monitoring MCP server {} health", name);
+    log::info!("Monitoring MCP server {name} health");
     // Monitor server health with periodic checks
     loop {
@@ -202,17 +189,17 @@ pub async fn monitor_mcp_server_handle(
                     true
                 }
                 Ok(Err(e)) => {
-                    log::warn!("MCP server {} health check failed: {}", name, e);
+                    log::warn!("MCP server {name} health check failed: {e}");
                     false
                 }
                 Err(_) => {
-                    log::warn!("MCP server {} health check timed out", name);
+                    log::warn!("MCP server {name} health check timed out");
                    false
                 }
             }
         } else {
             // Server was removed from HashMap (e.g., by deactivate_mcp_server)
-            log::info!("MCP server {} no longer in running services", name);
+            log::info!("MCP server {name} no longer in running services");
             return Some(rmcp::service::QuitReason::Closed);
         }
     };
@@ -220,8 +207,7 @@ pub async fn monitor_mcp_server_handle(
     if !health_check_result {
         // Server failed health check - remove it and return
         log::error!(
-            "MCP server {} failed health check, removing from active servers",
-            name
+            "MCP server {name} failed health check, removing from active servers"
         );
         let mut servers = servers_state.lock().await;
         if let Some(service) = servers.remove(&name) {
@@ -262,7 +248,7 @@ pub async fn start_mcp_server_with_restart<R: Runtime>(
     let max_restarts = max_restarts.unwrap_or(5);
     // Try the first start attempt and return its result
-    log::info!("Starting MCP server {} (Initial attempt)", name);
+    log::info!("Starting MCP server {name} (Initial attempt)");
     let first_start_result = schedule_mcp_start_task(
         app.clone(),
         servers_state.clone(),
@@ -273,7 +259,7 @@ pub async fn start_mcp_server_with_restart<R: Runtime>(
     match first_start_result {
         Ok(_) => {
-            log::info!("MCP server {} started successfully on first attempt", name);
+            log::info!("MCP server {name} started successfully on first attempt");
             reset_restart_count(&restart_counts, &name).await;
             // Check if server was marked as successfully connected (passed verification)
@@ -298,18 +284,15 @@ pub async fn start_mcp_server_with_restart<R: Runtime>(
                 Ok(())
             } else {
                 // Server failed verification, don't monitor for restarts
-                log::error!("MCP server {} failed verification after startup", name);
+                log::error!("MCP server {name} failed verification after startup");
                 Err(format!(
-                    "MCP server {} failed verification after startup",
-                    name
+                    "MCP server {name} failed verification after startup"
                 ))
             }
         }
         Err(e) => {
             log::error!(
-                "Failed to start MCP server {} on first attempt: {}",
-                name,
-                e
+                "Failed to start MCP server {name} on first attempt: {e}"
             );
             Err(e)
         }
@@ -336,9 +319,7 @@ pub async fn start_restart_loop<R: Runtime>(
         if current_restart_count > max_restarts {
             log::error!(
-                "MCP server {} reached maximum restart attempts ({}). Giving up.",
-                name,
-                max_restarts
+                "MCP server {name} reached maximum restart attempts ({max_restarts}). Giving up."
             );
             if let Err(e) = app.emit(
                 "mcp_max_restarts_reached",
@@ -353,19 +334,13 @@ pub async fn start_restart_loop<R: Runtime>(
         }
         log::info!(
-            "Restarting MCP server {} (Attempt {}/{})",
-            name,
-            current_restart_count,
-            max_restarts
+            "Restarting MCP server {name} (Attempt {current_restart_count}/{max_restarts})"
         );
         // Calculate exponential backoff delay
         let delay_ms = calculate_exponential_backoff_delay(current_restart_count);
         log::info!(
-            "Waiting {}ms before restart attempt {} for MCP server {}",
-            delay_ms,
-            current_restart_count,
-            name
+            "Waiting {delay_ms}ms before restart attempt {current_restart_count} for MCP server {name}"
         );
         sleep(Duration::from_millis(delay_ms)).await;
@@ -380,7 +355,7 @@ pub async fn start_restart_loop<R: Runtime>(
         match start_result {
             Ok(_) => {
-                log::info!("MCP server {} restarted successfully.", name);
+                log::info!("MCP server {name} restarted successfully.");
                 // Check if server passed verification (was marked as successfully connected)
                 let passed_verification = {
@@ -390,8 +365,7 @@ pub async fn start_restart_loop<R: Runtime>(
                 if !passed_verification {
                     log::error!(
-                        "MCP server {} failed verification after restart - stopping permanently",
-                        name
+                        "MCP server {name} failed verification after restart - stopping permanently"
                     );
                     break;
                 }
@@ -402,9 +376,7 @@ pub async fn start_restart_loop<R: Runtime>(
                 if let Some(count) = counts.get_mut(&name) {
                     if *count > 0 {
                         log::info!(
-                            "MCP server {} restarted successfully, resetting restart count from {} to 0.",
-                            name,
-                            *count
+                            "MCP server {name} restarted successfully, resetting restart count from {count} to 0."
                         );
                         *count = 0;
                     }
@@ -415,7 +387,7 @@ pub async fn start_restart_loop<R: Runtime>(
                 let quit_reason =
                     monitor_mcp_server_handle(servers_state.clone(), name.clone()).await;
-                log::info!("MCP server {} quit with reason: {:?}", name, quit_reason);
+                log::info!("MCP server {name} quit with reason: {quit_reason:?}");
                 // Check if server was marked as successfully connected
                 let was_connected = {
@@ -426,8 +398,7 @@ pub async fn start_restart_loop<R: Runtime>(
                 // Only continue restart loop if server was previously connected
                 if !was_connected {
                     log::error!(
-                        "MCP server {} failed before establishing successful connection - stopping permanently",
-                        name
+                        "MCP server {name} failed before establishing successful connection - stopping permanently"
                     );
                     break;
                 }
@@ -435,11 +406,11 @@ pub async fn start_restart_loop<R: Runtime>(
                 // Determine if we should restart based on quit reason
                 let should_restart = match quit_reason {
                     Some(reason) => {
-                        log::warn!("MCP server {} terminated unexpectedly: {:?}", name, reason);
+                        log::warn!("MCP server {name} terminated unexpectedly: {reason:?}");
                         true
                     }
                     None => {
-                        log::info!("MCP server {} was manually stopped - not restarting", name);
+                        log::info!("MCP server {name} was manually stopped - not restarting");
                         false
                     }
                 };
@@ -450,7 +421,7 @@ pub async fn start_restart_loop<R: Runtime>(
                 // Continue the loop for another restart attempt
             }
             Err(e) => {
-                log::error!("Failed to restart MCP server {}: {}", name, e);
+                log::error!("Failed to restart MCP server {name}: {e}");
                 // Check if server was marked as successfully connected before
                 let was_connected = {
@@ -461,8 +432,7 @@ pub async fn start_restart_loop<R: Runtime>(
                 // Only continue restart attempts if server was previously connected
                 if !was_connected {
                     log::error!(
-                        "MCP server {} failed restart and was never successfully connected - stopping permanently",
-                        name
+                        "MCP server {name} failed restart and was never successfully connected - stopping permanently"
                     );
                     break;
                 }
@@ -529,7 +499,7 @@ async fn schedule_mcp_start_task<R: Runtime>(
             },
         };
         let client = client_info.serve(transport).await.inspect_err(|e| {
-            log::error!("client error: {:?}", e);
+            log::error!("client error: {e:?}");
         });
         match client {
@@ -545,12 +515,12 @@ async fn schedule_mcp_start_task<R: Runtime>(
                     let app_state = app.state::<AppState>();
                     let mut connected = app_state.mcp_successfully_connected.lock().await;
                     connected.insert(name.clone(), true);
-                    log::info!("Marked MCP server {} as successfully connected", name);
+                    log::info!("Marked MCP server {name} as successfully connected");
                 }
             }
             Err(e) => {
-                log::error!("Failed to connect to server: {}", e);
-                return Err(format!("Failed to connect to server: {}", e));
+                log::error!("Failed to connect to server: {e}");
+                return Err(format!("Failed to connect to server: {e}"));
             }
         }
     } else if config_params.transport_type.as_deref() == Some("sse") && config_params.url.is_some()
@@ -587,8 +557,8 @@ async fn schedule_mcp_start_task<R: Runtime>(
         )
         .await
         .map_err(|e| {
-            log::error!("transport error: {:?}", e);
-            format!("Failed to start SSE transport: {}", e)
+            log::error!("transport error: {e:?}");
+            format!("Failed to start SSE transport: {e}")
         })?;
         let client_info = ClientInfo {
@@ -600,7 +570,7 @@ async fn schedule_mcp_start_task<R: Runtime>(
             },
         };
         let client = client_info.serve(transport).await.map_err(|e| {
-            log::error!("client error: {:?}", e);
+            log::error!("client error: {e:?}");
             e.to_string()
         });
@@ -617,12 +587,12 @@ async fn schedule_mcp_start_task<R: Runtime>(
                     let app_state = app.state::<AppState>();
                     let mut connected = app_state.mcp_successfully_connected.lock().await;
                     connected.insert(name.clone(), true);
-                    log::info!("Marked MCP server {} as successfully connected", name);
+                    log::info!("Marked MCP server {name} as successfully connected");
                 }
             }
             Err(e) => {
-                log::error!("Failed to connect to server: {}", e);
-                return Err(format!("Failed to connect to server: {}", e));
+                log::error!("Failed to connect to server: {e}");
+                return Err(format!("Failed to connect to server: {e}"));
             }
         }
     } else {
@@ -639,7 +609,7 @@ async fn schedule_mcp_start_task<R: Runtime>(
             cache_dir.push(".npx");
             cmd = Command::new(bun_x_path.display().to_string());
             cmd.arg("x");
-            cmd.env("BUN_INSTALL", cache_dir.to_str().unwrap().to_string());
+            cmd.env("BUN_INSTALL", cache_dir.to_str().unwrap());
         }
         let uv_path = if cfg!(windows) {
@@ -654,7 +624,7 @@ async fn schedule_mcp_start_task<R: Runtime>(
             cmd = Command::new(uv_path);
             cmd.arg("tool");
             cmd.arg("run");
-            cmd.env("UV_CACHE_DIR", cache_dir.to_str().unwrap().to_string());
+            cmd.env("UV_CACHE_DIR", cache_dir.to_str().unwrap());
         }
         #[cfg(windows)]
         {
@@ -726,8 +696,7 @@ async fn schedule_mcp_start_task<R: Runtime>(
         if !server_still_running {
             return Err(format!(
-                "MCP server {} quit immediately after starting",
-                name
+                "MCP server {name} quit immediately after starting"
             ));
         }
         // Mark server as successfully connected (for restart policy)
@@ -735,7 +704,7 @@ async fn schedule_mcp_start_task<R: Runtime>(
             let app_state = app.state::<AppState>();
             let mut connected = app_state.mcp_successfully_connected.lock().await;
             connected.insert(name.clone(), true);
-            log::info!("Marked MCP server {} as successfully connected", name);
+            log::info!("Marked MCP server {name} as successfully connected");
         }
     }
     Ok(())
@@ -792,7 +761,7 @@ pub async fn restart_active_mcp_servers<R: Runtime>(
     );
     for (name, config) in active_servers.iter() {
-        log::info!("Restarting MCP server: {}", name);
+        log::info!("Restarting MCP server: {name}");
         // Start server with restart monitoring - spawn async task
         let app_clone = app.clone();
@@ -891,9 +860,7 @@ pub async fn spawn_server_monitoring_task<R: Runtime>(
             monitor_mcp_server_handle(servers_clone.clone(), name_clone.clone()).await;
         log::info!(
-            "MCP server {} quit with reason: {:?}",
-            name_clone,
-            quit_reason
+            "MCP server {name_clone} quit with reason: {quit_reason:?}"
         );
         // Check if we should restart based on connection status and quit reason
@@ -928,8 +895,7 @@ pub async fn should_restart_server(
     // Only restart if server was previously connected
     if !was_connected {
         log::error!(
-            "MCP server {} failed before establishing successful connection - stopping permanently",
-            name
+            "MCP server {name} failed before establishing successful connection - stopping permanently"
        );
         return false;
     }
@@ -937,11 +903,11 @@ pub async fn should_restart_server(
     // Determine if we should restart based on quit reason
     match quit_reason {
         Some(reason) => {
-            log::warn!("MCP server {} terminated unexpectedly: {:?}", name, reason);
+            log::warn!("MCP server {name} terminated unexpectedly: {reason:?}");
             true
         }
         None => {
-            log::info!("MCP server {} was manually stopped - not restarting", name);
+            log::info!("MCP server {name} was manually stopped - not restarting");
             false
         }
     }


@@ -70,7 +70,7 @@ fn test_add_server_config_new_file() {
         Some("mcp_config_test_new.json"),
     );
-    assert!(result.is_ok(), "Failed to add server config: {:?}", result);
+    assert!(result.is_ok(), "Failed to add server config: {result:?}");
     // Verify the config was added correctly
     let config_content = std::fs::read_to_string(&config_path)
@@ -128,7 +128,7 @@ fn test_add_server_config_existing_servers() {
         Some("mcp_config_test_existing.json"),
     );
-    assert!(result.is_ok(), "Failed to add server config: {:?}", result);
+    assert!(result.is_ok(), "Failed to add server config: {result:?}");
     // Verify both servers exist
     let config_content = std::fs::read_to_string(&config_path)


@ -67,7 +67,7 @@ async fn proxy_request(
.any(|&method| method.eq_ignore_ascii_case(requested_method)); .any(|&method| method.eq_ignore_ascii_case(requested_method));
if !method_allowed { if !method_allowed {
log::warn!("CORS preflight: Method '{}' not allowed", requested_method); log::warn!("CORS preflight: Method '{requested_method}' not allowed");
return Ok(Response::builder() return Ok(Response::builder()
.status(StatusCode::METHOD_NOT_ALLOWED) .status(StatusCode::METHOD_NOT_ALLOWED)
.body(Body::from("Method not allowed")) .body(Body::from("Method not allowed"))
@ -80,14 +80,12 @@ async fn proxy_request(
let is_trusted = if is_whitelisted_path { let is_trusted = if is_whitelisted_path {
log::debug!( log::debug!(
"CORS preflight: Bypassing host check for whitelisted path: {}", "CORS preflight: Bypassing host check for whitelisted path: {request_path}"
request_path
); );
true true
} else if !host.is_empty() { } else if !host.is_empty() {
log::debug!( log::debug!(
"CORS preflight: Host is '{}', trusted hosts: {:?}", "CORS preflight: Host is '{host}', trusted hosts: {:?}",
host,
&config.trusted_hosts &config.trusted_hosts
); );
is_valid_host(host, &config.trusted_hosts) is_valid_host(host, &config.trusted_hosts)
@ -98,9 +96,7 @@ async fn proxy_request(
if !is_trusted { if !is_trusted {
log::warn!( log::warn!(
"CORS preflight: Host '{}' not trusted for path '{}'", "CORS preflight: Host '{host}' not trusted for path '{request_path}'"
host,
request_path
); );
return Ok(Response::builder() return Ok(Response::builder()
.status(StatusCode::FORBIDDEN) .status(StatusCode::FORBIDDEN)
@ -158,8 +154,7 @@ async fn proxy_request(
if !headers_valid { if !headers_valid {
log::warn!( log::warn!(
"CORS preflight: Some requested headers not allowed: {}", "CORS preflight: Some requested headers not allowed: {requested_headers}"
requested_headers
); );
return Ok(Response::builder() return Ok(Response::builder()
.status(StatusCode::FORBIDDEN) .status(StatusCode::FORBIDDEN)
@ -186,9 +181,7 @@ async fn proxy_request(
} }
log::debug!( log::debug!(
"CORS preflight response: host_trusted={}, origin='{}'", "CORS preflight response: host_trusted={is_trusted}, origin='{origin}'"
is_trusted,
origin
); );
return Ok(response.body(Body::empty()).unwrap()); return Ok(response.body(Body::empty()).unwrap());
} }
@ -252,7 +245,7 @@ async fn proxy_request(
.unwrap()); .unwrap());
} }
} else { } else {
log::debug!("Bypassing host validation for whitelisted path: {}", path); log::debug!("Bypassing host validation for whitelisted path: {path}");
} }
if !is_whitelisted_path && !config.proxy_api_key.is_empty() { if !is_whitelisted_path && !config.proxy_api_key.is_empty() {
@ -285,8 +278,7 @@ async fn proxy_request(
} }
} else if is_whitelisted_path { } else if is_whitelisted_path {
log::debug!( log::debug!(
"Bypassing authorization check for whitelisted path: {}", "Bypassing authorization check for whitelisted path: {path}"
path
); );
} }
@ -312,8 +304,7 @@ async fn proxy_request(
| (hyper::Method::POST, "/completions") | (hyper::Method::POST, "/completions")
| (hyper::Method::POST, "/embeddings") => { | (hyper::Method::POST, "/embeddings") => {
log::debug!( log::debug!(
"Handling POST request to {} requiring model lookup in body", "Handling POST request to {destination_path} requiring model lookup in body",
destination_path
); );
let body_bytes = match hyper::body::to_bytes(body).await { let body_bytes = match hyper::body::to_bytes(body).await {
Ok(bytes) => bytes, Ok(bytes) => bytes,
@ -336,13 +327,12 @@ async fn proxy_request(
match serde_json::from_slice::<serde_json::Value>(&body_bytes) { match serde_json::from_slice::<serde_json::Value>(&body_bytes) {
Ok(json_body) => { Ok(json_body) => {
if let Some(model_id) = json_body.get("model").and_then(|v| v.as_str()) { if let Some(model_id) = json_body.get("model").and_then(|v| v.as_str()) {
log::debug!("Extracted model_id: {}", model_id); log::debug!("Extracted model_id: {model_id}");
let sessions_guard = sessions.lock().await; let sessions_guard = sessions.lock().await;
if sessions_guard.is_empty() { if sessions_guard.is_empty() {
log::warn!( log::warn!(
"Request for model '{}' but no models are running.", "Request for model '{model_id}' but no models are running."
model_id
); );
let mut error_response = let mut error_response =
Response::builder().status(StatusCode::SERVICE_UNAVAILABLE); Response::builder().status(StatusCode::SERVICE_UNAVAILABLE);
@ -363,9 +353,9 @@ async fn proxy_request(
{ {
target_port = Some(session.info.port); target_port = Some(session.info.port);
session_api_key = Some(session.info.api_key.clone()); session_api_key = Some(session.info.api_key.clone());
log::debug!("Found session for model_id {}", model_id,); log::debug!("Found session for model_id {model_id}");
} else { } else {
log::warn!("No running session found for model_id: {}", model_id); log::warn!("No running session found for model_id: {model_id}");
let mut error_response = let mut error_response =
Response::builder().status(StatusCode::NOT_FOUND); Response::builder().status(StatusCode::NOT_FOUND);
error_response = add_cors_headers_with_host_and_origin( error_response = add_cors_headers_with_host_and_origin(
@ -376,15 +366,13 @@ async fn proxy_request(
); );
return Ok(error_response return Ok(error_response
.body(Body::from(format!( .body(Body::from(format!(
"No running session found for model '{}'", "No running session found for model '{model_id}'"
model_id
))) )))
.unwrap()); .unwrap());
} }
} else { } else {
log::warn!( log::warn!(
"POST body for {} is missing 'model' field or it's not a string", "POST body for {destination_path} is missing 'model' field or it's not a string"
destination_path
); );
let mut error_response = let mut error_response =
Response::builder().status(StatusCode::BAD_REQUEST); Response::builder().status(StatusCode::BAD_REQUEST);
@ -401,9 +389,7 @@ async fn proxy_request(
} }
Err(e) => { Err(e) => {
log::warn!( log::warn!(
"Failed to parse POST body for {} as JSON: {}", "Failed to parse POST body for {destination_path} as JSON: {e}"
destination_path,
e
); );
let mut error_response = Response::builder().status(StatusCode::BAD_REQUEST); let mut error_response = Response::builder().status(StatusCode::BAD_REQUEST);
error_response = add_cors_headers_with_host_and_origin( error_response = add_cors_headers_with_host_and_origin(
@ -535,7 +521,7 @@ async fn proxy_request(
let is_explicitly_whitelisted_get = method == hyper::Method::GET let is_explicitly_whitelisted_get = method == hyper::Method::GET
&& whitelisted_paths.contains(&destination_path.as_str()); && whitelisted_paths.contains(&destination_path.as_str());
if is_explicitly_whitelisted_get { if is_explicitly_whitelisted_get {
log::debug!("Handled whitelisted GET path: {}", destination_path); log::debug!("Handled whitelisted GET path: {destination_path}");
let mut error_response = Response::builder().status(StatusCode::NOT_FOUND); let mut error_response = Response::builder().status(StatusCode::NOT_FOUND);
error_response = add_cors_headers_with_host_and_origin( error_response = add_cors_headers_with_host_and_origin(
error_response, error_response,
@ -546,9 +532,7 @@ async fn proxy_request(
return Ok(error_response.body(Body::from("Not Found")).unwrap()); return Ok(error_response.body(Body::from("Not Found")).unwrap());
} else { } else {
log::warn!( log::warn!(
"Unhandled method/path for dynamic routing: {} {}", "Unhandled method/path for dynamic routing: {method} {destination_path}"
method,
destination_path
); );
let mut error_response = Response::builder().status(StatusCode::NOT_FOUND); let mut error_response = Response::builder().status(StatusCode::NOT_FOUND);
error_response = add_cors_headers_with_host_and_origin( error_response = add_cors_headers_with_host_and_origin(
@ -581,7 +565,7 @@ async fn proxy_request(
} }
}; };
let upstream_url = format!("http://127.0.0.1:{}{}", port, destination_path); let upstream_url = format!("http://127.0.0.1:{port}{destination_path}");
let mut outbound_req = client.request(method.clone(), &upstream_url); let mut outbound_req = client.request(method.clone(), &upstream_url);
@ -593,13 +577,14 @@ async fn proxy_request(
if let Some(key) = session_api_key { if let Some(key) = session_api_key {
log::debug!("Adding session Authorization header"); log::debug!("Adding session Authorization header");
outbound_req = outbound_req.header("Authorization", format!("Bearer {}", key)); outbound_req = outbound_req.header("Authorization", format!("Bearer {key}"));
} else { } else {
log::debug!("No session API key available for this request"); log::debug!("No session API key available for this request");
} }
let outbound_req_with_body = if let Some(bytes) = buffered_body { let outbound_req_with_body = if let Some(bytes) = buffered_body {
log::debug!("Sending buffered body ({} bytes)", bytes.len()); let bytes_len = bytes.len();
log::debug!("Sending buffered body ({bytes_len} bytes)");
outbound_req.body(bytes) outbound_req.body(bytes)
} else { } else {
log::error!("Internal logic error: Request reached proxy stage without a buffered body."); log::error!("Internal logic error: Request reached proxy stage without a buffered body.");
@ -618,7 +603,7 @@ async fn proxy_request(
match outbound_req_with_body.send().await { match outbound_req_with_body.send().await {
Ok(response) => { Ok(response) => {
let status = response.status(); let status = response.status();
log::debug!("Received response with status: {}", status); log::debug!("Received response with status: {status}");
let mut builder = Response::builder().status(status); let mut builder = Response::builder().status(status);
@ -648,7 +633,7 @@ async fn proxy_request(
} }
} }
Err(e) => { Err(e) => {
log::error!("Stream error: {}", e); log::error!("Stream error: {e}");
break; break;
} }
} }
@ -659,8 +644,8 @@ async fn proxy_request(
Ok(builder.body(body).unwrap()) Ok(builder.body(body).unwrap())
} }
Err(e) => { Err(e) => {
let error_msg = format!("Proxy request to model failed: {}", e); let error_msg = format!("Proxy request to model failed: {e}");
log::error!("{}", error_msg); log::error!("{error_msg}");
let mut error_response = Response::builder().status(StatusCode::BAD_GATEWAY); let mut error_response = Response::builder().status(StatusCode::BAD_GATEWAY);
error_response = add_cors_headers_with_host_and_origin( error_response = add_cors_headers_with_host_and_origin(
error_response, error_response,
@ -675,14 +660,12 @@ async fn proxy_request(
fn add_cors_headers_with_host_and_origin( fn add_cors_headers_with_host_and_origin(
builder: hyper::http::response::Builder, builder: hyper::http::response::Builder,
host: &str, _host: &str,
origin: &str, origin: &str,
trusted_hosts: &[Vec<String>], _trusted_hosts: &[Vec<String>],
) -> hyper::http::response::Builder { ) -> hyper::http::response::Builder {
let mut builder = builder; let mut builder = builder;
let allow_origin_header = if !origin.is_empty() && is_valid_host(host, trusted_hosts) { let allow_origin_header = if !origin.is_empty() {
origin.to_string()
} else if !origin.is_empty() {
origin.to_string() origin.to_string()
} else { } else {
"*".to_string() "*".to_string()
@ -706,6 +689,7 @@ pub async fn is_server_running(server_handle: Arc<Mutex<Option<ServerHandle>>>)
handle_guard.is_some() handle_guard.is_some()
} }
#[allow(clippy::too_many_arguments)]
pub async fn start_server( pub async fn start_server(
server_handle: Arc<Mutex<Option<ServerHandle>>>, server_handle: Arc<Mutex<Option<ServerHandle>>>,
sessions: Arc<Mutex<HashMap<i32, LLamaBackendSession>>>, sessions: Arc<Mutex<HashMap<i32, LLamaBackendSession>>>,
@ -721,9 +705,9 @@ pub async fn start_server(
return Err("Server is already running".into()); return Err("Server is already running".into());
} }
let addr: SocketAddr = format!("{}:{}", host, port) let addr: SocketAddr = format!("{host}:{port}")
.parse() .parse()
.map_err(|e| format!("Invalid address: {}", e))?; .map_err(|e| format!("Invalid address: {e}"))?;
let config = ProxyConfig { let config = ProxyConfig {
prefix, prefix,
@ -752,15 +736,15 @@ pub async fn start_server(
let server = match Server::try_bind(&addr) { let server = match Server::try_bind(&addr) {
Ok(builder) => builder.serve(make_svc), Ok(builder) => builder.serve(make_svc),
Err(e) => { Err(e) => {
log::error!("Failed to bind to {}: {}", addr, e); log::error!("Failed to bind to {addr}: {e}");
return Err(Box::new(e)); return Err(Box::new(e));
} }
}; };
log::info!("Jan API server started on http://{}", addr); log::info!("Jan API server started on http://{addr}");
let server_task = tokio::spawn(async move { let server_task = tokio::spawn(async move {
if let Err(e) = server.await { if let Err(e) = server.await {
log::error!("Server error: {}", e); log::error!("Server error: {e}");
return Err(Box::new(e) as Box<dyn std::error::Error + Send + Sync>); return Err(Box::new(e) as Box<dyn std::error::Error + Send + Sync>);
} }
Ok(()) Ok(())
@ -768,7 +752,7 @@ pub async fn start_server(
*handle_guard = Some(server_task); *handle_guard = Some(server_task);
let actual_port = addr.port(); let actual_port = addr.port();
log::info!("Jan API server started successfully on port {}", actual_port); log::info!("Jan API server started successfully on port {actual_port}");
Ok(actual_port) Ok(actual_port)
} }
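The bulk of the changes in proxy_request and start_server above replace positional format arguments with captured identifiers, which clippy's uninlined_format_args lint suggests. Only bare variable names can be captured, not arbitrary expressions, which is why bytes.len() is bound to a local bytes_len before the log::debug! call. A minimal, self-contained sketch of the rule (the variable names are placeholders, not code from this commit):

    fn main() {
        let host = "127.0.0.1";
        let port = 1337;

        // Positional arguments still work.
        let with_positional = format!("{}:{}", host, port);

        // Captured identifiers (stable since Rust 1.58): bare names go inline.
        let with_capture = format!("{host}:{port}");
        assert_eq!(with_positional, with_capture);

        // Expressions cannot be captured, so bind them to a local first.
        let addr = with_capture;
        let addr_len = addr.len();
        println!("address {addr} has {addr_len} bytes");
    }

Debug and other formatting specifiers work the same way, e.g. {extension_dir:?} and {data_folder:?} later in this diff.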
View File
@ -38,7 +38,7 @@ pub fn install_extensions<R: Runtime>(app: tauri::AppHandle<R>, force: bool) ->
if std::env::var("IS_CLEAN").is_ok() { if std::env::var("IS_CLEAN").is_ok() {
clean_up = true; clean_up = true;
} }
log::info!("Installing extensions. Clean up: {}", clean_up); log::info!("Installing extensions. Clean up: {clean_up}");
if !clean_up && extensions_path.exists() { if !clean_up && extensions_path.exists() {
return Ok(()); return Ok(());
} }
@ -68,7 +68,7 @@ pub fn install_extensions<R: Runtime>(app: tauri::AppHandle<R>, force: bool) ->
let entry = entry.map_err(|e| e.to_string())?; let entry = entry.map_err(|e| e.to_string())?;
let path = entry.path(); let path = entry.path();
if path.extension().map_or(false, |ext| ext == "tgz") { if path.extension().is_some_and(|ext| ext == "tgz") {
let tar_gz = File::open(&path).map_err(|e| e.to_string())?; let tar_gz = File::open(&path).map_err(|e| e.to_string())?;
let gz_decoder = GzDecoder::new(tar_gz); let gz_decoder = GzDecoder::new(tar_gz);
let mut archive = Archive::new(gz_decoder); let mut archive = Archive::new(gz_decoder);
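The map_or(false, ...) to is_some_and(...) change above follows a clippy suggestion; Option::is_some_and has been stable since Rust 1.70 and reads as a single predicate. A small equivalence sketch using a made-up path rather than anything from the extensions folder:

    use std::path::Path;

    fn is_tgz(path: &Path) -> bool {
        // Both spellings answer "is there an extension and does it equal tgz?".
        let via_map_or = path.extension().map_or(false, |ext| ext == "tgz");
        let via_is_some_and = path.extension().is_some_and(|ext| ext == "tgz");
        assert_eq!(via_map_or, via_is_some_and);
        via_is_some_and
    }

    fn main() {
        assert!(is_tgz(Path::new("bundle.tgz")));
        assert!(!is_tgz(Path::new("manifest.json")));
        assert!(!is_tgz(Path::new("no_extension")));
    }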
@ -134,7 +134,7 @@ pub fn install_extensions<R: Runtime>(app: tauri::AppHandle<R>, force: bool) ->
extensions_list.push(new_extension); extensions_list.push(new_extension);
log::info!("Installed extension to {:?}", extension_dir); log::info!("Installed extension to {extension_dir:?}");
} }
} }
fs::write( fs::write(
@ -154,7 +154,7 @@ pub fn migrate_mcp_servers(
let mcp_version = store let mcp_version = store
.get("mcp_version") .get("mcp_version")
.and_then(|v| v.as_i64()) .and_then(|v| v.as_i64())
.unwrap_or_else(|| 0); .unwrap_or(0);
if mcp_version < 1 { if mcp_version < 1 {
log::info!("Migrating MCP schema version 1"); log::info!("Migrating MCP schema version 1");
let result = add_server_config( let result = add_server_config(
@ -168,7 +168,7 @@ pub fn migrate_mcp_servers(
}), }),
); );
if let Err(e) = result { if let Err(e) = result {
log::error!("Failed to add server config: {}", e); log::error!("Failed to add server config: {e}");
} }
} }
store.set("mcp_version", 1); store.set("mcp_version", 1);
@ -212,7 +212,7 @@ pub fn setup_mcp<R: Runtime>(app: &App<R>) {
let app_handle = app.handle().clone(); let app_handle = app.handle().clone();
tauri::async_runtime::spawn(async move { tauri::async_runtime::spawn(async move {
if let Err(e) = run_mcp_commands(&app_handle, servers).await { if let Err(e) = run_mcp_commands(&app_handle, servers).await {
log::error!("Failed to run mcp commands: {}", e); log::error!("Failed to run mcp commands: {e}");
} }
app_handle app_handle
.emit("mcp-update", "MCP servers updated") .emit("mcp-update", "MCP servers updated")
@ -258,7 +258,7 @@ pub fn setup_tray(app: &App) -> tauri::Result<TrayIcon> {
app.exit(0); app.exit(0);
} }
other => { other => {
println!("menu item {} not handled", other); println!("menu item {other} not handled");
} }
}) })
.build(app) .build(app)
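Two other small patterns appear in this file and in run() further down: .unwrap_or_else(|| 0) becomes .unwrap_or(0) and .unwrap_or_else(|| "".to_string()) becomes .unwrap_or_default(), per clippy's unnecessary_lazy_evaluations guidance. The closure form only pays off when the fallback is costly to build. An illustrative sketch (the fallback.cfg path is a placeholder):

    fn main() {
        // A literal fallback costs nothing, so the eager form is clearer.
        let mcp_version: Option<i64> = None;
        let mcp_version = mcp_version.unwrap_or(0);

        // An empty String is the type's Default, so unwrap_or_default says the same thing.
        let version: Option<String> = None;
        let version = version.unwrap_or_default();

        // Keep the lazy form when the fallback actually does work.
        let config: Option<String> = None;
        let config = config
            .unwrap_or_else(|| std::fs::read_to_string("fallback.cfg").unwrap_or_default());

        println!("mcp_version={mcp_version}, version={version:?}, config bytes={}", config.len());
    }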
View File
@ -18,12 +18,12 @@ pub fn factory_reset<R: Runtime>(app_handle: tauri::AppHandle<R>, state: State<'
let windows = app_handle.webview_windows(); let windows = app_handle.webview_windows();
for (label, window) in windows.iter() { for (label, window) in windows.iter() {
window.close().unwrap_or_else(|_| { window.close().unwrap_or_else(|_| {
log::warn!("Failed to close window: {:?}", label); log::warn!("Failed to close window: {label:?}");
}); });
} }
} }
let data_folder = get_jan_data_folder_path(app_handle.clone()); let data_folder = get_jan_data_folder_path(app_handle.clone());
log::info!("Factory reset, removing data folder: {:?}", data_folder); log::info!("Factory reset, removing data folder: {data_folder:?}");
tauri::async_runtime::block_on(async { tauri::async_runtime::block_on(async {
clean_up_mcp_servers(state.clone()).await; clean_up_mcp_servers(state.clone()).await;
@ -31,7 +31,7 @@ pub fn factory_reset<R: Runtime>(app_handle: tauri::AppHandle<R>, state: State<'
if data_folder.exists() { if data_folder.exists() {
if let Err(e) = fs::remove_dir_all(&data_folder) { if let Err(e) = fs::remove_dir_all(&data_folder) {
log::error!("Failed to remove data folder: {}", e); log::error!("Failed to remove data folder: {e}");
return; return;
} }
} }
@ -59,17 +59,17 @@ pub fn open_app_directory<R: Runtime>(app: AppHandle<R>) {
if cfg!(target_os = "windows") { if cfg!(target_os = "windows") {
std::process::Command::new("explorer") std::process::Command::new("explorer")
.arg(app_path) .arg(app_path)
.spawn() .status()
.expect("Failed to open app directory"); .expect("Failed to open app directory");
} else if cfg!(target_os = "macos") { } else if cfg!(target_os = "macos") {
std::process::Command::new("open") std::process::Command::new("open")
.arg(app_path) .arg(app_path)
.spawn() .status()
.expect("Failed to open app directory"); .expect("Failed to open app directory");
} else { } else {
std::process::Command::new("xdg-open") std::process::Command::new("xdg-open")
.arg(app_path) .arg(app_path)
.spawn() .status()
.expect("Failed to open app directory"); .expect("Failed to open app directory");
} }
} }
@ -80,17 +80,17 @@ pub fn open_file_explorer(path: String) {
if cfg!(target_os = "windows") { if cfg!(target_os = "windows") {
std::process::Command::new("explorer") std::process::Command::new("explorer")
.arg(path) .arg(path)
.spawn() .status()
.expect("Failed to open file explorer"); .expect("Failed to open file explorer");
} else if cfg!(target_os = "macos") { } else if cfg!(target_os = "macos") {
std::process::Command::new("open") std::process::Command::new("open")
.arg(path) .arg(path)
.spawn() .status()
.expect("Failed to open file explorer"); .expect("Failed to open file explorer");
} else { } else {
std::process::Command::new("xdg-open") std::process::Command::new("xdg-open")
.arg(path) .arg(path)
.spawn() .status()
.expect("Failed to open file explorer"); .expect("Failed to open file explorer");
} }
} }
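Note the API difference behind the .spawn() to .status() change in open_app_directory and open_file_explorer: spawn returns a Child handle immediately, while status spawns and then waits for the process to exit, returning its ExitStatus. A standalone sketch using throwaway shell commands (platform-dependent, purely illustrative):

    use std::process::Command;

    fn main() -> std::io::Result<()> {
        // A trivially available command per platform, just for demonstration.
        let mut cmd = if cfg!(target_os = "windows") {
            let mut c = Command::new("cmd");
            c.args(["/C", "echo hello"]);
            c
        } else {
            let mut c = Command::new("sh");
            c.args(["-c", "echo hello"]);
            c
        };

        // spawn(): returns a Child right away; the caller decides whether to wait.
        let mut child = cmd.spawn()?;
        let waited = child.wait()?;

        // status(): spawns and waits in one call, yielding the ExitStatus directly.
        let direct = cmd.status()?;

        println!("wait() -> {waited}, status() -> {direct}");
        Ok(())
    }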
@ -102,7 +102,7 @@ pub async fn read_logs<R: Runtime>(app: AppHandle<R>) -> Result<String, String>
let content = fs::read_to_string(log_path).map_err(|e| e.to_string())?; let content = fs::read_to_string(log_path).map_err(|e| e.to_string())?;
Ok(content) Ok(content)
} else { } else {
Err(format!("Log file not found")) Err("Log file not found".to_string())
} }
} }
@ -112,7 +112,7 @@ pub fn is_library_available(library: &str) -> bool {
match unsafe { libloading::Library::new(library) } { match unsafe { libloading::Library::new(library) } {
Ok(_) => true, Ok(_) => true,
Err(e) => { Err(e) => {
log::info!("Library {} is not available: {}", library, e); log::info!("Library {library} is not available: {e}");
false false
} }
} }
View File
@ -38,7 +38,7 @@ pub async fn list_threads<R: Runtime>(
match serde_json::from_str(&data) { match serde_json::from_str(&data) {
Ok(thread) => threads.push(thread), Ok(thread) => threads.push(thread),
Err(e) => { Err(e) => {
println!("Failed to parse thread file: {}", e); println!("Failed to parse thread file: {e}");
continue; // skip invalid thread files continue; // skip invalid thread files
} }
} }
@ -149,7 +149,7 @@ pub async fn create_message<R: Runtime>(
.map_err(|e| e.to_string())?; .map_err(|e| e.to_string())?;
let data = serde_json::to_string(&message).map_err(|e| e.to_string())?; let data = serde_json::to_string(&message).map_err(|e| e.to_string())?;
writeln!(file, "{}", data).map_err(|e| e.to_string())?; writeln!(file, "{data}").map_err(|e| e.to_string())?;
// Explicitly flush to ensure data is written before returning // Explicitly flush to ensure data is written before returning
file.flush().map_err(|e| e.to_string())?; file.flush().map_err(|e| e.to_string())?;
@ -234,7 +234,7 @@ pub async fn get_thread_assistant<R: Runtime>(
let data = fs::read_to_string(&path).map_err(|e| e.to_string())?; let data = fs::read_to_string(&path).map_err(|e| e.to_string())?;
let thread: serde_json::Value = serde_json::from_str(&data).map_err(|e| e.to_string())?; let thread: serde_json::Value = serde_json::from_str(&data).map_err(|e| e.to_string())?;
if let Some(assistants) = thread.get("assistants").and_then(|a| a.as_array()) { if let Some(assistants) = thread.get("assistants").and_then(|a| a.as_array()) {
if let Some(first) = assistants.get(0) { if let Some(first) = assistants.first() {
Ok(first.clone()) Ok(first.clone())
} else { } else {
Err("Assistant not found".to_string()) Err("Assistant not found".to_string())
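assistants.first() in get_thread_assistant is the clippy-preferred spelling of assistants.get(0); both return Option<&T> and never panic on an empty slice. A tiny sketch with plain strings instead of the thread's JSON values:

    fn main() {
        let assistants = vec!["default", "helper"];

        // Equivalent: both return Option<&T>.
        assert_eq!(assistants.get(0), assistants.first());

        match assistants.first() {
            Some(first) => println!("first assistant: {first}"),
            None => println!("no assistants configured"),
        }

        let empty: Vec<&str> = Vec::new();
        assert!(empty.first().is_none());
    }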
View File
@ -33,7 +33,7 @@ pub fn write_messages_to_file(
let mut file = File::create(path).map_err(|e| e.to_string())?; let mut file = File::create(path).map_err(|e| e.to_string())?;
for msg in messages { for msg in messages {
let data = serde_json::to_string(msg).map_err(|e| e.to_string())?; let data = serde_json::to_string(msg).map_err(|e| e.to_string())?;
writeln!(file, "{}", data).map_err(|e| e.to_string())?; writeln!(file, "{data}").map_err(|e| e.to_string())?;
} }
Ok(()) Ok(())
} }
View File
@ -16,7 +16,7 @@ fn mock_app_with_temp_data_dir() -> (tauri::App<MockRuntime>, PathBuf) {
.as_nanos(); .as_nanos();
let data_dir = std::env::current_dir() let data_dir = std::env::current_dir()
.unwrap_or_else(|_| PathBuf::from(".")) .unwrap_or_else(|_| PathBuf::from("."))
.join(format!("test-data-{:?}-{}", unique_id, timestamp)); .join(format!("test-data-{unique_id:?}-{timestamp}"));
println!("Mock app data dir: {}", data_dir.display()); println!("Mock app data dir: {}", data_dir.display());
// Ensure the unique test directory exists // Ensure the unique test directory exists
let _ = fs::create_dir_all(&data_dir); let _ = fs::create_dir_all(&data_dir);
@ -42,7 +42,7 @@ async fn test_create_and_list_threads() {
// List threads // List threads
let threads = list_threads(app.handle().clone()).await.unwrap(); let threads = list_threads(app.handle().clone()).await.unwrap();
assert!(threads.len() > 0); assert!(!threads.is_empty());
// Clean up // Clean up
let _ = fs::remove_dir_all(data_dir); let _ = fs::remove_dir_all(data_dir);
@ -88,7 +88,7 @@ async fn test_create_and_list_messages() {
let messages = list_messages(app.handle().clone(), thread_id.clone()) let messages = list_messages(app.handle().clone(), thread_id.clone())
.await .await
.unwrap(); .unwrap();
assert!(messages.len() > 0, "Expected at least one message, but got none. Thread ID: {}", thread_id); assert!(!messages.is_empty(), "Expected at least one message, but got none. Thread ID: {thread_id}");
assert_eq!(messages[0]["role"], "user"); assert_eq!(messages[0]["role"], "user");
// Clean up // Clean up
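The test assertions switch from len() > 0 to !is_empty(), which is what clippy::len_zero asks for; the two are equivalent, but is_empty states the intent directly. A minimal sketch with plain vectors rather than the thread and message fixtures used here:

    fn main() {
        let threads = vec!["t1", "t2"];
        let messages: Vec<&str> = Vec::new();

        // Equivalent checks; clippy::len_zero prefers the second form.
        assert!(threads.len() > 0);
        assert!(!threads.is_empty());

        assert!(messages.is_empty(), "expected no messages, got {}", messages.len());
    }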
View File
@ -151,17 +151,17 @@ pub fn run() {
.config() .config()
.version .version
.clone() .clone()
.unwrap_or_else(|| "".to_string()); .unwrap_or_default();
// Migrate extensions // Migrate extensions
if let Err(e) = if let Err(e) =
setup::install_extensions(app.handle().clone(), stored_version != app_version) setup::install_extensions(app.handle().clone(), stored_version != app_version)
{ {
log::error!("Failed to install extensions: {}", e); log::error!("Failed to install extensions: {e}");
} }
// Migrate MCP servers // Migrate MCP servers
if let Err(e) = setup::migrate_mcp_servers(app.handle().clone(), store.clone()) { if let Err(e) = setup::migrate_mcp_servers(app.handle().clone(), store.clone()) {
log::error!("Failed to migrate MCP servers: {}", e); log::error!("Failed to migrate MCP servers: {e}");
} }
// Store the new app version // Store the new app version
@ -187,8 +187,8 @@ pub fn run() {
.expect("error while running tauri application"); .expect("error while running tauri application");
// Handle app lifecycle events // Handle app lifecycle events
app.run(|app, event| match event { app.run(|app, event| {
RunEvent::Exit => { if let RunEvent::Exit = event {
// This is called when the app is actually exiting (e.g., macOS dock quit) // This is called when the app is actually exiting (e.g., macOS dock quit)
// We can't prevent this, so run cleanup quickly // We can't prevent this, so run cleanup quickly
let app_handle = app.clone(); let app_handle = app.clone();
@ -208,6 +208,5 @@ pub fn run() {
}); });
}); });
} }
_ => {}
}); });
} }
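The final hunk rewrites a match with one interesting arm and a _ => {} catch-all as if let, per clippy::single_match. A standalone sketch; RunEvent here is a local stand-in enum, not Tauri's type:

    enum RunEvent {
        Exit,
        Ready,
        Resumed,
    }

    fn handle(event: RunEvent) {
        // Before: a match where only one arm does anything.
        // match event {
        //     RunEvent::Exit => println!("cleaning up before exit"),
        //     _ => {}
        // }

        // After: if let expresses "only this variant matters" directly.
        if let RunEvent::Exit = event {
            println!("cleaning up before exit");
        }
    }

    fn main() {
        handle(RunEvent::Ready);
        handle(RunEvent::Exit);
        handle(RunEvent::Resumed);
    }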