From e80a865def0d4f07414caa347b76fddd136cdad7 Mon Sep 17 00:00:00 2001 From: Akarshan Biswas Date: Mon, 15 Sep 2025 12:35:24 +0530 Subject: [PATCH] fix: detect allocation failures as out-of-memory errors (#6459) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The Llama.cpp backend can emit the phrase “failed to allocate” when it runs out of memory. Adding this check ensures such messages are correctly classified as out‑of‑memory errors, providing more accurate error handling for CPU backends. --- src-tauri/plugins/tauri-plugin-llamacpp/src/error.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src-tauri/plugins/tauri-plugin-llamacpp/src/error.rs b/src-tauri/plugins/tauri-plugin-llamacpp/src/error.rs index 647b2fead..d26e612fb 100644 --- a/src-tauri/plugins/tauri-plugin-llamacpp/src/error.rs +++ b/src-tauri/plugins/tauri-plugin-llamacpp/src/error.rs @@ -48,6 +48,7 @@ impl LlamacppError { let lower_stderr = stderr.to_lowercase(); // TODO: add others let is_out_of_memory = lower_stderr.contains("out of memory") + || lower_stderr.contains("failed to allocate") || lower_stderr.contains("insufficient memory") || lower_stderr.contains("erroroutofdevicememory") // vulkan specific || lower_stderr.contains("kiogpucommandbuffercallbackerroroutofmemory") // Metal-specific error code