Merge branch 'dev' into blog/add-deepresearch-piece
Commit: b29ad205df

@@ -1280,7 +1280,7 @@ export default class llamacpp_extension extends AIEngine {
      return sInfo
    } catch (error) {
      logger.error('Error in load command:\n', error)
-      throw new Error(`Failed to load model:\n${error}`)
+      throw error
    }
  }

@@ -1291,7 +1291,6 @@ export default class llamacpp_extension extends AIEngine {
    }
    const pid = sInfo.pid
    try {
-
      // Pass the PID as the session_id
      const result = await invoke<UnloadResult>('unload_llama_model', {
        pid: pid,

@@ -1430,13 +1429,15 @@ export default class llamacpp_extension extends AIEngine {
  }

  private async findSessionByModel(modelId: string): Promise<SessionInfo> {
-    try {
-      let sInfo = await invoke<SessionInfo>('find_session_by_model', {modelId})
-      return sInfo
-    } catch (e) {
-      logger.error(e)
-      throw new Error(String(e))
-    }
+    try {
+      let sInfo = await invoke<SessionInfo>('find_session_by_model', {
+        modelId,
+      })
+      return sInfo
+    } catch (e) {
+      logger.error(e)
+      throw new Error(String(e))
+    }
  }

  override async chat(

@@ -1507,13 +1508,13 @@ export default class llamacpp_extension extends AIEngine {
  }

  override async getLoadedModels(): Promise<string[]> {
-    try {
-      let models: string[] = await invoke<string[]>('get_loaded_models')
-      return models
-    } catch (e) {
-      logger.error(e)
-      throw new Error(e)
-    }
+    try {
+      let models: string[] = await invoke<string[]>('get_loaded_models')
+      return models
+    } catch (e) {
+      logger.error(e)
+      throw new Error(e)
+    }
  }

  async getDevices(): Promise<DeviceList[]> {

@@ -19,19 +19,92 @@ use crate::core::state::AppState;
 use crate::core::state::LLamaBackendSession;

 type HmacSha256 = Hmac<Sha256>;

+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "SCREAMING_SNAKE_CASE")]
+pub enum ErrorCode {
+    BinaryNotFound,
+    ModelFileNotFound,
+    LibraryPathInvalid,
+
+    // --- Model Loading Errors ---
+    ModelLoadFailed,
+    DraftModelLoadFailed,
+    MultimodalProjectorLoadFailed,
+    ModelArchNotSupported,
+    ModelLoadTimedOut,
+    LlamaCppProcessError,
+
+    // --- Memory Errors ---
+    OutOfMemory,
+
+    // --- Internal Application Errors ---
+    DeviceListParseFailed,
+    IoError,
+    InternalError,
+}
+
+#[derive(Debug, Clone, Serialize, thiserror::Error)]
+#[error("LlamacppError {{ code: {code:?}, message: \"{message}\" }}")]
+pub struct LlamacppError {
+    pub code: ErrorCode,
+    pub message: String,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub details: Option<String>,
+}
+impl LlamacppError {
+    pub fn new(code: ErrorCode, message: String, details: Option<String>) -> Self {
+        Self {
+            code,
+            message,
+            details,
+        }
+    }
+
+    /// Parses stderr from llama.cpp and creates a specific LlamacppError.
+    pub fn from_stderr(stderr: &str) -> Self {
+        let lower_stderr = stderr.to_lowercase();
+        // TODO: add others
+        let is_out_of_memory = lower_stderr.contains("out of memory")
+            || lower_stderr.contains("insufficient memory")
+            || lower_stderr.contains("erroroutofdevicememory") // vulkan specific
+            || lower_stderr.contains("kiogpucommandbuffercallbackerroroutofmemory") // Metal-specific error code
+            || lower_stderr.contains("cuda_error_out_of_memory"); // CUDA-specific
+
+        if is_out_of_memory {
+            return Self::new(
+                ErrorCode::OutOfMemory,
+                "Out of memory. The model requires more RAM or VRAM than available.".into(),
+                Some(stderr.into()),
+            );
+        }
+
+        if lower_stderr.contains("error loading model architecture") {
+            return Self::new(
+                ErrorCode::ModelArchNotSupported,
+                "The model's architecture is not supported by this version of the backend.".into(),
+                Some(stderr.into()),
+            );
+        }
+        Self::new(
+            ErrorCode::LlamaCppProcessError,
+            "The model process encountered an unexpected error.".into(),
+            Some(stderr.into()),
+        )
+    }
+}
+
 // Error type for server commands
 #[derive(Debug, thiserror::Error)]
 pub enum ServerError {
-    #[error("llamacpp error: {0}")]
-    LlamacppError(String),
-    #[error("Failed to locate server binary: {0}")]
-    BinaryNotFound(String),
+    #[error(transparent)]
+    Llamacpp(#[from] LlamacppError),
+
     #[error("IO error: {0}")]
     Io(#[from] std::io::Error),
     #[error("Jan API error: {0}")]
     #[error("Tauri error: {0}")]
     Tauri(#[from] tauri::Error),
     #[error("Parse error: {0}")]
     ParseError(String),
 }

 // impl serialization for tauri

@@ -40,7 +113,20 @@ impl serde::Serialize for ServerError {
    where
        S: serde::Serializer,
    {
-        serializer.serialize_str(self.to_string().as_ref())
+        let error_to_serialize: LlamacppError = match self {
+            ServerError::Llamacpp(err) => err.clone(),
+            ServerError::Io(e) => LlamacppError::new(
+                ErrorCode::IoError,
+                "An input/output error occurred.".into(),
+                Some(e.to_string()),
+            ),
+            ServerError::Tauri(e) => LlamacppError::new(
+                ErrorCode::InternalError,
+                "An internal application error occurred.".into(),
+                Some(e.to_string()),
+            ),
+        };
+        error_to_serialize.serialize(serializer)
    }
}

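Since every ServerError now reaches the webview as a serialized LlamacppError, a rejected invoke call should surface a structured object rather than a flat string. A minimal TypeScript sketch of that shape, assuming serde's default field names, the SCREAMING_SNAKE_CASE rename applied to code, and details being omitted when it is None; the type and helper names below are illustrative and not part of this change:

// Sketch of the rejection payload a failed backend command produces.
type LlamacppErrorPayload = {
  code: string // e.g. 'OUT_OF_MEMORY' or 'BINARY_NOT_FOUND'
  message: string // short, human-readable summary
  details?: string // raw stderr or extra context, present only when set
}

// Narrowing an unknown rejection value, mirroring the checks added in the UI below.
function toLlamacppError(e: unknown): LlamacppErrorPayload | null {
  if (e && typeof e === 'object' && 'code' in e && 'message' in e) {
    return e as LlamacppErrorPayload
  }
  return null
}
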
@@ -110,14 +196,17 @@ pub async fn load_llama_model(

    let server_path_buf = PathBuf::from(backend_path);
    if !server_path_buf.exists() {
+        let err_msg = format!("Binary not found at {:?}", backend_path);
        log::error!(
            "Server binary not found at expected path: {:?}",
            backend_path
        );
-        return Err(ServerError::BinaryNotFound(format!(
-            "Binary not found at {:?}",
-            backend_path
-        )));
+        return Err(LlamacppError::new(
+            ErrorCode::BinaryNotFound,
+            "The llama.cpp server binary could not be found.".into(),
+            Some(err_msg),
+        )
+        .into());
    }

    let port_str = args

@@ -134,22 +223,35 @@ pub async fn load_llama_model(
        }
    };
    // FOR MODEL PATH; TODO: DO SIMILARLY FOR MMPROJ PATH
-    let model_path_index = args
-        .iter()
-        .position(|arg| arg == "-m")
-        .ok_or(ServerError::LlamacppError("Missing `-m` flag".into()))?;
+    let model_path_index = args.iter().position(|arg| arg == "-m").ok_or_else(|| {
+        LlamacppError::new(
+            ErrorCode::ModelLoadFailed,
+            "Model path argument '-m' is missing.".into(),
+            None,
+        )
+    })?;

-    let model_path = args
-        .get(model_path_index + 1)
-        .ok_or(ServerError::LlamacppError("Missing path after `-m`".into()))?
-        .clone();
+    let model_path = args.get(model_path_index + 1).cloned().ok_or_else(|| {
+        LlamacppError::new(
+            ErrorCode::ModelLoadFailed,
+            "Model path was not provided after '-m' flag.".into(),
+            None,
+        )
+    })?;

-    let model_path_pb = PathBuf::from(model_path);
+    let model_path_pb = PathBuf::from(&model_path);
    if !model_path_pb.exists() {
-        return Err(ServerError::LlamacppError(format!(
-            "Invalid or inaccessible model path: {}",
-            model_path_pb.display().to_string(),
-        )));
+        let err_msg = format!(
+            "Invalid or inaccessible model path: {}",
+            model_path_pb.display()
+        );
+        log::error!("{}", &err_msg);
+        return Err(LlamacppError::new(
+            ErrorCode::ModelFileNotFound,
+            "The specified model file does not exist or is not accessible.".into(),
+            Some(err_msg),
+        )
+        .into());
    }
    #[cfg(windows)]
    {

@@ -285,13 +387,13 @@ pub async fn load_llama_model(
                    || line_lower.contains("starting the main loop")
                    || line_lower.contains("server listening on")
                {
-                    log::info!("Server appears to be ready based on stderr: '{}'", line);
+                    log::info!("Model appears to be ready based on logs: '{}'", line);
                    let _ = ready_tx.send(true).await;
                }
            }
        }
        Err(e) => {
-            log::error!("Error reading stderr: {}", e);
+            log::error!("Error reading logs: {}", e);
            break;
        }
    }

@@ -304,21 +406,21 @@ pub async fn load_llama_model(
    if let Some(status) = child.try_wait()? {
        if !status.success() {
            let stderr_output = stderr_task.await.unwrap_or_default();
-            log::error!("llama.cpp exited early with code {:?}", status);
-            log::error!("--- stderr ---\n{}", stderr_output);
-            return Err(ServerError::LlamacppError(stderr_output.trim().to_string()));
+            log::error!("llama.cpp failed early with code {:?}", status);
+            log::error!("{}", stderr_output);
+            return Err(LlamacppError::from_stderr(&stderr_output).into());
        }
    }

    // Wait for server to be ready or timeout
-    let timeout_duration = Duration::from_secs(300); // 5 minutes timeout
+    let timeout_duration = Duration::from_secs(180); // 3 minutes timeout
    let start_time = Instant::now();
-    log::info!("Waiting for server to be ready...");
+    log::info!("Waiting for model session to be ready...");
    loop {
        tokio::select! {
            // Server is ready
            Some(true) = ready_rx.recv() => {
-                log::info!("Server is ready to accept requests!");
+                log::info!("Model is ready to accept requests!");
                break;
            }
            // Check for process exit more frequently

@@ -328,10 +430,10 @@ pub async fn load_llama_model(
                let stderr_output = stderr_task.await.unwrap_or_default();
                if !status.success() {
                    log::error!("llama.cpp exited with error code {:?}", status);
-                    return Err(ServerError::LlamacppError(format!("Process exited with code {:?}\n\nStderr:\n{}", status, stderr_output)));
+                    return Err(LlamacppError::from_stderr(&stderr_output).into());
                } else {
                    log::error!("llama.cpp exited successfully but without ready signal");
-                    return Err(ServerError::LlamacppError(format!("Process exited unexpectedly\n\nStderr:\n{}", stderr_output)));
+                    return Err(LlamacppError::from_stderr(&stderr_output).into());
                }
            }

@@ -340,7 +442,11 @@ pub async fn load_llama_model(
                log::error!("Timeout waiting for server to be ready");
                let _ = child.kill().await;
                let stderr_output = stderr_task.await.unwrap_or_default();
-                return Err(ServerError::LlamacppError(format!("Server startup timeout\n\nStderr:\n{}", stderr_output)));
+                return Err(LlamacppError::new(
+                    ErrorCode::ModelLoadTimedOut,
+                    "The model took too long to load and timed out.".into(),
+                    Some(format!("Timeout: {}s\n\nStderr:\n{}", timeout_duration.as_secs(), stderr_output)),
+                ).into());
            }
        }
    }

@@ -463,10 +569,12 @@ pub async fn get_devices(
            "Server binary not found at expected path: {:?}",
            backend_path
        );
-        return Err(ServerError::BinaryNotFound(format!(
-            "Binary not found at {:?}",
-            backend_path
-        )));
+        return Err(LlamacppError::new(
+            ErrorCode::BinaryNotFound,
+            "The llama.cpp server binary could not be found.".into(),
+            Some(format!("Path: {}", backend_path)),
+        )
+        .into());
    }

    // Configure the command to run the server with --list-devices

@@ -521,20 +629,21 @@ pub async fn get_devices(
    // Execute the command and wait for completion
    let output = timeout(Duration::from_secs(30), command.output())
        .await
-        .map_err(|_| ServerError::LlamacppError("Timeout waiting for device list".to_string()))?
+        .map_err(|_| {
+            LlamacppError::new(
+                ErrorCode::InternalError,
+                "Timeout waiting for device list".into(),
+                None,
+            )
+        })?
        .map_err(ServerError::Io)?;

    // Check if command executed successfully
    if !output.status.success() {
        let stderr = String::from_utf8_lossy(&output.stderr);
        log::error!("llama-server --list-devices failed: {}", stderr);
-        return Err(ServerError::LlamacppError(format!(
-            "Command failed with exit code {:?}: {}",
-            output.status.code(),
-            stderr
-        )));
+        return Err(LlamacppError::from_stderr(&stderr).into());
    }

    // Parse the output
    let stdout = String::from_utf8_lossy(&output.stdout);
    log::info!("Device list output:\n{}", stdout);

@@ -572,9 +681,12 @@ fn parse_device_output(output: &str) -> ServerResult<Vec<DeviceInfo>> {
    if devices.is_empty() && found_devices_section {
        log::warn!("No devices found in output");
    } else if !found_devices_section {
-        return Err(ServerError::ParseError(
-            "Could not find 'Available devices:' section in output".to_string(),
-        ));
+        return Err(LlamacppError::new(
+            ErrorCode::DeviceListParseFailed,
+            "Could not find 'Available devices:' section in the backend output.".into(),
+            Some(output.to_string()),
+        )
+        .into());
    }

    Ok(devices)

@@ -684,16 +796,23 @@ fn parse_memory_value(mem_str: &str) -> ServerResult<i32> {
    // Handle formats like "8000 MiB" or "7721 MiB free"
    let parts: Vec<&str> = mem_str.split_whitespace().collect();
    if parts.is_empty() {
-        return Err(ServerError::ParseError(format!(
-            "Empty memory value: '{}'",
-            mem_str
-        )));
+        return Err(LlamacppError::new(
+            ErrorCode::DeviceListParseFailed,
+            format!("empty memory value: {}", mem_str),
+            None,
+        )
+        .into());
    }

    // Take the first part which should be the number
    let number_str = parts[0];
    number_str.parse::<i32>().map_err(|_| {
-        ServerError::ParseError(format!("Could not parse memory value: '{}'", number_str))
+        LlamacppError::new(
+            ErrorCode::DeviceListParseFailed,
+            format!("Could not parse memory value: '{}'", number_str),
+            None,
+        )
+        .into()
    })
}

@@ -7,7 +7,7 @@ import {
  DialogTitle,
} from '@/components/ui/dialog'
import { Button } from '@/components/ui/button'
-import { AlertTriangle } from 'lucide-react'
+import { AlertTriangle, ChevronDown, ChevronRight } from 'lucide-react'
import { IconCopy, IconCopyCheck } from '@tabler/icons-react'
import { useTranslation } from '@/i18n/react-i18next-compat'
import { useModelLoad } from '@/hooks/useModelLoad'

@@ -18,11 +18,47 @@ export default function LoadModelErrorDialog() {
  const { t } = useTranslation()
  const { modelLoadError, setModelLoadError } = useModelLoad()
  const [isCopying, setIsCopying] = useState(false)
+  const [isDetailExpanded, setIsDetailExpanded] = useState(true)
+
+  const getErrorDetail = (error: string | object | undefined) => {
+    if (!error || typeof error !== 'object') return null
+    if ('details' in error) {
+      return (error as { details?: string }).details
+    }
+    return null
+  }
+
+  const hasErrorDetail = (error: string | object | undefined) => {
+    return Boolean(getErrorDetail(error))
+  }
+
+  const formatErrorForCopy = (error: string | object | undefined) => {
+    if (!error) return ''
+
+    if (typeof error === 'string') return error
+
+    if (typeof error === 'object' && 'code' in error && 'message' in error) {
+      const errorObj = error as {
+        code?: string
+        message: string
+        details?: string
+      }
+      let copyText = errorObj.code
+        ? `${errorObj.code}: ${errorObj.message}`
+        : errorObj.message
+      if (errorObj.details) {
+        copyText += `\n\nDetails:\n${errorObj.details}`
+      }
+      return copyText
+    }
+
+    return JSON.stringify(error)
+  }

  const handleCopy = async () => {
    setIsCopying(true)
    try {
-      await navigator.clipboard.writeText(modelLoadError ?? '')
+      await navigator.clipboard.writeText(formatErrorForCopy(modelLoadError))
      toast.success('Copy successful', {
        id: 'copy-model',
        description: 'Model load error information copied to clipboard',

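For reference, a worked example of what formatErrorForCopy places on the clipboard for a structured error. The values are placeholders chosen to match the backend's out-of-memory message, not captured output:

// Placeholder data, for illustration only.
const sample: ErrorObject = {
  code: 'OUT_OF_MEMORY',
  message: 'Out of memory. The model requires more RAM or VRAM than available.',
  details: '<raw llama.cpp stderr captured by the backend>',
}
// formatErrorForCopy(sample) returns:
// "OUT_OF_MEMORY: Out of memory. The model requires more RAM or VRAM than available.
//
// Details:
// <raw llama.cpp stderr captured by the backend>"
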
@@ -58,17 +94,59 @@ export default function LoadModelErrorDialog() {
          </div>
        </DialogHeader>

-        <div className="bg-main-view-fg/8 p-2 border border-main-view-fg/5 rounded-lg">
-          <p
-            className="text-sm text-main-view-fg/70 leading-relaxed max-h-[200px] overflow-y-auto break-all"
-            ref={(el) => {
-              if (el) {
-                el.scrollTop = el.scrollHeight
-              }
-            }}
-          >
-            {modelLoadError}
-          </p>
+        <div className="bg-main-view-fg/2 p-2 border border-main-view-fg/5 rounded-lg space-y-2">
+          {typeof modelLoadError === 'object' &&
+          modelLoadError &&
+          'code' in modelLoadError &&
+          'message' in modelLoadError ? (
+            <div>
+              {(modelLoadError as { code?: string }).code && (
+                <div>
+                  <p className="text-sm text-main-view-fg/80 leading-relaxed break-all">
+                    {(modelLoadError as { code: string }).code}
+                  </p>
+                </div>
+              )}
+              <div>
+                <p className="text-sm text-main-view-fg/60 leading-relaxed break-all">
+                  {(modelLoadError as { message: string }).message}
+                </p>
+              </div>
+            </div>
+          ) : (
+            <p className="text-sm text-main-view-fg/70 leading-relaxed break-all">
+              {String(modelLoadError)}
+            </p>
+          )}
+
+          {hasErrorDetail(modelLoadError) && (
+            <div>
+              <button
+                onClick={() => setIsDetailExpanded(!isDetailExpanded)}
+                className="flex items-center gap-1 text-sm text-main-view-fg/60 hover:text-main-view-fg/80 transition-colors cursor-pointer"
+              >
+                {isDetailExpanded ? (
+                  <ChevronDown className="size-3" />
+                ) : (
+                  <ChevronRight className="size-3" />
+                )}
+                Details
+              </button>
+
+              {isDetailExpanded && (
+                <div
+                  className="mt-2 text-sm text-main-view-fg/70 leading-relaxed max-h-[150px] overflow-y-auto break-all bg-main-view-fg/10 p-2 rounded border border-main-view-fg/5"
+                  ref={(el) => {
+                    if (el) {
+                      el.scrollTop = el.scrollHeight
+                    }
+                  }}
+                >
+                  {getErrorDetail(modelLoadError)}
+                </div>
+              )}
+            </div>
+          )}
        </div>

        <DialogFooter className="flex flex-col gap-2 sm:flex-row sm:justify-right">

@@ -5,12 +5,6 @@ import {
  DropdownMenuTrigger,
} from '@/components/ui/dropdown-menu'

-import {
-  Tooltip,
-  TooltipTrigger,
-  TooltipContent,
-} from '@/components/ui/tooltip'
-import { IconStarFilled } from '@tabler/icons-react'
import { cn } from '@/lib/utils'

// Dropdown component

@@ -24,7 +18,6 @@ type DropdownControlProps = {
export function DropdownControl({
  value,
  options = [],
-  recommended,
  onChange,
}: DropdownControlProps) {
  const isSelected =

@@ -48,18 +41,6 @@ export function DropdownControl({
            )}
          >
            <span>{option.name}</span>
-            {recommended === option.value && (
-              <Tooltip>
-                <TooltipTrigger asChild>
-                  <div className="cursor-pointer">
-                    <IconStarFilled className="text-accent" />
-                  </div>
-                </TooltipTrigger>
-                <TooltipContent side="top" sideOffset={8} className="z-50">
-                  Recommended
-                </TooltipContent>
-              </Tooltip>
-            )}
          </DropdownMenuItem>
        ))}
      </DropdownMenuContent>

@@ -428,11 +428,11 @@ export const useChat = () => {
        }
      } catch (error) {
        if (!abortController.signal.aborted) {
-          const errorMessage =
-            error && typeof error === 'object' && 'message' in error
-              ? error.message
-              : error
-          setModelLoadError(`${errorMessage}`)
+          if (error && typeof error === 'object' && 'message' in error) {
+            setModelLoadError(error as ErrorObject)
+          } else {
+            setModelLoadError(`${error}`)
+          }
        }
      } finally {
        updateLoadingModel(false)

@@ -453,6 +453,7 @@ export const useChat = () => {
      setPrompt,
      selectedModel,
      currentAssistant,
+      experimentalFeatures,
      tools,
      updateLoadingModel,
      getDisabledToolsForThread,

@@ -1,8 +1,8 @@
import { create } from 'zustand'

type ModelLoadState = {
-  modelLoadError?: string
-  setModelLoadError: (error: string | undefined) => void
+  modelLoadError?: string | ErrorObject
+  setModelLoadError: (error: string | ErrorObject | undefined) => void
}

export const useModelLoad = create<ModelLoadState>()((set) => ({

@@ -211,7 +211,11 @@ function ProviderDetail() {
        })
        .catch((error) => {
          console.error('Error starting model:', error)
-          setModelLoadError(`${error.message}`)
+          if (error && typeof error === 'object' && 'message' in error) {
+            setModelLoadError(error)
+          } else {
+            setModelLoadError(`${error}`)
+          }
        })
        .finally(() => {
          // Remove model from loading state

@@ -384,29 +388,43 @@ function ProviderDetail() {
                      : false
                  }
                  description={
-                    <RenderMarkdown
-                      className="![>p]:text-main-view-fg/70 select-none"
-                      content={setting.description}
-                      components={{
-                        // Make links open in a new tab
-                        a: ({ ...props }) => {
-                          return (
-                            <a
-                              {...props}
-                              target="_blank"
-                              rel="noopener noreferrer"
-                              className={cn(
-                                setting.key === 'api-key' &&
-                                  'second-step-setup-remote-provider'
-                              )}
-                            />
-                          )
-                        },
-                        p: ({ ...props }) => (
-                          <p {...props} className="!mb-0" />
-                        ),
-                      }}
-                    />
+                    <>
+                      <RenderMarkdown
+                        className="![>p]:text-main-view-fg/70 select-none"
+                        content={setting.description}
+                        components={{
+                          // Make links open in a new tab
+                          a: ({ ...props }) => {
+                            return (
+                              <a
+                                {...props}
+                                target="_blank"
+                                rel="noopener noreferrer"
+                                className={cn(
+                                  setting.key === 'api-key' &&
+                                    'second-step-setup-remote-provider'
+                                )}
+                              />
+                            )
+                          },
+                          p: ({ ...props }) => (
+                            <p {...props} className="!mb-0" />
+                          ),
+                        }}
+                      />
+                      {setting.key === 'version_backend' &&
+                        setting.controller_props?.recommended && (
+                          <div className="mt-1 text-sm text-main-view-fg/60">
+                            <span className="font-medium">
+                              {setting.controller_props.recommended
+                                ?.split('/')
+                                .pop() ||
+                                setting.controller_props.recommended}
+                            </span>
+                            <span> is the recommended backend.</span>
+                          </div>
+                        )}
+                    </>
                  }
                  actions={actionComponent}
                />

web-app/src/types/app.d.ts (vendored, 6 lines changed)
@@ -5,3 +5,9 @@ interface LogEntry {
  target: string
  message: string
}
+
+type ErrorObject = {
+  code?: string
+  message: string
+  details?: string
+}

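The same object-with-message narrowing now appears in both useChat and ProviderDetail. A possible follow-up, not part of this diff, would be to centralize it in a type guard for ErrorObject:

// Hypothetical helper, not included in this change.
function isErrorObject(value: unknown): value is ErrorObject {
  return (
    typeof value === 'object' &&
    value !== null &&
    'message' in value &&
    typeof (value as { message: unknown }).message === 'string'
  )
}

// Usage sketch:
//   if (isErrorObject(error)) setModelLoadError(error)
//   else setModelLoadError(`${error}`)
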
web-app/src/types/modelProviders.d.ts (vendored, 1 line changed)
@@ -7,6 +7,7 @@ type ControllerProps = {
  type?: string
  options?: Array<{ value: number | string; name: string }>
  input_actions?: string[]
+  recommended?: string
}

/**