Merge branch 'dev' into blog/add-deepresearch-piece

Daniel Ching 2025-08-08 09:16:20 +08:00 committed by GitHub
commit b29ad205df
9 changed files with 338 additions and 133 deletions

View File

@@ -1280,7 +1280,7 @@ export default class llamacpp_extension extends AIEngine {
       return sInfo
     } catch (error) {
       logger.error('Error in load command:\n', error)
-      throw new Error(`Failed to load model:\n${error}`)
+      throw error
     }
   }
@@ -1291,7 +1291,6 @@ export default class llamacpp_extension extends AIEngine {
     }
     const pid = sInfo.pid
     try {
-
       // Pass the PID as the session_id
       const result = await invoke<UnloadResult>('unload_llama_model', {
         pid: pid,
@@ -1430,13 +1429,15 @@ export default class llamacpp_extension extends AIEngine {
   }
   private async findSessionByModel(modelId: string): Promise<SessionInfo> {
     try {
-      let sInfo = await invoke<SessionInfo>('find_session_by_model', {modelId})
+      let sInfo = await invoke<SessionInfo>('find_session_by_model', {
+        modelId,
+      })
       return sInfo
     } catch (e) {
       logger.error(e)
       throw new Error(String(e))
     }
   }
   override async chat(
@@ -1507,13 +1508,13 @@ export default class llamacpp_extension extends AIEngine {
   }
   override async getLoadedModels(): Promise<string[]> {
     try {
       let models: string[] = await invoke<string[]>('get_loaded_models')
       return models
     } catch (e) {
       logger.error(e)
       throw new Error(e)
     }
   }
   async getDevices(): Promise<DeviceList[]> {

View File

@@ -19,19 +19,92 @@ use crate::core::state::AppState;
 use crate::core::state::LLamaBackendSession;

 type HmacSha256 = Hmac<Sha256>;

+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "SCREAMING_SNAKE_CASE")]
+pub enum ErrorCode {
+    BinaryNotFound,
+    ModelFileNotFound,
+    LibraryPathInvalid,
+    // --- Model Loading Errors ---
+    ModelLoadFailed,
+    DraftModelLoadFailed,
+    MultimodalProjectorLoadFailed,
+    ModelArchNotSupported,
+    ModelLoadTimedOut,
+    LlamaCppProcessError,
+    // --- Memory Errors ---
+    OutOfMemory,
+    // --- Internal Application Errors ---
+    DeviceListParseFailed,
+    IoError,
+    InternalError,
+}
+
+#[derive(Debug, Clone, Serialize, thiserror::Error)]
+#[error("LlamacppError {{ code: {code:?}, message: \"{message}\" }}")]
+pub struct LlamacppError {
+    pub code: ErrorCode,
+    pub message: String,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub details: Option<String>,
+}
+
+impl LlamacppError {
+    pub fn new(code: ErrorCode, message: String, details: Option<String>) -> Self {
+        Self {
+            code,
+            message,
+            details,
+        }
+    }
+
+    /// Parses stderr from llama.cpp and creates a specific LlamacppError.
+    pub fn from_stderr(stderr: &str) -> Self {
+        let lower_stderr = stderr.to_lowercase();
+        // TODO: add others
+        let is_out_of_memory = lower_stderr.contains("out of memory")
+            || lower_stderr.contains("insufficient memory")
+            || lower_stderr.contains("erroroutofdevicememory") // vulkan specific
+            || lower_stderr.contains("kiogpucommandbuffercallbackerroroutofmemory") // Metal-specific error code
+            || lower_stderr.contains("cuda_error_out_of_memory"); // CUDA-specific
+
+        if is_out_of_memory {
+            return Self::new(
+                ErrorCode::OutOfMemory,
+                "Out of memory. The model requires more RAM or VRAM than available.".into(),
+                Some(stderr.into()),
+            );
+        }
+
+        if lower_stderr.contains("error loading model architecture") {
+            return Self::new(
+                ErrorCode::ModelArchNotSupported,
+                "The model's architecture is not supported by this version of the backend.".into(),
+                Some(stderr.into()),
+            );
+        }
+
+        Self::new(
+            ErrorCode::LlamaCppProcessError,
+            "The model process encountered an unexpected error.".into(),
+            Some(stderr.into()),
+        )
+    }
+}
+
 // Error type for server commands
 #[derive(Debug, thiserror::Error)]
 pub enum ServerError {
-    #[error("llamacpp error: {0}")]
-    LlamacppError(String),
-    #[error("Failed to locate server binary: {0}")]
-    BinaryNotFound(String),
+    #[error(transparent)]
+    Llamacpp(#[from] LlamacppError),
     #[error("IO error: {0}")]
     Io(#[from] std::io::Error),
-    #[error("Jan API error: {0}")]
+    #[error("Tauri error: {0}")]
     Tauri(#[from] tauri::Error),
-    #[error("Parse error: {0}")]
-    ParseError(String),
 }
// impl serialization for tauri // impl serialization for tauri
@@ -40,7 +113,20 @@ impl serde::Serialize for ServerError {
     where
         S: serde::Serializer,
     {
-        serializer.serialize_str(self.to_string().as_ref())
+        let error_to_serialize: LlamacppError = match self {
+            ServerError::Llamacpp(err) => err.clone(),
+            ServerError::Io(e) => LlamacppError::new(
+                ErrorCode::IoError,
+                "An input/output error occurred.".into(),
+                Some(e.to_string()),
+            ),
+            ServerError::Tauri(e) => LlamacppError::new(
+                ErrorCode::InternalError,
+                "An internal application error occurred.".into(),
+                Some(e.to_string()),
+            ),
+        };
+        error_to_serialize.serialize(serializer)
     }
 }
@@ -110,14 +196,17 @@ pub async fn load_llama_model(
     let server_path_buf = PathBuf::from(backend_path);
     if !server_path_buf.exists() {
+        let err_msg = format!("Binary not found at {:?}", backend_path);
         log::error!(
             "Server binary not found at expected path: {:?}",
             backend_path
         );
-        return Err(ServerError::BinaryNotFound(format!(
-            "Binary not found at {:?}",
-            backend_path
-        )));
+        return Err(LlamacppError::new(
+            ErrorCode::BinaryNotFound,
+            "The llama.cpp server binary could not be found.".into(),
+            Some(err_msg),
+        )
+        .into());
     }

     let port_str = args
@@ -134,22 +223,35 @@ pub async fn load_llama_model(
         }
     };

     // FOR MODEL PATH; TODO: DO SIMILARLY FOR MMPROJ PATH
-    let model_path_index = args
-        .iter()
-        .position(|arg| arg == "-m")
-        .ok_or(ServerError::LlamacppError("Missing `-m` flag".into()))?;
+    let model_path_index = args.iter().position(|arg| arg == "-m").ok_or_else(|| {
+        LlamacppError::new(
+            ErrorCode::ModelLoadFailed,
+            "Model path argument '-m' is missing.".into(),
+            None,
+        )
+    })?;

-    let model_path = args
-        .get(model_path_index + 1)
-        .ok_or(ServerError::LlamacppError("Missing path after `-m`".into()))?
-        .clone();
+    let model_path = args.get(model_path_index + 1).cloned().ok_or_else(|| {
+        LlamacppError::new(
+            ErrorCode::ModelLoadFailed,
+            "Model path was not provided after '-m' flag.".into(),
+            None,
+        )
+    })?;

-    let model_path_pb = PathBuf::from(model_path);
+    let model_path_pb = PathBuf::from(&model_path);
     if !model_path_pb.exists() {
-        return Err(ServerError::LlamacppError(format!(
-            "Invalid or inaccessible model path: {}",
-            model_path_pb.display().to_string(),
-        )));
+        let err_msg = format!(
+            "Invalid or inaccessible model path: {}",
+            model_path_pb.display()
+        );
+        log::error!("{}", &err_msg);
+        return Err(LlamacppError::new(
+            ErrorCode::ModelFileNotFound,
+            "The specified model file does not exist or is not accessible.".into(),
+            Some(err_msg),
+        )
+        .into());
     }

     #[cfg(windows)]
     {
@@ -285,13 +387,13 @@ pub async fn load_llama_model(
                     || line_lower.contains("starting the main loop")
                     || line_lower.contains("server listening on")
                 {
-                    log::info!("Server appears to be ready based on stderr: '{}'", line);
+                    log::info!("Model appears to be ready based on logs: '{}'", line);
                     let _ = ready_tx.send(true).await;
                 }
             }
         }
         Err(e) => {
-            log::error!("Error reading stderr: {}", e);
+            log::error!("Error reading logs: {}", e);
             break;
         }
     }
@@ -304,21 +406,21 @@ pub async fn load_llama_model(
     if let Some(status) = child.try_wait()? {
         if !status.success() {
             let stderr_output = stderr_task.await.unwrap_or_default();
-            log::error!("llama.cpp exited early with code {:?}", status);
-            log::error!("--- stderr ---\n{}", stderr_output);
-            return Err(ServerError::LlamacppError(stderr_output.trim().to_string()));
+            log::error!("llama.cpp failed early with code {:?}", status);
+            log::error!("{}", stderr_output);
+            return Err(LlamacppError::from_stderr(&stderr_output).into());
         }
     }

     // Wait for server to be ready or timeout
-    let timeout_duration = Duration::from_secs(300); // 5 minutes timeout
+    let timeout_duration = Duration::from_secs(180); // 3 minutes timeout
     let start_time = Instant::now();
-    log::info!("Waiting for server to be ready...");
+    log::info!("Waiting for model session to be ready...");

     loop {
         tokio::select! {
             // Server is ready
             Some(true) = ready_rx.recv() => {
-                log::info!("Server is ready to accept requests!");
+                log::info!("Model is ready to accept requests!");
                 break;
             }
             // Check for process exit more frequently
@@ -328,10 +430,10 @@ pub async fn load_llama_model(
                 let stderr_output = stderr_task.await.unwrap_or_default();
                 if !status.success() {
                     log::error!("llama.cpp exited with error code {:?}", status);
-                    return Err(ServerError::LlamacppError(format!("Process exited with code {:?}\n\nStderr:\n{}", status, stderr_output)));
+                    return Err(LlamacppError::from_stderr(&stderr_output).into());
                 } else {
                     log::error!("llama.cpp exited successfully but without ready signal");
-                    return Err(ServerError::LlamacppError(format!("Process exited unexpectedly\n\nStderr:\n{}", stderr_output)));
+                    return Err(LlamacppError::from_stderr(&stderr_output).into());
                 }
             }
@@ -340,7 +442,11 @@ pub async fn load_llama_model(
                 log::error!("Timeout waiting for server to be ready");
                 let _ = child.kill().await;
                 let stderr_output = stderr_task.await.unwrap_or_default();
-                return Err(ServerError::LlamacppError(format!("Server startup timeout\n\nStderr:\n{}", stderr_output)));
+                return Err(LlamacppError::new(
+                    ErrorCode::ModelLoadTimedOut,
+                    "The model took too long to load and timed out.".into(),
+                    Some(format!("Timeout: {}s\n\nStderr:\n{}", timeout_duration.as_secs(), stderr_output)),
+                ).into());
             }
         }
     }
@@ -463,10 +569,12 @@ pub async fn get_devices(
            "Server binary not found at expected path: {:?}",
            backend_path
        );
-        return Err(ServerError::BinaryNotFound(format!(
-            "Binary not found at {:?}",
-            backend_path
-        )));
+        return Err(LlamacppError::new(
+            ErrorCode::BinaryNotFound,
+            "The llama.cpp server binary could not be found.".into(),
+            Some(format!("Path: {}", backend_path)),
+        )
+        .into());
     }

     // Configure the command to run the server with --list-devices
@@ -521,20 +629,21 @@ pub async fn get_devices(
     // Execute the command and wait for completion
     let output = timeout(Duration::from_secs(30), command.output())
         .await
-        .map_err(|_| ServerError::LlamacppError("Timeout waiting for device list".to_string()))?
+        .map_err(|_| {
+            LlamacppError::new(
+                ErrorCode::InternalError,
+                "Timeout waiting for device list".into(),
+                None,
+            )
+        })?
         .map_err(ServerError::Io)?;

     // Check if command executed successfully
     if !output.status.success() {
         let stderr = String::from_utf8_lossy(&output.stderr);
         log::error!("llama-server --list-devices failed: {}", stderr);
-        return Err(ServerError::LlamacppError(format!(
-            "Command failed with exit code {:?}: {}",
-            output.status.code(),
-            stderr
-        )));
+        return Err(LlamacppError::from_stderr(&stderr).into());
     }

     // Parse the output
     let stdout = String::from_utf8_lossy(&output.stdout);
     log::info!("Device list output:\n{}", stdout);
@@ -572,9 +681,12 @@ fn parse_device_output(output: &str) -> ServerResult<Vec<DeviceInfo>> {
     if devices.is_empty() && found_devices_section {
         log::warn!("No devices found in output");
     } else if !found_devices_section {
-        return Err(ServerError::ParseError(
-            "Could not find 'Available devices:' section in output".to_string(),
-        ));
+        return Err(LlamacppError::new(
+            ErrorCode::DeviceListParseFailed,
+            "Could not find 'Available devices:' section in the backend output.".into(),
+            Some(output.to_string()),
+        )
+        .into());
     }

     Ok(devices)
@@ -684,16 +796,23 @@ fn parse_memory_value(mem_str: &str) -> ServerResult<i32> {
     // Handle formats like "8000 MiB" or "7721 MiB free"
     let parts: Vec<&str> = mem_str.split_whitespace().collect();
     if parts.is_empty() {
-        return Err(ServerError::ParseError(format!(
-            "Empty memory value: '{}'",
-            mem_str
-        )));
+        return Err(LlamacppError::new(
+            ErrorCode::DeviceListParseFailed,
+            format!("empty memory value: {}", mem_str),
+            None,
+        )
+        .into());
     }

     // Take the first part which should be the number
     let number_str = parts[0];
     number_str.parse::<i32>().map_err(|_| {
-        ServerError::ParseError(format!("Could not parse memory value: '{}'", number_str))
+        LlamacppError::new(
+            ErrorCode::DeviceListParseFailed,
+            format!("Could not parse memory value: '{}'", number_str),
+            None,
+        )
+        .into()
     })
 }

View File

@@ -7,7 +7,7 @@ import {
   DialogTitle,
 } from '@/components/ui/dialog'
 import { Button } from '@/components/ui/button'
-import { AlertTriangle } from 'lucide-react'
+import { AlertTriangle, ChevronDown, ChevronRight } from 'lucide-react'
 import { IconCopy, IconCopyCheck } from '@tabler/icons-react'
 import { useTranslation } from '@/i18n/react-i18next-compat'
 import { useModelLoad } from '@/hooks/useModelLoad'
@@ -18,11 +18,47 @@ export default function LoadModelErrorDialog() {
   const { t } = useTranslation()
   const { modelLoadError, setModelLoadError } = useModelLoad()
   const [isCopying, setIsCopying] = useState(false)
+  const [isDetailExpanded, setIsDetailExpanded] = useState(true)
+
+  const getErrorDetail = (error: string | object | undefined) => {
+    if (!error || typeof error !== 'object') return null
+    if ('details' in error) {
+      return (error as { details?: string }).details
+    }
+    return null
+  }
+
+  const hasErrorDetail = (error: string | object | undefined) => {
+    return Boolean(getErrorDetail(error))
+  }
+
+  const formatErrorForCopy = (error: string | object | undefined) => {
+    if (!error) return ''
+    if (typeof error === 'string') return error
+    if (typeof error === 'object' && 'code' in error && 'message' in error) {
+      const errorObj = error as {
+        code?: string
+        message: string
+        details?: string
+      }
+      let copyText = errorObj.code
+        ? `${errorObj.code}: ${errorObj.message}`
+        : errorObj.message
+      if (errorObj.details) {
+        copyText += `\n\nDetails:\n${errorObj.details}`
+      }
+      return copyText
+    }
+    return JSON.stringify(error)
+  }

   const handleCopy = async () => {
     setIsCopying(true)
     try {
-      await navigator.clipboard.writeText(modelLoadError ?? '')
+      await navigator.clipboard.writeText(formatErrorForCopy(modelLoadError))
       toast.success('Copy successful', {
         id: 'copy-model',
         description: 'Model load error information copied to clipboard',
@@ -58,17 +94,59 @@
           </div>
         </DialogHeader>

-        <div className="bg-main-view-fg/8 p-2 border border-main-view-fg/5 rounded-lg">
-          <p
-            className="text-sm text-main-view-fg/70 leading-relaxed max-h-[200px] overflow-y-auto break-all"
-            ref={(el) => {
-              if (el) {
-                el.scrollTop = el.scrollHeight
-              }
-            }}
-          >
-            {modelLoadError}
-          </p>
+        <div className="bg-main-view-fg/2 p-2 border border-main-view-fg/5 rounded-lg space-y-2">
+          {typeof modelLoadError === 'object' &&
+          modelLoadError &&
+          'code' in modelLoadError &&
+          'message' in modelLoadError ? (
+            <div>
+              {(modelLoadError as { code?: string }).code && (
+                <div>
+                  <p className="text-sm text-main-view-fg/80 leading-relaxed break-all">
+                    {(modelLoadError as { code: string }).code}
+                  </p>
+                </div>
+              )}
+              <div>
+                <p className="text-sm text-main-view-fg/60 leading-relaxed break-all">
+                  {(modelLoadError as { message: string }).message}
+                </p>
+              </div>
+            </div>
+          ) : (
+            <p className="text-sm text-main-view-fg/70 leading-relaxed break-all">
+              {String(modelLoadError)}
+            </p>
+          )}
+          {hasErrorDetail(modelLoadError) && (
+            <div>
+              <button
+                onClick={() => setIsDetailExpanded(!isDetailExpanded)}
+                className="flex items-center gap-1 text-sm text-main-view-fg/60 hover:text-main-view-fg/80 transition-colors cursor-pointer"
+              >
+                {isDetailExpanded ? (
+                  <ChevronDown className="size-3" />
+                ) : (
+                  <ChevronRight className="size-3" />
+                )}
+                Details
+              </button>
+              {isDetailExpanded && (
+                <div
+                  className="mt-2 text-sm text-main-view-fg/70 leading-relaxed max-h-[150px] overflow-y-auto break-all bg-main-view-fg/10 p-2 rounded border border-main-view-fg/5"
+                  ref={(el) => {
+                    if (el) {
+                      el.scrollTop = el.scrollHeight
+                    }
+                  }}
+                >
+                  {getErrorDetail(modelLoadError)}
+                </div>
+              )}
+            </div>
+          )}
         </div>

         <DialogFooter className="flex flex-col gap-2 sm:flex-row sm:justify-right">

View File

@@ -5,12 +5,6 @@ import {
   DropdownMenuTrigger,
 } from '@/components/ui/dropdown-menu'
-import {
-  Tooltip,
-  TooltipTrigger,
-  TooltipContent,
-} from '@/components/ui/tooltip'
-import { IconStarFilled } from '@tabler/icons-react'
 import { cn } from '@/lib/utils'

 // Dropdown component
@@ -24,7 +18,6 @@ type DropdownControlProps = {
 export function DropdownControl({
   value,
   options = [],
-  recommended,
   onChange,
 }: DropdownControlProps) {
   const isSelected =
@@ -48,18 +41,6 @@
               )}
             >
               <span>{option.name}</span>
-              {recommended === option.value && (
-                <Tooltip>
-                  <TooltipTrigger asChild>
-                    <div className="cursor-pointer">
-                      <IconStarFilled className="text-accent" />
-                    </div>
-                  </TooltipTrigger>
-                  <TooltipContent side="top" sideOffset={8} className="z-50">
-                    Recommended
-                  </TooltipContent>
-                </Tooltip>
-              )}
             </DropdownMenuItem>
           ))}
         </DropdownMenuContent>

View File

@@ -428,11 +428,11 @@ export const useChat = () => {
         }
       } catch (error) {
         if (!abortController.signal.aborted) {
-          const errorMessage =
-            error && typeof error === 'object' && 'message' in error
-              ? error.message
-              : error
-          setModelLoadError(`${errorMessage}`)
+          if (error && typeof error === 'object' && 'message' in error) {
+            setModelLoadError(error as ErrorObject)
+          } else {
+            setModelLoadError(`${error}`)
+          }
         }
       } finally {
         updateLoadingModel(false)
@@ -453,6 +453,7 @@ export const useChat = () => {
       setPrompt,
       selectedModel,
       currentAssistant,
+      experimentalFeatures,
       tools,
       updateLoadingModel,
       getDisabledToolsForThread,

View File

@@ -1,8 +1,8 @@
 import { create } from 'zustand'

 type ModelLoadState = {
-  modelLoadError?: string
-  setModelLoadError: (error: string | undefined) => void
+  modelLoadError?: string | ErrorObject
+  setModelLoadError: (error: string | ErrorObject | undefined) => void
 }

 export const useModelLoad = create<ModelLoadState>()((set) => ({

View File

@@ -211,7 +211,11 @@ function ProviderDetail() {
         })
         .catch((error) => {
           console.error('Error starting model:', error)
-          setModelLoadError(`${error.message}`)
+          if (error && typeof error === 'object' && 'message' in error) {
+            setModelLoadError(error)
+          } else {
+            setModelLoadError(`${error}`)
+          }
         })
         .finally(() => {
           // Remove model from loading state
@@ -384,29 +388,43 @@
                       : false
                   }
                   description={
-                    <RenderMarkdown
-                      className="![>p]:text-main-view-fg/70 select-none"
-                      content={setting.description}
-                      components={{
-                        // Make links open in a new tab
-                        a: ({ ...props }) => {
-                          return (
-                            <a
-                              {...props}
-                              target="_blank"
-                              rel="noopener noreferrer"
-                              className={cn(
-                                setting.key === 'api-key' &&
-                                  'second-step-setup-remote-provider'
-                              )}
-                            />
-                          )
-                        },
-                        p: ({ ...props }) => (
-                          <p {...props} className="!mb-0" />
-                        ),
-                      }}
-                    />
+                    <>
+                      <RenderMarkdown
+                        className="![>p]:text-main-view-fg/70 select-none"
+                        content={setting.description}
+                        components={{
+                          // Make links open in a new tab
+                          a: ({ ...props }) => {
+                            return (
+                              <a
+                                {...props}
+                                target="_blank"
+                                rel="noopener noreferrer"
+                                className={cn(
+                                  setting.key === 'api-key' &&
+                                    'second-step-setup-remote-provider'
+                                )}
+                              />
+                            )
+                          },
+                          p: ({ ...props }) => (
+                            <p {...props} className="!mb-0" />
+                          ),
+                        }}
+                      />
+                      {setting.key === 'version_backend' &&
+                        setting.controller_props?.recommended && (
+                          <div className="mt-1 text-sm text-main-view-fg/60">
+                            <span className="font-medium">
+                              {setting.controller_props.recommended
+                                ?.split('/')
+                                .pop() ||
+                                setting.controller_props.recommended}
+                            </span>
+                            <span> is the recommended backend.</span>
+                          </div>
+                        )}
+                    </>
                   }
                   actions={actionComponent}
                 />

View File

@@ -5,3 +5,9 @@ interface LogEntry {
   target: string
   message: string
 }
+
+type ErrorObject = {
+  code?: string
+  message: string
+  details?: string
+}

View File

@@ -7,6 +7,7 @@ type ControllerProps = {
   type?: string
   options?: Array<{ value: number | string; name: string }>
   input_actions?: string[]
+  recommended?: string
 }

 /**