* feat: Add model compatibility check and memory estimation This commit introduces a new feature to check if a given model is supported based on available device memory. The change includes: - A new `estimateKVCache` method that calculates the required memory for the model's KV cache. It uses GGUF metadata such as `block_count`, `head_count`, `key_length`, and `value_length` to perform the calculation. - An `isModelSupported` method that combines the model file size and the estimated KV cache size to determine the total memory required. It then checks if any available device has sufficient free memory to load the model. - An updated error message for the `version_backend` check to be more user-friendly, suggesting a stable internet connection as a potential solution for backend setup failures. This functionality helps prevent the application from attempting to load models that would exceed the device's memory capacity, leading to more stable and predictable behavior. fixes: #5505 * Update extensions/llamacpp-extension/src/index.ts Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com> * Update extensions/llamacpp-extension/src/index.ts Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com> * Extend this to available system RAM if GGML device is not available * fix: Improve model metadata and memory checks This commit refactors the logic for checking if a model is supported by a system's available memory. **Key changes:** - **Remote model support**: The `read_gguf_metadata` function can now fetch metadata from a remote URL by reading the file in chunks. - **Improved KV cache size calculation**: The KV cache size is now estimated more accurately by using `attention.key_length` and `attention.value_length` from the GGUF metadata, with a fallback to `embedding_length`. 
- **Granular memory check statuses**: The `isModelSupported` function now returns a more specific status (`'RED'`, `'YELLOW'`, `'GREEN'`) to indicate whether the model weights or the KV cache are too large for the available memory. - **Consolidated logic**: The logic for checking local and remote models has been consolidated into a single `isModelSupported` function, improving code clarity and maintainability. These changes provide more robust and informative model compatibility checks, especially for models hosted on remote servers. * Update extensions/llamacpp-extension/src/index.ts Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com> * Make ctx_size optional and use sum free memory across ggml devices * feat: hub and dropdown model selection handle model compatibility * feat: update badge model info color * chore: enable detail page to get compatibility model * chore: update copy * chore: update shrink indicator UI --------- Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com> Co-authored-by: Faisal Amir <urmauur@gmail.com>
143 lines
4.0 KiB
TypeScript
143 lines
4.0 KiB
TypeScript
import { useCallback, useEffect, useState } from 'react'
|
|
import { cn } from '@/lib/utils'
|
|
import {
|
|
Tooltip,
|
|
TooltipContent,
|
|
TooltipProvider,
|
|
TooltipTrigger,
|
|
} from '@/components/ui/tooltip'
|
|
import { isModelSupported } from '@/services/models'
|
|
import { getJanDataFolderPath, joinPath } from '@janhq/core'
|
|
|
|
/**
 * Props for the ModelSupportStatus indicator.
 */
interface ModelSupportStatusProps {
  /** Identifier of the model to check; undefined when no model is selected. */
  modelId: string | undefined
  /** Provider id of the model; the indicator only renders for 'llamacpp'. */
  provider: string | undefined
  /** Context size passed to the compatibility check (shown in the tooltip). */
  contextSize: number
  /** Optional extra class names applied to the status dot. */
  className?: string
}
|
|
|
|
export const ModelSupportStatus = ({
|
|
modelId,
|
|
provider,
|
|
contextSize,
|
|
className,
|
|
}: ModelSupportStatusProps) => {
|
|
const [modelSupportStatus, setModelSupportStatus] = useState<
|
|
'RED' | 'YELLOW' | 'GREEN' | 'LOADING' | null
|
|
>(null)
|
|
|
|
// Helper function to check model support with proper path resolution
|
|
const checkModelSupportWithPath = useCallback(
|
|
async (
|
|
id: string,
|
|
ctxSize: number
|
|
): Promise<'RED' | 'YELLOW' | 'GREEN'> => {
|
|
try {
|
|
// Get Jan's data folder path and construct the full model file path
|
|
// Following the llamacpp extension structure: <Jan's data folder>/llamacpp/models/<modelId>/model.gguf
|
|
const janDataFolder = await getJanDataFolderPath()
|
|
const modelFilePath = await joinPath([
|
|
janDataFolder,
|
|
'llamacpp',
|
|
'models',
|
|
id,
|
|
'model.gguf',
|
|
])
|
|
|
|
return await isModelSupported(modelFilePath, ctxSize)
|
|
} catch (error) {
|
|
console.error(
|
|
'Error checking model support with constructed path:',
|
|
error
|
|
)
|
|
// If path construction or model support check fails, assume not supported
|
|
return 'RED'
|
|
}
|
|
},
|
|
[]
|
|
)
|
|
|
|
// Helper function to get icon color based on model support status
|
|
const getStatusColor = (): string => {
|
|
switch (modelSupportStatus) {
|
|
case 'GREEN':
|
|
return 'bg-green-500'
|
|
case 'YELLOW':
|
|
return 'bg-yellow-500'
|
|
case 'RED':
|
|
return 'bg-red-500'
|
|
case 'LOADING':
|
|
return 'bg-main-view-fg/50'
|
|
default:
|
|
return 'bg-main-view-fg/50'
|
|
}
|
|
}
|
|
|
|
// Helper function to get tooltip text based on model support status
|
|
const getStatusTooltip = (): string => {
|
|
switch (modelSupportStatus) {
|
|
case 'GREEN':
|
|
return `Works Well on your device (ctx: ${contextSize})`
|
|
case 'YELLOW':
|
|
return `Might work on your device (ctx: ${contextSize})`
|
|
case 'RED':
|
|
return `Doesn't work on your device (ctx: ${contextSize})`
|
|
case 'LOADING':
|
|
return 'Checking device compatibility...'
|
|
default:
|
|
return 'Unknown'
|
|
}
|
|
}
|
|
|
|
// Check model support when model changes
|
|
useEffect(() => {
|
|
const checkModelSupport = async () => {
|
|
if (modelId && provider === 'llamacpp') {
|
|
// Set loading state immediately
|
|
setModelSupportStatus('LOADING')
|
|
try {
|
|
const supportStatus = await checkModelSupportWithPath(
|
|
modelId,
|
|
contextSize
|
|
)
|
|
setModelSupportStatus(supportStatus)
|
|
} catch (error) {
|
|
console.error('Error checking model support:', error)
|
|
setModelSupportStatus('RED')
|
|
}
|
|
} else {
|
|
// Only show status for llamacpp models since isModelSupported is specific to llamacpp
|
|
setModelSupportStatus(null)
|
|
}
|
|
}
|
|
|
|
checkModelSupport()
|
|
}, [modelId, provider, contextSize, checkModelSupportWithPath])
|
|
|
|
// Don't render anything if no status or not llamacpp
|
|
if (!modelSupportStatus || provider !== 'llamacpp') {
|
|
return null
|
|
}
|
|
|
|
return (
|
|
<TooltipProvider>
|
|
<Tooltip>
|
|
<TooltipTrigger asChild>
|
|
<div
|
|
className={cn(
|
|
'size-2 flex items-center justify-center rounded-full',
|
|
modelSupportStatus === 'LOADING'
|
|
? 'size-2.5 border border-main-view-fg/50 border-t-transparent animate-spin'
|
|
: getStatusColor(),
|
|
className
|
|
)}
|
|
/>
|
|
</TooltipTrigger>
|
|
<TooltipContent>
|
|
<p>{getStatusTooltip()}</p>
|
|
</TooltipContent>
|
|
</Tooltip>
|
|
</TooltipProvider>
|
|
)
|
|
}
|