Merge pull request #3484 from janhq/fix/modal-import-gguf

fix: responsive modal for GGUF import
This commit is contained in:
Van Pham 2024-08-28 18:04:53 +07:00 committed by GitHub
commit 1c8e8ca3be
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
5 changed files with 55 additions and 37 deletions

View File

@ -159,35 +159,40 @@ const SystemMonitor = () => {
{gpus.length > 0 && (
<div className="mb-4 border-b border-[hsla(var(--app-border))] pb-4 last:border-none">
{gpus.map((gpu, index) => (
<div key={index} className="mt-4 flex flex-col gap-x-2">
<div className="flex w-full items-start justify-between">
<span className="line-clamp-1 w-1/2 font-bold">
{gpu.name}
</span>
<div className="flex gap-x-2">
<div className="">
<span>
{gpu.memoryTotal - gpu.memoryFree}/
{gpu.memoryTotal}
</span>
<span> MB</span>
{gpus.map((gpu, index) => {
const gpuUtilization = Math.round(
(gpu.memoryFree / Math.max(gpu.memoryTotal, 1)) * 100
)
return (
<div key={index} className="mt-4 flex flex-col gap-x-2">
<div className="flex w-full items-start justify-between">
<span className="line-clamp-1 w-1/2 font-bold">
{gpu.name}
</span>
<div className="flex gap-x-2">
<div className="">
<span>
{gpu.memoryTotal - gpu.memoryFree}/
{gpu.memoryTotal}
</span>
<span> MB</span>
</div>
</div>
</div>
</div>
<div className="flex items-center gap-x-4">
<Progress
value={gpu.utilization}
className="w-full"
size="small"
/>
<span className="flex-shrink-0 ">
{gpu.utilization}%
</span>
<div className="flex items-center gap-x-4">
<Progress
value={gpuUtilization}
className="w-full"
size="small"
/>
<span className="flex-shrink-0 ">
{gpuUtilization}%
</span>
</div>
</div>
</div>
))}
)
})}
</div>
)}
</div>

View File

@ -29,7 +29,7 @@ const ModelDownloadList = () => {
return (
<div className="flex h-[500px] flex-1 flex-col">
<h1 className="mb-3 font-semibold">Available Versions</h1>
<ScrollArea className="w-full lg:flex-1">
<ScrollArea className="w-full lg:h-full lg:flex-1">
{ggufModels.map((model, index) => {
if (!model.downloadUrl) return null
return (

View File

@ -10,6 +10,8 @@ import { Badge, Button, Progress } from '@janhq/joi'
import { useAtomValue, useSetAtom } from 'jotai'
import { twMerge } from 'tailwind-merge'
import { MainViewState } from '@/constants/screens'
import { useCreateNewThread } from '@/hooks/useCreateNewThread'
@ -114,16 +116,24 @@ const ModelDownloadRow: React.FC<Props> = ({
}
return (
<div className="flex flex-col gap-4 space-x-1 rounded border border-[hsla(var(--app-border))] p-3 md:flex-row md:items-center md:justify-between lg:w-[550px]">
<div className="flex">
{quantization && (
<Badge variant="soft" className="mr-1">
{quantization}
</Badge>
)}
<h1 className="mr-5 line-clamp-1 font-medium text-[hsla(var(--text-secondary))]">
{fileName}
</h1>
<div className="flex flex-col gap-4 rounded border border-[hsla(var(--app-border))] p-3 md:flex-row md:items-center md:justify-between xl:w-full">
<div className="flex justify-between">
<div className="flex">
{quantization && (
<Badge variant="soft" className="mr-1">
{quantization}
</Badge>
)}
<h1
className={twMerge(
'mr-5 line-clamp-1 font-medium text-[hsla(var(--text-secondary))]',
quantization && 'max-w-[25ch]'
)}
title={fileName}
>
{fileName}
</h1>
</div>
<Badge theme="secondary" className="hidden md:flex">
{toGibibytes(fileSize)}
</Badge>

View File

@ -33,7 +33,7 @@ const ModelSegmentInfo = () => {
if (!importingHuggingFaceRepoData) return null
return (
<div className="flex w-full flex-col space-y-4">
<div className="flex w-full flex-col space-y-4 lg:w-1/3">
<HeaderInfo title={'Model ID'}>
<h1 className="font-medium text-zinc-500 dark:text-gray-300">
{modelName}

View File

@ -4,6 +4,7 @@ export const getLogoEngine = (engine: InferenceEngine) => {
switch (engine) {
case InferenceEngine.anthropic:
return 'images/ModelProvider/anthropic.svg'
case InferenceEngine.nitro_tensorrt_llm:
case InferenceEngine.nitro:
return 'images/ModelProvider/nitro.svg'
case InferenceEngine.cortex_llamacpp:
@ -43,6 +44,8 @@ export const getTitleByEngine = (engine: InferenceEngine) => {
switch (engine) {
case InferenceEngine.nitro:
return 'Llama.cpp (Nitro)'
case InferenceEngine.nitro_tensorrt_llm:
return 'TensorRT-LLM (Nitro)'
case InferenceEngine.cortex_llamacpp:
return 'Llama.cpp (Cortex)'
case InferenceEngine.cortex_onnx: