Chore: Change CommandR to unavailable (#2722)

* fix: move to coming soon

* fix: Q4 for consistency

* bump extension version

* bump model version

* fix: highlight unsupported tag

---------

Co-authored-by: Louis <louis@jan.ai>
Hoang Ha 2024-04-15 12:57:52 +07:00 committed by GitHub
parent aff6a7d11a
commit b908ae2933
7 changed files with 42 additions and 35 deletions

package.json (@janhq/inference-nitro-extension)

@@ -1,7 +1,7 @@
 {
   "name": "@janhq/inference-nitro-extension",
   "productName": "Nitro Inference Engine",
-  "version": "1.0.0",
+  "version": "1.0.1",
   "description": "This extension embeds Nitro, a lightweight (3mb) inference engine written in C++. See https://nitro.jan.ai.\nAdditional dependencies could be installed to run without Cuda Toolkit installation.",
   "main": "dist/index.js",
   "node": "dist/node/index.cjs.js",

model.json (command-r-34b)

@@ -8,7 +8,7 @@
   "id": "command-r-34b",
   "object": "model",
   "name": "Command-R v01 34B Q4",
-  "version": "1.0",
+  "version": "1.1",
   "description": "C4AI Command-R developed by CohereAI is optimized for a variety of use cases including reasoning, summarization, and question answering.",
   "format": "gguf",
   "settings": {
@@ -27,7 +27,7 @@
   },
   "metadata": {
     "author": "CohereAI",
-    "tags": ["34B", "Finetuned"],
+    "tags": ["34B", "Finetuned", "Coming Soon", "Unavailable"],
     "size": 21500000000
   },
   "engine": "nitro"

model.json (wizardcoder-13b)

@@ -1,20 +1,20 @@
 {
   "sources": [
     {
-      "filename": "wizardcoder-python-13b-v1.0.Q5_K_M.gguf",
-      "url": "https://huggingface.co/TheBloke/WizardCoder-Python-13B-V1.0-GGUF/resolve/main/wizardcoder-python-13b-v1.0.Q5_K_M.gguf"
+      "filename": "wizardcoder-python-13b-v1.0.Q4_K_M.gguf",
+      "url": "https://huggingface.co/TheBloke/WizardCoder-Python-13B-V1.0-GGUF/resolve/main/wizardcoder-python-13b-v1.0.Q4_K_M.gguf"
     }
   ],
   "id": "wizardcoder-13b",
   "object": "model",
-  "name": "Wizard Coder Python 13B Q5",
-  "version": "1.0",
+  "name": "Wizard Coder Python 13B Q4",
+  "version": "1.1",
   "description": "WizardCoder 13B is a Python coding model. This model demonstrate high proficiency in specific domains like coding and mathematics.",
   "format": "gguf",
   "settings": {
     "ctx_len": 4096,
     "prompt_template": "### Instruction:\n{prompt}\n### Response:",
-    "llama_model_path": "wizardcoder-python-13b-v1.0.Q5_K_M.gguf"
+    "llama_model_path": "wizardcoder-python-13b-v1.0.Q4_K_M.gguf"
   },
   "parameters": {
     "temperature": 0.7,

DropdownListSidebar component

@@ -276,7 +276,7 @@ const DropdownListSidebar = ({
               {toGibibytes(x.metadata.size)}
             </span>
             {x.metadata.size && (
-              <ModelLabel size={x.metadata.size} />
+              <ModelLabel metadata={x.metadata} />
             )}
           </div>
         </div>

TopBar component

@@ -16,6 +16,8 @@ import CommandSearch from '@/containers/Layout/TopBar/CommandSearch'
 import { showLeftSideBarAtom } from '@/containers/Providers/KeyListener'
+import { toaster } from '@/containers/Toast'
+
 import { MainViewState } from '@/constants/screens'
 import { useClickOutside } from '@/hooks/useClickOutside'
@@ -61,7 +63,11 @@ const TopBar = () => {
   const onCreateConversationClick = async () => {
     if (assistants.length === 0) {
-      alert('No assistant available')
+      toaster({
+        title: 'No assistant available.',
+        description: `Could not create a new thread. Please add an assistant.`,
+        type: 'error',
+      })
     } else {
       requestCreateNewThread(assistants[0])
     }
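This hunk swaps the window-blocking alert() for Jan's non-blocking toaster. The call shape below is exactly what the diff introduces; wrapping it in a named helper is illustrative only:

```ts
import { toaster } from '@/containers/Toast'

// Surfaces the error without blocking the UI the way alert() does.
const notifyNoAssistant = () =>
  toaster({
    title: 'No assistant available.',
    description: 'Could not create a new thread. Please add an assistant.',
    type: 'error',
  })
```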

ModelLabel (@/containers/ModelLabel)

@@ -1,5 +1,7 @@
 import React from 'react'
+import { ModelMetadata } from '@janhq/core'
+import { Badge } from '@janhq/uikit'
 import { useAtomValue } from 'jotai'
 import { useActiveModel } from '@/hooks/useActiveModel'
@@ -19,10 +21,17 @@ import {
 } from '@/helpers/atoms/SystemBar.atom'
 
 type Props = {
-  size: number
+  metadata: ModelMetadata
 }
 
+const UnsupportedModel = () => {
+  return (
+    <Badge className="space-x-1 rounded-md" themes="warning">
+      <span>Coming Soon</span>
+    </Badge>
+  )
+}
+
-const ModelLabel: React.FC<Props> = ({ size }) => {
+const ModelLabel: React.FC<Props> = ({ metadata }) => {
   const { activeModel } = useActiveModel()
   const totalRam = useAtomValue(totalRamAtom)
   const usedRam = useAtomValue(usedRamAtom)
@@ -52,7 +61,11 @@ const ModelLabel: React.FC<Props> = ({ size }) => {
     return null
   }
 
-  return getLabel(size)
+  return metadata.tags.includes('Coming Soon') ? (
+    <UnsupportedModel />
+  ) : (
+    getLabel(metadata.size ?? 0)
+  )
 }
 
 export default React.memo(ModelLabel)
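Taken together with the DropdownListSidebar hunk above and the ExploreModelItemHeader hunks below, the badge logic is now centralized: callers pass the whole metadata object and ModelLabel decides what to render. A rough sketch of the resulting control flow (the body of the shared getLabel sits outside the hunks shown, so it is elided):

```tsx
// Sketch assembled from the hunks above; not the complete file.
const ModelLabel: React.FC<Props> = ({ metadata }) => {
  // ...hooks reading the active model and RAM atoms, as shown above...
  return metadata.tags.includes('Coming Soon') ? (
    <UnsupportedModel /> // "Coming Soon" badge added by this commit
  ) : (
    getLabel(metadata.size ?? 0) // existing memory-based badge
  )
}
```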

ExploreModelItemHeader component

@@ -19,6 +19,10 @@ import { twMerge } from 'tailwind-merge'
 import ModalCancelDownload from '@/containers/ModalCancelDownload'
+import ModelLabel from '@/containers/ModelLabel'
+import { toaster } from '@/containers/Toast'
 import { MainViewState } from '@/constants/screens'
 import { useCreateNewThread } from '@/hooks/useCreateNewThread'
@@ -47,22 +51,6 @@ type Props = {
   open: string
 }
 
-const getLabel = (size: number, ram: number, unit: string = 'RAM') => {
-  if (size * 1.25 >= ram) {
-    return (
-      <Badge className="rounded-md" themes="danger">
-        Not enough {unit}
-      </Badge>
-    )
-  } else {
-    return (
-      <Badge className="rounded-md" themes="success">
-        Recommended
-      </Badge>
-    )
-  }
-}
-
 const ExploreModelItemHeader: React.FC<Props> = ({ model, onClick, open }) => {
   const { downloadModel } = useDownloadModel()
   const downloadingModels = useAtomValue(getDownloadingModelAtom)
@@ -105,7 +93,11 @@ const ExploreModelItemHeader: React.FC<Props> = ({ model, onClick, open }) => {
   const onUseModelClick = useCallback(async () => {
     if (assistants.length === 0) {
-      alert('No assistant available')
+      toaster({
+        title: 'No assistant available.',
+        description: `Could not use Model ${model.name} as no assistant is available.`,
+        type: 'error',
+      })
       return
     }
     await requestCreateNewThread(assistants[0], model)
@@ -163,11 +155,7 @@ const ExploreModelItemHeader: React.FC<Props> = ({ model, onClick, open }) => {
         <span className="mr-4 font-semibold text-muted-foreground">
           {toGibibytes(model.metadata.size)}
         </span>
-        {getLabel(
-          model.metadata.size,
-          ram,
-          settings?.run_mode === 'gpu' ? 'VRAM' : 'RAM'
-        )}
+        {<ModelLabel metadata={model.metadata} />}
         {downloadButton}
         <ChevronDownIcon
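The deleted local getLabel also documents the sizing heuristic the badge is based on: a model is only "Recommended" when free RAM (or VRAM in GPU mode) exceeds roughly 1.25x its file size. Assuming the shared ModelLabel keeps the same rule, which this diff does not show, the check reduces to:

```ts
// Heuristic from the removed getLabel: recommend a model only when
// available memory exceeds ~1.25x its on-disk size.
const hasEnoughMemory = (sizeBytes: number, availableBytes: number): boolean =>
  sizeBytes * 1.25 < availableBytes

// Example: command-r-34b at 21.5 GB needs roughly 26.9 GB free.
console.log(hasEnoughMemory(21_500_000_000, 32_000_000_000)) // true
console.log(hasEnoughMemory(21_500_000_000, 16_000_000_000)) // false
```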