fix: HF token is not used while searching repositories (#6137)
* fix: HF token is not used while searching repositories
* chore: whitelist jan model with tool use support by default
* fix: tests
* fix: duplicate model while searching
* fix: deprecate addSource tests since the function was removed
commit 7c25d1dbfd
@@ -49,7 +49,6 @@ describe('useModelSources', () => {
       expect(result.current.error).toBe(null)
       expect(result.current.loading).toBe(false)
       expect(typeof result.current.fetchSources).toBe('function')
-      expect(typeof result.current.addSource).toBe('function')
     })
 
   describe('fetchSources', () => {
@@ -225,153 +224,6 @@ describe('useModelSources', () => {
     })
   })
 
-  describe('addSource', () => {
-    it('should add a new source to the store', () => {
-      const { result } = renderHook(() => useModelSources())
-
-      const testModel: CatalogModel = {
-        model_name: 'test-model',
-        description: 'Test model description',
-        developer: 'test-developer',
-        downloads: 100,
-        num_quants: 2,
-        quants: [
-          {
-            model_id: 'test-model-q4',
-            path: 'https://example.com/test-model-q4.gguf',
-            file_size: '2.0 GB',
-          },
-        ],
-        created_at: '2023-01-01T00:00:00Z',
-      }
-
-      act(() => {
-        result.current.addSource(testModel)
-      })
-
-      expect(result.current.sources).toHaveLength(1)
-      expect(result.current.sources[0]).toEqual(testModel)
-    })
-
-    it('should replace existing source with same model_name', () => {
-      const { result } = renderHook(() => useModelSources())
-
-      const originalModel: CatalogModel = {
-        model_name: 'duplicate-model',
-        description: 'Original description',
-        developer: 'original-developer',
-        downloads: 50,
-        num_quants: 1,
-        quants: [],
-        created_at: '2023-01-01T00:00:00Z',
-      }
-
-      const updatedModel: CatalogModel = {
-        model_name: 'duplicate-model',
-        description: 'Updated description',
-        developer: 'updated-developer',
-        downloads: 150,
-        num_quants: 2,
-        quants: [
-          {
-            model_id: 'duplicate-model-q4',
-            path: 'https://example.com/duplicate-model-q4.gguf',
-            file_size: '3.0 GB',
-          },
-        ],
-        created_at: '2023-02-01T00:00:00Z',
-      }
-
-      act(() => {
-        result.current.addSource(originalModel)
-      })
-
-      expect(result.current.sources).toHaveLength(1)
-
-      act(() => {
-        result.current.addSource(updatedModel)
-      })
-
-      expect(result.current.sources).toHaveLength(1)
-      expect(result.current.sources[0]).toEqual(updatedModel)
-    })
-
-    it('should handle multiple different sources', () => {
-      const { result } = renderHook(() => useModelSources())
-
-      const model1: CatalogModel = {
-        model_name: 'model-1',
-        description: 'First model',
-        developer: 'developer-1',
-        downloads: 100,
-        num_quants: 1,
-        quants: [],
-        created_at: '2023-01-01T00:00:00Z',
-      }
-
-      const model2: CatalogModel = {
-        model_name: 'model-2',
-        description: 'Second model',
-        developer: 'developer-2',
-        downloads: 200,
-        num_quants: 1,
-        quants: [],
-        created_at: '2023-01-02T00:00:00Z',
-      }
-
-      act(() => {
-        result.current.addSource(model1)
-      })
-
-      act(() => {
-        result.current.addSource(model2)
-      })
-
-      expect(result.current.sources).toHaveLength(2)
-      expect(result.current.sources).toContainEqual(model1)
-      expect(result.current.sources).toContainEqual(model2)
-    })
-
-    it('should handle CatalogModel with complete quants data', () => {
-      const { result } = renderHook(() => useModelSources())
-
-      const modelWithQuants: CatalogModel = {
-        model_name: 'model-with-quants',
-        description: 'Model with quantizations',
-        developer: 'quant-developer',
-        downloads: 500,
-        num_quants: 3,
-        quants: [
-          {
-            model_id: 'model-q4_k_m',
-            path: 'https://example.com/model-q4_k_m.gguf',
-            file_size: '2.0 GB',
-          },
-          {
-            model_id: 'model-q8_0',
-            path: 'https://example.com/model-q8_0.gguf',
-            file_size: '4.0 GB',
-          },
-          {
-            model_id: 'model-f16',
-            path: 'https://example.com/model-f16.gguf',
-            file_size: '8.0 GB',
-          },
-        ],
-        created_at: '2023-01-01T00:00:00Z',
-        readme: 'https://example.com/readme.md',
-      }
-
-      act(() => {
-        result.current.addSource(modelWithQuants)
-      })
-
-      expect(result.current.sources).toHaveLength(1)
-      expect(result.current.sources[0]).toEqual(modelWithQuants)
-      expect(result.current.sources[0].quants).toHaveLength(3)
-    })
-  })
-
   describe('state management', () => {
     it('should maintain state across multiple hook instances', () => {
       const { result: result1 } = renderHook(() => useModelSources())
@@ -8,7 +8,6 @@ type ModelSourcesState = {
   sources: CatalogModel[]
   error: Error | null
   loading: boolean
-  addSource: (source: CatalogModel) => void
   fetchSources: () => Promise<void>
 }
 
@@ -18,15 +17,6 @@ export const useModelSources = create<ModelSourcesState>()(
     sources: [],
     error: null,
     loading: false,
-
-    addSource: (source: CatalogModel) => {
-      set((state) => ({
-        sources: [
-          ...state.sources.filter((e) => e.model_name !== source.model_name),
-          source,
-        ],
-      }))
-    },
     fetchSources: async () => {
       set({ loading: true, error: null })
       try {
@@ -39,6 +39,7 @@ import HeaderPage from '@/containers/HeaderPage'
 import { Loader } from 'lucide-react'
 import { useTranslation } from '@/i18n/react-i18next-compat'
 import Fuse from 'fuse.js'
+import { useGeneralSetting } from '@/hooks/useGeneralSetting'
 
 type ModelProps = {
   model: CatalogModel
@@ -57,6 +58,7 @@ export const Route = createFileRoute(route.hub.index as any)({
 
 function Hub() {
   const parentRef = useRef(null)
+  const { huggingfaceToken } = useGeneralSetting()
 
   const { t } = useTranslation()
   const sortOptions = [
@@ -71,7 +73,7 @@ function Hub() {
     }
   }, [])
 
-  const { sources, addSource, fetchSources, loading } = useModelSources()
+  const { sources, fetchSources, loading } = useModelSources()
 
   const [searchValue, setSearchValue] = useState('')
   const [sortSelected, setSortSelected] = useState('newest')
@@ -185,14 +187,16 @@ function Hub() {
       addModelSourceTimeoutRef.current = setTimeout(async () => {
         try {
           // Fetch HuggingFace repository information
-          const repoInfo = await fetchHuggingFaceRepo(e.target.value)
+          const repoInfo = await fetchHuggingFaceRepo(
+            e.target.value,
+            huggingfaceToken
+          )
           if (repoInfo) {
             const catalogModel = convertHfRepoToCatalogModel(repoInfo)
             if (
               !sources.some((s) => s.model_name === catalogModel.model_name)
             ) {
               setHuggingFaceRepo(catalogModel)
-              addSource(catalogModel)
             }
           }
         } catch (error) {
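The "duplicate model while searching" fix comes down to the sources.some guard visible in the hunk above: a repository fetched from Hugging Face is only surfaced when the catalog does not already contain a model with the same name. A minimal standalone sketch of that check; the type is trimmed to the one field the guard reads, and the helper name is illustrative rather than part of this commit:

// Hypothetical helper mirroring the guard in the Hub search handler.
type CatalogModelLike = { model_name: string }

const isNewSource = (
  sources: CatalogModelLike[],
  candidate: CatalogModelLike
): boolean => !sources.some((s) => s.model_name === candidate.model_name)

// e.g. isNewSource([{ model_name: 'jan-nano' }], { model_name: 'jan-nano' }) === false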
@@ -325,7 +325,10 @@ describe('models service', () => {
 
       expect(result).toEqual(mockRepoData)
       expect(fetch).toHaveBeenCalledWith(
-        'https://huggingface.co/api/models/microsoft/DialoGPT-medium?blobs=true'
+        'https://huggingface.co/api/models/microsoft/DialoGPT-medium?blobs=true',
+        {
+          headers: {},
+        }
       )
     })
 
@@ -341,19 +344,28 @@ describe('models service', () => {
         'https://huggingface.co/microsoft/DialoGPT-medium'
       )
       expect(fetch).toHaveBeenCalledWith(
-        'https://huggingface.co/api/models/microsoft/DialoGPT-medium?blobs=true'
+        'https://huggingface.co/api/models/microsoft/DialoGPT-medium?blobs=true',
+        {
+          headers: {},
+        }
      )
 
      // Test with domain prefix
      await fetchHuggingFaceRepo('huggingface.co/microsoft/DialoGPT-medium')
      expect(fetch).toHaveBeenCalledWith(
-        'https://huggingface.co/api/models/microsoft/DialoGPT-medium?blobs=true'
+        'https://huggingface.co/api/models/microsoft/DialoGPT-medium?blobs=true',
+        {
+          headers: {},
+        }
      )
 
      // Test with trailing slash
      await fetchHuggingFaceRepo('microsoft/DialoGPT-medium/')
      expect(fetch).toHaveBeenCalledWith(
-        'https://huggingface.co/api/models/microsoft/DialoGPT-medium?blobs=true'
+        'https://huggingface.co/api/models/microsoft/DialoGPT-medium?blobs=true',
+        {
+          headers: {},
+        }
      )
    })
 
@@ -379,7 +391,10 @@ describe('models service', () => {
 
      expect(result).toBeNull()
      expect(fetch).toHaveBeenCalledWith(
-        'https://huggingface.co/api/models/nonexistent/model?blobs=true'
+        'https://huggingface.co/api/models/nonexistent/model?blobs=true',
+        {
+          headers: {},
+        }
      )
    })
 
@@ -99,7 +99,8 @@ export const fetchModelCatalog = async (): Promise<ModelCatalog> => {
  * @returns A promise that resolves to the repository information.
  */
 export const fetchHuggingFaceRepo = async (
-  repoId: string
+  repoId: string,
+  hfToken?: string
 ): Promise<HuggingFaceRepo | null> => {
   try {
     // Clean the repo ID to handle various input formats
@@ -114,7 +115,14 @@ export const fetchHuggingFaceRepo = async (
     }
 
     const response = await fetch(
-      `https://huggingface.co/api/models/${cleanRepoId}?blobs=true`
+      `https://huggingface.co/api/models/${cleanRepoId}?blobs=true`,
+      {
+        headers: hfToken
+          ? {
+              Authorization: `Bearer ${hfToken}`,
+            }
+          : {},
+      }
     )
 
     if (!response.ok) {
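For reference, a minimal usage sketch of the updated service signature. The wrapper function and the import path are illustrative assumptions, not part of this commit; only fetchHuggingFaceRepo(repoId, hfToken?) itself comes from the diff above:

// Assumed import path; adjust to wherever the models service actually lives.
import { fetchHuggingFaceRepo } from '@/services/models'

// Without a token the request goes out with empty headers; with a token the
// service adds `Authorization: Bearer <token>`, which is presumably what lets
// the Hub search resolve gated or private repositories.
async function lookupRepo(repoId: string, hfToken?: string) {
  const repo = await fetchHuggingFaceRepo(repoId, hfToken)
  if (!repo) {
    console.warn(`No Hugging Face repository found for ${repoId}`)
  }
  return repo
}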
@@ -17,7 +17,7 @@ export enum ModelCapabilities {
 
 // TODO: Remove this enum when we integrate llama.cpp extension
 export enum DefaultToolUseSupportedModels {
-  JanNano = 'jan-nano',
+  JanNano = 'jan-',
   Qwen3 = 'qwen3',
   Lucy = 'lucy',
 }
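Widening JanNano from 'jan-nano' to the bare 'jan-' prefix corresponds to the "whitelist jan model with tool use support by default" item in the commit message. A sketch of how such a whitelist is typically consulted; the matching helper below is an assumption for illustration, since the check itself is not part of this diff:

// Enum values copied from the hunk above; the helper is hypothetical.
enum DefaultToolUseSupportedModels {
  JanNano = 'jan-',
  Qwen3 = 'qwen3',
  Lucy = 'lucy',
}

// Assumed matching rule: a model id gets default tool-use support when it
// contains one of the whitelisted substrings, so 'jan-' now covers every
// jan-* variant instead of only jan-nano.
const supportsToolUseByDefault = (modelId: string): boolean =>
  Object.values(DefaultToolUseSupportedModels).some((value) =>
    modelId.toLowerCase().includes(value)
  )

// e.g. supportsToolUseByDefault('jan-v1-4b-gguf') === true under this assumption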