Merge remote-tracking branch 'origin/dev' into mobile/init-mobile-app

This commit is contained in:
Vanalite 2025-09-17 11:22:57 +07:00
commit adfcb35ca6
320 changed files with 84 additions and 62967 deletions

View File

@ -1,198 +0,0 @@
/**
* Web Assistant Extension
* Implements assistant management using IndexedDB
*/
import { Assistant, AssistantExtension } from '@janhq/core'
import { getSharedDB } from '../shared/db'
export default class AssistantExtensionWeb extends AssistantExtension {
  // Shared IndexedDB handle; null until onLoad() completes or after onUnload().
  private db: IDBDatabase | null = null

  // Seeded into the store on first load when no assistants exist yet.
  private defaultAssistant: Assistant = {
    avatar: '👋',
    thread_location: undefined,
    id: 'jan',
    object: 'assistant',
    created_at: Date.now() / 1000,
    name: 'Jan',
    description:
      'Jan is a helpful desktop assistant that can reason through complex tasks and use tools to complete them on the user\'s behalf.',
    model: '*',
    instructions:
      'You are a helpful AI assistant. Your primary goal is to assist users with their questions and tasks to the best of your abilities.\n\n' +
      'When responding:\n' +
      '- Answer directly from your knowledge when you can\n' +
      '- Be concise, clear, and helpful\n' +
      '- Admit when you\'re unsure rather than making things up\n\n' +
      'If tools are available to you:\n' +
      '- Only use tools when they add real value to your response\n' +
      '- Use tools when the user explicitly asks (e.g., "search for...", "calculate...", "run this code")\n' +
      '- Use tools for information you don\'t know or that needs verification\n' +
      '- Never use tools just because they\'re available\n\n' +
      'When using tools:\n' +
      '- Use one tool at a time and wait for results\n' +
      '- Use actual values as arguments, not variable names\n' +
      '- Learn from each result before deciding next steps\n' +
      '- Avoid repeating the same tool call with identical parameters\n\n' +
      'Remember: Most questions can be answered without tools. Think first whether you need them.\n\n' +
      'Current date: {{current_date}}',
    tools: [
      {
        type: 'retrieval',
        enabled: false,
        useTimeWeightedRetriever: false,
        settings: {
          top_k: 2,
          chunk_size: 1024,
          chunk_overlap: 64,
          retrieval_template: `Use the following pieces of context to answer the question at the end.
{context}
Question: {question}
Helpful Answer:`,
        },
      },
    ],
    file_ids: [],
    metadata: undefined,
  }

  /**
   * Opens the shared database and seeds the default assistant when the
   * 'assistants' store is empty.
   */
  async onLoad() {
    console.log('Loading Web Assistant Extension')
    this.db = await getSharedDB()
    // Create default assistant if none exist
    const assistants = await this.getAssistants()
    if (assistants.length === 0) {
      await this.createAssistant(this.defaultAssistant)
    }
  }

  /**
   * Releases this extension's handle. The shared connection itself is left
   * open because other extensions may still be using it.
   */
  onUnload() {
    // Don't close shared DB, other extensions might be using it
    this.db = null
  }

  /**
   * Guards every data method: throws if onLoad() has not run yet (or
   * onUnload() already cleared the handle).
   */
  private ensureDB(): void {
    if (!this.db) {
      throw new Error('Database not initialized. Call onLoad() first.')
    }
  }

  /**
   * Returns all stored assistants (empty array when the store is empty).
   */
  async getAssistants(): Promise<Assistant[]> {
    this.ensureDB()
    return new Promise((resolve, reject) => {
      const transaction = this.db!.transaction(['assistants'], 'readonly')
      const store = transaction.objectStore('assistants')
      const request = store.getAll()
      request.onsuccess = () => {
        resolve(request.result || [])
      }
      request.onerror = () => {
        reject(request.error)
      }
    })
  }

  /**
   * Creates — or replaces — an assistant record.
   *
   * Uses `put` (upsert) rather than `add`: callers such as the assistant
   * store treat "create" as covering the update path, and `add` would
   * reject with a ConstraintError whenever the id already exists.
   * `created_at` is preserved when already set; new records are stamped
   * with the current time (seconds).
   */
  async createAssistant(assistant: Assistant): Promise<void> {
    this.ensureDB()
    return new Promise((resolve, reject) => {
      const transaction = this.db!.transaction(['assistants'], 'readwrite')
      const store = transaction.objectStore('assistants')
      const assistantToStore = {
        ...assistant,
        created_at: assistant.created_at || Date.now() / 1000,
      }
      // put() upserts, so re-creating an existing assistant updates it
      // instead of failing with a ConstraintError.
      const request = store.put(assistantToStore)
      request.onsuccess = () => {
        console.log('Assistant created:', assistant.id)
        resolve()
      }
      request.onerror = () => {
        console.error('Failed to create assistant:', request.error)
        reject(request.error)
      }
    })
  }

  /**
   * Applies a partial update to an existing assistant.
   * Rejects when no record with the given id exists. The id field is
   * pinned so a patch can never move the record to a different key.
   */
  async updateAssistant(id: string, assistant: Partial<Assistant>): Promise<void> {
    this.ensureDB()
    return new Promise((resolve, reject) => {
      const transaction = this.db!.transaction(['assistants'], 'readwrite')
      const store = transaction.objectStore('assistants')
      // First get the existing assistant
      const getRequest = store.get(id)
      getRequest.onsuccess = () => {
        const existingAssistant = getRequest.result
        if (!existingAssistant) {
          reject(new Error(`Assistant with id ${id} not found`))
          return
        }
        const updatedAssistant = {
          ...existingAssistant,
          ...assistant,
          id, // Ensure ID doesn't change
        }
        const putRequest = store.put(updatedAssistant)
        putRequest.onsuccess = () => resolve()
        putRequest.onerror = () => reject(putRequest.error)
      }
      getRequest.onerror = () => {
        reject(getRequest.error)
      }
    })
  }

  /**
   * Deletes the assistant identified by `assistant.id`.
   * Note: IndexedDB delete() succeeds even if the key is absent.
   */
  async deleteAssistant(assistant: Assistant): Promise<void> {
    this.ensureDB()
    return new Promise((resolve, reject) => {
      const transaction = this.db!.transaction(['assistants'], 'readwrite')
      const store = transaction.objectStore('assistants')
      const request = store.delete(assistant.id)
      request.onsuccess = () => {
        console.log('Assistant deleted:', assistant.id)
        resolve()
      }
      request.onerror = () => {
        console.error('Failed to delete assistant:', request.error)
        reject(request.error)
      }
    })
  }

  /**
   * Looks up a single assistant by id; resolves null when not found.
   */
  async getAssistant(id: string): Promise<Assistant | null> {
    this.ensureDB()
    return new Promise((resolve, reject) => {
      const transaction = this.db!.transaction(['assistants'], 'readonly')
      const store = transaction.objectStore('assistants')
      const request = store.get(id)
      request.onsuccess = () => {
        resolve(request.result || null)
      }
      request.onerror = () => {
        reject(request.error)
      }
    })
  }
}

View File

@ -5,7 +5,6 @@
import type { WebExtensionRegistry } from './types' import type { WebExtensionRegistry } from './types'
export { default as AssistantExtensionWeb } from './assistant-web'
export { default as ConversationalExtensionWeb } from './conversational-web' export { default as ConversationalExtensionWeb } from './conversational-web'
export { default as JanProviderWeb } from './jan-provider-web' export { default as JanProviderWeb } from './jan-provider-web'
export { default as MCPExtensionWeb } from './mcp-web' export { default as MCPExtensionWeb } from './mcp-web'
@ -16,7 +15,6 @@ export type {
WebExtensionModule, WebExtensionModule,
WebExtensionName, WebExtensionName,
WebExtensionLoader, WebExtensionLoader,
AssistantWebModule,
ConversationalWebModule, ConversationalWebModule,
JanProviderWebModule, JanProviderWebModule,
MCPWebModule MCPWebModule
@ -24,7 +22,6 @@ export type {
// Extension registry for dynamic loading // Extension registry for dynamic loading
export const WEB_EXTENSIONS: WebExtensionRegistry = { export const WEB_EXTENSIONS: WebExtensionRegistry = {
'assistant-web': () => import('./assistant-web'),
'conversational-web': () => import('./conversational-web'), 'conversational-web': () => import('./conversational-web'),
'jan-provider-web': () => import('./jan-provider-web'), 'jan-provider-web': () => import('./jan-provider-web'),
'mcp-web': () => import('./mcp-web'), 'mcp-web': () => import('./mcp-web'),

View File

@ -2,14 +2,10 @@
* Web Extension Types * Web Extension Types
*/ */
import type { AssistantExtension, ConversationalExtension, BaseExtension, AIEngine, MCPExtension } from '@janhq/core' import type { ConversationalExtension, BaseExtension, AIEngine, MCPExtension } from '@janhq/core'
type ExtensionConstructorParams = ConstructorParameters<typeof BaseExtension> type ExtensionConstructorParams = ConstructorParameters<typeof BaseExtension>
export interface AssistantWebModule {
default: new (...args: ExtensionConstructorParams) => AssistantExtension
}
export interface ConversationalWebModule { export interface ConversationalWebModule {
default: new (...args: ExtensionConstructorParams) => ConversationalExtension default: new (...args: ExtensionConstructorParams) => ConversationalExtension
} }
@ -22,10 +18,9 @@ export interface MCPWebModule {
default: new (...args: ExtensionConstructorParams) => MCPExtension default: new (...args: ExtensionConstructorParams) => MCPExtension
} }
export type WebExtensionModule = AssistantWebModule | ConversationalWebModule | JanProviderWebModule | MCPWebModule export type WebExtensionModule = ConversationalWebModule | JanProviderWebModule | MCPWebModule
export interface WebExtensionRegistry { export interface WebExtensionRegistry {
'assistant-web': () => Promise<AssistantWebModule>
'conversational-web': () => Promise<ConversationalWebModule> 'conversational-web': () => Promise<ConversationalWebModule>
'jan-provider-web': () => Promise<JanProviderWebModule> 'jan-provider-web': () => Promise<JanProviderWebModule>
'mcp-web': () => Promise<MCPWebModule> 'mcp-web': () => Promise<MCPWebModule>

View File

@ -8,6 +8,12 @@ pub const MCP_BACKOFF_MULTIPLIER: f64 = 2.0; // Double the delay each time
pub const DEFAULT_MCP_CONFIG: &str = r#"{ pub const DEFAULT_MCP_CONFIG: &str = r#"{
"mcpServers": { "mcpServers": {
"exa": {
"command": "npx",
"args": ["-y", "exa-mcp-server"],
"env": { "EXA_API_KEY": "YOUR_EXA_API_KEY_HERE" },
"active": false
},
"browsermcp": { "browsermcp": {
"command": "npx", "command": "npx",
"args": ["@browsermcp/mcp"], "args": ["@browsermcp/mcp"],

View File

@ -28,7 +28,7 @@ const DropdownAssistant = () => {
) )
const selectedAssistant = const selectedAssistant =
assistants.find((a) => a.id === currentAssistant.id) || assistants[0] assistants.find((a) => a.id === currentAssistant?.id) || assistants[0]
return ( return (
<> <>

View File

@ -46,7 +46,7 @@ const mainMenus = [
title: 'common:assistants', title: 'common:assistants',
icon: IconClipboardSmileFilled, icon: IconClipboardSmileFilled,
route: route.assistant, route: route.assistant,
isEnabled: true, isEnabled: PlatformFeatures[PlatformFeature.ASSISTANTS],
}, },
{ {
title: 'common:hub', title: 'common:hub',

View File

@ -50,7 +50,7 @@ export const useAppState = create<AppState>()((set) => ({
const currentAssistant = useAssistant.getState().currentAssistant const currentAssistant = useAssistant.getState().currentAssistant
const selectedAssistant = const selectedAssistant =
assistants.find((a) => a.id === currentAssistant.id) || assistants[0] assistants.find((a) => a.id === currentAssistant?.id) || assistants[0]
set(() => ({ set(() => ({
streamingContent: content streamingContent: content

View File

@ -2,10 +2,12 @@ import { getServiceHub } from '@/hooks/useServiceHub'
import { Assistant as CoreAssistant } from '@janhq/core' import { Assistant as CoreAssistant } from '@janhq/core'
import { create } from 'zustand' import { create } from 'zustand'
import { localStorageKey } from '@/constants/localStorage' import { localStorageKey } from '@/constants/localStorage'
import { PlatformFeatures } from '@/lib/platform/const'
import { PlatformFeature } from '@/lib/platform/types'
interface AssistantState { interface AssistantState {
assistants: Assistant[] assistants: Assistant[]
currentAssistant: Assistant currentAssistant: Assistant | null
addAssistant: (assistant: Assistant) => void addAssistant: (assistant: Assistant) => void
updateAssistant: (assistant: Assistant) => void updateAssistant: (assistant: Assistant) => void
deleteAssistant: (id: string) => void deleteAssistant: (id: string) => void
@ -46,12 +48,29 @@ export const defaultAssistant: Assistant = {
'You are a helpful AI assistant. Your primary goal is to assist users with their questions and tasks to the best of your abilities.\n\nWhen responding:\n- Answer directly from your knowledge when you can\n- Be concise, clear, and helpful\n- Admit when youre unsure rather than making things up\n\nIf tools are available to you:\n- Only use tools when they add real value to your response\n- Use tools when the user explicitly asks (e.g., "search for...", "calculate...", "run this code")\n- Use tools for information you dont know or that needs verification\n- Never use tools just because theyre available\n\nWhen using tools:\n- Use one tool at a time and wait for results\n- Use actual values as arguments, not variable names\n- Learn from each result before deciding next steps\n- Avoid repeating the same tool call with identical parameters\n\nRemember: Most questions can be answered without tools. Think first whether you need them.\n\nCurrent date: {{current_date}}', 'You are a helpful AI assistant. Your primary goal is to assist users with their questions and tasks to the best of your abilities.\n\nWhen responding:\n- Answer directly from your knowledge when you can\n- Be concise, clear, and helpful\n- Admit when youre unsure rather than making things up\n\nIf tools are available to you:\n- Only use tools when they add real value to your response\n- Use tools when the user explicitly asks (e.g., "search for...", "calculate...", "run this code")\n- Use tools for information you dont know or that needs verification\n- Never use tools just because theyre available\n\nWhen using tools:\n- Use one tool at a time and wait for results\n- Use actual values as arguments, not variable names\n- Learn from each result before deciding next steps\n- Avoid repeating the same tool call with identical parameters\n\nRemember: Most questions can be answered without tools. Think first whether you need them.\n\nCurrent date: {{current_date}}',
} }
export const useAssistant = create<AssistantState>()((set, get) => ({ // Platform-aware initial state
const getInitialAssistantState = () => {
if (PlatformFeatures[PlatformFeature.ASSISTANTS]) {
return {
assistants: [defaultAssistant], assistants: [defaultAssistant],
currentAssistant: defaultAssistant, currentAssistant: defaultAssistant,
}
} else {
return {
assistants: [],
currentAssistant: null,
}
}
}
export const useAssistant = create<AssistantState>((set, get) => ({
...getInitialAssistantState(),
addAssistant: (assistant) => { addAssistant: (assistant) => {
set({ assistants: [...get().assistants, assistant] }) set({ assistants: [...get().assistants, assistant] })
getServiceHub().assistants().createAssistant(assistant as unknown as CoreAssistant).catch((error) => { getServiceHub()
.assistants()
.createAssistant(assistant as unknown as CoreAssistant)
.catch((error) => {
console.error('Failed to create assistant:', error) console.error('Failed to create assistant:', error)
}) })
}, },
@ -63,25 +82,31 @@ export const useAssistant = create<AssistantState>()((set, get) => ({
), ),
// Update currentAssistant if it's the same assistant being updated // Update currentAssistant if it's the same assistant being updated
currentAssistant: currentAssistant:
state.currentAssistant.id === assistant.id state.currentAssistant?.id === assistant.id
? assistant ? assistant
: state.currentAssistant, : state.currentAssistant,
}) })
// Create assistant already cover update logic // Create assistant already cover update logic
getServiceHub().assistants().createAssistant(assistant as unknown as CoreAssistant).catch((error) => { getServiceHub()
.assistants()
.createAssistant(assistant as unknown as CoreAssistant)
.catch((error) => {
console.error('Failed to update assistant:', error) console.error('Failed to update assistant:', error)
}) })
}, },
deleteAssistant: (id) => { deleteAssistant: (id) => {
const state = get() const state = get()
getServiceHub().assistants().deleteAssistant( getServiceHub()
.assistants()
.deleteAssistant(
state.assistants.find((e) => e.id === id) as unknown as CoreAssistant state.assistants.find((e) => e.id === id) as unknown as CoreAssistant
).catch((error) => { )
.catch((error) => {
console.error('Failed to delete assistant:', error) console.error('Failed to delete assistant:', error)
}) })
// Check if we're deleting the current assistant // Check if we're deleting the current assistant
const wasCurrentAssistant = state.currentAssistant.id === id const wasCurrentAssistant = state.currentAssistant?.id === id
set({ assistants: state.assistants.filter((a) => a.id !== id) }) set({ assistants: state.assistants.filter((a) => a.id !== id) })

View File

@ -73,7 +73,7 @@ export const useChat = () => {
}, [provider, selectedProvider]) }, [provider, selectedProvider])
const selectedAssistant = const selectedAssistant =
assistants.find((a) => a.id === currentAssistant.id) || assistants[0] assistants.find((a) => a.id === currentAssistant?.id) || assistants[0]
const getCurrentThread = useCallback(async () => { const getCurrentThread = useCallback(async () => {
let currentThread = retrieveThread() let currentThread = retrieveThread()
@ -237,7 +237,7 @@ export const useChat = () => {
const builder = new CompletionMessagesBuilder( const builder = new CompletionMessagesBuilder(
messages, messages,
renderInstructions(currentAssistant?.instructions) currentAssistant ? renderInstructions(currentAssistant.instructions) : undefined
) )
if (troubleshooting) builder.addUserMessage(message, attachments) if (troubleshooting) builder.addUserMessage(message, attachments)
@ -284,10 +284,10 @@ export const useChat = () => {
builder.getMessages(), builder.getMessages(),
abortController, abortController,
availableTools, availableTools,
currentAssistant.parameters?.stream === false ? false : true, currentAssistant?.parameters?.stream === false ? false : true,
{ {
...modelSettings, ...modelSettings,
...currentAssistant.parameters, ...(currentAssistant?.parameters || {}),
} as unknown as Record<string, object> } as unknown as Record<string, object>
) )

View File

@ -29,7 +29,7 @@ export const useMessages = create<MessageState>()((set, get) => ({
const currentAssistant = useAssistant.getState().currentAssistant const currentAssistant = useAssistant.getState().currentAssistant
const selectedAssistant = const selectedAssistant =
assistants.find((a) => a.id === currentAssistant.id) || assistants[0] assistants.find((a) => a.id === currentAssistant?.id) || assistants[0]
const newMessage = { const newMessage = {
...message, ...message,

View File

@ -49,4 +49,7 @@ export const PlatformFeatures: Record<PlatformFeature, boolean> = {
// Extensions settings page - disabled for web // Extensions settings page - disabled for web
[PlatformFeature.EXTENSIONS_SETTINGS]: isPlatformTauri(), [PlatformFeature.EXTENSIONS_SETTINGS]: isPlatformTauri(),
// Assistant functionality - disabled for web
[PlatformFeature.ASSISTANTS]: isPlatformTauri(),
} }

View File

@ -51,4 +51,7 @@ export enum PlatformFeature {
// Extensions settings page management // Extensions settings page management
EXTENSIONS_SETTINGS = 'extensionsSettings', EXTENSIONS_SETTINGS = 'extensionsSettings',
// Assistant functionality (creation, editing, management)
ASSISTANTS = 'assistants',
} }

View File

@ -10,6 +10,8 @@ import AddEditAssistant from '@/containers/dialogs/AddEditAssistant'
import { DeleteAssistantDialog } from '@/containers/dialogs' import { DeleteAssistantDialog } from '@/containers/dialogs'
import { AvatarEmoji } from '@/containers/AvatarEmoji' import { AvatarEmoji } from '@/containers/AvatarEmoji'
import { useTranslation } from '@/i18n/react-i18next-compat' import { useTranslation } from '@/i18n/react-i18next-compat'
import { PlatformGuard } from '@/lib/platform/PlatformGuard'
import { PlatformFeature } from '@/lib/platform/types'
// eslint-disable-next-line @typescript-eslint/no-explicit-any // eslint-disable-next-line @typescript-eslint/no-explicit-any
export const Route = createFileRoute(route.assistant as any)({ export const Route = createFileRoute(route.assistant as any)({
@ -17,6 +19,14 @@ export const Route = createFileRoute(route.assistant as any)({
}) })
function Assistant() { function Assistant() {
return (
<PlatformGuard feature={PlatformFeature.ASSISTANTS}>
<AssistantContent />
</PlatformGuard>
)
}
function AssistantContent() {
const { t } = useTranslation() const { t } = useTranslation()
const { assistants, addAssistant, updateAssistant, deleteAssistant } = const { assistants, addAssistant, updateAssistant, deleteAssistant } =
useAssistant() useAssistant()

View File

@ -20,6 +20,8 @@ import DropdownAssistant from '@/containers/DropdownAssistant'
import { useEffect } from 'react' import { useEffect } from 'react'
import { useThreads } from '@/hooks/useThreads' import { useThreads } from '@/hooks/useThreads'
import { useMobileScreen } from '@/hooks/useMediaQuery' import { useMobileScreen } from '@/hooks/useMediaQuery'
import { PlatformFeatures } from '@/lib/platform/const'
import { PlatformFeature } from '@/lib/platform/types'
export const Route = createFileRoute(route.home as any)({ export const Route = createFileRoute(route.home as any)({
component: Index, component: Index,
@ -57,7 +59,7 @@ function Index() {
return ( return (
<div className="flex h-full flex-col justify-center py-5"> <div className="flex h-full flex-col justify-center py-5">
<HeaderPage> <HeaderPage>
<DropdownAssistant /> {PlatformFeatures[PlatformFeature.ASSISTANTS] && <DropdownAssistant />}
</HeaderPage> </HeaderPage>
<div className={cn( <div className={cn(
"h-full overflow-y-auto flex flex-col gap-2 justify-center px-3 sm:px-4 md:px-8 py-4 md:py-0", "h-full overflow-y-auto flex flex-col gap-2 justify-center px-3 sm:px-4 md:px-8 py-4 md:py-0",

View File

@ -24,6 +24,8 @@ import { useTranslation } from '@/i18n/react-i18next-compat'
import { useChat } from '@/hooks/useChat' import { useChat } from '@/hooks/useChat'
import { useSmallScreen, useMobileScreen } from '@/hooks/useMediaQuery' import { useSmallScreen, useMobileScreen } from '@/hooks/useMediaQuery'
import { useTools } from '@/hooks/useTools' import { useTools } from '@/hooks/useTools'
import { PlatformFeatures } from '@/lib/platform/const'
import { PlatformFeature } from '@/lib/platform/types'
// as route.threadsDetail // as route.threadsDetail
export const Route = createFileRoute('/threads/$threadId')({ export const Route = createFileRoute('/threads/$threadId')({
@ -301,10 +303,10 @@ function ThreadDetail() {
<div className="flex flex-col h-full"> <div className="flex flex-col h-full">
<HeaderPage> <HeaderPage>
<div className="flex items-center justify-between w-full pr-2"> <div className="flex items-center justify-between w-full pr-2">
<DropdownAssistant /> {PlatformFeatures[PlatformFeature.ASSISTANTS] && <DropdownAssistant />}
</div> </div>
</HeaderPage> </HeaderPage>
<div className="flex flex-col h-[calc(100%-40px)] "> <div className="flex flex-col h-[calc(100%-40px)]">
<div <div
ref={scrollContainerRef} ref={scrollContainerRef}
onScroll={handleScroll} onScroll={handleScroll}

View File

@ -23,6 +23,7 @@ vi.mock('@/lib/platform/const', () => ({
mcpAutoApproveTools: false, mcpAutoApproveTools: false,
mcpServersSettings: true, mcpServersSettings: true,
extensionsSettings: true, extensionsSettings: true,
assistants: true,
} }
})) }))

21
website/.gitignore vendored
View File

@ -1,21 +0,0 @@
# build output
dist/
# generated types
.astro/
# dependencies
node_modules/
# logs
npm-debug.log*
yarn-debug.log*
yarn-error.log*
pnpm-debug.log*
# environment variables
.env
.env.production
# macOS-specific files
.DS_Store

View File

@ -1,183 +0,0 @@
# API Specification Synchronization
This document explains how the Jan Server API specification is kept in sync with the documentation.
## Overview
The Jan documentation automatically synchronizes with the Jan Server API specification to ensure the API reference is always up to date. This is managed through GitHub Actions workflows that can be triggered in multiple ways.
## Synchronization Methods
### 1. Automatic Daily Sync
- **Schedule**: Runs daily at 2 AM UTC
- **Branch**: `dev`
- **Behavior**: Fetches the latest spec and commits changes if any
- **Workflow**: `.github/workflows/update-cloud-api-spec.yml`
### 2. Manual Trigger via GitHub UI
Navigate to Actions → "Update Cloud API Spec" → Run workflow
Options:
- **Commit changes**: Whether to commit changes directly (default: true)
- **Custom spec URL**: Override the default API spec URL
- **Create PR**: Create a pull request instead of direct commit (default: false)
### 3. Webhook Trigger (For Jan Server Team)
Send a repository dispatch event to trigger an update:
```bash
curl -X POST \
-H "Accept: application/vnd.github.v3+json" \
-H "Authorization: token YOUR_GITHUB_TOKEN" \
https://api.github.com/repos/janhq/jan/dispatches \
-d '{
"event_type": "update-api-spec",
"client_payload": {
"spec_url": "https://api.jan.ai/api/swagger/doc.json"
}
}'
```
### 4. Local Development
For local development, the spec is updated conditionally:
```bash
# Force update the cloud spec
bun run generate:cloud-spec-force
# Normal update (checks if update is needed)
bun run generate:cloud-spec
# Update both local and cloud specs
bun run generate:specs
```
## Configuration
### Environment Variables
The following environment variables can be configured in GitHub Secrets:
| Variable | Description | Default |
|----------|-------------|---------|
| `JAN_SERVER_SPEC_URL` | URL to fetch the OpenAPI spec | `https://api.jan.ai/api/swagger/doc.json` |
| `JAN_SERVER_PROD_URL` | Production API base URL | `https://api.jan.ai/v1` |
| `JAN_SERVER_STAGING_URL` | Staging API base URL | `https://staging-api.jan.ai/v1` |
### Build Behavior
| Context | Behavior |
|---------|----------|
| Pull Request | Uses existing spec (no update) |
| Push to dev | Uses existing spec (no update) |
| Scheduled run | Updates spec and commits changes |
| Manual trigger | Updates based on input options |
| Webhook | Updates and creates PR |
| Local dev | Updates if spec is >24hrs old or missing |
## Workflow Integration
### For Jan Server Team
When deploying a new API version:
1. **Option A: Automatic PR**
- Deploy your API changes
- Trigger the webhook (see above)
- Review and merge the created PR
2. **Option B: Manual Update**
- Go to [Actions](https://github.com/janhq/jan/actions/workflows/update-cloud-api-spec.yml)
- Click "Run workflow"
- Select options:
- Set "Create PR" to `true` for review
- Or leave as `false` for direct commit
3. **Option C: Wait for Daily Sync**
- Changes will be picked up automatically at 2 AM UTC
### For Documentation Team
The API spec updates are handled automatically. However, you can:
1. **Force an update**: Run the "Update Cloud API Spec" workflow manually
2. **Test locally**: Use `bun run generate:cloud-spec-force`
3. **Review changes**: Check PRs labeled with `api` and `automated`
## Fallback Mechanism
If the Jan Server API is unavailable:
1. The workflow will use the last known good spec
2. Local builds will fall back to the local OpenAPI spec
3. The build will continue without failing
## Monitoring
### Check Update Status
1. Go to [Actions](https://github.com/janhq/jan/actions/workflows/update-cloud-api-spec.yml)
2. Check the latest run status
3. Review the workflow summary for details
### Notifications
To add Slack/Discord notifications:
1. Add webhook URL to GitHub Secrets
2. Uncomment notification section in workflow
3. Configure message format as needed
## Troubleshooting
### Spec Update Fails
1. Check if the API endpoint is accessible
2. Verify the spec URL is correct
3. Check GitHub Actions logs for errors
4. Ensure proper permissions for the workflow
### Changes Not Appearing
1. Verify the workflow completed successfully
2. Check if changes were committed to the correct branch
3. Ensure the build is using the updated spec
4. Clear CDN cache if using Cloudflare
### Manual Recovery
If automated updates fail:
```bash
# Clone the repository
git clone https://github.com/janhq/jan.git
cd jan/website
# Install dependencies
bun install
# Force update the spec
FORCE_UPDATE=true bun run generate:cloud-spec
# Commit and push
git add public/openapi/cloud-openapi.json
git commit -m "chore: manual update of API spec"
git push
```
## Best Practices
1. **Version Control**: Always review significant API changes before merging
2. **Testing**: Test the updated spec locally before deploying
3. **Communication**: Notify the docs team of breaking API changes
4. **Monitoring**: Set up alerts for failed spec updates
5. **Documentation**: Update this guide when changing the sync process
## Support
For issues or questions:
- Open an issue in the [Jan repository](https://github.com/janhq/jan/issues)
- Contact the documentation team on Discord
- Check the [workflow runs](https://github.com/janhq/jan/actions) for debugging

View File

@ -1,48 +0,0 @@
# Jan's Website
This website is [built with Starlight](https://starlight.astro.build)
Starlight looks for `.md` or `.mdx` files in the `src/content/docs/` directory. Each file is exposed
as a route based on its file name.
Images can be added to `src/assets/` and embedded in Markdown with a relative link.
Static assets, like favicons, can be placed in the `public/` directory.
If you want to add new pages, these can go in the `src/pages/` directory. Because of the topics plugin
we are using ([starlight sidebar topics](https://starlight-sidebar-topics.netlify.app/docs/guides/excluded-pages/))
you will need to exclude them from the sidebar by adding them to the exclude list in `astro.config.mjs`, e.g., `exclude: ['/example'],`.
## 🧞 Commands
All commands are run from the root of the project, from a terminal:
| Command | Action |
| :------------------------ | :----------------------------------------------- |
| `bun install` | Installs dependencies |
| `bun dev` | Starts local dev server at `localhost:4321` |
| `bun build` | Build your production site to `./dist/` |
| `bun preview` | Preview your build locally, before deploying |
| `bun astro ...` | Run CLI commands like `astro add`, `astro check` |
| `bun astro -- --help` | Get help using the Astro CLI |
## 📖 API Reference Commands
The website includes interactive API documentation. These commands help manage the OpenAPI specifications:
| Command | Action |
| :------------------------------- | :-------------------------------------------------------- |
| `bun run api:dev` | Start dev server with API reference at `/api` |
| `bun run api:local` | Start dev server with local API docs at `/api-reference/local` |
| `bun run api:cloud` | Start dev server with cloud API docs at `/api-reference/cloud` |
| `bun run generate:local-spec` | Generate/fix the local OpenAPI specification |
| `bun run generate:cloud-spec` | Generate the cloud OpenAPI specification from Jan Server |
| `bun run generate:cloud-spec-force` | Force update cloud spec (ignores cache/conditions) |
**API Reference Pages:**
- `/api` - Landing page with Local and Server API options
- `/api-reference/local` - Local API (llama.cpp) documentation
- `/api-reference/cloud` - Jan Server API (vLLM) documentation
The cloud specification is automatically synced via GitHub Actions on a daily schedule and can be manually triggered by the Jan Server team.

View File

@ -1,306 +0,0 @@
// @ts-check
import { defineConfig } from 'astro/config'
import starlight from '@astrojs/starlight'
import starlightThemeRapide from 'starlight-theme-rapide'
import starlightSidebarTopics from 'starlight-sidebar-topics'
import starlightUtils from '@lorenzo_lewis/starlight-utils'
import react from '@astrojs/react'
import mermaid from 'astro-mermaid'
import { fileURLToPath } from 'url'
import path, { dirname } from 'path'
// Recreate CommonJS-style __filename/__dirname in this ESM module.
const __filename = fileURLToPath(import.meta.url)
const __dirname = dirname(__filename)
// https://astro.build/config
export default defineConfig({
  // Deploy to the new v2 subdomain
  site: 'https://docs.jan.ai',
  integrations: [
    react(),
    // Mermaid diagram support; autoTheme follows the site's light/dark mode.
    mermaid({
      theme: 'default',
      autoTheme: true,
    }),
    starlight({
      title: '👋 Jan',
      favicon: 'favicon.ico',
      customCss: ['./src/styles/global.css'],
      // Injected on every page: the custom product-navigation script and its
      // stylesheet (see public/scripts/inject-navigation.js).
      head: [
        {
          tag: 'script',
          attrs: { src: '/scripts/inject-navigation.js', defer: true },
        },
        {
          tag: 'link',
          attrs: { rel: 'stylesheet', href: '/styles/navigation.css' },
        },
      ],
      plugins: [
        starlightThemeRapide(),
        // Sidebar is split into product "topics": Jan, Desktop, Browser
        // Extension, Mobile, and Server.
        starlightSidebarTopics(
          [
            {
              label: 'Jan',
              link: '/',
              icon: 'rocket',
              items: [{ label: 'Ecosystem', slug: 'index' }],
            },
            {
              label: 'Jan Desktop',
              link: '/jan/quickstart',
              icon: 'rocket',
              items: [
                {
                  label: '🚀 QUICK START',
                  items: [
                    { label: 'Getting Started', slug: 'jan/quickstart' },
                    {
                      label: 'Install Jan',
                      collapsed: false,
                      autogenerate: { directory: 'jan/installation' },
                    },
                    { label: 'AI Assistants', slug: 'jan/assistants' },
                  ],
                },
                {
                  label: '🤖 MODELS',
                  items: [
                    { label: 'Overview', slug: 'jan/manage-models' },
                    {
                      label: 'Jan Models',
                      collapsed: false,
                      items: [
                        {
                          label: 'Jan v1',
                          slug: 'jan/jan-models/jan-v1',
                        },
                        {
                          label: 'Research Models',
                          collapsed: true,
                          items: [
                            {
                              label: 'Jan Nano 32k',
                              slug: 'jan/jan-models/jan-nano-32',
                            },
                            {
                              label: 'Jan Nano 128k',
                              slug: 'jan/jan-models/jan-nano-128',
                            },
                            {
                              label: 'Lucy',
                              slug: 'jan/jan-models/lucy',
                            },
                          ],
                        },
                      ],
                    },
                    {
                      label: 'Cloud Providers',
                      collapsed: true,
                      items: [
                        { label: 'OpenAI', slug: 'jan/remote-models/openai' },
                        {
                          label: 'Anthropic',
                          slug: 'jan/remote-models/anthropic',
                        },
                        { label: 'Gemini', slug: 'jan/remote-models/google' },
                        { label: 'Groq', slug: 'jan/remote-models/groq' },
                        {
                          label: 'Mistral',
                          slug: 'jan/remote-models/mistralai',
                        },
                        { label: 'Cohere', slug: 'jan/remote-models/cohere' },
                        {
                          label: 'OpenRouter',
                          slug: 'jan/remote-models/openrouter',
                        },
                        {
                          label: 'HuggingFace 🤗',
                          slug: 'jan/remote-models/huggingface',
                        },
                      ],
                    },
                    {
                      label: 'Custom Providers',
                      slug: 'jan/custom-provider',
                    },
                    {
                      label: 'Multi-Modal Models',
                      slug: 'jan/multi-modal',
                    },
                  ],
                },
                {
                  label: '🔧 TOOLS & INTEGRATIONS',
                  items: [
                    { label: 'What is MCP?', slug: 'jan/mcp' },
                    {
                      label: 'Examples & Tutorials',
                      collapsed: true,
                      items: [
                        {
                          label: 'Web & Search',
                          collapsed: true,
                          items: [
                            {
                              label: 'Browser Control',
                              slug: 'jan/mcp-examples/browser/browserbase',
                            },
                            {
                              label: 'Serper Search',
                              slug: 'jan/mcp-examples/search/serper',
                            },
                            {
                              label: 'Exa Search',
                              slug: 'jan/mcp-examples/search/exa',
                            },
                          ],
                        },
                        {
                          label: 'Data & Analysis',
                          collapsed: true,
                          items: [
                            {
                              label: 'Jupyter Notebooks',
                              slug: 'jan/mcp-examples/data-analysis/jupyter',
                            },
                            {
                              label: 'Code Sandbox (E2B)',
                              slug: 'jan/mcp-examples/data-analysis/e2b',
                            },
                            {
                              label: 'Deep Financial Research',
                              slug: 'jan/mcp-examples/deepresearch/octagon',
                            },
                          ],
                        },
                        {
                          label: 'Productivity',
                          collapsed: true,
                          items: [
                            {
                              label: 'Linear',
                              slug: 'jan/mcp-examples/productivity/linear',
                            },
                            {
                              label: 'Todoist',
                              slug: 'jan/mcp-examples/productivity/todoist',
                            },
                          ],
                        },
                        {
                          label: 'Creative',
                          collapsed: true,
                          items: [
                            {
                              label: 'Design with Canva',
                              slug: 'jan/mcp-examples/design/canva',
                            },
                          ],
                        },
                      ],
                    },
                  ],
                },
                {
                  label: '⚙️ DEVELOPER',
                  items: [
                    {
                      label: 'Local API Server',
                      collapsed: true,
                      items: [
                        { label: 'Overview', slug: 'local-server' },
                        {
                          label: 'API Configuration',
                          slug: 'local-server/api-server',
                        },
                        {
                          label: 'Engine Settings',
                          slug: 'local-server/llama-cpp',
                        },
                        {
                          label: 'Server Settings',
                          slug: 'local-server/settings',
                        },
                        {
                          label: 'Integrations',
                          collapsed: true,
                          autogenerate: {
                            directory: 'local-server/integrations',
                          },
                        },
                      ],
                    },
                    {
                      label: 'Technical Details',
                      collapsed: true,
                      items: [
                        {
                          label: 'Model Parameters',
                          slug: 'jan/explanation/model-parameters',
                        },
                      ],
                    },
                  ],
                },
                {
                  label: '📚 REFERENCE',
                  items: [
                    { label: 'Settings', slug: 'jan/settings' },
                    { label: 'Data Folder', slug: 'jan/data-folder' },
                    { label: 'Troubleshooting', slug: 'jan/troubleshooting' },
                    { label: 'Privacy Policy', slug: 'jan/privacy' },
                  ],
                },
              ],
            },
            {
              label: 'Browser Extension',
              link: '/browser/',
              badge: { text: 'Alpha', variant: 'tip' },
              icon: 'puzzle',
              items: [{ label: 'Overview', slug: 'browser' }],
            },
            {
              label: 'Jan Mobile',
              link: '/mobile/',
              badge: { text: 'Soon', variant: 'caution' },
              icon: 'phone',
              items: [{ label: 'Overview', slug: 'mobile' }],
            },
            {
              label: 'Jan Server',
              link: '/server/',
              badge: { text: 'Soon', variant: 'caution' },
              icon: 'forward-slash',
              items: [{ label: 'Overview', slug: 'server' }],
            },
          ],
          // API reference pages render their own navigation, so keep them
          // out of the topic sidebar.
          {
            exclude: ['/api-reference', '/api-reference/**/*'],
          }
        ),
      ],
      social: [
        {
          icon: 'github',
          label: 'GitHub',
          href: 'https://github.com/menloresearch/jan',
        },
        {
          icon: 'x.com',
          label: 'X',
          href: 'https://twitter.com/jandotai',
        },
        {
          icon: 'discord',
          label: 'Discord',
          href: 'https://discord.com/invite/FTk2MvZwJH',
        },
      ],
    }),
  ],
})

File diff suppressed because it is too large Load Diff

Binary file not shown.

View File

@ -1,36 +0,0 @@
{
"name": "website",
"type": "module",
"version": "0.0.1",
"scripts": {
"dev": "astro dev",
"start": "astro dev",
"build": "astro build",
"preview": "astro preview",
"astro": "astro",
"prebuild": "bun scripts/fix-local-spec-complete.js && bun scripts/conditional-cloud-spec.js",
"generate:local-spec": "bun scripts/fix-local-spec-complete.js",
"generate:cloud-spec": "bun scripts/generate-cloud-spec.js",
"generate:cloud-spec-force": "FORCE_UPDATE=true bun scripts/generate-cloud-spec.js",
"api:dev": "astro dev --open /api",
"api:local": "astro dev --open /api-reference/local",
"api:cloud": "astro dev --open /api-reference/cloud"
},
"dependencies": {
"@astrojs/react": "^4.3.0",
"@astrojs/starlight": "^0.35.1",
"@lorenzo_lewis/starlight-utils": "^0.3.2",
"@scalar/api-reference-react": "^0.7.42",
"@types/react": "^19.1.12",
"astro": "^5.6.1",
"astro-mermaid": "^1.0.4",
"mermaid": "^11.9.0",
"react": "^19.1.1",
"react-dom": "^19.1.1",
"sharp": "^0.34.3",
"starlight-openapi": "^0.19.1",
"starlight-sidebar-topics": "^0.6.0",
"starlight-theme-rapide": "^0.5.1",
"unist-util-visit": "^5.0.0"
}
}

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.9 MiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 488 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.3 MiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 477 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 232 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 225 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 161 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 162 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 144 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 84 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 163 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 85 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 368 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 114 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 395 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 119 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 195 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 119 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 198 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 125 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 325 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 94 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 322 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 91 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 333 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 97 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 350 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 95 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 7.4 MiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 8.8 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.9 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 2.0 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 77 KiB

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 77 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 387 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 98 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 392 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 42 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 15 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 9.7 MiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 22 MiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 23 MiB

File diff suppressed because it is too large Load Diff

File diff suppressed because one or more lines are too long

View File

@ -1,515 +0,0 @@
{
"openapi": "3.1.0",
"info": {
"title": "👋Jan API",
"description": "OpenAI-compatible API for local inference with Jan. Run AI models locally with complete privacy using llama.cpp's high-performance inference engine. Supports GGUF models with CPU and GPU acceleration. No authentication required for local usage.",
"version": "0.3.14",
"contact": {
"name": "Jan Support",
"url": "https://jan.ai/support",
"email": "support@jan.ai"
},
"license": {
"name": "Apache 2.0",
"url": "https://github.com/janhq/jan/blob/main/LICENSE"
}
},
"servers": [
{
"url": "http://127.0.0.1:1337",
"description": "Local Jan Server (Default IP)"
},
{
"url": "http://localhost:1337",
"description": "Local Jan Server (localhost)"
},
{
"url": "http://localhost:8080",
"description": "Local Jan Server (Alternative Port)"
}
],
"tags": [
{
"name": "Models",
"description": "List and describe available models"
},
{
"name": "Chat",
"description": "Chat completion endpoints for conversational AI"
},
{
"name": "Completions",
"description": "Text completion endpoints for generating text"
},
{
"name": "Extras",
"description": "Additional utility endpoints for tokenization and text processing"
}
],
"paths": {
"/v1/completions": {
"post": {
"tags": ["Completions"],
"summary": "Create completion",
"description": "Creates a completion for the provided prompt and parameters. This endpoint is compatible with OpenAI's completions API.",
"operationId": "create_completion",
"requestBody": {
"required": true,
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/CreateCompletionRequest"
},
"examples": {
"basic": {
"summary": "Basic Completion",
"description": "Simple text completion example",
"value": {
"model": "gemma-2-2b-it-Q8_0",
"prompt": "Once upon a time",
"max_tokens": 50,
"temperature": 0.7
}
},
"creative": {
"summary": "Creative Writing",
"description": "Generate creative content with higher temperature",
"value": {
"model": "gemma-2-2b-it-Q8_0",
"prompt": "Write a short poem about coding:",
"max_tokens": 150,
"temperature": 1,
"top_p": 0.95
}
},
"code": {
"summary": "Code Generation",
"description": "Generate code with lower temperature for accuracy",
"value": {
"model": "gemma-2-2b-it-Q8_0",
"prompt": "# Python function to calculate fibonacci\ndef fibonacci(n):",
"max_tokens": 200,
"temperature": 0.3,
"stop": ["\n\n", "def ", "class "]
}
},
"streaming": {
"summary": "Streaming Response",
"description": "Stream tokens as they are generated",
"value": {
"model": "gemma-2-2b-it-Q8_0",
"prompt": "Explain quantum computing in simple terms:",
"max_tokens": 300,
"temperature": 0.7,
"stream": true
}
}
}
}
}
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/CreateCompletionResponse"
}
}
}
},
"202": {
"description": "Accepted - Request is being processed",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/CreateCompletionResponse"
}
},
"text/event-stream": {
"schema": {
"type": "string",
"format": "binary",
"description": "Server-sent events stream for streaming responses"
}
}
}
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ValidationError"
}
}
}
}
}
}
},
"/v1/chat/completions": {
"post": {
"tags": ["Chat"],
"summary": "Create chat completion",
"description": "Creates a model response for the given chat conversation. This endpoint is compatible with OpenAI's chat completions API.",
"operationId": "create_chat_completion",
"requestBody": {
"required": true,
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/CreateChatCompletionRequest"
},
"examples": {
"simple": {
"summary": "Simple Chat",
"description": "Basic question and answer",
"value": {
"model": "gemma-2-2b-it-Q8_0",
"messages": [
{
"role": "user",
"content": "What is the capital of France?"
}
],
"max_tokens": 100,
"temperature": 0.7
}
},
"system": {
"summary": "With System Message",
"description": "Chat with system instructions",
"value": {
"model": "gemma-2-2b-it-Q8_0",
"messages": [
{
"role": "system",
"content": "You are a helpful assistant that speaks like a pirate."
},
{
"role": "user",
"content": "Tell me about the weather today."
}
],
"max_tokens": 150,
"temperature": 0.8
}
},
"conversation": {
"summary": "Multi-turn Conversation",
"description": "Extended conversation with context",
"value": {
"model": "gemma-2-2b-it-Q8_0",
"messages": [
{
"role": "system",
"content": "You are a knowledgeable AI assistant."
},
{
"role": "user",
"content": "What is machine learning?"
},
{
"role": "assistant",
"content": "Machine learning is a subset of artificial intelligence that enables systems to learn and improve from experience without being explicitly programmed."
},
{
"role": "user",
"content": "Can you give me a simple example?"
}
],
"max_tokens": 200,
"temperature": 0.7
}
},
"streaming": {
"summary": "Streaming Chat",
"description": "Stream the response token by token",
"value": {
"model": "gemma-2-2b-it-Q8_0",
"messages": [
{
"role": "user",
"content": "Write a haiku about programming"
}
],
"stream": true,
"temperature": 0.9
}
},
"json_mode": {
"summary": "JSON Response",
"description": "Request structured JSON output",
"value": {
"model": "gemma-2-2b-it-Q8_0",
"messages": [
{
"role": "user",
"content": "List 3 programming languages with their main use cases in JSON format"
}
],
"max_tokens": 200,
"temperature": 0.5,
"response_format": {
"type": "json_object"
}
}
}
}
}
}
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/CreateChatCompletionResponse"
}
},
"text/event-stream": {
"schema": {
"type": "string",
"format": "binary",
"description": "Server-sent events stream for streaming responses"
}
}
}
},
"202": {
"description": "Accepted - Request is being processed",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/CreateChatCompletionResponse"
}
},
"text/event-stream": {
"schema": {
"type": "string",
"format": "binary",
"description": "Server-sent events stream for streaming responses"
}
}
}
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ValidationError"
}
}
}
}
}
}
},
"/v1/models": {
"get": {
"tags": ["Models"],
"summary": "List available models",
"description": "Lists the currently available models and provides basic information about each one such as the owner and availability.",
"operationId": "list_models",
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/ModelList"
},
"example": {
"object": "list",
"data": [
{
"id": "gemma-2-2b-it-Q8_0",
"object": "model",
"created": 1686935002,
"owned_by": "jan"
},
{
"id": "llama-3.1-8b-instruct-Q4_K_M",
"object": "model",
"created": 1686935002,
"owned_by": "jan"
},
{
"id": "mistral-7b-instruct-v0.3-Q4_K_M",
"object": "model",
"created": 1686935002,
"owned_by": "jan"
},
{
"id": "phi-3-mini-4k-instruct-Q4_K_M",
"object": "model",
"created": 1686935002,
"owned_by": "jan"
}
]
}
}
}
}
}
}
},
"/extras/tokenize": {
"post": {
"tags": ["Extras"],
"summary": "Tokenize text",
"description": "Convert text input into tokens using the model's tokenizer.",
"operationId": "tokenize",
"requestBody": {
"required": true,
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/TokenizeRequest"
},
"example": {
"input": "Hello, world!",
"model": "gemma-2-2b-it-Q8_0"
}
}
}
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/TokenizeResponse"
},
"example": {
"tokens": [15339, 11, 1917, 0]
}
}
}
}
}
}
},
"/extras/tokenize/count": {
"post": {
"tags": ["Extras"],
"summary": "Count tokens",
"description": "Count the number of tokens in the provided text.",
"operationId": "count_tokens",
"requestBody": {
"required": true,
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/TokenizeRequest"
},
"example": {
"input": "How many tokens does this text have?",
"model": "gemma-2-2b-it-Q8_0"
}
}
}
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/TokenCountResponse"
},
"example": {
"count": 8
}
}
}
}
}
}
}
},
"components": {
"schemas": {
"TokenizeRequest": {
"type": "object",
"properties": {
"input": {
"type": "string",
"description": "The text to tokenize"
},
"model": {
"type": "string",
"description": "The model to use for tokenization",
"enum": [
"gemma-2-2b-it-Q8_0",
"llama-3.1-8b-instruct-Q4_K_M",
"mistral-7b-instruct-v0.3-Q4_K_M",
"phi-3-mini-4k-instruct-Q4_K_M"
]
}
},
"required": ["input"]
},
"TokenizeResponse": {
"type": "object",
"properties": {
"tokens": {
"type": "array",
"items": {
"type": "integer"
},
"description": "Array of token IDs"
}
},
"required": ["tokens"]
},
"TokenCountResponse": {
"type": "object",
"properties": {
"count": {
"type": "integer",
"description": "Number of tokens"
}
},
"required": ["count"]
}
},
"securitySchemes": {
"bearerAuth": {
"type": "http",
"scheme": "bearer",
"bearerFormat": "JWT",
"description": "Optional: Enter your API key if authentication is enabled. The Bearer prefix will be added automatically."
}
}
},
"x-jan-local-features": {
"engine": "llama.cpp",
"features": [
"GGUF model support",
"CPU and GPU acceleration",
"Quantized model support (Q4, Q5, Q8)",
"Metal acceleration on macOS",
"CUDA support on NVIDIA GPUs",
"ROCm support on AMD GPUs",
"AVX/AVX2/AVX512 optimizations",
"Memory-mapped model loading"
],
"privacy": {
"local_processing": true,
"no_telemetry": true,
"offline_capable": true
},
"model_formats": ["GGUF", "GGML"],
"default_settings": {
"context_length": 4096,
"batch_size": 512,
"threads": "auto"
}
}
}

View File

@ -1,119 +0,0 @@
// Navigation injection script for Jan documentation
// This script adds navigation links to regular docs pages (not API reference pages)
;(function () {
  // Navigation configuration for Jan docs
  const JAN_NAV_CONFIG = {
    // Product navigation links - easy to extend for multiple products
    links: [
      {
        href: '/',
        text: 'Docs',
        // Active on every non-API path. (The path.startsWith('/') clause is
        // always true for a pathname; kept for symmetry with other entries.)
        isActive: (path) =>
          path === '/' || (path.startsWith('/') && !path.startsWith('/api')),
      },
      {
        href: '/api',
        text: 'API Reference',
        isActive: (path) => path.startsWith('/api'),
      },
    ],
    // Pages that have their own navigation (don't inject nav)
    excludePaths: ['/api-reference/', '/api/'],
  }
  // Add navigation to docs pages with retry logic
  // Retries up to 5 times at 500ms intervals because the header may not be
  // in the DOM yet when this script first runs.
  function addNavigation(retries = 0) {
    const currentPath = window.location.pathname
    // Skip if page has its own navigation
    const shouldSkipNav = JAN_NAV_CONFIG.excludePaths.some((path) =>
      currentPath.startsWith(path)
    )
    if (shouldSkipNav) return
    const header = document.querySelector('.header')
    const siteTitle = document.querySelector('.site-title')
    const existingNav = document.querySelector('.custom-nav-links')
    // Only inject once: bail if .custom-nav-links already exists.
    if (header && siteTitle && !existingNav) {
      // Find the right container for nav links
      const searchElement = header.querySelector('[class*="search"]')
      const flexContainer = header.querySelector('.sl-flex')
      const targetContainer = flexContainer || header
      if (targetContainer) {
        // Create navigation container
        const nav = document.createElement('nav')
        nav.className = 'custom-nav-links'
        nav.setAttribute('aria-label', 'Product Navigation')
        // Create links from configuration
        JAN_NAV_CONFIG.links.forEach((link) => {
          const a = document.createElement('a')
          a.href = link.href
          a.textContent = link.text
          a.className = 'nav-link'
          // Set active state
          if (link.isActive(currentPath)) {
            a.setAttribute('aria-current', 'page')
          }
          nav.appendChild(a)
        })
        // Insert navigation safely
        // Preferred spot: just before the search box; otherwise right after
        // the site title; last resort: appended to the container.
        if (searchElement && targetContainer.contains(searchElement)) {
          targetContainer.insertBefore(nav, searchElement)
        } else {
          // Find site title and insert after it
          if (siteTitle && targetContainer.contains(siteTitle)) {
            siteTitle.insertAdjacentElement('afterend', nav)
          } else {
            targetContainer.appendChild(nav)
          }
        }
      } else if (retries < 5) {
        setTimeout(() => addNavigation(retries + 1), 500)
      }
    } else if (retries < 5) {
      setTimeout(() => addNavigation(retries + 1), 500)
    }
  }
  // Initialize navigation injection
  function initNavigation() {
    // Update logo link to jan.ai
    // NOTE(review): rewrites only the FIRST anchor with href="/" — assumed
    // to be the site logo; confirm no other "/" link can precede it.
    const logoLink = document.querySelector('a[href="/"]')
    if (logoLink && logoLink.getAttribute('href') === '/') {
      logoLink.href = 'https://jan.ai'
    }
    // Start navigation injection
    if (document.readyState === 'loading') {
      setTimeout(() => addNavigation(), 1000)
    } else {
      addNavigation()
    }
  }
  // Run when DOM is ready
  if (document.readyState === 'loading') {
    document.addEventListener('DOMContentLoaded', initNavigation)
  } else {
    initNavigation()
  }
  // Handle page navigation in SPA-like environments
  // Detects URL changes via DOM mutations and re-injects the nav after each
  // client-side navigation (addNavigation itself guards against duplicates).
  let lastUrl = location.href
  new MutationObserver(() => {
    const url = location.href
    if (url !== lastUrl) {
      lastUrl = url
      // Re-run navigation injection after navigation
      setTimeout(() => addNavigation(), 100)
    }
  }).observe(document, { subtree: true, childList: true })
})()

View File

@ -1,48 +0,0 @@
/* Navigation links for regular docs pages */
/* Targets the <nav class="custom-nav-links"> element that
   inject-navigation.js adds to the site header. */
.custom-nav-links {
  display: inline-flex;
  align-items: center;
  gap: 0.5rem;
  margin: 0 1rem;
}
.custom-nav-links .nav-link {
  display: inline-flex;
  align-items: center;
  padding: 0.5rem 0.875rem;
  border-radius: 0.375rem;
  color: var(--sl-color-gray-2);
  text-decoration: none;
  font-weight: 500;
  font-size: 0.875rem;
  transition: all 0.2s ease;
  white-space: nowrap;
}
/* Hover and current-page states share the same highlight treatment. */
.custom-nav-links .nav-link:hover {
  color: var(--sl-color-text);
  background: var(--sl-color-gray-6);
}
.custom-nav-links .nav-link[aria-current="page"] {
  color: var(--sl-color-text);
  background: var(--sl-color-gray-6);
}
/* Responsive design */
/* Phones: hide the nav entirely. Tablets: tighten spacing. */
@media (max-width: 768px) {
  .custom-nav-links {
    display: none;
  }
}
@media (min-width: 768px) and (max-width: 1024px) {
  .custom-nav-links {
    margin: 0 0.5rem;
  }
  .custom-nav-links .nav-link {
    padding: 0.375rem 0.625rem;
    font-size: 0.8125rem;
  }
}

View File

@ -1,187 +0,0 @@
#!/usr/bin/env node
/**
* Conditional Cloud Spec Generator
*
* This script conditionally runs the cloud spec generation based on environment variables.
* It's designed to be used in CI/CD pipelines to control when the spec should be updated.
*
* Environment variables:
* - SKIP_CLOUD_SPEC_UPDATE: Skip cloud spec generation entirely
* - FORCE_UPDATE: Force update even if skip is set
* - CI: Detect if running in CI environment
*/
import { spawn } from 'child_process'
import fs from 'fs'
import path from 'path'
import { fileURLToPath } from 'url'
const __filename = fileURLToPath(import.meta.url)
const __dirname = path.dirname(__filename)
// Configuration
// Paths are resolved relative to this script so the generator works
// regardless of the current working directory.
const CONFIG = {
  CLOUD_SPEC_PATH: path.join(__dirname, '../public/openapi/cloud-openapi.json'),
  GENERATOR_SCRIPT: path.join(__dirname, 'generate-cloud-spec.js'),
  FALLBACK_SPEC_PATH: path.join(__dirname, '../public/openapi/openapi.json'),
}
// ANSI escape sequences used to colourise console output.
const colors = {
  reset: '\x1b[0m',
  green: '\x1b[32m',
  yellow: '\x1b[33m',
  cyan: '\x1b[36m',
  gray: '\x1b[90m',
}

/**
 * Print a colour-coded status line.
 * @param {string} message - text to print
 * @param {'info'|'skip'|'run'|'warning'} [type] - selects the coloured
 *   prefix; any other value prints the message with no prefix.
 */
function log(message, type = 'info') {
  let prefix
  switch (type) {
    case 'info':
      prefix = `${colors.cyan} `
      break
    case 'skip':
      prefix = `${colors.gray}⏭️ `
      break
    case 'run':
      prefix = `${colors.green}▶️ `
      break
    case 'warning':
      prefix = `${colors.yellow}⚠️ `
      break
    default:
      prefix = ''
  }
  console.log(`${prefix}${message}${colors.reset}`)
}
/**
 * Decide whether the cloud spec generator should run for this build.
 *
 * Precedence (first match wins):
 *   1. FORCE_UPDATE=true            -> run
 *   2. SKIP_CLOUD_SPEC_UPDATE=true  -> skip
 *   3. pull-request CI builds       -> skip (avoid unnecessary API calls)
 *   4. other CI builds              -> run only on schedule, on manual
 *      dispatch with UPDATE_CLOUD_SPEC=true, or when the spec is missing
 *   5. local dev                    -> run when the spec is missing or
 *      older than 24 hours
 *
 * @returns {Promise<boolean>} true when the generator should be invoked
 */
async function shouldRunGenerator() {
  // Check environment variables
  const skipUpdate = process.env.SKIP_CLOUD_SPEC_UPDATE === 'true'
  const forceUpdate = process.env.FORCE_UPDATE === 'true'
  const isCI = process.env.CI === 'true'
  const isPR = process.env.GITHUB_EVENT_NAME === 'pull_request'
  // Force update overrides all
  if (forceUpdate) {
    log('Force update requested', 'info')
    return true
  }
  // Skip if explicitly requested
  if (skipUpdate) {
    log('Cloud spec update skipped (SKIP_CLOUD_SPEC_UPDATE=true)', 'skip')
    return false
  }
  // Skip in PR builds to avoid unnecessary API calls
  if (isPR) {
    log('Cloud spec update skipped (Pull Request build)', 'skip')
    return false
  }
  // Check if cloud spec already exists
  const specExists = fs.existsSync(CONFIG.CLOUD_SPEC_PATH)
  // In CI, only update if spec doesn't exist or if scheduled/manual trigger
  if (isCI) {
    const isScheduled = process.env.GITHUB_EVENT_NAME === 'schedule'
    const isManualWithUpdate =
      process.env.GITHUB_EVENT_NAME === 'workflow_dispatch' &&
      process.env.UPDATE_CLOUD_SPEC === 'true'
    if (isScheduled || isManualWithUpdate) {
      log('Cloud spec update triggered (scheduled/manual)', 'info')
      return true
    }
    if (!specExists) {
      log('Cloud spec missing, will attempt to generate', 'warning')
      return true
    }
    log('Cloud spec update skipped (CI build, spec exists)', 'skip')
    return false
  }
  // For local development, update if spec is missing or older than 24 hours
  if (!specExists) {
    log('Cloud spec missing, generating...', 'info')
    return true
  }
  // Check if spec is older than 24 hours
  const stats = fs.statSync(CONFIG.CLOUD_SPEC_PATH)
  const ageInHours = (Date.now() - stats.mtime.getTime()) / (1000 * 60 * 60)
  if (ageInHours > 24) {
    log(`Cloud spec is ${Math.round(ageInHours)} hours old, updating...`, 'info')
    return true
  }
  log(`Cloud spec is recent (${Math.round(ageInHours)} hours old), skipping update`, 'skip')
  return false
}
/**
 * Spawn the cloud-spec generator script with `bun`, inheriting stdio.
 * Resolves when the child exits with code 0; rejects on a non-zero exit
 * code or a spawn failure.
 * @returns {Promise<void>}
 */
async function runGenerator() {
  return new Promise((resolve, reject) => {
    log('Running cloud spec generator...', 'run')
    const proc = spawn('bun', [CONFIG.GENERATOR_SCRIPT], {
      stdio: 'inherit',
      env: { ...process.env },
    })
    proc.on('error', reject)
    proc.on('close', (code) => {
      if (code !== 0) {
        reject(new Error(`Generator exited with code ${code}`))
        return
      }
      resolve()
    })
  })
}
/**
 * Copy the bundled fallback spec into place when the cloud spec is absent.
 * @returns {Promise<boolean>} true when the fallback was copied, false when
 *   the cloud spec already exists or no fallback file is available.
 */
async function ensureFallback() {
  const cloudSpecPresent = fs.existsSync(CONFIG.CLOUD_SPEC_PATH)
  const fallbackPresent = fs.existsSync(CONFIG.FALLBACK_SPEC_PATH)
  if (cloudSpecPresent || !fallbackPresent) {
    return false
  }
  log('Using fallback spec as cloud spec', 'warning')
  fs.copyFileSync(CONFIG.FALLBACK_SPEC_PATH, CONFIG.CLOUD_SPEC_PATH)
  return true
}
/**
 * Entry point: decide whether to refresh the cloud spec, run the generator
 * when needed, and fall back to the bundled spec on failure. Always exits
 * with code 0 so a spec problem never breaks the site build.
 */
async function main() {
  try {
    // Determine if we should run the generator
    const shouldRun = await shouldRunGenerator()
    if (shouldRun) {
      try {
        await runGenerator()
        log('Cloud spec generation completed', 'info')
      } catch (error) {
        log(`Cloud spec generation failed: ${error.message}`, 'warning')
        // Try to use fallback.
        // BUG FIX: ensureFallback() is async — without `await` the returned
        // Promise was always truthy, so the success branch logged even when
        // no fallback spec existed.
        if (await ensureFallback()) {
          log('Fallback spec used successfully', 'info')
        } else {
          log('No fallback available, build may fail', 'warning')
          // Don't exit with error - let the build continue
        }
      }
    } else {
      // Ensure we have at least a fallback spec
      if (!fs.existsSync(CONFIG.CLOUD_SPEC_PATH)) {
        await ensureFallback()
      }
    }
    // Always exit successfully to not break the build
    process.exit(0)
  } catch (error) {
    console.error('Unexpected error:', error)
    // Even on error, try to continue the build
    process.exit(0)
  }
}
// Run the script
main()

View File

@ -1,746 +0,0 @@
#!/usr/bin/env node
import fs from 'fs'
import path from 'path'
import { fileURLToPath } from 'url'
// Recreate CommonJS-style __filename/__dirname in this ESM module.
const __filename = fileURLToPath(import.meta.url)
const __dirname = path.dirname(__filename)
// Input: previously generated cloud spec (used only as a reference).
const cloudSpecPath = path.join(
  __dirname,
  '../public/openapi/cloud-openapi.json'
)
// Output: the local (llama.cpp) OpenAPI spec served by the docs site.
const outputPath = path.join(__dirname, '../public/openapi/openapi.json')
console.log(
  '🔧 Fixing Local OpenAPI Spec with Complete Examples and Schemas...'
)
// Read cloud spec as a reference
const cloudSpec = JSON.parse(fs.readFileSync(cloudSpecPath, 'utf8'))
// Convert Swagger 2.0 to OpenAPI 3.0 format for paths
function convertSwaggerPathToOpenAPI3(swaggerPath) {
const openApiPath = {}
Object.keys(swaggerPath || {}).forEach((method) => {
if (typeof swaggerPath[method] === 'object') {
openApiPath[method] = {
...swaggerPath[method],
// Convert parameters
parameters: swaggerPath[method].parameters?.filter(
(p) => p.in !== 'body'
),
// Convert body parameter to requestBody
requestBody: swaggerPath[method].parameters?.find(
(p) => p.in === 'body'
)
? {
required: true,
content: {
'application/json': {
schema: swaggerPath[method].parameters.find(
(p) => p.in === 'body'
).schema,
},
},
}
: undefined,
// Convert responses
responses: {},
}
// Convert responses
Object.keys(swaggerPath[method].responses || {}).forEach((statusCode) => {
const response = swaggerPath[method].responses[statusCode]
openApiPath[method].responses[statusCode] = {
description: response.description,
content: response.schema
? {
'application/json': {
schema: response.schema,
},
}
: undefined,
}
})
}
})
return openApiPath
}
// Create comprehensive local spec
// Base OpenAPI 3.1 document for the local Jan server; `paths` and
// `components.schemas` start empty and are populated further below.
const localSpec = {
  openapi: '3.1.0',
  info: {
    title: 'Jan API',
    description:
      "OpenAI-compatible API for local inference with Jan. Run AI models locally with complete privacy using llama.cpp's high-performance inference engine. Supports GGUF models with CPU and GPU acceleration. No authentication required for local usage.",
    version: '0.3.14',
    contact: {
      name: 'Jan Support',
      url: 'https://jan.ai/support',
      email: 'support@jan.ai',
    },
    license: {
      name: 'Apache 2.0',
      url: 'https://github.com/janhq/jan/blob/main/LICENSE',
    },
  },
  servers: [
    {
      url: 'http://127.0.0.1:1337',
      description: 'Local Jan Server (Default IP)',
    },
    {
      url: 'http://localhost:1337',
      description: 'Local Jan Server (localhost)',
    },
    {
      url: 'http://localhost:8080',
      description: 'Local Jan Server (Alternative Port)',
    },
  ],
  tags: [
    {
      name: 'Models',
      description: 'List and describe available models',
    },
    {
      name: 'Chat',
      description: 'Chat completion endpoints for conversational AI',
    },
    {
      name: 'Completions',
      description: 'Text completion endpoints for generating text',
    },
    {
      name: 'Extras',
      description:
        'Additional utility endpoints for tokenization and text processing',
    },
  ],
  paths: {},
  components: {
    schemas: {},
    securitySchemes: {
      bearerAuth: {
        type: 'http',
        scheme: 'bearer',
        bearerFormat: 'JWT',
        description:
          'Optional: Enter your API key if authentication is enabled. The Bearer prefix will be added automatically.',
      },
    },
  },
}
// Local model examples
// Model IDs used throughout the request examples; LOCAL_MODELS[0] is the
// one referenced by the example payloads below.
const LOCAL_MODELS = [
  'gemma-2-2b-it-Q8_0',
  'llama-3.1-8b-instruct-Q4_K_M',
  'mistral-7b-instruct-v0.3-Q4_K_M',
  'phi-3-mini-4k-instruct-Q4_K_M',
]
// Add completions endpoint with rich examples
// POST /v1/completions — OpenAI-compatible text completion with four named
// request examples (basic, creative, code, streaming).
localSpec.paths['/v1/completions'] = {
  post: {
    tags: ['Completions'],
    summary: 'Create completion',
    description:
      "Creates a completion for the provided prompt and parameters. This endpoint is compatible with OpenAI's completions API.",
    operationId: 'create_completion',
    requestBody: {
      required: true,
      content: {
        'application/json': {
          schema: {
            $ref: '#/components/schemas/CreateCompletionRequest',
          },
          examples: {
            basic: {
              summary: 'Basic Completion',
              description: 'Simple text completion example',
              value: {
                model: LOCAL_MODELS[0],
                prompt: 'Once upon a time',
                max_tokens: 50,
                temperature: 0.7,
              },
            },
            creative: {
              summary: 'Creative Writing',
              description: 'Generate creative content with higher temperature',
              value: {
                model: LOCAL_MODELS[0],
                prompt: 'Write a short poem about coding:',
                max_tokens: 150,
                temperature: 1.0,
                top_p: 0.95,
              },
            },
            code: {
              summary: 'Code Generation',
              description: 'Generate code with lower temperature for accuracy',
              value: {
                model: LOCAL_MODELS[0],
                prompt:
                  '# Python function to calculate fibonacci\ndef fibonacci(n):',
                max_tokens: 200,
                temperature: 0.3,
                stop: ['\n\n', 'def ', 'class '],
              },
            },
            streaming: {
              summary: 'Streaming Response',
              description: 'Stream tokens as they are generated',
              value: {
                model: LOCAL_MODELS[0],
                prompt: 'Explain quantum computing in simple terms:',
                max_tokens: 300,
                temperature: 0.7,
                stream: true,
              },
            },
          },
        },
      },
    },
    responses: {
      200: {
        description: 'Successful Response',
        content: {
          'application/json': {
            schema: {
              $ref: '#/components/schemas/CreateCompletionResponse',
            },
          },
        },
      },
      // 202 documents both a JSON body and an SSE stream for streaming mode.
      202: {
        description: 'Accepted - Request is being processed',
        content: {
          'application/json': {
            schema: {
              $ref: '#/components/schemas/CreateCompletionResponse',
            },
          },
          'text/event-stream': {
            schema: {
              type: 'string',
              format: 'binary',
              description: 'Server-sent events stream for streaming responses',
            },
          },
        },
      },
      422: {
        description: 'Validation Error',
        content: {
          'application/json': {
            schema: {
              $ref: '#/components/schemas/ValidationError',
            },
          },
        },
      },
    },
  },
}
// Add chat completions endpoint with rich examples
// POST /v1/chat/completions — OpenAI-compatible chat completion with five
// named request examples (simple, system, conversation, streaming, json_mode).
localSpec.paths['/v1/chat/completions'] = {
  post: {
    tags: ['Chat'],
    summary: 'Create chat completion',
    description:
      "Creates a model response for the given chat conversation. This endpoint is compatible with OpenAI's chat completions API.",
    operationId: 'create_chat_completion',
    requestBody: {
      required: true,
      content: {
        'application/json': {
          schema: {
            $ref: '#/components/schemas/CreateChatCompletionRequest',
          },
          examples: {
            simple: {
              summary: 'Simple Chat',
              description: 'Basic question and answer',
              value: {
                model: LOCAL_MODELS[0],
                messages: [
                  {
                    role: 'user',
                    content: 'What is the capital of France?',
                  },
                ],
                max_tokens: 100,
                temperature: 0.7,
              },
            },
            system: {
              summary: 'With System Message',
              description: 'Chat with system instructions',
              value: {
                model: LOCAL_MODELS[0],
                messages: [
                  {
                    role: 'system',
                    content:
                      'You are a helpful assistant that speaks like a pirate.',
                  },
                  {
                    role: 'user',
                    content: 'Tell me about the weather today.',
                  },
                ],
                max_tokens: 150,
                temperature: 0.8,
              },
            },
            conversation: {
              summary: 'Multi-turn Conversation',
              description: 'Extended conversation with context',
              value: {
                model: LOCAL_MODELS[0],
                messages: [
                  {
                    role: 'system',
                    content: 'You are a knowledgeable AI assistant.',
                  },
                  {
                    role: 'user',
                    content: 'What is machine learning?',
                  },
                  {
                    role: 'assistant',
                    content:
                      'Machine learning is a subset of artificial intelligence that enables systems to learn and improve from experience without being explicitly programmed.',
                  },
                  {
                    role: 'user',
                    content: 'Can you give me a simple example?',
                  },
                ],
                max_tokens: 200,
                temperature: 0.7,
              },
            },
            streaming: {
              summary: 'Streaming Chat',
              description: 'Stream the response token by token',
              value: {
                model: LOCAL_MODELS[0],
                messages: [
                  {
                    role: 'user',
                    content: 'Write a haiku about programming',
                  },
                ],
                stream: true,
                temperature: 0.9,
              },
            },
            json_mode: {
              summary: 'JSON Response',
              description: 'Request structured JSON output',
              value: {
                model: LOCAL_MODELS[0],
                messages: [
                  {
                    role: 'user',
                    content:
                      'List 3 programming languages with their main use cases in JSON format',
                  },
                ],
                max_tokens: 200,
                temperature: 0.5,
                response_format: {
                  type: 'json_object',
                },
              },
            },
          },
        },
      },
    },
    responses: {
      // Unlike /v1/completions, 200 here also documents an SSE stream body.
      200: {
        description: 'Successful Response',
        content: {
          'application/json': {
            schema: {
              $ref: '#/components/schemas/CreateChatCompletionResponse',
            },
          },
          'text/event-stream': {
            schema: {
              type: 'string',
              format: 'binary',
              description: 'Server-sent events stream for streaming responses',
            },
          },
        },
      },
      202: {
        description: 'Accepted - Request is being processed',
        content: {
          'application/json': {
            schema: {
              $ref: '#/components/schemas/CreateChatCompletionResponse',
            },
          },
          'text/event-stream': {
            schema: {
              type: 'string',
              format: 'binary',
              description: 'Server-sent events stream for streaming responses',
            },
          },
        },
      },
      422: {
        description: 'Validation Error',
        content: {
          'application/json': {
            schema: {
              $ref: '#/components/schemas/ValidationError',
            },
          },
        },
      },
    },
  },
}
// Add models endpoint
// GET /v1/models — lists the example LOCAL_MODELS as the response example.
localSpec.paths['/v1/models'] = {
  get: {
    tags: ['Models'],
    summary: 'List available models',
    description:
      'Lists the currently available models and provides basic information about each one such as the owner and availability.',
    operationId: 'list_models',
    responses: {
      200: {
        description: 'Successful Response',
        content: {
          'application/json': {
            schema: {
              $ref: '#/components/schemas/ModelList',
            },
            example: {
              object: 'list',
              data: LOCAL_MODELS.map((id) => ({
                id: id,
                object: 'model',
                created: 1686935002,
                owned_by: 'jan',
              })),
            },
          },
        },
      },
    },
  },
}
// Add tokenization endpoints
// POST /extras/tokenize — text in, array of token IDs out.
localSpec.paths['/extras/tokenize'] = {
  post: {
    tags: ['Extras'],
    summary: 'Tokenize text',
    description: "Convert text input into tokens using the model's tokenizer.",
    operationId: 'tokenize',
    requestBody: {
      required: true,
      content: {
        'application/json': {
          schema: {
            $ref: '#/components/schemas/TokenizeRequest',
          },
          example: {
            input: 'Hello, world!',
            model: LOCAL_MODELS[0],
          },
        },
      },
    },
    responses: {
      200: {
        description: 'Successful Response',
        content: {
          'application/json': {
            schema: {
              $ref: '#/components/schemas/TokenizeResponse',
            },
            example: {
              tokens: [15339, 11, 1917, 0],
            },
          },
        },
      },
    },
  },
}
// POST /extras/tokenize/count — same request schema, returns only a count.
localSpec.paths['/extras/tokenize/count'] = {
  post: {
    tags: ['Extras'],
    summary: 'Count tokens',
    description: 'Count the number of tokens in the provided text.',
    operationId: 'count_tokens',
    requestBody: {
      required: true,
      content: {
        'application/json': {
          schema: {
            $ref: '#/components/schemas/TokenizeRequest',
          },
          example: {
            input: 'How many tokens does this text have?',
            model: LOCAL_MODELS[0],
          },
        },
      },
    },
    responses: {
      200: {
        description: 'Successful Response',
        content: {
          'application/json': {
            schema: {
              $ref: '#/components/schemas/TokenCountResponse',
            },
            example: {
              count: 8,
            },
          },
        },
      },
    },
  },
}
// Copy ALL necessary schemas from cloud spec
// Allow-list of schema names to pull from `cloudSpec` (defined earlier in
// this script, outside this excerpt) into the local spec.
const schemasToInclude = [
  // Request/Response schemas
  'CreateChatCompletionRequest',
  'CreateChatCompletionResponse',
  'CreateCompletionRequest',
  'CreateCompletionResponse',
  'ChatCompletionRequestMessage',
  'ChatCompletionRequestSystemMessage',
  'ChatCompletionRequestUserMessage',
  'ChatCompletionRequestAssistantMessage',
  'ChatCompletionResponseMessage',
  'ChatCompletionResponseChoice',
  'CompletionChoice',
  'CompletionUsage',
  'ModelList',
  'ModelData',
  'ValidationError',
  // Additional message types
  'ChatCompletionRequestFunctionMessage',
  'ChatCompletionRequestToolMessage',
  'ChatCompletionRequestMessageContentPart',
  'ChatCompletionRequestMessageContentPartText',
  'ChatCompletionRequestMessageContentPartImage',
  // Function calling
  'ChatCompletionFunction',
  'ChatCompletionFunctionCall',
  'ChatCompletionTool',
  'ChatCompletionToolCall',
  'ChatCompletionNamedToolChoice',
  // Response format
  'ChatCompletionRequestResponseFormat',
  // Logprobs
  'ChatCompletionLogprobs',
  'ChatCompletionLogprobToken',
  'ChatCompletionTopLogprobToken',
]
// Copy schemas from cloud spec (handle both definitions and schemas)
// Supports both Swagger 2.0 (`definitions`) and OpenAPI 3 (`components.schemas`).
if (cloudSpec.definitions || cloudSpec.components?.schemas) {
  const sourceSchemas =
    cloudSpec.definitions || cloudSpec.components?.schemas || {}
  schemasToInclude.forEach((schemaName) => {
    if (sourceSchemas[schemaName]) {
      // JSON round-trip makes a deep clone so later edits don't mutate cloudSpec.
      localSpec.components.schemas[schemaName] = JSON.parse(
        JSON.stringify(sourceSchemas[schemaName])
      )
    }
  })
  // Also copy any schemas that are referenced by the included schemas
  // Worklist computes the transitive closure of $ref dependencies.
  const processedSchemas = new Set(schemasToInclude)
  const schemasToProcess = [...schemasToInclude]
  while (schemasToProcess.length > 0) {
    const currentSchema = schemasToProcess.pop()
    const schema = localSpec.components.schemas[currentSchema]
    if (!schema) continue
    // Find all $ref references
    // Scans the serialized schema; `[^"]+` ends each match at the closing quote.
    const schemaString = JSON.stringify(schema)
    const refPattern = /#\/(?:definitions|components\/schemas)\/([^"]+)/g
    let match
    while ((match = refPattern.exec(schemaString)) !== null) {
      const referencedSchema = match[1]
      if (
        !processedSchemas.has(referencedSchema) &&
        sourceSchemas[referencedSchema]
      ) {
        localSpec.components.schemas[referencedSchema] = JSON.parse(
          JSON.stringify(sourceSchemas[referencedSchema])
        )
        processedSchemas.add(referencedSchema)
        schemasToProcess.push(referencedSchema)
      }
    }
  }
}
// Add tokenization schemas manually
// These three schemas back the /extras/tokenize* endpoints and are not part
// of the cloud spec, so they are defined inline here.
localSpec.components.schemas.TokenizeRequest = {
  type: 'object',
  properties: {
    input: {
      type: 'string',
      description: 'The text to tokenize',
    },
    model: {
      type: 'string',
      description: 'The model to use for tokenization',
      enum: LOCAL_MODELS,
    },
  },
  required: ['input'],
}
localSpec.components.schemas.TokenizeResponse = {
  type: 'object',
  properties: {
    tokens: {
      type: 'array',
      items: {
        type: 'integer',
      },
      description: 'Array of token IDs',
    },
  },
  required: ['tokens'],
}
localSpec.components.schemas.TokenCountResponse = {
  type: 'object',
  properties: {
    count: {
      type: 'integer',
      description: 'Number of tokens',
    },
  },
  required: ['count'],
}
// Update model references in schemas to use local models
// Overrides the copied cloud schemas' `model` property with a LOCAL_MODELS
// enum so docs show local model IDs instead of cloud ones.
if (
  localSpec.components.schemas.CreateChatCompletionRequest?.properties?.model
) {
  localSpec.components.schemas.CreateChatCompletionRequest.properties.model = {
    ...localSpec.components.schemas.CreateChatCompletionRequest.properties
      .model,
    enum: LOCAL_MODELS,
    example: LOCAL_MODELS[0],
    description: `ID of the model to use. Available models: ${LOCAL_MODELS.join(', ')}`,
  }
}
if (localSpec.components.schemas.CreateCompletionRequest?.properties?.model) {
  localSpec.components.schemas.CreateCompletionRequest.properties.model = {
    ...localSpec.components.schemas.CreateCompletionRequest.properties.model,
    enum: LOCAL_MODELS,
    example: LOCAL_MODELS[0],
    description: `ID of the model to use. Available models: ${LOCAL_MODELS.join(', ')}`,
  }
}
// Fix all $ref references to use components/schemas instead of definitions
/**
 * Recursively rewrite Swagger 2.0 '#/definitions/...' $refs to their
 * OpenAPI 3 '#/components/schemas/...' equivalents.
 *
 * Returns a new structure for arrays/objects; primitives are returned as-is.
 */
function fixReferences(obj) {
  // Strings are where $ref values live — rewrite every occurrence.
  if (typeof obj === 'string') {
    return obj.replace(/#\/definitions\//g, '#/components/schemas/')
  }
  // Arrays: rebuild element by element.
  if (Array.isArray(obj)) {
    return obj.map((element) => fixReferences(element))
  }
  // Plain objects: rebuild with each value fixed recursively.
  if (obj !== null && typeof obj === 'object') {
    const rebuilt = {}
    for (const [key, value] of Object.entries(obj)) {
      rebuilt[key] = fixReferences(value)
    }
    return rebuilt
  }
  // Numbers, booleans, null, undefined pass through untouched.
  return obj
}
// Apply reference fixes
// Normalize every $ref in paths and schemas to the OpenAPI 3 location.
localSpec.paths = fixReferences(localSpec.paths)
localSpec.components.schemas = fixReferences(localSpec.components.schemas)
// Add x-jan-local-features
// Vendor extension describing the local llama.cpp engine's capabilities.
localSpec['x-jan-local-features'] = {
  engine: 'llama.cpp',
  features: [
    'GGUF model support',
    'CPU and GPU acceleration',
    'Quantized model support (Q4, Q5, Q8)',
    'Metal acceleration on macOS',
    'CUDA support on NVIDIA GPUs',
    'ROCm support on AMD GPUs',
    'AVX/AVX2/AVX512 optimizations',
    'Memory-mapped model loading',
  ],
  privacy: {
    local_processing: true,
    no_telemetry: true,
    offline_capable: true,
  },
  model_formats: ['GGUF', 'GGML'],
  default_settings: {
    context_length: 4096,
    batch_size: 512,
    threads: 'auto',
  },
}
// Write the fixed spec
// `fs` and `outputPath` are defined earlier in this script, outside this excerpt.
fs.writeFileSync(outputPath, JSON.stringify(localSpec, null, 2), 'utf8')
console.log('✅ Local OpenAPI spec fixed successfully!')
console.log(`📁 Output: ${outputPath}`)
console.log(`📊 Endpoints: ${Object.keys(localSpec.paths).length}`)
console.log(`📊 Schemas: ${Object.keys(localSpec.components.schemas).length}`)
// Summary: count every named request-body example across all paths/methods.
console.log(
  `🎯 Examples: ${Object.keys(localSpec.paths).reduce((count, path) => {
    return (
      count +
      Object.keys(localSpec.paths[path]).reduce((c, method) => {
        const examples =
          localSpec.paths[path][method]?.requestBody?.content?.[
            'application/json'
          ]?.examples
        return c + (examples ? Object.keys(examples).length : 0)
      }, 0)
    )
  }, 0)}`
)

View File

@ -1,421 +0,0 @@
#!/usr/bin/env node
import fs from 'fs'
import path from 'path'
import { fileURLToPath } from 'url'
// Get current directory in ES modules
// ES modules have no __filename/__dirname globals; derive them from import.meta.
const __filename = fileURLToPath(import.meta.url)
const __dirname = path.dirname(__filename)
// All tunables in one place; every URL can be overridden via environment
// variables so the script works against prod, staging, local, or minikube.
const CONFIG = {
  // Jan Server API spec URL - change this for different environments
  JAN_SERVER_SPEC_URL:
    process.env.JAN_SERVER_SPEC_URL ||
    'https://api.jan.ai/api/swagger/doc.json',
  // Server URLs for different environments
  SERVERS: {
    production: {
      url: process.env.JAN_SERVER_PROD_URL || 'https://api.jan.ai/v1',
      description: 'Jan Server API (Production)',
    },
    staging: {
      url:
        process.env.JAN_SERVER_STAGING_URL || 'https://staging-api.jan.ai/v1',
      description: 'Jan Server API (Staging)',
    },
    local: {
      url: process.env.JAN_SERVER_LOCAL_URL || 'http://localhost:8000/v1',
      description: 'Jan Server (Local Development)',
    },
    minikube: {
      url:
        process.env.JAN_SERVER_MINIKUBE_URL ||
        'http://jan-server.local:8000/v1',
      description: 'Jan Server (Minikube)',
    },
  },
  // Output file path
  OUTPUT_PATH: path.join(__dirname, '../public/openapi/cloud-openapi.json'),
  // Fallback to local spec if fetch fails
  FALLBACK_SPEC_PATH: path.join(__dirname, '../public/openapi/openapi.json'),
  // Request timeout in milliseconds
  FETCH_TIMEOUT: 10000,
}
// Model examples for Jan Server (vLLM deployment)
// Used for schema enums and request examples; first entry is the primary model.
const MODEL_EXAMPLES = [
  'llama-3.1-8b-instruct',
  'mistral-7b-instruct-v0.3',
  'gemma-2-9b-it',
  'qwen2.5-7b-instruct',
]
// =============================================================================
// UTILITY FUNCTIONS
// =============================================================================
// ANSI escape codes used to colorize console output in log() below.
const colors = {
  reset: '\x1b[0m',
  green: '\x1b[32m',
  yellow: '\x1b[33m',
  red: '\x1b[31m',
  cyan: '\x1b[36m',
  bright: '\x1b[1m',
}
/**
 * Print a message with an ANSI-colored prefix chosen by `type`
 * ('success' | 'warning' | 'error' | 'info'); unknown types get no prefix.
 */
function log(message, type = 'info') {
  // Lookup table of colored prefixes, keyed by message type.
  const prefixes = {
    success: `${colors.green}`,
    warning: `${colors.yellow}⚠️ `,
    error: `${colors.red}`,
    info: `${colors.cyan} `,
  }
  const prefix = prefixes[type] || ''
  console.log(`${prefix} ${message}${colors.reset}`)
}
/**
 * fetch() wrapper that aborts the request after CONFIG.FETCH_TIMEOUT ms.
 * Resolves with the Response on success; rejects with an AbortError on
 * timeout or with the underlying network error otherwise.
 */
async function fetchWithTimeout(url, options = {}) {
  const controller = new AbortController()
  const timer = setTimeout(() => controller.abort(), CONFIG.FETCH_TIMEOUT)
  try {
    return await fetch(url, {
      ...options,
      signal: controller.signal,
    })
  } finally {
    // Release the timer on success and failure alike.
    clearTimeout(timer)
  }
}
// =============================================================================
// SPEC ENHANCEMENT FUNCTIONS
// =============================================================================
// Overwrite the fetched spec's info/servers/security/tags with Jan Server
// branding and the environment-aware server list from CONFIG. Mutates and
// returns the same spec object.
function enhanceSpecWithBranding(spec) {
  // Update info section with Jan Server branding
  spec.info = {
    ...spec.info,
    'title': '👋Jan Server API',
    'description':
      'OpenAI-compatible API for Jan Server powered by vLLM. High-performance, scalable inference service with automatic batching and optimized memory management.',
    'version': spec.info?.version || '1.0.0',
    'x-logo': {
      url: 'https://jan.ai/logo.png',
      altText: '👋Jan Server API',
    },
    'contact': {
      name: 'Jan Server Support',
      url: 'https://jan.ai/support',
      email: 'support@jan.ai',
    },
    'license': {
      name: 'Apache 2.0',
      url: 'https://github.com/menloresearch/jan/blob/main/LICENSE',
    },
  }
  // Update servers with our configured endpoints
  spec.servers = Object.values(CONFIG.SERVERS)
  // Add global security requirement
  spec.security = [{ bearerAuth: [] }]
  // Add tags for better organization
  spec.tags = [
    { name: 'Models', description: 'List and describe available models' },
    {
      name: 'Chat',
      description: 'Chat completion endpoints for conversational AI',
    },
    { name: 'Completions', description: 'Text completion endpoints' },
    { name: 'Embeddings', description: 'Generate embeddings for text' },
    { name: 'Usage', description: 'Monitor API usage and quotas' },
  ]
  return spec
}
/**
 * Register the bearer-token security scheme on the spec, creating the
 * `components.securitySchemes` containers if absent. Existing schemes other
 * than `bearerAuth` are preserved. Mutates and returns the same spec object.
 */
function enhanceSecuritySchemes(spec) {
  // Lazily create the container objects so the assignment below is safe.
  spec.components = spec.components || {}
  spec.components.securitySchemes = spec.components.securitySchemes || {}
  spec.components.securitySchemes.bearerAuth = {
    type: 'http',
    scheme: 'bearer',
    bearerFormat: 'JWT',
    description:
      'Enter your Jan Server API key. Configure authentication in your server settings.',
  }
  return spec
}
/**
 * Stamp example/enum metadata for MODEL_EXAMPLES onto the `model` property
 * of the OpenAI-compatible request schemas. Handles both plain string fields
 * and anyOf-wrapped string fields. Mutates and returns the same spec object.
 */
function addModelExamples(spec) {
  const primaryModel = MODEL_EXAMPLES[0]
  // Decorate one schema's `model` field with example, description and enum.
  const updateModelField = (modelField) => {
    if (!modelField) return
    modelField.example = primaryModel
    modelField.description = `ID of the model to use. Available models: ${MODEL_EXAMPLES.join(', ')}`
    if (modelField.anyOf && modelField.anyOf[0]?.type === 'string') {
      // anyOf form: put example/enum on the string alternative.
      modelField.anyOf[0].example = primaryModel
      modelField.anyOf[0].enum = MODEL_EXAMPLES
    } else if (modelField.type === 'string') {
      modelField.enum = MODEL_EXAMPLES
    }
  }
  const schemas = spec.components?.schemas || {}
  // The three request schemas that carry a `model` field.
  const targetSchemas = [
    'CreateCompletionRequest',
    'CreateChatCompletionRequest',
    'CreateEmbeddingRequest',
  ]
  for (const schemaName of targetSchemas) {
    const modelField = schemas[schemaName]?.properties?.model
    if (modelField) {
      updateModelField(modelField)
    }
  }
  return spec
}
/**
 * Attach canned request-body examples (using the primary MODEL_EXAMPLES
 * entry) to every JSON request body in the spec, choosing the example set
 * by the endpoint path. Mutates and returns the same spec object.
 */
function addRequestExamples(spec) {
  const primaryModel = MODEL_EXAMPLES[0]
  // Example request bodies, keyed by endpoint family.
  const examples = {
    completion: {
      'text-completion': {
        summary: 'Text Completion Example',
        description: `Complete text using ${primaryModel}`,
        value: {
          model: primaryModel,
          prompt: 'Once upon a time,',
          max_tokens: 50,
          temperature: 0.7,
          top_p: 0.9,
          stream: false,
        },
      },
    },
    chatCompletion: {
      'simple-chat': {
        summary: 'Simple Chat Example',
        description: `Chat completion using ${primaryModel}`,
        value: {
          model: primaryModel,
          messages: [
            { role: 'user', content: 'What is the capital of France?' },
          ],
          max_tokens: 100,
          temperature: 0.7,
          stream: false,
        },
      },
    },
    embedding: {
      'text-embedding': {
        summary: 'Text Embedding Example',
        description: `Generate embeddings using ${primaryModel}`,
        value: {
          model: primaryModel,
          input: 'The quick brown fox jumps over the lazy dog',
        },
      },
    },
  }
  // Walk every operation and attach the matching example set, skipping
  // operations without a JSON request body (e.g. GET endpoints).
  for (const [path, methods] of Object.entries(spec.paths || {})) {
    for (const method of Object.keys(methods || {})) {
      const operation = methods[method]
      const jsonBody = operation.requestBody?.content?.['application/json']
      if (!jsonBody) continue
      if (path.includes('/completions') && !path.includes('/chat')) {
        jsonBody.examples = examples.completion
      } else if (path.includes('/chat/completions')) {
        jsonBody.examples = examples.chatCompletion
      } else if (path.includes('/embeddings')) {
        jsonBody.examples = examples.embedding
      }
    }
  }
  return spec
}
/**
 * Attach the `x-jan-server-features` vendor extension describing the vLLM
 * engine, scaling and request limits. Mutates and returns the same spec.
 */
function addCloudFeatures(spec) {
  // Engine capabilities advertised for the vLLM deployment.
  const vllm = {
    version: '0.5.0',
    features: [
      'PagedAttention for efficient memory management',
      'Continuous batching for high throughput',
      'Tensor parallelism for multi-GPU serving',
      'Quantization support (AWQ, GPTQ, SqueezeLLM)',
      'Speculative decoding',
      'LoRA adapter support',
    ],
  }
  // Autoscaling envelope for the serving fleet.
  const scaling = {
    auto_scaling: true,
    min_replicas: 1,
    max_replicas: 100,
    target_qps: 100,
  }
  // Per-request hard limits.
  const limits = {
    max_tokens_per_request: 32768,
    max_batch_size: 256,
    timeout_seconds: 300,
  }
  spec['x-jan-server-features'] = { vllm, scaling, limits }
  return spec
}
// =============================================================================
// MAIN FUNCTIONS
// =============================================================================
// Fetch the Jan Server OpenAPI spec with a timeout. On failure, fall back to
// the bundled local spec unless FORCE_UPDATE=true, in which case rethrow.
async function fetchJanServerSpec() {
  log(`Fetching Jan Server spec from: ${CONFIG.JAN_SERVER_SPEC_URL}`)
  try {
    const response = await fetchWithTimeout(CONFIG.JAN_SERVER_SPEC_URL)
    if (!response.ok) {
      throw new Error(`HTTP ${response.status}: ${response.statusText}`)
    }
    const spec = await response.json()
    log('Successfully fetched Jan Server specification', 'success')
    return spec
  } catch (error) {
    log(`Failed to fetch Jan Server spec: ${error.message}`, 'warning')
    // If FORCE_UPDATE is set, don't use fallback - fail instead
    if (process.env.FORCE_UPDATE === 'true') {
      log('Force update requested - not using fallback', 'error')
      throw error
    }
    log(`Falling back to local spec: ${CONFIG.FALLBACK_SPEC_PATH}`, 'warning')
    if (fs.existsSync(CONFIG.FALLBACK_SPEC_PATH)) {
      const fallbackSpec = JSON.parse(
        fs.readFileSync(CONFIG.FALLBACK_SPEC_PATH, 'utf8')
      )
      log('Using local fallback specification', 'warning')
      return fallbackSpec
    } else {
      throw new Error('No fallback spec available')
    }
  }
}
// Orchestrator: fetch the spec, apply every enhancement pass, ensure per-path
// security, and write the result to CONFIG.OUTPUT_PATH. Returns true on
// success, false on failure (used for the process exit code below).
async function generateCloudSpec() {
  console.log(
    `${colors.bright}${colors.cyan}🚀 Jan Server API Spec Generator${colors.reset}`
  )
  console.log(
    `${colors.cyan}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${colors.reset}`
  )
  console.log(`📡 Source: ${CONFIG.JAN_SERVER_SPEC_URL}`)
  console.log(`📁 Output: ${CONFIG.OUTPUT_PATH}`)
  console.log(`🏗️ Servers: ${Object.keys(CONFIG.SERVERS).join(', ')}`)
  console.log('')
  try {
    // Fetch the real Jan Server specification
    let spec = await fetchJanServerSpec()
    // Apply all enhancements
    spec = enhanceSpecWithBranding(spec)
    spec = enhanceSecuritySchemes(spec)
    spec = addModelExamples(spec)
    spec = addRequestExamples(spec)
    spec = addCloudFeatures(spec)
    // Ensure all paths have security requirements
    // Only adds bearerAuth where an operation declares no security of its own.
    Object.keys(spec.paths || {}).forEach((path) => {
      Object.keys(spec.paths[path] || {}).forEach((method) => {
        const operation = spec.paths[path][method]
        if (!operation.security) {
          operation.security = [{ bearerAuth: [] }]
        }
      })
    })
    // Write the enhanced specification
    fs.writeFileSync(CONFIG.OUTPUT_PATH, JSON.stringify(spec, null, 2), 'utf8')
    log('Jan Server specification generated successfully!', 'success')
    console.log(`📁 Output: ${CONFIG.OUTPUT_PATH}`)
    console.log('\n📊 Summary:')
    console.log(` - Endpoints: ${Object.keys(spec.paths || {}).length}`)
    console.log(` - Servers: ${spec.servers?.length || 0}`)
    console.log(` - Models: ${MODEL_EXAMPLES.length}`)
    console.log(` - Security: Bearer token authentication`)
    console.log(
      ` - Engine: vLLM (${spec['x-jan-server-features']?.vllm?.version || 'unknown'})`
    )
    return true
  } catch (error) {
    log(
      `Failed to generate Jan Server specification: ${error.message}`,
      'error'
    )
    console.log('\n🔧 Troubleshooting:')
    console.log(' 1. Check your internet connection')
    console.log(
      ` 2. Verify Jan Server is accessible at: ${CONFIG.JAN_SERVER_SPEC_URL}`
    )
    console.log(' 3. Check if you need to set environment variables:')
    console.log(' - JAN_SERVER_SPEC_URL')
    console.log(' - JAN_SERVER_PROD_URL')
    console.log(' - JAN_SERVER_LOCAL_URL')
    return false
  }
}
// =============================================================================
// EXECUTION
// =============================================================================
// Show configuration on startup
if (process.env.NODE_ENV !== 'test') {
  console.log(`${colors.cyan}🔧 Configuration:${colors.reset}`)
  console.log(` Spec URL: ${CONFIG.JAN_SERVER_SPEC_URL}`)
  console.log(` Timeout: ${CONFIG.FETCH_TIMEOUT}ms`)
  console.log(` Servers: ${Object.keys(CONFIG.SERVERS).length} configured`)
  if (process.env.FORCE_UPDATE === 'true') {
    console.log(` ${colors.yellow}Force Update: ENABLED${colors.reset}`)
  }
  console.log('')
}
// Run the generator
// Top-level await — this file must be loaded as an ES module.
const success = await generateCloudSpec()
process.exit(success ? 0 : 1)

Binary file not shown.

Before

Width:  |  Height:  |  Size: 163 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.4 MiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 562 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 598 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 306 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 85 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 120 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 450 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 118 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 453 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 714 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 554 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 377 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 453 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 616 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 742 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 544 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 404 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 4.4 MiB

Some files were not shown because too many files have changed in this diff Show More