new tutorials, reorganization, and pages

@@ -20,7 +20,7 @@
     "embla-carousel-react": "^8.0.0",
     "fs": "^0.0.1-security",
     "gray-matter": "^4.0.3",
-    "lucide-react": "^0.372.0",
+    "lucide-react": "^0.522.0",
     "next": "^14.1.4",
     "next-seo": "^6.5.0",
     "next-sitemap": "^4.2.3",

@@ -1503,7 +1503,7 @@
     "lru-cache": ["lru-cache@10.4.3", "", {}, "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ=="],

-    "lucide-react": ["lucide-react@0.372.0", "", { "peerDependencies": { "react": "^16.5.1 || ^17.0.0 || ^18.0.0" } }, "sha512-0cKdqmilHXWUwWAWnf6CrrjHD8YaqPMtLrmEHXolZusNTr9epULCsiJwIOHk2q1yFxdEwd96D4zShlAj67UJdA=="],
+    "lucide-react": ["lucide-react@0.522.0", "", { "peerDependencies": { "react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0" } }, "sha512-jnJbw974yZ7rQHHEFKJOlWAefG3ATSCZHANZxIdx8Rk/16siuwjgA4fBULpXEAWx/RlTs3FzmKW/udWUuO0aRw=="],

     "lz-string": ["lz-string@1.5.0", "", { "bin": { "lz-string": "bin/bin.js" } }, "sha512-h5bgJWpxJNswbU7qCrV0tIKQCaS3blPDrqKWx+QxzuzL1zGUzij9XCWLrSLsJPu5t+eWA/ycetzYAO5IOMcWAQ=="],
@@ -11,6 +11,11 @@
     "type": "page",
     "title": "Documentation"
   },
+  "local-server": {
+    "type": "page",
+    "title": "Jan Local Server",
+    "display": "hidden"
+  },
   "cortex": {
     "type": "page",
     "title": "Cortex",
@@ -17,17 +17,13 @@
   "title": "TUTORIALS",
   "type": "separator"
 },
 "quickstart": "Quickstart",
 "remote-models": "Connect to Remote Models",
 "server-examples": "Integrations",

 "explanation-separator": {
   "title": "EXPLANATION",
   "type": "separator"
 },
 "llama-cpp": "Local AI Engine",
 "api-server": "Server Overview",
 "data-folder": "Jan Data Folder",
 "privacy-policy": {
   "type": "page",
   "display": "hidden",

@@ -40,12 +36,14 @@
 },
 "manage-models": "Manage Models",
 "mcp": "Model Context Protocol",
 "mcp-examples": "MCP Examples",

 "reference-separator": {
   "title": "REFERENCE",
   "type": "separator"
 },
 "settings": "Settings",
 "data-folder": "Jan Data Folder",
 "troubleshooting": "Troubleshooting",
 "model-parameters": "Model Parameters",
 "privacy": "Privacy"
docs/src/pages/docs/mcp-examples/search/exa.mdx (new file, 200 lines)
@@ -0,0 +1,200 @@
---
title: Exa Search MCP
description: Connect Jan to real-time web search with Exa's AI-powered search engine.
keywords:
  [
    Jan,
    MCP,
    Model Context Protocol,
    Exa,
    web search,
    real-time search,
    research,
    AI search,
    tool calling,
  ]
---

import { Callout, Steps } from 'nextra/components'

# Using Exa Search MCP with Jan

Exa MCP turns your AI into a research powerhouse. Instead of hallucinating facts or working with stale training data, your model gets real-time access to the web through Exa's AI-powered search engine.

## What You Get

Exa MCP provides eight specialized search tools that actually work:

- **web_search_exa**: General web search with content extraction
- **research_paper_search**: Academic papers and research content
- **company_research**: Deep company analysis and intelligence
- **crawling**: Extract content from specific URLs
- **competitor_finder**: Find business competitors
- **linkedin_search**: Search LinkedIn profiles and companies
- **wikipedia_search_exa**: Wikipedia content retrieval
- **github_search**: Repository and code search

Think of it as giving your AI a PhD in Google-fu.

## Prerequisites

You'll need:
- Jan installed and running
- An Exa API key ([get yours here](https://dashboard.exa.ai/api-keys))
- A model with tool calling capabilities (Claude, GPT-4, or compatible local models)
- Node.js installed on your machine

<Callout type="info">
**Model Compatibility**: Not all models play nice with tools. Claude models work exceptionally well. For local models, ensure tool calling is enabled in Jan's model parameters.
</Callout>

## Setup

### Step 1: Enable MCP in Jan

Navigate to **Settings** > **MCP Servers** and toggle **Allow All MCP Tool Permission** ON.

*[Screenshot placeholder: MCP settings page with toggle enabled]*

### Step 2: Get Your Exa API Key

1. Go to [dashboard.exa.ai/api-keys](https://dashboard.exa.ai/api-keys)
2. Sign up or log in
3. Generate a new API key
4. Copy it somewhere safe

*[Screenshot placeholder: Exa dashboard showing API key generation]*

### Step 3: Add Exa MCP Server

Click the `+` button in the MCP Servers section.

*[Screenshot placeholder: MCP server addition dialog]*

Enter the following configuration:

- **Server Name**: `exa`
- **Command**: `npx`
- **Arguments**: `-y exa-mcp-server`
- **Environment Variables**:
  - Key: `EXA_API_KEY`
  - Value: `your-actual-api-key-here`
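
Before saving, you can sanity-check the same configuration from a terminal, since Jan essentially runs this command with the environment variable set. A minimal check (assumes Node.js is on your PATH; the key below is a placeholder):

```bash
# Should start the Exa MCP server and wait for a client on stdio; Ctrl+C to stop.
EXA_API_KEY=your-actual-api-key-here npx -y exa-mcp-server
```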
*[Screenshot placeholder: Filled MCP server configuration form]*

### Step 4: Verify Connection

Check that the server shows as active in your MCP Servers list.

*[Screenshot placeholder: Active Exa MCP server in the list]*

### Step 5: Configure Your Model

Use a model provider that supports tool calling. For cloud models:

- **Anthropic**: Ensure tool calling is enabled in Model Providers settings
- **OpenAI**: Tool calling should work by default
- **OpenRouter**: Works with tool-capable models like Claude

*[Screenshot placeholder: Model provider settings with tool calling enabled]*

For local models, enable tool calling in **Model Parameters** > **Model Capabilities**.

## Using Exa MCP

Once configured, start a new chat with your tool-enabled model. The available Exa tools will appear in the tools section.

*[Screenshot placeholder: Chat interface showing Exa MCP tools available]*

### Example Queries

Try these to see Exa MCP in action:

**Research Query:**
```
Find recent research papers about transformer model optimization published in 2024
```

**Company Intelligence:**
```
Research the company Anthropic - their business model, recent funding, and key competitors
```

**Technical Search:**
```
Find GitHub repositories for modern React state management libraries with good TypeScript support
```

**Content Analysis:**
```
Extract and summarize the content from this article: [URL]
```

*[Screenshot placeholder: Example of Exa MCP performing a search with results]*

## Advanced Configuration

### Selective Tool Enabling

If you don't want all eight tools (and who needs that much power?), you can enable specific ones:

- **Command**: `npx`
- **Arguments**: `-y exa-mcp-server --tools=web_search_exa,research_paper_search,company_research`
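
The same flag works in a one-off terminal run if you want to confirm which tools get exposed (a sketch, reusing the placeholder key from above):

```bash
# Only the three listed tools will be advertised to the client.
EXA_API_KEY=your-actual-api-key-here npx -y exa-mcp-server \
  --tools=web_search_exa,research_paper_search,company_research
```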
### Remote MCP Option

For the truly lazy, use Exa's hosted MCP server instead of running it locally:

- **Server Name**: `exa-remote`
- **Command**: `npx`
- **Arguments**: `-y mcp-remote https://mcp.exa.ai/mcp?exaApiKey=your-api-key`
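
If you try this variant in a terminal first, quote the URL so your shell doesn't try to expand the `?` (a minimal sketch with a placeholder key):

```bash
# mcp-remote bridges Jan's local stdio transport to Exa's hosted endpoint.
npx -y mcp-remote "https://mcp.exa.ai/mcp?exaApiKey=your-api-key"
```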
## Use Cases That Actually Matter

### Academic Research
Perfect for literature reviews, finding recent papers, and staying current with research trends. No more sifting through Google Scholar's ancient interface.

### Competitive Intelligence
Research competitors, analyze market positioning, and gather business intelligence without manual browsing.

### Technical Discovery
Find libraries, tools, and solutions on GitHub. Better than hoping Stack Overflow has the answer.

### Content Curation
Extract and analyze content from specific URLs for research or content creation.

### Professional Networking
Search LinkedIn for potential collaborators, industry experts, or business connections.

## Troubleshooting

**Server won't connect?**
- Verify your API key is correct (no extra spaces or quotes)
- Ensure Node.js is installed
- Restart Jan completely

**Model not using tools?**
- Check that tool calling is enabled for your model
- Try a different model (Claude works best)
- Verify the MCP server is active

**Search results seem off?**
- Exa's AI search is context-aware, so be specific in your queries
- Use natural language rather than keyword stuffing

**API key errors?**
- Double-check your key at [dashboard.exa.ai](https://dashboard.exa.ai)
- Ensure you haven't hit rate limits
- Try regenerating the key if issues persist

<Callout type="warning">
**Rate Limits**: Exa has API rate limits. If you're planning to go full researcher mode, check your plan limits to avoid surprises.
</Callout>

## What's Next

Once you've got Exa MCP running, you'll wonder how you ever did research without it. The combination of Jan's privacy-focused approach with Exa's real-time search capabilities creates a research environment that's both powerful and under your control.

No more tab explosions. No more bookmark chaos. Just ask, and get answers backed by current, relevant sources.

*[Screenshot placeholder: Final example showing a complex research query with multiple Exa tools being used]*
docs/src/pages/docs/mcp-examples/search/perplexity.mdx (new file, 245 lines)
@@ -0,0 +1,245 @@
---
title: Perplexity MCP
description: Connect Jan to Perplexity's AI-powered search and reasoning capabilities.
keywords:
  [
    Jan,
    MCP,
    Model Context Protocol,
    Perplexity,
    AI search,
    web search,
    reasoning,
    Sonar,
    real-time search,
    tool calling,
  ]
---

import { Callout, Steps } from 'nextra/components'

# Using Perplexity MCP with Jan

Perplexity MCP brings real-time web search and specialized reasoning to your AI conversations. Instead of your model pretending it knows what happened yesterday, it can actually find out.

## What You Get

Perplexity MCP offers two flavors of intelligence:

### Official Go Implementation
- **perplexity_ask**: Real-time web search using Sonar Pro
- **perplexity_reason**: Complex reasoning with Sonar Reasoning Pro
- Fast, minimal setup with pre-built binaries

### Python Implementation
- **ask_perplexity**: Expert programming assistance and technical explanations
- **chat_perplexity**: Maintains ongoing conversations with full history
- **list_chats_perplexity**: Browse your conversation history
- **read_chat_perplexity**: Retrieve complete chat histories
- **search**: General web search with customizable detail levels

Both give your AI the ability to stop guessing and start knowing.

## Prerequisites

You'll need:
- Jan installed and running
- A Perplexity API key ([get yours here](https://docs.perplexity.ai/docs/getting-started))
- For the Go version: Homebrew (macOS/Linux) or manual binary installation
- For the Python version: Python 3.10+ and the uv package manager (which provides the `uvx` command)
- A model with tool calling capabilities

<Callout type="info">
**API Key Required**: Unlike some search engines that pretend to be free, Perplexity requires an API key. The good news? Their pricing is actually reasonable for the quality you get.
</Callout>

## Setup

### Step 1: Enable MCP in Jan

Navigate to **Settings** > **MCP Servers** and toggle **Allow All MCP Tool Permission** ON.

*[Screenshot placeholder: MCP settings page with toggle enabled]*

### Step 2: Get Your Perplexity API Key

1. Go to [docs.perplexity.ai](https://docs.perplexity.ai/docs/getting-started)
2. Sign up or log in to get API access
3. Generate your API key
4. Keep it handy

*[Screenshot placeholder: Perplexity API key generation page]*

### Step 3: Choose Your Installation Method

You have two solid options. Pick your poison:

#### Option A: Go Implementation (Recommended for Speed)

**Using Homebrew (macOS/Linux):**
```bash
brew install alcova-ai/tap/perplexity-mcp
```

**Manual Installation:**
Download the appropriate binary from [Perplexity MCP releases](https://github.com/Alcova-AI/perplexity-mcp/releases) and place it in your PATH.

#### Option B: Python Implementation (More Features)

**Install uv if you haven't already (it ships with the `uvx` command):**
```bash
pip install uv
```

### Step 4: Configure MCP Server

Click the `+` button in the MCP Servers section and choose your configuration:

*[Screenshot placeholder: MCP server addition dialog]*

#### For Go Implementation:

- **Server Name**: `perplexity-go`
- **Command**: `perplexity-mcp`
- **Arguments**: (leave empty)
- **Environment Variables**:
  - Key: `PERPLEXITY_API_KEY`
  - Value: `your-api-key-here`
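
To confirm the binary and key work before involving Jan, you can launch the server once from a terminal (a quick check; the key is a placeholder):

```bash
# Starts the Go server on stdio; Ctrl+C to stop.
PERPLEXITY_API_KEY=your-api-key-here perplexity-mcp
```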
*[Screenshot placeholder: Go implementation MCP configuration]*

#### For Python Implementation:

- **Server Name**: `perplexity-python`
- **Command**: `uvx`
- **Arguments**: `mcp-perplexity`
- **Environment Variables**:
  - Key: `PERPLEXITY_API_KEY`
  - Value: `your-api-key-here`
  - Key: `PERPLEXITY_MODEL`
  - Value: `sonar-pro` (optional, this is the default)
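
As with the Go version, a one-off terminal run makes a handy smoke test (placeholder key; `PERPLEXITY_MODEL` can be omitted since `sonar-pro` is the default):

```bash
# uvx fetches and runs the mcp-perplexity package in an isolated environment.
PERPLEXITY_API_KEY=your-api-key-here PERPLEXITY_MODEL=sonar-pro uvx mcp-perplexity
```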
*[Screenshot placeholder: Python implementation MCP configuration]*

### Step 5: Verify Connection

Check that your chosen server shows as active in the MCP Servers list.

*[Screenshot placeholder: Active Perplexity MCP server in the list]*

### Step 6: Configure Your Model

Ensure your model supports tool calling. For cloud models, verify tool calling is enabled in your provider settings.

*[Screenshot placeholder: Model provider settings with tool calling enabled]*

## Using Perplexity MCP

Start a new chat with a tool-enabled model. The available Perplexity tools will appear in the tools section.

*[Screenshot placeholder: Chat interface showing Perplexity MCP tools available]*

### Example Queries

**Real-Time News:**
```
What are the latest developments in AI regulation in the EU this week?
```

**Technical Research:**
```
Search for recent best practices in React server components and explain the key differences from client components
```

**Ongoing Research (Python version):**
```
Start a research conversation about quantum computing advancements in 2024. I want to maintain this conversation for follow-up questions.
```

**Code Validation:**
```
Check if using jQuery in 2024 is considered deprecated for modern web development
```

*[Screenshot placeholder: Example of Perplexity MCP performing a search with results]*

## Advanced Configuration

### Custom Models

For the Go implementation, you can specify different models:

- **Arguments**: `--model sonar-reasoning-pro --reasoning-model sonar-pro`
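
In Jan those flags go in the **Arguments** field; the equivalent terminal invocation would be (a sketch, with a placeholder key):

```bash
PERPLEXITY_API_KEY=your-api-key-here perplexity-mcp \
  --model sonar-reasoning-pro --reasoning-model sonar-pro
```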
For the Python implementation, use environment variables:

- **PERPLEXITY_MODEL_ASK**: `sonar-reasoning-pro`
- **PERPLEXITY_MODEL_CHAT**: `sonar-pro`

### Chat History Management (Python Version)

The Python implementation stores chat history locally. Configure the database path:

- **DB_PATH**: `/path/to/your/chats.db`

### Web UI (Python Version)

Enable a web interface for chat management:

- **WEB_UI_ENABLED**: `true`
- **WEB_UI_PORT**: `8050`
- **WEB_UI_HOST**: `127.0.0.1`
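
These are plain environment variables, so they can be set in Jan's Environment Variables form or combined in a one-off terminal run (a sketch; the key and database path are placeholders):

```bash
# Python server with persistent chat history and the web UI enabled.
PERPLEXITY_API_KEY=your-api-key-here \
DB_PATH=/path/to/your/chats.db \
WEB_UI_ENABLED=true WEB_UI_PORT=8050 WEB_UI_HOST=127.0.0.1 \
uvx mcp-perplexity
```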
## Use Cases That Matter

### Research and Fact-Checking
Perfect for journalists, researchers, and anyone who needs current, verified information. No more "according to my training data from 2023" responses.

### Technical Problem-Solving
Get current best practices, recent framework updates, and solutions to bleeding-edge problems. Stack Overflow wishes it was this fresh.

### Market Research
Track industry trends, competitor analysis, and market developments in real-time. Your AI finally knows what happened after its training cutoff.

### Academic Research
Access current papers, recent findings, and evolving scientific discussions. Like having a research assistant who never sleeps.

### Code Modernization
Check if your dependencies are deprecated, find modern alternatives, and stay current with evolving best practices.

## Troubleshooting

**Server won't connect?**
- Verify your API key is valid and properly formatted
- For the Go version: Ensure the binary is in your PATH
- For the Python version: Check that uv and Python 3.10+ are installed
- Restart Jan completely

**API errors?**
- Check your Perplexity API usage limits
- Verify your API key hasn't expired
- Ensure you have sufficient credits

**No search results?**
- Perplexity's search is context-aware, so be specific in your queries
- Check that your model actually supports tool calling
- Try rephrasing your question

**Python version issues?**
- Ensure uv is properly installed: `pip install uv`
- Check your Python version: `python --version` (should be 3.10+)
- Verify the `uvx` command works: `uvx --help`

<Callout type="warning">
**Rate Limits**: Perplexity has usage limits based on your plan. Monitor your usage to avoid hitting limits during intensive research sessions.
</Callout>

## What's Next

With Perplexity MCP running, your AI conversations become grounded in reality. No more hallucinated facts, no more outdated information, no more "I don't have access to real-time data" excuses.

Whether you're researching the latest AI developments, debugging modern frameworks, or tracking current events, your AI assistant finally has the tools to give you accurate, up-to-date answers.

Time to retire those bookmark folders. Your AI can find what you need, when you need it.

*[Screenshot placeholder: Complex research query showing Perplexity MCP tools working together]*
@@ -1,143 +0,0 @@
---
title: Installation
description: Jan is an open-source ChatGPT-alternative and self-hosted AI platform - build and run AI on your own desktop or server.
sidebar_position: 2
keywords:
  [
    Jan,
    Customizable Intelligence, LLM,
    local AI,
    privacy focus,
    free and open source,
    private and offline,
    conversational AI,
    no-subscription fee,
    large language models,
    quickstart,
    getting started,
    using AI model,
    installation,
  ]
---

import { Callout, Steps } from 'nextra/components'
import { Settings } from 'lucide-react'

# Quickstart

<Steps>

### Step 1: Install Jan

1. [Download Jan](/download)
2. Install the app on your system ([Mac](/docs/desktop/mac), [Windows](/docs/desktop/windows), [Linux](/docs/desktop/linux))
3. Launch Jan

Once installed, you'll see Jan's interface with no pre-installed models. You can:
- Download and run local AI models
- Connect to cloud-based AI model providers if desired

<br/>

### Step 2: Download a Model

Jan offers various local AI models, from nimble lightweights to hefty powerhouses:
1. Go to the **Hub Tab**, or browse [HuggingFace](https://huggingface.co/models), where you can find even more models alongside their details. (Tip: copy a model's URL or name from HuggingFace, paste it into the Hub Tab, and download it from there.)
2. Browse models and tap any for details (models need to be in GGUF format)
3. Select one that matches your needs & hardware specs
4. Hit **Download** to begin (a progress bar will appear for the duration of the download)

<br/>

<Callout type="warning">
Local models consume your computer's memory and processing power. Choose carefully based on your hardware specifications ([Mac](/docs/desktop/mac#minimum-requirements), [Windows](/docs/desktop/windows#compatibility), [Linux](/docs/desktop/linux#compatibility)).
</Callout>

**Note:** Some Hugging Face models require an access token. Enter yours in **Settings > Model Providers > Llama.cpp > Hugging Face Access Token** before importing.

<br/>

For alternative installation methods, see the [Model Management](/manage-models) section.

### Step 3: Turn on GPU Acceleration (Optional)

While your model downloads, let's supercharge your setup. On **Windows** or **Linux** with a compatible graphics card, you can dramatically boost performance with GPU acceleration.
1. Head to **(<Settings width={16} height={16} style={{display:"inline"}}/>) Settings** > **Hardware**
2. Under **GPUs**, toggle the setting to ON if not already enabled.

<Callout type="info">
Install all required dependencies and drivers before enabling GPU acceleration. Check the **GPU Setup Guide** for [Windows](/docs/desktop/windows#gpu-acceleration) & [Linux](/docs/desktop/linux#gpu-acceleration).
</Callout>
<br/>

### Step 4: Customize Assistant Instructions

With your model ready to roll, you can tailor how it responds by tweaking instructions or model configurations through the [Assistants feature](/docs/assistants).

<br/>

You can also go to the Assistant tab to manage all of your personalized instructions. The cool thing about these is that you can use them no matter which model you choose.

<br/>

### Step 5: Start Chatting and Update the Settings

Model downloaded? Instructions set? Time to chat. Type your message in the **input field** at the bottom to kickstart the conversation.

Fine-tune your experience by:
- Tweaking [model parameters](/docs/model-parameters) via the **Gear icon** next to your model or in **Assistant Settings**
- Switching models for different tasks through the **model selector** in the **Model** tab or **input field**
- [Creating new threads](/docs/threads#creating-new-thread) with custom instructions and configurations

<br/>

### Step 6: Connect to cloud models (Optional)

Jan plays nice with both open source and cloud models. Connect to OpenAI (GPT-4o, o1), Anthropic (Claude), Groq, Mistral, and others:
1. Open any **Thread**
2. Select a model from the **model selector** dropdown
3. Pick your provider, click the **Gear icon** beside it
4. Grab a valid API key from your chosen provider (make sure it has sufficient credits)
5. Paste your **API Key** into Jan

For detailed setup, check [Remote APIs](/docs/remote-models/openai).

<br/>

</Steps>
@@ -1,86 +1,145 @@
 ---
 title: Start Chatting
-description: Manage your interaction with AI models locally.
+description: Download models and manage your conversations with AI models locally.
 keywords:
   [
     Jan,
     Customizable Intelligence, LLM,
     local AI,
     privacy focus,
     free and open source,
     private and offline,
     conversational AI,
     no-subscription fee,
     large language models,
     LLM,
     chat,
     threads,
     chat history,
     thread history,
+    models,
+    download,
+    installation,
+    conversations,
   ]
 ---

-import { Callout } from 'nextra/components'
-import { SquarePen, Pencil, Ellipsis, Paintbrush, Trash2 } from 'lucide-react'
+import { Callout, Steps } from 'nextra/components'
+import { SquarePen, Pencil, Ellipsis, Paintbrush, Trash2, Settings } from 'lucide-react'

-# Start Chatting
+# Chat with a Model
+<Steps>

-Jan organizes your conversations with a model into chats or threads, making it easy to track and revisit your interactions. This guide will help you effectively manage your chat history.
+### Step 1: Install Jan

-## Creating New Conversation/Thread
-1. Click **New Chat** (<SquarePen width={16} height={16} style={{display:"inline"}}/>) icon on the bottom left of Jan.
-2. Select your preferred model in **Model Selector** in input field & start chatting.
+1. [Download Jan](/download)
+2. Install the app ([Mac](/docs/desktop/mac), [Windows](/docs/desktop/windows), [Linux](/docs/desktop/linux))
+3. Launch Jan

-<br/>
+### Step 2: Download a Model

-## View Your Chat History
+Jan requires a model to chat. Download one from the Hub:

-1. Once you open Jan, the default screen is **Chat**
-2. On the **left sidebar**, you can:
-   - View your **Conversations** and scroll through your history
-   - Click any chat to open the full conversation
+1. Go to the **Hub Tab**
+2. Browse available models (must be GGUF format)
+3. Select one matching your hardware specs
+4. Click **Download**

-## Favorites and Recents
-
-Jan helps you quickly access important and recent conversations with **Favorites** and **Recents** in the left sidebar:
-- **Favorites**: Pin threads you use often for instant access. Click the three dots icon on the right of the thread and a context menu will pop up with the favorite option for you to click on.
-- **Recents**: See your most recently accessed threads for quick navigation.

 <br/>

-## Edit a Chat Title
-1. Navigate to the **Conversation** that you want to edit title on the sidebar to your left
-2. Hover on the conversation and click on **three dots** (<Ellipsis width={16} height={16} style={{display:"inline"}}/>) icon
-3. Click <Pencil width={16} height={16} style={{display:"inline"}}/> **Rename**
-4. Add new title & save
-
-<br/>
-
-## Delete Thread

 <Callout type="warning">
-There's no undo for chat deletion, so make sure you REALLY want to remove it permanently.
+Models consume memory and processing power. Choose based on your hardware specs.
 </Callout>

-When you want to completely remove a thread:
+**HuggingFace models:** Some require an access token. Add yours in **Settings > Model Providers > Llama.cpp > Hugging Face Access Token**.

-1. Navigate to the **Thread** that you want to delete in left sidebar
-2. Hover on the thread and click on **three dots** (<Ellipsis width={16} height={16} style={{display:"inline"}}/>) icon

+### Step 3: Enable GPU Acceleration (Optional)
+
+For Windows/Linux with compatible graphics cards:
+
+1. Go to **(<Settings width={16} height={16} style={{display:"inline"}}/>) Settings** > **Hardware**
+2. Toggle **GPUs** to ON
+
+<Callout type="info">
+Install required drivers before enabling GPU acceleration. See setup guides for [Windows](/docs/desktop/windows#gpu-acceleration) & [Linux](/docs/desktop/linux#gpu-acceleration).
+</Callout>
+
+### Step 4: Start Chatting
+
+1. Click **New Chat** (<SquarePen width={16} height={16} style={{display:"inline"}}/>) icon
+2. Select your model in the input field dropdown
+3. Type your message and start chatting
+
+</Steps>
+
+## Managing Conversations
+
+Jan organizes conversations into threads for easy tracking and revisiting.
+
+### View Chat History
+
+- **Left sidebar** shows all conversations
+- Click any chat to open the full conversation
+- **Favorites**: Pin important threads for quick access
+- **Recents**: Access recently used threads
+
+### Edit Chat Titles
+
+1. Hover over a conversation in the sidebar
+2. Click **three dots** (<Ellipsis width={16} height={16} style={{display:"inline"}}/>) icon
+3. Click <Pencil width={16} height={16} style={{display:"inline"}}/> **Rename**
+4. Enter new title and save
+
+### Delete Threads
+
+<Callout type="warning">
+Thread deletion is permanent. No undo available.
+</Callout>
+
+**Single thread:**
+1. Hover over thread in sidebar
+2. Click **three dots** (<Ellipsis width={16} height={16} style={{display:"inline"}}/>) icon
+3. Click <Trash2 width={16} height={16} style={{display:"inline"}}/> **Delete**
+
+**All threads:**
+1. Hover over `Recents` category
+2. Click **three dots** (<Ellipsis width={16} height={16} style={{display:"inline"}}/>) icon
+3. Select <Trash2 width={16} height={16} style={{display:"inline"}}/> **Delete All**

 <br/>

+## Advanced Features
+
-### Delete all threads at once
+### Custom Assistant Instructions

-In case you need to remove all conversations at once:
-1. Hover on the `Recents` category and click on **three dots** (<Ellipsis width={16} height={16} style={{display:"inline"}}/>) icon
-2. Select <Trash2 width={16} height={16} style={{display:"inline"}}/> **Delete All**
+Customize how models respond:
+
+1. Use the assistant dropdown in the input field
+2. Or go to the **Assistant tab** to create custom instructions
+3. Instructions work across all models
+
+### Model Parameters
+
+Fine-tune model behavior:
+- Click the **Gear icon** next to your model
+- Adjust parameters in **Assistant Settings**
+- Switch models via the **model selector**
+
+### Connect Cloud Models (Optional)
+
+Connect to OpenAI, Anthropic, Groq, Mistral, and others:
+
+1. Open any thread
+2. Select a cloud model from the dropdown
+3. Click the **Gear icon** beside the provider
+4. Add your API key (ensure sufficient credits)
+
+For detailed setup, see [Remote APIs](/docs/remote-models/openai).
BIN  docs/src/pages/local-server/_assets/add_assistant.png (new file, 163 KiB)
BIN  docs/src/pages/local-server/_assets/anthropic.png (new file, 149 KiB)
BIN  docs/src/pages/local-server/_assets/api-server.png (new file, 598 KiB)
BIN  docs/src/pages/local-server/_assets/api-server2.png (new file, 306 KiB)
BIN  docs/src/pages/local-server/_assets/assistant-add-dialog.png (new file, 85 KiB)
BIN  (path not shown, 120 KiB)
BIN  docs/src/pages/local-server/_assets/assistant-dropdown.png (new file, 450 KiB)
BIN  docs/src/pages/local-server/_assets/assistant-edit-dialog.png (new file, 118 KiB)
BIN  docs/src/pages/local-server/_assets/assistants-ui-overview.png (new file, 453 KiB)
BIN  docs/src/pages/local-server/_assets/cohere.png (new file, 524 KiB)
BIN  docs/src/pages/local-server/_assets/deepseek.png (new file, 147 KiB)
BIN  docs/src/pages/local-server/_assets/extensions-01.png (new file, 160 KiB)
BIN  docs/src/pages/local-server/_assets/extensions-02.png (new file, 142 KiB)
BIN  docs/src/pages/local-server/_assets/extensions-03.png (new file, 140 KiB)
BIN  docs/src/pages/local-server/_assets/extensions-04.png (new file, 162 KiB)
BIN  docs/src/pages/local-server/_assets/extensions-05.png (new file, 185 KiB)
BIN  docs/src/pages/local-server/_assets/extensions-06.png (new file, 185 KiB)
BIN  docs/src/pages/local-server/_assets/extensions-07.png (new file, 187 KiB)
BIN  docs/src/pages/local-server/_assets/extensions-08.png (new file, 187 KiB)
BIN  docs/src/pages/local-server/_assets/extensions-09.png (new file, 192 KiB)
BIN  docs/src/pages/local-server/_assets/extensions-10.png (new file, 187 KiB)
BIN  docs/src/pages/local-server/_assets/google.png (new file, 541 KiB)
BIN  docs/src/pages/local-server/_assets/gpu_accl.png (new file, 257 KiB)
BIN  docs/src/pages/local-server/_assets/groq.png (new file, 537 KiB)
BIN  docs/src/pages/local-server/_assets/hardware.png (new file, 576 KiB)
BIN  docs/src/pages/local-server/_assets/hf-unsloth.png (new file, 1.5 MiB)
BIN  docs/src/pages/local-server/_assets/hf_and_jan.png (new file, 1.2 MiB)
BIN  docs/src/pages/local-server/_assets/hf_token.png (new file, 487 KiB)
BIN  docs/src/pages/local-server/_assets/install-engines-01.png (new file, 170 KiB)
BIN  docs/src/pages/local-server/_assets/install-engines-02.png (new file, 166 KiB)
BIN  docs/src/pages/local-server/_assets/install-engines-03.png (new file, 183 KiB)
BIN  docs/src/pages/local-server/_assets/jan-app-new.png (new file, 343 KiB)
BIN  docs/src/pages/local-server/_assets/jan-app.png (new file, 1.5 MiB)
BIN  docs/src/pages/local-server/_assets/jan-nano-bench.png (new file, 205 KiB)
BIN  docs/src/pages/local-server/_assets/jan-nano-demo.gif (new file, 22 MiB)
BIN  docs/src/pages/local-server/_assets/jan-nano0.png (new file, 4.4 MiB)
BIN  docs/src/pages/local-server/_assets/jan-nano1.png (new file, 819 KiB)
BIN  docs/src/pages/local-server/_assets/jan_ui.png (new file, 29 KiB)
BIN  docs/src/pages/local-server/_assets/llama.cpp-01-updated.png (new file, 512 KiB)
BIN  docs/src/pages/local-server/_assets/llama.cpp-01.png (new file, 199 KiB)
BIN  docs/src/pages/local-server/_assets/martian.png (new file, 144 KiB)
BIN  docs/src/pages/local-server/_assets/mcp-on.png (new file, 337 KiB)
BIN  docs/src/pages/local-server/_assets/mcp-setup-1.png (new file, 126 KiB)
BIN  docs/src/pages/local-server/_assets/mcp-setup-10.png (new file, 992 KiB)
BIN  docs/src/pages/local-server/_assets/mcp-setup-2.png (new file, 439 KiB)
BIN  docs/src/pages/local-server/_assets/mcp-setup-3.png (new file, 284 KiB)
BIN  docs/src/pages/local-server/_assets/mcp-setup-4.png (new file, 514 KiB)
BIN  docs/src/pages/local-server/_assets/mcp-setup-5.png (new file, 128 KiB)
BIN  docs/src/pages/local-server/_assets/mcp-setup-6.png (new file, 970 KiB)
BIN  docs/src/pages/local-server/_assets/mcp-setup-7.png (new file, 364 KiB)
BIN  docs/src/pages/local-server/_assets/mcp-setup-8.png (new file, 110 KiB)
BIN  docs/src/pages/local-server/_assets/mcp-setup-9.png (new file, 513 KiB)
BIN  docs/src/pages/local-server/_assets/mistralai.png (new file, 632 KiB)
BIN  (path not shown, 963 KiB)
BIN  (path not shown, 54 KiB)
BIN  docs/src/pages/local-server/_assets/model-import-04.png (new file, 757 KiB)
BIN  docs/src/pages/local-server/_assets/model-import-05.png (new file, 137 KiB)
BIN  docs/src/pages/local-server/_assets/model-management-01.png (new file, 457 KiB)
BIN  docs/src/pages/local-server/_assets/model-management-02.png (new file, 681 KiB)
BIN  docs/src/pages/local-server/_assets/model-management-03.png (new file, 230 KiB)
BIN  docs/src/pages/local-server/_assets/model-management-04.png (new file, 405 KiB)
BIN  docs/src/pages/local-server/_assets/model-management-05.png (new file, 158 KiB)
BIN  docs/src/pages/local-server/_assets/model-management-06.png (new file, 745 KiB)
BIN  docs/src/pages/local-server/_assets/model-management-07.png (new file, 174 KiB)
BIN  docs/src/pages/local-server/_assets/model-management-08.png (new file, 82 KiB)
BIN  docs/src/pages/local-server/_assets/model-management-09.png (new file, 166 KiB)
BIN  docs/src/pages/local-server/_assets/model-parameters.png (new file, 999 KiB)
BIN  docs/src/pages/local-server/_assets/nvidia-nim.png (new file, 146 KiB)
BIN  docs/src/pages/local-server/_assets/openai.png (new file, 648 KiB)
BIN  docs/src/pages/local-server/_assets/openrouter.png (new file, 490 KiB)
BIN  docs/src/pages/local-server/_assets/quick-start-01.png (new file, 136 KiB)
BIN  docs/src/pages/local-server/_assets/quick-start-02.png (new file, 155 KiB)
BIN  docs/src/pages/local-server/_assets/quick-start-03.png (new file, 685 KiB)
BIN  docs/src/pages/local-server/_assets/retrieval-01.png (new file, 172 KiB)
BIN  docs/src/pages/local-server/_assets/retrieval-02.png (new file, 148 KiB)
BIN  docs/src/pages/local-server/_assets/serper-mcp.png (new file, 231 KiB)
BIN  docs/src/pages/local-server/_assets/settings-01.png (new file, 167 KiB)
BIN  docs/src/pages/local-server/_assets/settings-02.png (new file, 170 KiB)
BIN  docs/src/pages/local-server/_assets/settings-03.png (new file, 158 KiB)
BIN  docs/src/pages/local-server/_assets/settings-04.png (new file, 775 KiB)
BIN  docs/src/pages/local-server/_assets/settings-05.png (new file, 150 KiB)
BIN  docs/src/pages/local-server/_assets/settings-06.png (new file, 562 KiB)
BIN  docs/src/pages/local-server/_assets/settings-07.png (new file, 447 KiB)
BIN  docs/src/pages/local-server/_assets/settings-08.png (new file, 678 KiB)
BIN  docs/src/pages/local-server/_assets/settings-09.png (new file, 679 KiB)
BIN  docs/src/pages/local-server/_assets/settings-10.png (new file, 172 KiB)
BIN  docs/src/pages/local-server/_assets/settings-11.png (new file, 669 KiB)
BIN  docs/src/pages/local-server/_assets/settings-12.png (new file, 677 KiB)
BIN  docs/src/pages/local-server/_assets/settings-13.png (new file, 454 KiB)
BIN  docs/src/pages/local-server/_assets/settings-14.png (new file, 221 KiB)
BIN  docs/src/pages/local-server/_assets/settings-15.png (new file, 174 KiB)
BIN  docs/src/pages/local-server/_assets/settings-16.png (new file, 766 KiB)
BIN  docs/src/pages/local-server/_assets/settings-17.png (new file, 670 KiB)