tutorials started

This commit is contained in:
Ramon Perez 2025-07-31 13:51:36 +10:00
parent aa117cf917
commit 183c028e46
37 changed files with 1979 additions and 1419 deletions

View File

@ -1,643 +0,0 @@
{
"$ref": "#/definitions/docs",
"definitions": {
"docs": {
"type": "object",
"properties": {
"title": {
"type": "string"
},
"description": {
"type": "string"
},
"editUrl": {
"anyOf": [
{
"type": "string",
"format": "uri"
},
{
"type": "boolean"
}
],
"default": true
},
"head": {
"type": "array",
"items": {
"type": "object",
"properties": {
"tag": {
"type": "string",
"enum": [
"title",
"base",
"link",
"style",
"meta",
"script",
"noscript",
"template"
]
},
"attrs": {
"type": "object",
"additionalProperties": {
"anyOf": [
{
"type": "string"
},
{
"type": "boolean"
},
{
"not": {}
}
]
}
},
"content": {
"type": "string"
}
},
"required": [
"tag"
],
"additionalProperties": false
},
"default": []
},
"tableOfContents": {
"anyOf": [
{
"type": "object",
"properties": {
"minHeadingLevel": {
"type": "integer",
"minimum": 1,
"maximum": 6,
"default": 2
},
"maxHeadingLevel": {
"type": "integer",
"minimum": 1,
"maximum": 6,
"default": 3
}
},
"additionalProperties": false
},
{
"type": "boolean"
}
],
"default": {
"minHeadingLevel": 2,
"maxHeadingLevel": 3
}
},
"template": {
"type": "string",
"enum": [
"doc",
"splash"
],
"default": "doc"
},
"hero": {
"type": "object",
"properties": {
"title": {
"type": "string"
},
"tagline": {
"type": "string"
},
"image": {
"anyOf": [
{
"type": "object",
"properties": {
"alt": {
"type": "string",
"default": ""
},
"file": {
"type": "string"
}
},
"required": [
"file"
],
"additionalProperties": false
},
{
"type": "object",
"properties": {
"alt": {
"type": "string",
"default": ""
},
"dark": {
"type": "string"
},
"light": {
"type": "string"
}
},
"required": [
"dark",
"light"
],
"additionalProperties": false
},
{
"type": "object",
"properties": {
"html": {
"type": "string"
}
},
"required": [
"html"
],
"additionalProperties": false
}
]
},
"actions": {
"type": "array",
"items": {
"type": "object",
"properties": {
"text": {
"type": "string"
},
"link": {
"type": "string"
},
"variant": {
"type": "string",
"enum": [
"primary",
"secondary",
"minimal"
],
"default": "primary"
},
"icon": {
"anyOf": [
{
"type": "string",
"enum": [
"up-caret",
"down-caret",
"right-caret",
"left-caret",
"up-arrow",
"down-arrow",
"right-arrow",
"left-arrow",
"bars",
"translate",
"pencil",
"pen",
"document",
"add-document",
"setting",
"external",
"download",
"cloud-download",
"moon",
"sun",
"laptop",
"open-book",
"information",
"magnifier",
"forward-slash",
"close",
"error",
"warning",
"approve-check-circle",
"approve-check",
"rocket",
"star",
"puzzle",
"list-format",
"random",
"comment",
"comment-alt",
"heart",
"github",
"gitlab",
"bitbucket",
"codePen",
"farcaster",
"discord",
"gitter",
"twitter",
"x.com",
"mastodon",
"codeberg",
"youtube",
"threads",
"linkedin",
"twitch",
"azureDevOps",
"microsoftTeams",
"instagram",
"stackOverflow",
"telegram",
"rss",
"facebook",
"email",
"phone",
"reddit",
"patreon",
"signal",
"slack",
"matrix",
"hackerOne",
"openCollective",
"blueSky",
"discourse",
"zulip",
"pinterest",
"tiktok",
"astro",
"alpine",
"pnpm",
"biome",
"bun",
"mdx",
"apple",
"linux",
"homebrew",
"nix",
"starlight",
"pkl",
"node",
"cloudflare",
"vercel",
"netlify",
"deno",
"jsr",
"nostr",
"backstage",
"confluence",
"jira",
"storybook",
"vscode",
"jetbrains",
"zed",
"vim",
"figma",
"sketch",
"npm",
"sourcehut",
"substack",
"seti:folder",
"seti:bsl",
"seti:mdo",
"seti:salesforce",
"seti:asm",
"seti:bicep",
"seti:bazel",
"seti:c",
"seti:c-sharp",
"seti:html",
"seti:cpp",
"seti:clojure",
"seti:coldfusion",
"seti:config",
"seti:crystal",
"seti:crystal_embedded",
"seti:json",
"seti:css",
"seti:csv",
"seti:xls",
"seti:cu",
"seti:cake",
"seti:cake_php",
"seti:d",
"seti:word",
"seti:elixir",
"seti:elixir_script",
"seti:hex",
"seti:elm",
"seti:favicon",
"seti:f-sharp",
"seti:git",
"seti:go",
"seti:godot",
"seti:gradle",
"seti:grails",
"seti:graphql",
"seti:hacklang",
"seti:haml",
"seti:mustache",
"seti:haskell",
"seti:haxe",
"seti:jade",
"seti:java",
"seti:javascript",
"seti:jinja",
"seti:julia",
"seti:karma",
"seti:kotlin",
"seti:dart",
"seti:liquid",
"seti:livescript",
"seti:lua",
"seti:markdown",
"seti:argdown",
"seti:info",
"seti:clock",
"seti:maven",
"seti:nim",
"seti:github",
"seti:notebook",
"seti:nunjucks",
"seti:npm",
"seti:ocaml",
"seti:odata",
"seti:perl",
"seti:php",
"seti:pipeline",
"seti:pddl",
"seti:plan",
"seti:happenings",
"seti:powershell",
"seti:prisma",
"seti:pug",
"seti:puppet",
"seti:purescript",
"seti:python",
"seti:react",
"seti:rescript",
"seti:R",
"seti:ruby",
"seti:rust",
"seti:sass",
"seti:spring",
"seti:slim",
"seti:smarty",
"seti:sbt",
"seti:scala",
"seti:ethereum",
"seti:stylus",
"seti:svelte",
"seti:swift",
"seti:db",
"seti:terraform",
"seti:tex",
"seti:default",
"seti:twig",
"seti:typescript",
"seti:tsconfig",
"seti:vala",
"seti:vite",
"seti:vue",
"seti:wasm",
"seti:wat",
"seti:xml",
"seti:yml",
"seti:prolog",
"seti:zig",
"seti:zip",
"seti:wgt",
"seti:illustrator",
"seti:photoshop",
"seti:pdf",
"seti:font",
"seti:image",
"seti:svg",
"seti:sublime",
"seti:code-search",
"seti:shell",
"seti:video",
"seti:audio",
"seti:windows",
"seti:jenkins",
"seti:babel",
"seti:bower",
"seti:docker",
"seti:code-climate",
"seti:eslint",
"seti:firebase",
"seti:firefox",
"seti:gitlab",
"seti:grunt",
"seti:gulp",
"seti:ionic",
"seti:platformio",
"seti:rollup",
"seti:stylelint",
"seti:yarn",
"seti:webpack",
"seti:lock",
"seti:license",
"seti:makefile",
"seti:heroku",
"seti:todo",
"seti:ignored"
]
},
{
"type": "string",
"pattern": "^\\<svg"
}
]
},
"attrs": {
"type": "object",
"additionalProperties": {
"type": [
"string",
"number",
"boolean"
]
}
}
},
"required": [
"text",
"link"
],
"additionalProperties": false
},
"default": []
}
},
"additionalProperties": false
},
"lastUpdated": {
"anyOf": [
{
"anyOf": [
{
"type": "string",
"format": "date-time"
},
{
"type": "string",
"format": "date"
},
{
"type": "integer",
"format": "unix-time"
}
]
},
{
"type": "boolean"
}
]
},
"prev": {
"anyOf": [
{
"type": "boolean"
},
{
"type": "string"
},
{
"type": "object",
"properties": {
"link": {
"type": "string"
},
"label": {
"type": "string"
}
},
"additionalProperties": false
}
]
},
"next": {
"anyOf": [
{
"type": "boolean"
},
{
"type": "string"
},
{
"type": "object",
"properties": {
"link": {
"type": "string"
},
"label": {
"type": "string"
}
},
"additionalProperties": false
}
]
},
"sidebar": {
"type": "object",
"properties": {
"order": {
"type": "number"
},
"label": {
"type": "string"
},
"hidden": {
"type": "boolean",
"default": false
},
"badge": {
"anyOf": [
{
"type": "string"
},
{
"type": "object",
"properties": {
"variant": {
"type": "string",
"enum": [
"note",
"danger",
"success",
"caution",
"tip",
"default"
],
"default": "default"
},
"class": {
"type": "string"
},
"text": {
"type": "string"
}
},
"required": [
"text"
],
"additionalProperties": false
}
]
},
"attrs": {
"type": "object",
"additionalProperties": {
"anyOf": [
{
"type": "string"
},
{
"type": "number"
},
{
"type": "boolean"
},
{
"not": {}
}
]
},
"default": {}
}
},
"additionalProperties": false,
"default": {}
},
"banner": {
"type": "object",
"properties": {
"content": {
"type": "string"
}
},
"required": [
"content"
],
"additionalProperties": false
},
"pagefind": {
"type": "boolean",
"default": true
},
"draft": {
"type": "boolean",
"default": false
},
"$schema": {
"type": "string"
}
},
"required": [
"title"
],
"additionalProperties": false
}
},
"$schema": "http://json-schema.org/draft-07/schema#"
}

View File

@ -1 +0,0 @@
// NOTE(review): appears to be an auto-generated placeholder (empty content module map) — confirm it is tool-generated before editing by hand
export default new Map();

View File

@ -1 +0,0 @@
// NOTE(review): appears to be an auto-generated placeholder (empty content module map) — confirm it is tool-generated before editing by hand
export default new Map();

View File

@ -1,164 +0,0 @@
// NOTE(review): generated ambient type declarations for the `astro:content` module
// (produced by Astro's type sync, presumably `astro sync` — confirm); do not edit by hand.
declare module 'astro:content' {
// Result of rendering a content entry: the renderable component, extracted
// markdown headings, and frontmatter exposed by remark plugins.
export interface RenderResult {
Content: import('astro/runtime/server/index.js').AstroComponentFactory;
headings: import('astro').MarkdownHeading[];
remarkPluginFrontmatter: Record<string, any>;
}
// Maps file extensions to their render result; only `.md` is declared here.
interface Render {
'.md': Promise<RenderResult>;
}
// Shape of pre-rendered content: final HTML plus optional metadata
// (e.g. image paths collected during rendering).
export interface RenderedContent {
html: string;
metadata?: {
imagePaths: Array<string>;
[key: string]: unknown;
};
}
}
// NOTE(review): generated ambient declarations — collection entry maps and the
// typed lookup helpers (`getCollection`, `getEntry`, `getEntries`, `reference`)
// for `astro:content`. Regenerated by Astro's type sync; do not edit by hand.
declare module 'astro:content' {
// Flattens a string-keyed record type to the union of its value types.
type Flatten<T> = T extends { [K: string]: infer U } ? U : never;
export type CollectionKey = keyof AnyEntryMap;
export type CollectionEntry<C extends CollectionKey> = Flatten<AnyEntryMap[C]>;
export type ContentCollectionKey = keyof ContentEntryMap;
export type DataCollectionKey = keyof DataEntryMap;
type AllValuesOf<T> = T extends any ? T[keyof T] : never;
type ValidContentEntrySlug<C extends keyof ContentEntryMap> = AllValuesOf<
ContentEntryMap[C]
>['slug'];
// Reference shapes consumed by `getEntry`/`getEntries` and produced by `reference()`:
// data entries are addressed by `id`, content entries by `slug`.
export type ReferenceDataEntry<
C extends CollectionKey,
E extends keyof DataEntryMap[C] = string,
> = {
collection: C;
id: E;
};
export type ReferenceContentEntry<
C extends keyof ContentEntryMap,
E extends ValidContentEntrySlug<C> | (string & {}) = string,
> = {
collection: C;
slug: E;
};
/** @deprecated Use `getEntry` instead. */
export function getEntryBySlug<
C extends keyof ContentEntryMap,
E extends ValidContentEntrySlug<C> | (string & {}),
>(
collection: C,
// Note that this has to accept a regular string too, for SSR
entrySlug: E,
): E extends ValidContentEntrySlug<C>
? Promise<CollectionEntry<C>>
: Promise<CollectionEntry<C> | undefined>;
/** @deprecated Use `getEntry` instead. */
export function getDataEntryById<C extends keyof DataEntryMap, E extends keyof DataEntryMap[C]>(
collection: C,
entryId: E,
): Promise<CollectionEntry<C>>;
// `getCollection`: first overload narrows via a type-guard filter, second accepts any filter.
export function getCollection<C extends keyof AnyEntryMap, E extends CollectionEntry<C>>(
collection: C,
filter?: (entry: CollectionEntry<C>) => entry is E,
): Promise<E[]>;
export function getCollection<C extends keyof AnyEntryMap>(
collection: C,
filter?: (entry: CollectionEntry<C>) => unknown,
): Promise<CollectionEntry<C>[]>;
// NOTE(review): `getEntry` overload order is significant for TypeScript overload
// resolution (reference-object forms first, then (collection, key) forms) — keep as generated.
export function getEntry<
C extends keyof ContentEntryMap,
E extends ValidContentEntrySlug<C> | (string & {}),
>(
entry: ReferenceContentEntry<C, E>,
): E extends ValidContentEntrySlug<C>
? Promise<CollectionEntry<C>>
: Promise<CollectionEntry<C> | undefined>;
export function getEntry<
C extends keyof DataEntryMap,
E extends keyof DataEntryMap[C] | (string & {}),
>(
entry: ReferenceDataEntry<C, E>,
): E extends keyof DataEntryMap[C]
? Promise<DataEntryMap[C][E]>
: Promise<CollectionEntry<C> | undefined>;
export function getEntry<
C extends keyof ContentEntryMap,
E extends ValidContentEntrySlug<C> | (string & {}),
>(
collection: C,
slug: E,
): E extends ValidContentEntrySlug<C>
? Promise<CollectionEntry<C>>
: Promise<CollectionEntry<C> | undefined>;
export function getEntry<
C extends keyof DataEntryMap,
E extends keyof DataEntryMap[C] | (string & {}),
>(
collection: C,
id: E,
): E extends keyof DataEntryMap[C]
? string extends keyof DataEntryMap[C]
? Promise<DataEntryMap[C][E]> | undefined
: Promise<DataEntryMap[C][E]>
: Promise<CollectionEntry<C> | undefined>;
/** Resolve an array of entry references from the same collection */
export function getEntries<C extends keyof ContentEntryMap>(
entries: ReferenceContentEntry<C, ValidContentEntrySlug<C>>[],
): Promise<CollectionEntry<C>[]>;
export function getEntries<C extends keyof DataEntryMap>(
entries: ReferenceDataEntry<C, keyof DataEntryMap[C]>[],
): Promise<CollectionEntry<C>[]>;
export function render<C extends keyof AnyEntryMap>(
entry: AnyEntryMap[C][string],
): Promise<RenderResult>;
// Zod schema helper: validates a string and resolves it to a typed entry reference.
export function reference<C extends keyof AnyEntryMap>(
collection: C,
): import('astro/zod').ZodEffects<
import('astro/zod').ZodString,
C extends keyof ContentEntryMap
? ReferenceContentEntry<C, ValidContentEntrySlug<C>>
: ReferenceDataEntry<C, keyof DataEntryMap[C]>
>;
// Allow generic `string` to avoid excessive type errors in the config
// if `dev` is not running to update as you edit.
// Invalid collection names will be caught at build time.
export function reference<C extends string>(
collection: C,
): import('astro/zod').ZodEffects<import('astro/zod').ZodString, never>;
type ReturnTypeOrOriginal<T> = T extends (...args: any[]) => infer R ? R : T;
type InferEntrySchema<C extends keyof AnyEntryMap> = import('astro/zod').infer<
ReturnTypeOrOriginal<Required<ContentConfig['collections'][C]>['schema']>
>;
// Generated per-collection maps: no content collections; `docs` is the only data collection.
type ContentEntryMap = {
};
type DataEntryMap = {
"docs": Record<string, {
id: string;
body?: string;
collection: "docs";
data: any;
rendered?: RenderedContent;
filePath?: string;
}>;
};
type AnyEntryMap = ContentEntryMap & DataEntryMap;
export type ContentConfig = typeof import("../src/content.config.mjs");
}

View File

@ -1 +0,0 @@
[["Map",1,2],"meta::meta",["Map",3,4,5,6],"astro-version","5.9.3","astro-config-digest","{\"root\":{},\"srcDir\":{},\"publicDir\":{},\"outDir\":{},\"cacheDir\":{},\"compressHTML\":true,\"base\":\"/\",\"trailingSlash\":\"ignore\",\"output\":\"static\",\"scopedStyleStrategy\":\"attribute\",\"build\":{\"format\":\"directory\",\"client\":{},\"server\":{},\"assets\":\"_astro\",\"serverEntry\":\"entry.mjs\",\"redirects\":true,\"inlineStylesheets\":\"auto\",\"concurrency\":1},\"server\":{\"open\":false,\"host\":false,\"port\":4321,\"streaming\":true,\"allowedHosts\":[]},\"redirects\":{},\"image\":{\"endpoint\":{\"route\":\"/_image\"},\"service\":{\"entrypoint\":\"astro/assets/services/sharp\",\"config\":{}},\"domains\":[],\"remotePatterns\":[],\"experimentalDefaultStyles\":true},\"devToolbar\":{\"enabled\":true},\"markdown\":{\"syntaxHighlight\":{\"type\":\"shiki\",\"excludeLangs\":[\"math\"]},\"shikiConfig\":{\"langs\":[],\"langAlias\":{},\"theme\":\"github-dark\",\"themes\":{},\"wrap\":false,\"transformers\":[]},\"remarkPlugins\":[],\"rehypePlugins\":[],\"remarkRehype\":{},\"gfm\":true,\"smartypants\":true},\"security\":{\"checkOrigin\":true},\"env\":{\"schema\":{},\"validateSecrets\":false},\"experimental\":{\"clientPrerender\":false,\"contentIntellisense\":false,\"responsiveImages\":false,\"headingIdCompat\":false,\"preserveScriptOrder\":false,\"csp\":false},\"legacy\":{\"collections\":false}}"]

View File

@ -1,5 +0,0 @@
{
"_variables": {
"lastUpdateCheck": 1750832446593
}
}

View File

@ -1,2 +0,0 @@
/// <reference types="astro/client" />
/// <reference path="content.d.ts" />

View File

@ -11,10 +11,6 @@
"type": "page",
"title": "Documentation"
},
"products": {
"type": "page",
"title": "Products"
},
"cortex": {
"type": "page",
"title": "Cortex",

View File

@ -0,0 +1,248 @@
---
title: Browserbase MCP
description: Automate browsers with AI-powered natural language commands through Browserbase.
keywords:
[
Jan,
MCP,
Model Context Protocol,
Browserbase,
browser automation,
web scraping,
Stagehand,
headless browser,
tool calling,
]
---
import { Callout, Steps } from 'nextra/components'
# Browserbase MCP
[Browserbase MCP](https://docs.browserbase.com/integrations/mcp/introduction) brings browser automation to Jan through natural language commands. Instead of writing complex selectors and automation scripts, you tell the AI what to do and it figures out how to do it.
Built on Stagehand, this integration lets AI models control browsers, navigate websites, fill forms, and extract data using plain English instructions.
## Available Tools
### Core Browser Automation
- `browserbase_stagehand_navigate`: Navigate to any URL
- `browserbase_stagehand_act`: Perform actions using natural language ("click the login button")
- `browserbase_stagehand_extract`: Extract text content from pages
- `browserbase_stagehand_observe`: Find and identify page elements
- `browserbase_screenshot`: Capture page screenshots
### Session Management
- `browserbase_session_create`: Create or reuse browser sessions
- `browserbase_session_close`: Close active sessions
- Multi-session variants for parallel automation workflows
## Prerequisites
- Jan with MCP enabled
- Browserbase API key and Project ID from [dashboard.browserbase.com](https://dashboard.browserbase.com)
- Model with tool calling support
- Node.js installed (only needed for the NPM installation method)
<Callout type="info">
Browserbase MCP works best with models that have strong tool calling capabilities. Claude 3.5+ Sonnet, GPT-4o, and similar models perform reliably.
</Callout>
## Setup
### Enable MCP
1. Go to **Settings** > **MCP Servers**
2. Toggle **Allow All MCP Tool Permission** ON
![MCP settings page with toggle enabled](../../_assets/mcp-on.png)
### Get Browserbase Credentials
1. Visit [dashboard.browserbase.com](https://dashboard.browserbase.com)
2. Create account or sign in
3. Copy your **API Key** and **Project ID**
[PLACEHOLDER: Screenshot of Browserbase dashboard showing API key and project ID locations]
### Configure MCP Server
Click `+` in MCP Servers section and choose your installation method:
#### Option 1: Remote URL (Recommended)
For the simplest setup with hosted infrastructure:
**Configuration:**
- **Server Name**: `browserbase`
- **URL**: Get from [smithery.ai](https://smithery.ai) with your Browserbase credentials
[PLACEHOLDER: Screenshot of smithery.ai configuration page]
#### Option 2: NPM Package
For local installation:
**Configuration:**
- **Server Name**: `browserbase`
- **Command**: `npx`
- **Arguments**: `@browserbasehq/mcp-server-browserbase`
- **Environment Variables**:
- Key: `BROWSERBASE_API_KEY`, Value: `your-api-key`
- Key: `BROWSERBASE_PROJECT_ID`, Value: `your-project-id`
- Key: `GEMINI_API_KEY`, Value: `your-gemini-key` (required for Stagehand)
[PLACEHOLDER: Screenshot of Jan MCP server configuration form with Browserbase settings]
### Verify Setup
Check server status shows as active in the MCP Servers list.
[PLACEHOLDER: Screenshot showing active Browserbase MCP server in Jan]
### Model Configuration
Use a compatible model with tool calling enabled:
- **Anthropic Claude 3.5+ Sonnet**
- **OpenAI GPT-4o**
- **Google Gemini Pro**
[PLACEHOLDER: Screenshot showing model selection with tools enabled]
## Usage
Start a new chat with your tool-enabled model. Browserbase tools will appear in the available tools list.
[PLACEHOLDER: Screenshot showing Browserbase tools in the tools panel]
### Basic Navigation
```
Navigate to github.com and take a screenshot
```
The AI will:
1. Create a browser session
2. Navigate to the specified URL
3. Capture a screenshot
4. Return the image
### Form Interaction
```
Go to example.com/contact, fill out the contact form with name "John Doe" and email "john@example.com", then submit it
```
The automation will:
- Navigate to the contact page
- Locate form fields using AI vision
- Fill in the specified information
- Submit the form
### Data Extraction
```
Visit news.ycombinator.com and extract the titles of the top 10 stories
```
This will:
- Navigate to Hacker News
- Identify story elements
- Extract and structure the title data
- Return as organized text
### Multi-Step Workflows
```
Go to GitHub, search for "javascript frameworks", click on the first repository, and tell me about its README content
```
Complex workflows work seamlessly:
- Performs the search
- Navigates to results
- Extracts repository information
- Summarizes findings
## Advanced Features
### Multi-Session Management
For parallel browser automation:
```
Create two browser sessions: one for monitoring product prices on site A, another for checking inventory on site B
```
Each session maintains independent state, cookies, and context.
### Custom Configuration
The MCP server supports various configuration options:
- **Proxies**: Enable IP rotation and geo-location testing
- **Advanced Stealth**: Bypass detection systems (Scale Plan required)
- **Custom Viewports**: Set specific browser dimensions
- **Cookie Injection**: Pre-populate authentication state
### AI Model Selection
Browserbase MCP defaults to Gemini 2.0 Flash but supports multiple AI models:
- **Gemini 2.0 Flash** (default, fastest)
- **GPT-4o** (high accuracy)
- **Claude 3.5 Sonnet** (excellent reasoning)
## Use Cases
### E-commerce Monitoring
Track product prices, availability, and competitor information across multiple sites simultaneously.
### Lead Generation
Extract contact information and business data from directories, social platforms, and company websites.
### Content Research
Gather articles, posts, and media from various sources for analysis and reporting.
### Quality Assurance
Automated testing of web applications, form submissions, and user workflows.
### Market Intelligence
Monitor competitor activities, pricing changes, and product launches.
### Data Migration
Extract structured data from legacy systems or poorly documented APIs.
## Troubleshooting
**Connection Issues:**
- Verify API key and Project ID accuracy
- Check Node.js installation (if using NPM method)
- Restart Jan application
- Confirm Browserbase account has sufficient credits
**Tool Calling Problems:**
- Ensure model has tool calling enabled
- Try Claude 3.5+ Sonnet or GPT-4o for best results
- Check MCP server shows as active
**Automation Failures:**
- Use specific, descriptive instructions
- Break complex tasks into smaller steps
- Check browser console for JavaScript errors
- Verify target website doesn't block automation
**Performance Issues:**
- Use appropriate viewport sizes for your use case
- Enable proxies only when needed
- Choose efficient AI models (Gemini Flash for speed)
- Close unused browser sessions
<Callout type="warning">
Browserbase has usage limits based on your plan. Monitor session usage to avoid interruptions.
</Callout>
## Next Steps
Browserbase MCP transforms browser automation from a programming task into a conversation. Instead of learning complex automation frameworks, you describe what you want and the AI handles the implementation details.
The combination of Jan's privacy-focused local processing with Browserbase's cloud browser infrastructure provides the best of both worlds: secure local AI reasoning with scalable remote automation capabilities.

View File

@ -0,0 +1,269 @@
---
title: Browser Use MCP
description: Control browsers with natural language through open source Browser Use automation.
keywords:
[
Jan,
MCP,
Model Context Protocol,
Browser Use,
browser automation,
web scraping,
open source,
headless browser,
tool calling,
]
---
import { Callout, Steps } from 'nextra/components'
# Browser Use MCP
[Browser Use MCP](https://docs.browser-use.com/customize/mcp-server) exposes open source browser automation as tools for AI models. Unlike complex automation frameworks that require programming knowledge, Browser Use lets you control browsers through natural language commands.
This MCP server acts as a bridge between Jan and Browser Use's automation capabilities, enabling AI models to navigate websites, interact with elements, and extract content without writing code.
## Available Tools
### Navigation
- `browser_navigate`: Navigate to URLs or open new tabs
- `browser_go_back`: Navigate back in browser history
### Interaction
- `browser_click`: Click elements by index
- `browser_type`: Type text into input fields
- `browser_scroll`: Scroll pages up or down
### State & Content
- `browser_get_state`: Get current page state with interactive elements
- `browser_extract_content`: AI-powered content extraction from pages
### Tab Management
- `browser_list_tabs`: List all open browser tabs
- `browser_switch_tab`: Switch between tabs
- `browser_close_tab`: Close specific tabs
## Prerequisites
- Jan with MCP enabled
- Python 3.8+ installed
- Model with tool calling support
- Optional: OpenAI API key for content extraction
<Callout type="info">
Browser Use works with any model that supports tool calling. Claude 3.5+ Sonnet and GPT-4o provide the most reliable results.
</Callout>
## Setup
### Enable MCP
1. Go to **Settings** > **MCP Servers**
2. Toggle **Allow All MCP Tool Permission** ON
![MCP settings page with toggle enabled](../../_assets/mcp-on.png)
### Install Browser Use
Browser Use requires Python installation. Install via pip or uv:
```bash
pip install "browser-use[cli]"
# or
uv pip install "browser-use[cli]"
```
### Configure MCP Server
Click `+` in MCP Servers section:
**Configuration:**
- **Server Name**: `browser-use`
- **Command**: `uvx`
- **Arguments**: `browser-use[cli] --mcp`
- **Environment Variables**:
- Key: `OPENAI_API_KEY`, Value: `your-openai-key` (optional, for content extraction)
[PLACEHOLDER: Screenshot of Jan MCP server configuration form with Browser Use settings]
### Install Browser Dependencies
Browser Use needs Playwright browsers installed:
```bash
playwright install chromium
```
### Verify Setup
Check server status shows as active in the MCP Servers list.
[PLACEHOLDER: Screenshot showing active Browser Use MCP server in Jan]
### Model Configuration
Use a tool-enabled model:
- **Anthropic Claude 3.5+ Sonnet**
- **OpenAI GPT-4o**
- **Google Gemini Pro**
[PLACEHOLDER: Screenshot showing model selection with tools enabled]
## Usage
Start a new chat with your tool-enabled model. Browser Use tools will appear in the available tools list.
[PLACEHOLDER: Screenshot showing Browser Use tools in the tools panel]
### Basic Navigation
```
Navigate to reddit.com and tell me what you see
```
The AI will:
1. Launch a browser session
2. Navigate to Reddit
3. Capture the page state
4. Describe the content
### Element Interaction
```
Go to google.com, search for "browser automation tools", and click the first result
```
This performs:
- Navigation to Google
- Finding the search input
- Typing the search query
- Clicking the search button
- Clicking the first result
### Content Extraction
```
Visit news.ycombinator.com and extract the top 5 story titles with their URLs
```
With OpenAI API key configured, this will:
- Navigate to Hacker News
- Use AI to identify story elements
- Extract structured data
- Return organized results
### Multi-Tab Workflows
```
Open YouTube and GitHub in separate tabs, then tell me what's trending on both platforms
```
Browser Use handles:
- Opening multiple tabs
- Switching between tabs
- Extracting content from each
- Comparing information
## Advanced Features
### State Inspection
The `browser_get_state` tool provides detailed page information:
- Current URL and title
- All interactive elements with indices
- Tab information
- Optional screenshots
### Smart Element Selection
Browser Use uses AI-powered element detection. Instead of complex CSS selectors, elements are identified by:
- Index numbers from page state
- Natural language descriptions
- Visual context understanding
### Persistent Sessions
Browser sessions remain active between commands, enabling:
- Multi-step workflows
- Authentication persistence
- Complex navigation sequences
## Use Cases
### Web Research
Automate information gathering from multiple sources, compare data across sites, and compile research reports.
### E-commerce Monitoring
Track product availability, price changes, and competitor analysis across shopping platforms.
### Social Media Management
Monitor mentions, extract engagement metrics, and analyze content performance across platforms.
### Quality Assurance Testing
Automated testing of web applications, form submissions, and user journey validation.
### Content Aggregation
Collect articles, posts, and media from various sources for analysis and curation.
### Lead Generation
Extract contact information and business intelligence from directories and professional networks.
## Configuration Options
### Downloads Directory
Files download to `~/Downloads/browser-use-mcp/` by default.
### Action Timing
The default wait time between actions is 0.5 seconds, which helps avoid rate limiting and gives pages time to load.
### Session Persistence
Browser sessions stay alive between commands, maintaining login states and navigation history.
## Troubleshooting
**Installation Issues:**
- Verify Python 3.8+ is installed
- Install Browser Use with `pip install "browser-use[cli]"`
- Run `playwright install chromium` for browser dependencies
**Server Connection Problems:**
- Check MCP server shows as active in Jan
- Restart Jan after configuration changes
- Verify command path: `uvx browser-use[cli] --mcp`
**Browser Launch Failures:**
- Ensure Playwright browsers are installed
- Check system permissions for browser launching
- Re-run `playwright install chromium` and check its output for errors
**Tool Calling Issues:**
- Confirm model has tool calling enabled
- Use Claude 3.5+ Sonnet or GPT-4o for best results
- Check that Browser Use tools appear in the tools panel
**Content Extraction Not Working:**
- Add OpenAI API key to environment variables
- Verify API key has sufficient credits
- Check API key permissions for text processing
<Callout type="warning">
Browser Use launches actual browser instances. Monitor system resources with multiple concurrent sessions.
</Callout>
## Browser Use vs Browserbase
| Feature | Browser Use | Browserbase |
|---------|-------------|-------------|
| **Infrastructure** | Local browser instances | Cloud-hosted browsers |
| **Cost** | Free (open source) | Usage-based pricing |
| **Setup** | Python installation required | API key only |
| **Performance** | Local system dependent | Optimized cloud infrastructure |
| **Privacy** | Full local control | Data processed in cloud |
| **Scalability** | Limited by local resources | Highly scalable |
## Next Steps
Browser Use MCP brings powerful browser automation to Jan without requiring cloud dependencies or usage fees. The open source approach gives you complete control over the automation environment while maintaining Jan's privacy-focused philosophy.
For scenarios requiring high-scale automation or specialized browser features, consider Browserbase MCP as a complementary cloud-based solution.

View File

@ -0,0 +1,347 @@
---
title: Jupyter MCP
description: Real-time Jupyter notebook interaction and code execution through MCP integration.
keywords:
[
Jan,
MCP,
Model Context Protocol,
Jupyter,
data analysis,
code execution,
notebooks,
Python,
visualization,
tool calling,
]
---
import { Callout, Steps } from 'nextra/components'
# Jupyter MCP
[Jupyter MCP Server](https://github.com/datalayer/jupyter-mcp-server) enables real-time interaction with Jupyter notebooks, allowing AI models to edit, execute, and document code for data analysis and visualization. Instead of just generating code suggestions, AI can actually run Python code and see the results.
This integration transforms Jan from a code-suggesting assistant into a fully capable data science partner that can execute analysis, create visualizations, and iterate based on actual results.
## Available Tools
### Core Notebook Operations
- `insert_execute_code_cell`: Add and run code cells with immediate execution
- `append_markdown_cell`: Add documentation and explanations
- `get_notebook_info`: Retrieve notebook structure and metadata
- `read_cell`: Examine existing cell content and outputs
- `modify_cell`: Edit existing cells and re-execute
### Advanced Features
- **Real-time synchronization**: See changes as they happen
- **Smart execution**: Automatic retry and adjustment when cells fail
- **Output feedback**: AI learns from execution results to improve code
- **Multi-cell workflows**: Complex analysis across multiple cells
## Prerequisites
- Jan with MCP enabled
- Python 3.8+ with uv package manager
- Docker installed
- Model with tool calling support
- Basic understanding of Jupyter notebooks
<Callout type="info">
This setup requires running JupyterLab locally. The MCP server connects to your local Jupyter instance for real-time interaction.
</Callout>
## Setup
### Enable MCP
1. Go to **Settings** > **MCP Servers**
2. Toggle **Allow All MCP Tool Permission** ON
![MCP settings page with toggle enabled](../../_assets/mcp-on.png)
### Install uv Package Manager
If you don't have uv installed:
```bash
# macOS and Linux
curl -LsSf https://astral.sh/uv/install.sh | sh
# Windows
powershell -c "irm https://astral.sh/uv/install.ps1 | iex"
```
### Create Python Environment
Create an isolated environment for Jupyter:
```bash
# Create and activate environment
uv venv jupyter-mcp
source jupyter-mcp/bin/activate # Linux/macOS
# or
jupyter-mcp\Scripts\activate # Windows
# Install required packages
uv pip install jupyterlab==4.4.1 jupyter-collaboration==4.0.2 ipykernel
# Handle dependency conflicts
uv pip uninstall pycrdt datalayer_pycrdt
uv pip install datalayer_pycrdt==0.12.17
```
### Start JupyterLab Server
Launch JupyterLab with the required configuration:
```bash
jupyter lab --port 8888 --IdentityProvider.token MY_TOKEN --ip 0.0.0.0
```
This starts JupyterLab accessible at `http://localhost:8888` with token `MY_TOKEN`.
[PLACEHOLDER: Screenshot of JupyterLab running with token authentication]
### Configure MCP Server
Click `+` in MCP Servers section and choose your OS configuration:
#### macOS and Windows Configuration
**Configuration:**
- **Server Name**: `jupyter`
- **Command**: `docker`
- **Arguments**:
```
run -i --rm -e ROOM_URL -e ROOM_TOKEN -e ROOM_ID -e RUNTIME_URL -e RUNTIME_TOKEN datalayer/jupyter-mcp-server:latest
```
- **Environment Variables**:
- Key: `ROOM_URL`, Value: `http://host.docker.internal:8888`
- Key: `ROOM_TOKEN`, Value: `MY_TOKEN`
- Key: `ROOM_ID`, Value: `notebook.ipynb`
- Key: `RUNTIME_URL`, Value: `http://host.docker.internal:8888`
- Key: `RUNTIME_TOKEN`, Value: `MY_TOKEN`
#### Linux Configuration
**Configuration:**
- **Server Name**: `jupyter`
- **Command**: `docker`
- **Arguments**:
```
run -i --rm -e ROOM_URL -e ROOM_TOKEN -e ROOM_ID -e RUNTIME_URL -e RUNTIME_TOKEN --network=host datalayer/jupyter-mcp-server:latest
```
- **Environment Variables**:
- Key: `ROOM_URL`, Value: `http://localhost:8888`
- Key: `ROOM_TOKEN`, Value: `MY_TOKEN`
- Key: `ROOM_ID`, Value: `notebook.ipynb`
- Key: `RUNTIME_URL`, Value: `http://localhost:8888`
- Key: `RUNTIME_TOKEN`, Value: `MY_TOKEN`
[PLACEHOLDER: Screenshot of Jan MCP server configuration with Jupyter settings]
### Create Target Notebook
In JupyterLab, create a new notebook named `notebook.ipynb` (matching your `ROOM_ID`).
[PLACEHOLDER: Screenshot of creating new notebook in JupyterLab]
### Verify Setup
Check server status shows as active in the MCP Servers list.
[PLACEHOLDER: Screenshot showing active Jupyter MCP server in Jan]
### Model Configuration
Use a tool-enabled model:
- **Anthropic Claude 3.5+ Sonnet**
- **OpenAI GPT-4o**
- **Google Gemini Pro**
[PLACEHOLDER: Screenshot showing model selection with tools enabled]
## Usage
Start a new chat with your tool-enabled model. Jupyter tools will appear in the available tools list.
[PLACEHOLDER: Screenshot showing Jupyter tools in the tools panel]
### Data Analysis Workflow
```
Load the iris dataset and create a scatter plot showing the relationship between sepal length and petal length, colored by species.
```
The AI will:
1. Insert a code cell to load the dataset
2. Execute the code and verify it works
3. Create visualization code
4. Run the plotting code
5. Display results and iterate if needed
### Exploratory Data Analysis
```
I have sales data in a CSV file. Load it, show me the first few rows, then create a summary of sales by month with a trend chart.
```
This produces:
- Data loading and validation
- Initial data exploration
- Monthly aggregation analysis
- Trend visualization
- Summary insights
### Machine Learning Pipeline
```
Build a simple classification model on this dataset. Split the data, train a model, evaluate performance, and show feature importance.
```
The workflow includes:
- Data preprocessing
- Train/test split
- Model training
- Performance evaluation
- Feature importance visualization
- Results interpretation
### Real-Time Iteration
```
The plot looks good but the colors are hard to distinguish. Make them more vibrant and add a legend.
```
The AI will:
- Identify the relevant plotting cell
- Modify color scheme
- Add legend configuration
- Re-execute the cell
- Show updated visualization
## Advanced Features
### Multi-Cell Workflows
The MCP server manages complex analyses across multiple cells:
- Imports and setup in initial cells
- Data processing in subsequent cells
- Visualization and analysis in final cells
- Automatic dependency tracking
### Error Handling and Recovery
When code execution fails:
- AI receives error messages
- Automatic troubleshooting and fixes
- Re-execution with corrections
- Learning from failure patterns
### Real-Time Collaboration
Changes made directly in JupyterLab are immediately visible to the AI:
- Bidirectional synchronization
- Conflict resolution
- Version tracking
- Collaborative editing
## Configuration Details
### Environment Variables Explained
- **ROOM_URL**: JupyterLab server URL for notebook access
- **ROOM_TOKEN**: Authentication token for JupyterLab
- **ROOM_ID**: Path to target notebook (relative to JupyterLab root)
- **RUNTIME_URL**: Jupyter kernel server URL for code execution
- **RUNTIME_TOKEN**: Authentication token for kernel access
### Network Configuration
- **macOS/Windows**: Uses `host.docker.internal` for Docker-to-host communication
- **Linux**: Uses `--network=host` for direct network access
- **Port matching**: Ensure MCP configuration matches JupyterLab port
## Use Cases
### Data Science Research
Interactive analysis, hypothesis testing, and visualization creation with real-time code execution and iteration.
### Educational Tutorials
Create step-by-step analysis tutorials with executed examples and explanations for learning data science concepts.
### Business Analytics
Generate reports, dashboards, and insights from business data with automated analysis and visualization.
### Prototype Development
Rapid prototyping of data processing pipelines, machine learning models, and analytical workflows.
### Code Documentation
Automatically document analysis processes with markdown cells explaining methodology and results.
### Collaborative Analysis
Work with AI to explore datasets, test hypotheses, and develop analytical solutions interactively.
## Troubleshooting
**JupyterLab Connection Issues:**
- Verify JupyterLab is running on the specified port
- Check token authentication is working
- Confirm notebook file exists at specified path
- Test JupyterLab access in browser
**Docker Container Problems:**
- Ensure Docker is running and accessible
- Check network configuration for your OS
- Verify environment variables are set correctly
- Test Docker container can reach JupyterLab
**Python Environment Issues:**
- Activate the correct uv environment
- Install missing packages with `uv pip install`
- Resolve dependency conflicts
- Check Python and package versions
**Code Execution Failures:**
- Verify kernel is running in JupyterLab
- Check for missing Python packages
- Examine error messages in notebook
- Restart Jupyter kernel if needed
**MCP Server Connection:**
- Check server shows as active in Jan
- Verify all environment variables are set
- Restart Jan after configuration changes
- Test Docker container manually
<Callout type="warning">
Jupyter MCP requires both JupyterLab server and Docker to be running. Monitor system resources with active notebook sessions.
</Callout>
## Security Considerations
**Code Execution:**
- AI has full Python execution capabilities
- Review generated code before execution
- Use isolated Python environments
- Monitor system resource usage
**Network Access:**
- JupyterLab server is network accessible
- Use strong authentication tokens
- Consider firewall restrictions
- Monitor access logs
**Data Privacy:**
- Notebook content is processed by AI models
- Keep sensitive data in secure environments
- Review data handling policies
- Use local-only configurations when needed
## Next Steps
Jupyter MCP transforms Jan into a fully capable data science partner that can execute real Python code, create visualizations, and iterate on analysis based on actual results. This moves beyond code suggestions to genuine collaborative data analysis.
The real-time interaction enables a natural workflow where you describe what you want to analyze, and the AI builds the analysis step-by-step, adjusting based on intermediate results and your feedback.

View File

@ -0,0 +1,263 @@
---
title: Octagon Deep Research MCP
description: Comprehensive AI-powered research capabilities for technical teams and complex analysis.
keywords:
[
Jan,
MCP,
Model Context Protocol,
Octagon,
deep research,
technical research,
debugging,
framework comparison,
API research,
tool calling,
]
---
import { Callout, Steps } from 'nextra/components'
# Octagon Deep Research MCP
[Octagon Deep Research MCP](https://docs.octagonagents.com/guide/deep-research-mcp.html) provides enterprise-grade research capabilities designed for technical teams. Instead of spending hours researching frameworks, debugging complex issues, or evaluating tools, you get comprehensive analysis that goes straight from research to working applications.
Octagon claims to be faster than ChatGPT Deep Research, more thorough than Perplexity, and free of the rate limits that throttle your workflow.
## Available Research Capabilities
### Technical Research
- **Complex Debugging**: Root cause analysis across distributed systems, memory leak detection
- **Framework Comparison**: Performance benchmarking, scalability analysis, ecosystem evaluation
- **Package Discovery**: Dependency compatibility, security assessment, bundle size impact
- **API Research**: Documentation analysis, rate limiting comparison, integration complexity
- **Performance Optimization**: Code profiling, database tuning, caching strategies
### Business Intelligence
- **Market Research**: Competitive analysis, industry trends, customer behavior
- **Tool Discovery**: Development workflow optimization, CI/CD evaluation
- **Design Analysis**: UI/UX patterns, accessibility compliance, conversion optimization
## Prerequisites
- Jan with MCP enabled
- Octagon account and API key from [octagonagents.com](https://octagonagents.com)
- Model with tool calling support
- Node.js installed
<Callout type="info">
Octagon offers unlimited research runs without rate limits, unlike ChatGPT Pro's 125-task monthly restriction.
</Callout>
## Setup
### Enable MCP
1. Go to **Settings** > **MCP Servers**
2. Toggle **Allow All MCP Tool Permission** ON
![MCP settings page with toggle enabled](../../_assets/mcp-on.png)
### Get Octagon API Key
1. Sign up at [octagonagents.com](https://octagonagents.com)
2. Navigate to **API Keys** from the left menu
3. Generate a new API key
4. Save the key
[PLACEHOLDER: Screenshot of Octagon dashboard showing API key generation]
### Configure MCP Server
Click `+` in MCP Servers section and choose your method:
#### Option 1: Remote Server (Recommended)
**Configuration:**
- **Server Name**: `octagon-deep-research`
- **URL**: `https://mcp.octagonagents.com/deep-research/mcp`
- **Environment Variables**: Leave empty (OAuth authentication)
#### Option 2: NPM Package
**Configuration:**
- **Server Name**: `octagon-deep-research`
- **Command**: `npx`
- **Arguments**: `-y octagon-deep-research-mcp@latest`
- **Environment Variables**:
- Key: `OCTAGON_API_KEY`, Value: `your-api-key`
[PLACEHOLDER: Screenshot of Jan MCP server configuration with Octagon settings]
### Verify Setup
Check server status shows as active in the MCP Servers list.
[PLACEHOLDER: Screenshot showing active Octagon MCP server in Jan]
### Model Configuration
Use a tool-enabled model:
- **Anthropic Claude 3.5+ Sonnet**
- **OpenAI GPT-4o**
- **Google Gemini Pro**
[PLACEHOLDER: Screenshot showing model selection with tools enabled]
## Usage
Start a new chat with your tool-enabled model. Octagon research tools will appear in the available tools list.
[PLACEHOLDER: Screenshot showing Octagon tools in the tools panel]
### Technical Debugging
```
Research the latest techniques for spotting memory leaks in large React + Node.js projects. Then build a command-line analyzer that scans a codebase and suggests fixes.
```
The research will:
1. Analyze current memory leak detection methods
2. Compare available tools and libraries
3. Identify best practices and patterns
4. Generate working code implementation
5. Provide optimization recommendations
### Framework Evaluation
```
Compare Next.js, Remix, and Astro for high-traffic e-commerce. Benchmark build speed, runtime performance, and SEO. Build a functional storefront with the winner.
```
This produces:
- Detailed performance comparisons
- Real-world benchmarking data
- SEO and accessibility analysis
- Working storefront implementation
- Deployment recommendations
### API Analysis
```
Evaluate leading video-streaming APIs on cost, latency, uptime, and documentation quality. Create a working demo with the top choice.
```
Results include:
- Comprehensive API comparison matrix
- Cost-benefit analysis
- Integration complexity assessment
- Working implementation demo
- Production deployment guide
### Package Discovery
```
Compare Python libraries for real-time chat systems. Build a runnable chat server with the best stack, including Docker setup.
```
Delivers:
- Library performance benchmarks
- Feature comparison analysis
- Security and maintenance evaluation
- Complete working implementation
- Production-ready configuration
## Advanced Research Capabilities
### Multi-Source Analysis
Octagon pulls from extensive technical sources:
- Official documentation and changelogs
- GitHub repositories and issue trackers
- Stack Overflow and developer forums
- Performance benchmarking sites
- Security vulnerability databases
### Cross-Verification
Research findings are cross-checked across multiple sources for accuracy and currency.
### Implementation Focus
Unlike generic research tools, Octagon prioritizes actionable results that lead directly to working code and implementations.
## Use Cases
### Development Teams
Accelerate technical decision-making with comprehensive framework comparisons, tool evaluations, and architecture research.
### DevOps Engineering
Research deployment strategies, monitoring solutions, and infrastructure optimization techniques with working examples.
### Product Development
Analyze market opportunities, competitive landscapes, and user experience patterns to inform product strategy.
### Technical Architecture
Evaluate technology stacks, performance optimization strategies, and scalability solutions for complex systems.
### Startup Research
Rapid market analysis, competitive intelligence, and technical feasibility assessments for new ventures.
### Enterprise Migration
Research migration strategies, compatibility assessments, and risk analysis for large-scale technology transitions.
## Performance Claims
**Speed**: 8-10x faster than ChatGPT Deep Research
**Depth**: 3x more source coverage than competitors
**Limits**: No rate restrictions (vs ChatGPT Pro's 125 monthly tasks)
**Accuracy**: Cross-verified technical details and metrics
## Troubleshooting
**Authentication Issues:**
- Verify Octagon account is active
- Check API key format and permissions
- Re-authenticate OAuth connection
- Confirm account has sufficient credits
**Server Connection Problems:**
- Check MCP server shows as active in Jan
- Verify internet connectivity to Octagon services
- Restart Jan after configuration changes
- Test API key validity
**Research Quality Issues:**
- Be specific in research requests
- Break complex queries into focused topics
- Specify desired output format (code, analysis, comparison)
- Include context about your use case
**Performance Problems:**
- Monitor Octagon account usage limits
- Check network latency to research servers
- Try breaking large requests into smaller chunks
- Verify model has sufficient context window
**Tool Calling Issues:**
- Ensure model supports tool calling
- Check that Octagon tools appear in tools panel
- Try Claude 3.5+ Sonnet for best results
- Verify MCP permissions are enabled
<Callout type="warning">
Octagon Deep Research requires an active internet connection and may have usage limits based on your account type.
</Callout>
## Octagon vs Competitors
| Feature | Octagon | ChatGPT Deep Research | Perplexity | Grok DeepSearch |
|---------|---------|----------------------|------------|-----------------|
| **Speed** | 8-10x faster | Baseline | Moderate | Moderate |
| **Rate Limits** | None | 125/month | Limited | Limited |
| **Source Coverage** | 3x more | Standard | Standard | Standard |
| **Technical Focus** | Specialized | General | General | General |
| **Code Generation** | Integrated | Separate | Limited | Limited |
| **Verification** | Cross-checked | Basic | Basic | Basic |
## Next Steps
Octagon Deep Research MCP transforms research from a time-consuming manual process into an automated capability that delivers actionable results. Instead of spending hours evaluating options, you get comprehensive analysis with working implementations.
The unlimited research capability removes the bottleneck of monthly limits, enabling development teams to make informed decisions without workflow interruption.

View File

@ -0,0 +1,273 @@
---
title: Canva MCP
description: Create and edit designs through natural language commands with Canva's official MCP server.
keywords:
[
Jan,
MCP,
Model Context Protocol,
Canva,
design automation,
graphic design,
presentations,
templates,
tool calling,
]
---
import { Callout, Steps } from 'nextra/components'
# Canva MCP
[Canva MCP](https://www.canva.com/newsroom/news/deep-research-integration-mcp-server/) brings professional design capabilities directly into Jan through natural language commands. As the first design platform to offer native MCP integration, Canva lets AI models create presentations, resize graphics, and edit templates without leaving the chat.
Instead of switching between apps to create designs, you describe what you need and the AI handles the design work using your Canva account and assets.
## Available Tools
The Canva MCP server provides comprehensive design automation:
- **Template Generation**: Create presentations, social media posts, and documents
- **Asset Management**: Import, export, and organize design assets
- **Design Editing**: Resize graphics, modify layouts, and update content
- **Content Integration**: Use existing Canva assets and brand elements
- **Format Conversion**: Export designs in multiple formats (PDF, PNG, etc.)
## Prerequisites
- Jan with MCP enabled
- Canva account (free or paid)
- Model with tool calling support
- Node.js installed
- Internet connection for Canva API access
<Callout type="info">
Canva MCP works with both free and paid Canva accounts. Paid accounts have access to premium templates and features.
</Callout>
## Setup
### Enable MCP
1. Go to **Settings** > **MCP Servers**
2. Toggle **Allow All MCP Tool Permission** ON
![MCP settings page with toggle enabled](../../_assets/mcp-on.png)
### Configure Canva MCP Server
Click `+` in MCP Servers section:
**Configuration:**
- **Server Name**: `Canva`
- **Command**: `npx`
- **Arguments**: `-y mcp-remote@latest https://mcp.canva.com/mcp`
- **Environment Variables**: Leave empty (authentication handled via OAuth)
[PLACEHOLDER: Screenshot of Jan MCP server configuration form with Canva settings]
### Verify Setup
Check server status shows as active in the MCP Servers list.
[PLACEHOLDER: Screenshot showing active Canva MCP server in Jan]
### Authentication
The first time you use Canva tools, you'll be prompted to authenticate with your Canva account through OAuth. This grants secure access to your designs and templates.
[PLACEHOLDER: Screenshot of Canva OAuth authentication flow]
### Model Configuration
Use a tool-enabled model:
- **Anthropic Claude 3.5+ Sonnet**
- **OpenAI GPT-4o**
- **Google Gemini Pro**
[PLACEHOLDER: Screenshot showing model selection with tools enabled]
## Usage
Start a new chat with your tool-enabled model. Canva tools will appear in the available tools list.
[PLACEHOLDER: Screenshot showing Canva tools in the tools panel]
### Create Presentations
```
Create a 5-slide pitch deck about sustainable energy solutions. Use a professional template with blue and green colors.
```
The AI will:
1. Access Canva's template library
2. Select appropriate presentation template
3. Generate content for each slide
4. Apply consistent styling
5. Return the completed presentation
### Design Social Media Content
```
Create an Instagram post announcing our new product launch. Make it eye-catching with our brand colors.
```
This generates:
- Platform-optimized dimensions
- Brand-consistent styling
- Engaging visual elements
- Ready-to-publish format
### Resize and Adapt Designs
```
Take my existing LinkedIn post design and resize it for Twitter, Facebook, and Instagram Stories.
```
Canva MCP will:
- Access your existing design
- Create versions for each platform
- Maintain visual consistency
- Optimize for platform specifications
### Generate Marketing Materials
```
Create a flyer for our upcoming workshop on January 15th. Include registration details and make it professional but approachable.
```
Results in:
- Professional layout and typography
- Clear information hierarchy
- Contact and registration details
- Print-ready format
## Advanced Features
### Brand Integration
Canva MCP automatically applies your brand guidelines:
- Consistent color schemes
- Brand fonts and styling
- Logo placement
- Template preferences
### Asset Library Access
Access your entire Canva library:
- Previously created designs
- Uploaded images and graphics
- Brand kit elements
- Purchased premium content
### Collaborative Workflows
Designs created through MCP integrate with Canva's collaboration features:
- Share with team members
- Collect feedback and comments
- Track version history
- Manage permissions
### Export Options
Multiple format support:
- **PDF**: Print-ready documents
- **PNG/JPG**: Web and social media
- **MP4**: Video presentations
- **GIF**: Animated graphics
## Use Cases
### Marketing Teams
Create campaign materials, social media content, and promotional designs with consistent branding across all channels.
### Sales Presentations
Generate pitch decks, proposal documents, and client presentations that reflect current data and messaging.
### Educational Content
Design lesson materials, infographics, and student handouts that engage and inform effectively.
### Event Planning
Create invitations, programs, signage, and promotional materials for events and conferences.
### Small Business Marketing
Develop professional marketing materials without requiring design expertise or expensive software.
### Content Creation
Generate visual content for blogs, newsletters, and digital marketing campaigns.
## Design Best Practices
### Effective Prompts
- **Be specific**: "Create a minimalist LinkedIn banner with our logo and tagline"
- **Include dimensions**: "Design a square Instagram post"
- **Specify style**: "Use our brand colors and modern typography"
- **Mention purpose**: "Create a presentation slide explaining our pricing model"
### Brand Consistency
- Upload brand assets to Canva before using MCP
- Specify brand elements in prompts
- Use consistent messaging and visual style
- Review designs for brand compliance
### Template Selection
- Choose templates appropriate for your audience
- Consider platform requirements and limitations
- Balance creativity with readability
- Test designs across different devices
## Troubleshooting
**Authentication Issues:**
- Clear browser cache and cookies
- Re-authenticate with Canva account
- Check Canva account permissions
- Verify internet connection stability
**Server Connection Problems:**
- Confirm MCP server shows as active
- Check Node.js installation and version
- Restart Jan after configuration changes
- Test network connectivity to Canva services
**Design Creation Failures:**
- Be more specific in design requests
- Check Canva account limits and quotas
- Verify template availability for your account type
- Try simpler design requests first
**Export Issues:**
- Check available export formats for your account
- Verify file size limitations
- Ensure sufficient storage space
- Try exporting individual elements separately
**Tool Calling Problems:**
- Ensure model supports tool calling
- Check that Canva tools appear in tools panel
- Try Claude 3.5+ Sonnet or GPT-4o
- Verify MCP permissions are enabled
<Callout type="warning">
Canva MCP requires an active internet connection and may have usage limits based on your Canva account type.
</Callout>
## Privacy and Security
**Data Handling:**
- Designs remain in your Canva account
- No content is shared without explicit permission
- OAuth provides secure, revocable access
- All API communications are encrypted
**Account Security:**
- Use strong Canva account passwords
- Enable two-factor authentication
- Review connected applications periodically
- Monitor account activity for unusual access
## Next Steps
Canva MCP transforms design from a separate workflow step into part of your natural conversation with AI. Instead of describing what you want and then creating it separately, you can generate professional designs directly within your AI interactions.
This integration positions design as a thinking tool rather than just an output format, enabling more creative and efficient workflows across marketing, sales, education, and content creation.

View File

@ -0,0 +1,288 @@
---
title: Jina MCP
description: Access Jina AI's web search and content extraction APIs through community MCP servers.
keywords:
[
Jan,
MCP,
Model Context Protocol,
Jina AI,
web search,
content extraction,
web scraping,
fact checking,
tool calling,
]
---
import { Callout, Steps } from 'nextra/components'
# Jina MCP
Jina AI provides powerful web search and content extraction APIs, but doesn't offer an official MCP server. The community has built two MCP implementations that bring Jina's capabilities to Jan.
These community servers enable AI models to search the web, extract content from web pages, and perform fact-checking using Jina's infrastructure.
<Callout type="warning">
These are community-maintained packages, not official Jina AI releases. Exercise standard caution when installing third-party packages and verify their source code before use.
</Callout>
## Available Servers
### PsychArch/jina-mcp-tools
Basic implementation with core functionality:
- `jina_reader`: Extract content from web pages
- `jina_search`: Search the web
### JoeBuildsStuff/mcp-jina-ai
More comprehensive implementation with additional features:
- `read_webpage`: Advanced content extraction with multiple formats
- `search_web`: Web search with configurable options
- `fact_check`: Fact-checking and grounding capabilities
## Prerequisites
- Jan with MCP enabled
- Jina AI API key from [jina.ai](https://jina.ai/?sui=apikey) (optional for basic features)
- Model with tool calling support
- Node.js installed
<Callout type="info">
Some features work without an API key, but registration provides enhanced capabilities and higher rate limits.
</Callout>
## Setup
### Enable MCP
1. Go to **Settings** > **MCP Servers**
2. Toggle **Allow All MCP Tool Permission** ON
![MCP settings page with toggle enabled](../../_assets/mcp-on.png)
### Get Jina API Key
1. Visit [jina.ai](https://jina.ai/?sui=apikey)
2. Create account or sign in
3. Generate API key
4. Save the key
[PLACEHOLDER: Screenshot of Jina AI dashboard showing API key generation]
### Choose Your Server
Select one of the community implementations:
#### Option 1: Basic Server (PsychArch)
**Configuration:**
- **Server Name**: `jina-basic`
- **Command**: `npx`
- **Arguments**: `jina-mcp-tools`
- **Environment Variables**:
- Key: `JINA_API_KEY`, Value: `your-api-key` (optional)
#### Option 2: Advanced Server (JoeBuildsStuff)
**Configuration:**
- **Server Name**: `jina-advanced`
- **Command**: `npx`
- **Arguments**: `-y jina-ai-mcp-server`
- **Environment Variables**:
- Key: `JINA_API_KEY`, Value: `your-api-key`
[PLACEHOLDER: Screenshot of Jan MCP server configuration with Jina settings]
### Verify Setup
Check server status shows as active in the MCP Servers list.
[PLACEHOLDER: Screenshot showing active Jina MCP server in Jan]
### Model Configuration
Use a tool-enabled model:
- **Anthropic Claude 3.5+ Sonnet**
- **OpenAI GPT-4o**
- **Google Gemini Pro**
[PLACEHOLDER: Screenshot showing model selection with tools enabled]
## Usage
Start a new chat with your tool-enabled model. Jina tools will appear in the available tools list.
[PLACEHOLDER: Screenshot showing Jina tools in the tools panel]
### Web Content Extraction
**Basic Server:**
```
Extract the main content from https://example.com/article
```
**Advanced Server:**
```
Read the webpage at https://news.example.com and format it as clean markdown
```
The advanced server offers multiple extraction modes:
- **Standard**: Balanced speed and quality
- **Comprehensive**: Maximum data extraction
- **Clean Content**: Remove ads and navigation
### Web Search
**Basic Server:**
```
Search for "latest developments in quantum computing" and return 5 results
```
**Advanced Server:**
```
Search for machine learning tutorials, limit to 10 results from github.com
```
The advanced server supports:
- Configurable result counts
- Site-specific filtering
- Multiple output formats
### Fact Checking (Advanced Only)
```
Fact-check this statement: "The Great Wall of China is visible from space"
```
Returns:
- Factuality scores
- Supporting evidence
- Reference sources
- Contradictory information
### GitHub Integration
Both servers handle GitHub URLs intelligently:
```
Extract the code from https://github.com/owner/repo/blob/main/script.js
```
GitHub file URLs are automatically converted to raw content for direct access.
## Advanced Features
### Content Formats (Advanced Server)
- **Default**: Jina's native markdown format
- **Markdown**: Structured with headers and links
- **Text**: Plain text only
- **HTML**: Raw HTML content
- **Screenshot**: Visual page capture
### Search Options
- **Result Count**: Configure number of search results
- **Site Filtering**: Limit searches to specific domains
- **Image Retention**: Include or exclude image content
- **Alt Text Generation**: AI-generated image descriptions
### Fact-Checking Modes
- **Standard**: Quick factuality assessment
- **Deep Dive**: Comprehensive analysis with multiple sources
- **Evidence Scoring**: Quantified support/contradiction metrics
## Use Cases
### Research and Analysis
Extract content from academic papers, news articles, and research websites for comprehensive analysis.
### Content Verification
Fact-check claims and statements using multiple web sources and credibility scoring.
### Code Documentation
Extract and analyze code from GitHub repositories and technical documentation sites.
### Market Research
Search for industry information, competitor analysis, and market trends across specific domains.
### News Monitoring
Track breaking news, extract article content, and verify information across multiple sources.
### Academic Writing
Gather sources, extract citations, and verify facts for research papers and articles.
## Security Considerations
**Third-Party Packages:**
- These are community-maintained, not official Jina AI packages
- Review source code on GitHub before installation
- Monitor package updates and community feedback
- Consider package reputation and maintenance activity
**API Key Security:**
- Store API keys securely in environment variables
- Never commit API keys to version control
- Monitor API usage for unusual activity
- Rotate keys periodically
**Content Filtering:**
- Be aware that extracted content reflects source material
- Implement additional filtering for sensitive applications
- Consider content validation for critical use cases
## Troubleshooting
**Installation Issues:**
- Verify Node.js installation and version
- Check npm/npx permissions and configuration
- Try clearing npm cache: `npm cache clean --force`
**Server Connection Problems:**
- Confirm MCP server shows as active
- Check API key format and validity
- Restart Jan after configuration changes
- Verify network connectivity to Jina APIs
**Content Extraction Failures:**
- Some websites block automated access
- Try different extraction modes (advanced server)
- Check if target site requires authentication
- Verify URL accessibility in browser
**Rate Limiting:**
- Jina AI has rate limits for free accounts
- Consider upgrading API plan for higher limits
- Implement delays between requests
- Monitor usage through Jina dashboard
**Tool Calling Issues:**
- Ensure model supports tool calling
- Check that Jina tools appear in tools panel
- Try Claude 3.5+ Sonnet or GPT-4o for best results
- Verify MCP permissions are enabled
<Callout type="info">
For technical issues with specific implementations, check the GitHub repositories for documentation and open issues.
</Callout>
## Server Comparison
| Feature | Basic Server | Advanced Server |
|---------|--------------|-----------------|
| **Web Search** | ✓ | ✓ |
| **Content Extraction** | ✓ | ✓ |
| **Fact Checking** | ✗ | ✓ |
| **Multiple Formats** | Limited | Full |
| **Site Filtering** | ✓ | ✓ |
| **Image Handling** | Basic | Advanced |
| **GitHub Integration** | ✓ | ✓ |
| **API Key Required** | Optional | Required |
## Next Steps
Jina MCP servers provide powerful web search and content extraction capabilities within Jan's privacy-focused environment. The community implementations offer different feature sets depending on your needs.
For production use, consider contributing to these community projects or implementing additional security measures around third-party package usage.

View File

@ -0,0 +1,291 @@
---
title: Perplexity MCP
description: Real-time web search and research capabilities through Perplexity's official MCP server.
keywords:
[
Jan,
MCP,
Model Context Protocol,
Perplexity,
web search,
real-time search,
research,
Sonar API,
tool calling,
]
---
import { Callout, Steps } from 'nextra/components'
# Perplexity MCP
[Perplexity MCP](https://docs.perplexity.ai/guides/mcp-server) brings real-time web search directly into Jan through Perplexity's Sonar API. This official implementation lets AI models perform live web searches and return current, relevant information without the knowledge cutoff limitations of base models.
Unlike static training data, this integration provides access to current web information, making it useful for research, fact-checking, and staying current with recent developments.
## Available Tools
### perplexity_ask
- **Real-time web search**: Query current web information
- **Conversational interface**: Multi-turn research conversations
- **Source attribution**: Results include reference links
- **Current information**: No knowledge cutoff restrictions
## Prerequisites
- Jan with MCP enabled
- Perplexity API key from [perplexity.ai](https://perplexity.ai)
- Model with tool calling support
- Node.js installed (for NPX)
<Callout type="info">
This is Perplexity's official MCP implementation, providing direct access to their Sonar API infrastructure.
</Callout>
## Setup
### Enable MCP
1. Go to **Settings** > **MCP Servers**
2. Toggle **Allow All MCP Tool Permission** ON
![MCP settings page with toggle enabled](../../_assets/mcp-on.png)
### Get Perplexity API Key
1. Visit [perplexity.ai](https://perplexity.ai)
2. Sign up for a Sonar API account
3. Navigate to the developer dashboard
4. Generate your API key
5. Save the key
[PLACEHOLDER: Screenshot of Perplexity dashboard showing API key generation]
### Configure MCP Server
Click `+` in MCP Servers section:
**Configuration:**
- **Server Name**: `perplexity-ask`
- **Command**: `npx`
- **Arguments**: `-y server-perplexity-ask`
- **Environment Variables**:
- Key: `PERPLEXITY_API_KEY`, Value: `your-api-key`
[PLACEHOLDER: Screenshot of Jan MCP server configuration form with Perplexity settings]
<Callout type="info">
Using NPX means Jan handles package installation automatically. No need to clone repositories or manage dependencies manually.
</Callout>
### Verify Setup
Check server status shows as active in the MCP Servers list.
[PLACEHOLDER: Screenshot showing active Perplexity MCP server in Jan]
### Model Configuration
Use a tool-enabled model:
- **Anthropic Claude 3.5+ Sonnet**
- **OpenAI GPT-4o**
- **Google Gemini Pro**
[PLACEHOLDER: Screenshot showing model selection with tools enabled]
## Usage
Start a new chat with your tool-enabled model. Perplexity tools will appear in the available tools list.
[PLACEHOLDER: Screenshot showing Perplexity tools in the tools panel]
### Basic Web Search
```
What are the latest developments in quantum computing this week?
```
The AI will:
1. Query Perplexity's search engine
2. Return current, relevant information
3. Include source links and references
4. Provide up-to-date context
### Research Queries
```
Compare the current market leaders in electric vehicle charging infrastructure and their recent partnerships.
```
This produces:
- Current market analysis
- Recent partnership announcements
- Competitive landscape overview
- Source attribution for claims
### Real-Time Information
```
What's happening with the latest SpaceX launch? Any delays or updates?
```
Results include:
- Current launch status
- Recent updates or changes
- Official announcements
- News coverage links
### Multi-Turn Research
```
Search for information about the new EU AI Act. Then tell me how it specifically affects small startups.
```
Enables:
- Initial broad research
- Follow-up targeted queries
- Contextual understanding
- Comprehensive analysis
## Advanced Features
### Conversational Research
The `perplexity_ask` tool accepts conversation messages, enabling:
- Multi-turn research sessions
- Context-aware follow-up queries
- Refined search based on previous results
- Deeper exploration of topics
### Source Attribution
All results include:
- Reference links to original sources
- Publication dates when available
- Authority indicators
- Fact-checking context
### Current Information Access
Unlike base model training data:
- No knowledge cutoff limitations
- Real-time web information
- Recent news and developments
- Current market data
## Use Cases
### Journalism and Research
Access current news, verify facts, and gather sources for articles and reports.
### Market Intelligence
Track industry developments, competitor announcements, and market trends in real-time.
### Academic Research
Find recent publications, current statistics, and up-to-date information for studies.
### Business Analysis
Monitor competitors, industry changes, and regulatory developments affecting your business.
### Technical Research
Stay current with framework updates, security patches, and technology announcements.
### Investment Research
Access current financial news, earnings reports, and market analysis for investment decisions.
## Search Optimization
### Effective Queries
- **Be specific**: "Latest Tesla earnings Q4 2024" vs "Tesla news"
- **Include timeframes**: "AI regulation changes this month"
- **Specify sources**: "Recent academic papers on climate change"
- **Ask follow-ups**: Build on previous searches for deeper insights
### Research Strategies
- Start broad, then narrow focus based on initial results
- Use follow-up questions to explore specific aspects
- Cross-reference multiple sources for verification
- Request specific types of sources (academic, news, official)
## Troubleshooting
**API Key Issues:**
- Verify API key format and validity
- Check Perplexity account status and credits
- Confirm API key permissions in dashboard
- Test API key with direct API calls
**Server Connection Problems:**
- Ensure NPX can access npm registry
- Check Node.js installation and version
- Verify internet connectivity
- Restart Jan after configuration changes
**Search Quality Issues:**
- Refine query specificity and context
- Try different search approaches
- Check if topic has recent information available
- Verify sources are current and authoritative
**Tool Calling Problems:**
- Confirm model supports tool calling
- Check that Perplexity tools appear in tools panel
- Try Claude 3.5+ Sonnet for best results
- Verify MCP permissions are enabled
**Rate Limiting:**
- Monitor API usage in Perplexity dashboard
- Check account limits and quotas
- Consider upgrading account plan if needed
- Implement delays between searches if necessary
<Callout type="warning">
Perplexity API has usage limits based on your account plan. Monitor consumption to avoid service interruptions.
</Callout>
## Advanced Configuration
### Custom Search Parameters
The default implementation uses standard search parameters. For custom configurations:
- Search model selection
- Response length preferences
- Source filtering options
- Citation format preferences
Modifications require editing the server configuration directly.
### Docker Alternative
For containerized environments, Perplexity also provides Docker configuration:
```json
{
"mcpServers": {
"perplexity-ask": {
"command": "docker",
"args": ["run", "-i", "--rm", "-e", "PERPLEXITY_API_KEY", "mcp/perplexity-ask"],
"env": {
"PERPLEXITY_API_KEY": "YOUR_API_KEY_HERE"
}
}
}
}
```
## Perplexity vs Other Search MCPs
| Feature | Perplexity | Exa | Jina (Community) |
|---------|------------|-----|------------------|
| **Official Support** | ✓ | ✓ | Community |
| **Real-time Search** | ✓ | ✓ | ✓ |
| **Source Attribution** | ✓ | ✓ | Limited |
| **API Reliability** | High | High | Variable |
| **Search Quality** | AI-optimized | Semantic | Standard |
| **Rate Limits** | Plan-based | Plan-based | Varies |
## Next Steps
Perplexity MCP transforms Jan from a knowledge-cutoff-limited assistant into a current, web-connected research tool. The integration provides access to real-time information while maintaining Jan's privacy-focused local processing for conversation and reasoning.
This combination delivers the best of both worlds: secure local AI processing with access to current web information when needed.

Binary file not shown.

Before

Width:  |  Height:  |  Size: 357 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 209 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 619 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 136 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 36 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 36 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 37 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 313 KiB

View File

@ -1,6 +0,0 @@
{
"index": "Overview",
"platforms": "Platforms",
"models": "Models",
"tools": "Tools"
}

View File

@ -1,189 +0,0 @@
---
title: Products
description: AI that runs where you need it, how you need it
---
import { Callout } from 'nextra/components'
# Products
Jan is moving from a local AI application to a complete full-stack AI solution that you can self-host. This
includes models, applications, and tools that delight users and help them solve their problems.
## What We're Building
**Jan Factory (or Agent)** = Jan Models + Jan Application + Jan Tools
Unlike other AI assistants that do specific tasks with one model or have many models with a myriad of solutions, Jan provides:
- Its own specialised models that are optimised at specific tasks like web-search, creative writing, and translation
- Applications that work across all of your devices in an integrated way
- Tools to help you get things done
## Two Modes
### Local Mode
Run AI models entirely on your device, giving you complete privacy with no internet required.
![Jan Desktop](./_assets/jan_desktop.png)
### Cloud Mode
Connect to more powerful models when needed - either self-hosted or via jan.ai.
![Jan Everywhere](./_assets/jan_everywhere.png)
<Callout type="info">
Users shouldn't need to understand models, APIs, or technical details. Just choose Local for privacy or Cloud for power.
</Callout>
## Our Product Principles
### 1) It Just Works
1. Open Jan, start chatting
2. Onboarding is fully available but optional
3. Setting up an API key is optional
4. Selecting a local model is optional
5. Become a power user at your own pace, if you want to
We handle the complexity.
### 2) Cloud When Needed
Start completely locally and own your AI models. Add cloud capabilities only when you choose to.
### 3) Solve Problems, Not Settings
We help users get to answers quickly and leave the configurations as optional. Power users can
dig deeper, but it's never required.
## Available on Every Device
### Jan Desktop
This is how Jan started and it has been available since day 1.
Jan Desktop strives to be:
> Your personal AI workstation that helps with your use cases and powers other devices. Run
models locally right away or bring an API key to connect to your favorite cloud-based models.
**Key Features:**
- Runs models locally on your hardware
- GPU acceleration support
- Powers other devices via network connection
- Complete privacy and control
- Windows, macOS, and Linux support
**Requirements:**
- Minimum 8GB RAM
- 10GB+ storage space
- Optional: NVIDIA GPU for acceleration
### Jan Web
**Status:** Beta Launch Soon
![Jan Web](./_assets/jan_web.png)
Web-based version of 👋 Jan with no setup required. Same default cloud mode for mobile and desktop users.
**Key Features:**
- No installation needed
- Instant access from any browser
- Automatic updates and maintenance
- Default cloud backend for mobile apps
- Team collaboration features
- Share prompts
- Share workflows
- Collaborate on threads
- Share threads
**Pricing:**
- Free for everyone
- Pro:
- Access our latest models
- Access other cloud providers, no need to bring their API keys
- Enterprise:
- Self-host or we host it for you
- Active support and SLAs
- SSO
  - Team features
### Jan Mobile
**Status:** Coming Q4 2025
Connect to Desktop/Server, run local mode with Jan Nano or our upcoming Jan v1, same experience everywhere.
Jan Mobile adapts to your situation:
At Home, you can connect to your Jan Desktop over WiFi
![Jan Mobile Home](./_assets/jan_mobile2.png)
At Work, you can connect to your company Jan Server
![Jan Mobile Work](./_assets/jan_mobile3.png)
On the Go, you can run Jan Nano on your phone or talk to your favourite cloud-based model
![Jan Mobile Go](./_assets/jan_mobile4.png)
**Key Features:**
- iOS and Android support
- Three adaptive modes (Desktop, Server, Local)
- Voice-first interface
- Seamless device switching
- Jan Nano for on-device AI
### Jan Server
**Status:** Coming Q3 2025
Self-hosted solution or connect to Jan via API.
**Key Features:**
- Docker and Kubernetes deployment
- Admin dashboard
- Team knowledge sharing
**Deployment Options:**
- Docker compose: Single command setup
- Kubernetes: Enterprise scale
- Bare metal: Maximum control
## What Makes Jan Different
| Feature | Other AI Assistants | Jan |
| :--- | :--- | :--- |
| **Models** | Wrapper around Claude/GPT | Our own models + You can own them |
| **Dual mode** | Your data on their servers | Your data stays yours |
| **Deployment** | Cloud only | Local, self-hosted, or cloud |
| **Cost** | Subscription forever | Free locally, pay for cloud |
## Development Timeline
Jan is actively developed with regular releases. Our development follows these key milestones:
### Current Focus
- **Jan Desktop**: Continuous improvements and model support
- **Jan Web**: Beta launch preparation
- **Model Development**: Jan Nano optimization and v1 launch
### Next 6 Months
- Jan Web public beta
- Mobile app development
- Server deployment tools
### Future Vision
- Complete full-stack AI solution
- Advanced tool integration
- Enterprise features
<Callout>
We're building AI that respects your choices. Run it locally and power other apps, connect to
cloud for power, or self-host for both.
</Callout>

View File

@ -1,4 +0,0 @@
{
"jan-v1": "Jan V1",
"jan-nano": "Jan Nano"
}

View File

@ -1,31 +0,0 @@
---
title: Jan Nano
description: Compact research model optimized for finding answers through tool use.
---
import { Callout } from 'nextra/components'
Jan Nano is a 4-billion parameter model designed for research and information retrieval. Instead of trying to know everything, it excels at finding anything through deep integration with Model Context Protocol (MCP) tools.
## Two Variants
| Model | Context Window | Size | Use Case |
|:---|:---|:---|:---|
| Jan Nano 32k | 32,768 tokens | 4-8GB | Quick research, general queries |
| Jan Nano 128k | 131,072 tokens | 8-12GB | Deep research, document analysis |
<Callout>
Jan Nano requires MCP-enabled tools (like web search) to reach its full potential. Enable MCP in **Settings → Advanced Settings**.
</Callout>
## What Makes Nano Different
- **Research-First Design:** Trained to find relevant information, synthesize findings, and provide accurate citations.
- **MCP Integration:** Works seamlessly with tools like web search, document analysis, and code repositories.
- **Extended Context:** The 128k variant can process entire codebases, book-length documents, or 50+ research papers simultaneously.
## Technical Details
- **Base:** 4B parameter transformer
- **Training:** Optimized for tool use and retrieval
- **Quantization:** Q4, Q8, FP16 variants available
## Philosophy
Most models try to be encyclopedias. Jan Nano is a research assistant. It doesn't memorize the internet—it knows how to navigate it.

View File

@ -1,29 +0,0 @@
---
title: Jan V1
description: Our upcoming family of foundational models, built to compete with the best.
---
import { Callout } from 'nextra/components'
<Callout type='warning'>**In Development:** Jan V1 models are currently being trained and are not yet available.</Callout>
## Our Foundational Model Family
Jan V1 is our in-house, still in training, family of models designed to compete directly with leading
models. We're building powerful, general-purpose models from the ground up to solve real-world problems
with a focus on efficiency and privacy.
### Planned Model Lineup
| Model | Target Size | Intended Use Case | Availability |
|:------------|:------------|:-----------------------------|:--------------|
| Jan V1-7B | 4-8GB | Fast, efficient daily tasks | Coming Soon |
| Jan V1-13B | 8-16GB | Balanced power and performance | Coming Soon |
| Jan V1-70B | 40-64GB | Deep analysis, professional work | Coming Soon |
| Jan V1-2350B | 100GB+ | Frontier research, complex tasks | Planned 2026 |
### What to Expect
- **Competitive Performance**: Aiming for results on par with leading closed-source models.
- **Optimized for Local Use**: Efficient quantized versions for running on your own hardware.
- **Privacy-Centric**: Trainable and runnable in your own environment, ensuring your data stays yours.
- **Seamless Integration**: Designed to work perfectly within the Jan ecosystem.
- **Fine-tuning support**: Easy to adapt to specific tasks or domains.

View File

@ -1,6 +0,0 @@
{
"desktop": "Desktop",
"jan-ai": "Jan.ai",
"mobile": "Mobile",
"server": "Server"
}

View File

@ -1,121 +0,0 @@
---
title: Jan Desktop
description: AI that runs on your computer, not someone else's. Your personal AI workstation.
---
import { Callout, Tabs, Tab } from 'nextra/components'
This is how Jan started and it has been available since day 1.
Jan Desktop strives to be:
> Your personal AI workstation that helps with your use cases and powers other devices. Run models
locally right away or bring an API key to connect to your favorite cloud-based models.
Jan Desktop is where it all starts. Download it, open it, and start chatting. Your AI runs on your computer with zero setup required.
## Two Modes, Zero Complexity
### Local Mode (Default)
Your conversations stay on your computer. No internet needed. Complete privacy.
### Cloud Mode
Connect to more powerful models when you need them. Your choice of provider.
<Callout>
As of today, when you first open Jan you do have to download a model or connect to a cloud provider,
but that is about to change soon.
</Callout>
## What You Get
<div className="grid grid-cols-1 md:grid-cols-2 lg:grid-cols-3 gap-4 mt-6">
<div className="p-4 border border-gray-200 dark:border-gray-700 rounded-lg">
<h4 className="font-semibold text-lg mb-2">📡 Works Offline</h4>
<p className="text-sm text-gray-600 dark:text-gray-400">Download once, use forever. Internet is optional.</p>
</div>
<div className="p-4 border border-gray-200 dark:border-gray-700 rounded-lg">
<h4 className="font-semibold text-lg mb-2">🛡️ Your Data Stays Yours</h4>
<p className="text-sm text-gray-600 dark:text-gray-400">Everything stored in `~/.local/share/jan`. No cloud backups unless you want them.</p>
</div>
<div className="p-4 border border-gray-200 dark:border-gray-700 rounded-lg">
<h4 className="font-semibold text-lg mb-2">🖥️ Powers Other Devices</h4>
<p className="text-sm text-gray-600 dark:text-gray-400">Your desktop becomes an AI server for your phone and other computers.</p>
</div>
<div className="p-4 border border-gray-200 dark:border-gray-700 rounded-lg">
<h4 className="font-semibold text-lg mb-2">👨‍💻 Developer Friendly</h4>
<p className="text-sm text-gray-600 dark:text-gray-400">Local API at `localhost:1337`. Works with any OpenAI-compatible tool.</p>
</div>
<div className="p-4 border border-gray-200 dark:border-gray-700 rounded-lg">
<h4 className="font-semibold text-lg mb-2">🚀 GPU Acceleration</h4>
<p className="text-sm text-gray-600 dark:text-gray-400">Automatically detects and uses NVIDIA GPUs for faster performance.</p>
</div>
<div className="p-4 border border-gray-200 dark:border-gray-700 rounded-lg">
<h4 className="font-semibold text-lg mb-2">💻 Cross-Platform</h4>
<p className="text-sm text-gray-600 dark:text-gray-400">Windows, macOS, and Linux support with native performance.</p>
</div>
</div>
## System Requirements
### Minimum Requirements
- **RAM:** 8GB (models use less than 80% of available memory)
- **Storage:** 10GB+ free space
- **OS:** Windows 10, macOS 12, Ubuntu 20.04 or newer
### Recommended
- **RAM:** 16GB+ for larger models
- **Storage:** 20GB+ for multiple models
- **GPU:** NVIDIA GPU with 6GB+ VRAM for acceleration
- **OS:** Latest versions for best performance
## Getting Started
1. **Download Jan** from [jan.ai/download](https://jan.ai/download)
2. **Open the app** - it loads with everything ready
3. **Start chatting** - that's it
## Local Mode Features
- **Select your favorite Model:** Jan allows you to download any GGUF model from the Hugging Face Hub.
- **Smart Defaults:** Automatically uses your GPU if available and adjusts to your system's capabilities.
- **Complete Privacy:** No telemetry by default, no account required, and no data leaves your machine.
## Cloud Mode (Optional)
Connect to external AI providers when you need more power:
<Tabs items={['jan.ai', 'OpenAI', 'Self-Hosted']}>
<Tab>
Our cloud service (coming soon). One click to enable.
</Tab>
<Tab>
Use your OpenAI API key for GPT-4 access.
</Tab>
<Tab>
Connect to your own Jan Server.
</Tab>
</Tabs>
## Desktop as Your AI Hub
Your desktop can power AI across all your devices by automatically becoming a local server.
- **Network Sharing:** Mobile apps connect over WiFi, and other computers can access your models.
- **API:** Available at `localhost:1337` for any OpenAI-compatible application.
- **Offline Access:** No internet required for local network connections.
## For Developers
### Local API Server
```bash
# Always running at localhost:1337
curl http://localhost:1337/v1/chat/completions \
-H "Content-Type: application/json" \
-d '{"model": "gemma3:3b", "messages": [{"role": "user", "content": "Hello"}]}'
```
## The Bottom Line
Jan Desktop is AI that respects that your computer is YOUR computer, not a terminal
to someone else's server. Just software that works for you.

View File

@ -1,49 +0,0 @@
---
title: Jan.ai
description: Cloud AI that respects your privacy. Web-based access to Jan with no setup required.
---
import { Callout } from 'nextra/components'
![Jan Web](../_assets/jan_web.png)
**Status:** Beta Launch Soon 🚀
Web-based version of Jan with no setup required. Same default cloud mode for (soon) mobile and desktop users.
## What is Jan Web?
Jan Web is the cloud-hosted version of Jan that runs in your browser. No installation needed, instant access
from any device, with the same AI experience you get locally.
<Callout>
Currently in development with a beta launch coming soon.
</Callout>
## How It Works
<div class="grid grid-cols-1 md:grid-cols-2 gap-4 mt-6">
<div class="p-4 border border-gray-200 dark:border-gray-700 rounded-lg">
<h4 class="font-semibold text-lg mb-2">For Desktop Users</h4>
<p class="text-sm text-gray-600 dark:text-gray-400">You can sync conversations from Jan desktop to Jan Web.</p>
</div>
<div class="p-4 border border-gray-200 dark:border-gray-700 rounded-lg">
<h4 class="font-semibold text-lg mb-2">For Mobile Users</h4>
<p class="text-sm text-gray-600 dark:text-gray-400">Jan Web uses the same models you have access to on the go.</p>
</div>
<div class="p-4 border border-gray-200 dark:border-gray-700 rounded-lg">
<h4 class="font-semibold text-lg mb-2">For Web Users</h4>
<p class="text-sm text-gray-600 dark:text-gray-400">Visit (soon) ask.jan.ai directly in your browser for instant access to AI without downloading anything.</p>
</div>
<div class="p-4 border border-gray-200 dark:border-gray-700 rounded-lg">
<h4 class="font-semibold text-lg mb-2">Team Collaboration</h4>
<p class="text-sm text-gray-600 dark:text-gray-400">Share prompts, workflows, and collaborate on threads with your team members.</p>
</div>
</div>
## Pricing
| Tier | Features | Price |
| :--- | :--- | :--- |
| **Free** | Free for everyone | $0 |
| **Pro** | Access our latest models<br/>Access other cloud providers without API keys | Coming Soon |
| **Enterprise** | Self-host or we host it for you<br/>Active support and SLAs<br/>SSO integration<br/>Team features | Contact Sales |

View File

@ -1,44 +0,0 @@
---
title: Jan Mobile
description: Your AI assistant on the go. Seamlessly connect to local, desktop, or server models.
---
import { Callout } from 'nextra/components'
![Jan Mobile](../_assets/jan_mobile.png)
**Status:** Coming Q4 2025
Jan Mobile brings the same AI experience to your phone. Connect to your desktop, your server, or run models locally.
## How It Works
Jan Mobile adapts to your situation:
- **At Home:** Connect to your Jan Desktop over WiFi.
- **At Work:** Connect to your company Jan Server.
- **On the Go:** Run Jan Nano on your phone or use a cloud model.
## Three Modes, One Experience
### Desktop Mode
Access larger, more powerful models running on your home computer. No phone battery drain.
### Server Mode
Connect to your organization's private AI cloud for team collaboration and access to shared knowledge.
### Local Mode
No connection? No problem. Run models like 'Jan Nano' directly on your phone for complete privacy and offline access.
## Key Features
<div class='grid grid-cols-1 md:grid-cols-2 lg:grid-cols-3 gap-4 mt-6'>
<div class='p-4 border border-gray-200 dark:border-gray-700 rounded-lg'>**Seamless Switching**</div>
<div class='p-4 border border-gray-200 dark:border-gray-700 rounded-lg'>**Voice First Interface**</div>
<div class='p-4 border border-gray-200 dark:border-gray-700 rounded-lg'>**Sync Everything**</div>
<div class='p-4 border border-gray-200 dark:border-gray-700 rounded-lg'>**iOS and Android**</div>
<div class='p-4 border border-gray-200 dark:border-gray-700 rounded-lg'>**Adaptive Modes**</div>
<div class='p-4 border border-gray-200 dark:border-gray-700 rounded-lg'>**Jan Models Integration**</div>
</div>
## Development Status
- Core architecture in progress
- Desktop/Server connection protocols next
- Jan Nano mobile optimization in progress
- Beta launch planned for Q4 2025

View File

@ -1,38 +0,0 @@
---
title: Jan Server
description: Your own private AI cloud. Self-hosted AI for teams and enterprises.
---
import { Callout } from 'nextra/components'
![Jan Server](../_assets/jan-server.png)
**Status:** Coming Q2 2025
Jan Server is a powerful AI API platform with multi-user support that you can self-host. Deploy it on your
hardware to create your own private AI cloud for your team or organization, or run it at scale with Jan's
managed service.
## Why Organizations Need This
Jan Server gives you complete control over your AI infrastructure, ensuring total privacy, predictable
costs, and compliance readiness.
## Key Features
<div class='grid grid-cols-1 md:grid-cols-2 lg:grid-cols-3 gap-4 mt-6'>
<div class='p-4 border border-gray-200 dark:border-gray-700 rounded-lg'>**Multi-User Support**</div>
<div class='p-4 border border-gray-200 dark:border-gray-700 rounded-lg'>**Enterprise Authentication**</div>
<div class='p-4 border border-gray-200 dark:border-gray-700 rounded-lg'>**Flexible Deployment**</div>
<div class='p-4 border border-gray-200 dark:border-gray-700 rounded-lg'>**Admin Dashboard**</div>
<div class='p-4 border border-gray-200 dark:border-gray-700 rounded-lg'>**Team Knowledge Sharing**</div>
<div class='p-4 border border-gray-200 dark:border-gray-700 rounded-lg'>**Same API as Desktop**</div>
</div>
## Deployment Options
- **Docker:** Single command setup
- **Kubernetes:** Scale with your needs
- **Bare Metal:** Maximum control and performance
## Scaling Guidelines
- **Small Teams (5-10 users):** Small GPU cluster
- **Departments (10-50 users):** 4-8 GPU cluster nodes
- **Enterprise (50+ users):** Multi-cluster setup with custom configurations

View File

@ -1,5 +0,0 @@
{
"deepresearch": "DeepResearch",
"search": "Search",
"browseruse": "BrowserUse"
}

View File

@ -1,24 +0,0 @@
---
title: BrowserUse
description: Native browser automation for Jan, enabling AI to interact with the web on your behalf.
---
import { Callout } from 'nextra/components'
<Callout type='warning'>**In Development:** This tool is planned and not yet available.</Callout>
## Let Jan Use Your Browser
Jan allows you to give your AI models control of your browser to accomplish tasks, automate workflows, and interact with
websites just like you would.
Think of it as an integrated automation layer that turns Jan from a conversational AI into a powerful agent for action.
### Built on MCP
The tool will be implemented as a native **Model Context Protocol (MCP)** server within Jan, ensuring secure and
standardized communication between the AI model and the browser.
### Planned Core Features:
- **Secure Sessions** in an isolated, sandboxed environment.
- **Natural Language Control** (e.g., 'Book a flight...')
- **Visual Understanding** to interpret page content.
- **User in the Loop** for critical actions.

View File

@ -1,27 +0,0 @@
---
title: DeepResearch
description: An AI agent that performs comprehensive, multi-step research for you.
---
import { Callout } from 'nextra/components'
<Callout type='warning'>**In Development:** This tool is planned and not yet available.</Callout>
## Your Personal Research Analyst
DeepResearch is a planned native tool for Jan that transforms it into a powerful research agent. Give
it a complex question, and it will autonomously browse, analyze, and synthesize information from numerous
sources to deliver a comprehensive, structured report.
Think of it as Jan's answer to the advanced research capabilities seen in **OpenAI's ChatGPT** and
**Google's Gemini**, but built in the open and with user control at its core.
### How It Will Work
Unlike a simple web search that returns a list of links, DeepResearch will understand your goal, create
a research plan that you can edit, execute it, and deliver a final, synthesized document with citations.
### Planned Core Features:
- **Autonomous Multi-Step Research**
- **Comprehensive Source Analysis**
- **Structured Report Generation**
- **Full Transparency with Citations**
- **Local-First Privacy**

View File

@ -1,25 +0,0 @@
---
title: Search
description: A native search tool that gives you answers, not just links, with complete privacy.
---
import { Callout } from 'nextra/components'
<Callout type='warning'>**In Development:** This tool is planned and not yet available.</Callout>
## Answers, Not Just Links
'Search' is a planned native tool for Jan that rethinks web search. Instead of just giving you a list of links to sift through, it understands your question, scours the web, and provides a direct, synthesized answer with sources cited.
Think of it as a private, self-hosted alternative to services like **Perplexity.ai**, integrated directly into your AI assistant.
### How It's Different
- **Privacy-First:** Your search queries are processed locally and anonymized.
- **Direct Answers:** Get a concise, accurate answer compiled from the best sources.
- **Cited Sources:** Every piece of information is backed by a verifiable source.
- **Conversational Follow-up:** Ask follow-up questions in a natural way.
### Planned Core Features:
- **Real-Time Information**
- **Source Verification**
- **Customizable Focus**
- **Seamless Integration** with other tools