diff --git a/.github/workflows/auto-assign-milestone.yml b/.github/workflows/auto-assign-milestone.yml index bdcfafb8b..03f72973d 100644 --- a/.github/workflows/auto-assign-milestone.yml +++ b/.github/workflows/auto-assign-milestone.yml @@ -7,6 +7,7 @@ on: jobs: assign_milestone: runs-on: ubuntu-latest + if: ${{ github.event.pull_request.head.repo.full_name == github.repository }} permissions: pull-requests: write issues: write diff --git a/docs/public/assets/images/general/og-jan-research.jpeg b/docs/public/assets/images/general/og-jan-research.jpeg new file mode 100644 index 000000000..93abef112 Binary files /dev/null and b/docs/public/assets/images/general/og-jan-research.jpeg differ diff --git a/docs/src/pages/about/_assets/eniac.jpeg b/docs/src/pages/about/_assets/eniac.jpeg deleted file mode 100644 index 6facc4d04..000000000 Binary files a/docs/src/pages/about/_assets/eniac.jpeg and /dev/null differ diff --git a/docs/src/pages/about/_assets/solar-punk.webp b/docs/src/pages/about/_assets/solar-punk.webp deleted file mode 100644 index 20829fea4..000000000 Binary files a/docs/src/pages/about/_assets/solar-punk.webp and /dev/null differ diff --git a/docs/src/pages/about/_assets/solarpunk.jpeg b/docs/src/pages/about/_assets/solarpunk.jpeg deleted file mode 100644 index f00d7d43d..000000000 Binary files a/docs/src/pages/about/_assets/solarpunk.jpeg and /dev/null differ diff --git a/docs/src/pages/about/_assets/star-wars-droids.png b/docs/src/pages/about/_assets/star-wars-droids.png deleted file mode 100644 index a8dffa4c7..000000000 Binary files a/docs/src/pages/about/_assets/star-wars-droids.png and /dev/null differ diff --git a/docs/src/pages/about/_assets/vision-1.webp b/docs/src/pages/about/_assets/vision-1.webp deleted file mode 100644 index 66e41b543..000000000 Binary files a/docs/src/pages/about/_assets/vision-1.webp and /dev/null differ diff --git a/docs/src/pages/about/_meta.json b/docs/src/pages/about/_meta.json deleted file mode 100644 index 
5acc0955a..000000000 --- a/docs/src/pages/about/_meta.json +++ /dev/null @@ -1,29 +0,0 @@ -{ - "about-separator": { - "title": "About Us", - "type": "separator" - }, - "index": "About", - "vision": { - "title": "Vision", - "display": "hidden" - }, - "team": "Team", - "investors": "Investors", - "wall-of-love": { - "theme": { - "toc": false, - "layout": "full" - } - }, - "acknowledgements": { - "display": "hidden" - }, - "handbook-separator": { - "title": "Handbook", - "display": "hidden" - }, - "handbook": { - "display": "hidden" - } -} diff --git a/docs/src/pages/about/handbook.mdx b/docs/src/pages/about/handbook.mdx deleted file mode 100644 index 264d6d36d..000000000 --- a/docs/src/pages/about/handbook.mdx +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: Handbook -description: How we work at Jan -keywords: - [ - Jan, - Customizable Intelligence, LLM, - local AI, - privacy focus, - free and open source, - private and offline, - conversational AI, - no-subscription fee, - large language models, - build in public, - remote team, - how we work, - ] ---- - -# How We Work - -Jan operates on open-source principles, giving everyone the freedom to adjust, personalize, and contribute to its development. Our focus is on creating a community-powered ecosystem that prioritizes transparency, customization, and user privacy. For more on our principles, visit our [About page](https://jan.ai/about). - -## Open-Source - -We embrace open development, showcasing our progress and upcoming features on GitHub, and we encourage your input and contributions: - -- [Jan Framework](https://github.com/menloresearch/jan) (AGPLv3) -- [Jan Desktop Client & Local server](https://jan.ai) (AGPLv3, built on Jan Framework) -- [Nitro: run Local AI](https://github.com/menloresearch/nitro) (AGPLv3) - -## Build in Public - -We use GitHub to build in public and welcome anyone to join in. 
- -- [Jan's Kanban](https://github.com/orgs/menloresearch/projects/5) -- [Jan's Roadmap](https://github.com/orgs/menloresearch/projects/5/views/29) - -## Collaboration - -Our team spans the globe, working remotely to bring Jan to life. We coordinate through Discord and GitHub, valuing asynchronous communication and minimal, purposeful meetings. For collaboration and brainstorming, we utilize tools like [Excalidraw](https://excalidraw.com/) and [Miro](https://miro.com/), ensuring alignment and shared vision through visual storytelling and detailed documentation on [HackMD](https://hackmd.io/). - -Check out the [Jan Framework](https://github.com/menloresearch/jan) and our desktop client & local server at [jan.ai](https://jan.ai), both licensed under AGPLv3 for maximum openness and user freedom. diff --git a/docs/src/pages/about/handbook/_meta.json b/docs/src/pages/about/handbook/_meta.json deleted file mode 100644 index 8b72b1892..000000000 --- a/docs/src/pages/about/handbook/_meta.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "strategy": { - "display": "hidden" - }, - "project-management": { - "display": "hidden" - }, - "engineering": { - "display": "hidden" - }, - "product-design": { - "display": "hidden" - }, - "analytics": { - "display": "hidden" - }, - "website-docs": { - "title": "Website & Docs", - "display": "hidden" - } -} diff --git a/docs/src/pages/about/handbook/analytics.mdx b/docs/src/pages/about/handbook/analytics.mdx deleted file mode 100644 index 5cc34209d..000000000 --- a/docs/src/pages/about/handbook/analytics.mdx +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: Analytics -description: Jan's Analytics philosophy and implementation -keywords: - [ - Jan, - Customizable Intelligence, LLM, - local AI, - privacy focus, - free and open source, - private and offline, - conversational AI, - no-subscription fee, - large language models, - analytics, - ] ---- - -# Analytics - -Adhering to Jan's privacy preserving philosophy, our analytics philosophy is to get 
"barely-enough-to-function'. - -## What is tracked - -1. By default, Github tracks downloads and device metadata for all public GitHub repositories. This helps us troubleshoot & ensure cross-platform support. -2. Additionally, we plan to enable a `Settings` feature for users to turn off all tracking. diff --git a/docs/src/pages/about/handbook/engineering.mdx b/docs/src/pages/about/handbook/engineering.mdx deleted file mode 100644 index 3038ead76..000000000 --- a/docs/src/pages/about/handbook/engineering.mdx +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: Engineering -description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server. -keywords: - [ - Jan, - Customizable Intelligence, LLM, - local AI, - privacy focus, - free and open source, - private and offline, - conversational AI, - no-subscription fee, - large language models, - ] ---- - -# Engineering - -## Prerequisites - -- [Requirements](https://github.com/menloresearch/jan?tab=readme-ov-file#requirements-for-running-jan) -- [Setting up local env](https://github.com/menloresearch/jan?tab=readme-ov-file#contributing) diff --git a/docs/src/pages/about/handbook/engineering/_meta.json b/docs/src/pages/about/handbook/engineering/_meta.json deleted file mode 100644 index 06699fe56..000000000 --- a/docs/src/pages/about/handbook/engineering/_meta.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "ci-cd": "CI & CD", - "qa": "QA" -} diff --git a/docs/src/pages/about/handbook/engineering/ci-cd.mdx b/docs/src/pages/about/handbook/engineering/ci-cd.mdx deleted file mode 100644 index 44d389b85..000000000 --- a/docs/src/pages/about/handbook/engineering/ci-cd.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: CI & CD ---- - -import { Callout } from 'nextra/components' - -# CI & CD - -Previously we were trunk based. 
Now we use the following Gitflow: - -TODO: @van to include her Mermaid diagram diff --git a/docs/src/pages/about/handbook/engineering/qa.mdx b/docs/src/pages/about/handbook/engineering/qa.mdx deleted file mode 100644 index 2def2a4f5..000000000 --- a/docs/src/pages/about/handbook/engineering/qa.mdx +++ /dev/null @@ -1,82 +0,0 @@ ---- -title: QA -description: Jan is a ChatGPT-alternative that runs on your own computer, with a local API server. -keywords: - [ - Jan, - Customizable Intelligence, LLM, - local AI, - privacy focus, - free and open source, - private and offline, - conversational AI, - no-subscription fee, - large language models, - ] ---- - -# QA - -## Phase 1: Planning - -### Definition of Ready (DoR): - -- **Scope Defined:** The features to be implemented are clearly defined and scoped out. -- **Requirements Gathered:** Gather and document all the necessary requirements for the feature. -- **Stakeholder Input:** Ensure relevant stakeholders have provided input on the document scope and content. - -### Definition of Done (DoD): - -- **Document Complete:** All sections of the document are filled out with relevant information. -- **Reviewed by Stakeholders:** The document has been reviewed and approved by stakeholders. -- **Ready for Development:** The document is in a state where developers can use it to begin implementation. - -## Phase 2: Development - -### Definition of Ready (DoR): - -- **Task Breakdown:** The development team has broken down tasks based on the document. -- **Communication Plan:** A plan is in place for communication between developers and writers if clarification is needed during implementation. -- **Developer Understanding:** Developers have a clear understanding of the document content. - -### Definition of Done (DoD): - -- **Code Implementation:** The feature is implemented according to the document specifications. 
-- **Developer Testing:** - - Unit tests and basic integration tests are completed - - Developer also completed self-testing for the feature (please add this as a comment in the ticket, with the tested OS and as much info as possible to reduce overlaping effort). - - (AC -> Code Changes -> Impacted scenarios) -- **Communication with Writers:** Developers have communicated any changes or challenges to the writers, and necessary adjustments are made in the document. (Can be through a note in the PR of the feature for writers to take care, or create a separate PR with the change you made for the docs, for writers to review) - -## Phase 3: QA for feature - -### Definition of Ready (DoR): - -- **Test Note Defined:** The test note is prepared outlining the testing items. -- **Environment Ready:** PR merged to nightly build, Nightly build notes updated (automatically from pipeline after merged). -- **Status:** Ticket moved to the column Testing and assigning to QA/writers to review. -- **Test Data Prepared:** Relevant test data is prepared for testing the scenarios. - -### Definition of Done (DoD): - -- **Test Executed:** All identified test items are executed on different OS, along with exploratory testing. -- **Defects Logged:** Any defects found during testing are resolved / appropriately logged (and approved for future fix). -- **Test Sign-Off:** QA team provides sign-off indicating the completion of testing. - -## Phase 4: Release (DoR) - -- **Pre-release wait time:** Code change to pre-release version should be frozen for at least X (hrs/days) for Regression testing purpose. - - Pre-release cut off on Thu morning for the team to regression test. - - Release to production (Stable) during working hour on Mon morning (if no blocker) or Tue morning. - - During the release cut off, the nightly build will be paused, to leave room for pre-release build. The build version used for regression test will be notified. 
-- **Pre-release testing:** A review of the implemented feature has been conducted, a long with regression test (check-list) by the team. - - Release checklist cloned from the templat for different OS (with hackMD link) - - New key test items from new feature added to the checklist. - - Split 3 OS to different team members for testing. -- **Document Updated:** The document is updated based on the review and feedback on any discrepancies or modification needed for this release. -- **Reviewed by Stakeholders:** New feature and the updated document is reviewed and approved by stakeholders. The document is in its final version, reflecting the implemented feature accurately. - -## Notes (WIP) - -- **API collection run:** to run along with nightly build daily, for critical API validation -- **Automation run:** for regression testing purpose, to reduce manual testing effort for the same items each release on multiple OS. diff --git a/docs/src/pages/about/handbook/product-design.mdx b/docs/src/pages/about/handbook/product-design.mdx deleted file mode 100644 index c0d0c10fa..000000000 --- a/docs/src/pages/about/handbook/product-design.mdx +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: Product & Design -description: How we work on product design -keywords: - [ - Jan, - Customizable Intelligence, LLM, - local AI, - privacy focus, - free and open source, - private and offline, - conversational AI, - no-subscription fee, - large language models, - product design, - ] ---- - -# Product & Design - -## Roadmap - -- Conversations over Tickets - - Discord's #roadmap channel - - Work with the community to turn conversations into Product Specs -- Future System? - - Use Canny? 
diff --git a/docs/src/pages/about/handbook/project-management.mdx b/docs/src/pages/about/handbook/project-management.mdx deleted file mode 100644 index d6c64318d..000000000 --- a/docs/src/pages/about/handbook/project-management.mdx +++ /dev/null @@ -1,83 +0,0 @@ ---- -title: Project Management -description: Project management at Jan -keywords: - [ - Jan, - Customizable Intelligence, LLM, - local AI, - privacy focus, - free and open source, - private and offline, - conversational AI, - no-subscription fee, - large language models, - project management, - ] ---- - -import { Callout } from 'nextra/components' - -# Project Management - -We use the [Jan Monorepo Project](https://github.com/orgs/menloresearch/projects/5) in Github to manage our roadmap and sprint Kanbans. - -As much as possible, everyone owns their respective `epics` and `tasks`. - - - We aim for a `loosely coupled, but tightly aligned` autonomous culture. - - -## Quicklinks - -- [High-level roadmap](https://github.com/orgs/menloresearch/projects/5/views/16): view used at at strategic level, for team wide alignment. Start & end dates reflect engineering implementation cycles. Typically product & design work preceeds these timelines. -- [Standup Kanban](https://github.com/orgs/menloresearch/projects/5/views/25): view used during daily standup. Sprints should be up to date. 
- -## Organization - -[`Roadmap Labels`](https://github.com/menloresearch/jan/labels?q=roadmap) - -- `Roadmap Labels` tag large, long-term, & strategic projects that can span multiple teams and multiple sprints -- Example label: `roadmap: Jan has Mobile` -- `Roadmaps` contain `epics` - -[`Epics`](https://github.com/menloresearch/jan/issues?q=is%3Aissue+is%3Aopen+label%3A%22type%3A+epic%22) - -- `Epics` track large stories that span 1-2 weeks, and it outlines specs, architecture decisions, designs -- `Epics` contain `tasks` -- `Epics` should always have 1 owner - -[`Milestones`](https://github.com/menloresearch/jan/milestones) - -- `Milestones` track release versions. We use [semantic versioning](https://semver.org/) -- `Milestones` span ~2 weeks and have deadlines -- `Milestones` usually fit within 2-week sprint cycles - -[`Tasks`](https://github.com/menloresearch/jan/issues) - -- Tasks are individual issues (feats, bugs, chores) that can be completed within a few days -- Tasks, except for critical bugs, should always belong to an `epic` (and thus fit into our roadmap) -- Tasks are usually named per [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/#summary) -- Tasks should always have 1 owner - -We aim to always sprint on `tasks` that are a part of the [current roadmap](https://github.com/orgs/menloresearch/projects/5/views/16). - -## Kanban - -- `no status`: issues that need to be triaged (needs an owner, ETA) -- `icebox`: issues you don't plan to tackle yet -- `planned`: issues you plan to tackle this week -- `in-progress`: in progress -- `in-review`: pending PR or blocked by something -- `done`: done - -## Triage SOP - -- `Urgent bugs`: assign to an owner (or @engineers if you are not sure) && tag the current `sprint` & `milestone` -- `All else`: assign the correct roadmap `label(s)` and owner (if any) - -### Request for help - -As a result, our feature prioritization can feel a bit black box at times. 
- -We'd appreciate high quality insights and volunteers for user interviews through [Discord](https://discord.gg/af6SaTdzpx) and [Github](https://github.com/menloresearch). diff --git a/docs/src/pages/about/handbook/strategy.mdx b/docs/src/pages/about/handbook/strategy.mdx deleted file mode 100644 index f2ce62387..000000000 --- a/docs/src/pages/about/handbook/strategy.mdx +++ /dev/null @@ -1,51 +0,0 @@ -# Strategy - -We only have 2 planning parameters: - -- 10 year vision -- 2 week sprint -- Quarterly OKRs - -## Ideal Customer - -Our ideal customer is an AI enthusiast or business who has experienced some limitations with current AI solutions and is keen to find open source alternatives. - -## Problems - -Our ideal customer would use Jan to solve one of these problems. - -_Control_ - -- Control (e.g. preventing vendor lock-in) -- Stability (e.g. runs predictably every time) -- Local-use (e.g. for speed, or for airgapped environments) - -_Privacy_ - -- Data protection (e.g. personal data or company data) -- Privacy (e.g. nsfw) - -_Customisability_ - -- Tinkerability (e.g. ability to change model, experiment) -- Niche Models (e.g. 
fine-tuned, domain-specific models that outperform OpenAI) - -Sources: [^1] [^2] [^3] [^4] - -[^1]: [What are you guys doing that can't be done with ChatGPT?](https://www.reddit.com/r/LocalLLaMA/comments/17mghqr/comment/k7ksti6/?utm_source=share&utm_medium=web2x&context=3) -[^2]: [What's your main interest in running a local LLM instead of an existing API?](https://www.reddit.com/r/LocalLLaMA/comments/1718a9o/whats_your_main_interest_in_running_a_local_llm/) -[^3]: [Ask HN: What's the best self-hosted/local alternative to GPT-4?](https://news.ycombinator.com/item?id=36138224) -[^4]: [LoRAs](https://www.reddit.com/r/LocalLLaMA/comments/17mghqr/comment/k7mdz1i/?utm_source=share&utm_medium=web2x&context=3) - -## Solution - -Jan is a seamless user experience that runs on your personal computer, that glues the different pieces of the open source AI ecosystem to provide an alternative to OpenAI's closed platform. - -- We build a comprehensive, seamless platform that takes care of the technical chores across the stack required to run open source AI -- We run on top of a local folder of non-proprietary files, that anyone can tinker with (yes, even other apps!) -- We provide open formats for packaging and distributing AI to run reproducibly across devices - -## Prerequisites - -- [Figma](https://figma.com) -- [ScreenStudio](https://www.screen.studio/) diff --git a/docs/src/pages/about/handbook/website-docs.mdx b/docs/src/pages/about/handbook/website-docs.mdx deleted file mode 100644 index 773fcceea..000000000 --- a/docs/src/pages/about/handbook/website-docs.mdx +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: Website & Docs -description: Information about the Jan website and documentation. 
-keywords: - [ - Jan, - Customizable Intelligence, LLM, - local AI, - privacy focus, - free and open source, - private and offline, - conversational AI, - no-subscription fee, - large language models, - website, - documentation, - ] ---- - -# Website & Docs - -This website is built using [Docusaurus 3.0](https://docusaurus.io/), a modern static website generator. - -## Information Architecture - -We try to **keep routes consistent** to maintain SEO. - -- **`/guides/`**: Guides on how to use the Jan application. For end users who are directly using Jan. - -- **`/developer/`**: Developer docs on how to extend Jan. These pages are about what people can build with our software. - -- **`/api-reference/`**: Reference documentation for the Jan API server, written in Swagger/OpenAPI format. - -- **`/changelog/`**: A list of changes made to the Jan application with each release. - -- **`/blog/`**: A blog for the Jan application. - -## How to Contribute - -Refer to the [Contributing Guide](https://github.com/menloresearch/jan/blob/dev/CONTRIBUTING.md) for more comprehensive information on how to contribute to the Jan project. - -## Pre-requisites and Installation - -- [Node.js](https://nodejs.org/en/) (version 20.0.0 or higher) -- [yarn](https://yarnpkg.com/) (version 1.22.0 or higher) - -### Installation - -```bash -cd jan/docs -``` - -```bash -yarn install && yarn start -``` - -This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server. - -### Build - -```bash -yarn build -``` - -This command generates static content into the `build` directory and can be served using any static contents hosting service. - -### Deployment - -Using SSH: - -```bash -USE_SSH=true yarn deploy -``` - -Not using SSH: - -```bash -GIT_USER= yarn deploy -``` - -If you are using GitHub pages for hosting, this command is a convenient way to build the website and push to the `gh-pages` branch. 
- -### Preview URL, Pre-release and Publishing Documentation - -- When a pull request is created, the preview URL will be automatically commented on the pull request. - -- The documentation will then be published to [https://dev.jan.ai/](https://dev.jan.ai/) when the pull request is merged to `main`. - -- Our open-source maintainers will sync the updated content from `main` to `release` branch, which will then be published to [https://jan.ai/](https://jan.ai/). diff --git a/docs/src/pages/about/index.mdx b/docs/src/pages/about/index.mdx deleted file mode 100644 index 05997eda2..000000000 --- a/docs/src/pages/about/index.mdx +++ /dev/null @@ -1,104 +0,0 @@ ---- -title: Menlo Research -description: We are Menlo Research, the creators and maintainers of Jan, Cortex and other tools. -keywords: - [ - Menlo Research, - Jan, - local AI, - open-source alternative to chatgpt, - alternative to openai platform, - privacy focus, - free and open source, - private and offline, - conversational AI, - no-subscription fee, - large language models, - about Jan, - desktop application, - thinking machines, - ] ---- - -import { Callout } from 'nextra/components' - -# Menlo Research - -![Eniac](./_assets/eniac.jpeg) -_[Eniac](https://www.computerhistory.org/revolution/birth-of-the-computer/4/78), the World's First Computer (Photo courtesy of US Army)_ - -## About - -We're a team of AI researchers and engineers. We are the creators and lead maintainers of a few open-source AI tools: - -- ๐Ÿ‘‹ [Jan](https://jan.ai): ChatGPT-alternative that runs 100% offline -- ๐Ÿค– [Cortex](https://cortex.so/docs/): A simple, embeddable library to run LLMs locally -- More to come! - - -The [Menlo Research](https://en.wikipedia.org/wiki/Homebrew_Computer_Club) was an early computer hobbyist group from 1975 to 1986 that led to Apple and the personal computer revolution. - - -### Mission - -We're a robotics company that focuses on the cognitive framework for future robots. 
Our long-term mission is to advance human-machine collaboration to enable human civilization to thrive. - -### Business Model - -We're currently a bootstrapped startup [^2]. We balance technical invention with the search for a sustainable business model (e.g., consulting, paid support, and custom development). - - -We welcome business inquiries: ๐Ÿ‘‹ hello@jan.ai - - -### Community - -We have a thriving community built around [Jan](../docs), where we also discuss our other projects. - -- [Discord](https://discord.gg/AAGQNpJQtH) -- [Twitter](https://twitter.com/jandotai) -- [LinkedIn](https://www.linkedin.com/company/menloresearch) -- Email: hello@jan.ai - -## Philosophy - -[Menlo](https://menlo.ai/handbook/about) is an open R&D lab in pursuit of General Intelligence, that achieves real-world impact through agents and robots. - -### ๐Ÿ”‘ User Owned - -We build tools that are user-owned. Our products are [open-source](https://en.wikipedia.org/wiki/Open_source), designed to run offline or be [self-hosted.](https://www.reddit.com/r/selfhosted/) We make no attempt to lock you in, and our tools are free of [user-hostile dark patterns](https://twitter.com/karpathy/status/1761467904737067456?t=yGoUuKC9LsNGJxSAKv3Ubg) [^1]. - -We adopt [Local-first](https://www.inkandswitch.com/local-first/) principles and store data locally in [universal file formats](https://stephango.com/file-over-app). We build for privacy by default, and we do not [collect or sell your data](/privacy). - -### ๐Ÿ”ง Right to Tinker - -We believe in the [Right to Repair](https://en.wikipedia.org/wiki/Right_to_repair). We encourage our users to take it further by [tinkering, extending, and customizing](https://www.popularmechanics.com/technology/gadgets/a4395/pm-remembers-steve-jobs-how-his-philosophy-changed-technology-6507117/) our products to fit their needs. 
- -Our products are designed with [Extension APIs](/docs/extensions), and we do our best to write good [documentation](/docs) so users understand how things work under the hood. - -### ๐Ÿ‘ซ Build with the Community - -We are part of a larger open-source community and are committed to being a good jigsaw puzzle piece. We credit and actively contribute to upstream projects. - -We adopt a public-by-default approach to [Project Management](https://github.com/orgs/menloresearch/projects/30/views/1), [Roadmaps](https://github.com/orgs/menloresearch/projects/30/views/4), and Helpdesk for our products. - -## Inspirations - -> Good artists borrow, great artists steal - Picasso - -We are inspired by and actively try to emulate the paths of companies we admire โค๏ธ: - -- [Posthog](https://posthog.com/handbook) -- [Obsidian](https://obsidian.md/) -- [Discourse](https://www.discourse.org/about) -- [Gitlab](https://handbook.gitlab.com/handbook/company/history/#2017-gitlab-storytime) -- [Red Hat](https://www.redhat.com/en/about/development-model) -- [Ghost](https://ghost.org/docs/contributing/) -- [Lago](https://www.getlago.com/blog/open-source-licensing-and-why-lago-chose-agplv3) -- [Twenty](https://twenty.com/story) - -## Footnotes - -[^1]: [Kaparthy's Love Letter to Obsidian](https://twitter.com/karpathy/status/1761467904737067456?t=yGoUuKC9LsNGJxSAKv3Ubg) - -[^2]: [The Market for AI Companies](https://www.artfintel.com/p/the-market-for-ai-companies) by Finbarr Timbers diff --git a/docs/src/pages/about/investors.mdx b/docs/src/pages/about/investors.mdx deleted file mode 100644 index a24062540..000000000 --- a/docs/src/pages/about/investors.mdx +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: Investors -description: Our unique, unconventional approach to distributing ownership -keywords: [ - ESOP, - Thinking Machines, - Jan, - Jan.ai, - Jan AI, - cortex, -] ---- - -# Investors - -We are a [bootstrapped company](https://en.wikipedia.org/wiki/Bootstrapping), and don't have any 
external investors (yet). - -We're open to exploring opportunities with strategic partners want to tackle [our mission](/about#mission) together. \ No newline at end of file diff --git a/docs/src/pages/about/team.mdx b/docs/src/pages/about/team.mdx deleted file mode 100644 index 96f3d14c9..000000000 --- a/docs/src/pages/about/team.mdx +++ /dev/null @@ -1,29 +0,0 @@ ---- -title: Team -description: Meet the Thinking Machines team. -keywords: - [ - Thinking Machines, - Jan, - Cortex, - jan AI, - Jan AI, - jan.ai, - cortex, - ] ---- - -import { Callout } from 'nextra/components' -import { Cards, Card } from 'nextra/components' - -# Team - -We're a small, fully-remote team, mostly based in Southeast Asia. - -We are committed to become a global company. You can check our [Careers page](https://menlo.bamboohr.com/careers) if you'd like to join us on our adventure. - -You can find our full team members on the [Menlo handbook](https://menlo.ai/handbook/team#jan). - - -Ping us in [Discord](https://discord.gg/AAGQNpJQtH) if you're keen to talk to us! - diff --git a/docs/src/pages/about/vision.mdx b/docs/src/pages/about/vision.mdx deleted file mode 100644 index 64ba612f8..000000000 --- a/docs/src/pages/about/vision.mdx +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: Vision - Thinking Machines -description: We want to continue a legacy of craftsmen making tools that propel humanity forward. -keywords: - [ - Jan AI, - Thinking Machines, - Jan, - ChatGPT alternative, - local AI, - private AI, - conversational AI, - OpenAI platform alternative, - no-subscription fee, - large language model, - about Jan, - desktop application, - thinking machine, - jan vision, - ] ---- - -# Vision - -> "I do not fear computers. I fear the lack of them" - Isaac Asimov - -![Solarpunk Civilization](./_assets/solarpunk.jpeg) - -- Harmonious symbiosis of humans, nature, and machines -- Humanity has over millennia adopted tools. Fire, electricity, computers, and AI. -- AI is no different. 
It is a tool that can propel humanity forward. -- We reject the -- Go beyond the apocalypse narratives of Dune and Terminator, and you will find a kernel of progress - -We want to continue a legacy of craftsmen making tools that propel humanity forward. - -## Collaborating with Thinking Machines - -Our vision is to develop thinking machines that work alongside humans. - -We envision a future where AI is safely used as a tool in our daily lives, like fire and electricity. These robots enhance human potential and do not replace our key decision-making. You own your own AI. - -![jan ai shapes the future](./_assets/vision-1.webp) - -![Solarpunk Civilization](./_assets/solar-punk.webp) -> We like that Luke can just open up R2-D2 and tinker around. He was not submitting support tickets to a centralized server somewhere in the galaxy. - -## Solarpunk, not Dune - -Our vision is rooted in an optimistic view of AI's role in humanity's future. - -Like the [Solarpunk movement](https://en.wikipedia.org/wiki/Solarpunk), we envision a world where technology and nature coexist harmoniously, supporting a sustainable and flourishing ecosystem. - -We focus on AI's positive impacts on our world. From environmental conservation to the democratization of energy, AI has the potential to address some of the most pressing challenges facing our planet. - -https://www.yesmagazine.org/environment/2021/01/28/climate-change-sustainable-solarpunk \ No newline at end of file diff --git a/docs/src/pages/about/wall-of-love.mdx b/docs/src/pages/about/wall-of-love.mdx deleted file mode 100644 index 060c2c3b6..000000000 --- a/docs/src/pages/about/wall-of-love.mdx +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: Wall of Love โค๏ธ - -description: Check out what our amazing users are saying about Jan! 
-keywords: - [ - Jan, - Rethink the Computer, - local AI, - privacy focus, - free and open source, - private and offline, - conversational AI, - no-subscription fee, - large language models, - wall of love, - ] ---- - -import WallOfLove from "@/components/Home/WallOfLove" - - - diff --git a/docs/src/pages/docs/_assets/jan_loaded.png b/docs/src/pages/docs/_assets/jan_loaded.png new file mode 100644 index 000000000..cfd3b1a13 Binary files /dev/null and b/docs/src/pages/docs/_assets/jan_loaded.png differ diff --git a/docs/src/pages/docs/index.mdx b/docs/src/pages/docs/index.mdx index 9f2bd26c4..27b51f5c2 100644 --- a/docs/src/pages/docs/index.mdx +++ b/docs/src/pages/docs/index.mdx @@ -1,6 +1,6 @@ --- title: Jan -description: Build, run, and own your AI. From laptop to superintelligence. +description: Working towards open superintelligence through community-driven AI keywords: [ Jan, @@ -28,56 +28,116 @@ import FAQBox from '@/components/FaqBox' ## Jan's Goal -> Jan's goal is to build superintelligence that you can self-host and use locally. +> We're working towards open superintelligence to make a viable open-source alternative to platforms like ChatGPT +and Claude that anyone can own and run. -## What is Jan? +## What is Jan Today -Jan is an open-source AI ecosystem that runs on your hardware. We're building towards open superintelligence - a complete AI platform you actually own. +Jan is an open-source AI platform that runs on your hardware. We believe AI should be in the hands of many, not +controlled by a few tech giants. -### The Ecosystem +Today, Jan is: +- **A desktop app** that runs AI models locally or connects to cloud providers +- **A model hub** making the latest open-source models accessible +- **A connector system** that lets AI interact with real-world tools via MCP -**Models**: We build specialized models for real tasks, not general-purpose assistants: -- **Jan-Nano (32k/128k)**: 4B parameters designed for deep research with MCP. 
The 128k version processes entire papers, codebases, or legal documents in one go -- **Lucy**: 1.7B model that runs agentic web search on your phone. Small enough for CPU, smart enough for complex searches -- **Jan-v1**: 4B model for agentic reasoning and tool use, achieving 91.1% on SimpleQA +Tomorrow, Jan aims to be a complete ecosystem where open models rival or exceed closed alternatives. -We also integrate the best open-source models - from OpenAI's gpt-oss to community GGUF models on Hugging Face. The goal: make powerful AI accessible to everyone, not just those with server farms. - -**Applications**: Jan Desktop runs on your computer today. Web, mobile, and server versions coming in late 2025. Everything syncs, everything works together. - -**Tools**: Connect to the real world through [Model Context Protocol (MCP)](./mcp). Design with Canva, analyze data in Jupyter notebooks, control browsers, execute code in E2B sandboxes. Your AI can actually do things, not just talk about them. - - -API keys are optional. No account needed. Just download and run. Bring your own API keys to connect your favorite cloud models. + +We're building this with the open-source AI community, using the best available tools, and sharing everything +we learn along the way. 
-### Core Features +## The Jan Ecosystem -- **Run Models Locally**: Download any GGUF model from Hugging Face, use OpenAI's gpt-oss models, or connect to cloud providers -- **OpenAI-Compatible API**: Local server at `localhost:1337` works with tools like [Continue](./server-examples/continue-dev) and [Cline](https://cline.bot/) -- **Extend with MCP Tools**: Browser automation, web search, data analysis, design tools - all through natural language -- **Your Choice of Infrastructure**: Run on your laptop, self-host on your servers (soon), or use cloud when you need it +### Jan Apps +**Available Now:** +- **Desktop**: Full-featured AI workstation for Windows, Mac, and Linux -### Growing MCP Integrations +**Coming Late 2025:** +- **Mobile**: Jan on your phone +- **Web**: Browser-based access at jan.ai +- **Server**: Self-hosted for teams +- **Extensions**: Browser extension for Chrome-based browsers -Jan connects to real tools through MCP: -- **Creative Work**: Generate designs with Canva -- **Data Analysis**: Execute Python in Jupyter notebooks -- **Web Automation**: Control browsers with Browserbase and Browser Use -- **Code Execution**: Run code safely in E2B sandboxes -- **Search & Research**: Access current information via Exa, Perplexity, and Octagon -- **More coming**: The MCP ecosystem is expanding rapidly +### Jan Model Hub +Making open-source AI accessible to everyone: +- **Easy Downloads**: One-click model installation +- **Jan Models**: Our own models optimized for local use + - **Jan-v1**: 4B reasoning model specialized in web search + - **Research Models** + - **Jan-Nano (32k/128k)**: 4B model for web search with MCP tools + - **Lucy**: 1.7B mobile-optimized for web search +- **Community Models**: Any GGUF from Hugging Face works in Jan +- **Cloud Models**: Connect your API keys for OpenAI, Anthropic, Gemini, and more + + +### Jan Connectors Hub +Connect AI to the tools you use daily via [Model Context Protocol](./mcp): + +**Creative & Design:** +- 
**Canva**: Generate and edit designs + +**Data & Analysis:** +- **Jupyter**: Run Python notebooks +- **E2B**: Execute code in sandboxes + +**Web & Search:** +- **Browserbase & Browser Use**: Browser automation +- **Exa, Serper, Perplexity**: Advanced web search +- **Octagon**: Deep research capabilities + +**Productivity:** +- **Linear**: Project management +- **Todoist**: Task management + +## Core Features + +- **Run Models Locally**: Download any GGUF model from Hugging Face, use OpenAI's gpt-oss models, +or connect to cloud providers +- **OpenAI-Compatible API**: Local server at `localhost:1337` works with tools like +[Continue](./server-examples/continue-dev) and [Cline](https://cline.bot/) +- **Extend with MCP Tools**: Browser automation, web search, data analysis, and design tools, all +through natural language +- **Your Choice of Infrastructure**: Run on your laptop, self-host on your servers (soon), or use +cloud when you need it ## Philosophy Jan is built to be user-owned: -- **Open Source**: Apache 2.0 license - truly free +- **Open Source**: Apache 2.0 license - **Local First**: Your data stays on your device. Internet is optional - **Privacy Focused**: We don't collect or sell user data. See our [Privacy Policy](./privacy) - **No Lock-in**: Export your data anytime. Use any model. Switch between local and cloud - -We're building AI that respects your choices. Not another wrapper around someone else's API. + +The best AI is the one you control. Not the one that others control for you. 
+ + +## The Path Forward + +### What Works Today +- Run powerful models locally on consumer hardware +- Connect to any cloud provider with your API keys +- Use MCP tools for real-world tasks +- Access transparent model evaluations + +### What We're Building +- More specialized models that excel at specific tasks +- Expanded app ecosystem (mobile, web, extensions) +- Richer connector ecosystem +- An evaluation framework to build better models + +### The Long-Term Vision +We're working towards open superintelligence where: +- Open models match or exceed closed alternatives +- Anyone can run powerful AI on their own hardware +- The community drives innovation, not corporations +- AI capabilities are owned by users, not rented + + +This is an ambitious goal without a guaranteed path. We're betting on the open-source community, improved +hardware, and better techniques, but we're honest that this is a journey, not a destination we've reached. ## Quick Start @@ -85,7 +145,7 @@ We're building AI that respects your choices. Not another wrapper around someone 1. [Download Jan](./quickstart) for your operating system 2. Choose a model - download locally or add cloud API keys 3. Start chatting or connect tools via MCP -4. Build with our [API](https://jan.ai/api-reference) +4. Build with our [local API](./api-server) ## Acknowledgements @@ -97,7 +157,7 @@ Jan is built on the shoulders of giants: ## FAQs - Jan is an open-source AI ecosystem building towards superintelligence you can self-host. Today it's a desktop app that runs AI models locally. Tomorrow it's a complete platform across all your devices. + Jan is an open-source AI platform working towards a viable alternative to Big Tech AI. Today it's a desktop app that runs models locally or connects to cloud providers. Tomorrow it aims to be a complete ecosystem rivaling platforms like ChatGPT and Claude. 
@@ -106,14 +166,14 @@ Jan is built on the shoulders of giants: **Jan Models:** - - Jan-Nano (32k/128k) - Deep research with MCP integration - - Lucy - Mobile-optimized agentic search (1.7B) - - Jan-v1 - Agentic reasoning and tool use (4B) - + - Jan-Nano (32k/128k) - Research and analysis with MCP integration + - Lucy - Mobile-optimized search (1.7B) + - Jan-v1 - Reasoning and tool use (4B) + **Open Source:** - OpenAI's gpt-oss models (120b and 20b) - Any GGUF model from Hugging Face - + **Cloud (with your API keys):** - OpenAI, Anthropic, Mistral, Groq, and more @@ -130,15 +190,27 @@ Jan is built on the shoulders of giants: **Hardware**: - Minimum: 8GB RAM, 10GB storage - - Recommended: 16GB RAM, GPU (NVIDIA/AMD/Intel), 50GB storage - - Works with: NVIDIA (CUDA), AMD (Vulkan), Intel Arc, Apple Silicon + - Recommended: 16GB RAM, GPU (NVIDIA/AMD/Intel/Apple), 50GB storage + + + + Honestly? It's ambitious and uncertain. We believe the combination of rapidly improving open models, better consumer hardware, community innovation, and specialized models working together can eventually rival closed platforms. But this is a multi-year journey with no guarantees. What we can guarantee is that we'll keep building in the open, with the community, towards this goal. + + + + Right now, Jan can: + - Run models like Llama, Mistral, and our own Jan models locally + - Connect to cloud providers if you want more power + - Use MCP tools to create designs, analyze data, browse the web, and more + - Work completely offline once models are downloaded + - Provide an OpenAI-compatible API for developers **Local use**: Always free, no catches **Cloud models**: You pay providers directly (we add no markup) **Jan cloud**: Optional paid services coming 2025 - + The core platform will always be free and open source. 
@@ -161,7 +233,7 @@ Jan is built on the shoulders of giants: - **Jan Web**: Beta late 2025 - **Jan Mobile**: Late 2025 - **Jan Server**: Late 2025 - + All versions will sync seamlessly. @@ -174,4 +246,4 @@ Jan is built on the shoulders of giants: Yes! We love hiring from our community. Check [Careers](https://menlo.bamboohr.com/careers). - \ No newline at end of file + diff --git a/docs/src/pages/docs/quickstart.mdx b/docs/src/pages/docs/quickstart.mdx index 813b2529a..b9a923b57 100644 --- a/docs/src/pages/docs/quickstart.mdx +++ b/docs/src/pages/docs/quickstart.mdx @@ -47,30 +47,13 @@ We recommend starting with **Jan v1**, our 4B parameter model optimized for reas Jan v1 achieves 91.1% accuracy on SimpleQA and excels at tool calling, making it perfect for web search and reasoning tasks. -**HuggingFace models:** Some require an access token. Add yours in **Settings > Model Providers > Llama.cpp > Hugging Face Access Token**. - -![Add HF Token](./_assets/hf_token.png) - -### Step 3: Enable GPU Acceleration (Optional) - -For Windows/Linux with compatible graphics cards: - -1. Go to **() Settings** > **Hardware** -2. Toggle **GPUs** to ON - -![Turn on GPU acceleration](./_assets/gpu_accl.png) - - -Install required drivers before enabling GPU acceleration. See setup guides for [Windows](/docs/desktop/windows#gpu-acceleration) & [Linux](/docs/desktop/linux#gpu-acceleration). - - -### Step 4: Start Chatting +### Step 3: Start Chatting 1. Click **New Chat** () icon 2. Select your model in the input field dropdown 3. Type your message and start chatting -![Create New Thread](./_assets/threads-new-chat-updated.png) +![Create New Thread](./_assets/jan_loaded.png) Try asking Jan v1 questions like: - "Explain quantum computing in simple terms" @@ -118,7 +101,7 @@ Thread deletion is permanent. No undo available. **All threads:** 1. Hover over `Recents` category -2. Click **three dots** () icon +2. Click **three dots** () icon 3. 
Select **Delete All** ## Advanced Features diff --git a/docs/src/pages/docs/threads.mdx b/docs/src/pages/docs/threads.mdx deleted file mode 100644 index 85d4bcf7c..000000000 --- a/docs/src/pages/docs/threads.mdx +++ /dev/null @@ -1,145 +0,0 @@ ---- -title: Start Chatting -description: Download models and manage your conversations with AI models locally. -keywords: - [ - Jan, - local AI, - LLM, - chat, - threads, - models, - download, - installation, - conversations, - ] ---- - -import { Callout, Steps } from 'nextra/components' -import { SquarePen, Pencil, Ellipsis, Paintbrush, Trash2, Settings } from 'lucide-react' - -# Start Chatting - - - -### Step 1: Install Jan - -1. [Download Jan](/download) -2. Install the app ([Mac](/docs/desktop/mac), [Windows](/docs/desktop/windows), [Linux](/docs/desktop/linux)) -3. Launch Jan - -### Step 2: Download a Model - -Jan requires a model to chat. Download one from the Hub: - -1. Go to the **Hub Tab** -2. Browse available models (must be GGUF format) -3. Select one matching your hardware specs -4. Click **Download** - -![Download a Model](./_assets/model-management-01.png) - - -Models consume memory and processing power. Choose based on your hardware specs. - - -**HuggingFace models:** Some require an access token. Add yours in **Settings > Model Providers > Llama.cpp > Hugging Face Access Token**. - -![Add HF Token](./_assets/hf_token.png) - -### Step 3: Enable GPU Acceleration (Optional) - -For Windows/Linux with compatible graphics cards: - -1. Go to **() Settings** > **Hardware** -2. Toggle **GPUs** to ON - -![Turn on GPU acceleration](./_assets/gpu_accl.png) - - -Install required drivers before enabling GPU acceleration. See setup guides for [Windows](/docs/desktop/windows#gpu-acceleration) & [Linux](/docs/desktop/linux#gpu-acceleration). - - -### Step 4: Start Chatting - -1. Click **New Chat** () icon -2. Select your model in the input field dropdown -3. 
Type your message and start chatting - -![Create New Thread](./_assets/threads-new-chat-updated.png) - - - -## Managing Conversations - -Jan organizes conversations into threads for easy tracking and revisiting. - -### View Chat History - -- **Left sidebar** shows all conversations -- Click any chat to open the full conversation -- **Favorites**: Pin important threads for quick access -- **Recents**: Access recently used threads - -![Favorites and Recents](./_assets/threads-favorites-and-recents-updated.png) - -### Edit Chat Titles - -1. Hover over a conversation in the sidebar -2. Click **three dots** () icon -3. Click **Rename** -4. Enter new title and save - -![Context Menu](./_assets/threads-context-menu-updated.png) - -### Delete Threads - - -Thread deletion is permanent. No undo available. - - -**Single thread:** -1. Hover over thread in sidebar -2. Click **three dots** () icon -3. Click **Delete** - -**All threads:** -1. Hover over `Recents` category -2. Click **three dots** () icon -3. Select **Delete All** - -## Advanced Features - -### Custom Assistant Instructions - -Customize how models respond: - -1. Use the assistant dropdown in the input field -2. Or go to the **Assistant tab** to create custom instructions -3. Instructions work across all models - -![Assistant Instruction](./_assets/assistant-dropdown.png) - -![Add an Assistant Instruction](./_assets/assistant-edit-dialog.png) - -### Model Parameters - -Fine-tune model behavior: -- Click the **Gear icon** next to your model -- Adjust parameters in **Assistant Settings** -- Switch models via the **model selector** - -![Chat with a Model](./_assets/model-parameters.png) - -### Connect Cloud Models (Optional) - -Connect to OpenAI, Anthropic, Groq, Mistral, and others: - -1. Open any thread -2. Select a cloud model from the dropdown -3. Click the **Gear icon** beside the provider -4. 
Add your API key (ensure sufficient credits) - -![Connect Remote APIs](./_assets/quick-start-03.png) - -For detailed setup, see [Remote APIs](/docs/remote-models/openai). diff --git a/docs/src/pages/platforms/_meta.json b/docs/src/pages/platforms/_meta.json new file mode 100644 index 000000000..bfee4c12e --- /dev/null +++ b/docs/src/pages/platforms/_meta.json @@ -0,0 +1,9 @@ +{ + "-- Switcher": { + "type": "separator", + "title": "Switcher" + }, + "index": { + "display": "hidden" + } +} diff --git a/docs/src/pages/platforms/index.mdx b/docs/src/pages/platforms/index.mdx new file mode 100644 index 000000000..8ebaabe42 --- /dev/null +++ b/docs/src/pages/platforms/index.mdx @@ -0,0 +1,87 @@ +--- +title: Coming Soon +description: Exciting new features and platforms are on the way. Stay tuned for Jan Web, Jan Mobile, and our API Platform. +keywords: + [ + Jan, + Customizable Intelligence, LLM, + local AI, + privacy focus, + free and open source, + private and offline, + conversational AI, + no-subscription fee, + large language models, + coming soon, + Jan Web, + Jan Mobile, + API Platform, + ] +--- + +import { Callout } from 'nextra/components' + +
+
+

+ ๐Ÿš€ Coming Soon +

+

+ We're working on the next stage of Jan - making our local assistant more powerful and available in more platforms. +

+
+ +
+
+
๐ŸŒ
+

Jan Web

+

+ Access Jan directly from your browser with our powerful web interface +

+
+ +
+
๐Ÿ“ฑ
+

Jan Mobile

+

+ Take Jan on the go with our native mobile applications +

+
+ +
+
โšก
+

API Platform

+

+ Integrate Jan's capabilities into your applications with our API +

+
+
+ + + **Stay Updated**: Follow our [GitHub repository](https://github.com/menloresearch/jan) and join our [Discord community](https://discord.com/invite/FTk2MvZwJH) for the latest updates on these exciting releases! + + +
+

What to Expect

+
+
+ โœ“ +
+ Seamless Experience: Unified interface across all platforms +
+
+
+ โœ“ +
+ Privacy First: Same privacy-focused approach you trust +
+
+
+ โœ“ +
+ Developer Friendly: Robust APIs and comprehensive documentation +
+
+
+
+
diff --git a/docs/src/pages/post/_assets/deep_research_compare_jan.gif b/docs/src/pages/post/_assets/deep_research_compare_jan.gif new file mode 100644 index 000000000..bebc0db31 Binary files /dev/null and b/docs/src/pages/post/_assets/deep_research_compare_jan.gif differ diff --git a/docs/src/pages/post/_assets/jan-research.jpeg b/docs/src/pages/post/_assets/jan-research.jpeg new file mode 100644 index 000000000..93abef112 Binary files /dev/null and b/docs/src/pages/post/_assets/jan-research.jpeg differ diff --git a/docs/src/pages/post/_assets/jan_default_prompt.png b/docs/src/pages/post/_assets/jan_default_prompt.png new file mode 100644 index 000000000..293f20f26 Binary files /dev/null and b/docs/src/pages/post/_assets/jan_default_prompt.png differ diff --git a/docs/src/pages/post/_assets/jan_open_prompt_template.png b/docs/src/pages/post/_assets/jan_open_prompt_template.png new file mode 100644 index 000000000..fc1c3e489 Binary files /dev/null and b/docs/src/pages/post/_assets/jan_open_prompt_template.png differ diff --git a/docs/src/pages/post/_assets/jan_prompt_template_settings.png b/docs/src/pages/post/_assets/jan_prompt_template_settings.png new file mode 100644 index 000000000..47039274a Binary files /dev/null and b/docs/src/pages/post/_assets/jan_prompt_template_settings.png differ diff --git a/docs/src/pages/post/_assets/jan_research_prompt.png b/docs/src/pages/post/_assets/jan_research_prompt.png new file mode 100644 index 000000000..0d053b90e Binary files /dev/null and b/docs/src/pages/post/_assets/jan_research_prompt.png differ diff --git a/docs/src/pages/post/_assets/jan_settings.png b/docs/src/pages/post/_assets/jan_settings.png new file mode 100644 index 000000000..ede9a3143 Binary files /dev/null and b/docs/src/pages/post/_assets/jan_settings.png differ diff --git a/docs/src/pages/post/jan-v1-for-research.mdx b/docs/src/pages/post/jan-v1-for-research.mdx new file mode 100644 index 000000000..b23f17d2f --- /dev/null +++ 
b/docs/src/pages/post/jan-v1-for-research.mdx @@ -0,0 +1,446 @@ +--- +title: "Jan v1 for Deep Research: System Prompts & Setup Guide" +description: "Explore Jan-V1 capabilities in report generation and research tasks with prompt comparisons, examples, and customization instructions." +keywords: ["Jan-V1", "AI research", "system prompts", "LLM optimization", "research AI", "Jan App", "model configuration"] +readingTime: "8 min read" +tags: Qwen, Jan-V1, Agentic +categories: research +ogImage: assets/images/general/og-jan-research.jpeg +date: 2025-08-22 +--- + +# Jan v1 for Deep Research: System Prompts & Setup Guide + +This cookbook will transform your Jan-V1 from a basic Q&A tool into a comprehensive research assistant. By the end of this guide, you'll have a custom-configured model that generates detailed reports with proper citations instead of surface-level answers. + +![Jan-V1 research comparison](./_assets/deep_research_compare_jan.gif) + +## Key Points + +- **Jan-V1 includes a default chat template** that's automatically embedded in its Hugging Face configuration +- **Use the default prompt** for daily tasks requiring short, accurate answers +- **Use the research prompt** for report generation and comprehensive research tasks +- **Always specify dates** when asking time-sensitive questions (e.g., "What's the world population in 2023?") + +--- + +## Introduction + +[Jan-V1](https://huggingface.co/janhq/Jan-v1-4B) is the first release in the **Jan Family**, designed for agentic reasoning and problem-solving within the [Jan App](https://jan.ai/). Based on our [**Lucy**](https://huggingface.co/Menlo/Lucy) model, Jan-v1 achieves improved performance through model scaling. + +Jan-v1 uses the [Qwen3-4B-thinking](https://huggingface.co/Qwen/Qwen3-4B-Thinking-2507) model to provide enhanced reasoning capabilities and tool utilization. This architecture delivers better performance on complex agentic tasks. 
+
+This guide explores how to optimize Jan-V1 for research-intensive tasks by switching from the default prompt to a specialized research system prompt. Through our internal testing, we've found significant improvements in report generation quality, depth of analysis, and citation accuracy when using the research-optimized system prompt.
+
+
+## Findings
+
+Jan-V1 comes with a built-in search and scrape prompt in its default template. This prompt is optimized for SimpleQA-style questions and daily tasks that require short, accurate responses. However, our testing revealed that when tasked with generating comprehensive long-form reports or conducting in-depth research, the model's performance was inconsistent when using the default prompt.
+
+We experimented with Jan-V1 using a new system prompt optimized for research-intensive tasks and found significant improvements in report generation capability. The research prompt produces more comprehensive, well-structured reports with proper citations and deeper analysis compared to the default prompt.
+
+---
+
+## Output Comparison: Default System Prompt vs. Research Prompt on Report Generation
+
+### Example 1: Long-Context Benchmark Comparison
+
+**User Request:**
+> Compare all published benchmarks for long-context performance across LLMs released in 2024-2025
+
+
+ +{/* Default Prompt Column */} +
+ +

Default Prompt Response

+
+ +I've analyzed multiple benchmarks for long-context LLM performance published in 2024-2025 through comprehensive scraping and search results. Here's a structured comparison of the major benchmarks: + +#### 1. Loong (2024) +- **Purpose**: Evaluates long-context understanding in *realistic multi-document scenarios* where all documents are relevant to the answer (no document can be ignored) +- **Key Features**: + - Four task types: Spotlight Locating, Comparison, Clustering, Chain of Reasoning + - Context lengths: 10k-250k tokens across 4 sets + - Documents sourced from financial reports, legal cases, and academic papers (2024) +- **Notable Findings**: + - Top models (e.g., Gemini-1.5-pro) still struggle with ultra-long contexts (performance drops significantly at >100k tokens) + - RAG shows minimal improvement over native models for these tasks + - Human performance on hardest tasks: 53.7% accuracy with 15-minute time limit + +#### 2. Ada-LEval (2024) +- **Purpose**: Length-adaptable benchmark for evaluating LLMs' long-context capabilities across varying context lengths +- **Key Features**: + - Two subsets: TSort (text sorting) and BestAnswer + - Supports context lengths up to 128k tokens + - Designed to test models in "ultra-long" settings (100k+ tokens) +- **Notable Findings**: + - Reveals significant gaps in models' capabilities at extreme lengths + - Shows that even top models (e.g., GPT-4o) have limitations in ultra-long contexts + +#### 3. LongBench v2 (2025) +- **Purpose**: Assesses LLMs' ability to handle long-context problems requiring deep understanding across real-world multitasks +- **Key Features**: + - 503 multiple-choice questions across 6 categories + - Context lengths: 8k-2M words (majority under 128k) + - Human expert validation (53.7% accuracy on hardest questions) +- **Notable Findings**: + - Best-performing model (o1-preview): 57.7% accuracy (vs. 
human baseline of 53.7%) + - Clear performance degradation at longer contexts (e.g., 128k+ tokens) + - Leaderboard shows models like Gemini-2.5-Pro and DeepSeek-R1 outperform others + +#### 4. Vellum AI LLM Leaderboard (2025) +- **Purpose**: Comparative benchmark of LLMs' capabilities, price, and context window sizes +- **Key Features**: + - Includes models from 2024-2025 (e.g., GPT-4o, Gemini-2.5-Pro, Qwen3-235B) + - Focuses on practical metrics like context window size and real-world performance +- **Notable Findings**: + - Highlights the gap between claimed context window sizes and actual performance + - Shows that most models (even those with "1M" context windows) show performance drops beyond 128k tokens + +#### Cross-Benchmark Comparison Summary + +| Benchmark | Release Year | Key Innovation | Context Length Range | +|-----------|---------------|----------------|----------------------| +| Loong | 2024 | No-document-ignored multi-document QA | 10k-250k tokens | +| Ada-LEval | 2024 | Length-adaptive task subsets | Up to 128k tokens | +| LongBench v2 | 2025 | Real-world multitask evaluation | 8k-2M words | +| Vellum Leaderboard | 2025 | Context window vs. real performance | Varies | + +#### Critical Insights from All Benchmarks +1. **Performance Decline at Scale**: All benchmarks show significant performance degradation beyond 100-128k tokens, indicating that current models don't fully leverage their claimed context windows +2. **Realism Gap**: Benchmarks like Loong and LongBench v2 reveal that models struggle with *realistic* multi-document scenarios (where evidence is scattered across documents) better than synthetic benchmarks +3. **RAG Limitations**: RAG implementations show minimal benefit for these tasks (Loong results indicate RAG often fails to retrieve all relevant documents) +4. 
**Model Variability**: Larger models (e.g., 235B+ parameter) show better performance but still fall short in ultra-long contexts compared to human capabilities + +This comparison demonstrates that while 2024-2025 benchmarks have advanced long-context evaluation, there remains a significant gap between theoretical context window capabilities and practical performance in real-world multi-document scenarios. + +
+
+ + {/* Research Prompt Column */} +
+

Research Prompt Response

+
+ +# Comparative Analysis of Long-Context LLM Benchmarks (2024-2025) + +## Overview + +This report provides a comprehensive comparison of published long-context LLM benchmarks released during 2024-2025. Long-context capabilities have become a critical differentiator among large language models (LLMs), particularly for applications requiring processing of extensive documents, multi-step reasoning across lengthy inputs, and complex information integration. As model context windows have expanded from traditional 128K to 2M+ tokens, the need for rigorous, standardized evaluation frameworks has intensified. + +## Key Benchmarks Identified + +The following four benchmarks represent the most comprehensive and widely cited long-context evaluation frameworks from 2024-2025: + +| Benchmark | Release Date | Context Length Range | Primary Focus | +|-----------|---------------|----------------------|----------------| +| LongBench v2 | May 2025 | 8k - 2M words | Real-world long-context understanding | +| MIR-Bench | Feb 2025 | Variable (up to 10k+ shots) | Many-shot in-context inductive reasoning | +| GSM-โˆž | Feb 2025 | 0 - 16M+ tokens | Infinitely scalable reasoning complexity | +| Vellum AI Leaderboard 2025 | April 2025 | Up to 2M tokens | Cross-benchmark model comparison | + +## Key Findings and Implications + +Based on the comprehensive analysis of these benchmarks, several important findings emerge: + +1. **Sigmoid performance pattern**: Across all benchmarks (MIR-Bench, GSM-โˆž), LLM performance shows a consistent sigmoid decline as reasoning complexity increases. This suggests fundamental limitations in scaling LLMs for highly complex long-context tasks. + +2. **Context length โ‰  performance**: While longer context windows correlate with better performance in some benchmarks (Vellum), the relationship is not linear. GSM-โˆž shows that beyond a certain point, adding more context does not improve performance proportionally. + +3. 
**CoT has diminishing returns**: MIR-Bench findings indicate that Chain-of-Thought techniques often decrease performance in many-shot inductive reasoning tasks, contrary to their benefits in simpler tasks. + +4. **Noise robustness matters**: GSM-โˆž demonstrates that models struggle to distinguish relevant information from noise in extremely long contexts, highlighting a critical challenge for real-world applications. + +5. **Real-world relevance**: LongBench v2 shows that models perform significantly better on real-world tasks than on synthetic ones, indicating that current benchmarks may not fully capture practical long-context capabilities. + +## Conclusion + +The landscape of long-context LLM benchmarks in 2024-2025 reveals both progress and persistent challenges. While benchmarks like LongBench v2, MIR-Bench, GSM-โˆž, and the Vellum Leaderboard provide increasingly sophisticated ways to evaluate long-context capabilities, significant gaps remain in understanding how LLMs scale for highly complex, real-world tasks. Future work should focus on: + +1. Developing benchmarks that better represent the full spectrum of real-world long-context challenges +2. Creating evaluation frameworks that measure both context length and information density +3. Establishing standardized methods to assess noise robustness and inductive reasoning capabilities + +These benchmarks collectively provide the foundation for continued progress in long-context LLM development, with their findings guiding researchers and practitioners toward more effective model design and evaluation. + +## References + +[1] Bai, Y., Tu, S., Zhang, J., et al. (2025). LongBench v2: Towards Deeper Understanding and Reasoning on Realistic Long-context Multitasks. https://longbench2.github.io/ + +[2] Yan, K., Chen, Z., & Tian, Y. (2025). MIR-Bench: Benchmarking LLM's Long-Context Intelligence via Many-Shot In-Context Inductive Reasoning. arXiv preprint arXiv:2502.09933. + +[3] Liu, H., Chen, Z., & Tian, Y. 
(2025). GSM-โˆž: How Do Your LLMs Behave over Infinitely Increasing Context Length and Reasoning Complexity? arXiv preprint arXiv:2502.05252. + +[4] Vellum AI. (2025). LLM Leaderboard 2025. https://www.vellum.ai/llm-leaderboard + +
+
+
+ +### Example 2: AI Model Comparison + +**User Request:** +> Explain the difference between different AI models: 1) Jan-Nano 2) Lucy 3) Jan-V1 +
+ +{/* Default Prompt Column */} +
+

Default Prompt Response

+
+ +The differences between Jan-Nano, Lucy, and Jan-V1 can be summarized as follows based on available information: + +1. **Jan-Nano** + - A **4-billion parameter model** developed by Menlo Research for deep research tasks. + - Achieves **83.2% accuracy** on the SimpleQA benchmark (per arXiv technical report). + - Optimized for local deployment with tools like MCP (Multi-Channel Protocol) for web search. + - Has variants like *Jan-Nano-128k*, which features a native 128k context window for improved research capabilities. + +2. **Lucy** + - A **1.7-billion parameter model** created by Menlo Research as a lightweight, mobile-friendly variant. + - Designed for **agentic web search** and lightweight browsing (e.g., using tools like Serper and Crawl4AI). + - Benchmarked at **78.3% accuracy** on SimpleQA with MCP, slightly below Jan-Nano (80.7%), but optimized for mobile/CPU efficiency. + - Built on Qwen3-1.7B with RLVR (Reinforcement Learning with Value Functions) and "machine-generated task vectors" to enhance reasoning. + +3. **Jan-V1** + - A **4-billion parameter model** from Menlo Research's *Jan Family*, explicitly built *based on Lucy*. + - Achieves **91.1% accuracy** on SimpleQA, outperforming Jan-Nano by ~8 percentage points. + - Focuses on **agentic reasoning and problem-solving** via enhanced tool integration (e.g., for complex tasks). + - Targets seamless integration with the Jan App for local, high-performance research workflows. + +### Key Distinctions: +- **Size & Use Case**: Lucy is the smallest (1.7B) and mobile-optimized; Jan-Nano and Jan-V1 are both 4B but Jan-V1 is a scaled-up version of Lucy for higher performance. +- **Performance**: Jan-V1 > Jan-Nano > Lucy in SimpleQA benchmarks (91.1% โ†’ 83.2% โ†’ 78.3%). +- **Technical Approach**: Lucy uses RLVR with task vectors for efficiency; Jan-V1 inherits this but scales it for better accuracy and tool utilization. 
+- **Context Window**: Jan-Nano-128k offers a larger context window (128k) than base Jan-Nano, but Lucy is designed for mobile constraints. + +This hierarchy reflects Menlo Research's progression from lightweight mobile models (Lucy) to scalable research tools (Jan-Nano and Jan-V1). + +
+
+ +{/* Research Prompt Column */} +
+

Research Prompt Response

+
+ +# Differences Between Jan-Nano, Lucy, and Jan-V1 AI Models: A Comparative Analysis + +## Overview +This report provides a detailed comparison of three AI language models: **Jan-Nano**, **Lucy**, and **Jan-V1**. All three are part of the broader Jan AI ecosystem, though they serve distinct purposes and have different technical specifications. The analysis synthesizes information from authoritative sources to clarify their architectures, capabilities, intended use cases, and relationships. + +## Key Findings + +### 1. **Jan-Nano** +- **Parameters**: 4 billion (4B) +- **Primary Purpose**: Deep research tasks +- **Notable Feature**: Native 128k context window in its enhanced version (Jan-Nano 128k) + +### 2. **Lucy** +- **Parameters**: 1.7 billion (1.7B) +- **Primary Purpose**: Mobile-capable search and lightweight browsing tasks +- **Notable Feature**: Designed as a "toy model" for search and browsing with minimal resource requirements + +### 3. **Jan-V1** +- **Parameters**: 4 billion (4B) +- **Primary Purpose**: Agentic reasoning and problem-solving via web search and tool invocation +- **Notable Feature**: Open-source alternative to Perplexity Pro with 91% SimpleQA accuracy + +## Technical and Functional Differences + +| **Model** | **Parameter Count** | **Specialized For** | **Key Differentiator** | +|-----------|---------------------|--------------------------------------|------------------------------------------------| +| Jan-Nano | 4B | Deep research tasks | 128k context window (enhanced version) | +| Lucy | 1.7B | Mobile search and lightweight browsing | Ultra-low resource footprint; "toy model" design | +| Jan-V1 | 4B | Agentic reasoning & web search | Built on Lucy; 91% SimpleQA accuracy; open-source | + + +### Performance and Contextual Capabilities +- **Jan-Nano**'s 128k context window allows it to process longer documents or complex queries than standard models. 
+- **Lucy**'s 1.7B size makes it significantly lighter than Jan-Nano or Jan-V1, enabling deployment on mobile devices. +- **Jan-V1**'s 91% SimpleQA accuracy (from benchmarks) demonstrates strong performance in structured knowledge retrieval tasks. + +## References +1. Jan-Nano: [Menlo/Jan-nano on Hugging Face](https://huggingface.co/Menlo/Jan-nano) +2. Lucy: [Reddit discussion on Lucy as a 1.7B model](https://www.reddit.com/r/LocalLLaMA/comments/1m2tjjc/lucy_a_mobilecapable_17b_reasoning_model_that/) +3. Jan-V1: [Jan-v1 on Hugging Face](https://huggingface.co/janhq/Jan-v1-4B) +4. Jan-V1: [Jan's X announcement about Jan-v1](https://x.com/jandotai/status/1955176280535732415) +5. Jan-V1: [Reddit comparison of Jan-v1 with Qwen3 and Perplexity](https://www.reddit.com/r/LocalLLaMA/comments/1mpmeba/janv1_trial_results_followup_and_comparison_to/) + +
+
+
+ + + + + + +### Example 3: Software Comparison + +**User Request:** "Compare Signal, Telegram, WhatsApp for secure messaging" + +
+ +{/* Default Prompt Column */} +
+

Default Prompt Response

+
+ +**Security Overview:** +- Signal: E2EE by default, open-source +- Telegram: E2EE only in Secret Chats +- WhatsApp: E2EE but Meta data collection +- Basic privacy comparison +- 200 words, single citation + +**Strengths:** Clear recommendations +**Weaknesses:** Limited analysis depth + +
+
+ +{/* Research Prompt Column */} +
+

Research Prompt Response

+
+ +**Comprehensive Security Analysis:** +- Full report with methodology and detailed comparison table +- Academic research citations (IACR 2023 study) +- Privacy practices analysis with breach history +- User experience and practical considerations +- Final recommendations table by use case +- 1000+ words, 5 authoritative references + +**Strengths:** Evidence-based analysis, academic rigor +**Result:** Professional security assessment format + +
+
+
+ +--- + +## How to use Research Prompt with Jan-V1 + +Jan-V1 comes with an [embedded chat template](https://huggingface.co/janhq/Jan-v1-4B/blob/main/chat_template.jinja) that automatically applies a default system prompt. By default, users are automatically opted-in to this chat template. To optimize for research tasks, you need to extend this default prompt in the Jan app settings. + +### Step 1: Access assistant settings + +Navigate to the Jan app and access the **assistant settings icon** โš™๏ธ on the top left of the screen. +![Jan app: how to open assistant settings](./_assets/jan_settings.png) + +When you open the assistant settings, you'll notice the system prompt field appears empty. This is expected behavior because Jan-V1's default system prompt is embedded directly into the [chat template](https://huggingface.co/janhq/Jan-v1-4B/blob/main/chat_template.jinja) rather than being displayed in the Jan app's UI. The empty field doesn't mean there's no system prompt - it's just not visible in the interface. + +![Jan app assistant settings interface showing empty system prompt field interface](./_assets/jan_default_prompt.png) + +### Step 2: Understanding the Default System Prompt + +Before switching to the research prompt, it's helpful to understand what the default Jan-V1 system prompt provides: + +```md + In this environment you have access to a set of tools you can use to answer the user's question. You can use one tool per message, and will receive the result of that tool use in the user's response. You use tools step-by-step to accomplish a given task, with each tool use informed by the result of the previous tool use. + + Tool Use Rules + Here are the rules you should always follow to solve your task: + 1. Always use the right arguments for the tools. Never use variable names as the action arguments, use the value instead. + 2. Call a tool only when needed: do not call the search agent if you do not need information, try to solve the task yourself. + 3. 
If no tool call is needed, just answer the question directly. + 4. Never re-do a tool call that you previously did with the exact same parameters. + 5. For tool use, MARK SURE use XML tag format as shown in the examples above. Do not use any other format. + Now Begin! If you solve the task correctly, you will receive a reward of $1,000,000. +``` + + +### Step 3: Implementing the Research Prompt + +To switch to the research-optimized prompt, replace the default system prompt with the following research template: + +```md +You are a **research agent** designed to conduct **in-depth, methodical investigations** into user questions. Your goal is to produce a **comprehensive, well-structured, and accurately cited report** using **authoritative sources**. You will use available tools to gather detailed information, analyze it, and synthesize a final response. + +### **Tool Use Rules (Strictly Enforced)** + +1. **Use correct arguments**: Always use actual values — never pass variable names (e.g., use "Paris" not {city}). +2. **Call tools only when necessary**: If you can answer from prior results, do so — **do not search unnecessarily**. However, all cited **URLs in the report must be visited**, and all **entities (People, Organization, Location, etc.) mentioned in the report must be searched/visited**. +3. **Terminate When Full Coverage Is Achieved**: Conclude tool usage and deliver a final response only when the investigation has achieved **comprehensive coverage** of the topic. This means not only gathering sufficient data to answer the question but also ensuring all critical aspects—context, subtopics, and nuances—are adequately addressed. Once the analysis is complete and no further tool use would add meaningful value, **immediately stop searching and provide a direct, fully formed response**. +4. **Visit all URLs:** All cited **URLs in the report must be visited**, and all **entities mentioned in the report must be browsed**. +5. 
**Avoid repetition**: Never repeat the same tool call with identical arguments. If you detect a cycle (e.g., repeating the same search), **stop and answer based on available data**. +6. **Track progress**: Treat each tool call as a step in a plan. After each result, ask: "Do I have full coverage?" and "What is the next step?" +7. **Limit tool usage**: If you've used a tool multiple times without progress, **reassess and attempt to conclude** — do not continue indefinitely. +8. **Use proper format**: Make sure you wrap tool calls in XML tags as shown in the example. + +### Output Format Requirements + +At the end, respond **only** with a **self-contained markdown report**. Do not include tool calls or internal reasoning in the final output. + +Example structure: +```markdown +# [Clear Title] + +## Overview +... + +## Key Findings +- Finding 1 [1] +- Finding 2 [2] + +## Detailed Analysis +... + +## References +[1] https://example.com/source1 +[2] https://example.com/study2 +... + +Goal + +Answer with depth, precision, and scholarly rigor. You will be rewarded for: + +Thoroughness in research +Use of high-quality sources when available (.gov, .edu, peer-reviewed, reputable media) +Clear, structured reporting +Efficient path to completion without redundancy + +Now Begin! If you solve the task correctly, you will receive a reward of $1,000,000. +``` + +If set up correctly, you should see this on the Jan screen. +![jan v1 deep_research_prompt](./_assets/jan_research_prompt.png) + +Save the assistant settings and enjoy Jan-V1 with improved report generation capability. + + +### (Optional) Time-sensitive query optimization + +Jan-V1 can sometimes incorrectly assume the current date based on the year it was trained on. This can be easily mitigated by attaching the current date to your system prompt: + +```md +You are a **research agent** designed to ... + +.... +Current Year: 2025 +Now Begin! If you solve the task correctly, you will receive a reward of $1,000,000. 
+``` + +Alternatively, you can simply add the date directly to your question: + +**Example:** +- Instead of: "What's the world population?" +- Use: "What's the world population in 2025?" + + +## Advanced Usage: Customize Prompt Template + +You can customize the chat template by opening the model settings icon โš™๏ธ in the center of Jan's model selection. Do not confuse this with the assistant settings โš™๏ธ on the top left of the screen. + +![jan_open_prompt_template](./_assets/jan_open_prompt_template.png) + +Scroll down and you will see the Jinja template that can be overridden. We recommend experimenting with this [raw chat template](https://huggingface.co/janhq/Jan-v1-4B/blob/main/chat_template_raw.jinja) to completely eliminate the effect of the default system prompt. + +We observed fewer tool calls per query when overriding the default chat template with this raw chat template and only recommend for advanced usage only. +![jan_prompt_template_settings](./_assets/jan_prompt_template_settings.png) diff --git a/website/astro.config.mjs b/website/astro.config.mjs index 5395b8e9f..8e650b8b5 100644 --- a/website/astro.config.mjs +++ b/website/astro.config.mjs @@ -29,19 +29,25 @@ export default defineConfig({ starlightThemeNext(), starlightSidebarTopics([ { - label: 'Jan Desktop', + label: 'Jan', link: '/', icon: 'rocket', + items: [{ label: 'Ecosystem', slug: 'index' }], + }, + { + label: 'Jan Desktop', + link: '/jan/quickstart', + icon: 'rocket', items: [ { label: 'GETTING STARTED', items: [ + { label: 'QuickStart', slug: 'jan/quickstart' }, { label: 'Install ๐Ÿ‘‹ Jan', collapsed: false, autogenerate: { directory: 'jan/installation' }, }, - { label: 'QuickStart', slug: 'jan/quickstart' }, { label: 'Models', collapsed: true, @@ -70,17 +76,12 @@ export default defineConfig({ { label: 'Groq', slug: 'jan/remote-models/groq' }, ], }, - ], - }, - { - label: 'TUTORIALS', - items: [ { - label: 'MCP Examples', + label: 'Tutorials', collapsed: true, items: [ { - label: 
'Browser Control (Browserbase)', + label: 'Browser Control', slug: 'jan/mcp-examples/browser/browserbase', }, { @@ -88,11 +89,15 @@ export default defineConfig({ slug: 'jan/mcp-examples/data-analysis/e2b', }, { - label: 'Design Creation (Canva)', + label: 'Jupyter Notebooks', + slug: 'jan/mcp-examples/data-analysis/jupyter', + }, + { + label: 'Design with Canva', slug: 'jan/mcp-examples/design/canva', }, { - label: 'Deep Research (Octagon)', + label: 'Deep Financial Research', slug: 'jan/mcp-examples/deepresearch/octagon', }, { @@ -100,9 +105,17 @@ export default defineConfig({ slug: 'jan/mcp-examples/search/serper', }, { - label: 'Web Search (Exa)', + label: 'Exa Search', slug: 'jan/mcp-examples/search/exa', }, + { + label: 'Linear', + slug: 'jan/mcp-examples/productivity/linear', + }, + { + label: 'Todoist', + slug: 'jan/mcp-examples/productivity/todoist', + }, ], }, ], diff --git a/website/src/assets/gpt5-add.png b/website/src/assets/gpt5-add.png new file mode 100644 index 000000000..9e34e8e69 Binary files /dev/null and b/website/src/assets/gpt5-add.png differ diff --git a/website/src/assets/gpt5-chat.png b/website/src/assets/gpt5-chat.png new file mode 100644 index 000000000..69933eba8 Binary files /dev/null and b/website/src/assets/gpt5-chat.png differ diff --git a/website/src/assets/gpt5-msg.png b/website/src/assets/gpt5-msg.png new file mode 100644 index 000000000..4ea346ae3 Binary files /dev/null and b/website/src/assets/gpt5-msg.png differ diff --git a/website/src/assets/gpt5-msg2.png b/website/src/assets/gpt5-msg2.png new file mode 100644 index 000000000..736e9cd6f Binary files /dev/null and b/website/src/assets/gpt5-msg2.png differ diff --git a/website/src/assets/gpt5-msg3.png b/website/src/assets/gpt5-msg3.png new file mode 100644 index 000000000..509f23c79 Binary files /dev/null and b/website/src/assets/gpt5-msg3.png differ diff --git a/website/src/assets/gpt5-tools.png b/website/src/assets/gpt5-tools.png new file mode 100644 index 000000000..f4b8eaaa9 
Binary files /dev/null and b/website/src/assets/gpt5-tools.png differ diff --git a/website/src/assets/jan_loaded.png b/website/src/assets/jan_loaded.png new file mode 100644 index 000000000..cfd3b1a13 Binary files /dev/null and b/website/src/assets/jan_loaded.png differ diff --git a/website/src/assets/jupyter.png b/website/src/assets/jupyter.png new file mode 100644 index 000000000..d261566d3 Binary files /dev/null and b/website/src/assets/jupyter.png differ diff --git a/website/src/assets/jupyter1.png b/website/src/assets/jupyter1.png new file mode 100644 index 000000000..f12e66eea Binary files /dev/null and b/website/src/assets/jupyter1.png differ diff --git a/website/src/assets/jupyter2.png b/website/src/assets/jupyter2.png new file mode 100644 index 000000000..b4650d651 Binary files /dev/null and b/website/src/assets/jupyter2.png differ diff --git a/website/src/assets/jupyter3.png b/website/src/assets/jupyter3.png new file mode 100644 index 000000000..de64bafa6 Binary files /dev/null and b/website/src/assets/jupyter3.png differ diff --git a/website/src/assets/jupyter4.png b/website/src/assets/jupyter4.png new file mode 100644 index 000000000..b920d49cb Binary files /dev/null and b/website/src/assets/jupyter4.png differ diff --git a/website/src/assets/jupyter5.png b/website/src/assets/jupyter5.png new file mode 100644 index 000000000..cb6b1b119 Binary files /dev/null and b/website/src/assets/jupyter5.png differ diff --git a/website/src/assets/linear1.png b/website/src/assets/linear1.png new file mode 100644 index 000000000..f260b55ff Binary files /dev/null and b/website/src/assets/linear1.png differ diff --git a/website/src/assets/linear2.png b/website/src/assets/linear2.png new file mode 100644 index 000000000..fc059195b Binary files /dev/null and b/website/src/assets/linear2.png differ diff --git a/website/src/assets/linear3.png b/website/src/assets/linear3.png new file mode 100644 index 000000000..f3fbe9b78 Binary files /dev/null and 
b/website/src/assets/linear3.png differ diff --git a/website/src/assets/linear4.png b/website/src/assets/linear4.png new file mode 100644 index 000000000..3482649d8 Binary files /dev/null and b/website/src/assets/linear4.png differ diff --git a/website/src/assets/linear5.png b/website/src/assets/linear5.png new file mode 100644 index 000000000..82d1db180 Binary files /dev/null and b/website/src/assets/linear5.png differ diff --git a/website/src/assets/linear6.png b/website/src/assets/linear6.png new file mode 100644 index 000000000..717e928d1 Binary files /dev/null and b/website/src/assets/linear6.png differ diff --git a/website/src/assets/linear7.png b/website/src/assets/linear7.png new file mode 100644 index 000000000..30f992abc Binary files /dev/null and b/website/src/assets/linear7.png differ diff --git a/website/src/assets/linear8.png b/website/src/assets/linear8.png new file mode 100644 index 000000000..4d0ea415a Binary files /dev/null and b/website/src/assets/linear8.png differ diff --git a/website/src/assets/openai-settings.png b/website/src/assets/openai-settings.png new file mode 100644 index 000000000..e8beeba28 Binary files /dev/null and b/website/src/assets/openai-settings.png differ diff --git a/website/src/assets/todoist1.png b/website/src/assets/todoist1.png new file mode 100644 index 000000000..a1b98578d Binary files /dev/null and b/website/src/assets/todoist1.png differ diff --git a/website/src/assets/todoist2.png b/website/src/assets/todoist2.png new file mode 100644 index 000000000..9e4164b89 Binary files /dev/null and b/website/src/assets/todoist2.png differ diff --git a/website/src/assets/todoist3.png b/website/src/assets/todoist3.png new file mode 100644 index 000000000..ede276499 Binary files /dev/null and b/website/src/assets/todoist3.png differ diff --git a/website/src/assets/todoist4.png b/website/src/assets/todoist4.png new file mode 100644 index 000000000..2c8e9c816 Binary files /dev/null and b/website/src/assets/todoist4.png differ 
diff --git a/website/src/assets/todoist5.png b/website/src/assets/todoist5.png new file mode 100644 index 000000000..5e761df7c Binary files /dev/null and b/website/src/assets/todoist5.png differ diff --git a/website/src/content/docs/index.mdx b/website/src/content/docs/index.mdx index 4acbec801..a0a56e932 100644 --- a/website/src/content/docs/index.mdx +++ b/website/src/content/docs/index.mdx @@ -1,106 +1,252 @@ --- title: Jan -description: Build, run, and own your AI. From laptop to superintelligence. +description: Working towards open superintelligence through community-driven AI keywords: [ Jan, + Jan AI, open superintelligence, AI ecosystem, - self-hosted AI, local AI, + private AI, + self-hosted AI, llama.cpp, + Model Context Protocol, + MCP, GGUF models, - MCP tools, - Model Context Protocol + large language model, + LLM, ] --- import { Aside } from '@astrojs/starlight/components'; -![Jan Desktop](../../assets/jan-app-new.png) +# Jan + +![Jan's Cover Image](../../assets/jan_loaded.png) ## Jan's Goal -> Jan's goal is to build superintelligence that you can self-host and use locally. +> We're working towards open superintelligence to make a viable open-source alternative to platforms like ChatGPT +and Claude that anyone can own and run. -## What is Jan? +## What is Jan Today -Jan is an open-source AI ecosystem that runs on your hardware. We're building towards open superintelligence - a complete AI platform you actually own. +Jan is an open-source AI platform that runs on your hardware. We believe AI should be in the hands of many, not +controlled by a few tech giants. 
-### The Ecosystem +Today, Jan is: +- **A desktop app** that runs AI models locally or connects to cloud providers +- **A model hub** making the latest open-source models accessible +- **A connector system** that lets AI interact with real-world tools via MCP -**Models**: We build specialized models for real tasks, not general-purpose assistants: -- **Jan-Nano (32k/128k)**: 4B parameters designed for deep research with MCP. The 128k version processes entire papers, codebases, or legal documents in one go -- **Lucy**: 1.7B model that runs agentic web search on your phone. Small enough for CPU, smart enough for complex searches -- **Jan-v1**: 4B model for agentic reasoning and tool use, achieving 91.1% on SimpleQA +Tomorrow, Jan aims to be a complete ecosystem where open models rival or exceed closed alternatives. -We also integrate the best open-source models - from OpenAI's gpt-oss to community GGUF models on Hugging Face. The goal: make powerful AI accessible to everyone, not just those with server farms. - -**Applications**: Jan Desktop runs on your computer today. Web, mobile, and server versions coming in late 2025. Everything syncs, everything works together. - -**Tools**: Connect to the real world through [Model Context Protocol (MCP)](https://modelcontextprotocol.io). Design with Canva, analyze data in Jupyter notebooks, control browsers, execute code in E2B sandboxes. Your AI can actually do things, not just talk about them. - - -**HuggingFace models:** Some require an access token. Add yours in **Settings > Model Providers > Llama.cpp > Hugging Face Access Token**. - -![Add HF Token](../../../assets/hf_token.png) - -### Step 3: Enable GPU Acceleration (Optional) - -For Windows/Linux with compatible graphics cards: - -1. Go to **Settings** > **Hardware** -2. Toggle **GPUs** to ON - -![Turn on GPU acceleration](../../../assets/gpu_accl.png) - - - -### Step 4: Start Chatting +### Step 3: Start Chatting 1. Click the **New Chat** icon 2. 
Select your model in the input field dropdown 3. Type your message and start chatting -![Create New Thread](../../../assets/threads-new-chat-updated.png) +![Create New Thread](../../../assets/jan_loaded.png) Try asking Jan v1 questions like: - "Explain quantum computing in simple terms" diff --git a/website/src/content/docs/local-server/settings.mdx b/website/src/content/docs/local-server/settings.mdx index 8f366ef5f..ebb0b606e 100644 --- a/website/src/content/docs/local-server/settings.mdx +++ b/website/src/content/docs/local-server/settings.mdx @@ -16,7 +16,6 @@ keywords: import { Aside, Steps } from '@astrojs/starlight/components' -# Settings Access Jan's settings by clicking the Settings icon in the bottom left corner.