diff --git a/website/.gitignore b/website/.gitignore
new file mode 100644
index 000000000..6240da8b1
--- /dev/null
+++ b/website/.gitignore
@@ -0,0 +1,21 @@
+# build output
+dist/
+# generated types
+.astro/
+
+# dependencies
+node_modules/
+
+# logs
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+pnpm-debug.log*
+
+
+# environment variables
+.env
+.env.production
+
+# macOS-specific files
+.DS_Store
diff --git a/website/.vscode/extensions.json b/website/.vscode/extensions.json
new file mode 100644
index 000000000..22a15055d
--- /dev/null
+++ b/website/.vscode/extensions.json
@@ -0,0 +1,4 @@
+{
+ "recommendations": ["astro-build.astro-vscode"],
+ "unwantedRecommendations": []
+}
diff --git a/website/.vscode/launch.json b/website/.vscode/launch.json
new file mode 100644
index 000000000..d64220976
--- /dev/null
+++ b/website/.vscode/launch.json
@@ -0,0 +1,11 @@
+{
+ "version": "0.2.0",
+ "configurations": [
+ {
+ "command": "./node_modules/.bin/astro dev",
+ "name": "Development server",
+ "request": "launch",
+ "type": "node-terminal"
+ }
+ ]
+}
diff --git a/website/README.md b/website/README.md
new file mode 100644
index 000000000..746f55b45
--- /dev/null
+++ b/website/README.md
@@ -0,0 +1,54 @@
+# Starlight Starter Kit: Basics
+
+[Built with Starlight](https://starlight.astro.build)
+
+```
+bun create astro@latest -- --template starlight
+```
+
+[Open in StackBlitz](https://stackblitz.com/github/withastro/starlight/tree/main/examples/basics)
+[Open in CodeSandbox](https://codesandbox.io/p/sandbox/github/withastro/starlight/tree/main/examples/basics)
+[Deploy to Netlify](https://app.netlify.com/start/deploy?repository=https://github.com/withastro/starlight&create_from_path=examples/basics)
+[Deploy to Vercel](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2Fwithastro%2Fstarlight%2Ftree%2Fmain%2Fexamples%2Fbasics&project-name=my-starlight-docs&repository-name=my-starlight-docs)
+
+> 🧑‍🚀 **Seasoned astronaut?** Delete this file. Have fun!
+
+## 🚀 Project Structure
+
+Inside of your Astro + Starlight project, you'll see the following folders and files:
+
+```
+.
+├── public/
+├── src/
+│   ├── assets/
+│   ├── content/
+│   │   └── docs/
+│   └── content.config.ts
+├── astro.config.mjs
+├── package.json
+└── tsconfig.json
+```
+
+Starlight looks for `.md` or `.mdx` files in the `src/content/docs/` directory. Each file is exposed as a route based on its file name.
+
+Images can be added to `src/assets/` and embedded in Markdown with a relative link.
+
+Static assets, like favicons, can be placed in the `public/` directory.
+
+## 🧞 Commands
+
+All commands are run from the root of the project, from a terminal:
+
+| Command | Action |
+| :------------------------ | :----------------------------------------------- |
+| `bun install` | Installs dependencies |
+| `bun dev` | Starts local dev server at `localhost:4321` |
+| `bun build` | Build your production site to `./dist/` |
+| `bun preview` | Preview your build locally, before deploying |
+| `bun astro ...` | Run CLI commands like `astro add`, `astro check` |
+| `bun astro -- --help` | Get help using the Astro CLI |
+
+## 👀 Want to learn more?
+
+Check out [Starlight’s docs](https://starlight.astro.build/), read [the Astro documentation](https://docs.astro.build), or jump into the [Astro Discord server](https://astro.build/chat).
diff --git a/website/astro.config.mjs b/website/astro.config.mjs
new file mode 100644
index 000000000..7f90ff7d7
--- /dev/null
+++ b/website/astro.config.mjs
@@ -0,0 +1,135 @@
+// @ts-check
+import { defineConfig } from 'astro/config'
+import starlight from '@astrojs/starlight'
+import starlightThemeRapide from 'starlight-theme-rapide'
+import starlightSidebarTopics from 'starlight-sidebar-topics'
+
+// https://astro.build/config
+export default defineConfig({
+ integrations: [
+ starlight({
+ title: '👋 Jan',
+ favicon: "jan2.png",
+ plugins: [
+ starlightThemeRapide(),
+ starlightSidebarTopics([
+ {
+ label: 'Jan',
+ link: '/',
+ icon: 'rocket',
+ items: [
+ {
+ label: 'HOW TO',
+ items: [
+ {
+ label: 'Install 👋 Jan',
+ collapsed: true,
+ autogenerate: { directory: 'jan/installation' },
+ },
+ { label: 'Start Chatting', slug: 'jan/threads' },
+ {
+ label: 'Use Jan Models',
+ collapsed: true,
+ autogenerate: { directory: 'jan/jan-models' },
+ },
+ { label: 'Assistants', slug: 'jan/assistants' },
+ ],
+ },
+ {
+ label: 'TUTORIALS',
+ items: [
+ { label: 'Translation', slug: 'jan/tutorials/translation' },
+ {
+ label: 'Creative Writing',
+ slug: 'jan/tutorials/creative-writing',
+ },
+ ],
+ },
+ {
+ label: 'EXPLANATION',
+ items: [
+ {
+ label: 'Local AI Engine',
+ slug: 'jan/explanation/llama-cpp',
+ },
+ {
+ label: 'Model Parameters',
+ slug: 'jan/explanation/model-parameters',
+ },
+ ],
+ },
+ {
+ label: 'ADVANCED',
+ items: [
+ { label: 'Manage Models', slug: 'jan/manage-models' },
+ { label: 'Model Context Protocol', slug: 'jan/mcp' },
+ {
+ label: 'MCP Examples',
+ collapsed: true,
+ autogenerate: { directory: 'jan/mcp-examples' },
+ },
+ ],
+ },
+ {
+ label: 'REFERENCE',
+ items: [
+ { label: 'Settings', slug: 'jan/settings' },
+ { label: 'Jan Data Folder', slug: 'jan/data-folder' },
+ { label: 'Troubleshooting', slug: 'jan/troubleshooting' },
+ { label: 'Privacy Policy', slug: 'jan/privacy' },
+ ],
+ },
+ ],
+ },
+ {
+ label: 'Local Server',
+ link: '/local-server/',
+ icon: 'setting',
+ items: [
+ { label: 'Server Setup', slug: 'local-server/api-server' },
+ { label: 'Jan Data Folder', slug: 'local-server/data-folder' },
+ { label: 'Settings', slug: 'local-server/settings' },
+ { label: 'Llama.cpp', slug: 'local-server/llama-cpp' },
+ {
+ label: 'Integrations',
+ collapsed: true,
+ autogenerate: { directory: 'local-server/integrations' },
+ },
+ {
+ label: 'Troubleshooting',
+ slug: 'local-server/troubleshooting',
+ },
+ ],
+ },
+ {
+ label: 'Products',
+ link: '/products/',
+ icon: 'forward-slash',
+ items: [
+ { label: 'Overview', slug: 'products' },
+ {
+ label: 'Platforms',
+ autogenerate: { directory: 'products/platforms' },
+ },
+ {
+ label: 'Tools',
+ autogenerate: { directory: 'products/tools' },
+ },
+ {
+ label: 'Models',
+ autogenerate: { directory: 'products/models' },
+ },
+ ],
+ },
+ ]),
+ ],
+ social: [
+ {
+ icon: 'github',
+ label: 'GitHub',
+ href: 'https://github.com/menloresearch/jan',
+ },
+ ],
+ }),
+ ],
+})
diff --git a/website/bun.lock b/website/bun.lock
new file mode 100644
index 000000000..3f435d7ca
--- /dev/null
+++ b/website/bun.lock
@@ -0,0 +1,952 @@
+{
+ "lockfileVersion": 1,
+ "workspaces": {
+ "": {
+ "name": "website",
+ "dependencies": {
+ "@astrojs/starlight": "^0.35.1",
+ "astro": "^5.6.1",
+ "sharp": "^0.34.2",
+ "starlight-sidebar-topics": "^0.6.0",
+ "starlight-theme-rapide": "^0.5.1",
+ },
+ },
+ },
+ "packages": {
+ "@astrojs/compiler": ["@astrojs/compiler@2.12.2", "", {}, "sha512-w2zfvhjNCkNMmMMOn5b0J8+OmUaBL1o40ipMvqcG6NRpdC+lKxmTi48DT8Xw0SzJ3AfmeFLB45zXZXtmbsjcgw=="],
+
+ "@astrojs/internal-helpers": ["@astrojs/internal-helpers@0.6.1", "", {}, "sha512-l5Pqf6uZu31aG+3Lv8nl/3s4DbUzdlxTWDof4pEpto6GUJNhhCbelVi9dEyurOVyqaelwmS9oSyOWOENSfgo9A=="],
+
+ "@astrojs/markdown-remark": ["@astrojs/markdown-remark@6.3.3", "", { "dependencies": { "@astrojs/internal-helpers": "0.6.1", "@astrojs/prism": "3.3.0", "github-slugger": "^2.0.0", "hast-util-from-html": "^2.0.3", "hast-util-to-text": "^4.0.2", "import-meta-resolve": "^4.1.0", "js-yaml": "^4.1.0", "mdast-util-definitions": "^6.0.0", "rehype-raw": "^7.0.0", "rehype-stringify": "^10.0.1", "remark-gfm": "^4.0.1", "remark-parse": "^11.0.0", "remark-rehype": "^11.1.2", "remark-smartypants": "^3.0.2", "shiki": "^3.2.1", "smol-toml": "^1.3.4", "unified": "^11.0.5", "unist-util-remove-position": "^5.0.0", "unist-util-visit": "^5.0.0", "unist-util-visit-parents": "^6.0.1", "vfile": "^6.0.3" } }, "sha512-DDRtD1sPvAuA7ms2btc9A7/7DApKqgLMNrE6kh5tmkfy8utD0Z738gqd3p5aViYYdUtHIyEJ1X4mCMxfCfu15w=="],
+
+ "@astrojs/mdx": ["@astrojs/mdx@4.3.1", "", { "dependencies": { "@astrojs/markdown-remark": "6.3.3", "@mdx-js/mdx": "^3.1.0", "acorn": "^8.14.1", "es-module-lexer": "^1.6.0", "estree-util-visit": "^2.0.0", "hast-util-to-html": "^9.0.5", "kleur": "^4.1.5", "rehype-raw": "^7.0.0", "remark-gfm": "^4.0.1", "remark-smartypants": "^3.0.2", "source-map": "^0.7.4", "unist-util-visit": "^5.0.0", "vfile": "^6.0.3" }, "peerDependencies": { "astro": "^5.0.0" } }, "sha512-0ynzkFd5p2IFDLPAfAcGizg44WyS0qUr43nP2vQkvrPlpoPEMeeoi1xWiWsVqQNaZ0FOmNqfUviUn52nm9mLag=="],
+
+ "@astrojs/prism": ["@astrojs/prism@3.3.0", "", { "dependencies": { "prismjs": "^1.30.0" } }, "sha512-q8VwfU/fDZNoDOf+r7jUnMC2//H2l0TuQ6FkGJL8vD8nw/q5KiL3DS1KKBI3QhI9UQhpJ5dc7AtqfbXWuOgLCQ=="],
+
+ "@astrojs/sitemap": ["@astrojs/sitemap@3.4.1", "", { "dependencies": { "sitemap": "^8.0.0", "stream-replace-string": "^2.0.0", "zod": "^3.24.2" } }, "sha512-VjZvr1e4FH6NHyyHXOiQgLiw94LnCVY4v06wN/D0gZKchTMkg71GrAHJz81/huafcmavtLkIv26HnpfDq6/h/Q=="],
+
+ "@astrojs/starlight": ["@astrojs/starlight@0.35.1", "", { "dependencies": { "@astrojs/markdown-remark": "^6.3.1", "@astrojs/mdx": "^4.2.3", "@astrojs/sitemap": "^3.3.0", "@pagefind/default-ui": "^1.3.0", "@types/hast": "^3.0.4", "@types/js-yaml": "^4.0.9", "@types/mdast": "^4.0.4", "astro-expressive-code": "^0.41.1", "bcp-47": "^2.1.0", "hast-util-from-html": "^2.0.1", "hast-util-select": "^6.0.2", "hast-util-to-string": "^3.0.0", "hastscript": "^9.0.0", "i18next": "^23.11.5", "js-yaml": "^4.1.0", "klona": "^2.0.6", "mdast-util-directive": "^3.0.0", "mdast-util-to-markdown": "^2.1.0", "mdast-util-to-string": "^4.0.0", "pagefind": "^1.3.0", "rehype": "^13.0.1", "rehype-format": "^5.0.0", "remark-directive": "^3.0.0", "ultrahtml": "^1.6.0", "unified": "^11.0.5", "unist-util-visit": "^5.0.0", "vfile": "^6.0.2" }, "peerDependencies": { "astro": "^5.5.0" } }, "sha512-/hshlAayMd3B+E+h8wY6JWT1lNmX/K1+ugiZPirW5XFo5QUcNMk/Bsa4oHgg+TFoU6kbxPtijo0VppATfD9XuA=="],
+
+ "@astrojs/telemetry": ["@astrojs/telemetry@3.3.0", "", { "dependencies": { "ci-info": "^4.2.0", "debug": "^4.4.0", "dlv": "^1.1.3", "dset": "^3.1.4", "is-docker": "^3.0.0", "is-wsl": "^3.1.0", "which-pm-runs": "^1.1.0" } }, "sha512-UFBgfeldP06qu6khs/yY+q1cDAaArM2/7AEIqQ9Cuvf7B1hNLq0xDrZkct+QoIGyjq56y8IaE2I3CTvG99mlhQ=="],
+
+ "@babel/helper-string-parser": ["@babel/helper-string-parser@7.27.1", "", {}, "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA=="],
+
+ "@babel/helper-validator-identifier": ["@babel/helper-validator-identifier@7.27.1", "", {}, "sha512-D2hP9eA+Sqx1kBZgzxZh0y1trbuU+JoDkiEwqhQ36nodYqJwyEIhPSdMNd7lOm/4io72luTPWH20Yda0xOuUow=="],
+
+ "@babel/parser": ["@babel/parser@7.28.0", "", { "dependencies": { "@babel/types": "^7.28.0" }, "bin": "./bin/babel-parser.js" }, "sha512-jVZGvOxOuNSsuQuLRTh13nU0AogFlw32w/MT+LV6D3sP5WdbW61E77RnkbaO2dUvmPAYrBDJXGn5gGS6tH4j8g=="],
+
+ "@babel/runtime": ["@babel/runtime@7.27.6", "", {}, "sha512-vbavdySgbTTrmFE+EsiqUTzlOr5bzlnJtUv9PynGCAKvfQqjIXbvFdumPM/GxMDfyuGMJaJAU6TO4zc1Jf1i8Q=="],
+
+ "@babel/types": ["@babel/types@7.28.1", "", { "dependencies": { "@babel/helper-string-parser": "^7.27.1", "@babel/helper-validator-identifier": "^7.27.1" } }, "sha512-x0LvFTekgSX+83TI28Y9wYPUfzrnl2aT5+5QLnO6v7mSJYtEEevuDRN0F0uSHRk1G1IWZC43o00Y0xDDrpBGPQ=="],
+
+ "@capsizecss/unpack": ["@capsizecss/unpack@2.4.0", "", { "dependencies": { "blob-to-buffer": "^1.2.8", "cross-fetch": "^3.0.4", "fontkit": "^2.0.2" } }, "sha512-GrSU71meACqcmIUxPYOJvGKF0yryjN/L1aCuE9DViCTJI7bfkjgYDPD1zbNDcINJwSSP6UaBZY9GAbYDO7re0Q=="],
+
+ "@ctrl/tinycolor": ["@ctrl/tinycolor@4.1.0", "", {}, "sha512-WyOx8cJQ+FQus4Mm4uPIZA64gbk3Wxh0so5Lcii0aJifqwoVOlfFtorjLE0Hen4OYyHZMXDWqMmaQemBhgxFRQ=="],
+
+ "@emnapi/runtime": ["@emnapi/runtime@1.4.5", "", { "dependencies": { "tslib": "^2.4.0" } }, "sha512-++LApOtY0pEEz1zrd9vy1/zXVaVJJ/EbAF3u0fXIzPJEDtnITsBGbbK0EkM72amhl/R5b+5xx0Y/QhcVOpuulg=="],
+
+ "@esbuild/aix-ppc64": ["@esbuild/aix-ppc64@0.25.8", "", { "os": "aix", "cpu": "ppc64" }, "sha512-urAvrUedIqEiFR3FYSLTWQgLu5tb+m0qZw0NBEasUeo6wuqatkMDaRT+1uABiGXEu5vqgPd7FGE1BhsAIy9QVA=="],
+
+ "@esbuild/android-arm": ["@esbuild/android-arm@0.25.8", "", { "os": "android", "cpu": "arm" }, "sha512-RONsAvGCz5oWyePVnLdZY/HHwA++nxYWIX1atInlaW6SEkwq6XkP3+cb825EUcRs5Vss/lGh/2YxAb5xqc07Uw=="],
+
+ "@esbuild/android-arm64": ["@esbuild/android-arm64@0.25.8", "", { "os": "android", "cpu": "arm64" }, "sha512-OD3p7LYzWpLhZEyATcTSJ67qB5D+20vbtr6vHlHWSQYhKtzUYrETuWThmzFpZtFsBIxRvhO07+UgVA9m0i/O1w=="],
+
+ "@esbuild/android-x64": ["@esbuild/android-x64@0.25.8", "", { "os": "android", "cpu": "x64" }, "sha512-yJAVPklM5+4+9dTeKwHOaA+LQkmrKFX96BM0A/2zQrbS6ENCmxc4OVoBs5dPkCCak2roAD+jKCdnmOqKszPkjA=="],
+
+ "@esbuild/darwin-arm64": ["@esbuild/darwin-arm64@0.25.8", "", { "os": "darwin", "cpu": "arm64" }, "sha512-Jw0mxgIaYX6R8ODrdkLLPwBqHTtYHJSmzzd+QeytSugzQ0Vg4c5rDky5VgkoowbZQahCbsv1rT1KW72MPIkevw=="],
+
+ "@esbuild/darwin-x64": ["@esbuild/darwin-x64@0.25.8", "", { "os": "darwin", "cpu": "x64" }, "sha512-Vh2gLxxHnuoQ+GjPNvDSDRpoBCUzY4Pu0kBqMBDlK4fuWbKgGtmDIeEC081xi26PPjn+1tct+Bh8FjyLlw1Zlg=="],
+
+ "@esbuild/freebsd-arm64": ["@esbuild/freebsd-arm64@0.25.8", "", { "os": "freebsd", "cpu": "arm64" }, "sha512-YPJ7hDQ9DnNe5vxOm6jaie9QsTwcKedPvizTVlqWG9GBSq+BuyWEDazlGaDTC5NGU4QJd666V0yqCBL2oWKPfA=="],
+
+ "@esbuild/freebsd-x64": ["@esbuild/freebsd-x64@0.25.8", "", { "os": "freebsd", "cpu": "x64" }, "sha512-MmaEXxQRdXNFsRN/KcIimLnSJrk2r5H8v+WVafRWz5xdSVmWLoITZQXcgehI2ZE6gioE6HirAEToM/RvFBeuhw=="],
+
+ "@esbuild/linux-arm": ["@esbuild/linux-arm@0.25.8", "", { "os": "linux", "cpu": "arm" }, "sha512-FuzEP9BixzZohl1kLf76KEVOsxtIBFwCaLupVuk4eFVnOZfU+Wsn+x5Ryam7nILV2pkq2TqQM9EZPsOBuMC+kg=="],
+
+ "@esbuild/linux-arm64": ["@esbuild/linux-arm64@0.25.8", "", { "os": "linux", "cpu": "arm64" }, "sha512-WIgg00ARWv/uYLU7lsuDK00d/hHSfES5BzdWAdAig1ioV5kaFNrtK8EqGcUBJhYqotlUByUKz5Qo6u8tt7iD/w=="],
+
+ "@esbuild/linux-ia32": ["@esbuild/linux-ia32@0.25.8", "", { "os": "linux", "cpu": "ia32" }, "sha512-A1D9YzRX1i+1AJZuFFUMP1E9fMaYY+GnSQil9Tlw05utlE86EKTUA7RjwHDkEitmLYiFsRd9HwKBPEftNdBfjg=="],
+
+ "@esbuild/linux-loong64": ["@esbuild/linux-loong64@0.25.8", "", { "os": "linux", "cpu": "none" }, "sha512-O7k1J/dwHkY1RMVvglFHl1HzutGEFFZ3kNiDMSOyUrB7WcoHGf96Sh+64nTRT26l3GMbCW01Ekh/ThKM5iI7hQ=="],
+
+ "@esbuild/linux-mips64el": ["@esbuild/linux-mips64el@0.25.8", "", { "os": "linux", "cpu": "none" }, "sha512-uv+dqfRazte3BzfMp8PAQXmdGHQt2oC/y2ovwpTteqrMx2lwaksiFZ/bdkXJC19ttTvNXBuWH53zy/aTj1FgGw=="],
+
+ "@esbuild/linux-ppc64": ["@esbuild/linux-ppc64@0.25.8", "", { "os": "linux", "cpu": "ppc64" }, "sha512-GyG0KcMi1GBavP5JgAkkstMGyMholMDybAf8wF5A70CALlDM2p/f7YFE7H92eDeH/VBtFJA5MT4nRPDGg4JuzQ=="],
+
+ "@esbuild/linux-riscv64": ["@esbuild/linux-riscv64@0.25.8", "", { "os": "linux", "cpu": "none" }, "sha512-rAqDYFv3yzMrq7GIcen3XP7TUEG/4LK86LUPMIz6RT8A6pRIDn0sDcvjudVZBiiTcZCY9y2SgYX2lgK3AF+1eg=="],
+
+ "@esbuild/linux-s390x": ["@esbuild/linux-s390x@0.25.8", "", { "os": "linux", "cpu": "s390x" }, "sha512-Xutvh6VjlbcHpsIIbwY8GVRbwoviWT19tFhgdA7DlenLGC/mbc3lBoVb7jxj9Z+eyGqvcnSyIltYUrkKzWqSvg=="],
+
+ "@esbuild/linux-x64": ["@esbuild/linux-x64@0.25.8", "", { "os": "linux", "cpu": "x64" }, "sha512-ASFQhgY4ElXh3nDcOMTkQero4b1lgubskNlhIfJrsH5OKZXDpUAKBlNS0Kx81jwOBp+HCeZqmoJuihTv57/jvQ=="],
+
+ "@esbuild/netbsd-arm64": ["@esbuild/netbsd-arm64@0.25.8", "", { "os": "none", "cpu": "arm64" }, "sha512-d1KfruIeohqAi6SA+gENMuObDbEjn22olAR7egqnkCD9DGBG0wsEARotkLgXDu6c4ncgWTZJtN5vcgxzWRMzcw=="],
+
+ "@esbuild/netbsd-x64": ["@esbuild/netbsd-x64@0.25.8", "", { "os": "none", "cpu": "x64" }, "sha512-nVDCkrvx2ua+XQNyfrujIG38+YGyuy2Ru9kKVNyh5jAys6n+l44tTtToqHjino2My8VAY6Lw9H7RI73XFi66Cg=="],
+
+ "@esbuild/openbsd-arm64": ["@esbuild/openbsd-arm64@0.25.8", "", { "os": "openbsd", "cpu": "arm64" }, "sha512-j8HgrDuSJFAujkivSMSfPQSAa5Fxbvk4rgNAS5i3K+r8s1X0p1uOO2Hl2xNsGFppOeHOLAVgYwDVlmxhq5h+SQ=="],
+
+ "@esbuild/openbsd-x64": ["@esbuild/openbsd-x64@0.25.8", "", { "os": "openbsd", "cpu": "x64" }, "sha512-1h8MUAwa0VhNCDp6Af0HToI2TJFAn1uqT9Al6DJVzdIBAd21m/G0Yfc77KDM3uF3T/YaOgQq3qTJHPbTOInaIQ=="],
+
+ "@esbuild/openharmony-arm64": ["@esbuild/openharmony-arm64@0.25.8", "", { "os": "none", "cpu": "arm64" }, "sha512-r2nVa5SIK9tSWd0kJd9HCffnDHKchTGikb//9c7HX+r+wHYCpQrSgxhlY6KWV1nFo1l4KFbsMlHk+L6fekLsUg=="],
+
+ "@esbuild/sunos-x64": ["@esbuild/sunos-x64@0.25.8", "", { "os": "sunos", "cpu": "x64" }, "sha512-zUlaP2S12YhQ2UzUfcCuMDHQFJyKABkAjvO5YSndMiIkMimPmxA+BYSBikWgsRpvyxuRnow4nS5NPnf9fpv41w=="],
+
+ "@esbuild/win32-arm64": ["@esbuild/win32-arm64@0.25.8", "", { "os": "win32", "cpu": "arm64" }, "sha512-YEGFFWESlPva8hGL+zvj2z/SaK+pH0SwOM0Nc/d+rVnW7GSTFlLBGzZkuSU9kFIGIo8q9X3ucpZhu8PDN5A2sQ=="],
+
+ "@esbuild/win32-ia32": ["@esbuild/win32-ia32@0.25.8", "", { "os": "win32", "cpu": "ia32" }, "sha512-hiGgGC6KZ5LZz58OL/+qVVoZiuZlUYlYHNAmczOm7bs2oE1XriPFi5ZHHrS8ACpV5EjySrnoCKmcbQMN+ojnHg=="],
+
+ "@esbuild/win32-x64": ["@esbuild/win32-x64@0.25.8", "", { "os": "win32", "cpu": "x64" }, "sha512-cn3Yr7+OaaZq1c+2pe+8yxC8E144SReCQjN6/2ynubzYjvyqZjTXfQJpAcQpsdJq3My7XADANiYGHoFC69pLQw=="],
+
+ "@expressive-code/core": ["@expressive-code/core@0.41.3", "", { "dependencies": { "@ctrl/tinycolor": "^4.0.4", "hast-util-select": "^6.0.2", "hast-util-to-html": "^9.0.1", "hast-util-to-text": "^4.0.1", "hastscript": "^9.0.0", "postcss": "^8.4.38", "postcss-nested": "^6.0.1", "unist-util-visit": "^5.0.0", "unist-util-visit-parents": "^6.0.1" } }, "sha512-9qzohqU7O0+JwMEEgQhnBPOw5DtsQRBXhW++5fvEywsuX44vCGGof1SL5OvPElvNgaWZ4pFZAFSlkNOkGyLwSQ=="],
+
+ "@expressive-code/plugin-frames": ["@expressive-code/plugin-frames@0.41.3", "", { "dependencies": { "@expressive-code/core": "^0.41.3" } }, "sha512-rFQtmf/3N2CK3Cq/uERweMTYZnBu+CwxBdHuOftEmfA9iBE7gTVvwpbh82P9ZxkPLvc40UMhYt7uNuAZexycRQ=="],
+
+ "@expressive-code/plugin-shiki": ["@expressive-code/plugin-shiki@0.41.3", "", { "dependencies": { "@expressive-code/core": "^0.41.3", "shiki": "^3.2.2" } }, "sha512-RlTARoopzhFJIOVHLGvuXJ8DCEme/hjV+ZnRJBIxzxsKVpGPW4Oshqg9xGhWTYdHstTsxO663s0cdBLzZj9TQA=="],
+
+ "@expressive-code/plugin-text-markers": ["@expressive-code/plugin-text-markers@0.41.3", "", { "dependencies": { "@expressive-code/core": "^0.41.3" } }, "sha512-SN8tkIzDpA0HLAscEYD2IVrfLiid6qEdE9QLlGVSxO1KEw7qYvjpbNBQjUjMr5/jvTJ7ys6zysU2vLPHE0sb2g=="],
+
+ "@img/sharp-darwin-arm64": ["@img/sharp-darwin-arm64@0.34.3", "", { "optionalDependencies": { "@img/sharp-libvips-darwin-arm64": "1.2.0" }, "os": "darwin", "cpu": "arm64" }, "sha512-ryFMfvxxpQRsgZJqBd4wsttYQbCxsJksrv9Lw/v798JcQ8+w84mBWuXwl+TT0WJ/WrYOLaYpwQXi3sA9nTIaIg=="],
+
+ "@img/sharp-darwin-x64": ["@img/sharp-darwin-x64@0.34.3", "", { "optionalDependencies": { "@img/sharp-libvips-darwin-x64": "1.2.0" }, "os": "darwin", "cpu": "x64" }, "sha512-yHpJYynROAj12TA6qil58hmPmAwxKKC7reUqtGLzsOHfP7/rniNGTL8tjWX6L3CTV4+5P4ypcS7Pp+7OB+8ihA=="],
+
+ "@img/sharp-libvips-darwin-arm64": ["@img/sharp-libvips-darwin-arm64@1.2.0", "", { "os": "darwin", "cpu": "arm64" }, "sha512-sBZmpwmxqwlqG9ueWFXtockhsxefaV6O84BMOrhtg/YqbTaRdqDE7hxraVE3y6gVM4eExmfzW4a8el9ArLeEiQ=="],
+
+ "@img/sharp-libvips-darwin-x64": ["@img/sharp-libvips-darwin-x64@1.2.0", "", { "os": "darwin", "cpu": "x64" }, "sha512-M64XVuL94OgiNHa5/m2YvEQI5q2cl9d/wk0qFTDVXcYzi43lxuiFTftMR1tOnFQovVXNZJ5TURSDK2pNe9Yzqg=="],
+
+ "@img/sharp-libvips-linux-arm": ["@img/sharp-libvips-linux-arm@1.2.0", "", { "os": "linux", "cpu": "arm" }, "sha512-mWd2uWvDtL/nvIzThLq3fr2nnGfyr/XMXlq8ZJ9WMR6PXijHlC3ksp0IpuhK6bougvQrchUAfzRLnbsen0Cqvw=="],
+
+ "@img/sharp-libvips-linux-arm64": ["@img/sharp-libvips-linux-arm64@1.2.0", "", { "os": "linux", "cpu": "arm64" }, "sha512-RXwd0CgG+uPRX5YYrkzKyalt2OJYRiJQ8ED/fi1tq9WQW2jsQIn0tqrlR5l5dr/rjqq6AHAxURhj2DVjyQWSOA=="],
+
+ "@img/sharp-libvips-linux-ppc64": ["@img/sharp-libvips-linux-ppc64@1.2.0", "", { "os": "linux", "cpu": "ppc64" }, "sha512-Xod/7KaDDHkYu2phxxfeEPXfVXFKx70EAFZ0qyUdOjCcxbjqyJOEUpDe6RIyaunGxT34Anf9ue/wuWOqBW2WcQ=="],
+
+ "@img/sharp-libvips-linux-s390x": ["@img/sharp-libvips-linux-s390x@1.2.0", "", { "os": "linux", "cpu": "s390x" }, "sha512-eMKfzDxLGT8mnmPJTNMcjfO33fLiTDsrMlUVcp6b96ETbnJmd4uvZxVJSKPQfS+odwfVaGifhsB07J1LynFehw=="],
+
+ "@img/sharp-libvips-linux-x64": ["@img/sharp-libvips-linux-x64@1.2.0", "", { "os": "linux", "cpu": "x64" }, "sha512-ZW3FPWIc7K1sH9E3nxIGB3y3dZkpJlMnkk7z5tu1nSkBoCgw2nSRTFHI5pB/3CQaJM0pdzMF3paf9ckKMSE9Tg=="],
+
+ "@img/sharp-libvips-linuxmusl-arm64": ["@img/sharp-libvips-linuxmusl-arm64@1.2.0", "", { "os": "linux", "cpu": "arm64" }, "sha512-UG+LqQJbf5VJ8NWJ5Z3tdIe/HXjuIdo4JeVNADXBFuG7z9zjoegpzzGIyV5zQKi4zaJjnAd2+g2nna8TZvuW9Q=="],
+
+ "@img/sharp-libvips-linuxmusl-x64": ["@img/sharp-libvips-linuxmusl-x64@1.2.0", "", { "os": "linux", "cpu": "x64" }, "sha512-SRYOLR7CXPgNze8akZwjoGBoN1ThNZoqpOgfnOxmWsklTGVfJiGJoC/Lod7aNMGA1jSsKWM1+HRX43OP6p9+6Q=="],
+
+ "@img/sharp-linux-arm": ["@img/sharp-linux-arm@0.34.3", "", { "optionalDependencies": { "@img/sharp-libvips-linux-arm": "1.2.0" }, "os": "linux", "cpu": "arm" }, "sha512-oBK9l+h6KBN0i3dC8rYntLiVfW8D8wH+NPNT3O/WBHeW0OQWCjfWksLUaPidsrDKpJgXp3G3/hkmhptAW0I3+A=="],
+
+ "@img/sharp-linux-arm64": ["@img/sharp-linux-arm64@0.34.3", "", { "optionalDependencies": { "@img/sharp-libvips-linux-arm64": "1.2.0" }, "os": "linux", "cpu": "arm64" }, "sha512-QdrKe3EvQrqwkDrtuTIjI0bu6YEJHTgEeqdzI3uWJOH6G1O8Nl1iEeVYRGdj1h5I21CqxSvQp1Yv7xeU3ZewbA=="],
+
+ "@img/sharp-linux-ppc64": ["@img/sharp-linux-ppc64@0.34.3", "", { "optionalDependencies": { "@img/sharp-libvips-linux-ppc64": "1.2.0" }, "os": "linux", "cpu": "ppc64" }, "sha512-GLtbLQMCNC5nxuImPR2+RgrviwKwVql28FWZIW1zWruy6zLgA5/x2ZXk3mxj58X/tszVF69KK0Is83V8YgWhLA=="],
+
+ "@img/sharp-linux-s390x": ["@img/sharp-linux-s390x@0.34.3", "", { "optionalDependencies": { "@img/sharp-libvips-linux-s390x": "1.2.0" }, "os": "linux", "cpu": "s390x" }, "sha512-3gahT+A6c4cdc2edhsLHmIOXMb17ltffJlxR0aC2VPZfwKoTGZec6u5GrFgdR7ciJSsHT27BD3TIuGcuRT0KmQ=="],
+
+ "@img/sharp-linux-x64": ["@img/sharp-linux-x64@0.34.3", "", { "optionalDependencies": { "@img/sharp-libvips-linux-x64": "1.2.0" }, "os": "linux", "cpu": "x64" }, "sha512-8kYso8d806ypnSq3/Ly0QEw90V5ZoHh10yH0HnrzOCr6DKAPI6QVHvwleqMkVQ0m+fc7EH8ah0BB0QPuWY6zJQ=="],
+
+ "@img/sharp-linuxmusl-arm64": ["@img/sharp-linuxmusl-arm64@0.34.3", "", { "optionalDependencies": { "@img/sharp-libvips-linuxmusl-arm64": "1.2.0" }, "os": "linux", "cpu": "arm64" }, "sha512-vAjbHDlr4izEiXM1OTggpCcPg9tn4YriK5vAjowJsHwdBIdx0fYRsURkxLG2RLm9gyBq66gwtWI8Gx0/ov+JKQ=="],
+
+ "@img/sharp-linuxmusl-x64": ["@img/sharp-linuxmusl-x64@0.34.3", "", { "optionalDependencies": { "@img/sharp-libvips-linuxmusl-x64": "1.2.0" }, "os": "linux", "cpu": "x64" }, "sha512-gCWUn9547K5bwvOn9l5XGAEjVTTRji4aPTqLzGXHvIr6bIDZKNTA34seMPgM0WmSf+RYBH411VavCejp3PkOeQ=="],
+
+ "@img/sharp-wasm32": ["@img/sharp-wasm32@0.34.3", "", { "dependencies": { "@emnapi/runtime": "^1.4.4" }, "cpu": "none" }, "sha512-+CyRcpagHMGteySaWos8IbnXcHgfDn7pO2fiC2slJxvNq9gDipYBN42/RagzctVRKgxATmfqOSulgZv5e1RdMg=="],
+
+ "@img/sharp-win32-arm64": ["@img/sharp-win32-arm64@0.34.3", "", { "os": "win32", "cpu": "arm64" }, "sha512-MjnHPnbqMXNC2UgeLJtX4XqoVHHlZNd+nPt1kRPmj63wURegwBhZlApELdtxM2OIZDRv/DFtLcNhVbd1z8GYXQ=="],
+
+ "@img/sharp-win32-ia32": ["@img/sharp-win32-ia32@0.34.3", "", { "os": "win32", "cpu": "ia32" }, "sha512-xuCdhH44WxuXgOM714hn4amodJMZl3OEvf0GVTm0BEyMeA2to+8HEdRPShH0SLYptJY1uBw+SCFP9WVQi1Q/cw=="],
+
+ "@img/sharp-win32-x64": ["@img/sharp-win32-x64@0.34.3", "", { "os": "win32", "cpu": "x64" }, "sha512-OWwz05d++TxzLEv4VnsTz5CmZ6mI6S05sfQGEMrNrQcOEERbX46332IvE7pO/EUiw7jUrrS40z/M7kPyjfl04g=="],
+
+ "@jridgewell/sourcemap-codec": ["@jridgewell/sourcemap-codec@1.5.4", "", {}, "sha512-VT2+G1VQs/9oz078bLrYbecdZKs912zQlkelYpuf+SXF+QvZDYJlbx/LSx+meSAwdDFnF8FVXW92AVjjkVmgFw=="],
+
+ "@mdx-js/mdx": ["@mdx-js/mdx@3.1.0", "", { "dependencies": { "@types/estree": "^1.0.0", "@types/estree-jsx": "^1.0.0", "@types/hast": "^3.0.0", "@types/mdx": "^2.0.0", "collapse-white-space": "^2.0.0", "devlop": "^1.0.0", "estree-util-is-identifier-name": "^3.0.0", "estree-util-scope": "^1.0.0", "estree-walker": "^3.0.0", "hast-util-to-jsx-runtime": "^2.0.0", "markdown-extensions": "^2.0.0", "recma-build-jsx": "^1.0.0", "recma-jsx": "^1.0.0", "recma-stringify": "^1.0.0", "rehype-recma": "^1.0.0", "remark-mdx": "^3.0.0", "remark-parse": "^11.0.0", "remark-rehype": "^11.0.0", "source-map": "^0.7.0", "unified": "^11.0.0", "unist-util-position-from-estree": "^2.0.0", "unist-util-stringify-position": "^4.0.0", "unist-util-visit": "^5.0.0", "vfile": "^6.0.0" } }, "sha512-/QxEhPAvGwbQmy1Px8F899L5Uc2KZ6JtXwlCgJmjSTBedwOZkByYcBG4GceIGPXRDsmfxhHazuS+hlOShRLeDw=="],
+
+ "@oslojs/encoding": ["@oslojs/encoding@1.1.0", "", {}, "sha512-70wQhgYmndg4GCPxPPxPGevRKqTIJ2Nh4OkiMWmDAVYsTQ+Ta7Sq+rPevXyXGdzr30/qZBnyOalCszoMxlyldQ=="],
+
+ "@pagefind/darwin-arm64": ["@pagefind/darwin-arm64@1.3.0", "", { "os": "darwin", "cpu": "arm64" }, "sha512-365BEGl6ChOsauRjyVpBjXybflXAOvoMROw3TucAROHIcdBvXk9/2AmEvGFU0r75+vdQI4LJdJdpH4Y6Yqaj4A=="],
+
+ "@pagefind/darwin-x64": ["@pagefind/darwin-x64@1.3.0", "", { "os": "darwin", "cpu": "x64" }, "sha512-zlGHA23uuXmS8z3XxEGmbHpWDxXfPZ47QS06tGUq0HDcZjXjXHeLG+cboOy828QIV5FXsm9MjfkP5e4ZNbOkow=="],
+
+ "@pagefind/default-ui": ["@pagefind/default-ui@1.3.0", "", {}, "sha512-CGKT9ccd3+oRK6STXGgfH+m0DbOKayX6QGlq38TfE1ZfUcPc5+ulTuzDbZUnMo+bubsEOIypm4Pl2iEyzZ1cNg=="],
+
+ "@pagefind/linux-arm64": ["@pagefind/linux-arm64@1.3.0", "", { "os": "linux", "cpu": "arm64" }, "sha512-8lsxNAiBRUk72JvetSBXs4WRpYrQrVJXjlRRnOL6UCdBN9Nlsz0t7hWstRk36+JqHpGWOKYiuHLzGYqYAqoOnQ=="],
+
+ "@pagefind/linux-x64": ["@pagefind/linux-x64@1.3.0", "", { "os": "linux", "cpu": "x64" }, "sha512-hAvqdPJv7A20Ucb6FQGE6jhjqy+vZ6pf+s2tFMNtMBG+fzcdc91uTw7aP/1Vo5plD0dAOHwdxfkyw0ugal4kcQ=="],
+
+ "@pagefind/windows-x64": ["@pagefind/windows-x64@1.3.0", "", { "os": "win32", "cpu": "x64" }, "sha512-BR1bIRWOMqkf8IoU576YDhij1Wd/Zf2kX/kCI0b2qzCKC8wcc2GQJaaRMCpzvCCrmliO4vtJ6RITp/AnoYUUmQ=="],
+
+ "@rollup/pluginutils": ["@rollup/pluginutils@5.2.0", "", { "dependencies": { "@types/estree": "^1.0.0", "estree-walker": "^2.0.2", "picomatch": "^4.0.2" }, "peerDependencies": { "rollup": "^1.20.0||^2.0.0||^3.0.0||^4.0.0" }, "optionalPeers": ["rollup"] }, "sha512-qWJ2ZTbmumwiLFomfzTyt5Kng4hwPi9rwCYN4SHb6eaRU1KNO4ccxINHr/VhH4GgPlt1XfSTLX2LBTme8ne4Zw=="],
+
+ "@rollup/rollup-android-arm-eabi": ["@rollup/rollup-android-arm-eabi@4.45.1", "", { "os": "android", "cpu": "arm" }, "sha512-NEySIFvMY0ZQO+utJkgoMiCAjMrGvnbDLHvcmlA33UXJpYBCvlBEbMMtV837uCkS+plG2umfhn0T5mMAxGrlRA=="],
+
+ "@rollup/rollup-android-arm64": ["@rollup/rollup-android-arm64@4.45.1", "", { "os": "android", "cpu": "arm64" }, "sha512-ujQ+sMXJkg4LRJaYreaVx7Z/VMgBBd89wGS4qMrdtfUFZ+TSY5Rs9asgjitLwzeIbhwdEhyj29zhst3L1lKsRQ=="],
+
+ "@rollup/rollup-darwin-arm64": ["@rollup/rollup-darwin-arm64@4.45.1", "", { "os": "darwin", "cpu": "arm64" }, "sha512-FSncqHvqTm3lC6Y13xncsdOYfxGSLnP+73k815EfNmpewPs+EyM49haPS105Rh4aF5mJKywk9X0ogzLXZzN9lA=="],
+
+ "@rollup/rollup-darwin-x64": ["@rollup/rollup-darwin-x64@4.45.1", "", { "os": "darwin", "cpu": "x64" }, "sha512-2/vVn/husP5XI7Fsf/RlhDaQJ7x9zjvC81anIVbr4b/f0xtSmXQTFcGIQ/B1cXIYM6h2nAhJkdMHTnD7OtQ9Og=="],
+
+ "@rollup/rollup-freebsd-arm64": ["@rollup/rollup-freebsd-arm64@4.45.1", "", { "os": "freebsd", "cpu": "arm64" }, "sha512-4g1kaDxQItZsrkVTdYQ0bxu4ZIQ32cotoQbmsAnW1jAE4XCMbcBPDirX5fyUzdhVCKgPcrwWuucI8yrVRBw2+g=="],
+
+ "@rollup/rollup-freebsd-x64": ["@rollup/rollup-freebsd-x64@4.45.1", "", { "os": "freebsd", "cpu": "x64" }, "sha512-L/6JsfiL74i3uK1Ti2ZFSNsp5NMiM4/kbbGEcOCps99aZx3g8SJMO1/9Y0n/qKlWZfn6sScf98lEOUe2mBvW9A=="],
+
+ "@rollup/rollup-linux-arm-gnueabihf": ["@rollup/rollup-linux-arm-gnueabihf@4.45.1", "", { "os": "linux", "cpu": "arm" }, "sha512-RkdOTu2jK7brlu+ZwjMIZfdV2sSYHK2qR08FUWcIoqJC2eywHbXr0L8T/pONFwkGukQqERDheaGTeedG+rra6Q=="],
+
+ "@rollup/rollup-linux-arm-musleabihf": ["@rollup/rollup-linux-arm-musleabihf@4.45.1", "", { "os": "linux", "cpu": "arm" }, "sha512-3kJ8pgfBt6CIIr1o+HQA7OZ9mp/zDk3ctekGl9qn/pRBgrRgfwiffaUmqioUGN9hv0OHv2gxmvdKOkARCtRb8Q=="],
+
+ "@rollup/rollup-linux-arm64-gnu": ["@rollup/rollup-linux-arm64-gnu@4.45.1", "", { "os": "linux", "cpu": "arm64" }, "sha512-k3dOKCfIVixWjG7OXTCOmDfJj3vbdhN0QYEqB+OuGArOChek22hn7Uy5A/gTDNAcCy5v2YcXRJ/Qcnm4/ma1xw=="],
+
+ "@rollup/rollup-linux-arm64-musl": ["@rollup/rollup-linux-arm64-musl@4.45.1", "", { "os": "linux", "cpu": "arm64" }, "sha512-PmI1vxQetnM58ZmDFl9/Uk2lpBBby6B6rF4muJc65uZbxCs0EA7hhKCk2PKlmZKuyVSHAyIw3+/SiuMLxKxWog=="],
+
+ "@rollup/rollup-linux-loongarch64-gnu": ["@rollup/rollup-linux-loongarch64-gnu@4.45.1", "", { "os": "linux", "cpu": "none" }, "sha512-9UmI0VzGmNJ28ibHW2GpE2nF0PBQqsyiS4kcJ5vK+wuwGnV5RlqdczVocDSUfGX/Na7/XINRVoUgJyFIgipoRg=="],
+
+ "@rollup/rollup-linux-powerpc64le-gnu": ["@rollup/rollup-linux-powerpc64le-gnu@4.45.1", "", { "os": "linux", "cpu": "ppc64" }, "sha512-7nR2KY8oEOUTD3pBAxIBBbZr0U7U+R9HDTPNy+5nVVHDXI4ikYniH1oxQz9VoB5PbBU1CZuDGHkLJkd3zLMWsg=="],
+
+ "@rollup/rollup-linux-riscv64-gnu": ["@rollup/rollup-linux-riscv64-gnu@4.45.1", "", { "os": "linux", "cpu": "none" }, "sha512-nlcl3jgUultKROfZijKjRQLUu9Ma0PeNv/VFHkZiKbXTBQXhpytS8CIj5/NfBeECZtY2FJQubm6ltIxm/ftxpw=="],
+
+ "@rollup/rollup-linux-riscv64-musl": ["@rollup/rollup-linux-riscv64-musl@4.45.1", "", { "os": "linux", "cpu": "none" }, "sha512-HJV65KLS51rW0VY6rvZkiieiBnurSzpzore1bMKAhunQiECPuxsROvyeaot/tcK3A3aGnI+qTHqisrpSgQrpgA=="],
+
+ "@rollup/rollup-linux-s390x-gnu": ["@rollup/rollup-linux-s390x-gnu@4.45.1", "", { "os": "linux", "cpu": "s390x" }, "sha512-NITBOCv3Qqc6hhwFt7jLV78VEO/il4YcBzoMGGNxznLgRQf43VQDae0aAzKiBeEPIxnDrACiMgbqjuihx08OOw=="],
+
+ "@rollup/rollup-linux-x64-gnu": ["@rollup/rollup-linux-x64-gnu@4.45.1", "", { "os": "linux", "cpu": "x64" }, "sha512-+E/lYl6qu1zqgPEnTrs4WysQtvc/Sh4fC2nByfFExqgYrqkKWp1tWIbe+ELhixnenSpBbLXNi6vbEEJ8M7fiHw=="],
+
+ "@rollup/rollup-linux-x64-musl": ["@rollup/rollup-linux-x64-musl@4.45.1", "", { "os": "linux", "cpu": "x64" }, "sha512-a6WIAp89p3kpNoYStITT9RbTbTnqarU7D8N8F2CV+4Cl9fwCOZraLVuVFvlpsW0SbIiYtEnhCZBPLoNdRkjQFw=="],
+
+ "@rollup/rollup-win32-arm64-msvc": ["@rollup/rollup-win32-arm64-msvc@4.45.1", "", { "os": "win32", "cpu": "arm64" }, "sha512-T5Bi/NS3fQiJeYdGvRpTAP5P02kqSOpqiopwhj0uaXB6nzs5JVi2XMJb18JUSKhCOX8+UE1UKQufyD6Or48dJg=="],
+
+ "@rollup/rollup-win32-ia32-msvc": ["@rollup/rollup-win32-ia32-msvc@4.45.1", "", { "os": "win32", "cpu": "ia32" }, "sha512-lxV2Pako3ujjuUe9jiU3/s7KSrDfH6IgTSQOnDWr9aJ92YsFd7EurmClK0ly/t8dzMkDtd04g60WX6yl0sGfdw=="],
+
+ "@rollup/rollup-win32-x64-msvc": ["@rollup/rollup-win32-x64-msvc@4.45.1", "", { "os": "win32", "cpu": "x64" }, "sha512-M/fKi4sasCdM8i0aWJjCSFm2qEnYRR8AMLG2kxp6wD13+tMGA4Z1tVAuHkNRjud5SW2EM3naLuK35w9twvf6aA=="],
+
+ "@shikijs/core": ["@shikijs/core@3.8.1", "", { "dependencies": { "@shikijs/types": "3.8.1", "@shikijs/vscode-textmate": "^10.0.2", "@types/hast": "^3.0.4", "hast-util-to-html": "^9.0.5" } }, "sha512-uTSXzUBQ/IgFcUa6gmGShCHr4tMdR3pxUiiWKDm8pd42UKJdYhkAYsAmHX5mTwybQ5VyGDgTjW4qKSsRvGSang=="],
+
+ "@shikijs/engine-javascript": ["@shikijs/engine-javascript@3.8.1", "", { "dependencies": { "@shikijs/types": "3.8.1", "@shikijs/vscode-textmate": "^10.0.2", "oniguruma-to-es": "^4.3.3" } }, "sha512-rZRp3BM1llrHkuBPAdYAzjlF7OqlM0rm/7EWASeCcY7cRYZIrOnGIHE9qsLz5TCjGefxBFnwgIECzBs2vmOyKA=="],
+
+ "@shikijs/engine-oniguruma": ["@shikijs/engine-oniguruma@3.8.1", "", { "dependencies": { "@shikijs/types": "3.8.1", "@shikijs/vscode-textmate": "^10.0.2" } }, "sha512-KGQJZHlNY7c656qPFEQpIoqOuC4LrxjyNndRdzk5WKB/Ie87+NJCF1xo9KkOUxwxylk7rT6nhlZyTGTC4fCe1g=="],
+
+ "@shikijs/langs": ["@shikijs/langs@3.8.1", "", { "dependencies": { "@shikijs/types": "3.8.1" } }, "sha512-TjOFg2Wp1w07oKnXjs0AUMb4kJvujML+fJ1C5cmEj45lhjbUXtziT1x2bPQb9Db6kmPhkG5NI2tgYW1/DzhUuQ=="],
+
+ "@shikijs/themes": ["@shikijs/themes@3.8.1", "", { "dependencies": { "@shikijs/types": "3.8.1" } }, "sha512-Vu3t3BBLifc0GB0UPg2Pox1naTemrrvyZv2lkiSw3QayVV60me1ujFQwPZGgUTmwXl1yhCPW8Lieesm0CYruLQ=="],
+
+ "@shikijs/types": ["@shikijs/types@3.8.1", "", { "dependencies": { "@shikijs/vscode-textmate": "^10.0.2", "@types/hast": "^3.0.4" } }, "sha512-5C39Q8/8r1I26suLh+5TPk1DTrbY/kn3IdWA5HdizR0FhlhD05zx5nKCqhzSfDHH3p4S0ZefxWd77DLV+8FhGg=="],
+
+ "@shikijs/vscode-textmate": ["@shikijs/vscode-textmate@10.0.2", "", {}, "sha512-83yeghZ2xxin3Nj8z1NMd/NCuca+gsYXswywDy5bHvwlWL8tpTQmzGeUuHd9FC3E/SBEMvzJRwWEOz5gGes9Qg=="],
+
+ "@swc/helpers": ["@swc/helpers@0.5.17", "", { "dependencies": { "tslib": "^2.8.0" } }, "sha512-5IKx/Y13RsYd+sauPb2x+U/xZikHjolzfuDgTAl/Tdf3Q8rslRvC19NKDLgAJQ6wsqADk10ntlv08nPFw/gO/A=="],
+
+ "@types/debug": ["@types/debug@4.1.12", "", { "dependencies": { "@types/ms": "*" } }, "sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ=="],
+
+ "@types/estree": ["@types/estree@1.0.8", "", {}, "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w=="],
+
+ "@types/estree-jsx": ["@types/estree-jsx@1.0.5", "", { "dependencies": { "@types/estree": "*" } }, "sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg=="],
+
+ "@types/fontkit": ["@types/fontkit@2.0.8", "", { "dependencies": { "@types/node": "*" } }, "sha512-wN+8bYxIpJf+5oZdrdtaX04qUuWHcKxcDEgRS9Qm9ZClSHjzEn13SxUC+5eRM+4yXIeTYk8mTzLAWGF64847ew=="],
+
+ "@types/hast": ["@types/hast@3.0.4", "", { "dependencies": { "@types/unist": "*" } }, "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ=="],
+
+ "@types/js-yaml": ["@types/js-yaml@4.0.9", "", {}, "sha512-k4MGaQl5TGo/iipqb2UDG2UwjXziSWkh0uysQelTlJpX1qGlpUZYm8PnO4DxG1qBomtJUdYJ6qR6xdIah10JLg=="],
+
+ "@types/mdast": ["@types/mdast@4.0.4", "", { "dependencies": { "@types/unist": "*" } }, "sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA=="],
+
+ "@types/mdx": ["@types/mdx@2.0.13", "", {}, "sha512-+OWZQfAYyio6YkJb3HLxDrvnx6SWWDbC0zVPfBRzUk0/nqoDyf6dNxQi3eArPe8rJ473nobTMQ/8Zk+LxJ+Yuw=="],
+
+ "@types/ms": ["@types/ms@2.1.0", "", {}, "sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA=="],
+
+ "@types/nlcst": ["@types/nlcst@2.0.3", "", { "dependencies": { "@types/unist": "*" } }, "sha512-vSYNSDe6Ix3q+6Z7ri9lyWqgGhJTmzRjZRqyq15N0Z/1/UnVsno9G/N40NBijoYx2seFDIl0+B2mgAb9mezUCA=="],
+
+ "@types/node": ["@types/node@17.0.45", "", {}, "sha512-w+tIMs3rq2afQdsPJlODhoUEKzFP1ayaoyl1CcnwtIlsVe7K7bA1NGm4s3PraqTLlXnbIN84zuBlxBWo1u9BLw=="],
+
+ "@types/sax": ["@types/sax@1.2.7", "", { "dependencies": { "@types/node": "*" } }, "sha512-rO73L89PJxeYM3s3pPPjiPgVVcymqU490g0YO5n5By0k2Erzj6tay/4lr1CHAAU4JyOWd1rpQ8bCf6cZfHU96A=="],
+
+ "@types/unist": ["@types/unist@3.0.3", "", {}, "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q=="],
+
+ "@ungap/structured-clone": ["@ungap/structured-clone@1.3.0", "", {}, "sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g=="],
+
+ "acorn": ["acorn@8.15.0", "", { "bin": { "acorn": "bin/acorn" } }, "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg=="],
+
+ "acorn-jsx": ["acorn-jsx@5.3.2", "", { "peerDependencies": { "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" } }, "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ=="],
+
+ "ansi-align": ["ansi-align@3.0.1", "", { "dependencies": { "string-width": "^4.1.0" } }, "sha512-IOfwwBF5iczOjp/WeY4YxyjqAFMQoZufdQWDd19SEExbVLNXqvpzSJ/M7Za4/sCPmQ0+GRquoA7bGcINcxew6w=="],
+
+ "ansi-regex": ["ansi-regex@6.1.0", "", {}, "sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA=="],
+
+ "ansi-styles": ["ansi-styles@6.2.1", "", {}, "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug=="],
+
+ "anymatch": ["anymatch@3.1.3", "", { "dependencies": { "normalize-path": "^3.0.0", "picomatch": "^2.0.4" } }, "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw=="],
+
+ "arg": ["arg@5.0.2", "", {}, "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg=="],
+
+ "argparse": ["argparse@2.0.1", "", {}, "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q=="],
+
+ "aria-query": ["aria-query@5.3.2", "", {}, "sha512-COROpnaoap1E2F000S62r6A60uHZnmlvomhfyT2DlTcrY1OrBKn2UhH7qn5wTC9zMvD0AY7csdPSNwKP+7WiQw=="],
+
+ "array-iterate": ["array-iterate@2.0.1", "", {}, "sha512-I1jXZMjAgCMmxT4qxXfPXa6SthSoE8h6gkSI9BGGNv8mP8G/v0blc+qFnZu6K42vTOiuME596QaLO0TP3Lk0xg=="],
+
+ "astring": ["astring@1.9.0", "", { "bin": { "astring": "bin/astring" } }, "sha512-LElXdjswlqjWrPpJFg1Fx4wpkOCxj1TDHlSV4PlaRxHGWko024xICaa97ZkMfs6DRKlCguiAI+rbXv5GWwXIkg=="],
+
+ "astro": ["astro@5.12.1", "", { "dependencies": { "@astrojs/compiler": "^2.12.2", "@astrojs/internal-helpers": "0.6.1", "@astrojs/markdown-remark": "6.3.3", "@astrojs/telemetry": "3.3.0", "@capsizecss/unpack": "^2.4.0", "@oslojs/encoding": "^1.1.0", "@rollup/pluginutils": "^5.1.4", "acorn": "^8.14.1", "aria-query": "^5.3.2", "axobject-query": "^4.1.0", "boxen": "8.0.1", "ci-info": "^4.2.0", "clsx": "^2.1.1", "common-ancestor-path": "^1.0.1", "cookie": "^1.0.2", "cssesc": "^3.0.0", "debug": "^4.4.0", "deterministic-object-hash": "^2.0.2", "devalue": "^5.1.1", "diff": "^5.2.0", "dlv": "^1.1.3", "dset": "^3.1.4", "es-module-lexer": "^1.6.0", "esbuild": "^0.25.0", "estree-walker": "^3.0.3", "flattie": "^1.1.1", "fontace": "~0.3.0", "github-slugger": "^2.0.0", "html-escaper": "3.0.3", "http-cache-semantics": "^4.1.1", "import-meta-resolve": "^4.1.0", "js-yaml": "^4.1.0", "kleur": "^4.1.5", "magic-string": "^0.30.17", "magicast": "^0.3.5", "mrmime": "^2.0.1", "neotraverse": "^0.6.18", "p-limit": "^6.2.0", "p-queue": "^8.1.0", "package-manager-detector": "^1.1.0", "picomatch": "^4.0.2", "prompts": "^2.4.2", "rehype": "^13.0.2", "semver": "^7.7.1", "shiki": "^3.2.1", "smol-toml": "^1.3.4", "tinyexec": "^0.3.2", "tinyglobby": "^0.2.12", "tsconfck": "^3.1.5", "ultrahtml": "^1.6.0", "unifont": "~0.5.0", "unist-util-visit": "^5.0.0", "unstorage": "^1.15.0", "vfile": "^6.0.3", "vite": "^6.3.4", "vitefu": "^1.0.6", "xxhash-wasm": "^1.1.0", "yargs-parser": "^21.1.1", "yocto-spinner": "^0.2.1", "zod": "^3.24.2", "zod-to-json-schema": "^3.24.5", "zod-to-ts": "^1.2.0" }, "optionalDependencies": { "sharp": "^0.33.3" }, "bin": { "astro": "astro.js" } }, "sha512-/gH9cLIp6UNdbJO1FPBVN/Ea+1I9hJdQoLJKYUsXIRIfHcyF/3NCg0QVDJGw1oWkyQT6x6poQsnbgY9UXitjiw=="],
+
+ "astro-expressive-code": ["astro-expressive-code@0.41.3", "", { "dependencies": { "rehype-expressive-code": "^0.41.3" }, "peerDependencies": { "astro": "^4.0.0-beta || ^5.0.0-beta || ^3.3.0" } }, "sha512-u+zHMqo/QNLE2eqYRCrK3+XMlKakv33Bzuz+56V1gs8H0y6TZ0hIi3VNbIxeTn51NLn+mJfUV/A0kMNfE4rANw=="],
+
+ "axobject-query": ["axobject-query@4.1.0", "", {}, "sha512-qIj0G9wZbMGNLjLmg1PT6v2mE9AH2zlnADJD/2tC6E00hgmhUOfEB6greHPAfLRSufHqROIUTkw6E+M3lH0PTQ=="],
+
+ "bail": ["bail@2.0.2", "", {}, "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw=="],
+
+ "base-64": ["base-64@1.0.0", "", {}, "sha512-kwDPIFCGx0NZHog36dj+tHiwP4QMzsZ3AgMViUBKI0+V5n4U0ufTCUMhnQ04diaRI8EX/QcPfql7zlhZ7j4zgg=="],
+
+ "base64-js": ["base64-js@1.5.1", "", {}, "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA=="],
+
+ "bcp-47": ["bcp-47@2.1.0", "", { "dependencies": { "is-alphabetical": "^2.0.0", "is-alphanumerical": "^2.0.0", "is-decimal": "^2.0.0" } }, "sha512-9IIS3UPrvIa1Ej+lVDdDwO7zLehjqsaByECw0bu2RRGP73jALm6FYbzI5gWbgHLvNdkvfXB5YrSbocZdOS0c0w=="],
+
+ "bcp-47-match": ["bcp-47-match@2.0.3", "", {}, "sha512-JtTezzbAibu8G0R9op9zb3vcWZd9JF6M0xOYGPn0fNCd7wOpRB1mU2mH9T8gaBGbAAyIIVgB2G7xG0GP98zMAQ=="],
+
+ "blob-to-buffer": ["blob-to-buffer@1.2.9", "", {}, "sha512-BF033y5fN6OCofD3vgHmNtwZWRcq9NLyyxyILx9hfMy1sXYy4ojFl765hJ2lP0YaN2fuxPaLO2Vzzoxy0FLFFA=="],
+
+ "boolbase": ["boolbase@1.0.0", "", {}, "sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww=="],
+
+ "boxen": ["boxen@8.0.1", "", { "dependencies": { "ansi-align": "^3.0.1", "camelcase": "^8.0.0", "chalk": "^5.3.0", "cli-boxes": "^3.0.0", "string-width": "^7.2.0", "type-fest": "^4.21.0", "widest-line": "^5.0.0", "wrap-ansi": "^9.0.0" } }, "sha512-F3PH5k5juxom4xktynS7MoFY+NUWH5LC4CnH11YB8NPew+HLpmBLCybSAEyb2F+4pRXhuhWqFesoQd6DAyc2hw=="],
+
+ "brotli": ["brotli@1.3.3", "", { "dependencies": { "base64-js": "^1.1.2" } }, "sha512-oTKjJdShmDuGW94SyyaoQvAjf30dZaHnjJ8uAF+u2/vGJkJbJPJAT1gDiOJP5v1Zb6f9KEyW/1HpuaWIXtGHPg=="],
+
+ "camelcase": ["camelcase@8.0.0", "", {}, "sha512-8WB3Jcas3swSvjIeA2yvCJ+Miyz5l1ZmB6HFb9R1317dt9LCQoswg/BGrmAmkWVEszSrrg4RwmO46qIm2OEnSA=="],
+
+ "ccount": ["ccount@2.0.1", "", {}, "sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg=="],
+
+ "chalk": ["chalk@5.4.1", "", {}, "sha512-zgVZuo2WcZgfUEmsn6eO3kINexW8RAE4maiQ8QNs8CtpPCSyMiYsULR3HQYkm3w8FIA3SberyMJMSldGsW+U3w=="],
+
+ "character-entities": ["character-entities@2.0.2", "", {}, "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ=="],
+
+ "character-entities-html4": ["character-entities-html4@2.1.0", "", {}, "sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA=="],
+
+ "character-entities-legacy": ["character-entities-legacy@3.0.0", "", {}, "sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ=="],
+
+ "character-reference-invalid": ["character-reference-invalid@2.0.1", "", {}, "sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw=="],
+
+ "chokidar": ["chokidar@4.0.3", "", { "dependencies": { "readdirp": "^4.0.1" } }, "sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA=="],
+
+ "ci-info": ["ci-info@4.3.0", "", {}, "sha512-l+2bNRMiQgcfILUi33labAZYIWlH1kWDp+ecNo5iisRKrbm0xcRyCww71/YU0Fkw0mAFpz9bJayXPjey6vkmaQ=="],
+
+ "cli-boxes": ["cli-boxes@3.0.0", "", {}, "sha512-/lzGpEWL/8PfI0BmBOPRwp0c/wFNX1RdUML3jK/RcSBA9T8mZDdQpqYBKtCFTOfQbwPqWEOpjqW+Fnayc0969g=="],
+
+ "clone": ["clone@2.1.2", "", {}, "sha512-3Pe/CF1Nn94hyhIYpjtiLhdCoEoz0DqQ+988E9gmeEdQZlojxnOb74wctFyuwWQHzqyf9X7C7MG8juUpqBJT8w=="],
+
+ "clsx": ["clsx@2.1.1", "", {}, "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA=="],
+
+ "collapse-white-space": ["collapse-white-space@2.1.0", "", {}, "sha512-loKTxY1zCOuG4j9f6EPnuyyYkf58RnhhWTvRoZEokgB+WbdXehfjFviyOVYkqzEWz1Q5kRiZdBYS5SwxbQYwzw=="],
+
+ "color": ["color@4.2.3", "", { "dependencies": { "color-convert": "^2.0.1", "color-string": "^1.9.0" } }, "sha512-1rXeuUUiGGrykh+CeBdu5Ie7OJwinCgQY0bc7GCRxy5xVHy+moaqkpL/jqQq0MtQOeYcrqEz4abc5f0KtU7W4A=="],
+
+ "color-convert": ["color-convert@2.0.1", "", { "dependencies": { "color-name": "~1.1.4" } }, "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ=="],
+
+ "color-name": ["color-name@1.1.4", "", {}, "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA=="],
+
+ "color-string": ["color-string@1.9.1", "", { "dependencies": { "color-name": "^1.0.0", "simple-swizzle": "^0.2.2" } }, "sha512-shrVawQFojnZv6xM40anx4CkoDP+fZsw/ZerEMsW/pyzsRbElpsL/DBVW7q3ExxwusdNXI3lXpuhEZkzs8p5Eg=="],
+
+ "comma-separated-tokens": ["comma-separated-tokens@2.0.3", "", {}, "sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg=="],
+
+ "common-ancestor-path": ["common-ancestor-path@1.0.1", "", {}, "sha512-L3sHRo1pXXEqX8VU28kfgUY+YGsk09hPqZiZmLacNib6XNTCM8ubYeT7ryXQw8asB1sKgcU5lkB7ONug08aB8w=="],
+
+ "cookie": ["cookie@1.0.2", "", {}, "sha512-9Kr/j4O16ISv8zBBhJoi4bXOYNTkFLOqSL3UDB0njXxCXNezjeyVrJyGOWtgfs/q2km1gwBcfH8q1yEGoMYunA=="],
+
+ "cookie-es": ["cookie-es@1.2.2", "", {}, "sha512-+W7VmiVINB+ywl1HGXJXmrqkOhpKrIiVZV6tQuV54ZyQC7MMuBt81Vc336GMLoHBq5hV/F9eXgt5Mnx0Rha5Fg=="],
+
+ "cross-fetch": ["cross-fetch@3.2.0", "", { "dependencies": { "node-fetch": "^2.7.0" } }, "sha512-Q+xVJLoGOeIMXZmbUK4HYk+69cQH6LudR0Vu/pRm2YlU/hDV9CiS0gKUMaWY5f2NeUH9C1nV3bsTlCo0FsTV1Q=="],
+
+ "crossws": ["crossws@0.3.5", "", { "dependencies": { "uncrypto": "^0.1.3" } }, "sha512-ojKiDvcmByhwa8YYqbQI/hg7MEU0NC03+pSdEq4ZUnZR9xXpwk7E43SMNGkn+JxJGPFtNvQ48+vV2p+P1ml5PA=="],
+
+ "css-selector-parser": ["css-selector-parser@3.1.3", "", {}, "sha512-gJMigczVZqYAk0hPVzx/M4Hm1D9QOtqkdQk9005TNzDIUGzo5cnHEDiKUT7jGPximL/oYb+LIitcHFQ4aKupxg=="],
+
+ "css-tree": ["css-tree@3.1.0", "", { "dependencies": { "mdn-data": "2.12.2", "source-map-js": "^1.0.1" } }, "sha512-0eW44TGN5SQXU1mWSkKwFstI/22X2bG1nYzZTYMAWjylYURhse752YgbE4Cx46AC+bAvI+/dYTPRk1LqSUnu6w=="],
+
+ "cssesc": ["cssesc@3.0.0", "", { "bin": { "cssesc": "bin/cssesc" } }, "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg=="],
+
+ "debug": ["debug@4.4.1", "", { "dependencies": { "ms": "^2.1.3" } }, "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ=="],
+
+ "decode-named-character-reference": ["decode-named-character-reference@1.2.0", "", { "dependencies": { "character-entities": "^2.0.0" } }, "sha512-c6fcElNV6ShtZXmsgNgFFV5tVX2PaV4g+MOAkb8eXHvn6sryJBrZa9r0zV6+dtTyoCKxtDy5tyQ5ZwQuidtd+Q=="],
+
+ "defu": ["defu@6.1.4", "", {}, "sha512-mEQCMmwJu317oSz8CwdIOdwf3xMif1ttiM8LTufzc3g6kR+9Pe236twL8j3IYT1F7GfRgGcW6MWxzZjLIkuHIg=="],
+
+ "dequal": ["dequal@2.0.3", "", {}, "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA=="],
+
+ "destr": ["destr@2.0.5", "", {}, "sha512-ugFTXCtDZunbzasqBxrK93Ik/DRYsO6S/fedkWEMKqt04xZ4csmnmwGDBAb07QWNaGMAmnTIemsYZCksjATwsA=="],
+
+ "detect-libc": ["detect-libc@2.0.4", "", {}, "sha512-3UDv+G9CsCKO1WKMGw9fwq/SWJYbI0c5Y7LU1AXYoDdbhE2AHQ6N6Nb34sG8Fj7T5APy8qXDCKuuIHd1BR0tVA=="],
+
+ "deterministic-object-hash": ["deterministic-object-hash@2.0.2", "", { "dependencies": { "base-64": "^1.0.0" } }, "sha512-KxektNH63SrbfUyDiwXqRb1rLwKt33AmMv+5Nhsw1kqZ13SJBRTgZHtGbE+hH3a1mVW1cz+4pqSWVPAtLVXTzQ=="],
+
+ "devalue": ["devalue@5.1.1", "", {}, "sha512-maua5KUiapvEwiEAe+XnlZ3Rh0GD+qI1J/nb9vrJc3muPXvcF/8gXYTWF76+5DAqHyDUtOIImEuo0YKE9mshVw=="],
+
+ "devlop": ["devlop@1.1.0", "", { "dependencies": { "dequal": "^2.0.0" } }, "sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA=="],
+
+ "dfa": ["dfa@1.2.0", "", {}, "sha512-ED3jP8saaweFTjeGX8HQPjeC1YYyZs98jGNZx6IiBvxW7JG5v492kamAQB3m2wop07CvU/RQmzcKr6bgcC5D/Q=="],
+
+ "diff": ["diff@5.2.0", "", {}, "sha512-uIFDxqpRZGZ6ThOk84hEfqWoHx2devRFvpTZcTHur85vImfaxUbTW9Ryh4CpCuDnToOP1CEtXKIgytHBPVff5A=="],
+
+ "direction": ["direction@2.0.1", "", { "bin": { "direction": "cli.js" } }, "sha512-9S6m9Sukh1cZNknO1CWAr2QAWsbKLafQiyM5gZ7VgXHeuaoUwffKN4q6NC4A/Mf9iiPlOXQEKW/Mv/mh9/3YFA=="],
+
+ "dlv": ["dlv@1.1.3", "", {}, "sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA=="],
+
+ "dset": ["dset@3.1.4", "", {}, "sha512-2QF/g9/zTaPDc3BjNcVTGoBbXBgYfMTTceLaYcFJ/W9kggFUkhxD/hMEeuLKbugyef9SqAx8cpgwlIP/jinUTA=="],
+
+ "emoji-regex": ["emoji-regex@10.4.0", "", {}, "sha512-EC+0oUMY1Rqm4O6LLrgjtYDvcVYTy7chDnM4Q7030tP4Kwj3u/pR6gP9ygnp2CJMK5Gq+9Q2oqmrFJAz01DXjw=="],
+
+ "entities": ["entities@6.0.1", "", {}, "sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g=="],
+
+ "es-module-lexer": ["es-module-lexer@1.7.0", "", {}, "sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA=="],
+
+ "esast-util-from-estree": ["esast-util-from-estree@2.0.0", "", { "dependencies": { "@types/estree-jsx": "^1.0.0", "devlop": "^1.0.0", "estree-util-visit": "^2.0.0", "unist-util-position-from-estree": "^2.0.0" } }, "sha512-4CyanoAudUSBAn5K13H4JhsMH6L9ZP7XbLVe/dKybkxMO7eDyLsT8UHl9TRNrU2Gr9nz+FovfSIjuXWJ81uVwQ=="],
+
+ "esast-util-from-js": ["esast-util-from-js@2.0.1", "", { "dependencies": { "@types/estree-jsx": "^1.0.0", "acorn": "^8.0.0", "esast-util-from-estree": "^2.0.0", "vfile-message": "^4.0.0" } }, "sha512-8Ja+rNJ0Lt56Pcf3TAmpBZjmx8ZcK5Ts4cAzIOjsjevg9oSXJnl6SUQ2EevU8tv3h6ZLWmoKL5H4fgWvdvfETw=="],
+
+ "esbuild": ["esbuild@0.25.8", "", { "optionalDependencies": { "@esbuild/aix-ppc64": "0.25.8", "@esbuild/android-arm": "0.25.8", "@esbuild/android-arm64": "0.25.8", "@esbuild/android-x64": "0.25.8", "@esbuild/darwin-arm64": "0.25.8", "@esbuild/darwin-x64": "0.25.8", "@esbuild/freebsd-arm64": "0.25.8", "@esbuild/freebsd-x64": "0.25.8", "@esbuild/linux-arm": "0.25.8", "@esbuild/linux-arm64": "0.25.8", "@esbuild/linux-ia32": "0.25.8", "@esbuild/linux-loong64": "0.25.8", "@esbuild/linux-mips64el": "0.25.8", "@esbuild/linux-ppc64": "0.25.8", "@esbuild/linux-riscv64": "0.25.8", "@esbuild/linux-s390x": "0.25.8", "@esbuild/linux-x64": "0.25.8", "@esbuild/netbsd-arm64": "0.25.8", "@esbuild/netbsd-x64": "0.25.8", "@esbuild/openbsd-arm64": "0.25.8", "@esbuild/openbsd-x64": "0.25.8", "@esbuild/openharmony-arm64": "0.25.8", "@esbuild/sunos-x64": "0.25.8", "@esbuild/win32-arm64": "0.25.8", "@esbuild/win32-ia32": "0.25.8", "@esbuild/win32-x64": "0.25.8" }, "bin": { "esbuild": "bin/esbuild" } }, "sha512-vVC0USHGtMi8+R4Kz8rt6JhEWLxsv9Rnu/lGYbPR8u47B+DCBksq9JarW0zOO7bs37hyOK1l2/oqtbciutL5+Q=="],
+
+ "escape-string-regexp": ["escape-string-regexp@5.0.0", "", {}, "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw=="],
+
+ "estree-util-attach-comments": ["estree-util-attach-comments@3.0.0", "", { "dependencies": { "@types/estree": "^1.0.0" } }, "sha512-cKUwm/HUcTDsYh/9FgnuFqpfquUbwIqwKM26BVCGDPVgvaCl/nDCCjUfiLlx6lsEZ3Z4RFxNbOQ60pkaEwFxGw=="],
+
+ "estree-util-build-jsx": ["estree-util-build-jsx@3.0.1", "", { "dependencies": { "@types/estree-jsx": "^1.0.0", "devlop": "^1.0.0", "estree-util-is-identifier-name": "^3.0.0", "estree-walker": "^3.0.0" } }, "sha512-8U5eiL6BTrPxp/CHbs2yMgP8ftMhR5ww1eIKoWRMlqvltHF8fZn5LRDvTKuxD3DUn+shRbLGqXemcP51oFCsGQ=="],
+
+ "estree-util-is-identifier-name": ["estree-util-is-identifier-name@3.0.0", "", {}, "sha512-hFtqIDZTIUZ9BXLb8y4pYGyk6+wekIivNVTcmvk8NoOh+VeRn5y6cEHzbURrWbfp1fIqdVipilzj+lfaadNZmg=="],
+
+ "estree-util-scope": ["estree-util-scope@1.0.0", "", { "dependencies": { "@types/estree": "^1.0.0", "devlop": "^1.0.0" } }, "sha512-2CAASclonf+JFWBNJPndcOpA8EMJwa0Q8LUFJEKqXLW6+qBvbFZuF5gItbQOs/umBUkjviCSDCbBwU2cXbmrhQ=="],
+
+ "estree-util-to-js": ["estree-util-to-js@2.0.0", "", { "dependencies": { "@types/estree-jsx": "^1.0.0", "astring": "^1.8.0", "source-map": "^0.7.0" } }, "sha512-WDF+xj5rRWmD5tj6bIqRi6CkLIXbbNQUcxQHzGysQzvHmdYG2G7p/Tf0J0gpxGgkeMZNTIjT/AoSvC9Xehcgdg=="],
+
+ "estree-util-visit": ["estree-util-visit@2.0.0", "", { "dependencies": { "@types/estree-jsx": "^1.0.0", "@types/unist": "^3.0.0" } }, "sha512-m5KgiH85xAhhW8Wta0vShLcUvOsh3LLPI2YVwcbio1l7E09NTLL1EyMZFM1OyWowoH0skScNbhOPl4kcBgzTww=="],
+
+ "estree-walker": ["estree-walker@3.0.3", "", { "dependencies": { "@types/estree": "^1.0.0" } }, "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g=="],
+
+ "eventemitter3": ["eventemitter3@5.0.1", "", {}, "sha512-GWkBvjiSZK87ELrYOSESUYeVIc9mvLLf/nXalMOS5dYrgZq9o5OVkbZAVM06CVxYsCwH9BDZFPlQTlPA1j4ahA=="],
+
+ "expressive-code": ["expressive-code@0.41.3", "", { "dependencies": { "@expressive-code/core": "^0.41.3", "@expressive-code/plugin-frames": "^0.41.3", "@expressive-code/plugin-shiki": "^0.41.3", "@expressive-code/plugin-text-markers": "^0.41.3" } }, "sha512-YLnD62jfgBZYrXIPQcJ0a51Afv9h8VlWqEGK9uU2T5nL/5rb8SnA86+7+mgCZe5D34Tff5RNEA5hjNVJYHzrFg=="],
+
+ "extend": ["extend@3.0.2", "", {}, "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g=="],
+
+ "fast-deep-equal": ["fast-deep-equal@3.1.3", "", {}, "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q=="],
+
+ "fdir": ["fdir@6.4.6", "", { "peerDependencies": { "picomatch": "^3 || ^4" }, "optionalPeers": ["picomatch"] }, "sha512-hiFoqpyZcfNm1yc4u8oWCf9A2c4D3QjCrks3zmoVKVxpQRzmPNar1hUJcBG2RQHvEVGDN+Jm81ZheVLAQMK6+w=="],
+
+ "flattie": ["flattie@1.1.1", "", {}, "sha512-9UbaD6XdAL97+k/n+N7JwX46K/M6Zc6KcFYskrYL8wbBV/Uyk0CTAMY0VT+qiK5PM7AIc9aTWYtq65U7T+aCNQ=="],
+
+ "fontace": ["fontace@0.3.0", "", { "dependencies": { "@types/fontkit": "^2.0.8", "fontkit": "^2.0.4" } }, "sha512-czoqATrcnxgWb/nAkfyIrRp6Q8biYj7nGnL6zfhTcX+JKKpWHFBnb8uNMw/kZr7u++3Y3wYSYoZgHkCcsuBpBg=="],
+
+ "fontkit": ["fontkit@2.0.4", "", { "dependencies": { "@swc/helpers": "^0.5.12", "brotli": "^1.3.2", "clone": "^2.1.2", "dfa": "^1.2.0", "fast-deep-equal": "^3.1.3", "restructure": "^3.0.0", "tiny-inflate": "^1.0.3", "unicode-properties": "^1.4.0", "unicode-trie": "^2.0.0" } }, "sha512-syetQadaUEDNdxdugga9CpEYVaQIxOwk7GlwZWWZ19//qW4zE5bknOKeMBDYAASwnpaSHKJITRLMF9m1fp3s6g=="],
+
+ "fsevents": ["fsevents@2.3.3", "", { "os": "darwin" }, "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw=="],
+
+ "get-east-asian-width": ["get-east-asian-width@1.3.0", "", {}, "sha512-vpeMIQKxczTD/0s2CdEWHcb0eeJe6TFjxb+J5xgX7hScxqrGuyjmv4c1D4A/gelKfyox0gJJwIHF+fLjeaM8kQ=="],
+
+ "github-slugger": ["github-slugger@2.0.0", "", {}, "sha512-IaOQ9puYtjrkq7Y0Ygl9KDZnrf/aiUJYUpVf89y8kyaxbRG7Y1SrX/jaumrv81vc61+kiMempujsM3Yw7w5qcw=="],
+
+ "h3": ["h3@1.15.3", "", { "dependencies": { "cookie-es": "^1.2.2", "crossws": "^0.3.4", "defu": "^6.1.4", "destr": "^2.0.5", "iron-webcrypto": "^1.2.1", "node-mock-http": "^1.0.0", "radix3": "^1.1.2", "ufo": "^1.6.1", "uncrypto": "^0.1.3" } }, "sha512-z6GknHqyX0h9aQaTx22VZDf6QyZn+0Nh+Ym8O/u0SGSkyF5cuTJYKlc8MkzW3Nzf9LE1ivcpmYC3FUGpywhuUQ=="],
+
+ "hast-util-embedded": ["hast-util-embedded@3.0.0", "", { "dependencies": { "@types/hast": "^3.0.0", "hast-util-is-element": "^3.0.0" } }, "sha512-naH8sld4Pe2ep03qqULEtvYr7EjrLK2QHY8KJR6RJkTUjPGObe1vnx585uzem2hGra+s1q08DZZpfgDVYRbaXA=="],
+
+ "hast-util-format": ["hast-util-format@1.1.0", "", { "dependencies": { "@types/hast": "^3.0.0", "hast-util-embedded": "^3.0.0", "hast-util-minify-whitespace": "^1.0.0", "hast-util-phrasing": "^3.0.0", "hast-util-whitespace": "^3.0.0", "html-whitespace-sensitive-tag-names": "^3.0.0", "unist-util-visit-parents": "^6.0.0" } }, "sha512-yY1UDz6bC9rDvCWHpx12aIBGRG7krurX0p0Fm6pT547LwDIZZiNr8a+IHDogorAdreULSEzP82Nlv5SZkHZcjA=="],
+
+ "hast-util-from-html": ["hast-util-from-html@2.0.3", "", { "dependencies": { "@types/hast": "^3.0.0", "devlop": "^1.1.0", "hast-util-from-parse5": "^8.0.0", "parse5": "^7.0.0", "vfile": "^6.0.0", "vfile-message": "^4.0.0" } }, "sha512-CUSRHXyKjzHov8yKsQjGOElXy/3EKpyX56ELnkHH34vDVw1N1XSQ1ZcAvTyAPtGqLTuKP/uxM+aLkSPqF/EtMw=="],
+
+ "hast-util-from-parse5": ["hast-util-from-parse5@8.0.3", "", { "dependencies": { "@types/hast": "^3.0.0", "@types/unist": "^3.0.0", "devlop": "^1.0.0", "hastscript": "^9.0.0", "property-information": "^7.0.0", "vfile": "^6.0.0", "vfile-location": "^5.0.0", "web-namespaces": "^2.0.0" } }, "sha512-3kxEVkEKt0zvcZ3hCRYI8rqrgwtlIOFMWkbclACvjlDw8Li9S2hk/d51OI0nr/gIpdMHNepwgOKqZ/sy0Clpyg=="],
+
+ "hast-util-has-property": ["hast-util-has-property@3.0.0", "", { "dependencies": { "@types/hast": "^3.0.0" } }, "sha512-MNilsvEKLFpV604hwfhVStK0usFY/QmM5zX16bo7EjnAEGofr5YyI37kzopBlZJkHD4t887i+q/C8/tr5Q94cA=="],
+
+ "hast-util-is-body-ok-link": ["hast-util-is-body-ok-link@3.0.1", "", { "dependencies": { "@types/hast": "^3.0.0" } }, "sha512-0qpnzOBLztXHbHQenVB8uNuxTnm/QBFUOmdOSsEn7GnBtyY07+ENTWVFBAnXd/zEgd9/SUG3lRY7hSIBWRgGpQ=="],
+
+ "hast-util-is-element": ["hast-util-is-element@3.0.0", "", { "dependencies": { "@types/hast": "^3.0.0" } }, "sha512-Val9mnv2IWpLbNPqc/pUem+a7Ipj2aHacCwgNfTiK0vJKl0LF+4Ba4+v1oPHFpf3bLYmreq0/l3Gud9S5OH42g=="],
+
+ "hast-util-minify-whitespace": ["hast-util-minify-whitespace@1.0.1", "", { "dependencies": { "@types/hast": "^3.0.0", "hast-util-embedded": "^3.0.0", "hast-util-is-element": "^3.0.0", "hast-util-whitespace": "^3.0.0", "unist-util-is": "^6.0.0" } }, "sha512-L96fPOVpnclQE0xzdWb/D12VT5FabA7SnZOUMtL1DbXmYiHJMXZvFkIZfiMmTCNJHUeO2K9UYNXoVyfz+QHuOw=="],
+
+ "hast-util-parse-selector": ["hast-util-parse-selector@4.0.0", "", { "dependencies": { "@types/hast": "^3.0.0" } }, "sha512-wkQCkSYoOGCRKERFWcxMVMOcYE2K1AaNLU8DXS9arxnLOUEWbOXKXiJUNzEpqZ3JOKpnha3jkFrumEjVliDe7A=="],
+
+ "hast-util-phrasing": ["hast-util-phrasing@3.0.1", "", { "dependencies": { "@types/hast": "^3.0.0", "hast-util-embedded": "^3.0.0", "hast-util-has-property": "^3.0.0", "hast-util-is-body-ok-link": "^3.0.0", "hast-util-is-element": "^3.0.0" } }, "sha512-6h60VfI3uBQUxHqTyMymMZnEbNl1XmEGtOxxKYL7stY2o601COo62AWAYBQR9lZbYXYSBoxag8UpPRXK+9fqSQ=="],
+
+ "hast-util-raw": ["hast-util-raw@9.1.0", "", { "dependencies": { "@types/hast": "^3.0.0", "@types/unist": "^3.0.0", "@ungap/structured-clone": "^1.0.0", "hast-util-from-parse5": "^8.0.0", "hast-util-to-parse5": "^8.0.0", "html-void-elements": "^3.0.0", "mdast-util-to-hast": "^13.0.0", "parse5": "^7.0.0", "unist-util-position": "^5.0.0", "unist-util-visit": "^5.0.0", "vfile": "^6.0.0", "web-namespaces": "^2.0.0", "zwitch": "^2.0.0" } }, "sha512-Y8/SBAHkZGoNkpzqqfCldijcuUKh7/su31kEBp67cFY09Wy0mTRgtsLYsiIxMJxlu0f6AA5SUTbDR8K0rxnbUw=="],
+
+ "hast-util-select": ["hast-util-select@6.0.4", "", { "dependencies": { "@types/hast": "^3.0.0", "@types/unist": "^3.0.0", "bcp-47-match": "^2.0.0", "comma-separated-tokens": "^2.0.0", "css-selector-parser": "^3.0.0", "devlop": "^1.0.0", "direction": "^2.0.0", "hast-util-has-property": "^3.0.0", "hast-util-to-string": "^3.0.0", "hast-util-whitespace": "^3.0.0", "nth-check": "^2.0.0", "property-information": "^7.0.0", "space-separated-tokens": "^2.0.0", "unist-util-visit": "^5.0.0", "zwitch": "^2.0.0" } }, "sha512-RqGS1ZgI0MwxLaKLDxjprynNzINEkRHY2i8ln4DDjgv9ZhcYVIHN9rlpiYsqtFwrgpYU361SyWDQcGNIBVu3lw=="],
+
+ "hast-util-to-estree": ["hast-util-to-estree@3.1.3", "", { "dependencies": { "@types/estree": "^1.0.0", "@types/estree-jsx": "^1.0.0", "@types/hast": "^3.0.0", "comma-separated-tokens": "^2.0.0", "devlop": "^1.0.0", "estree-util-attach-comments": "^3.0.0", "estree-util-is-identifier-name": "^3.0.0", "hast-util-whitespace": "^3.0.0", "mdast-util-mdx-expression": "^2.0.0", "mdast-util-mdx-jsx": "^3.0.0", "mdast-util-mdxjs-esm": "^2.0.0", "property-information": "^7.0.0", "space-separated-tokens": "^2.0.0", "style-to-js": "^1.0.0", "unist-util-position": "^5.0.0", "zwitch": "^2.0.0" } }, "sha512-48+B/rJWAp0jamNbAAf9M7Uf//UVqAoMmgXhBdxTDJLGKY+LRnZ99qcG+Qjl5HfMpYNzS5v4EAwVEF34LeAj7w=="],
+
+ "hast-util-to-html": ["hast-util-to-html@9.0.5", "", { "dependencies": { "@types/hast": "^3.0.0", "@types/unist": "^3.0.0", "ccount": "^2.0.0", "comma-separated-tokens": "^2.0.0", "hast-util-whitespace": "^3.0.0", "html-void-elements": "^3.0.0", "mdast-util-to-hast": "^13.0.0", "property-information": "^7.0.0", "space-separated-tokens": "^2.0.0", "stringify-entities": "^4.0.0", "zwitch": "^2.0.4" } }, "sha512-OguPdidb+fbHQSU4Q4ZiLKnzWo8Wwsf5bZfbvu7//a9oTYoqD/fWpe96NuHkoS9h0ccGOTe0C4NGXdtS0iObOw=="],
+
+ "hast-util-to-jsx-runtime": ["hast-util-to-jsx-runtime@2.3.6", "", { "dependencies": { "@types/estree": "^1.0.0", "@types/hast": "^3.0.0", "@types/unist": "^3.0.0", "comma-separated-tokens": "^2.0.0", "devlop": "^1.0.0", "estree-util-is-identifier-name": "^3.0.0", "hast-util-whitespace": "^3.0.0", "mdast-util-mdx-expression": "^2.0.0", "mdast-util-mdx-jsx": "^3.0.0", "mdast-util-mdxjs-esm": "^2.0.0", "property-information": "^7.0.0", "space-separated-tokens": "^2.0.0", "style-to-js": "^1.0.0", "unist-util-position": "^5.0.0", "vfile-message": "^4.0.0" } }, "sha512-zl6s8LwNyo1P9uw+XJGvZtdFF1GdAkOg8ujOw+4Pyb76874fLps4ueHXDhXWdk6YHQ6OgUtinliG7RsYvCbbBg=="],
+
+ "hast-util-to-parse5": ["hast-util-to-parse5@8.0.0", "", { "dependencies": { "@types/hast": "^3.0.0", "comma-separated-tokens": "^2.0.0", "devlop": "^1.0.0", "property-information": "^6.0.0", "space-separated-tokens": "^2.0.0", "web-namespaces": "^2.0.0", "zwitch": "^2.0.0" } }, "sha512-3KKrV5ZVI8if87DVSi1vDeByYrkGzg4mEfeu4alwgmmIeARiBLKCZS2uw5Gb6nU9x9Yufyj3iudm6i7nl52PFw=="],
+
+ "hast-util-to-string": ["hast-util-to-string@3.0.1", "", { "dependencies": { "@types/hast": "^3.0.0" } }, "sha512-XelQVTDWvqcl3axRfI0xSeoVKzyIFPwsAGSLIsKdJKQMXDYJS4WYrBNF/8J7RdhIcFI2BOHgAifggsvsxp/3+A=="],
+
+ "hast-util-to-text": ["hast-util-to-text@4.0.2", "", { "dependencies": { "@types/hast": "^3.0.0", "@types/unist": "^3.0.0", "hast-util-is-element": "^3.0.0", "unist-util-find-after": "^5.0.0" } }, "sha512-KK6y/BN8lbaq654j7JgBydev7wuNMcID54lkRav1P0CaE1e47P72AWWPiGKXTJU271ooYzcvTAn/Zt0REnvc7A=="],
+
+ "hast-util-whitespace": ["hast-util-whitespace@3.0.0", "", { "dependencies": { "@types/hast": "^3.0.0" } }, "sha512-88JUN06ipLwsnv+dVn+OIYOvAuvBMy/Qoi6O7mQHxdPXpjy+Cd6xRkWwux7DKO+4sYILtLBRIKgsdpS2gQc7qw=="],
+
+ "hastscript": ["hastscript@9.0.1", "", { "dependencies": { "@types/hast": "^3.0.0", "comma-separated-tokens": "^2.0.0", "hast-util-parse-selector": "^4.0.0", "property-information": "^7.0.0", "space-separated-tokens": "^2.0.0" } }, "sha512-g7df9rMFX/SPi34tyGCyUBREQoKkapwdY/T04Qn9TDWfHhAYt4/I0gMVirzK5wEzeUqIjEB+LXC/ypb7Aqno5w=="],
+
+ "html-escaper": ["html-escaper@3.0.3", "", {}, "sha512-RuMffC89BOWQoY0WKGpIhn5gX3iI54O6nRA0yC124NYVtzjmFWBIiFd8M0x+ZdX0P9R4lADg1mgP8C7PxGOWuQ=="],
+
+ "html-void-elements": ["html-void-elements@3.0.0", "", {}, "sha512-bEqo66MRXsUGxWHV5IP0PUiAWwoEjba4VCzg0LjFJBpchPaTfyfCKTG6bc5F8ucKec3q5y6qOdGyYTSBEvhCrg=="],
+
+ "html-whitespace-sensitive-tag-names": ["html-whitespace-sensitive-tag-names@3.0.1", "", {}, "sha512-q+310vW8zmymYHALr1da4HyXUQ0zgiIwIicEfotYPWGN0OJVEN/58IJ3A4GBYcEq3LGAZqKb+ugvP0GNB9CEAA=="],
+
+ "http-cache-semantics": ["http-cache-semantics@4.2.0", "", {}, "sha512-dTxcvPXqPvXBQpq5dUr6mEMJX4oIEFv6bwom3FDwKRDsuIjjJGANqhBuoAn9c1RQJIdAKav33ED65E2ys+87QQ=="],
+
+ "i18next": ["i18next@23.16.8", "", { "dependencies": { "@babel/runtime": "^7.23.2" } }, "sha512-06r/TitrM88Mg5FdUXAKL96dJMzgqLE5dv3ryBAra4KCwD9mJ4ndOTS95ZuymIGoE+2hzfdaMak2X11/es7ZWg=="],
+
+ "import-meta-resolve": ["import-meta-resolve@4.1.0", "", {}, "sha512-I6fiaX09Xivtk+THaMfAwnA3MVA5Big1WHF1Dfx9hFuvNIWpXnorlkzhcQf6ehrqQiiZECRt1poOAkPmer3ruw=="],
+
+ "inline-style-parser": ["inline-style-parser@0.2.4", "", {}, "sha512-0aO8FkhNZlj/ZIbNi7Lxxr12obT7cL1moPfE4tg1LkX7LlLfC6DeX4l2ZEud1ukP9jNQyNnfzQVqwbwmAATY4Q=="],
+
+ "iron-webcrypto": ["iron-webcrypto@1.2.1", "", {}, "sha512-feOM6FaSr6rEABp/eDfVseKyTMDt+KGpeB35SkVn9Tyn0CqvVsY3EwI0v5i8nMHyJnzCIQf7nsy3p41TPkJZhg=="],
+
+ "is-alphabetical": ["is-alphabetical@2.0.1", "", {}, "sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ=="],
+
+ "is-alphanumerical": ["is-alphanumerical@2.0.1", "", { "dependencies": { "is-alphabetical": "^2.0.0", "is-decimal": "^2.0.0" } }, "sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw=="],
+
+ "is-arrayish": ["is-arrayish@0.3.2", "", {}, "sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ=="],
+
+ "is-decimal": ["is-decimal@2.0.1", "", {}, "sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A=="],
+
+ "is-docker": ["is-docker@3.0.0", "", { "bin": { "is-docker": "cli.js" } }, "sha512-eljcgEDlEns/7AXFosB5K/2nCM4P7FQPkGc/DWLy5rmFEWvZayGrik1d9/QIY5nJ4f9YsVvBkA6kJpHn9rISdQ=="],
+
+ "is-fullwidth-code-point": ["is-fullwidth-code-point@3.0.0", "", {}, "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg=="],
+
+ "is-hexadecimal": ["is-hexadecimal@2.0.1", "", {}, "sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg=="],
+
+ "is-inside-container": ["is-inside-container@1.0.0", "", { "dependencies": { "is-docker": "^3.0.0" }, "bin": { "is-inside-container": "cli.js" } }, "sha512-KIYLCCJghfHZxqjYBE7rEy0OBuTd5xCHS7tHVgvCLkx7StIoaxwNW3hCALgEUjFfeRk+MG/Qxmp/vtETEF3tRA=="],
+
+ "is-plain-obj": ["is-plain-obj@4.1.0", "", {}, "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg=="],
+
+ "is-wsl": ["is-wsl@3.1.0", "", { "dependencies": { "is-inside-container": "^1.0.0" } }, "sha512-UcVfVfaK4Sc4m7X3dUSoHoozQGBEFeDC+zVo06t98xe8CzHSZZBekNXH+tu0NalHolcJ/QAGqS46Hef7QXBIMw=="],
+
+ "js-yaml": ["js-yaml@4.1.0", "", { "dependencies": { "argparse": "^2.0.1" }, "bin": { "js-yaml": "bin/js-yaml.js" } }, "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA=="],
+
+ "kleur": ["kleur@4.1.5", "", {}, "sha512-o+NO+8WrRiQEE4/7nwRJhN1HWpVmJm511pBHUxPLtp0BUISzlBplORYSmTclCnJvQq2tKu/sgl3xVpkc7ZWuQQ=="],
+
+ "klona": ["klona@2.0.6", "", {}, "sha512-dhG34DXATL5hSxJbIexCft8FChFXtmskoZYnoPWjXQuebWYCNkVeV3KkGegCK9CP1oswI/vQibS2GY7Em/sJJA=="],
+
+ "longest-streak": ["longest-streak@3.1.0", "", {}, "sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g=="],
+
+ "lru-cache": ["lru-cache@10.4.3", "", {}, "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ=="],
+
+ "magic-string": ["magic-string@0.30.17", "", { "dependencies": { "@jridgewell/sourcemap-codec": "^1.5.0" } }, "sha512-sNPKHvyjVf7gyjwS4xGTaW/mCnF8wnjtifKBEhxfZ7E/S8tQ0rssrwGNn6q8JH/ohItJfSQp9mBtQYuTlH5QnA=="],
+
+ "magicast": ["magicast@0.3.5", "", { "dependencies": { "@babel/parser": "^7.25.4", "@babel/types": "^7.25.4", "source-map-js": "^1.2.0" } }, "sha512-L0WhttDl+2BOsybvEOLK7fW3UA0OQ0IQ2d6Zl2x/a6vVRs3bAY0ECOSHHeL5jD+SbOpOCUEi0y1DgHEn9Qn1AQ=="],
+
+ "markdown-extensions": ["markdown-extensions@2.0.0", "", {}, "sha512-o5vL7aDWatOTX8LzaS1WMoaoxIiLRQJuIKKe2wAw6IeULDHaqbiqiggmx+pKvZDb1Sj+pE46Sn1T7lCqfFtg1Q=="],
+
+ "markdown-table": ["markdown-table@3.0.4", "", {}, "sha512-wiYz4+JrLyb/DqW2hkFJxP7Vd7JuTDm77fvbM8VfEQdmSMqcImWeeRbHwZjBjIFki/VaMK2BhFi7oUUZeM5bqw=="],
+
+ "mdast-util-definitions": ["mdast-util-definitions@6.0.0", "", { "dependencies": { "@types/mdast": "^4.0.0", "@types/unist": "^3.0.0", "unist-util-visit": "^5.0.0" } }, "sha512-scTllyX6pnYNZH/AIp/0ePz6s4cZtARxImwoPJ7kS42n+MnVsI4XbnG6d4ibehRIldYMWM2LD7ImQblVhUejVQ=="],
+
+ "mdast-util-directive": ["mdast-util-directive@3.1.0", "", { "dependencies": { "@types/mdast": "^4.0.0", "@types/unist": "^3.0.0", "ccount": "^2.0.0", "devlop": "^1.0.0", "mdast-util-from-markdown": "^2.0.0", "mdast-util-to-markdown": "^2.0.0", "parse-entities": "^4.0.0", "stringify-entities": "^4.0.0", "unist-util-visit-parents": "^6.0.0" } }, "sha512-I3fNFt+DHmpWCYAT7quoM6lHf9wuqtI+oCOfvILnoicNIqjh5E3dEJWiXuYME2gNe8vl1iMQwyUHa7bgFmak6Q=="],
+
+ "mdast-util-find-and-replace": ["mdast-util-find-and-replace@3.0.2", "", { "dependencies": { "@types/mdast": "^4.0.0", "escape-string-regexp": "^5.0.0", "unist-util-is": "^6.0.0", "unist-util-visit-parents": "^6.0.0" } }, "sha512-Tmd1Vg/m3Xz43afeNxDIhWRtFZgM2VLyaf4vSTYwudTyeuTneoL3qtWMA5jeLyz/O1vDJmmV4QuScFCA2tBPwg=="],
+
+ "mdast-util-from-markdown": ["mdast-util-from-markdown@2.0.2", "", { "dependencies": { "@types/mdast": "^4.0.0", "@types/unist": "^3.0.0", "decode-named-character-reference": "^1.0.0", "devlop": "^1.0.0", "mdast-util-to-string": "^4.0.0", "micromark": "^4.0.0", "micromark-util-decode-numeric-character-reference": "^2.0.0", "micromark-util-decode-string": "^2.0.0", "micromark-util-normalize-identifier": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0", "unist-util-stringify-position": "^4.0.0" } }, "sha512-uZhTV/8NBuw0WHkPTrCqDOl0zVe1BIng5ZtHoDk49ME1qqcjYmmLmOf0gELgcRMxN4w2iuIeVso5/6QymSrgmA=="],
+
+ "mdast-util-gfm": ["mdast-util-gfm@3.1.0", "", { "dependencies": { "mdast-util-from-markdown": "^2.0.0", "mdast-util-gfm-autolink-literal": "^2.0.0", "mdast-util-gfm-footnote": "^2.0.0", "mdast-util-gfm-strikethrough": "^2.0.0", "mdast-util-gfm-table": "^2.0.0", "mdast-util-gfm-task-list-item": "^2.0.0", "mdast-util-to-markdown": "^2.0.0" } }, "sha512-0ulfdQOM3ysHhCJ1p06l0b0VKlhU0wuQs3thxZQagjcjPrlFRqY215uZGHHJan9GEAXd9MbfPjFJz+qMkVR6zQ=="],
+
+ "mdast-util-gfm-autolink-literal": ["mdast-util-gfm-autolink-literal@2.0.1", "", { "dependencies": { "@types/mdast": "^4.0.0", "ccount": "^2.0.0", "devlop": "^1.0.0", "mdast-util-find-and-replace": "^3.0.0", "micromark-util-character": "^2.0.0" } }, "sha512-5HVP2MKaP6L+G6YaxPNjuL0BPrq9orG3TsrZ9YXbA3vDw/ACI4MEsnoDpn6ZNm7GnZgtAcONJyPhOP8tNJQavQ=="],
+
+ "mdast-util-gfm-footnote": ["mdast-util-gfm-footnote@2.1.0", "", { "dependencies": { "@types/mdast": "^4.0.0", "devlop": "^1.1.0", "mdast-util-from-markdown": "^2.0.0", "mdast-util-to-markdown": "^2.0.0", "micromark-util-normalize-identifier": "^2.0.0" } }, "sha512-sqpDWlsHn7Ac9GNZQMeUzPQSMzR6Wv0WKRNvQRg0KqHh02fpTz69Qc1QSseNX29bhz1ROIyNyxExfawVKTm1GQ=="],
+
+ "mdast-util-gfm-strikethrough": ["mdast-util-gfm-strikethrough@2.0.0", "", { "dependencies": { "@types/mdast": "^4.0.0", "mdast-util-from-markdown": "^2.0.0", "mdast-util-to-markdown": "^2.0.0" } }, "sha512-mKKb915TF+OC5ptj5bJ7WFRPdYtuHv0yTRxK2tJvi+BDqbkiG7h7u/9SI89nRAYcmap2xHQL9D+QG/6wSrTtXg=="],
+
+ "mdast-util-gfm-table": ["mdast-util-gfm-table@2.0.0", "", { "dependencies": { "@types/mdast": "^4.0.0", "devlop": "^1.0.0", "markdown-table": "^3.0.0", "mdast-util-from-markdown": "^2.0.0", "mdast-util-to-markdown": "^2.0.0" } }, "sha512-78UEvebzz/rJIxLvE7ZtDd/vIQ0RHv+3Mh5DR96p7cS7HsBhYIICDBCu8csTNWNO6tBWfqXPWekRuj2FNOGOZg=="],
+
+ "mdast-util-gfm-task-list-item": ["mdast-util-gfm-task-list-item@2.0.0", "", { "dependencies": { "@types/mdast": "^4.0.0", "devlop": "^1.0.0", "mdast-util-from-markdown": "^2.0.0", "mdast-util-to-markdown": "^2.0.0" } }, "sha512-IrtvNvjxC1o06taBAVJznEnkiHxLFTzgonUdy8hzFVeDun0uTjxxrRGVaNFqkU1wJR3RBPEfsxmU6jDWPofrTQ=="],
+
+ "mdast-util-mdx": ["mdast-util-mdx@3.0.0", "", { "dependencies": { "mdast-util-from-markdown": "^2.0.0", "mdast-util-mdx-expression": "^2.0.0", "mdast-util-mdx-jsx": "^3.0.0", "mdast-util-mdxjs-esm": "^2.0.0", "mdast-util-to-markdown": "^2.0.0" } }, "sha512-JfbYLAW7XnYTTbUsmpu0kdBUVe+yKVJZBItEjwyYJiDJuZ9w4eeaqks4HQO+R7objWgS2ymV60GYpI14Ug554w=="],
+
+ "mdast-util-mdx-expression": ["mdast-util-mdx-expression@2.0.1", "", { "dependencies": { "@types/estree-jsx": "^1.0.0", "@types/hast": "^3.0.0", "@types/mdast": "^4.0.0", "devlop": "^1.0.0", "mdast-util-from-markdown": "^2.0.0", "mdast-util-to-markdown": "^2.0.0" } }, "sha512-J6f+9hUp+ldTZqKRSg7Vw5V6MqjATc+3E4gf3CFNcuZNWD8XdyI6zQ8GqH7f8169MM6P7hMBRDVGnn7oHB9kXQ=="],
+
+ "mdast-util-mdx-jsx": ["mdast-util-mdx-jsx@3.2.0", "", { "dependencies": { "@types/estree-jsx": "^1.0.0", "@types/hast": "^3.0.0", "@types/mdast": "^4.0.0", "@types/unist": "^3.0.0", "ccount": "^2.0.0", "devlop": "^1.1.0", "mdast-util-from-markdown": "^2.0.0", "mdast-util-to-markdown": "^2.0.0", "parse-entities": "^4.0.0", "stringify-entities": "^4.0.0", "unist-util-stringify-position": "^4.0.0", "vfile-message": "^4.0.0" } }, "sha512-lj/z8v0r6ZtsN/cGNNtemmmfoLAFZnjMbNyLzBafjzikOM+glrjNHPlf6lQDOTccj9n5b0PPihEBbhneMyGs1Q=="],
+
+ "mdast-util-mdxjs-esm": ["mdast-util-mdxjs-esm@2.0.1", "", { "dependencies": { "@types/estree-jsx": "^1.0.0", "@types/hast": "^3.0.0", "@types/mdast": "^4.0.0", "devlop": "^1.0.0", "mdast-util-from-markdown": "^2.0.0", "mdast-util-to-markdown": "^2.0.0" } }, "sha512-EcmOpxsZ96CvlP03NghtH1EsLtr0n9Tm4lPUJUBccV9RwUOneqSycg19n5HGzCf+10LozMRSObtVr3ee1WoHtg=="],
+
+ "mdast-util-phrasing": ["mdast-util-phrasing@4.1.0", "", { "dependencies": { "@types/mdast": "^4.0.0", "unist-util-is": "^6.0.0" } }, "sha512-TqICwyvJJpBwvGAMZjj4J2n0X8QWp21b9l0o7eXyVJ25YNWYbJDVIyD1bZXE6WtV6RmKJVYmQAKWa0zWOABz2w=="],
+
+ "mdast-util-to-hast": ["mdast-util-to-hast@13.2.0", "", { "dependencies": { "@types/hast": "^3.0.0", "@types/mdast": "^4.0.0", "@ungap/structured-clone": "^1.0.0", "devlop": "^1.0.0", "micromark-util-sanitize-uri": "^2.0.0", "trim-lines": "^3.0.0", "unist-util-position": "^5.0.0", "unist-util-visit": "^5.0.0", "vfile": "^6.0.0" } }, "sha512-QGYKEuUsYT9ykKBCMOEDLsU5JRObWQusAolFMeko/tYPufNkRffBAQjIE+99jbA87xv6FgmjLtwjh9wBWajwAA=="],
+
+ "mdast-util-to-markdown": ["mdast-util-to-markdown@2.1.2", "", { "dependencies": { "@types/mdast": "^4.0.0", "@types/unist": "^3.0.0", "longest-streak": "^3.0.0", "mdast-util-phrasing": "^4.0.0", "mdast-util-to-string": "^4.0.0", "micromark-util-classify-character": "^2.0.0", "micromark-util-decode-string": "^2.0.0", "unist-util-visit": "^5.0.0", "zwitch": "^2.0.0" } }, "sha512-xj68wMTvGXVOKonmog6LwyJKrYXZPvlwabaryTjLh9LuvovB/KAH+kvi8Gjj+7rJjsFi23nkUxRQv1KqSroMqA=="],
+
+ "mdast-util-to-string": ["mdast-util-to-string@4.0.0", "", { "dependencies": { "@types/mdast": "^4.0.0" } }, "sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg=="],
+
+ "mdn-data": ["mdn-data@2.12.2", "", {}, "sha512-IEn+pegP1aManZuckezWCO+XZQDplx1366JoVhTpMpBB1sPey/SbveZQUosKiKiGYjg1wH4pMlNgXbCiYgihQA=="],
+
+ "micromark": ["micromark@4.0.2", "", { "dependencies": { "@types/debug": "^4.0.0", "debug": "^4.0.0", "decode-named-character-reference": "^1.0.0", "devlop": "^1.0.0", "micromark-core-commonmark": "^2.0.0", "micromark-factory-space": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-chunked": "^2.0.0", "micromark-util-combine-extensions": "^2.0.0", "micromark-util-decode-numeric-character-reference": "^2.0.0", "micromark-util-encode": "^2.0.0", "micromark-util-normalize-identifier": "^2.0.0", "micromark-util-resolve-all": "^2.0.0", "micromark-util-sanitize-uri": "^2.0.0", "micromark-util-subtokenize": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-zpe98Q6kvavpCr1NPVSCMebCKfD7CA2NqZ+rykeNhONIJBpc1tFKt9hucLGwha3jNTNI8lHpctWJWoimVF4PfA=="],
+
+ "micromark-core-commonmark": ["micromark-core-commonmark@2.0.3", "", { "dependencies": { "decode-named-character-reference": "^1.0.0", "devlop": "^1.0.0", "micromark-factory-destination": "^2.0.0", "micromark-factory-label": "^2.0.0", "micromark-factory-space": "^2.0.0", "micromark-factory-title": "^2.0.0", "micromark-factory-whitespace": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-chunked": "^2.0.0", "micromark-util-classify-character": "^2.0.0", "micromark-util-html-tag-name": "^2.0.0", "micromark-util-normalize-identifier": "^2.0.0", "micromark-util-resolve-all": "^2.0.0", "micromark-util-subtokenize": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-RDBrHEMSxVFLg6xvnXmb1Ayr2WzLAWjeSATAoxwKYJV94TeNavgoIdA0a9ytzDSVzBy2YKFK+emCPOEibLeCrg=="],
+
+ "micromark-extension-directive": ["micromark-extension-directive@3.0.2", "", { "dependencies": { "devlop": "^1.0.0", "micromark-factory-space": "^2.0.0", "micromark-factory-whitespace": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0", "parse-entities": "^4.0.0" } }, "sha512-wjcXHgk+PPdmvR58Le9d7zQYWy+vKEU9Se44p2CrCDPiLr2FMyiT4Fyb5UFKFC66wGB3kPlgD7q3TnoqPS7SZA=="],
+
+ "micromark-extension-gfm": ["micromark-extension-gfm@3.0.0", "", { "dependencies": { "micromark-extension-gfm-autolink-literal": "^2.0.0", "micromark-extension-gfm-footnote": "^2.0.0", "micromark-extension-gfm-strikethrough": "^2.0.0", "micromark-extension-gfm-table": "^2.0.0", "micromark-extension-gfm-tagfilter": "^2.0.0", "micromark-extension-gfm-task-list-item": "^2.0.0", "micromark-util-combine-extensions": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-vsKArQsicm7t0z2GugkCKtZehqUm31oeGBV/KVSorWSy8ZlNAv7ytjFhvaryUiCUJYqs+NoE6AFhpQvBTM6Q4w=="],
+
+ "micromark-extension-gfm-autolink-literal": ["micromark-extension-gfm-autolink-literal@2.1.0", "", { "dependencies": { "micromark-util-character": "^2.0.0", "micromark-util-sanitize-uri": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-oOg7knzhicgQ3t4QCjCWgTmfNhvQbDDnJeVu9v81r7NltNCVmhPy1fJRX27pISafdjL+SVc4d3l48Gb6pbRypw=="],
+
+ "micromark-extension-gfm-footnote": ["micromark-extension-gfm-footnote@2.1.0", "", { "dependencies": { "devlop": "^1.0.0", "micromark-core-commonmark": "^2.0.0", "micromark-factory-space": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-normalize-identifier": "^2.0.0", "micromark-util-sanitize-uri": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-/yPhxI1ntnDNsiHtzLKYnE3vf9JZ6cAisqVDauhp4CEHxlb4uoOTxOCJ+9s51bIB8U1N1FJ1RXOKTIlD5B/gqw=="],
+
+ "micromark-extension-gfm-strikethrough": ["micromark-extension-gfm-strikethrough@2.1.0", "", { "dependencies": { "devlop": "^1.0.0", "micromark-util-chunked": "^2.0.0", "micromark-util-classify-character": "^2.0.0", "micromark-util-resolve-all": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-ADVjpOOkjz1hhkZLlBiYA9cR2Anf8F4HqZUO6e5eDcPQd0Txw5fxLzzxnEkSkfnD0wziSGiv7sYhk/ktvbf1uw=="],
+
+ "micromark-extension-gfm-table": ["micromark-extension-gfm-table@2.1.1", "", { "dependencies": { "devlop": "^1.0.0", "micromark-factory-space": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-t2OU/dXXioARrC6yWfJ4hqB7rct14e8f7m0cbI5hUmDyyIlwv5vEtooptH8INkbLzOatzKuVbQmAYcbWoyz6Dg=="],
+
+ "micromark-extension-gfm-tagfilter": ["micromark-extension-gfm-tagfilter@2.0.0", "", { "dependencies": { "micromark-util-types": "^2.0.0" } }, "sha512-xHlTOmuCSotIA8TW1mDIM6X2O1SiX5P9IuDtqGonFhEK0qgRI4yeC6vMxEV2dgyr2TiD+2PQ10o+cOhdVAcwfg=="],
+
+ "micromark-extension-gfm-task-list-item": ["micromark-extension-gfm-task-list-item@2.1.0", "", { "dependencies": { "devlop": "^1.0.0", "micromark-factory-space": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-qIBZhqxqI6fjLDYFTBIa4eivDMnP+OZqsNwmQ3xNLE4Cxwc+zfQEfbs6tzAo2Hjq+bh6q5F+Z8/cksrLFYWQQw=="],
+
+ "micromark-extension-mdx-expression": ["micromark-extension-mdx-expression@3.0.1", "", { "dependencies": { "@types/estree": "^1.0.0", "devlop": "^1.0.0", "micromark-factory-mdx-expression": "^2.0.0", "micromark-factory-space": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-events-to-acorn": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-dD/ADLJ1AeMvSAKBwO22zG22N4ybhe7kFIZ3LsDI0GlsNr2A3KYxb0LdC1u5rj4Nw+CHKY0RVdnHX8vj8ejm4Q=="],
+
+ "micromark-extension-mdx-jsx": ["micromark-extension-mdx-jsx@3.0.2", "", { "dependencies": { "@types/estree": "^1.0.0", "devlop": "^1.0.0", "estree-util-is-identifier-name": "^3.0.0", "micromark-factory-mdx-expression": "^2.0.0", "micromark-factory-space": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-events-to-acorn": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0", "vfile-message": "^4.0.0" } }, "sha512-e5+q1DjMh62LZAJOnDraSSbDMvGJ8x3cbjygy2qFEi7HCeUT4BDKCvMozPozcD6WmOt6sVvYDNBKhFSz3kjOVQ=="],
+
+ "micromark-extension-mdx-md": ["micromark-extension-mdx-md@2.0.0", "", { "dependencies": { "micromark-util-types": "^2.0.0" } }, "sha512-EpAiszsB3blw4Rpba7xTOUptcFeBFi+6PY8VnJ2hhimH+vCQDirWgsMpz7w1XcZE7LVrSAUGb9VJpG9ghlYvYQ=="],
+
+ "micromark-extension-mdxjs": ["micromark-extension-mdxjs@3.0.0", "", { "dependencies": { "acorn": "^8.0.0", "acorn-jsx": "^5.0.0", "micromark-extension-mdx-expression": "^3.0.0", "micromark-extension-mdx-jsx": "^3.0.0", "micromark-extension-mdx-md": "^2.0.0", "micromark-extension-mdxjs-esm": "^3.0.0", "micromark-util-combine-extensions": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-A873fJfhnJ2siZyUrJ31l34Uqwy4xIFmvPY1oj+Ean5PHcPBYzEsvqvWGaWcfEIr11O5Dlw3p2y0tZWpKHDejQ=="],
+
+ "micromark-extension-mdxjs-esm": ["micromark-extension-mdxjs-esm@3.0.0", "", { "dependencies": { "@types/estree": "^1.0.0", "devlop": "^1.0.0", "micromark-core-commonmark": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-events-to-acorn": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0", "unist-util-position-from-estree": "^2.0.0", "vfile-message": "^4.0.0" } }, "sha512-DJFl4ZqkErRpq/dAPyeWp15tGrcrrJho1hKK5uBS70BCtfrIFg81sqcTVu3Ta+KD1Tk5vAtBNElWxtAa+m8K9A=="],
+
+ "micromark-factory-destination": ["micromark-factory-destination@2.0.1", "", { "dependencies": { "micromark-util-character": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-Xe6rDdJlkmbFRExpTOmRj9N3MaWmbAgdpSrBQvCFqhezUn4AHqJHbaEnfbVYYiexVSs//tqOdY/DxhjdCiJnIA=="],
+
+ "micromark-factory-label": ["micromark-factory-label@2.0.1", "", { "dependencies": { "devlop": "^1.0.0", "micromark-util-character": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-VFMekyQExqIW7xIChcXn4ok29YE3rnuyveW3wZQWWqF4Nv9Wk5rgJ99KzPvHjkmPXF93FXIbBp6YdW3t71/7Vg=="],
+
+ "micromark-factory-mdx-expression": ["micromark-factory-mdx-expression@2.0.3", "", { "dependencies": { "@types/estree": "^1.0.0", "devlop": "^1.0.0", "micromark-factory-space": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-events-to-acorn": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0", "unist-util-position-from-estree": "^2.0.0", "vfile-message": "^4.0.0" } }, "sha512-kQnEtA3vzucU2BkrIa8/VaSAsP+EJ3CKOvhMuJgOEGg9KDC6OAY6nSnNDVRiVNRqj7Y4SlSzcStaH/5jge8JdQ=="],
+
+ "micromark-factory-space": ["micromark-factory-space@2.0.1", "", { "dependencies": { "micromark-util-character": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg=="],
+
+ "micromark-factory-title": ["micromark-factory-title@2.0.1", "", { "dependencies": { "micromark-factory-space": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-5bZ+3CjhAd9eChYTHsjy6TGxpOFSKgKKJPJxr293jTbfry2KDoWkhBb6TcPVB4NmzaPhMs1Frm9AZH7OD4Cjzw=="],
+
+ "micromark-factory-whitespace": ["micromark-factory-whitespace@2.0.1", "", { "dependencies": { "micromark-factory-space": "^2.0.0", "micromark-util-character": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-Ob0nuZ3PKt/n0hORHyvoD9uZhr+Za8sFoP+OnMcnWK5lngSzALgQYKMr9RJVOWLqQYuyn6ulqGWSXdwf6F80lQ=="],
+
+ "micromark-util-character": ["micromark-util-character@2.1.1", "", { "dependencies": { "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q=="],
+
+ "micromark-util-chunked": ["micromark-util-chunked@2.0.1", "", { "dependencies": { "micromark-util-symbol": "^2.0.0" } }, "sha512-QUNFEOPELfmvv+4xiNg2sRYeS/P84pTW0TCgP5zc9FpXetHY0ab7SxKyAQCNCc1eK0459uoLI1y5oO5Vc1dbhA=="],
+
+ "micromark-util-classify-character": ["micromark-util-classify-character@2.0.1", "", { "dependencies": { "micromark-util-character": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-K0kHzM6afW/MbeWYWLjoHQv1sgg2Q9EccHEDzSkxiP/EaagNzCm7T/WMKZ3rjMbvIpvBiZgwR3dKMygtA4mG1Q=="],
+
+ "micromark-util-combine-extensions": ["micromark-util-combine-extensions@2.0.1", "", { "dependencies": { "micromark-util-chunked": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-OnAnH8Ujmy59JcyZw8JSbK9cGpdVY44NKgSM7E9Eh7DiLS2E9RNQf0dONaGDzEG9yjEl5hcqeIsj4hfRkLH/Bg=="],
+
+ "micromark-util-decode-numeric-character-reference": ["micromark-util-decode-numeric-character-reference@2.0.2", "", { "dependencies": { "micromark-util-symbol": "^2.0.0" } }, "sha512-ccUbYk6CwVdkmCQMyr64dXz42EfHGkPQlBj5p7YVGzq8I7CtjXZJrubAYezf7Rp+bjPseiROqe7G6foFd+lEuw=="],
+
+ "micromark-util-decode-string": ["micromark-util-decode-string@2.0.1", "", { "dependencies": { "decode-named-character-reference": "^1.0.0", "micromark-util-character": "^2.0.0", "micromark-util-decode-numeric-character-reference": "^2.0.0", "micromark-util-symbol": "^2.0.0" } }, "sha512-nDV/77Fj6eH1ynwscYTOsbK7rR//Uj0bZXBwJZRfaLEJ1iGBR6kIfNmlNqaqJf649EP0F3NWNdeJi03elllNUQ=="],
+
+ "micromark-util-encode": ["micromark-util-encode@2.0.1", "", {}, "sha512-c3cVx2y4KqUnwopcO9b/SCdo2O67LwJJ/UyqGfbigahfegL9myoEFoDYZgkT7f36T0bLrM9hZTAaAyH+PCAXjw=="],
+
+ "micromark-util-events-to-acorn": ["micromark-util-events-to-acorn@2.0.3", "", { "dependencies": { "@types/estree": "^1.0.0", "@types/unist": "^3.0.0", "devlop": "^1.0.0", "estree-util-visit": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0", "vfile-message": "^4.0.0" } }, "sha512-jmsiEIiZ1n7X1Rr5k8wVExBQCg5jy4UXVADItHmNk1zkwEVhBuIUKRu3fqv+hs4nxLISi2DQGlqIOGiFxgbfHg=="],
+
+ "micromark-util-html-tag-name": ["micromark-util-html-tag-name@2.0.1", "", {}, "sha512-2cNEiYDhCWKI+Gs9T0Tiysk136SnR13hhO8yW6BGNyhOC4qYFnwF1nKfD3HFAIXA5c45RrIG1ub11GiXeYd1xA=="],
+
+ "micromark-util-normalize-identifier": ["micromark-util-normalize-identifier@2.0.1", "", { "dependencies": { "micromark-util-symbol": "^2.0.0" } }, "sha512-sxPqmo70LyARJs0w2UclACPUUEqltCkJ6PhKdMIDuJ3gSf/Q+/GIe3WKl0Ijb/GyH9lOpUkRAO2wp0GVkLvS9Q=="],
+
+ "micromark-util-resolve-all": ["micromark-util-resolve-all@2.0.1", "", { "dependencies": { "micromark-util-types": "^2.0.0" } }, "sha512-VdQyxFWFT2/FGJgwQnJYbe1jjQoNTS4RjglmSjTUlpUMa95Htx9NHeYW4rGDJzbjvCsl9eLjMQwGeElsqmzcHg=="],
+
+ "micromark-util-sanitize-uri": ["micromark-util-sanitize-uri@2.0.1", "", { "dependencies": { "micromark-util-character": "^2.0.0", "micromark-util-encode": "^2.0.0", "micromark-util-symbol": "^2.0.0" } }, "sha512-9N9IomZ/YuGGZZmQec1MbgxtlgougxTodVwDzzEouPKo3qFWvymFHWcnDi2vzV1ff6kas9ucW+o3yzJK9YB1AQ=="],
+
+ "micromark-util-subtokenize": ["micromark-util-subtokenize@2.1.0", "", { "dependencies": { "devlop": "^1.0.0", "micromark-util-chunked": "^2.0.0", "micromark-util-symbol": "^2.0.0", "micromark-util-types": "^2.0.0" } }, "sha512-XQLu552iSctvnEcgXw6+Sx75GflAPNED1qx7eBJ+wydBb2KCbRZe+NwvIEEMM83uml1+2WSXpBAcp9IUCgCYWA=="],
+
+ "micromark-util-symbol": ["micromark-util-symbol@2.0.1", "", {}, "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q=="],
+
+ "micromark-util-types": ["micromark-util-types@2.0.2", "", {}, "sha512-Yw0ECSpJoViF1qTU4DC6NwtC4aWGt1EkzaQB8KPPyCRR8z9TWeV0HbEFGTO+ZY1wB22zmxnJqhPyTpOVCpeHTA=="],
+
+ "mrmime": ["mrmime@2.0.1", "", {}, "sha512-Y3wQdFg2Va6etvQ5I82yUhGdsKrcYox6p7FfL1LbK2J4V01F9TGlepTIhnK24t7koZibmg82KGglhA1XK5IsLQ=="],
+
+ "ms": ["ms@2.1.3", "", {}, "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="],
+
+ "nanoid": ["nanoid@3.3.11", "", { "bin": { "nanoid": "bin/nanoid.cjs" } }, "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w=="],
+
+ "neotraverse": ["neotraverse@0.6.18", "", {}, "sha512-Z4SmBUweYa09+o6pG+eASabEpP6QkQ70yHj351pQoEXIs8uHbaU2DWVmzBANKgflPa47A50PtB2+NgRpQvr7vA=="],
+
+ "nlcst-to-string": ["nlcst-to-string@4.0.0", "", { "dependencies": { "@types/nlcst": "^2.0.0" } }, "sha512-YKLBCcUYKAg0FNlOBT6aI91qFmSiFKiluk655WzPF+DDMA02qIyy8uiRqI8QXtcFpEvll12LpL5MXqEmAZ+dcA=="],
+
+ "node-fetch": ["node-fetch@2.7.0", "", { "dependencies": { "whatwg-url": "^5.0.0" }, "peerDependencies": { "encoding": "^0.1.0" }, "optionalPeers": ["encoding"] }, "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A=="],
+
+ "node-fetch-native": ["node-fetch-native@1.6.6", "", {}, "sha512-8Mc2HhqPdlIfedsuZoc3yioPuzp6b+L5jRCRY1QzuWZh2EGJVQrGppC6V6cF0bLdbW0+O2YpqCA25aF/1lvipQ=="],
+
+ "node-mock-http": ["node-mock-http@1.0.1", "", {}, "sha512-0gJJgENizp4ghds/Ywu2FCmcRsgBTmRQzYPZm61wy+Em2sBarSka0OhQS5huLBg6od1zkNpnWMCZloQDFVvOMQ=="],
+
+ "normalize-path": ["normalize-path@3.0.0", "", {}, "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA=="],
+
+ "nth-check": ["nth-check@2.1.1", "", { "dependencies": { "boolbase": "^1.0.0" } }, "sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w=="],
+
+ "ofetch": ["ofetch@1.4.1", "", { "dependencies": { "destr": "^2.0.3", "node-fetch-native": "^1.6.4", "ufo": "^1.5.4" } }, "sha512-QZj2DfGplQAr2oj9KzceK9Hwz6Whxazmn85yYeVuS3u9XTMOGMRx0kO95MQ+vLsj/S/NwBDMMLU5hpxvI6Tklw=="],
+
+ "ohash": ["ohash@2.0.11", "", {}, "sha512-RdR9FQrFwNBNXAr4GixM8YaRZRJ5PUWbKYbE5eOsrwAjJW0q2REGcf79oYPsLyskQCZG1PLN+S/K1V00joZAoQ=="],
+
+ "oniguruma-parser": ["oniguruma-parser@0.12.1", "", {}, "sha512-8Unqkvk1RYc6yq2WBYRj4hdnsAxVze8i7iPfQr8e4uSP3tRv0rpZcbGUDvxfQQcdwHt/e9PrMvGCsa8OqG9X3w=="],
+
+ "oniguruma-to-es": ["oniguruma-to-es@4.3.3", "", { "dependencies": { "oniguruma-parser": "^0.12.1", "regex": "^6.0.1", "regex-recursion": "^6.0.2" } }, "sha512-rPiZhzC3wXwE59YQMRDodUwwT9FZ9nNBwQQfsd1wfdtlKEyCdRV0avrTcSZ5xlIvGRVPd/cx6ZN45ECmS39xvg=="],
+
+ "p-limit": ["p-limit@6.2.0", "", { "dependencies": { "yocto-queue": "^1.1.1" } }, "sha512-kuUqqHNUqoIWp/c467RI4X6mmyuojY5jGutNU0wVTmEOOfcuwLqyMVoAi9MKi2Ak+5i9+nhmrK4ufZE8069kHA=="],
+
+ "p-queue": ["p-queue@8.1.0", "", { "dependencies": { "eventemitter3": "^5.0.1", "p-timeout": "^6.1.2" } }, "sha512-mxLDbbGIBEXTJL0zEx8JIylaj3xQ7Z/7eEVjcF9fJX4DBiH9oqe+oahYnlKKxm0Ci9TlWTyhSHgygxMxjIB2jw=="],
+
+ "p-timeout": ["p-timeout@6.1.4", "", {}, "sha512-MyIV3ZA/PmyBN/ud8vV9XzwTrNtR4jFrObymZYnZqMmW0zA8Z17vnT0rBgFE/TlohB+YCHqXMgZzb3Csp49vqg=="],
+
+ "package-manager-detector": ["package-manager-detector@1.3.0", "", {}, "sha512-ZsEbbZORsyHuO00lY1kV3/t72yp6Ysay6Pd17ZAlNGuGwmWDLCJxFpRs0IzfXfj1o4icJOkUEioexFHzyPurSQ=="],
+
+ "pagefind": ["pagefind@1.3.0", "", { "optionalDependencies": { "@pagefind/darwin-arm64": "1.3.0", "@pagefind/darwin-x64": "1.3.0", "@pagefind/linux-arm64": "1.3.0", "@pagefind/linux-x64": "1.3.0", "@pagefind/windows-x64": "1.3.0" }, "bin": { "pagefind": "lib/runner/bin.cjs" } }, "sha512-8KPLGT5g9s+olKMRTU9LFekLizkVIu9tes90O1/aigJ0T5LmyPqTzGJrETnSw3meSYg58YH7JTzhTTW/3z6VAw=="],
+
+ "pako": ["pako@0.2.9", "", {}, "sha512-NUcwaKxUxWrZLpDG+z/xZaCgQITkA/Dv4V/T6bw7VON6l1Xz/VnrBqrYjZQ12TamKHzITTfOEIYUj48y2KXImA=="],
+
+ "parse-entities": ["parse-entities@4.0.2", "", { "dependencies": { "@types/unist": "^2.0.0", "character-entities-legacy": "^3.0.0", "character-reference-invalid": "^2.0.0", "decode-named-character-reference": "^1.0.0", "is-alphanumerical": "^2.0.0", "is-decimal": "^2.0.0", "is-hexadecimal": "^2.0.0" } }, "sha512-GG2AQYWoLgL877gQIKeRPGO1xF9+eG1ujIb5soS5gPvLQ1y2o8FL90w2QWNdf9I361Mpp7726c+lj3U0qK1uGw=="],
+
+ "parse-latin": ["parse-latin@7.0.0", "", { "dependencies": { "@types/nlcst": "^2.0.0", "@types/unist": "^3.0.0", "nlcst-to-string": "^4.0.0", "unist-util-modify-children": "^4.0.0", "unist-util-visit-children": "^3.0.0", "vfile": "^6.0.0" } }, "sha512-mhHgobPPua5kZ98EF4HWiH167JWBfl4pvAIXXdbaVohtK7a6YBOy56kvhCqduqyo/f3yrHFWmqmiMg/BkBkYYQ=="],
+
+ "parse5": ["parse5@7.3.0", "", { "dependencies": { "entities": "^6.0.0" } }, "sha512-IInvU7fabl34qmi9gY8XOVxhYyMyuH2xUNpb2q8/Y+7552KlejkRvqvD19nMoUW/uQGGbqNpA6Tufu5FL5BZgw=="],
+
+ "picocolors": ["picocolors@1.1.1", "", {}, "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA=="],
+
+ "picomatch": ["picomatch@4.0.3", "", {}, "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q=="],
+
+ "postcss": ["postcss@8.5.6", "", { "dependencies": { "nanoid": "^3.3.11", "picocolors": "^1.1.1", "source-map-js": "^1.2.1" } }, "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg=="],
+
+ "postcss-nested": ["postcss-nested@6.2.0", "", { "dependencies": { "postcss-selector-parser": "^6.1.1" }, "peerDependencies": { "postcss": "^8.2.14" } }, "sha512-HQbt28KulC5AJzG+cZtj9kvKB93CFCdLvog1WFLf1D+xmMvPGlBstkpTEZfK5+AN9hfJocyBFCNiqyS48bpgzQ=="],
+
+ "postcss-selector-parser": ["postcss-selector-parser@6.1.2", "", { "dependencies": { "cssesc": "^3.0.0", "util-deprecate": "^1.0.2" } }, "sha512-Q8qQfPiZ+THO/3ZrOrO0cJJKfpYCagtMUkXbnEfmgUjwXg6z/WBeOyS9APBBPCTSiDV+s4SwQGu8yFsiMRIudg=="],
+
+ "prismjs": ["prismjs@1.30.0", "", {}, "sha512-DEvV2ZF2r2/63V+tK8hQvrR2ZGn10srHbXviTlcv7Kpzw8jWiNTqbVgjO3IY8RxrrOUF8VPMQQFysYYYv0YZxw=="],
+
+ "prompts": ["prompts@2.4.2", "", { "dependencies": { "kleur": "^3.0.3", "sisteransi": "^1.0.5" } }, "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q=="],
+
+ "property-information": ["property-information@7.1.0", "", {}, "sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ=="],
+
+ "radix3": ["radix3@1.1.2", "", {}, "sha512-b484I/7b8rDEdSDKckSSBA8knMpcdsXudlE/LNL639wFoHKwLbEkQFZHWEYwDC0wa0FKUcCY+GAF73Z7wxNVFA=="],
+
+ "readdirp": ["readdirp@4.1.2", "", {}, "sha512-GDhwkLfywWL2s6vEjyhri+eXmfH6j1L7JE27WhqLeYzoh/A3DBaYGEj2H/HFZCn/kMfim73FXxEJTw06WtxQwg=="],
+
+ "recma-build-jsx": ["recma-build-jsx@1.0.0", "", { "dependencies": { "@types/estree": "^1.0.0", "estree-util-build-jsx": "^3.0.0", "vfile": "^6.0.0" } }, "sha512-8GtdyqaBcDfva+GUKDr3nev3VpKAhup1+RvkMvUxURHpW7QyIvk9F5wz7Vzo06CEMSilw6uArgRqhpiUcWp8ew=="],
+
+ "recma-jsx": ["recma-jsx@1.0.0", "", { "dependencies": { "acorn-jsx": "^5.0.0", "estree-util-to-js": "^2.0.0", "recma-parse": "^1.0.0", "recma-stringify": "^1.0.0", "unified": "^11.0.0" } }, "sha512-5vwkv65qWwYxg+Atz95acp8DMu1JDSqdGkA2Of1j6rCreyFUE/gp15fC8MnGEuG1W68UKjM6x6+YTWIh7hZM/Q=="],
+
+ "recma-parse": ["recma-parse@1.0.0", "", { "dependencies": { "@types/estree": "^1.0.0", "esast-util-from-js": "^2.0.0", "unified": "^11.0.0", "vfile": "^6.0.0" } }, "sha512-OYLsIGBB5Y5wjnSnQW6t3Xg7q3fQ7FWbw/vcXtORTnyaSFscOtABg+7Pnz6YZ6c27fG1/aN8CjfwoUEUIdwqWQ=="],
+
+ "recma-stringify": ["recma-stringify@1.0.0", "", { "dependencies": { "@types/estree": "^1.0.0", "estree-util-to-js": "^2.0.0", "unified": "^11.0.0", "vfile": "^6.0.0" } }, "sha512-cjwII1MdIIVloKvC9ErQ+OgAtwHBmcZ0Bg4ciz78FtbT8In39aAYbaA7zvxQ61xVMSPE8WxhLwLbhif4Js2C+g=="],
+
+ "regex": ["regex@6.0.1", "", { "dependencies": { "regex-utilities": "^2.3.0" } }, "sha512-uorlqlzAKjKQZ5P+kTJr3eeJGSVroLKoHmquUj4zHWuR+hEyNqlXsSKlYYF5F4NI6nl7tWCs0apKJ0lmfsXAPA=="],
+
+ "regex-recursion": ["regex-recursion@6.0.2", "", { "dependencies": { "regex-utilities": "^2.3.0" } }, "sha512-0YCaSCq2VRIebiaUviZNs0cBz1kg5kVS2UKUfNIx8YVs1cN3AV7NTctO5FOKBA+UT2BPJIWZauYHPqJODG50cg=="],
+
+ "regex-utilities": ["regex-utilities@2.3.0", "", {}, "sha512-8VhliFJAWRaUiVvREIiW2NXXTmHs4vMNnSzuJVhscgmGav3g9VDxLrQndI3dZZVVdp0ZO/5v0xmX516/7M9cng=="],
+
+ "rehype": ["rehype@13.0.2", "", { "dependencies": { "@types/hast": "^3.0.0", "rehype-parse": "^9.0.0", "rehype-stringify": "^10.0.0", "unified": "^11.0.0" } }, "sha512-j31mdaRFrwFRUIlxGeuPXXKWQxet52RBQRvCmzl5eCefn/KGbomK5GMHNMsOJf55fgo3qw5tST5neDuarDYR2A=="],
+
+ "rehype-expressive-code": ["rehype-expressive-code@0.41.3", "", { "dependencies": { "expressive-code": "^0.41.3" } }, "sha512-8d9Py4c/V6I/Od2VIXFAdpiO2kc0SV2qTJsRAaqSIcM9aruW4ASLNe2kOEo1inXAAkIhpFzAHTc358HKbvpNUg=="],
+
+ "rehype-format": ["rehype-format@5.0.1", "", { "dependencies": { "@types/hast": "^3.0.0", "hast-util-format": "^1.0.0" } }, "sha512-zvmVru9uB0josBVpr946OR8ui7nJEdzZobwLOOqHb/OOD88W0Vk2SqLwoVOj0fM6IPCCO6TaV9CvQvJMWwukFQ=="],
+
+ "rehype-parse": ["rehype-parse@9.0.1", "", { "dependencies": { "@types/hast": "^3.0.0", "hast-util-from-html": "^2.0.0", "unified": "^11.0.0" } }, "sha512-ksCzCD0Fgfh7trPDxr2rSylbwq9iYDkSn8TCDmEJ49ljEUBxDVCzCHv7QNzZOfODanX4+bWQ4WZqLCRWYLfhag=="],
+
+ "rehype-raw": ["rehype-raw@7.0.0", "", { "dependencies": { "@types/hast": "^3.0.0", "hast-util-raw": "^9.0.0", "vfile": "^6.0.0" } }, "sha512-/aE8hCfKlQeA8LmyeyQvQF3eBiLRGNlfBJEvWH7ivp9sBqs7TNqBL5X3v157rM4IFETqDnIOO+z5M/biZbo9Ww=="],
+
+ "rehype-recma": ["rehype-recma@1.0.0", "", { "dependencies": { "@types/estree": "^1.0.0", "@types/hast": "^3.0.0", "hast-util-to-estree": "^3.0.0" } }, "sha512-lqA4rGUf1JmacCNWWZx0Wv1dHqMwxzsDWYMTowuplHF3xH0N/MmrZ/G3BDZnzAkRmxDadujCjaKM2hqYdCBOGw=="],
+
+ "rehype-stringify": ["rehype-stringify@10.0.1", "", { "dependencies": { "@types/hast": "^3.0.0", "hast-util-to-html": "^9.0.0", "unified": "^11.0.0" } }, "sha512-k9ecfXHmIPuFVI61B9DeLPN0qFHfawM6RsuX48hoqlaKSF61RskNjSm1lI8PhBEM0MRdLxVVm4WmTqJQccH9mA=="],
+
+ "remark-directive": ["remark-directive@3.0.1", "", { "dependencies": { "@types/mdast": "^4.0.0", "mdast-util-directive": "^3.0.0", "micromark-extension-directive": "^3.0.0", "unified": "^11.0.0" } }, "sha512-gwglrEQEZcZYgVyG1tQuA+h58EZfq5CSULw7J90AFuCTyib1thgHPoqQ+h9iFvU6R+vnZ5oNFQR5QKgGpk741A=="],
+
+ "remark-gfm": ["remark-gfm@4.0.1", "", { "dependencies": { "@types/mdast": "^4.0.0", "mdast-util-gfm": "^3.0.0", "micromark-extension-gfm": "^3.0.0", "remark-parse": "^11.0.0", "remark-stringify": "^11.0.0", "unified": "^11.0.0" } }, "sha512-1quofZ2RQ9EWdeN34S79+KExV1764+wCUGop5CPL1WGdD0ocPpu91lzPGbwWMECpEpd42kJGQwzRfyov9j4yNg=="],
+
+ "remark-mdx": ["remark-mdx@3.1.0", "", { "dependencies": { "mdast-util-mdx": "^3.0.0", "micromark-extension-mdxjs": "^3.0.0" } }, "sha512-Ngl/H3YXyBV9RcRNdlYsZujAmhsxwzxpDzpDEhFBVAGthS4GDgnctpDjgFl/ULx5UEDzqtW1cyBSNKqYYrqLBA=="],
+
+ "remark-parse": ["remark-parse@11.0.0", "", { "dependencies": { "@types/mdast": "^4.0.0", "mdast-util-from-markdown": "^2.0.0", "micromark-util-types": "^2.0.0", "unified": "^11.0.0" } }, "sha512-FCxlKLNGknS5ba/1lmpYijMUzX2esxW5xQqjWxw2eHFfS2MSdaHVINFmhjo+qN1WhZhNimq0dZATN9pH0IDrpA=="],
+
+ "remark-rehype": ["remark-rehype@11.1.2", "", { "dependencies": { "@types/hast": "^3.0.0", "@types/mdast": "^4.0.0", "mdast-util-to-hast": "^13.0.0", "unified": "^11.0.0", "vfile": "^6.0.0" } }, "sha512-Dh7l57ianaEoIpzbp0PC9UKAdCSVklD8E5Rpw7ETfbTl3FqcOOgq5q2LVDhgGCkaBv7p24JXikPdvhhmHvKMsw=="],
+
+ "remark-smartypants": ["remark-smartypants@3.0.2", "", { "dependencies": { "retext": "^9.0.0", "retext-smartypants": "^6.0.0", "unified": "^11.0.4", "unist-util-visit": "^5.0.0" } }, "sha512-ILTWeOriIluwEvPjv67v7Blgrcx+LZOkAUVtKI3putuhlZm84FnqDORNXPPm+HY3NdZOMhyDwZ1E+eZB/Df5dA=="],
+
+ "remark-stringify": ["remark-stringify@11.0.0", "", { "dependencies": { "@types/mdast": "^4.0.0", "mdast-util-to-markdown": "^2.0.0", "unified": "^11.0.0" } }, "sha512-1OSmLd3awB/t8qdoEOMazZkNsfVTeY4fTsgzcQFdXNq8ToTN4ZGwrMnlda4K6smTFKD+GRV6O48i6Z4iKgPPpw=="],
+
+ "restructure": ["restructure@3.0.2", "", {}, "sha512-gSfoiOEA0VPE6Tukkrr7I0RBdE0s7H1eFCDBk05l1KIQT1UIKNc5JZy6jdyW6eYH3aR3g5b3PuL77rq0hvwtAw=="],
+
+ "retext": ["retext@9.0.0", "", { "dependencies": { "@types/nlcst": "^2.0.0", "retext-latin": "^4.0.0", "retext-stringify": "^4.0.0", "unified": "^11.0.0" } }, "sha512-sbMDcpHCNjvlheSgMfEcVrZko3cDzdbe1x/e7G66dFp0Ff7Mldvi2uv6JkJQzdRcvLYE8CA8Oe8siQx8ZOgTcA=="],
+
+ "retext-latin": ["retext-latin@4.0.0", "", { "dependencies": { "@types/nlcst": "^2.0.0", "parse-latin": "^7.0.0", "unified": "^11.0.0" } }, "sha512-hv9woG7Fy0M9IlRQloq/N6atV82NxLGveq+3H2WOi79dtIYWN8OaxogDm77f8YnVXJL2VD3bbqowu5E3EMhBYA=="],
+
+ "retext-smartypants": ["retext-smartypants@6.2.0", "", { "dependencies": { "@types/nlcst": "^2.0.0", "nlcst-to-string": "^4.0.0", "unist-util-visit": "^5.0.0" } }, "sha512-kk0jOU7+zGv//kfjXEBjdIryL1Acl4i9XNkHxtM7Tm5lFiCog576fjNC9hjoR7LTKQ0DsPWy09JummSsH1uqfQ=="],
+
+ "retext-stringify": ["retext-stringify@4.0.0", "", { "dependencies": { "@types/nlcst": "^2.0.0", "nlcst-to-string": "^4.0.0", "unified": "^11.0.0" } }, "sha512-rtfN/0o8kL1e+78+uxPTqu1Klt0yPzKuQ2BfWwwfgIUSayyzxpM1PJzkKt4V8803uB9qSy32MvI7Xep9khTpiA=="],
+
+ "rollup": ["rollup@4.45.1", "", { "dependencies": { "@types/estree": "1.0.8" }, "optionalDependencies": { "@rollup/rollup-android-arm-eabi": "4.45.1", "@rollup/rollup-android-arm64": "4.45.1", "@rollup/rollup-darwin-arm64": "4.45.1", "@rollup/rollup-darwin-x64": "4.45.1", "@rollup/rollup-freebsd-arm64": "4.45.1", "@rollup/rollup-freebsd-x64": "4.45.1", "@rollup/rollup-linux-arm-gnueabihf": "4.45.1", "@rollup/rollup-linux-arm-musleabihf": "4.45.1", "@rollup/rollup-linux-arm64-gnu": "4.45.1", "@rollup/rollup-linux-arm64-musl": "4.45.1", "@rollup/rollup-linux-loongarch64-gnu": "4.45.1", "@rollup/rollup-linux-powerpc64le-gnu": "4.45.1", "@rollup/rollup-linux-riscv64-gnu": "4.45.1", "@rollup/rollup-linux-riscv64-musl": "4.45.1", "@rollup/rollup-linux-s390x-gnu": "4.45.1", "@rollup/rollup-linux-x64-gnu": "4.45.1", "@rollup/rollup-linux-x64-musl": "4.45.1", "@rollup/rollup-win32-arm64-msvc": "4.45.1", "@rollup/rollup-win32-ia32-msvc": "4.45.1", "@rollup/rollup-win32-x64-msvc": "4.45.1", "fsevents": "~2.3.2" }, "bin": { "rollup": "dist/bin/rollup" } }, "sha512-4iya7Jb76fVpQyLoiVpzUrsjQ12r3dM7fIVz+4NwoYvZOShknRmiv+iu9CClZml5ZLGb0XMcYLutK6w9tgxHDw=="],
+
+ "sax": ["sax@1.4.1", "", {}, "sha512-+aWOz7yVScEGoKNd4PA10LZ8sk0A/z5+nXQG5giUO5rprX9jgYsTdov9qCchZiPIZezbZH+jRut8nPodFAX4Jg=="],
+
+ "semver": ["semver@7.7.2", "", { "bin": { "semver": "bin/semver.js" } }, "sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA=="],
+
+ "sharp": ["sharp@0.34.3", "", { "dependencies": { "color": "^4.2.3", "detect-libc": "^2.0.4", "semver": "^7.7.2" }, "optionalDependencies": { "@img/sharp-darwin-arm64": "0.34.3", "@img/sharp-darwin-x64": "0.34.3", "@img/sharp-libvips-darwin-arm64": "1.2.0", "@img/sharp-libvips-darwin-x64": "1.2.0", "@img/sharp-libvips-linux-arm": "1.2.0", "@img/sharp-libvips-linux-arm64": "1.2.0", "@img/sharp-libvips-linux-ppc64": "1.2.0", "@img/sharp-libvips-linux-s390x": "1.2.0", "@img/sharp-libvips-linux-x64": "1.2.0", "@img/sharp-libvips-linuxmusl-arm64": "1.2.0", "@img/sharp-libvips-linuxmusl-x64": "1.2.0", "@img/sharp-linux-arm": "0.34.3", "@img/sharp-linux-arm64": "0.34.3", "@img/sharp-linux-ppc64": "0.34.3", "@img/sharp-linux-s390x": "0.34.3", "@img/sharp-linux-x64": "0.34.3", "@img/sharp-linuxmusl-arm64": "0.34.3", "@img/sharp-linuxmusl-x64": "0.34.3", "@img/sharp-wasm32": "0.34.3", "@img/sharp-win32-arm64": "0.34.3", "@img/sharp-win32-ia32": "0.34.3", "@img/sharp-win32-x64": "0.34.3" } }, "sha512-eX2IQ6nFohW4DbvHIOLRB3MHFpYqaqvXd3Tp5e/T/dSH83fxaNJQRvDMhASmkNTsNTVF2/OOopzRCt7xokgPfg=="],
+
+ "shiki": ["shiki@3.8.1", "", { "dependencies": { "@shikijs/core": "3.8.1", "@shikijs/engine-javascript": "3.8.1", "@shikijs/engine-oniguruma": "3.8.1", "@shikijs/langs": "3.8.1", "@shikijs/themes": "3.8.1", "@shikijs/types": "3.8.1", "@shikijs/vscode-textmate": "^10.0.2", "@types/hast": "^3.0.4" } }, "sha512-+MYIyjwGPCaegbpBeFN9+oOifI8CKiKG3awI/6h3JeT85c//H2wDW/xCJEGuQ5jPqtbboKNqNy+JyX9PYpGwNg=="],
+
+ "simple-swizzle": ["simple-swizzle@0.2.2", "", { "dependencies": { "is-arrayish": "^0.3.1" } }, "sha512-JA//kQgZtbuY83m+xT+tXJkmJncGMTFT+C+g2h2R9uxkYIrE2yy9sgmcLhCnw57/WSD+Eh3J97FPEDFnbXnDUg=="],
+
+ "sisteransi": ["sisteransi@1.0.5", "", {}, "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg=="],
+
+ "sitemap": ["sitemap@8.0.0", "", { "dependencies": { "@types/node": "^17.0.5", "@types/sax": "^1.2.1", "arg": "^5.0.0", "sax": "^1.2.4" }, "bin": { "sitemap": "dist/cli.js" } }, "sha512-+AbdxhM9kJsHtruUF39bwS/B0Fytw6Fr1o4ZAIAEqA6cke2xcoO2GleBw9Zw7nRzILVEgz7zBM5GiTJjie1G9A=="],
+
+ "smol-toml": ["smol-toml@1.4.1", "", {}, "sha512-CxdwHXyYTONGHThDbq5XdwbFsuY4wlClRGejfE2NtwUtiHYsP1QtNsHb/hnj31jKYSchztJsaA8pSQoVzkfCFg=="],
+
+ "source-map": ["source-map@0.7.4", "", {}, "sha512-l3BikUxvPOcn5E74dZiq5BGsTb5yEwhaTSzccU6t4sDOH8NWJCstKO5QT2CvtFoK6F0saL7p9xHAqHOlCPJygA=="],
+
+ "source-map-js": ["source-map-js@1.2.1", "", {}, "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA=="],
+
+ "space-separated-tokens": ["space-separated-tokens@2.0.2", "", {}, "sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q=="],
+
+ "starlight-sidebar-topics": ["starlight-sidebar-topics@0.6.0", "", { "dependencies": { "picomatch": "^4.0.2" }, "peerDependencies": { "@astrojs/starlight": ">=0.32.0" } }, "sha512-ysmOR7zaHYKtk18/mpW4MbEMDioR/ZBsisu9bdQrq0v9BlHWpW7gAdWlqFWO9zdv1P7l0Mo1WKd0wJ0UtqOVEQ=="],
+
+ "starlight-theme-rapide": ["starlight-theme-rapide@0.5.1", "", { "peerDependencies": { "@astrojs/starlight": ">=0.34.0" } }, "sha512-QRF6mzcYHLEX5UpUvOPXVVwISS298siIJLcKextoMLhXcnF12nX+IYJ0LNxFk9XaPbX9uDXIieSBJf5Pztkteg=="],
+
+ "stream-replace-string": ["stream-replace-string@2.0.0", "", {}, "sha512-TlnjJ1C0QrmxRNrON00JvaFFlNh5TTG00APw23j74ET7gkQpTASi6/L2fuiav8pzK715HXtUeClpBTw2NPSn6w=="],
+
+ "string-width": ["string-width@7.2.0", "", { "dependencies": { "emoji-regex": "^10.3.0", "get-east-asian-width": "^1.0.0", "strip-ansi": "^7.1.0" } }, "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ=="],
+
+ "stringify-entities": ["stringify-entities@4.0.4", "", { "dependencies": { "character-entities-html4": "^2.0.0", "character-entities-legacy": "^3.0.0" } }, "sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg=="],
+
+ "strip-ansi": ["strip-ansi@7.1.0", "", { "dependencies": { "ansi-regex": "^6.0.1" } }, "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ=="],
+
+ "style-to-js": ["style-to-js@1.1.17", "", { "dependencies": { "style-to-object": "1.0.9" } }, "sha512-xQcBGDxJb6jjFCTzvQtfiPn6YvvP2O8U1MDIPNfJQlWMYfktPy+iGsHE7cssjs7y84d9fQaK4UF3RIJaAHSoYA=="],
+
+ "style-to-object": ["style-to-object@1.0.9", "", { "dependencies": { "inline-style-parser": "0.2.4" } }, "sha512-G4qppLgKu/k6FwRpHiGiKPaPTFcG3g4wNVX/Qsfu+RqQM30E7Tyu/TEgxcL9PNLF5pdRLwQdE3YKKf+KF2Dzlw=="],
+
+ "tiny-inflate": ["tiny-inflate@1.0.3", "", {}, "sha512-pkY1fj1cKHb2seWDy0B16HeWyczlJA9/WW3u3c4z/NiWDsO3DOU5D7nhTLE9CF0yXv/QZFY7sEJmj24dK+Rrqw=="],
+
+ "tinyexec": ["tinyexec@0.3.2", "", {}, "sha512-KQQR9yN7R5+OSwaK0XQoj22pwHoTlgYqmUscPYoknOoWCWfj/5/ABTMRi69FrKU5ffPVh5QcFikpWJI/P1ocHA=="],
+
+ "tinyglobby": ["tinyglobby@0.2.14", "", { "dependencies": { "fdir": "^6.4.4", "picomatch": "^4.0.2" } }, "sha512-tX5e7OM1HnYr2+a2C/4V0htOcSQcoSTH9KgJnVvNm5zm/cyEWKJ7j7YutsH9CxMdtOkkLFy2AHrMci9IM8IPZQ=="],
+
+ "tr46": ["tr46@0.0.3", "", {}, "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw=="],
+
+ "trim-lines": ["trim-lines@3.0.1", "", {}, "sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg=="],
+
+ "trough": ["trough@2.2.0", "", {}, "sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw=="],
+
+ "tsconfck": ["tsconfck@3.1.6", "", { "peerDependencies": { "typescript": "^5.0.0" }, "optionalPeers": ["typescript"], "bin": { "tsconfck": "bin/tsconfck.js" } }, "sha512-ks6Vjr/jEw0P1gmOVwutM3B7fWxoWBL2KRDb1JfqGVawBmO5UsvmWOQFGHBPl5yxYz4eERr19E6L7NMv+Fej4w=="],
+
+ "tslib": ["tslib@2.8.1", "", {}, "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w=="],
+
+ "type-fest": ["type-fest@4.41.0", "", {}, "sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA=="],
+
+ "typescript": ["typescript@5.8.3", "", { "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" } }, "sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ=="],
+
+ "ufo": ["ufo@1.6.1", "", {}, "sha512-9a4/uxlTWJ4+a5i0ooc1rU7C7YOw3wT+UGqdeNNHWnOF9qcMBgLRS+4IYUqbczewFx4mLEig6gawh7X6mFlEkA=="],
+
+ "ultrahtml": ["ultrahtml@1.6.0", "", {}, "sha512-R9fBn90VTJrqqLDwyMph+HGne8eqY1iPfYhPzZrvKpIfwkWZbcYlfpsb8B9dTvBfpy1/hqAD7Wi8EKfP9e8zdw=="],
+
+ "uncrypto": ["uncrypto@0.1.3", "", {}, "sha512-Ql87qFHB3s/De2ClA9e0gsnS6zXG27SkTiSJwjCc9MebbfapQfuPzumMIUMi38ezPZVNFcHI9sUIepeQfw8J8Q=="],
+
+ "unicode-properties": ["unicode-properties@1.4.1", "", { "dependencies": { "base64-js": "^1.3.0", "unicode-trie": "^2.0.0" } }, "sha512-CLjCCLQ6UuMxWnbIylkisbRj31qxHPAurvena/0iwSVbQ2G1VY5/HjV0IRabOEbDHlzZlRdCrD4NhB0JtU40Pg=="],
+
+ "unicode-trie": ["unicode-trie@2.0.0", "", { "dependencies": { "pako": "^0.2.5", "tiny-inflate": "^1.0.0" } }, "sha512-x7bc76x0bm4prf1VLg79uhAzKw8DVboClSN5VxJuQ+LKDOVEW9CdH+VY7SP+vX7xCYQqzzgQpFqz15zeLvAtZQ=="],
+
+ "unified": ["unified@11.0.5", "", { "dependencies": { "@types/unist": "^3.0.0", "bail": "^2.0.0", "devlop": "^1.0.0", "extend": "^3.0.0", "is-plain-obj": "^4.0.0", "trough": "^2.0.0", "vfile": "^6.0.0" } }, "sha512-xKvGhPWw3k84Qjh8bI3ZeJjqnyadK+GEFtazSfZv/rKeTkTjOJho6mFqh2SM96iIcZokxiOpg78GazTSg8+KHA=="],
+
+ "unifont": ["unifont@0.5.2", "", { "dependencies": { "css-tree": "^3.0.0", "ofetch": "^1.4.1", "ohash": "^2.0.0" } }, "sha512-LzR4WUqzH9ILFvjLAUU7dK3Lnou/qd5kD+IakBtBK4S15/+x2y9VX+DcWQv6s551R6W+vzwgVS6tFg3XggGBgg=="],
+
+ "unist-util-find-after": ["unist-util-find-after@5.0.0", "", { "dependencies": { "@types/unist": "^3.0.0", "unist-util-is": "^6.0.0" } }, "sha512-amQa0Ep2m6hE2g72AugUItjbuM8X8cGQnFoHk0pGfrFeT9GZhzN5SW8nRsiGKK7Aif4CrACPENkA6P/Lw6fHGQ=="],
+
+ "unist-util-is": ["unist-util-is@6.0.0", "", { "dependencies": { "@types/unist": "^3.0.0" } }, "sha512-2qCTHimwdxLfz+YzdGfkqNlH0tLi9xjTnHddPmJwtIG9MGsdbutfTc4P+haPD7l7Cjxf/WZj+we5qfVPvvxfYw=="],
+
+ "unist-util-modify-children": ["unist-util-modify-children@4.0.0", "", { "dependencies": { "@types/unist": "^3.0.0", "array-iterate": "^2.0.0" } }, "sha512-+tdN5fGNddvsQdIzUF3Xx82CU9sMM+fA0dLgR9vOmT0oPT2jH+P1nd5lSqfCfXAw+93NhcXNY2qqvTUtE4cQkw=="],
+
+ "unist-util-position": ["unist-util-position@5.0.0", "", { "dependencies": { "@types/unist": "^3.0.0" } }, "sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA=="],
+
+ "unist-util-position-from-estree": ["unist-util-position-from-estree@2.0.0", "", { "dependencies": { "@types/unist": "^3.0.0" } }, "sha512-KaFVRjoqLyF6YXCbVLNad/eS4+OfPQQn2yOd7zF/h5T/CSL2v8NpN6a5TPvtbXthAGw5nG+PuTtq+DdIZr+cRQ=="],
+
+ "unist-util-remove-position": ["unist-util-remove-position@5.0.0", "", { "dependencies": { "@types/unist": "^3.0.0", "unist-util-visit": "^5.0.0" } }, "sha512-Hp5Kh3wLxv0PHj9m2yZhhLt58KzPtEYKQQ4yxfYFEO7EvHwzyDYnduhHnY1mDxoqr7VUwVuHXk9RXKIiYS1N8Q=="],
+
+ "unist-util-stringify-position": ["unist-util-stringify-position@4.0.0", "", { "dependencies": { "@types/unist": "^3.0.0" } }, "sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ=="],
+
+ "unist-util-visit": ["unist-util-visit@5.0.0", "", { "dependencies": { "@types/unist": "^3.0.0", "unist-util-is": "^6.0.0", "unist-util-visit-parents": "^6.0.0" } }, "sha512-MR04uvD+07cwl/yhVuVWAtw+3GOR/knlL55Nd/wAdblk27GCVt3lqpTivy/tkJcZoNPzTwS1Y+KMojlLDhoTzg=="],
+
+ "unist-util-visit-children": ["unist-util-visit-children@3.0.0", "", { "dependencies": { "@types/unist": "^3.0.0" } }, "sha512-RgmdTfSBOg04sdPcpTSD1jzoNBjt9a80/ZCzp5cI9n1qPzLZWF9YdvWGN2zmTumP1HWhXKdUWexjy/Wy/lJ7tA=="],
+
+ "unist-util-visit-parents": ["unist-util-visit-parents@6.0.1", "", { "dependencies": { "@types/unist": "^3.0.0", "unist-util-is": "^6.0.0" } }, "sha512-L/PqWzfTP9lzzEa6CKs0k2nARxTdZduw3zyh8d2NVBnsyvHjSX4TWse388YrrQKbvI8w20fGjGlhgT96WwKykw=="],
+
+ "unstorage": ["unstorage@1.16.1", "", { "dependencies": { "anymatch": "^3.1.3", "chokidar": "^4.0.3", "destr": "^2.0.5", "h3": "^1.15.3", "lru-cache": "^10.4.3", "node-fetch-native": "^1.6.6", "ofetch": "^1.4.1", "ufo": "^1.6.1" }, "peerDependencies": { "@azure/app-configuration": "^1.8.0", "@azure/cosmos": "^4.2.0", "@azure/data-tables": "^13.3.0", "@azure/identity": "^4.6.0", "@azure/keyvault-secrets": "^4.9.0", "@azure/storage-blob": "^12.26.0", "@capacitor/preferences": "^6.0.3 || ^7.0.0", "@deno/kv": ">=0.9.0", "@netlify/blobs": "^6.5.0 || ^7.0.0 || ^8.1.0 || ^9.0.0 || ^10.0.0", "@planetscale/database": "^1.19.0", "@upstash/redis": "^1.34.3", "@vercel/blob": ">=0.27.1", "@vercel/kv": "^1.0.1", "aws4fetch": "^1.0.20", "db0": ">=0.2.1", "idb-keyval": "^6.2.1", "ioredis": "^5.4.2", "uploadthing": "^7.4.4" }, "optionalPeers": ["@azure/app-configuration", "@azure/cosmos", "@azure/data-tables", "@azure/identity", "@azure/keyvault-secrets", "@azure/storage-blob", "@capacitor/preferences", "@deno/kv", "@netlify/blobs", "@planetscale/database", "@upstash/redis", "@vercel/blob", "@vercel/kv", "aws4fetch", "db0", "idb-keyval", "ioredis", "uploadthing"] }, "sha512-gdpZ3guLDhz+zWIlYP1UwQ259tG5T5vYRzDaHMkQ1bBY1SQPutvZnrRjTFaWUUpseErJIgAZS51h6NOcZVZiqQ=="],
+
+ "util-deprecate": ["util-deprecate@1.0.2", "", {}, "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw=="],
+
+ "vfile": ["vfile@6.0.3", "", { "dependencies": { "@types/unist": "^3.0.0", "vfile-message": "^4.0.0" } }, "sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q=="],
+
+ "vfile-location": ["vfile-location@5.0.3", "", { "dependencies": { "@types/unist": "^3.0.0", "vfile": "^6.0.0" } }, "sha512-5yXvWDEgqeiYiBe1lbxYF7UMAIm/IcopxMHrMQDq3nvKcjPKIhZklUKL+AE7J7uApI4kwe2snsK+eI6UTj9EHg=="],
+
+ "vfile-message": ["vfile-message@4.0.2", "", { "dependencies": { "@types/unist": "^3.0.0", "unist-util-stringify-position": "^4.0.0" } }, "sha512-jRDZ1IMLttGj41KcZvlrYAaI3CfqpLpfpf+Mfig13viT6NKvRzWZ+lXz0Y5D60w6uJIBAOGq9mSHf0gktF0duw=="],
+
+ "vite": ["vite@6.3.5", "", { "dependencies": { "esbuild": "^0.25.0", "fdir": "^6.4.4", "picomatch": "^4.0.2", "postcss": "^8.5.3", "rollup": "^4.34.9", "tinyglobby": "^0.2.13" }, "optionalDependencies": { "fsevents": "~2.3.3" }, "peerDependencies": { "@types/node": "^18.0.0 || ^20.0.0 || >=22.0.0", "jiti": ">=1.21.0", "less": "*", "lightningcss": "^1.21.0", "sass": "*", "sass-embedded": "*", "stylus": "*", "sugarss": "*", "terser": "^5.16.0", "tsx": "^4.8.1", "yaml": "^2.4.2" }, "optionalPeers": ["@types/node", "jiti", "less", "lightningcss", "sass", "sass-embedded", "stylus", "sugarss", "terser", "tsx", "yaml"], "bin": { "vite": "bin/vite.js" } }, "sha512-cZn6NDFE7wdTpINgs++ZJ4N49W2vRp8LCKrn3Ob1kYNtOo21vfDoaV5GzBfLU4MovSAB8uNRm4jgzVQZ+mBzPQ=="],
+
+ "vitefu": ["vitefu@1.1.1", "", { "peerDependencies": { "vite": "^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0-beta.0" }, "optionalPeers": ["vite"] }, "sha512-B/Fegf3i8zh0yFbpzZ21amWzHmuNlLlmJT6n7bu5e+pCHUKQIfXSYokrqOBGEMMe9UG2sostKQF9mml/vYaWJQ=="],
+
+ "web-namespaces": ["web-namespaces@2.0.1", "", {}, "sha512-bKr1DkiNa2krS7qxNtdrtHAmzuYGFQLiQ13TsorsdT6ULTkPLKuu5+GsFpDlg6JFjUTwX2DyhMPG2be8uPrqsQ=="],
+
+ "webidl-conversions": ["webidl-conversions@3.0.1", "", {}, "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ=="],
+
+ "whatwg-url": ["whatwg-url@5.0.0", "", { "dependencies": { "tr46": "~0.0.3", "webidl-conversions": "^3.0.0" } }, "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw=="],
+
+ "which-pm-runs": ["which-pm-runs@1.1.0", "", {}, "sha512-n1brCuqClxfFfq/Rb0ICg9giSZqCS+pLtccdag6C2HyufBrh3fBOiy9nb6ggRMvWOVH5GrdJskj5iGTZNxd7SA=="],
+
+ "widest-line": ["widest-line@5.0.0", "", { "dependencies": { "string-width": "^7.0.0" } }, "sha512-c9bZp7b5YtRj2wOe6dlj32MK+Bx/M/d+9VB2SHM1OtsUHR0aV0tdP6DWh/iMt0kWi1t5g1Iudu6hQRNd1A4PVA=="],
+
+ "wrap-ansi": ["wrap-ansi@9.0.0", "", { "dependencies": { "ansi-styles": "^6.2.1", "string-width": "^7.0.0", "strip-ansi": "^7.1.0" } }, "sha512-G8ura3S+3Z2G+mkgNRq8dqaFZAuxfsxpBB8OCTGRTCtp+l/v9nbFNmCUP1BZMts3G1142MsZfn6eeUKrr4PD1Q=="],
+
+ "xxhash-wasm": ["xxhash-wasm@1.1.0", "", {}, "sha512-147y/6YNh+tlp6nd/2pWq38i9h6mz/EuQ6njIrmW8D1BS5nCqs0P6DG+m6zTGnNz5I+uhZ0SHxBs9BsPrwcKDA=="],
+
+ "yargs-parser": ["yargs-parser@21.1.1", "", {}, "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw=="],
+
+ "yocto-queue": ["yocto-queue@1.2.1", "", {}, "sha512-AyeEbWOu/TAXdxlV9wmGcR0+yh2j3vYPGOECcIj2S7MkrLyC7ne+oye2BKTItt0ii2PHk4cDy+95+LshzbXnGg=="],
+
+ "yocto-spinner": ["yocto-spinner@0.2.3", "", { "dependencies": { "yoctocolors": "^2.1.1" } }, "sha512-sqBChb33loEnkoXte1bLg45bEBsOP9N1kzQh5JZNKj/0rik4zAPTNSAVPj3uQAdc6slYJ0Ksc403G2XgxsJQFQ=="],
+
+ "yoctocolors": ["yoctocolors@2.1.1", "", {}, "sha512-GQHQqAopRhwU8Kt1DDM8NjibDXHC8eoh1erhGAJPEyveY9qqVeXvVikNKrDz69sHowPMorbPUrH/mx8c50eiBQ=="],
+
+ "zod": ["zod@3.25.76", "", {}, "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ=="],
+
+ "zod-to-json-schema": ["zod-to-json-schema@3.24.6", "", { "peerDependencies": { "zod": "^3.24.1" } }, "sha512-h/z3PKvcTcTetyjl1fkj79MHNEjm+HpD6NXheWjzOekY7kV+lwDYnHw+ivHkijnCSMz1yJaWBD9vu/Fcmk+vEg=="],
+
+ "zod-to-ts": ["zod-to-ts@1.2.0", "", { "peerDependencies": { "typescript": "^4.9.4 || ^5.0.2", "zod": "^3" } }, "sha512-x30XE43V+InwGpvTySRNz9kB7qFU8DlyEy7BsSTCHPH1R0QasMmHWZDCzYm6bVXtj/9NNJAZF3jW8rzFvH5OFA=="],
+
+ "zwitch": ["zwitch@2.0.4", "", {}, "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A=="],
+
+ "@rollup/pluginutils/estree-walker": ["estree-walker@2.0.2", "", {}, "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w=="],
+
+ "ansi-align/string-width": ["string-width@4.2.3", "", { "dependencies": { "emoji-regex": "^8.0.0", "is-fullwidth-code-point": "^3.0.0", "strip-ansi": "^6.0.1" } }, "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g=="],
+
+ "anymatch/picomatch": ["picomatch@2.3.1", "", {}, "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA=="],
+
+ "astro/sharp": ["sharp@0.33.5", "", { "dependencies": { "color": "^4.2.3", "detect-libc": "^2.0.3", "semver": "^7.6.3" }, "optionalDependencies": { "@img/sharp-darwin-arm64": "0.33.5", "@img/sharp-darwin-x64": "0.33.5", "@img/sharp-libvips-darwin-arm64": "1.0.4", "@img/sharp-libvips-darwin-x64": "1.0.4", "@img/sharp-libvips-linux-arm": "1.0.5", "@img/sharp-libvips-linux-arm64": "1.0.4", "@img/sharp-libvips-linux-s390x": "1.0.4", "@img/sharp-libvips-linux-x64": "1.0.4", "@img/sharp-libvips-linuxmusl-arm64": "1.0.4", "@img/sharp-libvips-linuxmusl-x64": "1.0.4", "@img/sharp-linux-arm": "0.33.5", "@img/sharp-linux-arm64": "0.33.5", "@img/sharp-linux-s390x": "0.33.5", "@img/sharp-linux-x64": "0.33.5", "@img/sharp-linuxmusl-arm64": "0.33.5", "@img/sharp-linuxmusl-x64": "0.33.5", "@img/sharp-wasm32": "0.33.5", "@img/sharp-win32-ia32": "0.33.5", "@img/sharp-win32-x64": "0.33.5" } }, "sha512-haPVm1EkS9pgvHrQ/F3Xy+hgcuMV0Wm9vfIBSiwZ05k+xgb0PkBQpGsAA/oWdDobNaZTH5ppvHtzCFbnSEwHVw=="],
+
+ "hast-util-to-parse5/property-information": ["property-information@6.5.0", "", {}, "sha512-PgTgs/BlvHxOu8QuEN7wi5A0OmXaBcHpmCSTehcs6Uuu9IkDIEo13Hy7n898RHfrQ49vKCoGeWZSaAK01nwVig=="],
+
+ "parse-entities/@types/unist": ["@types/unist@2.0.11", "", {}, "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA=="],
+
+ "prompts/kleur": ["kleur@3.0.3", "", {}, "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w=="],
+
+ "ansi-align/string-width/emoji-regex": ["emoji-regex@8.0.0", "", {}, "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="],
+
+ "ansi-align/string-width/strip-ansi": ["strip-ansi@6.0.1", "", { "dependencies": { "ansi-regex": "^5.0.1" } }, "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A=="],
+
+ "astro/sharp/@img/sharp-darwin-arm64": ["@img/sharp-darwin-arm64@0.33.5", "", { "optionalDependencies": { "@img/sharp-libvips-darwin-arm64": "1.0.4" }, "os": "darwin", "cpu": "arm64" }, "sha512-UT4p+iz/2H4twwAoLCqfA9UH5pI6DggwKEGuaPy7nCVQ8ZsiY5PIcrRvD1DzuY3qYL07NtIQcWnBSY/heikIFQ=="],
+
+ "astro/sharp/@img/sharp-darwin-x64": ["@img/sharp-darwin-x64@0.33.5", "", { "optionalDependencies": { "@img/sharp-libvips-darwin-x64": "1.0.4" }, "os": "darwin", "cpu": "x64" }, "sha512-fyHac4jIc1ANYGRDxtiqelIbdWkIuQaI84Mv45KvGRRxSAa7o7d1ZKAOBaYbnepLC1WqxfpimdeWfvqqSGwR2Q=="],
+
+ "astro/sharp/@img/sharp-libvips-darwin-arm64": ["@img/sharp-libvips-darwin-arm64@1.0.4", "", { "os": "darwin", "cpu": "arm64" }, "sha512-XblONe153h0O2zuFfTAbQYAX2JhYmDHeWikp1LM9Hul9gVPjFY427k6dFEcOL72O01QxQsWi761svJ/ev9xEDg=="],
+
+ "astro/sharp/@img/sharp-libvips-darwin-x64": ["@img/sharp-libvips-darwin-x64@1.0.4", "", { "os": "darwin", "cpu": "x64" }, "sha512-xnGR8YuZYfJGmWPvmlunFaWJsb9T/AO2ykoP3Fz/0X5XV2aoYBPkX6xqCQvUTKKiLddarLaxpzNe+b1hjeWHAQ=="],
+
+ "astro/sharp/@img/sharp-libvips-linux-arm": ["@img/sharp-libvips-linux-arm@1.0.5", "", { "os": "linux", "cpu": "arm" }, "sha512-gvcC4ACAOPRNATg/ov8/MnbxFDJqf/pDePbBnuBDcjsI8PssmjoKMAz4LtLaVi+OnSb5FK/yIOamqDwGmXW32g=="],
+
+ "astro/sharp/@img/sharp-libvips-linux-arm64": ["@img/sharp-libvips-linux-arm64@1.0.4", "", { "os": "linux", "cpu": "arm64" }, "sha512-9B+taZ8DlyyqzZQnoeIvDVR/2F4EbMepXMc/NdVbkzsJbzkUjhXv/70GQJ7tdLA4YJgNP25zukcxpX2/SueNrA=="],
+
+ "astro/sharp/@img/sharp-libvips-linux-s390x": ["@img/sharp-libvips-linux-s390x@1.0.4", "", { "os": "linux", "cpu": "s390x" }, "sha512-u7Wz6ntiSSgGSGcjZ55im6uvTrOxSIS8/dgoVMoiGE9I6JAfU50yH5BoDlYA1tcuGS7g/QNtetJnxA6QEsCVTA=="],
+
+ "astro/sharp/@img/sharp-libvips-linux-x64": ["@img/sharp-libvips-linux-x64@1.0.4", "", { "os": "linux", "cpu": "x64" }, "sha512-MmWmQ3iPFZr0Iev+BAgVMb3ZyC4KeFc3jFxnNbEPas60e1cIfevbtuyf9nDGIzOaW9PdnDciJm+wFFaTlj5xYw=="],
+
+ "astro/sharp/@img/sharp-libvips-linuxmusl-arm64": ["@img/sharp-libvips-linuxmusl-arm64@1.0.4", "", { "os": "linux", "cpu": "arm64" }, "sha512-9Ti+BbTYDcsbp4wfYib8Ctm1ilkugkA/uscUn6UXK1ldpC1JjiXbLfFZtRlBhjPZ5o1NCLiDbg8fhUPKStHoTA=="],
+
+ "astro/sharp/@img/sharp-libvips-linuxmusl-x64": ["@img/sharp-libvips-linuxmusl-x64@1.0.4", "", { "os": "linux", "cpu": "x64" }, "sha512-viYN1KX9m+/hGkJtvYYp+CCLgnJXwiQB39damAO7WMdKWlIhmYTfHjwSbQeUK/20vY154mwezd9HflVFM1wVSw=="],
+
+ "astro/sharp/@img/sharp-linux-arm": ["@img/sharp-linux-arm@0.33.5", "", { "optionalDependencies": { "@img/sharp-libvips-linux-arm": "1.0.5" }, "os": "linux", "cpu": "arm" }, "sha512-JTS1eldqZbJxjvKaAkxhZmBqPRGmxgu+qFKSInv8moZ2AmT5Yib3EQ1c6gp493HvrvV8QgdOXdyaIBrhvFhBMQ=="],
+
+ "astro/sharp/@img/sharp-linux-arm64": ["@img/sharp-linux-arm64@0.33.5", "", { "optionalDependencies": { "@img/sharp-libvips-linux-arm64": "1.0.4" }, "os": "linux", "cpu": "arm64" }, "sha512-JMVv+AMRyGOHtO1RFBiJy/MBsgz0x4AWrT6QoEVVTyh1E39TrCUpTRI7mx9VksGX4awWASxqCYLCV4wBZHAYxA=="],
+
+ "astro/sharp/@img/sharp-linux-s390x": ["@img/sharp-linux-s390x@0.33.5", "", { "optionalDependencies": { "@img/sharp-libvips-linux-s390x": "1.0.4" }, "os": "linux", "cpu": "s390x" }, "sha512-y/5PCd+mP4CA/sPDKl2961b+C9d+vPAveS33s6Z3zfASk2j5upL6fXVPZi7ztePZ5CuH+1kW8JtvxgbuXHRa4Q=="],
+
+ "astro/sharp/@img/sharp-linux-x64": ["@img/sharp-linux-x64@0.33.5", "", { "optionalDependencies": { "@img/sharp-libvips-linux-x64": "1.0.4" }, "os": "linux", "cpu": "x64" }, "sha512-opC+Ok5pRNAzuvq1AG0ar+1owsu842/Ab+4qvU879ippJBHvyY5n2mxF1izXqkPYlGuP/M556uh53jRLJmzTWA=="],
+
+ "astro/sharp/@img/sharp-linuxmusl-arm64": ["@img/sharp-linuxmusl-arm64@0.33.5", "", { "optionalDependencies": { "@img/sharp-libvips-linuxmusl-arm64": "1.0.4" }, "os": "linux", "cpu": "arm64" }, "sha512-XrHMZwGQGvJg2V/oRSUfSAfjfPxO+4DkiRh6p2AFjLQztWUuY/o8Mq0eMQVIY7HJ1CDQUJlxGGZRw1a5bqmd1g=="],
+
+ "astro/sharp/@img/sharp-linuxmusl-x64": ["@img/sharp-linuxmusl-x64@0.33.5", "", { "optionalDependencies": { "@img/sharp-libvips-linuxmusl-x64": "1.0.4" }, "os": "linux", "cpu": "x64" }, "sha512-WT+d/cgqKkkKySYmqoZ8y3pxx7lx9vVejxW/W4DOFMYVSkErR+w7mf2u8m/y4+xHe7yY9DAXQMWQhpnMuFfScw=="],
+
+ "astro/sharp/@img/sharp-wasm32": ["@img/sharp-wasm32@0.33.5", "", { "dependencies": { "@emnapi/runtime": "^1.2.0" }, "cpu": "none" }, "sha512-ykUW4LVGaMcU9lu9thv85CbRMAwfeadCJHRsg2GmeRa/cJxsVY9Rbd57JcMxBkKHag5U/x7TSBpScF4U8ElVzg=="],
+
+ "astro/sharp/@img/sharp-win32-ia32": ["@img/sharp-win32-ia32@0.33.5", "", { "os": "win32", "cpu": "ia32" }, "sha512-T36PblLaTwuVJ/zw/LaH0PdZkRz5rd3SmMHX8GSmR7vtNSP5Z6bQkExdSK7xGWyxLw4sUknBuugTelgw2faBbQ=="],
+
+ "astro/sharp/@img/sharp-win32-x64": ["@img/sharp-win32-x64@0.33.5", "", { "os": "win32", "cpu": "x64" }, "sha512-MpY/o8/8kj+EcnxwvrP4aTJSWw/aZ7JIGR4aBeZkZw5B7/Jn+tY9/VNwtcoGmdT7GfggGIU4kygOMSbYnOrAbg=="],
+
+ "ansi-align/string-width/strip-ansi/ansi-regex": ["ansi-regex@5.0.1", "", {}, "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="],
+ }
+}
diff --git a/website/package.json b/website/package.json
new file mode 100644
index 000000000..024b879d2
--- /dev/null
+++ b/website/package.json
@@ -0,0 +1,19 @@
+{
+ "name": "website",
+ "type": "module",
+ "version": "0.0.1",
+ "scripts": {
+ "dev": "astro dev",
+ "start": "astro dev",
+ "build": "astro build",
+ "preview": "astro preview",
+ "astro": "astro"
+ },
+ "dependencies": {
+ "@astrojs/starlight": "^0.35.1",
+ "astro": "^5.6.1",
+ "sharp": "^0.34.2",
+ "starlight-sidebar-topics": "^0.6.0",
+ "starlight-theme-rapide": "^0.5.1"
+ }
+}
\ No newline at end of file
diff --git a/website/public/favicon.svg b/website/public/favicon.svg
new file mode 100644
index 000000000..cba5ac140
--- /dev/null
+++ b/website/public/favicon.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/website/public/jan.png b/website/public/jan.png
new file mode 100644
index 000000000..21ec5b15f
Binary files /dev/null and b/website/public/jan.png differ
diff --git a/website/public/jan2.png b/website/public/jan2.png
new file mode 100644
index 000000000..ef2abe4d4
Binary files /dev/null and b/website/public/jan2.png differ
diff --git a/website/src/assets/add_assistant.png b/website/src/assets/add_assistant.png
new file mode 100644
index 000000000..b8f3defb5
Binary files /dev/null and b/website/src/assets/add_assistant.png differ
diff --git a/website/src/assets/anthropic.png b/website/src/assets/anthropic.png
new file mode 100644
index 000000000..d82ba291d
Binary files /dev/null and b/website/src/assets/anthropic.png differ
diff --git a/website/src/assets/api-server-logs.png b/website/src/assets/api-server-logs.png
new file mode 100644
index 000000000..f32c01472
Binary files /dev/null and b/website/src/assets/api-server-logs.png differ
diff --git a/website/src/assets/api-server.png b/website/src/assets/api-server.png
new file mode 100644
index 000000000..e25d0a5bb
Binary files /dev/null and b/website/src/assets/api-server.png differ
diff --git a/website/src/assets/api-server2.png b/website/src/assets/api-server2.png
new file mode 100644
index 000000000..6bb0c3c0b
Binary files /dev/null and b/website/src/assets/api-server2.png differ
diff --git a/website/src/assets/assistant-add-dialog.png b/website/src/assets/assistant-add-dialog.png
new file mode 100644
index 000000000..aa2438092
Binary files /dev/null and b/website/src/assets/assistant-add-dialog.png differ
diff --git a/website/src/assets/assistant-dropdown-updated.png b/website/src/assets/assistant-dropdown-updated.png
new file mode 100644
index 000000000..b6ee0b53d
Binary files /dev/null and b/website/src/assets/assistant-dropdown-updated.png differ
diff --git a/website/src/assets/assistant-dropdown.png b/website/src/assets/assistant-dropdown.png
new file mode 100644
index 000000000..eb3ddfbe5
Binary files /dev/null and b/website/src/assets/assistant-dropdown.png differ
diff --git a/website/src/assets/assistant-edit-dialog.png b/website/src/assets/assistant-edit-dialog.png
new file mode 100644
index 000000000..b323d7b2f
Binary files /dev/null and b/website/src/assets/assistant-edit-dialog.png differ
diff --git a/website/src/assets/assistants-ui-overview.png b/website/src/assets/assistants-ui-overview.png
new file mode 100644
index 000000000..989616f13
Binary files /dev/null and b/website/src/assets/assistants-ui-overview.png differ
diff --git a/website/src/assets/cohere.png b/website/src/assets/cohere.png
new file mode 100644
index 000000000..f7d24d74c
Binary files /dev/null and b/website/src/assets/cohere.png differ
diff --git a/website/src/assets/deepseek.png b/website/src/assets/deepseek.png
new file mode 100644
index 000000000..9e1084aee
Binary files /dev/null and b/website/src/assets/deepseek.png differ
diff --git a/website/src/assets/e2b-key.png b/website/src/assets/e2b-key.png
new file mode 100644
index 000000000..67d75b06e
Binary files /dev/null and b/website/src/assets/e2b-key.png differ
diff --git a/website/src/assets/e2b-key1.png b/website/src/assets/e2b-key1.png
new file mode 100644
index 000000000..310c9c7ae
Binary files /dev/null and b/website/src/assets/e2b-key1.png differ
diff --git a/website/src/assets/e2b-key2.png b/website/src/assets/e2b-key2.png
new file mode 100644
index 000000000..8aa52a202
Binary files /dev/null and b/website/src/assets/e2b-key2.png differ
diff --git a/website/src/assets/e2b-key3.png b/website/src/assets/e2b-key3.png
new file mode 100644
index 000000000..726dd8430
Binary files /dev/null and b/website/src/assets/e2b-key3.png differ
diff --git a/website/src/assets/e2b-key4.png b/website/src/assets/e2b-key4.png
new file mode 100644
index 000000000..80855cadd
Binary files /dev/null and b/website/src/assets/e2b-key4.png differ
diff --git a/website/src/assets/e2b-key5.png b/website/src/assets/e2b-key5.png
new file mode 100644
index 000000000..f4e9441e8
Binary files /dev/null and b/website/src/assets/e2b-key5.png differ
diff --git a/website/src/assets/e2b-key6.png b/website/src/assets/e2b-key6.png
new file mode 100644
index 000000000..ad362997d
Binary files /dev/null and b/website/src/assets/e2b-key6.png differ
diff --git a/website/src/assets/e2b-key7.png b/website/src/assets/e2b-key7.png
new file mode 100644
index 000000000..d5f82e0b3
Binary files /dev/null and b/website/src/assets/e2b-key7.png differ
diff --git a/website/src/assets/exa.png b/website/src/assets/exa.png
new file mode 100644
index 000000000..c187b595d
Binary files /dev/null and b/website/src/assets/exa.png differ
diff --git a/website/src/assets/exa1.png b/website/src/assets/exa1.png
new file mode 100644
index 000000000..c4d3ea2f6
Binary files /dev/null and b/website/src/assets/exa1.png differ
diff --git a/website/src/assets/exa2.png b/website/src/assets/exa2.png
new file mode 100644
index 000000000..8da36c311
Binary files /dev/null and b/website/src/assets/exa2.png differ
diff --git a/website/src/assets/exa3.png b/website/src/assets/exa3.png
new file mode 100644
index 000000000..fffe28deb
Binary files /dev/null and b/website/src/assets/exa3.png differ
diff --git a/website/src/assets/exa4.png b/website/src/assets/exa4.png
new file mode 100644
index 000000000..dd34cbcb9
Binary files /dev/null and b/website/src/assets/exa4.png differ
diff --git a/website/src/assets/extensions-01.png b/website/src/assets/extensions-01.png
new file mode 100644
index 000000000..98fe85480
Binary files /dev/null and b/website/src/assets/extensions-01.png differ
diff --git a/website/src/assets/extensions-02.png b/website/src/assets/extensions-02.png
new file mode 100644
index 000000000..5761b6b7f
Binary files /dev/null and b/website/src/assets/extensions-02.png differ
diff --git a/website/src/assets/extensions-03.png b/website/src/assets/extensions-03.png
new file mode 100644
index 000000000..a74230a0a
Binary files /dev/null and b/website/src/assets/extensions-03.png differ
diff --git a/website/src/assets/extensions-04.png b/website/src/assets/extensions-04.png
new file mode 100644
index 000000000..20b48bcf5
Binary files /dev/null and b/website/src/assets/extensions-04.png differ
diff --git a/website/src/assets/extensions-05.png b/website/src/assets/extensions-05.png
new file mode 100644
index 000000000..3967d90a8
Binary files /dev/null and b/website/src/assets/extensions-05.png differ
diff --git a/website/src/assets/extensions-06.png b/website/src/assets/extensions-06.png
new file mode 100644
index 000000000..b169e3926
Binary files /dev/null and b/website/src/assets/extensions-06.png differ
diff --git a/website/src/assets/extensions-07.png b/website/src/assets/extensions-07.png
new file mode 100644
index 000000000..3d39f56d9
Binary files /dev/null and b/website/src/assets/extensions-07.png differ
diff --git a/website/src/assets/extensions-08.png b/website/src/assets/extensions-08.png
new file mode 100644
index 000000000..3d124e367
Binary files /dev/null and b/website/src/assets/extensions-08.png differ
diff --git a/website/src/assets/extensions-09.png b/website/src/assets/extensions-09.png
new file mode 100644
index 000000000..7d7cd6193
Binary files /dev/null and b/website/src/assets/extensions-09.png differ
diff --git a/website/src/assets/extensions-10.png b/website/src/assets/extensions-10.png
new file mode 100644
index 000000000..ecadd8475
Binary files /dev/null and b/website/src/assets/extensions-10.png differ
diff --git a/website/src/assets/google.png b/website/src/assets/google.png
new file mode 100644
index 000000000..751c473d7
Binary files /dev/null and b/website/src/assets/google.png differ
diff --git a/website/src/assets/gpu_accl.png b/website/src/assets/gpu_accl.png
new file mode 100644
index 000000000..d1cfca99f
Binary files /dev/null and b/website/src/assets/gpu_accl.png differ
diff --git a/website/src/assets/groq.png b/website/src/assets/groq.png
new file mode 100644
index 000000000..c2f87ecdd
Binary files /dev/null and b/website/src/assets/groq.png differ
diff --git a/website/src/assets/hardware.png b/website/src/assets/hardware.png
new file mode 100644
index 000000000..177d829a5
Binary files /dev/null and b/website/src/assets/hardware.png differ
diff --git a/website/src/assets/hf-unsloth.png b/website/src/assets/hf-unsloth.png
new file mode 100644
index 000000000..2e3edb422
Binary files /dev/null and b/website/src/assets/hf-unsloth.png differ
diff --git a/website/src/assets/hf_and_jan.png b/website/src/assets/hf_and_jan.png
new file mode 100644
index 000000000..5bc324a29
Binary files /dev/null and b/website/src/assets/hf_and_jan.png differ
diff --git a/website/src/assets/hf_token.png b/website/src/assets/hf_token.png
new file mode 100644
index 000000000..49b16b25a
Binary files /dev/null and b/website/src/assets/hf_token.png differ
diff --git a/website/src/assets/houston.webp b/website/src/assets/houston.webp
new file mode 100644
index 000000000..930c16497
Binary files /dev/null and b/website/src/assets/houston.webp differ
diff --git a/website/src/assets/install-engines-01.png b/website/src/assets/install-engines-01.png
new file mode 100644
index 000000000..95e824a7f
Binary files /dev/null and b/website/src/assets/install-engines-01.png differ
diff --git a/website/src/assets/install-engines-02.png b/website/src/assets/install-engines-02.png
new file mode 100644
index 000000000..b6b6a6a58
Binary files /dev/null and b/website/src/assets/install-engines-02.png differ
diff --git a/website/src/assets/install-engines-03.png b/website/src/assets/install-engines-03.png
new file mode 100644
index 000000000..a65145a4c
Binary files /dev/null and b/website/src/assets/install-engines-03.png differ
diff --git a/website/src/assets/jan-app-new.png b/website/src/assets/jan-app-new.png
new file mode 100644
index 000000000..85db5d552
Binary files /dev/null and b/website/src/assets/jan-app-new.png differ
diff --git a/website/src/assets/jan-app.png b/website/src/assets/jan-app.png
new file mode 100644
index 000000000..d7fb24181
Binary files /dev/null and b/website/src/assets/jan-app.png differ
diff --git a/website/src/assets/jan-nano-bench.png b/website/src/assets/jan-nano-bench.png
new file mode 100644
index 000000000..ce923bc22
Binary files /dev/null and b/website/src/assets/jan-nano-bench.png differ
diff --git a/website/src/assets/jan-nano-demo.gif b/website/src/assets/jan-nano-demo.gif
new file mode 100644
index 000000000..a2b87619f
Binary files /dev/null and b/website/src/assets/jan-nano-demo.gif differ
diff --git a/website/src/assets/jan-nano0.png b/website/src/assets/jan-nano0.png
new file mode 100644
index 000000000..f2da8b5f7
Binary files /dev/null and b/website/src/assets/jan-nano0.png differ
diff --git a/website/src/assets/jan-nano1.png b/website/src/assets/jan-nano1.png
new file mode 100644
index 000000000..f36427373
Binary files /dev/null and b/website/src/assets/jan-nano1.png differ
diff --git a/website/src/assets/jan_ui.png b/website/src/assets/jan_ui.png
new file mode 100644
index 000000000..01d3568df
Binary files /dev/null and b/website/src/assets/jan_ui.png differ
diff --git a/website/src/assets/llama.cpp-01-updated.png b/website/src/assets/llama.cpp-01-updated.png
new file mode 100644
index 000000000..177c26145
Binary files /dev/null and b/website/src/assets/llama.cpp-01-updated.png differ
diff --git a/website/src/assets/llama.cpp-01.png b/website/src/assets/llama.cpp-01.png
new file mode 100644
index 000000000..95b4f550e
Binary files /dev/null and b/website/src/assets/llama.cpp-01.png differ
diff --git a/website/src/assets/ls.png b/website/src/assets/ls.png
new file mode 100644
index 000000000..80a272a12
Binary files /dev/null and b/website/src/assets/ls.png differ
diff --git a/website/src/assets/martian.png b/website/src/assets/martian.png
new file mode 100644
index 000000000..840fd083d
Binary files /dev/null and b/website/src/assets/martian.png differ
diff --git a/website/src/assets/mcp-on.png b/website/src/assets/mcp-on.png
new file mode 100644
index 000000000..a8cf4f0c1
Binary files /dev/null and b/website/src/assets/mcp-on.png differ
diff --git a/website/src/assets/mcp-server.png b/website/src/assets/mcp-server.png
new file mode 100644
index 000000000..a493b4b0a
Binary files /dev/null and b/website/src/assets/mcp-server.png differ
diff --git a/website/src/assets/mcp-setup-1.png b/website/src/assets/mcp-setup-1.png
new file mode 100644
index 000000000..619d03f91
Binary files /dev/null and b/website/src/assets/mcp-setup-1.png differ
diff --git a/website/src/assets/mcp-setup-10.png b/website/src/assets/mcp-setup-10.png
new file mode 100644
index 000000000..0a8a9dbae
Binary files /dev/null and b/website/src/assets/mcp-setup-10.png differ
diff --git a/website/src/assets/mcp-setup-2.png b/website/src/assets/mcp-setup-2.png
new file mode 100644
index 000000000..2c4f7b772
Binary files /dev/null and b/website/src/assets/mcp-setup-2.png differ
diff --git a/website/src/assets/mcp-setup-3.png b/website/src/assets/mcp-setup-3.png
new file mode 100644
index 000000000..a05432c69
Binary files /dev/null and b/website/src/assets/mcp-setup-3.png differ
diff --git a/website/src/assets/mcp-setup-4.png b/website/src/assets/mcp-setup-4.png
new file mode 100644
index 000000000..35653f82e
Binary files /dev/null and b/website/src/assets/mcp-setup-4.png differ
diff --git a/website/src/assets/mcp-setup-5.png b/website/src/assets/mcp-setup-5.png
new file mode 100644
index 000000000..f8787ab16
Binary files /dev/null and b/website/src/assets/mcp-setup-5.png differ
diff --git a/website/src/assets/mcp-setup-6.png b/website/src/assets/mcp-setup-6.png
new file mode 100644
index 000000000..a84c0273a
Binary files /dev/null and b/website/src/assets/mcp-setup-6.png differ
diff --git a/website/src/assets/mcp-setup-7.png b/website/src/assets/mcp-setup-7.png
new file mode 100644
index 000000000..e8c07d03a
Binary files /dev/null and b/website/src/assets/mcp-setup-7.png differ
diff --git a/website/src/assets/mcp-setup-8.png b/website/src/assets/mcp-setup-8.png
new file mode 100644
index 000000000..a69b7bd39
Binary files /dev/null and b/website/src/assets/mcp-setup-8.png differ
diff --git a/website/src/assets/mcp-setup-9.png b/website/src/assets/mcp-setup-9.png
new file mode 100644
index 000000000..a4ebb2fdb
Binary files /dev/null and b/website/src/assets/mcp-setup-9.png differ
diff --git a/website/src/assets/mistralai.png b/website/src/assets/mistralai.png
new file mode 100644
index 000000000..98fb13eb5
Binary files /dev/null and b/website/src/assets/mistralai.png differ
diff --git a/website/src/assets/model-capabilities-edit-01.png b/website/src/assets/model-capabilities-edit-01.png
new file mode 100644
index 000000000..082430708
Binary files /dev/null and b/website/src/assets/model-capabilities-edit-01.png differ
diff --git a/website/src/assets/model-capabilities-edit-02.png b/website/src/assets/model-capabilities-edit-02.png
new file mode 100644
index 000000000..6c4f84f72
Binary files /dev/null and b/website/src/assets/model-capabilities-edit-02.png differ
diff --git a/website/src/assets/model-import-04.png b/website/src/assets/model-import-04.png
new file mode 100644
index 000000000..5b35c816d
Binary files /dev/null and b/website/src/assets/model-import-04.png differ
diff --git a/website/src/assets/model-import-05.png b/website/src/assets/model-import-05.png
new file mode 100644
index 000000000..9d54e1559
Binary files /dev/null and b/website/src/assets/model-import-05.png differ
diff --git a/website/src/assets/model-management-01.png b/website/src/assets/model-management-01.png
new file mode 100644
index 000000000..00cec174c
Binary files /dev/null and b/website/src/assets/model-management-01.png differ
diff --git a/website/src/assets/model-management-02.png b/website/src/assets/model-management-02.png
new file mode 100644
index 000000000..94e933c6f
Binary files /dev/null and b/website/src/assets/model-management-02.png differ
diff --git a/website/src/assets/model-management-03.png b/website/src/assets/model-management-03.png
new file mode 100644
index 000000000..c31a04bd0
Binary files /dev/null and b/website/src/assets/model-management-03.png differ
diff --git a/website/src/assets/model-management-04.png b/website/src/assets/model-management-04.png
new file mode 100644
index 000000000..be20e984d
Binary files /dev/null and b/website/src/assets/model-management-04.png differ
diff --git a/website/src/assets/model-management-05.png b/website/src/assets/model-management-05.png
new file mode 100644
index 000000000..4c817aecc
Binary files /dev/null and b/website/src/assets/model-management-05.png differ
diff --git a/website/src/assets/model-management-06.png b/website/src/assets/model-management-06.png
new file mode 100644
index 000000000..e0e33cbcb
Binary files /dev/null and b/website/src/assets/model-management-06.png differ
diff --git a/website/src/assets/model-management-07.png b/website/src/assets/model-management-07.png
new file mode 100644
index 000000000..ca9880ac0
Binary files /dev/null and b/website/src/assets/model-management-07.png differ
diff --git a/website/src/assets/model-management-08.png b/website/src/assets/model-management-08.png
new file mode 100644
index 000000000..98c02a19d
Binary files /dev/null and b/website/src/assets/model-management-08.png differ
diff --git a/website/src/assets/model-management-09.png b/website/src/assets/model-management-09.png
new file mode 100644
index 000000000..990b53710
Binary files /dev/null and b/website/src/assets/model-management-09.png differ
diff --git a/website/src/assets/model-parameters.png b/website/src/assets/model-parameters.png
new file mode 100644
index 000000000..777b013d5
Binary files /dev/null and b/website/src/assets/model-parameters.png differ
diff --git a/website/src/assets/nvidia-nim.png b/website/src/assets/nvidia-nim.png
new file mode 100644
index 000000000..e748756f7
Binary files /dev/null and b/website/src/assets/nvidia-nim.png differ
diff --git a/website/src/assets/openai.png b/website/src/assets/openai.png
new file mode 100644
index 000000000..eb3160982
Binary files /dev/null and b/website/src/assets/openai.png differ
diff --git a/website/src/assets/openrouter.png b/website/src/assets/openrouter.png
new file mode 100644
index 000000000..3cb114c92
Binary files /dev/null and b/website/src/assets/openrouter.png differ
diff --git a/website/src/assets/quick-start-01.png b/website/src/assets/quick-start-01.png
new file mode 100644
index 000000000..03b101aa2
Binary files /dev/null and b/website/src/assets/quick-start-01.png differ
diff --git a/website/src/assets/quick-start-02.png b/website/src/assets/quick-start-02.png
new file mode 100644
index 000000000..977d8ebdb
Binary files /dev/null and b/website/src/assets/quick-start-02.png differ
diff --git a/website/src/assets/quick-start-03.png b/website/src/assets/quick-start-03.png
new file mode 100644
index 000000000..3938f0fd1
Binary files /dev/null and b/website/src/assets/quick-start-03.png differ
diff --git a/website/src/assets/retrieval-01.png b/website/src/assets/retrieval-01.png
new file mode 100644
index 000000000..1d120e745
Binary files /dev/null and b/website/src/assets/retrieval-01.png differ
diff --git a/website/src/assets/retrieval-02.png b/website/src/assets/retrieval-02.png
new file mode 100644
index 000000000..2ec4ba029
Binary files /dev/null and b/website/src/assets/retrieval-02.png differ
diff --git a/website/src/assets/serper-mcp.png b/website/src/assets/serper-mcp.png
new file mode 100644
index 000000000..20c9fad99
Binary files /dev/null and b/website/src/assets/serper-mcp.png differ
diff --git a/website/src/assets/settings-01.png b/website/src/assets/settings-01.png
new file mode 100644
index 000000000..e2e5aead5
Binary files /dev/null and b/website/src/assets/settings-01.png differ
diff --git a/website/src/assets/settings-02.png b/website/src/assets/settings-02.png
new file mode 100644
index 000000000..6c1699a1c
Binary files /dev/null and b/website/src/assets/settings-02.png differ
diff --git a/website/src/assets/settings-03.png b/website/src/assets/settings-03.png
new file mode 100644
index 000000000..4e32c390b
Binary files /dev/null and b/website/src/assets/settings-03.png differ
diff --git a/website/src/assets/settings-04.png b/website/src/assets/settings-04.png
new file mode 100644
index 000000000..f2dc4b2ec
Binary files /dev/null and b/website/src/assets/settings-04.png differ
diff --git a/website/src/assets/settings-05.png b/website/src/assets/settings-05.png
new file mode 100644
index 000000000..489d6cd50
Binary files /dev/null and b/website/src/assets/settings-05.png differ
diff --git a/website/src/assets/settings-06.png b/website/src/assets/settings-06.png
new file mode 100644
index 000000000..c5f5ca511
Binary files /dev/null and b/website/src/assets/settings-06.png differ
diff --git a/website/src/assets/settings-07.png b/website/src/assets/settings-07.png
new file mode 100644
index 000000000..64bbebf07
Binary files /dev/null and b/website/src/assets/settings-07.png differ
diff --git a/website/src/assets/settings-08.png b/website/src/assets/settings-08.png
new file mode 100644
index 000000000..d4f10cd72
Binary files /dev/null and b/website/src/assets/settings-08.png differ
diff --git a/website/src/assets/settings-09.png b/website/src/assets/settings-09.png
new file mode 100644
index 000000000..8c61e99d4
Binary files /dev/null and b/website/src/assets/settings-09.png differ
diff --git a/website/src/assets/settings-10.png b/website/src/assets/settings-10.png
new file mode 100644
index 000000000..30c4dd4d4
Binary files /dev/null and b/website/src/assets/settings-10.png differ
diff --git a/website/src/assets/settings-11.png b/website/src/assets/settings-11.png
new file mode 100644
index 000000000..2fb3fe441
Binary files /dev/null and b/website/src/assets/settings-11.png differ
diff --git a/website/src/assets/settings-12.png b/website/src/assets/settings-12.png
new file mode 100644
index 000000000..e05cea115
Binary files /dev/null and b/website/src/assets/settings-12.png differ
diff --git a/website/src/assets/settings-13.png b/website/src/assets/settings-13.png
new file mode 100644
index 000000000..2454b4085
Binary files /dev/null and b/website/src/assets/settings-13.png differ
diff --git a/website/src/assets/settings-14.png b/website/src/assets/settings-14.png
new file mode 100644
index 000000000..520fd787b
Binary files /dev/null and b/website/src/assets/settings-14.png differ
diff --git a/website/src/assets/settings-15.png b/website/src/assets/settings-15.png
new file mode 100644
index 000000000..ccc52f497
Binary files /dev/null and b/website/src/assets/settings-15.png differ
diff --git a/website/src/assets/settings-16.png b/website/src/assets/settings-16.png
new file mode 100644
index 000000000..72a61ca31
Binary files /dev/null and b/website/src/assets/settings-16.png differ
diff --git a/website/src/assets/settings-17.png b/website/src/assets/settings-17.png
new file mode 100644
index 000000000..cd5a3de9e
Binary files /dev/null and b/website/src/assets/settings-17.png differ
diff --git a/website/src/assets/settings-18.png b/website/src/assets/settings-18.png
new file mode 100644
index 000000000..accaaa173
Binary files /dev/null and b/website/src/assets/settings-18.png differ
diff --git a/website/src/assets/settings-19.png b/website/src/assets/settings-19.png
new file mode 100644
index 000000000..634c6f1da
Binary files /dev/null and b/website/src/assets/settings-19.png differ
diff --git a/website/src/assets/sys_monitor.png b/website/src/assets/sys_monitor.png
new file mode 100644
index 000000000..85a8676a5
Binary files /dev/null and b/website/src/assets/sys_monitor.png differ
diff --git a/website/src/assets/tensorrt-llm-01.png b/website/src/assets/tensorrt-llm-01.png
new file mode 100644
index 000000000..2f839f7a5
Binary files /dev/null and b/website/src/assets/tensorrt-llm-01.png differ
diff --git a/website/src/assets/tensorrt-llm-02.png b/website/src/assets/tensorrt-llm-02.png
new file mode 100644
index 000000000..de9841874
Binary files /dev/null and b/website/src/assets/tensorrt-llm-02.png differ
diff --git a/website/src/assets/threads-context-menu-updated.png b/website/src/assets/threads-context-menu-updated.png
new file mode 100644
index 000000000..e90baf2ab
Binary files /dev/null and b/website/src/assets/threads-context-menu-updated.png differ
diff --git a/website/src/assets/threads-context-menu.png b/website/src/assets/threads-context-menu.png
new file mode 100644
index 000000000..8ef05a2cb
Binary files /dev/null and b/website/src/assets/threads-context-menu.png differ
diff --git a/website/src/assets/threads-favorites-and-recents-updated.png b/website/src/assets/threads-favorites-and-recents-updated.png
new file mode 100644
index 000000000..5d62974e4
Binary files /dev/null and b/website/src/assets/threads-favorites-and-recents-updated.png differ
diff --git a/website/src/assets/threads-favorites-and-recents.png b/website/src/assets/threads-favorites-and-recents.png
new file mode 100644
index 000000000..116fdf022
Binary files /dev/null and b/website/src/assets/threads-favorites-and-recents.png differ
diff --git a/website/src/assets/threads-new-chat-updated.png b/website/src/assets/threads-new-chat-updated.png
new file mode 100644
index 000000000..6780e7df3
Binary files /dev/null and b/website/src/assets/threads-new-chat-updated.png differ
diff --git a/website/src/assets/threads-new-chat.png b/website/src/assets/threads-new-chat.png
new file mode 100644
index 000000000..4d991e959
Binary files /dev/null and b/website/src/assets/threads-new-chat.png differ
diff --git a/website/src/assets/together.png b/website/src/assets/together.png
new file mode 100644
index 000000000..bc2fd9d49
Binary files /dev/null and b/website/src/assets/together.png differ
diff --git a/website/src/assets/trouble-shooting-01.png b/website/src/assets/trouble-shooting-01.png
new file mode 100644
index 000000000..22d1a6d68
Binary files /dev/null and b/website/src/assets/trouble-shooting-01.png differ
diff --git a/website/src/assets/trouble-shooting-02.png b/website/src/assets/trouble-shooting-02.png
new file mode 100644
index 000000000..8c61e99d4
Binary files /dev/null and b/website/src/assets/trouble-shooting-02.png differ
diff --git a/website/src/assets/trouble-shooting-03.png b/website/src/assets/trouble-shooting-03.png
new file mode 100644
index 000000000..d07ed56d7
Binary files /dev/null and b/website/src/assets/trouble-shooting-03.png differ
diff --git a/website/src/assets/trouble-shooting-04.png b/website/src/assets/trouble-shooting-04.png
new file mode 100644
index 000000000..f7166e180
Binary files /dev/null and b/website/src/assets/trouble-shooting-04.png differ
diff --git a/website/src/content.config.ts b/website/src/content.config.ts
new file mode 100644
index 000000000..d9ee8c9d1
--- /dev/null
+++ b/website/src/content.config.ts
@@ -0,0 +1,7 @@
+import { defineCollection } from 'astro:content';
+import { docsLoader } from '@astrojs/starlight/loaders';
+import { docsSchema } from '@astrojs/starlight/schema';
+
+export const collections = {
+ docs: defineCollection({ loader: docsLoader(), schema: docsSchema() }),
+};
diff --git a/website/src/content/docs/index.mdx b/website/src/content/docs/index.mdx
new file mode 100644
index 000000000..85d4f3a7e
--- /dev/null
+++ b/website/src/content/docs/index.mdx
@@ -0,0 +1,171 @@
+---
+title: Jan
+description: Jan is an open-source ChatGPT-alternative and self-hosted AI platform - build and run AI on your own desktop or server.
+keywords:
+ [
+ Jan,
+ Jan AI,
+ ChatGPT alternative,
+ OpenAI platform alternative,
+ local API,
+ local AI,
+ private AI,
+ conversational AI,
+ no-subscription fee,
+ large language model,
+ LLM,
+ ]
+---
+
+import { Aside } from '@astrojs/starlight/components';
+
+
+
+
+Jan is a ChatGPT alternative that runs 100% offline on your desktop and (*soon*) on mobile. Our goal is to
+make it easy for anyone, with or without coding skills, to download and use AI models with full control and
+[privacy](https://www.reuters.com/legal/legalindustry/privacy-paradox-with-ai-2023-10-31/).
+
+Jan is powered by [Llama.cpp](https://github.com/ggerganov/llama.cpp), a local AI engine that provides an OpenAI-compatible
+API that can run in the background by default at `http://localhost:1337` (or your custom port). This enables you to power all sorts of
+applications with AI capabilities from your laptop/PC. For example, you can connect local tools like [Continue](https://jan.ai/docs/server-examples/continue-dev)
+and [Cline](https://cline.bot/) to Jan and power them using your favorite models.
+
+Jan doesn't limit you to locally hosted models, meaning you can create an API key from your favorite model provider,
+add it to Jan via the configuration page, and start talking to your favorite models.
+
+### Features
+
+- Download popular open-source LLMs (Llama3, Gemma3, Qwen3, and more) from the HuggingFace [Model Hub](/docs/manage-models)
+or import any GGUF files (the model format used by llama.cpp) available locally
+- Connect to [cloud services](/docs/remote-models/openai) (OpenAI, Anthropic, Mistral, Groq, etc.)
+- [Chat](/docs/threads) with AI models & [customize their parameters](/docs/model-parameters) via our
+intuitive interface
+- Use our [local API server](https://jan.ai/api-reference) with an OpenAI-equivalent API to power other apps.
+
+### Philosophy
+
+Jan is built to be [user-owned](about#-user-owned), which means that Jan is:
+- Truly open source via the [Apache 2.0 license](https://github.com/menloresearch/jan/blob/dev/LICENSE)
+- [Data is stored locally, following one of the many local-first principles](https://www.inkandswitch.com/local-first)
+- Internet is optional, Jan can run 100% offline
+- Free choice of AI models, both local and cloud-based
+- We do not collect or sell user data. See our [Privacy Policy](./privacy).
+
+
+
+### Inspirations
+
+Jan is inspired by the concepts of [Calm Computing](https://en.wikipedia.org/wiki/Calm_technology), and the Disappearing Computer.
+
+## Acknowledgements
+
+Jan is built on the shoulders of many open-source projects like:
+
+- [Llama.cpp](https://github.com/ggerganov/llama.cpp/blob/master/LICENSE)
+- [Scalar](https://github.com/scalar/scalar)
+
+## FAQs
+
+
+What is Jan?
+
+Jan is a customizable AI assistant that can run offline on your computer - a privacy-focused alternative to tools like ChatGPT, Anthropic's Claude, and Google Gemini, with optional cloud AI support.
+
+
+
+
+How do I get started with Jan?
+
+Download Jan on your computer, download a model or add API key for a cloud-based one, and start chatting. For detailed setup instructions, see our [Quick Start](/docs/quickstart) guide.
+
+
+
+
+Is Jan compatible with my system?
+
+Jan supports all major operating systems:
+- [Mac](/docs/desktop/mac#compatibility)
+- [Windows](/docs/desktop/windows#compatibility)
+- [Linux](/docs/desktop/linux)
+
+Hardware compatibility includes:
+- NVIDIA GPUs (CUDA)
+- AMD GPUs (Vulkan)
+- Intel Arc GPUs (Vulkan)
+- Any GPU with Vulkan support
+
+
+
+
+How does Jan protect my privacy?
+
+Jan prioritizes privacy by:
+- Running 100% offline with locally-stored data
+- Using open-source models that keep your conversations private
+- Storing all files and chat history on your device in the [Jan Data Folder](/docs/data-folder)
+- Never collecting or selling your data
+
+
+
+You can optionally share anonymous usage statistics to help improve Jan, but your conversations are never shared. See our complete [Privacy Policy](./docs/privacy).
+
+
+
+
+What models can I use with Jan?
+
+- Download optimized models from the [Jan Hub](/docs/manage-models)
+- Import GGUF models from Hugging Face or your local files
+- Connect to cloud providers like OpenAI, Anthropic, Mistral and Groq (requires your own API keys)
+
+
+
+
+Is Jan really free? What's the catch?
+
+Jan is completely free and open-source with no subscription fees for local models and features. When using cloud-based models (like GPT-4o or Claude 3.7 Sonnet), you'll only pay the standard rates to those providers—we add no markup.
+
+
+
+
+Can I use Jan offline?
+
+Yes! Once you've downloaded a local model, Jan works completely offline with no internet connection needed.
+
+
+
+
+How can I contribute or get community help?
+
+- Join our [Discord community](https://discord.gg/qSwXFx6Krr) to connect with other users
+- Contribute through [GitHub](https://github.com/menloresearch/jan) (no permission needed!)
+- Get troubleshooting help in our [Discord](https://discord.com/invite/FTk2MvZwJH) channel [#🆘|jan-help](https://discord.com/channels/1107178041848909847/1192090449725358130)
+- Check our [Troubleshooting](./docs/troubleshooting) guide for common issues
+
+
+
+
+Can I self-host Jan?
+
+Yes! We fully support the self-hosted movement. Either download Jan directly or fork it on [GitHub repository](https://github.com/menloresearch/jan) and build it from source.
+
+
+
+
+What does Jan stand for?
+
+Jan stands for "Just a Name". We are, admittedly, bad at marketing 😂.
+
+
+
+
+Are you hiring?
+
+Yes! We love hiring from our community. Check out our open positions at [Careers](https://menlo.bamboohr.com/careers).
+
+
diff --git a/website/src/content/docs/jan/assistants.mdx b/website/src/content/docs/jan/assistants.mdx
new file mode 100644
index 000000000..9e0bada5b
--- /dev/null
+++ b/website/src/content/docs/jan/assistants.mdx
@@ -0,0 +1,100 @@
+---
+title: Assistants
+description: A step-by-step guide on customizing and managing your assistants.
+keywords:
+ [
+ Jan,
+ Customizable Intelligence, LLM,
+ local AI,
+ privacy focus,
+ free and open source,
+ private and offline,
+ conversational AI,
+ no-subscription fee,
+ large language models,
+ manage assistants,
+ assistants,
+ ]
+---
+
+
+# Assistants
+
+Jan allows you to give models specific sets of instructions without having to repeat yourself. We call these
+instruction-equipped models Assistants. Each assistant can also have its own configuration,
+which helps guide how the AI model should behave and respond to your inputs. You can add, edit, or delete
+assistants, and customize their instructions and settings from the Assistants tab.
+
+
+
+To find the Assistants tab:
+
+1. Open Jan and look at the left sidebar.
+2. Click on the **Assistants** tab (see highlighted section in the screenshot above).
+3. The main panel will display all your current assistants.
+
+## Managing Assistants
+
+- **Add a New Assistant**: Click the `+` button in the Assistants panel to create a new assistant with your instructions.
+- **Edit an Assistant**: Click the pencil (✏️) icon on any assistant card to update its name, description, or instructions.
+- **Delete an Assistant**: Click the trash (🗑️) icon to remove an assistant you no longer need.
+
+## Customizing Assistant Instructions
+
+Each assistant can have its own set of instructions to guide its behavior. For example:
+
+```
+Act as a software engineering mentor focused on Python and JavaScript.
+Provide detailed explanations with code examples when relevant.
+Use markdown formatting for code blocks.
+```
+
+Or:
+
+```
+Respond in a casual, friendly tone. Keep explanations brief and use simple language.
+Provide examples when explaining complex topics.
+```
+
+Or:
+
+```
+Translate any text I send into French. Preserve the original formatting
+and keep technical terms in English.
+```
+
+## Best Practices
+- Be clear and specific about the desired behavior for each assistant.
+- Include preferences for formatting, tone, or style.
+- Include examples to increase the model's compliance with your request.
+- Use different assistants for different tasks (e.g., translation, travel planning, financial advice).
+
+
+## Switching and Managing Assistants in Chat
+
+You can quickly switch between assistants, or create and edit them, directly from the Chat screen using the
+assistant dropdown menu at the top:
+
+
+
+- Click the assistant's name (e.g., "Travel Planner") at the top of the Chat screen to open the dropdown menu.
+- The dropdown lists all of your assistants. Click on any of the assistants available to switch to it for the
+current chat session.
+- To create a new assistant, select **Create Assistant** at the bottom of the dropdown. This opens the Add Assistant dialog:
+
+
+
+- To edit an existing assistant, click the gear (⚙️) icon next to its name in the dropdown. This opens the Edit Assistant dialog:
+
+
+
+### Add/Edit Assistant Dialogs
+- Set an (optional) emoji and name for your assistant.
+- Optionally add a description.
+- Enter detailed instructions to guide the assistant's behavior.
+- Adjust the predefined parameters (like Temperature, Top P, etc.) or add custom parameters as needed.
+- Click **Save** to apply your changes.
+
+This workflow allows you to seamlessly manage and switch between assistants while chatting, making it easy to tailor
+Jan to your needs in real time.
diff --git a/website/src/content/docs/jan/data-folder.mdx b/website/src/content/docs/jan/data-folder.mdx
new file mode 100644
index 000000000..629b2e2b8
--- /dev/null
+++ b/website/src/content/docs/jan/data-folder.mdx
@@ -0,0 +1,217 @@
+---
+title: Jan Data Folder
+description: A guide to Jan's data structure.
+sidebar_position: 2
+keywords:
+ [
+ Jan,
+ Customizable Intelligence, LLM,
+ local AI,
+ privacy focus,
+ free and open source,
+ private and offline,
+ conversational AI,
+ no-subscription fee,
+ large language models,
+ quickstart,
+ getting started,
+ using AI model,
+ ]
+---
+
+
+
+# Jan Data Folder
+Jan stores your data locally in JSON format. Your data is yours alone.
+
+## Open Jan Data Folder
+
+Via Jan:
+1. Settings () > Advanced Settings
+2. Click
+
+
+
+
+
+Via Terminal:
+
+```bash
+# Windows
+cd %APPDATA%/Jan/data
+
+# Mac
+cd ~/Library/Application\ Support/Jan/data
+
+# Linux
+cd $XDG_CONFIG_HOME/Jan/data # Custom install
+cd ~/.config/Jan/data # Default install
+```
+
+## Directory Structure
+
+
+
+```sh
+/assistants/
+ /jan/
+ assistant.json
+/engines/
+ /llama.cpp/
+/extensions/
+ extensions.json
+/@janhq/
+ /assistant-extension/
+ /conversational-extension/
+ /download-extension/
+ /engine-management-extension/
+ /hardware-management-extension/
+ /inference-cortex-extension/
+ /model-extension/
+/files/
+/logs/
+ app.log
+/models/
+ /huggingface.co/
+ /Model_Provider_A/
+ /Model_A
+ model_A.gguf
+ model_A.yaml
+/threads/
+ /thread_A/
+ messages.jsonl
+ thread.json
+
+```
+
+### `assistants/`
+Where AI personalities live. The default one (`/assistants/jan/`):
+
+```json
+{
+ "avatar": "👋",
+ "id": "jan",
+ "object": "assistant",
+ "created_at": 1750945742.536,
+ "name": "Jan",
+ "description": "Jan is a helpful AI assistant that can use tools and help complete tasks for its users.",
+ "model": "*",
+ "instructions": "You have access to a set of tools to help you answer the user’s question. You can use only one tool per message, and you’ll receive the result of that tool in the user’s next response. To complete a task, use tools step by step—each step should be guided by the outcome of the previous one.\nTool Usage Rules:\n1. Always provide the correct values as arguments when using tools. Do not pass variable names—use actual values instead.\n2. You may perform multiple tool steps to complete a task.\n3. Avoid repeating a tool call with exactly the same parameters to prevent infinite loops.",
+ "tools": [
+ {
+ "type": "retrieval",
+ "enabled": false,
+ "useTimeWeightedRetriever": false,
+ "settings": {
+ "top_k": 2,
+ "chunk_size": 1024,
+ "chunk_overlap": 64,
+ "retrieval_template": "Use the following pieces of context to answer the question at the end.\n----------------\nCONTEXT: {CONTEXT}\n----------------\nQUESTION: {QUESTION}\n----------------\nHelpful Answer:"
+ }
+ }
+ ],
+ "file_ids": []
+}
+```
+
+Parameters:
+
+| Parameter | Description | Type | Default |
+|------------------------|--------------------------------------------------------------|---------|---------|
+| id | Assistant identifier | string | jan |
+| avatar | Assistant image | string | None |
+| object | OpenAI API compatibility marker | string | None |
+| created_at | Creation timestamp | string | None |
+| name | Display name | string | Jan |
+| description | Role description | string | Default |
+| model | Allowed models (* = all) | string | * |
+| instructions | Default thread instructions | string | None |
+| file_ids | OpenAI compatibility field | string | None |
+| tools | Available tools (retrieval only currently) | array | retrieval|
+| type | Tool type | string | retrieval|
+| enabled | Tool status | boolean | false |
+| useTimeWeightedRetriever| Time-weighted retrieval toggle | boolean | false |
+| settings | Tool configuration | object | None |
+| top_k | Max retrieval results | number | 2 |
+| chunk_size | Text chunk size | number | 1024 |
+| chunk_overlap | Chunk overlap amount | number | 64 |
+| retrieval_template | Response format template | string | None |
+
+### `extensions/`
+Add-on central. Organization extensions live in `@janhq/`, solo ones in root.
+
+### `logs/`
+Debugging headquarters (`/logs/app.log`):
+- **[APP]**: Core logs
+- **[SERVER]**: API drama
+- **[SPECS]**: Hardware confessions
+
+### `models/`
+The silicon brain collection. Each model has its own `model.json`.
+
+
+
+### `threads/`
+Chat archive. Each thread (`/threads/jan_unixstamp/`) contains:
+
+- `messages.jsonl`:
+```json
+ {
+ "completed_at": 0,
+ "content": [
+ {
+ "text": {
+ "annotations": [],
+ "value": "Hello! I can help you with various tasks. I can search for information on the internet, including news, videos, images, shopping, and more. I can also scrape webpages to extract specific information. Let me know what you need!"
+ },
+ "type": "text"
+ }
+ ],
+ "created_at": 1751012639307,
+ "id": "01JYR7S0JB5ZBGMJV52KWMW5VW",
+ "metadata": {
+ "assistant": {
+ "avatar": "👋",
+ "id": "jan",
+ "instructions": "You have access to a set of tools to help you answer the user's question. You can use only one tool per message, and you'll receive the result of that tool in the user's next response. To complete a task, use tools step by step—each step should be guided by the outcome of the previous one.\nTool Usage Rules:\n1. Always provide the correct values as arguments when using tools. Do not pass variable names—use actual values instead.\n2. You may perform multiple tool steps to complete a task.\n3. Avoid repeating a tool call with exactly the same parameters to prevent infinite loops.",
+ "name": "Jan",
+ "parameters": ""
+ },
+ "tokenSpeed": {
+ "lastTimestamp": 1751012637097,
+ "message": "01JYR7S0GW5M9PSHMRE7T8VQJM",
+ "tokenCount": 49,
+ "tokenSpeed": 22.653721682847895
+ }
+ },
+ "object": "thread.message",
+ "role": "assistant",
+ "status": "ready",
+ "thread_id": "8f2c9922-db49-4d1e-8620-279c05baf2d0",
+ "type": "text"
+ }
+```
+
+- `thread.json`:
+
+| Parameter | Description |
+|------------|------------------------------------------------|
+| assistants | Assistant configuration clone |
+| created | Creation timestamp |
+| id | Thread identifier |
+| metadata | Additional thread data |
+| model | Active model settings |
+| object | OpenAI compatibility marker |
+| title | Thread name |
+| updated | Updated timestamp |
+
+
+
+
+## Delete Jan Data
+Uninstall guides: [Mac](/docs/desktop/mac#step-2-clean-up-data-optional),
+[Windows](/docs/desktop/windows#step-2-handle-jan-data), or [Linux](/docs/desktop/linux#uninstall-jan).
diff --git a/website/src/content/docs/jan/explanation/llama-cpp.mdx b/website/src/content/docs/jan/explanation/llama-cpp.mdx
new file mode 100644
index 000000000..7f4a49c51
--- /dev/null
+++ b/website/src/content/docs/jan/explanation/llama-cpp.mdx
@@ -0,0 +1,204 @@
+---
+title: llama.cpp Engine
+description: Understand and configure Jan's local AI engine for running models on your hardware.
+keywords:
+ [
+ Jan,
+ Customizable Intelligence, LLM,
+ local AI,
+ privacy focus,
+ free and open source,
+ private and offline,
+ conversational AI,
+ no-subscription fee,
+ large language models,
+ Llama CPP integration,
+ llama.cpp Engine,
+ Intel CPU,
+ AMD CPU,
+ NVIDIA GPU,
+ AMD GPU Radeon,
+ Apple Silicon,
+ Intel Arc GPU,
+ ]
+---
+
+
+# Local AI Engine (llama.cpp)
+
+## What is llama.cpp?
+
+llama.cpp is the engine that runs AI models locally on your computer. Think of it as the software that takes an AI model file and makes it actually work on your hardware - whether that's your CPU, graphics card, or Apple's M-series chips.
+
+Originally created by Georgi Gerganov, llama.cpp is designed to run large language models efficiently on consumer hardware without requiring specialized AI accelerators or cloud connections.
+
+## Why This Matters
+
+**Privacy**: Your conversations never leave your computer
+**Cost**: No monthly subscription fees or API costs
+**Speed**: No internet required once models are downloaded
+**Control**: Choose exactly which models to run and how they behave
+
+## Accessing Engine Settings
+
+Find llama.cpp settings at **Settings** () > **Local Engine** > **llama.cpp**:
+
+
+
+
+
+## Engine Management
+
+| Feature | What It Does | When You Need It |
+|---------|-------------|------------------|
+| **Engine Version** | Shows which version of llama.cpp you're running | Check compatibility with newer models |
+| **Check Updates** | Downloads newer engine versions | When new models require updated engine |
+| **Backend Selection** | Choose the version optimized for your hardware | After installing new graphics cards or when performance is poor |
+
+## Hardware Backends
+
+Jan offers different backend versions optimized for your specific hardware. Think of these as different "drivers" - each one is tuned for particular processors or graphics cards.
+
+
+
+
+
+
+
+### NVIDIA Graphics Cards (Recommended for Speed)
+Choose based on your CUDA version (check NVIDIA Control Panel):
+
+**For CUDA 12.0:**
+- `llama.cpp-avx2-cuda-12-0` (most common)
+- `llama.cpp-avx512-cuda-12-0` (newer Intel/AMD CPUs)
+
+**For CUDA 11.7:**
+- `llama.cpp-avx2-cuda-11-7` (most common)
+- `llama.cpp-avx512-cuda-11-7` (newer Intel/AMD CPUs)
+
+### CPU Only (No Graphics Card Acceleration)
+- `llama.cpp-avx2` (most modern CPUs)
+- `llama.cpp-avx512` (newer Intel/AMD CPUs)
+- `llama.cpp-avx` (older CPUs)
+- `llama.cpp-noavx` (very old CPUs)
+
+### Other Graphics Cards
+- `llama.cpp-vulkan` (AMD, Intel Arc, some others)
+
+
+
+
+
+
+
+### NVIDIA Graphics Cards
+Same CUDA options as Windows:
+- `llama.cpp-avx2-cuda-12-0` (most common)
+- `llama.cpp-avx2-cuda-11-7` (older drivers)
+
+### CPU Only
+- `llama.cpp-avx2` (most modern CPUs)
+- `llama.cpp-avx512` (newer Intel/AMD CPUs)
+- `llama.cpp-arm64` (ARM processors like Raspberry Pi)
+
+### Other Graphics Cards
+- `llama.cpp-vulkan` (AMD, Intel graphics)
+
+
+
+
+
+### Apple Silicon (M1/M2/M3/M4)
+- `llama.cpp-mac-arm64` (recommended)
+
+### Intel Macs
+- `llama.cpp-mac-amd64`
+
+
+
+
+
+
+
+## Performance Settings
+
+These control how efficiently models run:
+
+| Setting | What It Does | Recommended Value | Impact |
+|---------|-------------|------------------|---------|
+| **Continuous Batching** | Process multiple requests at once | Enabled | Faster when using multiple tools or having multiple conversations |
+| **Parallel Operations** | How many requests to handle simultaneously | 4 | Higher = more multitasking, but uses more memory |
+| **CPU Threads** | How many processor cores to use | Auto-detected | More threads can speed up CPU processing |
+
+## Memory Settings
+
+These control how models use your computer's memory:
+
+| Setting | What It Does | Recommended Value | When to Change |
+|---------|-------------|------------------|----------------|
+| **Flash Attention** | More efficient memory usage | Enabled | Leave enabled unless you have problems |
+| **Caching** | Remember recent conversations | Enabled | Speeds up follow-up questions |
+| **KV Cache Type** | Memory precision trade-off | f16 | Change to q8_0 or q4_0 if running out of memory |
+| **mmap** | Load models more efficiently | Enabled | Helps with large models |
+| **Context Shift** | Handle very long conversations | Disabled | Enable for very long chats or multiple tool calls |
+
+### KV Cache Types Explained
+- **f16**: Most stable, uses more memory
+- **q8_0**: Balanced memory usage and quality
+- **q4_0**: Uses least memory, slight quality loss
+
+## Troubleshooting Common Issues
+
+**Models won't load:**
+- Try a different backend (switch from CUDA to CPU or vice versa)
+- Check if you have enough RAM/VRAM
+- Update to latest engine version
+
+**Very slow performance:**
+- Make sure you're using GPU acceleration (CUDA/Metal/Vulkan backend)
+- Increase GPU Layers in model settings
+- Close other memory-intensive programs
+
+**Out of memory errors:**
+- Reduce Context Size in model settings
+- Switch KV Cache Type to q8_0 or q4_0
+- Try a smaller model variant
+
+**Random crashes:**
+- Switch to a more stable backend (try avx instead of avx2)
+- Disable overclocking if you have it enabled
+- Update graphics drivers
+
+## Quick Setup Guide
+
+**For most users:**
+1. Use the default backend that Jan installs
+2. Leave all performance settings at defaults
+3. Only adjust if you experience problems
+
+**If you have an NVIDIA graphics card:**
+1. Download the appropriate CUDA backend
+2. Make sure GPU Layers is set high in model settings
+3. Enable Flash Attention
+
+**If models are too slow:**
+1. Check you're using GPU acceleration
+2. Try enabling Continuous Batching
+3. Close other applications using memory
+
+**If running out of memory:**
+1. Change KV Cache Type to q8_0
+2. Reduce Context Size in model settings
+3. Try a smaller model
+
+
diff --git a/website/src/content/docs/jan/explanation/model-parameters.mdx b/website/src/content/docs/jan/explanation/model-parameters.mdx
new file mode 100644
index 000000000..b03b9222b
--- /dev/null
+++ b/website/src/content/docs/jan/explanation/model-parameters.mdx
@@ -0,0 +1,109 @@
+---
+title: Model Parameters
+description: Customize how your AI models behave and perform.
+keywords:
+ [
+ Jan,
+ Customizable Intelligence, LLM,
+ local AI,
+ privacy focus,
+ free and open source,
+ private and offline,
+ conversational AI,
+ no-subscription fee,
+ large language models,
+ model settings,
+ parameters,
+ ]
+---
+
+# Model Parameters
+
+Model parameters control how your AI thinks and responds. Think of them as the AI's personality settings and performance controls.
+
+## How to Access Settings
+
+**For individual conversations:**
+- In **Threads**, click the **gear icon** next to your selected model
+
+**For permanent model settings:**
+- Go to **Settings > Model Providers > Llama.cpp**, click the **gear icon** next to a model
+
+**For model capabilities:**
+- Click the **edit button** next to a model to enable features like vision or tools
+
+## Performance Settings (Gear Icon)
+
+These settings control how the model thinks and performs:
+
+| Setting | What It Does | Simple Explanation |
+|---------|-------------|-------------------|
+| **Context Size** | How much text the model remembers | Like the model's working memory. Larger = remembers more of your conversation, but uses more computer memory. |
+| **GPU Layers** | How much work your graphics card does | More layers on GPU = faster responses, but needs more graphics memory. Start high and reduce if you get errors. |
+| **Temperature** | How creative vs. predictable responses are | Low (0.1-0.3) = focused, consistent answers. High (0.7-1.0) = creative, varied responses. Try 0.7 for general use. |
+| **Top K** | How many word choices the model considers | Smaller numbers (20-40) = more focused. Larger numbers (80-100) = more variety. Most people don't need to change this. |
+| **Top P** | Another way to control word variety | Works with Top K. Values like 0.9 work well. Lower = more focused, higher = more creative. |
+| **Min P** | Minimum chance a word needs to be chosen | Prevents very unlikely words. Usually fine at default settings. |
+| **Repeat Last N** | How far back to check for repetition | Helps prevent the model from repeating itself. Default values usually work well. |
+| **Repeat Penalty** | How much to avoid repeating words | Higher values (1.1-1.3) reduce repetition. Too high makes responses awkward. |
+| **Presence Penalty** | Encourages talking about new topics | Higher values make the model explore new subjects instead of staying on one topic. |
+| **Frequency Penalty** | Reduces word repetition | Similar to repeat penalty but focuses on how often words are used. |
+
+
+
+## Model Capabilities (Edit Button)
+
+These toggle switches enable special features:
+
+- **Vision**: Let the model see and analyze images you share
+- **Tools**: Enable advanced features like web search, file operations, and code execution
+- **Embeddings**: Allow the model to create numerical representations of text (for advanced users)
+- **Web Search**: Let the model search the internet for current information
+- **Reasoning**: Enable step-by-step thinking for complex problems
+
+
+
+
+## Hardware Settings
+
+These control how efficiently the model runs on your computer:
+
+### GPU Layers
+Think of your model as a stack of layers, like a cake. Each layer can run on either your main processor (CPU) or graphics card (GPU). Your graphics card is usually much faster.
+
+- **More GPU layers** = Faster responses, but uses more graphics memory
+- **Fewer GPU layers** = Slower responses, but uses less graphics memory
+
+Start with the maximum number and reduce if you get out-of-memory errors.
+
+### Context Length
+This is like the model's short-term memory - how much of your conversation it can remember at once.
+
+- **Longer context** = Remembers more of your conversation, better for long discussions
+- **Shorter context** = Uses less memory, runs faster, but might "forget" earlier parts of long conversations
+
+
+
+## Quick Setup Guide
+
+**For most users:**
+1. Enable **Tools** if you want web search and code execution
+2. Set **Temperature** to 0.7 for balanced creativity
+3. Max out **GPU Layers** (reduce only if you get memory errors)
+4. Leave other settings at defaults
+
+**For creative writing:**
+- Increase **Temperature** to 0.8-1.0
+- Increase **Top P** to 0.95
+
+**For factual/technical work:**
+- Decrease **Temperature** to 0.1-0.3
+- Enable **Tools** for web search and calculations
+
+**Troubleshooting:**
+- **Responses too repetitive?** Increase Temperature or Repeat Penalty
+- **Out of memory errors?** Reduce GPU Layers or Context Size
+- **Responses too random?** Decrease Temperature
+- **Model running slowly?** Increase GPU Layers (if you have VRAM) or reduce Context Size
diff --git a/website/src/content/docs/jan/installation/linux.mdx b/website/src/content/docs/jan/installation/linux.mdx
new file mode 100644
index 000000000..3bb55fae3
--- /dev/null
+++ b/website/src/content/docs/jan/installation/linux.mdx
@@ -0,0 +1,295 @@
+---
+title: Linux
+description: Get started quickly with Jan, an AI chat application that runs 100% offline on your desktop & mobile (*coming soon*).
+keywords:
+ [
+ Jan,
+ Customizable Intelligence, LLM,
+ local AI,
+ privacy focus,
+ free and open source,
+ private and offline,
+ conversational AI,
+ no-subscription fee,
+ large language models,
+ quickstart,
+ getting started,
+ using AI model,
+ installation,
+ "desktop"
+ ]
+---
+
+
+
+
+
+# Linux Installation
+Instructions for installing Jan on Linux.
+
+## Compatibility
+System requirements:
+
+
+
+
+
+#### Debian-based (Supports `.deb` and `AppImage`)
+
+- Debian
+- Ubuntu and derivatives:
+ - Ubuntu Desktop LTS (official)/Ubuntu Server LTS (only for server)
+ - Edubuntu
+ - Kubuntu
+ - Lubuntu
+ - Ubuntu Budgie
+ - Ubuntu Cinnamon
+ - Ubuntu Kylin
+ - Ubuntu MATE
+ - Linux Mint
+ - Pop!_OS
+
+#### RHEL-based (Supports `.rpm` and `AppImage`)
+
+- RHEL-based (Server only)
+- Fedora
+
+#### Arch-based
+
+- Arch Linux
+- SteamOS
+
+#### Independent
+
+- openSUSE
+
+
+
+
+- Haswell processors (Q2 2013) and newer
+- Tiger Lake (Q3 2020) and newer for Celeron and Pentium processors
+- Excavator processors (Q2 2015) and newer
+
+
+
+
+
+- 8GB → up to 3B parameter models (int4)
+- 16GB → up to 7B parameter models (int4)
+- 32GB → up to 13B parameter models (int4)
+
+
+
+
+
+- 6GB → up to 3B parameter models (int4)
+- 8GB → up to 7B parameter models (int4)
+- 12GB → up to 13B parameter models (int4)
+
+
+
+
+
+Minimum 10GB of free disk space required.
+
+
+
+
+## Install Jan
+
+Installation steps:
+
+
+
+### Step 1: Download Application
+
+Available releases:
+
+
+Stable release:
+ - Ubuntu: [jan.deb](https://app.jan.ai/download/latest/linux-amd64-deb)
+ - Others: [Jan.AppImage](https://app.jan.ai/download/latest/linux-amd64-appimage)
+- Official Website: https://jan.ai/download
+
+
+
+
+Beta release:
+ - Ubuntu: [jan.deb](https://app.jan.ai/download/beta/linux-amd64-deb)
+ - Others: [Jan.AppImage](https://app.jan.ai/download/beta/linux-amd64-appimage)
+
+
+
+
+
+Development build:
+ - Ubuntu: [jan.deb](https://app.jan.ai/download/nightly/linux-amd64-deb)
+ - Others: [Jan.AppImage](https://app.jan.ai/download/nightly/linux-amd64-appimage)
+
+
+
+
+
+### Step 2: Install Application
+
+Installation commands:
+
+
+
+
+##### dpkg
+
+```bash
+sudo dpkg -i jan-linux-amd64-{version}.deb
+```
+
+##### apt-get
+
+```bash
+sudo apt-get install ./jan-linux-amd64-{version}.deb
+```
+
+
+
+
+
+From the terminal, run the following commands:
+
+```bash
+chmod +x jan-linux-x86_64-{version}.AppImage
+./jan-linux-x86_64-{version}.AppImage
+```
+
+
+
+
+
+
+
+## Data Folder
+
+Default locations:
+
+```bash
+# Custom installation directory
+$XDG_CONFIG_HOME = /home/username/custom_config
+
+# or
+
+# Default installation directory
+~/.config/Jan/data
+
+```
+See [Jan Data Folder](/docs/data-folder) for details.
+
+
+## GPU Acceleration
+Configuration for GPU support:
+
+
+
+
+
+
+### Step 1: Verify Hardware & Install Dependencies
+
+**1.1. Check GPU Detection**
+
+```sh
+lspci | grep -i nvidia
+```
+
+**1.2. Install Required components**
+
+**NVIDIA Driver:**
+
+1. Install the [NVIDIA Driver](https://www.nvidia.com/en-us/drivers/), ideally via your package manager.
+2. Verify:
+
+```sh
+nvidia-smi
+```
+
+**CUDA Toolkit:**
+
+1. Install the [CUDA toolkit](https://developer.nvidia.com/cuda-downloads), ideally from your package manager (**11.7+**)
+2. Verify:
+
+```sh
+nvcc --version
+```
+
+**Additional Requirements:**
+
+```sh
+sudo apt update
+sudo apt install gcc-11 g++-11 cpp-11
+export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda/lib64
+```
+[Documentation](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#post-installation-actions)
+
+### Step 2: Enable GPU Acceleration
+
+1. Navigate to **Settings** () > **Local Engine** > **Llama.cpp**
+2. Select appropriate backend in **llama-cpp Backend**. Details in our [guide](/docs/local-engines/llama-cpp).
+
+
+
+
+
+
+
+
+Requires Vulkan support.
+
+1. Navigate to **Settings** () > **Hardware** > **GPUs**
+2. Select appropriate backend in **llama-cpp Backend**. Details in our [guide](/docs/local-engines/llama-cpp).
+
+
+
+
+Requires Vulkan support.
+
+1. Navigate to **Settings** () > **Hardware** > **GPUs**
+2. Select appropriate backend in **llama-cpp Backend**. Details in our [guide](/docs/local-engines/llama-cpp).
+
+
+
+
+## Uninstall Jan
+
+Removal commands:
+
+
+
+```bash
+sudo apt-get remove jan
+rm -rf Jan
+rm -rf ~/.config/Jan/data
+rm -rf ~/.config/Jan/cache
+```
+
+
+
+```bash
+rm jan-linux-x86_64-{version}.AppImage
+rm -rf ~/.config/Jan
+```
+
+
+
+
diff --git a/website/src/content/docs/jan/installation/mac.mdx b/website/src/content/docs/jan/installation/mac.mdx
new file mode 100644
index 000000000..a40136c44
--- /dev/null
+++ b/website/src/content/docs/jan/installation/mac.mdx
@@ -0,0 +1,166 @@
+---
+title: Mac
+description: Get started quickly with Jan - a local AI that runs on your computer. Install Jan and pick your model to start chatting.
+keywords:
+ [
+ Jan,
+ Customizable Intelligence, LLM,
+ local AI,
+ privacy focus,
+ free and open source,
+ private and offline,
+ conversational AI,
+ no-subscription fee,
+ large language models,
+ quickstart,
+ getting started,
+ using AI model,
+ installation,
+ "desktop"
+ ]
+---
+
+
+
+
+# Mac Installation
+Jan runs natively on both Apple Silicon and Intel-based Macs.
+
+## Compatibility
+### Minimum Requirements
+Your Mac needs:
+- **Operating System:** macOS 13.6 or higher
+- **Memory:**
+ - 8GB → up to 3B parameter models
+ - 16GB → up to 7B parameter models
+ - 32GB → up to 13B parameter models
+- **Storage:** 10GB+ free space
+
+### Mac Performance Guide
+
+**Apple Silicon (M1, M2, M3)**
+- Metal acceleration enabled by default
+- GPU-accelerated processing
+
+**Intel-based Mac**
+- CPU processing only
+- Standard performance
+
+_Check your Mac's processor: Apple menu → About This Mac_
+
+## Install Jan
+
+Installation steps:
+
+
+
+### Step 1: Download Application
+
+Select version:
+
+
+Get Jan from here:
+
+- [Download Jan's Stable Version](https://app.jan.ai/download/latest/mac-universal)
+- Official Website: https://jan.ai/download
+
+
+
+
+Beta: New features with potential instability.
+
+[Download Jan's Beta Version](https://app.jan.ai/download/beta/mac-universal)
+
+
+
+
+
+Nightly: Latest features, less stable.
+
+[Download Jan's Nightly Version](https://app.jan.ai/download/nightly/mac-universal)
+
+
+
+
+
+
+### Step 2: Install Application
+
+1. Open the Jan installer (`.dmg` file)
+2. Drag Jan to **Applications**
+3. Wait a moment
+4. Launch Jan
+
+
+
+
+## Jan Data Folder
+
+Default location:
+
+```sh
+# Default installation directory
+~/Library/Application\ Support/Jan/data
+```
+See [Jan Data Folder](/docs/data-folder) for details.
+
+
+## Uninstall Jan
+
+
+
+### Step 1: Remove Application
+1. Close Jan if it's running
+2. Open **Finder**
+3. Go to **Applications**
+4. Find Jan
+5. Pick your removal method:
+ - Drag to **Trash**
+ - Right-click → **Move to Trash**
+ - **Command-Delete**
+
+### Step 2: Clean Up Data (Optional)
+
+Run this in **Terminal** to remove all data:
+```bash
+rm -rf ~/Library/Application\ Support/Jan/data
+```
+
+
+
+
+{/* ## FAQs
+
+## What are Nightly Releases, and how can I access them?
+
+Nightly Releases allow you to test new features and previews of upcoming stable releases. You can download
+them from Jan's GitHub repository. However, remember that these builds might contain bugs and crash frequently.
+
+## Can I move the Jan data folder to a different location?
+
+Yes, you can move the Jan data folder.
+
+## How do I enable GPU acceleration for better performance
+
+Depending on your Mac type (Apple Silicon or Intel), you won't be able to utilize the GPU acceleration feature
+if you have a Mac with an Intel processor.
+
+## Can I recover the deleted Jan data folder after uninstallation?
+
+No, it cannot be restored once you delete the Jan data folder during uninstallation.
+
+
+
+*/}
diff --git a/website/src/content/docs/jan/installation/windows.mdx b/website/src/content/docs/jan/installation/windows.mdx
new file mode 100644
index 000000000..e8c2ced1f
--- /dev/null
+++ b/website/src/content/docs/jan/installation/windows.mdx
@@ -0,0 +1,209 @@
+---
+title: Windows
+description: Run AI models locally on your Windows machine with Jan. Quick setup guide for local inference and chat.
+keywords:
+ [
+ Jan,
+ Customizable Intelligence, LLM,
+ local AI,
+ privacy focus,
+ free and open source,
+ private and offline,
+ conversational AI,
+ no-subscription fee,
+ large language models,
+ quickstart,
+ getting started,
+ using AI model,
+ installation,
+ "desktop"
+ ]
+---
+
+
+
+# Windows Installation
+
+## Compatibility
+
+**System requirements:**
+- **Operating System**: Windows 10 or higher.
+- **CPU**
+
+
+
+- Intel: Haswell (Q2 2013) or newer
+- Intel Celeron/Pentium: Tiger Lake (Q3 2020) or newer
+
+
+- Excavator processors (Q2 2015) and newer.
+
+
+
+
+
+**Memory (RAM)**
+- 8GB → up to 3B parameter models (int4)
+- 16GB → up to 7B parameter models (int4)
+- 32GB → up to 13B parameter models (int4)
+
+
+
+**GPU**:
+- 6GB → up to 3B parameter models
+- 8GB → up to 7B parameter models
+- 12GB → up to 13B parameter models
+
+
+
+**Storage:** 10GB free space minimum for app and models
+
+
+## Install Jan
+
+
+
+### Step 1: Download Application
+
+
+
+- [Download Stable Jan](https://app.jan.ai/download/latest/win-x64)
+- Official Website: [Download Jan](https://jan.ai/download)
+
+
+
+Beta: Contains newer features but may be unstable
+
+[Download Beta Jan](https://app.jan.ai/download/beta/win-x64)
+
+
+
+
+
+Nightly: Development build with latest features
+
+[Download Nightly Jan](https://app.jan.ai/download/nightly/win-x64)
+
+
+
+
+
+### Step 2: Install Application
+
+1. Run the downloaded `.exe` file
+2. Wait for installation to complete
+3. Launch Jan
+
+
+
+## Data Folder
+
+Default installation path:
+
+```sh
+# Default installation directory
+C:\Users\%USERNAME%\AppData\Roaming\Jan\data
+```
+
+See [Jan Data Folder](/docs/data-folder) for complete folder structure details.
+
+
+## GPU Acceleration
+
+
+
+
+
+
+
+### Step 1: Verify Hardware & Install Dependencies
+**1.1. Check GPU Detection**
+
+Verify GPU is recognized:
+- Right-click desktop > NVIDIA Control Panel
+- Or check Device Manager > Display Adapters
+
+**1.2. Install Required components**
+**NVIDIA Driver:**
+1. Install [NVIDIA Driver](https://www.nvidia.com/en-us/drivers/) (version **470.63.01 or higher**)
+2. Verify installation:
+
+```sh
+nvidia-smi
+```
+
+**CUDA Toolkit:**
+1. Install [CUDA toolkit](https://developer.nvidia.com/cuda-downloads) (**11.7 or higher**)
+2. Verify installation:
+
+```sh
+nvcc --version
+```
+### Step 2: Enable GPU Acceleration
+
+Navigate to **Settings** () > **Hardware** > **GPUs**
+and toggle the **ON** switch if not enabled.
+
+
+
+
+
+
+AMD GPUs require **Vulkan** support.
+
+Navigate to **Settings** () > **Hardware** > **GPUs**
+and toggle the **ON** switch if not enabled.
+
+
+
+Intel Arc GPUs require **Vulkan** support.
+
+Navigate to **Settings** () > **Hardware** > **GPUs**
+and toggle the **ON** switch if not enabled.
+
+
+
+
+## Uninstall Jan
+
+
+
+### Step 1: Remove Application through Control Panel
+
+1. Open **Control Panel**
+2. Go to **Programs** section
+3. Click **Uninstall Program**
+4. Search for **Jan**
+5. Click the **Three Dots Icon** > **Uninstall**
+6. Click **Uninstall** again to confirm
+7. Click **OK**
+
+### Step 2: Clean Up Remaining Files
+
+Remove app data:
+
+1. Navigate to `C:\Users\[username]\AppData\Roaming`
+2. Delete Jan folder
+
+or via **Terminal**:
+
+```sh
+cd C:\Users\%USERNAME%\AppData\Roaming
+rmdir /S Jan
+```
+
+
+
diff --git a/website/src/content/docs/jan/jan-models/jan-nano-128.mdx b/website/src/content/docs/jan/jan-models/jan-nano-128.mdx
new file mode 100644
index 000000000..9fb5e0544
--- /dev/null
+++ b/website/src/content/docs/jan/jan-models/jan-nano-128.mdx
@@ -0,0 +1,139 @@
+---
+title: Jan Nano 128k
+description: Jan Models
+keywords:
+ [
+ Jan,
+ Jan Models,
+ Jan Model,
+ Jan Model List,
+ Menlo Models,
+ Menlo Model,
+ Jan-Nano-Gguf,
+ ReZero,
+ Model Context Protocol,
+ MCP,
+ ]
+---
+
+import { Aside } from '@astrojs/starlight/components';
+
+# Jan-Nano-128k
+
+> Enabling deeper research through extended context understanding.
+
+Jan-Nano-128k represents a notable advancement in compact language models for different applications. Building upon the
+success of Jan-Nano-32k, this enhanced version features a native 128k context window that enables deeper, more comprehensive
+research capabilities without the performance degradation typically associated with context extension methods.
+
+You can have a look at all of our models, and download them from the HuggingFace [Menlo Models page](https://huggingface.co/Menlo).
+
+**Key Improvements:**
+
+- 🔍 Deeper Research: Extended context allows for processing entire research papers, lengthy documents, and complex multi-turn conversations
+- ⚡ Native 128k Window: Built to handle long contexts efficiently, maintaining performance across the full context range
+- 📈 Enhanced Performance: Unlike traditional context extension methods, Jan-Nano-128k's performance remains consistent with longer contexts
+
+This model maintains full compatibility with Model Context Protocol (MCP) servers while dramatically expanding the scope of research
+tasks it can handle in a single session.
+
+
+## Why Jan-Nano-128k?
+
+Most small models hit a wall at 8-32k tokens. Jan-Nano-128k goes beyond this limitation with a native 128k context window—that's roughly
+300 pages of text or an entire novel's worth of information processed simultaneously.
+
+Unlike YaRN or PI methods that retrofit models beyond their limits and degrade performance, Jan-Nano-128k was architecturally rewired for
+128k contexts from the ground up. The result: an inverse scaling behavior where performance actually improves with longer contexts,
+maintaining consistent accuracy from 1k to 128k tokens as the model leverages more information for synthesis.
+
+
+
+
+**Applications unlocked:**
+- **Academic**: Extract key findings from 50+ papers simultaneously
+- **Legal**: Pinpoint relevant clauses across thousand-page contracts
+- **Code**: Trace specific functions through massive codebases
+- **Business**: Distill insights from quarters of financial data
+- **Content**: Maintain narrative coherence across book-length outputs
+
+**MCP Usage:** Jan-Nano-128k doesn't memorize, it orchestrates. With MCP integration, it becomes a research conductor that fetches dozens
+of sources, holds everything in active memory, extracts precisely what's needed, and synthesizes findings across a marathon research session. It's
+not about understanding every word; it's about finding the needle in a haystack of haystacks.
+
+## Evaluation
+
+Jan-Nano-128k has been rigorously evaluated on the SimpleQA benchmark using our MCP-based methodology, demonstrating superior performance compared to its predecessor:
+
+
+
+**Key findings:**
+- 15% improvement over Jan-Nano-32k on complex multi-document tasks
+- Consistent performance across all context lengths (no cliff at 64k like other extended models)
+- Superior citation accuracy when handling 10+ sources simultaneously
+
+## 🖥️ How to Run Locally
+
+### Demo
+
+
+
+### Quick Start Guide
+
+1. **Download Jan**
+2. **Download Jan-Nano-128k**
+3. **Enable MCP** — the Serper or Exa MCPs work very well with Jan-Nano-128k
+4. **Start researching**
+
+### Usage
+
+Deploy using VLLM:
+
+```bash
+vllm serve Menlo/Jan-nano-128k \
+ --host 0.0.0.0 \
+ --port 1234 \
+ --enable-auto-tool-choice \
+ --tool-call-parser hermes \
+ --rope-scaling '{"rope_type":"yarn","factor":3.2,"original_max_position_embeddings":40960}' --max-model-len 131072
+```
+
+Or with `llama-server` from `llama.cpp`:
+
+```bash
+llama-server ... --rope-scaling yarn --rope-scale 3.2 --yarn-orig-ctx 40960
+```
+
+**Note:** The chat template is included in the tokenizer. For troubleshooting, download the [Non-think chat template](https://qwen.readthedocs.io/en/latest/_downloads/c101120b5bebcc2f12ec504fc93a965e/qwen3_nonthinking.jinja).
+
+### Recommended Sampling Parameters
+
+```yaml
+Temperature: 0.7
+Top-p: 0.8
+Top-k: 20
+Min-p: 0.0
+```
+
+### Hardware Requirements
+- **Minimum**: 16GB RAM for Q4 quantization
+- **Recommended**: 24GB RAM for Q8 quantization
+- **Optimal**: 32GB+ RAM for full precision
+
+## 🤝 Community & Support
+- **Discussions**: [HuggingFace Community](https://huggingface.co/Menlo/Jan-nano-128k/discussions)
+- **Issues**: [GitHub Repository](https://github.com/menloresearch/deep-research/issues)
+- **Discord**: Join our research community for tips and best practices
diff --git a/website/src/content/docs/jan/jan-models/jan-nano-32.mdx b/website/src/content/docs/jan/jan-models/jan-nano-32.mdx
new file mode 100644
index 000000000..5f9d94534
--- /dev/null
+++ b/website/src/content/docs/jan/jan-models/jan-nano-32.mdx
@@ -0,0 +1,136 @@
+---
+title: Jan Nano 32k
+description: Jan-Nano-Gguf Model
+keywords:
+ [
+ Jan,
+ Jan Models,
+ Jan Model,
+ Jan Model List,
+ Menlo Models,
+ Menlo Model,
+ Jan-Nano-Gguf,
+ ReZero,
+ Model Context Protocol,
+ MCP,
+ ]
+---
+
+import { Aside } from '@astrojs/starlight/components';
+
+# Jan Nano
+
+
+
+## Why Jan Nano?
+
+Most language models face a fundamental tradeoff where powerful capabilities require a lot of computational resources. Jan
+Nano breaks this constraint through a focused design philosophy where instead of trying to know everything, it excels at
+knowing how to find anything.
+
+
+## What is Jan Nano?
+
+Jan Nano is a compact 4-billion parameter language model specifically designed and trained for deep research tasks.
+This model has been optimized to work seamlessly with Model Context Protocol (MCP) servers, enabling efficient integration
+with various research tools and data sources.
+
+The model and its different model variants are fully supported by Jan.
+
+
+
+
+## System Requirements
+
+- Minimum Requirements:
+ - 8GB RAM (with iQ4_XS quantization)
+ - 12GB VRAM (for Q8 quantization)
+ - CUDA-compatible GPU
+- Recommended Setup:
+ - 16GB+ RAM
+ - 16GB+ VRAM
+ - Latest CUDA drivers
+ - RTX 30/40 series or newer
+
+
+## Using Jan-Nano-32k
+
+**Step 1**
+Download Jan from [here](https://jan.ai/docs/desktop/).
+
+**Step 2**
+Go to the Hub tab, search for Jan-Nano-Gguf, and click the download button next to the model size best suited for your system.
+
+
+
+**Step 3**
+Go to **Settings** > **Model Providers** > **Llama.cpp**, click on the pencil icon, and enable tool use for Jan-Nano-Gguf.
+
+**Step 4**
+To take advantage of Jan-Nano's full capabilities, you need to enable MCP support. We're going to use it with Serper's
+API. You can get a free API key from [here](https://serper.dev/). Sign up and they will immediately generate one for you.
+
+**Step 5**
+Add the serper MCP to Jan via the **Settings** > **MCP Servers** tab.
+
+
+
+**Step 6**
+Open up a new chat and ask Jan-Nano to search the web for you.
+
+
+
+## Queries to Try
+
+Here are some example queries to showcase Jan-Nano's web search capabilities:
+
+1. **Current Events**: What are the latest developments in renewable energy adoption in Germany and Denmark?
+2. **International Business**: What is the current status of Tesla's Gigafactory in Berlin and how has it impacted the local economy?
+3. **Technology Trends**: What are the newest AI developments from Google, Microsoft, and Meta that were announced this week?
+4. **Global Weather**: What's the current weather forecast for Tokyo, Japan for the next 5 days?
+5. **Stock Market**: What are the current stock prices for Apple, Samsung, and Huawei, and how have they performed this month?
+6. **Sports Updates**: What are the latest results from the Premier League matches played this weekend?
+7. **Scientific Research**: What are the most recent findings about climate change impacts in the Arctic region?
+8. **Cultural Events**: What major music festivals are happening in Europe this summer and who are the headliners?
+9. **Health & Medicine**: What are the latest developments in mRNA vaccine technology and its applications beyond COVID-19?
+10. **Space Exploration**: What are the current missions being conducted by NASA, ESA, and China's space program?
+
+
+## FAQ
+
+- What are the recommended GGUF quantizations?
+ - Q8 GGUF is recommended for best performance
+ - iQ4_XS GGUF for very limited VRAM setups
+ - Avoid Q4_0 and Q4_K_M as they show significant performance degradation
+
+- Can I run this on a laptop with 8GB RAM?
+ - Yes, but use the recommended quantizations (iQ4_XS)
+ - Note that performance may be limited with Q4 quantizations
+
+- How much did the training cost?
+ - Training was done on internal A6000 clusters
+ - Estimated cost on RunPod would be under $100 using H200
+ - Hardware used:
+ - 8xA6000 for training code
+ - 4xA6000 for vllm server (inferencing)
+
+- What frontend should I use?
+ - Jan Beta (recommended) - Minimalistic and polished interface
+ - Download link: https://jan.ai/docs/desktop/beta
+
+- Getting Jinja errors in LM Studio?
+ - Use Qwen3 template from other LM Studio compatible models
+ - Disable “thinking” and add the required system prompt
+ - Fix coming soon in future GGUF releases
+- Having model loading issues in Jan?
+ - Use latest beta version: Jan-beta_0.5.18-rc6-beta
+ - Ensure proper CUDA support for your GPU
+ - Check VRAM requirements match your quantization choice
+
+## Resources
+
+- [Jan-Nano Model on Hugging Face](https://huggingface.co/Menlo/Jan-nano)
+- [Jan-Nano GGUF on Hugging Face](https://huggingface.co/Menlo/Jan-nano-gguf)
diff --git a/website/src/content/docs/jan/manage-models.mdx b/website/src/content/docs/jan/manage-models.mdx
new file mode 100644
index 000000000..01bc6ce91
--- /dev/null
+++ b/website/src/content/docs/jan/manage-models.mdx
@@ -0,0 +1,202 @@
+---
+title: Managing Models
+description: Manage your interaction with AI models locally.
+keywords:
+ [
+ Jan,
+ Customizable Intelligence, LLM,
+ local AI,
+ privacy focus,
+ free and open source,
+ private and offline,
+ conversational AI,
+ no-subscription fee,
+ large language models,
+ threads,
+ chat history,
+ thread history,
+ ]
+---
+
+
+# Model Management
+
+This guide shows you how to add, customize, and delete models within Jan.
+
+## Local Model
+
+Local models are managed through [Llama.cpp](https://github.com/ggerganov/llama.cpp), and these models are in a
+format called GGUF. When you run them locally, they will use your computer's memory (RAM) and processing power, so
+please make sure that you download models that match the hardware specifications for your operating system:
+- [Mac](/docs/desktop/mac#compatibility)
+- [Windows](/docs/desktop/windows#compatibility)
+- [Linux](/docs/desktop/linux#compatibility).
+
+### Adding Models
+
+#### 1. Download from Jan Hub (Recommended)
+
+The easiest way to get started is using Jan's built-in model hub (which is connected to [HuggingFace's Model Hub](https://huggingface.co/models)):
+1. Go to the **Hub** tab
+2. Browse available models and click on any model to see details about it
+3. Choose a model that fits your needs & hardware specifications
+4. Click **Download** on your chosen model
+
+
+
+
+
+
+
+#### 2. Import from [Hugging Face](https://huggingface.co/)
+
+You can download models with a direct link from Hugging Face:
+
+**Note:** Some models require a Hugging Face Access Token. Enter your token in **Settings > Model Providers > Hugging Face** before importing.
+
+1. Visit the [Hugging Face Models](https://huggingface.co/models) page.
+2. Find the model you want to use, make sure it is a GGUF file that fits in your computer.
+3. Copy the **model ID** (e.g., TheBloke/Mistral-7B-v0.1-GGUF)
+4. In Jan, paste the model ID to the **Search** bar in **Hub** page
+5. Select your preferred quantized version to download (if the option is available)
+
+
+**Copy the model ID.**
+
+
+
+**Paste it in Jan's Hub Search Bar.**
+
+
+
+#### 3. Import Local Files
+
+If you already have one or many GGUF model files on your computer:
+1. In Jan, go to **Settings > Model Providers > Llama.cpp**
+2. Click **Import** and select your GGUF file(s)
+3. Choose how you want to import:
+ - **Link Files:** Creates symbolic links to your model files (saves space)
+ - **Duplicate:** Makes a copy of model files in Jan's directory
+4. Click **Import** to complete (check the [Jan Data Folder](./data-folder) section for more info)
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#### 4. Manual Setup
+
+For advanced users who want to add a specific model that is not available within the Jan **Hub**:
+
+
+##### Step 1: Create Model File
+1. Navigate to the [Jan Data Folder](./data-folder)
+2. Open `models` folder
+3. Create a new **Folder** for your model
+4. Add your `model.gguf` file
+5. Add your `model.json` file with your configuration. Here's an example with "TinyLlama Chat 1.1B Q4":
+
+```json
+{
+ "sources": [
+ {
+ "filename": "tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf",
+ "url": "https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF/resolve/main/tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf"
+ }
+ ],
+ "id": "tinyllama-1.1b",
+ "object": "model",
+ "name": "TinyLlama Chat 1.1B Q4",
+ "version": "1.0",
+ "description": "TinyLlama is a tiny model with only 1.1B. It's a good model for less powerful computers.",
+ "format": "gguf",
+ "settings": {
+ "ctx_len": 4096,
+ "prompt_template": "<|system|>\n{system_message}<|user|>\n{prompt}<|assistant|>",
+ "llama_model_path": "tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf"
+ },
+ "parameters": {
+ "temperature": 0.7,
+ "top_p": 0.95,
+ "stream": true,
+ "max_tokens": 2048,
+ "stop": [],
+ "frequency_penalty": 0,
+ "presence_penalty": 0
+ },
+ "metadata": {
+ "author": "TinyLlama",
+ "tags": [
+ "Tiny",
+ "Foundation Model"
+ ],
+ "size": 669000000
+ },
+ "engine": "nitro"
+}
+```
+##### Step 2: Modify Model Parameters
+
+Key fields to configure:
+1. The **settings** object is where you can set the path or location of your model on your computer, the context
+length allowed, and the chat template expected by your model.
+2. The [**parameters**](/docs/model-parameters) are the adjustable settings that affect how your model operates or
+processes the data. The fields in the parameters object are typically general and can be used across different
+models. Here is an example of model parameters:
+
+```json
+"parameters":{
+ "temperature": 0.7,
+ "top_p": 0.95,
+ "stream": true,
+ "max_tokens": 4096,
+ "frequency_penalty": 0,
+ "presence_penalty": 0
+}
+```
+
+
+### Delete Models
+1. Go to **Settings > Model Providers > Llama.cpp**
+2. Find the model you want to remove
+3. Select the three dots icon next to it and select **Delete Model**
+
+
+
+
+
+
+## Cloud Models
+
+
+
+Jan supports connecting to various AI cloud providers that are OpenAI API-compatible, including: OpenAI (GPT-4o, o3,...),
+Anthropic (Claude), Groq, Mistral, and more.
+1. Navigate to **Settings** ()
+2. Under **Model Providers** section in the left sidebar, choose your preferred provider (OpenAI, Anthropic, etc.)
+3. Enter your API key
+4. The activated cloud models will be available in your model selector inside the **Chat** panel
+
+
+
+
+As soon as you add your key for a model provider like Anthropic or OpenAI, you will be able to pick one of their models to chat with.
+
+
diff --git a/website/src/content/docs/jan/mcp-examples/data-analysis/e2b.mdx b/website/src/content/docs/jan/mcp-examples/data-analysis/e2b.mdx
new file mode 100644
index 000000000..38e71b59a
--- /dev/null
+++ b/website/src/content/docs/jan/mcp-examples/data-analysis/e2b.mdx
@@ -0,0 +1,289 @@
+---
+title: E2B Code Sandbox
+description: Execute Python code securely in isolated sandbox environments with E2B.
+keywords:
+ [
+ Jan,
+ MCP,
+ Model Context Protocol,
+ E2B,
+ code execution,
+ sandbox,
+ data analysis,
+ Python,
+ secure computing,
+ tool calling,
+ ]
+---
+
+
+# E2B Code Sandbox MCP
+
+E2B MCP provides isolated Python execution environments. Your AI can run actual code instead of just describing what code might do.
+
+The real value emerges when you combine secure remote execution with Jan's flexible model selection. You can use
+local models for conversation and reasoning while offloading actual computation to E2B's sandboxes. This means you
+get the privacy and control of local models plus the computational power of cloud infrastructure, without the
+complexity of managing Python environments or dependencies locally.
+
+## Setup
+
+### Prerequisites
+
+- Jan with MCP enabled
+- E2B API key from [e2b.dev](https://e2b.dev/)
+- Node.js installed
+- Model with tool calling support
+
+### Configuration
+
+1. **Enable MCP**: Go to **Settings** > **MCP Servers**, toggle **Allow All MCP Tool Permission** ON
+
+
+
+
+
+2. **Get API Key**: Sign up at [e2b.dev](https://e2b.dev/), generate an API key
+
+
+
+Add a meaningful name to your key.
+
+
+
+3. **Add MCP Server**: Click `+` in MCP Servers section
+
+Configure:
+- **Server Name**: `e2b-server`
+- **Command**: `npx`
+- **Arguments**: `@e2b/mcp-server`
+- **Environment Variables**:
+ - Key: `E2B_API_KEY`
+ - Value: `your-api-key`
+
+
+
+4. **Verify**: Check server shows as active
+
+
+
+
+## Pre-installed Libraries
+
+The sandbox includes these packages by default:
+
+**Data Analysis & Science:**
+- `pandas` (1.5.3) - Data manipulation
+- `numpy` (1.26.4) - Numerical computing
+- `scipy` (1.12.0) - Scientific computing
+- `scikit-learn` (1.4.1) - Machine learning
+- `sympy` (1.12) - Symbolic mathematics
+
+**Visualization:**
+- `matplotlib` (3.8.3) - Static plots
+- `seaborn` (0.13.2) - Statistical visualization
+- `plotly` (5.19.0) - Interactive charts
+- `bokeh` (3.3.4) - Web-ready visualizations
+
+**Data Processing:**
+- `requests` (2.26.0) - HTTP requests
+- `beautifulsoup4` (4.12.3) - HTML/XML parsing
+- `openpyxl` (3.1.2) - Excel files
+- `python-docx` (1.1.0) - Word documents
+
+**Text & NLP:**
+- `nltk` (3.8.1) - Natural language processing
+- `spacy` (3.7.4) - Advanced NLP
+- `textblob` (0.18.0) - Text processing
+- `gensim` (4.3.2) - Topic modeling
+
+**Image & Audio:**
+- `opencv-python` (4.9.0) - Computer vision
+- `scikit-image` (0.22.0) - Image processing
+- `imageio` (2.34.0) - Image I/O
+- `librosa` (0.10.1) - Audio analysis
+
+Additional packages can be installed as needed.
+
+## Examples
+
+
+For the following examples, we'll use Claude 4 Sonnet but you can use any local or remote
+model with tool calling capabilities you'd like.
+
+
+
+
+
+### Basic Data Analysis
+
+Start small. Open a new chat, confirm that the model has tools enabled and ask it to create a small dataset of 100 students with grades and study hours.
+
+
+
+
+```
+Create a small dataset of 100 students with grades and study hours.
+Calculate the correlation and create a scatter plot.
+```
+
+The model will:
+1. Generate data with pandas (100 rows)
+2. Calculate correlation coefficient
+3. Create a matplotlib scatter plot
+4. Add trend line
+
+
+
+
+
+
+
+
+
+
+### Statistical Computing
+
+```
+Run a Monte Carlo simulation with 10,000 iterations to estimate π.
+```
+
+Expected output:
+- Numerical computation with numpy
+- Convergence plot showing estimate improvement
+- Final π estimate
+
+
+For more intensive simulations, increase iterations gradually and monitor performance.
+
+### Machine Learning
+
+```
+Create a simple 2-class dataset with 200 samples. Train a logistic regression
+model and visualize the decision boundary.
+```
+
+The model will:
+- Generate synthetic 2D classification data
+- Train a single scikit-learn model
+- Plot data points and decision boundary
+
+
+### Time Series Analysis
+
+```
+Generate daily temperature data for one year. Calculate moving averages
+and identify seasonal patterns.
+```
+
+Output includes:
+- Line plot of temperature data
+- Moving average overlay
+- Simple seasonal decomposition
+
+
+### Scaling Up
+
+Once basic examples work, you can increase complexity:
+- Larger datasets (1000+ samples)
+- Multiple models for comparison
+- Complex visualizations with subplots
+- Advanced statistical tests
+
+The sandbox handles moderate computational loads well. For very large datasets or intensive ML training, consider breaking work into smaller chunks.
+
+## Chart Generation
+
+E2B automatically detects and extracts charts from matplotlib code. Charts are returned as base64-encoded images and downloadable files.
+
+### Static Charts
+
+```python
+import matplotlib.pyplot as plt
+import numpy as np
+
+x = np.linspace(0, 10, 100)
+y = np.sin(x)
+
+plt.figure(figsize=(10, 6))
+plt.plot(x, y)
+plt.title('Sine Wave')
+plt.xlabel('x')
+plt.ylabel('sin(x)')
+plt.show()
+```
+
+E2B captures the plot and makes it available for download.
+
+### Interactive Charts
+
+The system extracts chart data for frontend visualization:
+
+```python
+plt.bar(['A', 'B', 'C'], [10, 20, 15])
+plt.title('Sample Bar Chart')
+plt.show()
+```
+
+Returns structured data:
+```json
+{
+ "type": "bar",
+ "title": "Sample Bar Chart",
+ "elements": [
+ {"label": "A", "value": 10},
+ {"label": "B", "value": 20},
+ {"label": "C", "value": 15}
+ ]
+}
+```
+
+Supported chart types: line, bar, scatter, pie, box plots.
+
+## Available Tools
+
+- **run_code**: Execute Python code
+- **install_package**: Add Python packages
+- **create_file**: Save files to sandbox
+- **read_file**: Access sandbox files
+- **list_files**: Browse sandbox contents
+
+## Troubleshooting
+
+**Connection Issues:**
+- Verify API key is correct
+- Check Node.js installation
+- Restart Jan if server won't start
+
+**Execution Problems:**
+- Free sandboxes have 2 cores and 1GB RAM - start with small datasets
+- Large computations may time out or run out of memory
+- Scale up complexity gradually after testing basic examples
+- Some packages may require explicit installation
+
+**Package Installation:**
+- Most data science packages install successfully
+- System dependencies may cause failures for some packages
+- Try alternative packages if installation fails
+
+
+
+## Use Cases
+
+E2B is useful for:
+
+- **Academic Research**: Statistical analysis, data visualization, hypothesis testing
+- **Data Science**: Exploratory data analysis, model prototyping, result validation
+- **Financial Analysis**: Portfolio optimization, risk calculations, market simulations
+- **Scientific Computing**: Numerical simulations, mathematical modeling, algorithm testing
+- **Prototyping**: Quick algorithm validation, proof-of-concept development
+
+The sandbox provides isolated execution without local environment setup or dependency management.
\ No newline at end of file
diff --git a/website/src/content/docs/jan/mcp-examples/search/exa.mdx b/website/src/content/docs/jan/mcp-examples/search/exa.mdx
new file mode 100644
index 000000000..a33a9cf6b
--- /dev/null
+++ b/website/src/content/docs/jan/mcp-examples/search/exa.mdx
@@ -0,0 +1,215 @@
+---
+title: Exa Search MCP
+description: Connect Jan to real-time web search with Exa's AI-powered search engine.
+keywords:
+ [
+ Jan,
+ MCP,
+ Model Context Protocol,
+ Exa,
+ web search,
+ real-time search,
+ research,
+ AI search,
+ tool calling,
+ ]
+---
+
+
+# Exa Search MCP
+
+[Exa MCP](https://docs.exa.ai/examples/exa-mcp) provides real-time web search capabilities for AI models. Instead of relying on training data,
+models can access current web content through Exa's search API.
+
+## Available Tools
+
+Exa MCP includes eight search functions:
+
+- `web_search_exa`: General web search with content extraction
+- `research_paper_search`: Academic papers and research content
+- `company_research`: Company analysis and business intelligence
+- `crawling`: Extract content from specific URLs
+- `competitor_finder`: Find business competitors
+- `linkedin_search`: Search LinkedIn profiles and companies
+- `wikipedia_search_exa`: Wikipedia content retrieval
+- `github_search`: Repository and code search
+
+## Prerequisites
+
+- Jan with MCP enabled
+- Exa API key from [dashboard.exa.ai](https://dashboard.exa.ai/api-keys)
+- Model with tool calling support
+- Node.js installed
+
+
+
+## Setup
+
+### Enable MCP
+
+1. Go to **Settings** > **MCP Servers**
+2. Toggle **Allow All MCP Tool Permission** ON
+
+
+
+### Get API Key
+
+1. Visit [dashboard.exa.ai/api-keys](https://dashboard.exa.ai/api-keys)
+2. Create account or sign in
+3. Generate API key
+4. Save the key
+
+
+
+### Configure MCP Server
+
+Click `+` in MCP Servers section:
+
+
+**Configuration:**
+- **Server Name**: `exa`
+- **Command**: `npx`
+- **Arguments**: `-y exa-mcp-server`
+- **Environment Variables**:
+ - Key: `EXA_API_KEY`
+ - Value: `your-api-key`
+
+
+
+### Verify Setup
+
+Check server status in the MCP Servers list.
+
+
+
+### Model Configuration
+
+Use a compatible model provider:
+
+- **Jan Nano 32k**
+- **Anthropic**
+- **OpenAI**
+- **OpenRouter**
+
+
+
+## Usage
+
+Start a new chat with a tool-enabled model. Exa tools will appear in the available tools list.
+
+
+
+### Example Queries
+
+**Current Events & Activities:**
+```
+What is happening this week, mid July 2025, in Sydney, Australia?
+```
+
+
+
+**Investment Research:**
+```
+Find recent research papers about quantum computing startups that received Series A funding in 2024-2025
+```
+
+**Tech Discovery:**
+```
+Find GitHub repositories for WebAssembly runtime engines written in Rust with active development
+```
+
+**Career Intelligence:**
+```
+Search LinkedIn for AI safety researchers at major tech companies who published papers in the last 6 months
+```
+
+**Competitive Analysis:**
+```
+Research emerging competitors to OpenAI in the large language model space, focusing on companies founded after 2023
+```
+
+**Travel & Local Research:**
+```
+Find authentic local food experiences in Tokyo that aren't in typical tourist guides, mentioned in recent travel blogs
+```
+
+**Academic Research:**
+```
+Find recent papers about carbon capture technology breakthroughs published in Nature or Science during 2025
+```
+
+**Creator Economy:**
+```
+Research successful creators who transitioned from TikTok to longer-form content platforms in 2024-2025
+```
+
+**Emerging Tech Trends:**
+```
+Find startups working on brain-computer interfaces that have raised funding in the past 12 months
+```
+
+**Health & Wellness:**
+```
+Extract information about the latest longevity research findings from Peter Attia's recent podcast episodes
+```
+
+**Regulatory Intelligence:**
+```
+Find recent AI regulation developments in the EU that could impact US companies, focusing on July 2025 updates
+```
+
+**Supply Chain Research:**
+```
+Research companies developing sustainable packaging alternatives that have partnerships with major retailers
+```
+
+## Use Cases
+
+### Academic Research
+Literature reviews, finding recent papers, tracking research trends.
+
+### Business Intelligence
+Competitor analysis, market research, company information gathering.
+
+### Technical Research
+Finding libraries, tools, and code repositories. Documentation research.
+
+### Content Analysis
+Extracting and analyzing content from specific URLs for research.
+
+### Professional Search
+LinkedIn searches for industry connections and expertise.
+
+## Troubleshooting
+
+**Connection Issues:**
+- Verify API key accuracy
+- Check Node.js installation
+- Restart Jan
+- Make sure you have enough credits in your Exa account
+
+**Tool Calling Problems:**
+- Confirm tool calling is enabled for your model
+- Try Jan Nano 32k, Claude, Gemini, GPT-4o and above models
+- Check MCP server status
+
+**Search Quality:**
+- Use specific, descriptive queries
+- Prefer natural language over keywords
+
+**API Errors:**
+- Verify API key at [dashboard.exa.ai](https://dashboard.exa.ai)
+- Check rate limits on your plan
+- Regenerate API key if needed
+
+
+
+## Next Steps
+
+Exa MCP enables real-time web search within Jan's privacy-focused environment. Models can access current information while maintaining
+local conversation processing.
\ No newline at end of file
diff --git a/website/src/content/docs/jan/mcp.mdx b/website/src/content/docs/jan/mcp.mdx
new file mode 100644
index 000000000..d175bd979
--- /dev/null
+++ b/website/src/content/docs/jan/mcp.mdx
@@ -0,0 +1,200 @@
+---
+title: Model Context Protocol
+description: Manage your interaction with AI locally.
+keywords:
+ [
+ Jan,
+ Customizable Intelligence, LLM,
+ local AI,
+ privacy focus,
+ free and open source,
+ private and offline,
+ conversational AI,
+ no-subscription fee,
+ large language models,
+ threads,
+ chat history,
+ thread history,
+ ]
+---
+
+# Using the Model Context Protocol (MCP) in Jan
+
+```mermaid
+graph TD
+ subgraph "What is MCP?"
+ You[You using Jan Desktop]
+ Claude[Jan AI Assistant]
+
+ subgraph "Your Connected Tools"
+ Files[📁 Your Files Documents, folders, text files]
+ Database[📊 Your Data Spreadsheets, databases]
+ WebServices[🌐 Online Services GitHub, Slack, Google Drive]
+ Custom[🔧 Custom Tools Special programs you've added]
+ end
+
+ subgraph "What Jan Can Do"
+ Read[Read & Understand - View your files - Check your data - See updates]
+ Action[Take Actions - Search for info - Create content - Run commands]
+ Templates[Use Templates - Common tasks - Saved prompts - Workflows]
+ end
+ end
+
+ You --> Claude
+ Claude -->|"Can I see this file?"| Files
+ Claude -->|"What's in my database?"| Database
+ Claude -->|"Check my GitHub"| WebServices
+ Claude -->|"Run this tool"| Custom
+
+ Files --> Read
+ Database --> Read
+ WebServices --> Action
+ Custom --> Templates
+
+ style You fill:transparent
+ style Claude fill:transparent
+ style Files fill:transparent
+ style Database fill:transparent
+ style WebServices fill:transparent
+ style Custom fill:transparent
+ style Read fill:transparent
+ style Action fill:transparent
+ style Templates fill:transparent
+```
+
+
+Jan now supports the **Model Context Protocol (MCP)**, an open standard designed to allow language models to
+interact with external tools and data sources.
+
+MCPs act as a common interface, standardizing the way an AI model can interact with external tools and data
+sources. This enables a model to connect to any MCP-compliant tool without requiring custom
+integration work. The way this works is via clients and servers. Clients are connected to an AI model and a host
+where a user will describe the task needed to be done. These applications hosting client will want to connect
+to different data sources to accomplish a task, for example, notion, google sheets, or even custom APIs. These
+applications will be connected to a server with prompts, tools, and data sources which will be used to complete
+the task.
+
+Jan is an MCP host that allows you to download different clients and servers and use them to accomplish a task.
+
+This document outlines the benefits, risks, and implementation of MCPs within Jan.
+
+## Core Benefits of MCP
+
+Integrating MCP provides a structured way to extend the capabilities of the models you use in Jan. Here are the three main benefits:
+
+* **Standardization:** MCP aims to solve the "M x N" integration problem, where every model (M) needs a
+unique connector for every tool (N). By adapting to a single standard, any compliant model can interface with any compliant tool.
+* **Extensibility:** This allows you to augment your models with new abilities. For instance, an AI can be granted
+access to search your local codebase, query a database, or interact with web APIs, all through the same protocol.
+* **Flexibility:** Because the interface is standardized, you can swap out models or tools with minimal friction,
+making your workflows more modular and adaptable over time.
+
+
+
+
+
+## Considerations and Risks
+
+While powerful, MCP is an evolving standard, and its use requires careful consideration of the following points:
+
+* **Security:** Granting a model access to external tools is a significant security consideration. A compromised
+tool or a malicious prompt could potentially lead to unintended actions or data exposure. Jan's implementation
+focuses on user-managed permissions to mitigate this risk, meaning, you have to turn on the permission for each
+tool individually.
+* **Standard Maturity:** As a relatively new protocol, best practices or sensible defaults are still being
+established. Users should be aware of potential issues like prompt injection, where an input could be crafted to
+misuse a tool's capabilities.
+* **Resource Management:** Active MCP connections may consume a portion of a model's context window, which could
+affect performance (i.e., the more tools the model has enabled and the larger the context of the conversation, the
+longer you will need to wait for a response). Efficient management of tools and their outputs is important.
+
+
+## Configure and Use MCPs within Jan
+
+To illustrate how MCPs can be used within Jan, we will walk through an example using the [Browser MCP](https://browsermcp.io/).
+
+Before we begin, you will need to enable experimental features at `General` > `Advanced`. Next, go to `Settings` > `MCP Servers`, and toggle
+the `Allow All MCP Tool Permission` switch ON.
+
+
+
+Please note that you will also need to have **NodeJS** and/or **Python** installed on your machine. In case you don't
+have either, you can download them from the official websites at the links below:
+- [Node.js](https://nodejs.org/)
+- [Python](https://www.python.org/)
+
+
+### Browser MCP
+
+- Click on the `+` sign on the upper right-hand corner of the MCP box.
+
+
+
+- Enter the following details to configure the BrowserMCP:
+ - **Server Name**: `browsermcp`
+ - **Command**: `npx`
+ - **Arguments**: `@browsermcp/mcp`
+ - **Environment Variables**: You can leave this field empty.
+
+
+
+- Check that the server has been activated successfully.
+
+
+
+- Open your favorite chrome-based browser (e.g., Google Chrome, Brave, Vivaldi, Microsoft Edge, etc...) and navigate to the
+[Browser MCP Extension Page](https://chromewebstore.google.com/detail/browser-mcp-automate-your/bjfgambnhccakkhmkepdoekmckoijdlc).
+
+
+
+- Make sure to enable the extension to run on private windows. Since the Browser MCP will have access to all sites you've
+already logged into in your regular browser session, it is best to give it a clean slate to start from.
+
+
+
+- Enable the extension to run on private windows by clicking on it and Connecting to the Browser MCP server.
+
+
+
+- Go back to Jan and pick a model with good tool use capabilities, for example, Claude 3.7 and 4 Sonnet, or Claude 4 Opus,
+and make sure to enable tool calling via the UI by going to **Model Providers > Anthropic** and, after you have entered your
+API key, enable tools from the **+** button.
+
+
+
+You can check and see if this was accurate below.
+
+
+
+
+## Troubleshooting
+
+- The MCP server won't connect even though I've already added it to my list of MCP Servers?
+ - Make sure you have NodeJS and Python installed
+ - Make sure you typed the commands correctly in the MCP Server form
+ - Make sure the model you are using has tools enabled
+ - Restart Jan
+- The open source model I picked won't use the MCPs I enabled.
+ - Make sure the model you are using has tools enabled
+ - Lots of open source models are not designed to use tools or simply don't work well with them, so you may need to try a different model
+ - The model you have selected might be good at tool calling but it is possible that it does not support images, effectively making it unsuitable for some tools that take screenshots of a website like the Browser MCP
+
+## Future Potential
+
+This integration is the foundation for creating more capable and context-aware AI assistants within Jan. The
+long-term goal is to enable more sophisticated workflows that make use of your local environment securely as
+well as your favorite tools.
+
+For example, an AI could cross-reference information between a local document and a remote API, or use a
+local script to analyze data and then summarize the findings, all orchestrated through Jan's interface. As
+the MCP ecosystem grows, so will the potential applications within Jan.
diff --git a/website/src/content/docs/jan/privacy-policy.mdx b/website/src/content/docs/jan/privacy-policy.mdx
new file mode 100644
index 000000000..525e10091
--- /dev/null
+++ b/website/src/content/docs/jan/privacy-policy.mdx
@@ -0,0 +1,125 @@
+---
+title: Jan Privacy Policy
+description: Jan's data collection practices, privacy measures, and your rights. Learn how we protect your data and maintain transparency.
+---
+
+# Privacy Policy
+
+
+ Last Updated: January 16, 2025
+
+
+## Introduction
+
+We are committed to protecting your privacy and ensuring you have control over your data. This Privacy Policy outlines what information Menlo Research Pte Ltd (the "Company") collects from users of the Jan desktop app and website (the "Services"), how the Company uses that information, and the measures the Company takes to safeguard that information.
+
+## 1. Data Collection and Consent
+
+### Explicit Consent
+
+The Company does not collect any data until you explicitly allow tracking.
+
+### Tracking Preferences
+
+Upon first launching the Jan desktop app or visiting the website, you will be prompted to set your tracking preferences. These preferences can be modified at any time via the app's Settings menu or the website's Privacy Settings.
+
+### Legal Basis
+
+Pursuant to the European Union's General Data Protection Regulation (EU) 2016/679 (the "GDPR"), the Company processes data based on your explicit consent (GDPR Article 6(1)(a)). This means:
+
+- The Company only processes your data after receiving clear, affirmative consent from you.
+- You may withdraw your consent at any time through the app's Settings menu or the website's Privacy Settings.
+- If you withdraw your consent, the Company will stop optional data collection from the effective date of withdrawal.
+- Your withdrawal of consent will not affect the lawfulness of processing before its withdrawal.
+
+## 2. Data We Do Not Collect
+
+Regardless of your analytics permissions, the Company does not collect the following:
+
+- Chat History: Your conversations with the Jan app are private and inaccessible to the Company.
+- Chat Settings: Your personalized settings remain solely with you.
+- Language Models: The specific language models you use are not tracked.
+
+## 3. Uses of Information
+
+To build a reliable and user-friendly product offering, understanding how the Jan app is used is essential. If you permit tracking, the Company collects product analytics data to:
+
+- Improve User Experience: Enhance app functionality based on usage patterns; and
+- Measure Engagement: Assess active users and retention rates to ensure ongoing value.
+
+## 4. Product Analytics
+
+### Data Collected
+
+When you opt-in to tracking, we collect the following anonymous data:
+
+- Active Users: Number of daily active users to gauge engagement.
+- Retention Rates: Track if users continue to find value in the Jan app over time.
+
+### Data Anonymity
+
+- User ID: Analytics data is tied to a randomly generated user ID, ensuring no link to your personal identity.
+- Privacy Assurance: Your chat history and personal data are not tracked or linked to your usage data.
+
+## 5. What We Do Not Track
+
+Even with analytics permissions granted, the Company does not track the following:
+
+- Conversations: Your interactions with the Jan app remain private.
+- Files: The Company does not scan, upload, or view your files.
+- Personal Identity: The Company does not collect personally identifiable information about users.
+- Prompts: Your prompts and prompt templates are not monitored.
+- Conversation Metrics: The Company does not track context length or conversation length.
+- Model Usage: The specific models you use or their types are not tracked.
+- Storage: You retain full control over storing your files and logs, and your privacy is prioritized.
+
+## 6. Using Cloud Models
+
+The Jan app allows you to connect to cloud-based model APIs (e.g. GPT, Claude models).
+
+- Data Handling: The API provider processes your messages directly; the Jan app does not access or store these messages.
+- Local Models: Choosing local models ensures all data remains on your device, with no external access.
+
+## 7. Data Storage and Processing
+
+### Analytics Provider
+
+The Company uses PostHog EU for analytics, which ensures all data is processed within the European Union.
+
+### Data Security
+
+- Encryption: All data transfers are encrypted using Transport Layer Security (TLS) to ensure secure transmission.
+- Storage: PostHog securely manages the data the Company collects. For more information, please refer to PostHog's GDPR documentation.
+
+## 8. Data Retention
+
+- Retention Period: The Company retains analytics data for up to 12 months unless otherwise required to comply with any applicable legal requirements.
+- Deletion Requests: If you wish to request the deletion of your analytics data, you may do so by sending a written request to hello@jan.ai.
+
+## 9. Your Rights and Choices
+
+- Access and Control: You may access, modify, or delete your tracking preferences at any time through the Jan app or website settings.
+- Data Requests: If you have any requests related to your data, please address them to hello@jan.ai.
+
+## 10. Children's Privacy
+
+Our Services are not targeted at children under the age of 13. The Company does not knowingly collect data from children under the age of 13. If the Company becomes aware that data of persons under the age of 13 has been collected without verifiable parental consent, the Company will take appropriate actions to delete this information.
+
+## 11. Changes to the Privacy Policy
+
+The Company reserves the right, at its sole discretion, to update this Privacy Policy at any time to reflect changes in the practices or legal requirements of the Company. The Company will use reasonable efforts to notify you of any significant changes via app notifications, the website, or email. Your continued use of the Services following such updates means you accept those changes.
+
+## 12. Cookies and Tracking Technologies
+
+Our website utilizes cookies to:
+
+- Enhance user experience; and
+- Measure website traffic and usage patterns.
+
+Most browsers allow you to remove or manage cookie functions and adjust your privacy and security preferences.
+
+For more details, please refer to our Cookie Policy.
+
+## 13. Contact Us
+
+For any questions or concerns about this Privacy Policy or our data practices, please contact hello@jan.ai.
\ No newline at end of file
diff --git a/website/src/content/docs/jan/privacy.mdx b/website/src/content/docs/jan/privacy.mdx
new file mode 100644
index 000000000..08a91e633
--- /dev/null
+++ b/website/src/content/docs/jan/privacy.mdx
@@ -0,0 +1,77 @@
+---
+title: Jan's Privacy Approach
+description: Jan is an app that allows you to own your AI. We prioritize your control over your data and explain what data we collect and why.
+keywords:
+ [
+ Jan AI,
+ Jan,
+ local AI,
+ private AI,
+ conversational AI,
+ no-subscription fee,
+ large language model,
+ about Jan,
+ desktop application,
+ thinking machine,
+ jan vision,
+ ]
+---
+
+import { Aside } from '@astrojs/starlight/components';
+
+# Privacy
+
+Jan is your AI. Period. Here's what we do with data.
+
+
+
+
+
+You'll choose tracking preferences at first launch. Change them anytime in Settings.
+
+Jan will **never** peek at your chats, settings, or model choices. Not even if you ask nicely.
+
+## Data We Track (With Permission)
+
+We track basic app usage to improve Jan. That's it.
+
+### Product Analytics
+
+When allowed, we count:
+
+- **Active Users**: Daily Jan-thusiasts
+- **Retention**: Who sticks around
+
+Everything's tied to a random ID - not you. Your chats stay yours.
+
+## What We Don't Track
+
+
+
+- No chat snooping
+- No file scanning
+- No identity tracking
+- No prompt logging
+- No conversation monitoring
+- No model tracking
+
+Your private stuff stays private.
+
+## Cloud Model Use
+
+Cloud models (like GPT, Claude) need to see your messages to work. That's between you and them - Jan just makes
+the introduction. Local models keep everything at home where the neighbors can't gossip.
+
+## Data Storage
+[PostHog EU](https://posthog.com/eu) handles our analytics. All EU-based, GDPR-compliant, properly buttoned
+up. Details in their [GDPR docs](https://posthog.com/docs/privacy/gdpr-compliance).
diff --git a/website/src/content/docs/jan/remote-models/anthropic.mdx b/website/src/content/docs/jan/remote-models/anthropic.mdx
new file mode 100644
index 000000000..4b44f7a02
--- /dev/null
+++ b/website/src/content/docs/jan/remote-models/anthropic.mdx
@@ -0,0 +1,81 @@
+---
+title: Anthropic
+description: Learn how to integrate Anthropic with Jan for enhanced functionality.
+keywords:
+ [
+ Anthropic API,
+ Jan,
+ Jan AI,
+ ChatGPT alternative,
+ conversational AI,
+ large language model,
+ integration,
+ Anthropic integration,
+ API integration
+ ]
+---
+
+
+# Anthropic
+
+Jan supports all of [Anthropic's models](https://anthropic.com/) via API integration, allowing
+you to chat with Claude's latest Opus, Sonnet and Haiku models.
+
+## Integrate Anthropic API with Jan
+
+
+
+### Step 1: Get Your API Key
+1. Visit [Anthropic Console](https://console.anthropic.com/settings/keys) and sign in
+2. Create & copy a new API key or copy your existing one
+
+
+
+### Step 2: Configure Jan
+
+1. Navigate to **Settings** ()
+2. Under **Model Providers**, select **Anthropic**
+3. Insert your **API Key**
+
+
+
+
+
+### Step 3: Start Using Anthropic's Models
+
+1. In any existing **Chat** or create a new one
+2. Select an Anthropic model from **model selector**
+3. Start chatting
+
+
+
+## Available Anthropic Models
+
+Jan automatically includes Anthropic's available models. In case you want to use a specific Anthropic model
+that you cannot find in **Jan**, follow instructions in [Add Cloud Models](/docs/manage-models#add-models-1):
+- See list of available models in [Anthropic Models](https://docs.anthropic.com/claude/docs/models-overview).
+- The `id` property must match the model name in the list. For example, `claude-opus-4@20250514`, `claude-sonnet-4@20250514`, or `claude-3-5-haiku@20241022`.
+
+## Troubleshooting
+
+Common issues and solutions:
+
+**1. API Key Issues**
+- Verify your API key is correct and not expired
+- Check if you have billing set up on your Anthropic account
+- Ensure you have access to the model you're trying to use
+
+**2. Connection Problems**
+- Check your internet connection
+- Verify Anthropic's system status
+- Look for error messages in [Jan's logs](/docs/troubleshooting#how-to-get-error-logs)
+
+**3. Model Unavailable**
+- Confirm your API key has access to the model
+- Check if you're using the correct model ID
+- Verify your Anthropic account has the necessary permissions
+
+Need more help? Join our [Discord community](https://discord.gg/FTk2MvZwJH) or check the
+[Anthropic's documentation](https://docs.anthropic.com/claude/docs).
diff --git a/website/src/content/docs/jan/remote-models/cohere.mdx b/website/src/content/docs/jan/remote-models/cohere.mdx
new file mode 100644
index 000000000..5eee7cb67
--- /dev/null
+++ b/website/src/content/docs/jan/remote-models/cohere.mdx
@@ -0,0 +1,79 @@
+---
+title: Cohere
+description: Learn how to integrate Cohere with Jan for enhanced functionality.
+keywords:
+ [
+ Cohere API,
+ Jan,
+ Jan AI,
+ ChatGPT alternative,
+ conversational AI,
+ large language model,
+ integration,
+ Cohere integration,
+ API integration
+ ]
+---
+
+
+# Cohere
+
+Jan supports [Cohere](https://cohere.com/) API integration, allowing you to use Cohere's
+models (Command, Command-R and more) through Jan's interface.
+
+## Integrate Cohere API with Jan
+
+
+
+### Step 1: Get Your API Key
+1. Visit [Cohere Dashboard](https://dashboard.cohere.com/api-keys) and sign in
+2. Create a new API key and/or copy your existing one
+
+
+
+### Step 2: Configure Jan
+
+1. Navigate to **Settings** ()
+2. Under **Model Providers**, select **Cohere**
+3. Insert your **API Key**
+
+
+
+
+
+### Step 3: Start Using Cohere's Models
+
+1. Jump into any existing **Chat** or create a new one
+2. Select a Cohere model from **model selector** options
+3. Start chatting
+
+
+## Available Cohere Models
+
+Jan automatically includes Cohere's available models. In case you want to use a specific
+Cohere model that you cannot find in **Jan**, follow instructions in [Add Cloud Models](/docs/manage-models):
+- See list of available models in [Cohere Documentation](https://docs.cohere.com/v2/docs/models).
+- The `id` property must match the model name in the list. For example, `command-nightly` or `command-light`.
+
+## Troubleshooting
+
+Common issues and solutions:
+
+**1. API Key Issues**
+- Verify your API key is correct and not expired
+- Check if you have billing set up on your Cohere account
+- Ensure you have access to the model you're trying to use
+
+**2. Connection Problems**
+- Check your internet connection
+- Verify Cohere's [system status](https://status.cohere.com/)
+- Look for error messages in [Jan's logs](/docs/troubleshooting#how-to-get-error-logs)
+
+**3. Model Unavailable**
+- Confirm your API key has access to the model
+- Check if you're using the correct model ID
+- Verify your Cohere account has the necessary permissions
+
+Need more help? Join our [Discord community](https://discord.gg/FTk2MvZwJH) or check the [Cohere documentation](https://docs.cohere.com).
diff --git a/website/src/content/docs/jan/remote-models/google.mdx b/website/src/content/docs/jan/remote-models/google.mdx
new file mode 100644
index 000000000..ad80bacd2
--- /dev/null
+++ b/website/src/content/docs/jan/remote-models/google.mdx
@@ -0,0 +1,77 @@
+---
+title: Google
+description: Learn how to integrate Google with Jan for enhanced functionality.
+keywords:
+ [
+ Google API,
+ Jan,
+ Jan AI,
+ ChatGPT alternative,
+ conversational AI,
+ large language model,
+ integration,
+ Google integration,
+ API integration
+ ]
+---
+
+
+# Google
+
+Jan supports [Google](https://ai.google/get-started/our-models/) API integration, allowing you to use Google models (like Gemini series) through Jan's interface.
+
+## Integrate Google API with Jan
+
+
+### Step 1: Get Your API Key
+1. Visit [Google AI Studio](https://aistudio.google.com/app/apikey) and sign in
+2. Create & copy a new API key or copy your existing one
+
+
+
+### Step 2: Configure Jan
+
+1. Navigate to the **Settings** page ()
+2. Under **Model Providers**, select **Gemini**
+3. Insert your **API Key**
+
+
+
+
+
+### Step 3: Start Using Google's Models
+
+1. Go to any existing **Chat** or create a new one
+2. Select a Gemini model from **model selector**
+3. Start chatting
+
+
+## Available Google Models
+
+Jan automatically includes Google's available models like Gemini series. In case you want to use a specific
+Gemini model that you cannot find in **Jan**, follow instructions in [Add Cloud Models](/docs/manage-models#add-models-1):
+- See list of available models in [Google Models](https://ai.google.dev/gemini-api/docs/models/gemini).
+- The `id` property must match the model name in the list. For example, `gemini-1.5-pro` or `gemini-2.0-flash-lite-preview`.
+
+## Troubleshooting
+
+Common issues and solutions:
+
+**1. API Key Issues**
+- Verify your API key is correct and not expired
+- Check if you have billing set up on your Google account
+- Ensure you have access to the model you're trying to use
+
+**2. Connection Problems**
+- Check your internet connection
+- Verify [Gemini's system status](https://www.google.com/appsstatus/dashboard/)
+- Look for error messages in [Jan's logs](/docs/troubleshooting#how-to-get-error-logs)
+
+**3. Model Unavailable**
+- Confirm your API key has access to the model
+- Check if you're using the correct model ID
+- Verify your Google account has the necessary permissions
+
+Need more help? Join our [Discord community](https://discord.gg/FTk2MvZwJH).
diff --git a/website/src/content/docs/jan/remote-models/groq.mdx b/website/src/content/docs/jan/remote-models/groq.mdx
new file mode 100644
index 000000000..0691b109a
--- /dev/null
+++ b/website/src/content/docs/jan/remote-models/groq.mdx
@@ -0,0 +1,78 @@
+---
+title: Groq API
+description: Learn how to integrate Groq API with Jan for enhanced functionality.
+keywords:
+ [
+ Groq API,
+ Jan,
+ Jan AI,
+ ChatGPT alternative,
+ conversational AI,
+ large language model,
+ integration,
+ Groq integration,
+ API integration
+ ]
+---
+
+
+# Groq
+
+Jan supports [Groq](https://groq.com/) API integration, allowing you to use Groq's high-performance LLM models (LLaMA 2, Mixtral and more) through Jan's interface.
+
+## Integrate Groq API with Jan
+
+
+### Step 1: Get Your API Key
+1. Visit [Groq Console](https://console.groq.com/keys) and sign in
+2. Create & copy a new API key or copy your existing one
+
+
+
+### Step 2: Configure Jan
+
+1. Navigate to the **Settings** ()
+2. Under **Model Providers**, select **Groq**
+3. Insert your **API Key**
+
+
+
+
+
+
+### Step 3: Start Using Groq's Models
+
+1. Jump into any existing **Chat** or create a new one
+2. Select a Groq model from **model selector**
+3. Start chatting
+
+
+## Available Models Through Groq
+
+Jan automatically includes Groq's available models. In case you want to use a specific Groq model that
+you cannot find in **Jan**, follow the instructions in the [Add Cloud Models](/docs/manage-models#add-models-1):
+- See list of available models in [Groq Documentation](https://console.groq.com/docs/models).
+- The `id` property must match the model name in the list. For example, if you want to use Llama3.3 70B, you must set the `id` property to `llama-3.3-70b-versatile`.
+
+## Troubleshooting
+
+Common issues and solutions:
+
+**1. API Key Issues**
+- Verify your API key is correct and not expired
+- Check if you have billing set up on your Groq account
+- Ensure you have access to the model you're trying to use
+
+**2. Connection Problems**
+- Check your internet connection
+- Verify Groq's system status
+- Look for error messages in [Jan's logs](/docs/troubleshooting#how-to-get-error-logs)
+
+**3. Model Unavailable**
+- Confirm your API key has access to the model
+- Check if you're using the correct model ID
+- Verify your Groq account has the necessary permissions
+
+Need more help? Join our [Discord community](https://discord.gg/FTk2MvZwJH) or check the [Groq documentation](https://console.groq.com/docs).
diff --git a/website/src/content/docs/jan/remote-models/mistralai.mdx b/website/src/content/docs/jan/remote-models/mistralai.mdx
new file mode 100644
index 000000000..9baa55a2a
--- /dev/null
+++ b/website/src/content/docs/jan/remote-models/mistralai.mdx
@@ -0,0 +1,81 @@
+---
+title: Mistral AI API
+description: A step-by-step guide on integrating Jan with Mistral AI.
+keywords:
+ [
+ Jan,
+ Customizable Intelligence, LLM,
+ local AI,
+ privacy focus,
+ free and open source,
+ private and offline,
+ conversational AI,
+ no-subscription fee,
+ large language models,
+ Mistral integration,
+ ]
+---
+
+
+# Mistral AI
+
+Jan supports all models available via the [Mistral AI](https://mistral.ai/) API, allowing you to use Mistral's
+powerful models (Mistral Large, Mistral Medium, Mistral Small and more) through Jan's interface.
+
+## Integrate Mistral AI with Jan
+
+
+
+### Step 1: Get Your API Key
+1. Visit the [Mistral AI Platform](https://console.mistral.ai/api-keys/) and sign in
+2. Create & copy a new API key or copy your existing one
+
+
+
+### Step 2: Configure Jan
+
+1. Navigate to the **Settings** page ()
+2. Under **Model Providers**, select **Mistral AI**
+3. Insert your **API Key**
+
+
+
+
+
+### Step 3: Start Using Mistral's Models
+
+1. Open any existing **Chat** or create a new one
+2. Select a Mistral model from **model selector**
+3. Start chatting
+
+
+## Available Mistral Models
+
+Jan automatically includes Mistral's available models. In case you want to use a specific Mistral model
+that you cannot find in **Jan**, follow the instructions in [Add Cloud Models](/docs/manage-models#add-models-1):
+- See list of available models in [Mistral AI Documentation](https://docs.mistral.ai/platform/endpoints).
+- The `id` property must match the model name in the list. For example, if you want to use
+Mistral Large, you must set the `id` property to `mistral-large-latest`
+
+## Troubleshooting
+
+Common issues and solutions:
+
+**1. API Key Issues**
+- Verify your API key is correct and not expired
+- Check if you have billing set up on your Mistral AI account
+- Ensure you have access to the model you're trying to use
+
+**2. Connection Problems**
+- Check your internet connection
+- Verify Mistral AI's system status
+- Look for error messages in [Jan's logs](/docs/troubleshooting#how-to-get-error-logs)
+
+**3. Model Unavailable**
+- Confirm your API key has access to the model
+- Check if you're using the correct model ID
+- Verify your Mistral AI account has the necessary permissions
+
+Need more help? Join our [Discord community](https://discord.gg/FTk2MvZwJH) or check the [Mistral AI documentation](https://docs.mistral.ai/).
diff --git a/website/src/content/docs/jan/remote-models/openai.mdx b/website/src/content/docs/jan/remote-models/openai.mdx
new file mode 100644
index 000000000..6cf8f5082
--- /dev/null
+++ b/website/src/content/docs/jan/remote-models/openai.mdx
@@ -0,0 +1,85 @@
+---
+title: OpenAI API
+description: A step-by-step guide on integrating Jan with OpenAI.
+keywords:
+ [
+ Jan,
+ Customizable Intelligence, LLM,
+ local AI,
+ privacy focus,
+ free and open source,
+ private and offline,
+ conversational AI,
+ no-subscription fee,
+ large language models,
+ integration,
+ Azure OpenAI Service,
+ ]
+---
+
+# OpenAI
+
+Jan supports most [OpenAI](https://openai.com/) models as well as the many OpenAI-compatible APIs out there,
+allowing you to use all models from OpenAI (GPT-4o, o3 and even those from Together AI, DeepSeek, Fireworks
+and more) through Jan's interface.
+
+## Integrate OpenAI API with Jan
+
+
+
+### Step 1: Get Your API Key
+1. Visit the [OpenAI Platform](https://platform.openai.com/api-keys) and sign in
+2. Create & copy a new API key or copy your existing one
+
+
+
+### Step 2: Configure Jan
+
+1. Navigate to the Settings page ()
+2. Under Remote Engines, select OpenAI
+3. Insert your API Key
+
+
+
+
+
+### Step 3: Start Using OpenAI's Models
+
+1. Open any existing **Chat** or create a new one
+2. Select an OpenAI model from **model selector**
+3. Start chatting
+
+
+
+## Available OpenAI Models
+
+Jan automatically includes popular OpenAI models. In case you want to use a specific model that you
+cannot find in Jan, follow instructions in [Add Cloud Models](/docs/manage-models#add-models-1):
+- See list of available models in [OpenAI Platform](https://platform.openai.com/docs/models/overview).
+- The id property must match the model name in the list. For example, if you want to use
+[GPT-4.5](https://platform.openai.com/docs/models/), you must set the id property
+to the respective model ID.
+
+## Troubleshooting
+
+Common issues and solutions:
+
+1. API Key Issues
+- Verify your API key is correct and not expired
+- Check if you have billing set up on your OpenAI account
+- Ensure you have access to the model you're trying to use
+
+2. Connection Problems
+- Check your internet connection
+- Verify OpenAI's [system status](https://status.openai.com)
+- Look for error messages in [Jan's logs](/docs/troubleshooting#how-to-get-error-logs)
+
+3. Model Unavailable
+- Confirm your API key has access to the model
+- Check if you're using the correct model ID
+- Verify your OpenAI account has the necessary permissions
+
+Need more help? Join our [Discord community](https://discord.gg/FTk2MvZwJH) or check the
+[OpenAI documentation](https://platform.openai.com/docs).
diff --git a/website/src/content/docs/jan/remote-models/openrouter.mdx b/website/src/content/docs/jan/remote-models/openrouter.mdx
new file mode 100644
index 000000000..28e74d82f
--- /dev/null
+++ b/website/src/content/docs/jan/remote-models/openrouter.mdx
@@ -0,0 +1,97 @@
+---
+title: OpenRouter
+description: A step-by-step guide on integrating Jan with OpenRouter.
+keywords:
+ [
+ Jan,
+ Customizable Intelligence, LLM,
+ local AI,
+ privacy focus,
+ free and open source,
+ private and offline,
+ conversational AI,
+ no-subscription fee,
+ large language models,
+ OpenRouter integration,
+ OpenRouter,
+ ]
+---
+
+import { Aside } from '@astrojs/starlight/components';
+
+# OpenRouter
+
+## About OpenRouter
+
+[OpenRouter](https://openrouter.ai/) is a tool that gathers AI model APIs and provides access to all
+via a unified API. Developers can use the API to interact with LLMs, generative image models, and
+even models that generate 3D objects, all with a competitive pricing.
+
+Jan supports the OpenRouter API, allowing you to use models from various providers (Anthropic, Google,
+Meta and more) and helping you avoid having to get an API from all of your favorite ones.
+
+OpenRouter even offers a few free models! 🙌
+
+## Integrate OpenRouter with Jan
+
+
+
+### Step 1: Get Your API Key
+1. Visit [OpenRouter](https://openrouter.ai/keys) and sign in
+2. Create & copy a new API key or copy your existing one
+
+
+
+### Step 2: Configure Jan
+
+1. Navigate to the **Settings** page ()
+2. Under **Model Providers**, select **OpenRouter**
+3. Insert your **API Key**
+
+
+
+
+
+### Step 3: Start Using OpenRouter Models
+
+1. Pick any existing **Chat** or create a new one
+2. Select any model from **model selector** under OpenRouter
+3. Start chatting
+
+
+## Available Models Through OpenRouter
+
+Jan automatically uses your default OpenRouter models. For custom configurations:
+
+**Model Field Settings:**
+- Leave empty to use your account's default model
+- Specify a model using the format: `organization/model-name`
+- Available options can be found in [OpenRouter's Model Reference](https://openrouter.ai/models)
+
+**Examples of Model IDs:**
+- Claude 4 Opus: `anthropic/claude-opus-4`
+- Google Gemini 2.5 Pro: `google/gemini-2.5-pro-preview`
+- DeepSeek R1 Latest: `deepseek/deepseek-r1-0528`
+
+## Troubleshooting
+
+Common issues and solutions:
+
+**1. API Key Issues**
+- Verify your API key is correct and not expired
+- Check if you have sufficient credits in your OpenRouter account
+- Ensure you have access to the model you're trying to use
+
+**2. Connection Problems**
+- Check your internet connection
+- Verify OpenRouter's [system status](https://status.openrouter.ai)
+- Look for error messages in [Jan's logs](/docs/troubleshooting#how-to-get-error-logs)
+
+**3. Model Unavailable**
+- Confirm the model is currently available on OpenRouter
+- Check if you're using the correct model ID format
+- Verify the model provider is currently operational
+
+Need more help? Join our [Discord community](https://discord.gg/FTk2MvZwJH) or check the [OpenRouter documentation](https://openrouter.ai/docs).
diff --git a/website/src/content/docs/jan/settings.mdx b/website/src/content/docs/jan/settings.mdx
new file mode 100644
index 000000000..dc9090551
--- /dev/null
+++ b/website/src/content/docs/jan/settings.mdx
@@ -0,0 +1,230 @@
+---
+title: Settings
+description: Explore how to adjust Jan's settings to suit your specific requirements.
+keywords:
+ [
+ Jan,
+ Customizable Intelligence, LLM,
+ local AI,
+ privacy focus,
+ free and open source,
+ private and offline,
+ conversational AI,
+ no-subscription fee,
+ large language models,
+ Advanced Settings,
+ HTTPS Proxy,
+ SSL,
+ settings,
+ Jan settings,
+ ]
+---
+
+
+
+# Settings
+
+To access the **Settings**, click icon in the bottom left corner of Jan.
+
+## Model Management
+
+Manage your installed AI models in **Settings** > **Model Providers**:
+
+### Import Models
+- **From Hugging Face:**
+ - Enter a model's Hugging Face ID (e.g., `org/model_name_or_id`) in the Hub's search bar.
+ - **Note:** Some models require a Hugging Face Access Token. Enter your token in **Settings > Model Providers > Hugging Face Access Token**.
+- **From Local Files:**
+ - Click **Import Model** and select your GGUF files.
+
+### Remove Models
+
+- Click the trash icon next to the **Start** button and then click **Delete**.
+
+### Start Models
+
+1. Open a new chat and select the model you want to start.
+2. Click the **Start** button on the **Settings > Model Providers**
+
+### Hugging Face Access Token
+To download models from Hugging Face that require authentication (for example, the Llama models from Meta):
+1. Get your token from [Hugging Face Tokens](https://huggingface.co/docs/hub/en/security-tokens)
+2. Enter it in **Settings > Model Providers > Hugging Face**.
+
+## Model Settings (Gear Icon)
+
+
+
+Click the gear icon next to a model to configure advanced settings:
+- **Context Size**: Maximum prompt context length
+- **GPU Layers**: Number of model layers to offload to GPU. If you have an NVIDIA GPU and notice that your model won't fully load in it, you can reduce this value to load smaller parts of the model and try again.
+- **Temperature**: Controls randomness (higher = more random)
+- **Top K**: Limits token selection to K most likely next tokens (smaller K = more focused responses)
+- **Top P**: Limits token selection to tokens comprising P probability mass (smaller P = more focused responses)
+- **Min P**: Sets a minimum threshold for words the model can select (higher values filter out less likely words)
+- **Repeat Last N**: Determines how many recent words the model checks to avoid repetition
+- **Repeat Penalty**: Controls how strongly the model avoids repeating phrases (higher values reduce repetition)
+- **Presence Penalty**: Discourages reusing words that already appeared in the text (helps with variety)
+
+_See [Model Parameters](/docs/model-parameters) for a more detailed explanation._
+
+
+## Hardware
+
+Monitor and manage system resources at **Settings > Hardware**:
+- **CPU, RAM, GPU**: View usage and specs
+- **GPU Acceleration**: Enable/disable and configure GPU settings
+
+
+
+
+
+## Preferences
+
+### Appearance & Theme
+Control the visual theme of Jan's interface with any color combo you'd like. You can also control the color use in the code blocks.
+
+
+
+
+
+### Spell Check
+Jan includes a built-in spell check feature to help catch typing errors in your messages.
+
+
+
+
+
+
+## Privacy
+At **Settings** () > **Privacy**, you can control analytics & logs in Jan:
+
+### Analytics
+Jan is built with privacy at its core. By default, no data is collected. Everything stays local on your device.
+You can help improve Jan by sharing anonymous usage data:
+1. Toggle on **Analytics** to share anonymous data
+2. You can change this setting at any time
+
+
+
+
+
+
+
+### Log Management
+
+**1. View Logs**
+- Logs are stored at:
+ - App log: `~/Library/Application\ Support/jan/data/logs/app.log`
+ - Cortex log: `~/Library/Application\ Support/jan/data/logs/cortex.log`
+- To open logs from Jan's interface: at **Logs**, click icon to open App Logs & Cortex Logs:
+
+
+
+
+
+**2. Clear Logs**
+
+Jan retains your logs for only **24 hours**. To remove all logs from Jan, at **Clear Logs**, click the **Clear** button:
+
+
+
+
+
+
+
+
+### Jan Data Folder
+Jan stores your data locally in your own filesystem in a universal file format. See detailed [Jan Folder Structure](/docs/data-folder#folder-structure).
+
+**1. Open Jan Data Folder**
+
+At **Jan Data Folder**, click icon to open Jan application's folder:
+
+
+
+
+**2. Edit Jan Data Folder**
+
+1. At **Jan Data Folder**, click icon to edit Jan application's folder
+2. Choose a new directory & click **Select**, make sure the new folder is empty
+3. Confirmation pop-up shows up:
+
+> Are you sure you want to relocate Jan Data Folder to `new directory`?
+Jan Data Folder will be duplicated into the new location while the original folder remains intact.
+An app restart will be required afterward.
+
+4. Click **Yes, Proceed**
+
+
+
+
+
+### HTTPS Proxy
+HTTPS Proxy encrypts data between your browser and the internet, making it hard for outsiders to intercept or read. It also helps you maintain your privacy and security while bypassing regional restrictions on the internet.
+
+
+
+1. **Enable** the proxy toggle
+2. Enter your proxy server details in the following format:
+```
+http://<username>:<password>@<host>:<port>
+```
+Where:
+- `<username>`: Your proxy username (if authentication is required)
+- `<password>`: Your proxy password (if authentication is required)
+- `<host>`: Your proxy server's domain name or IP address
+- `<port>`: The port number for the proxy server
+
+
+
+
+
+**Ignore SSL Certificates**
+
+This setting allows Jan to accept self-signed or unverified SSL certificates. This may be necessary when:
+- Working with corporate proxies using internal certificates
+- Testing in development environments
+- Connecting through specialized network security setups
+
+
+
+
+
+
+
+
+### Factory Reset
+Reset to Factory Settings restores Jan to its initial state by erasing all user data, including downloaded models and chat history. This action is irreversible and should only be used as a last resort when experiencing serious application issues.
+
+
+
+Only use factory reset if:
+- The application is corrupted
+- You're experiencing persistent technical issues that other solutions haven't fixed
+- You want to completely start fresh with a clean installation
+
+To begin the process:
+1. At **Reset to Factory Settings**, click **Reset** button
+
+
+
+2. In the confirmation dialog:
+- Type the word **RESET** to confirm
+- Optionally check **Keep the current app data location** to maintain the same data folder
+- Click **Reset Now**
+3. App restart is required upon confirmation
+
+
+
diff --git a/website/src/content/docs/jan/threads.mdx b/website/src/content/docs/jan/threads.mdx
new file mode 100644
index 000000000..0c02a99e0
--- /dev/null
+++ b/website/src/content/docs/jan/threads.mdx
@@ -0,0 +1,143 @@
+---
+title: Start Chatting
+description: Download models and manage your conversations with AI models locally.
+keywords:
+ [
+ Jan,
+ local AI,
+ LLM,
+ chat,
+ threads,
+ models,
+ download,
+ installation,
+ conversations,
+ ]
+---
+
+
+# Start Chatting
+
+
+
+### Step 1: Install Jan
+
+1. [Download Jan](/download)
+2. Install the app ([Mac](/docs/desktop/mac), [Windows](/docs/desktop/windows), [Linux](/docs/desktop/linux))
+3. Launch Jan
+
+### Step 2: Download a Model
+
+Jan requires a model to chat. Download one from the Hub:
+
+1. Go to the **Hub Tab**
+2. Browse available models (must be GGUF format)
+3. Select one matching your hardware specs
+4. Click **Download**
+
+
+
+
+
+**HuggingFace models:** Some require an access token. Add yours in **Settings > Model Providers > Llama.cpp > Hugging Face Access Token**.
+
+
+
+### Step 3: Enable GPU Acceleration (Optional)
+
+For Windows/Linux with compatible graphics cards:
+
+1. Go to **() Settings** > **Hardware**
+2. Toggle **GPUs** to ON
+
+
+
+
+
+### Step 4: Start Chatting
+
+1. Click **New Chat** () icon
+2. Select your model in the input field dropdown
+3. Type your message and start chatting
+
+
+
+
+
+## Managing Conversations
+
+Jan organizes conversations into threads for easy tracking and revisiting.
+
+### View Chat History
+
+- **Left sidebar** shows all conversations
+- Click any chat to open the full conversation
+- **Favorites**: Pin important threads for quick access
+- **Recents**: Access recently used threads
+
+
+
+### Edit Chat Titles
+
+1. Hover over a conversation in the sidebar
+2. Click **three dots** () icon
+3. Click **Rename**
+4. Enter new title and save
+
+
+
+### Delete Threads
+
+
+
+**Single thread:**
+1. Hover over thread in sidebar
+2. Click **three dots** () icon
+3. Click **Delete**
+
+**All threads:**
+1. Hover over `Recents` category
+2. Click **three dots** () icon
+3. Select **Delete All**
+
+## Advanced Features
+
+### Custom Assistant Instructions
+
+Customize how models respond:
+
+1. Use the assistant dropdown in the input field
+2. Or go to the **Assistant tab** to create custom instructions
+3. Instructions work across all models
+
+
+
+
+
+### Model Parameters
+
+Fine-tune model behavior:
+- Click the **Gear icon** next to your model
+- Adjust parameters in **Assistant Settings**
+- Switch models via the **model selector**
+
+
+
+### Connect Cloud Models (Optional)
+
+Connect to OpenAI, Anthropic, Groq, Mistral, and others:
+
+1. Open any thread
+2. Select a cloud model from the dropdown
+3. Click the **Gear icon** beside the provider
+4. Add your API key (ensure sufficient credits)
+
+
+
+For detailed setup, see [Remote APIs](/docs/remote-models/openai).
diff --git a/website/src/content/docs/jan/troubleshooting.mdx b/website/src/content/docs/jan/troubleshooting.mdx
new file mode 100644
index 000000000..58e207814
--- /dev/null
+++ b/website/src/content/docs/jan/troubleshooting.mdx
@@ -0,0 +1,399 @@
+---
+title: Troubleshooting
+description: Explore solutions for common issues and optimize Jan's performance with this comprehensive troubleshooting guide.
+keywords:
+ [
+ Jan,
+ Customizable Intelligence, LLM,
+ local AI,
+ privacy focus,
+ free and open source,
+ private and offline,
+ conversational AI,
+ no-subscription fee,
+ large language models,
+ troubleshooting,
+ error codes,
+ broken build,
+ something amiss,
+ unexpected token,
+ undefined issue,
+ permission denied,
+ ]
+---
+
+
+
+# Troubleshooting
+
+## How to Get Error Logs
+
+Error logs are essential for troubleshooting issues and getting help from Jan team. To get error logs from Jan, follow the steps below:
+
+#### Through Jan Interface
+
+1. Open **System Monitor** in the footer
+2. Choose **App Log**
+
+
+
+
+
+#### Through Terminal
+**Application Logs**
+```bash
+tail -n 50 ~/Library/Application\ Support/Jan/data/logs/app.log
+```
+**Server Logs**
+```bash
+tail -n 50 ~/Library/Application\ Support/Jan/data/logs/cortex.log
+```
+
+
+
+
+## Broken Build
+
+To resolve the issue where Jan is stuck in a broken build after installation:
+
+
+
+ 1. **Uninstall** Jan
+
+ 2. **Delete** Application Data, Cache, and User Data:
+
+ ```zsh
+ rm -rf ~/Library/Application\ Support/Jan
+ ```
+
+ 3. If you are using a version before `0.4.2`, you need to run the following commands:
+
+ ```zsh
+ ps aux | grep nitro
+ # Looks for processes like `nitro` and `nitro_arm_64`, and kill them one by one by process ID
+ kill -9 <process_id>
+ ```
+
+ 4. **Download** the [latest version of Jan](/download)
+
+
+
+ 1. **Uninstall** Jan, using the [Windows Control Panel](https://support.microsoft.com/en-us/windows/uninstall-or-remove-apps-and-programs-in-windows-4b55f974-2cc6-2d2b-d092-5905080eaf98)
+
+ 2. **Delete** Application Data, Cache, and User Data:
+
+ ```cmd
+ cd C:\Users\%USERNAME%\AppData\Roaming
+ rmdir /S Jan
+ ```
+
+ 3. If you are using a version before `0.4.2`, you need to run the following commands:
+
+ ```bash
+ # Find the process ID (PID) of the nitro process by filtering the list by process name
+ tasklist | findstr "nitro"
+ # Once you have the PID of the process you want to terminate, run the `taskkill`
+ taskkill /F /PID <process_id>
+ ```
+
+ 4. **Download** the [latest version of Jan](/download)
+
+
+
+ 1. **Uninstall** Jan
+
+ Choose the appropriate method based on how you installed Jan:
+
+ **For Debian/Ubuntu:**
+ ```
+ sudo apt-get remove Jan
+ ```
+ **For Others:** Delete the Jan `.AppImage` file from your system
+
+ 2. Delete Application Data, Cache, and User Data:
+
+ ```bash
+ # Default dir
+ ~/.config/Jan
+ # Custom installation directory
+ $XDG_CONFIG_HOME = /home/username/custom_config/Jan
+ ```
+
+ 3. If you are using a version before `0.4.2`, you need to run the following commands:
+
+ ```zsh
+ ps aux | grep nitro
+ # Looks for processes like `nitro` and `nitro_arm_64`, and kill them one by one by process ID
+ kill -9 <process_id>
+ ```
+
+ 4. **Download** the [latest version of Jan](/download)
+
+
+
+
+Following these steps, you can cleanly uninstall and reinstall Jan, ensuring a smooth and error-free experience with the latest version.
+
+
+
+## Troubleshooting NVIDIA GPU
+To resolve issues when Jan does not utilize the NVIDIA GPU on Windows and Linux systems.
+
+
+
+### Step 1: Verify Hardware and System Requirements
+
+#### 1.1. Check GPU Detection
+First, verify that your system recognizes the NVIDIA GPU:
+**Windows:**
+- Right-click desktop → NVIDIA Control Panel
+- Or check Device Manager → Display Adapters
+**Linux:**
+```
+lspci | grep -i nvidia
+```
+#### 1.2. Install Required components
+**NVIDIA Driver:**
+1. Install [NVIDIA Driver](https://www.nvidia.com/en-us/drivers/) for your GPU (NVIDIA driver **470.63.01 or higher**).
+2. Verify installation:
+
+```
+nvidia-smi
+```
+Expected output should show your GPU model and driver version.
+
+**CUDA Toolkit:**
+1. Download and install [CUDA toolkit](https://developer.nvidia.com/cuda-downloads) (**CUDA 11.7 or higher**)
+2. Verify installation:
+
+```
+nvcc --version
+```
+**Linux Additional Requirements:**
+1. Required packages are installed:
+```
+sudo apt update
+sudo apt install gcc-11 g++-11 cpp-11
+```
+See [detailed instructions](https://gcc.gnu.org/projects/cxx-status.html#cxx17).
+
+2. Set up CUDA environment:
+```
+export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda/lib64
+```
+See [detailed instructions](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#post-installation-actions).
+
+
+
+### Step 2: Turn on GPU acceleration
+
+Jan manages GPU usage automatically:
+- Switches to GPU mode when supported
+- Automatically selects GPU with highest VRAM
+
+To verify GPU acceleration is turned on:
+1. Open **Settings** > **Hardware**
+2. Verify that **GPU Acceleration** is turned on
+3. Verify your selected GPU(s) are visible in **System Monitor** from Jan's footer
+
+
+
+
+
+
+### Step 3: GPU Settings Check
+
+1. Go to **Settings** > **General** > **Data Folder**
+2. Click on **Open Containing Folder**
+3. Open `settings.json` file
+
+Example `settings.json`:
+
+```
+{
+ "notify": true,
+ "run_mode": "gpu",
+ "nvidia_driver": {
+ "exist": true,
+ "version": "531.18"
+ },
+ "cuda": {
+ "exist": true,
+ "version": "12"
+ },
+ "gpus": [
+ {
+ "id": "0",
+ "vram": "12282"
+ },
+ {
+ "id": "1",
+ "vram": "6144"
+ },
+ {
+ "id": "2",
+ "vram": "6144"
+ }
+ ],
+ "gpu_highest_vram": "0"
+}
+```
+**Key Configuration Values:**
+- `run_mode`: Should be "gpu" for GPU acceleration
+- `nvidia_driver`: Shows driver status and version
+- `cuda`: Shows CUDA toolkit status and version
+- `gpus`: Lists available GPUs and their VRAM (in MB)
+- `gpu_highest_vram`: ID of GPU with most VRAM
+
+
+
+### Step 4: Restart Jan
+
+Restart Jan to make sure it works.
+
+
+
+### Tested Configurations
+These configurations have been verified to work with Jan's GPU acceleration. You can use them as reference points for your setup.
+
+**Bare Metal Installations**
+
+Windows 11 Pro (64-bit)
+| Component | Version/Model |
+|-----------|--------------|
+| GPU | NVIDIA GeForce RTX 4070Ti |
+| CUDA | 12.2 |
+| NVIDIA Driver | 531.18 |
+| OS | Windows 11 Pro 64-bit |
+| RAM | 32GB |
+
+Ubuntu 22.04 LTS
+| Component | Version/Model |
+|-----------|--------------|
+| GPU | NVIDIA GeForce RTX 4070Ti |
+| CUDA | 12.2 |
+| NVIDIA Driver | 545 |
+| OS | Ubuntu 22.04 LTS |
+
+**Virtual Machine Setups**
+
+Ubuntu on Proxmox VM
+| Component | Version/Model |
+|-----------|--------------|
+| GPU | NVIDIA GeForce GTX 1660Ti |
+| CUDA | 12.1 |
+| NVIDIA Driver | 535 |
+| OS | Ubuntu 20.04/18.04 LTS |
+| VM Type | Proxmox |
+
+**Performance Notes**
+- Bare metal installations provide better performance
+- VM setups require proper GPU passthrough configuration
+- Some laptop GPUs may have reduced performance
+- Hybrid graphics (Optimus) may need additional configuration
+
+## Permission Denied
+
+When running Jan, you might encounter the following error message:
+
+```
+Uncaught (in promise) Error: Error invoking layout-480796bff433a3a3.js:538 remote method 'installExtension':
+Error Package /Applications/Jan.app/Contents/Resources/app.asar.unpacked/pre-install/janhq-assistant-extension-1.0.0.tgz does not contain a valid manifest:
+Error EACCES: permission denied, mkdtemp '/Users/username/.npm/_cacache/tmp/ueCMn4'
+```
+
+This error is typically caused by permission problems during installation. To resolve this issue, follow these steps:
+
+1. Open your **Terminal**
+
+2. Execute the following command to change ownership of the `~/.npm` directory to the current user:
+
+```bash
+sudo chown -R $(whoami) ~/.npm
+```
+
+This command ensures that the necessary permissions are granted for Jan's installation.
+
+
+## "Failed to fetch" or "Something's Amiss" errors
+
+When you start a chat with a model and encounter a **Failed to Fetch** or **Something's Amiss** error, here are some possible solutions to resolve it:
+
+**1. Check System & Hardware Requirements**
+- Hardware dependencies: Ensure your device meets all [hardware requirements](/docs/troubleshooting#step-1-verify-hardware-and-system-requirements)
+- OS: Ensure your operating system meets the minimum requirements ([Mac](/docs/desktop/mac#minimum-requirements), [Windows](/docs/desktop/windows#compatibility), [Linux](/docs/desktop/linux#compatibility))
+- RAM: Choose models that use less than 80% of your available RAM
+ - For 8GB systems: Use models under 6GB
+ - For 16GB systems: Use models under 13GB
+
+**2. Check Model Parameters**
+- In **Engine Settings** in right sidebar, check your `ngl` ([number of GPU layers](/docs/models/model-parameters#engine-parameters)) setting to see if it's too high
+- Start with a lower NGL value and increase gradually based on your GPU memory
+
+**3. Port Conflicts**
+
+If you check your [app logs](/docs/troubleshooting#how-to-get-error-logs) & see "Bind address failed at 127.0.0.1:39291", check port availability:
+```
+# Mac
+netstat -an | grep 39291
+
+# Windows
+netstat -ano | find "39291"
+tasklist /fi "PID eq 39291"
+
+# Linux
+netstat -anpe | grep "39291"
+```
+
+
+Default Jan ports:
+- Jan and Cortex API Server: `1337`
+- Jan Documentation: `3001`
+
+**4. Factory Reset**
+
+A factory reset can resolve persistent issues by returning Jan to its original state. This will remove all custom settings, downloaded models, and chat history.
+1. Go to **Settings** > **Advanced Settings**
+2. At **Reset To Factory Settings**, click **Reset**
+
+
+
+**5. Try a clean installation**
+- Uninstall Jan & clean Jan data folders ([Mac](/docs/desktop/mac#uninstall-jan), [Windows](/docs/desktop/windows#uninstall-jan), [Linux](/docs/desktop/linux#uninstall-jan))
+- Install the latest [stable release](/download)
+
+
+
+## OpenAI Unexpected Token Issue
+The "Unexpected token" error usually relates to OpenAI API authentication or regional restrictions.
+
+**Step 1: API Key Setup**
+1. Get a valid API key from [OpenAI's developer platform](https://platform.openai.com/)
+2. Ensure the key has sufficient credits & appropriate permissions
+
+**Step 2: Regional Access**
+1. If you're in a region with restricted access, use a VPN service from a supported region
+2. Verify your network can reach OpenAI's API endpoints
+
+
+## Need Further Support?
+If you can't find what you need in our troubleshooting guide, feel free to reach out to us for extra help:
+- **Copy** your [app logs](/docs/troubleshooting#how-to-get-error-logs)
+- Go to our [Discord](https://discord.com/invite/FTk2MvZwJH) & send it to **#🆘|jan-help** channel for further support.
+
+
+
diff --git a/website/src/content/docs/jan/tutorials/creative-writing.mdx b/website/src/content/docs/jan/tutorials/creative-writing.mdx
new file mode 100644
index 000000000..32f5e148a
--- /dev/null
+++ b/website/src/content/docs/jan/tutorials/creative-writing.mdx
@@ -0,0 +1,16 @@
+---
+title: Creative Writing
+description: Download models and manage your conversations with AI models locally.
+keywords:
+ [
+ Jan,
+ local AI,
+ LLM,
+ chat,
+ threads,
+ models,
+ download,
+ installation,
+ conversations,
+ ]
+---
diff --git a/website/src/content/docs/jan/tutorials/translation.mdx b/website/src/content/docs/jan/tutorials/translation.mdx
new file mode 100644
index 000000000..23f08c6cb
--- /dev/null
+++ b/website/src/content/docs/jan/tutorials/translation.mdx
@@ -0,0 +1,17 @@
+---
+title: Translation
+description: Download models and manage your conversations with AI models locally.
+keywords:
+ [
+ Jan,
+ local AI,
+ LLM,
+ chat,
+ threads,
+ models,
+ translation,
+ download,
+ installation,
+ conversations,
+ ]
+---
diff --git a/website/src/content/docs/local-server/api-server.mdx b/website/src/content/docs/local-server/api-server.mdx
new file mode 100644
index 000000000..88e60d145
--- /dev/null
+++ b/website/src/content/docs/local-server/api-server.mdx
@@ -0,0 +1,136 @@
+---
+title: Server Setup
+description: Learn how to run Jan's local API server.
+keywords:
+ [
+ Jan,
+ Customizable Intelligence, LLM,
+ local AI,
+ privacy focus,
+ free and open source,
+ private and offline,
+ conversational AI,
+ no-subscription fee,
+ large language models,
+ Jan Extensions,
+ Extensions,
+ ]
+---
+
+import { Aside } from '@astrojs/starlight/components';
+import { Steps } from '@astrojs/starlight/components';
+import { Tabs, TabItem } from '@astrojs/starlight/components';
+
+
+Configure and start Jan's built-in API server.
+
+## Prerequisites
+
+- Jan installed and running
+- At least one AI model downloaded or configured (see [Model Management](/docs/manage-models))
+
+For an overview of Jan Local Server, see the [Local Server introduction](./index).
+
+
+
+
+
+## Start Server
+
+1. Navigate to **Local API Server**
+2. Add an API Key (can be anything)
+3. Configure settings (see [Server Configuration](#server-configuration) below)
+4. Click **Start Server**
+5. Wait for confirmation: `JAN API listening at: http://127.0.0.1:1337`
+
+
+
+## Test Server
+
+1. Click **API Playground**
+2. Select a model
+3. Send a test request
+
+## API Usage
+
+```bash
+curl http://127.0.0.1:1337/v1/chat/completions \
+ -H "Content-Type: application/json" \
+ -H "Authorization: Bearer testing-something" \
+ -d '{
+ "model": "jan-nano-gguf",
+ "messages": [
+ {
+ "role": "user",
+ "content": "Write a one-sentence bedtime story about a unicorn."
+ }
+ ]
+ }'
+```
+
+Include your API key in the `Authorization` header for all requests.
+
+## Server Configuration
+
+### Host Address Options
+- **127.0.0.1 (Recommended)**:
+ - Only accessible from your computer
+ - Most secure option for personal use
+- **0.0.0.0**:
+ - Makes server accessible from other devices on your network
+ - Use with caution and only when necessary
+
+### Port Number
+- Default: `1337`
+- Can be any number between 1-65535
+- Avoid common ports (80, 443, 3000, 8080) that might be used by other applications
+
+### API Prefix
+- Default: `/v1`
+- Defines the base path for all API endpoints
+- Example: http://127.0.0.1:1337/v1/chat/completions
+
+### Cross-Origin Resource Sharing (CORS)
+CORS controls which websites can access your API, which is important for web applications running in browsers.
+
+**When to enable:**
+- If you're building a web application that needs to access the API
+- If you're using browser extensions
+
+**When to leave disabled:**
+- If you're only using the API from your local applications
+- If you're concerned about security
+
+### Verbose Server Logs
+Enable to show:
+- Detailed information about each API request
+- Error messages and debugging information
+- Server status updates
+
+## Troubleshooting
+
+
+
+### Common Server Issues
+- Server not running
+- Model not loaded in Jan
+- Port already in use
+- Check admin/sudo rights
+- API endpoint doesn't match server settings
+- Model name in request doesn't match Jan model name
+- Invalid JSON format
+- Firewall blocking connection
+- Missing API key in request headers
+
+### CORS Errors
+- Enable CORS in server settings
+- Check request origin
+- Verify URL matches server address
+- Check browser console
+
+### Performance Issues
+- Monitor CPU, RAM, GPU usage
+- Reduce context length or GPU layers
+- Close other resource-intensive applications
diff --git a/website/src/content/docs/local-server/data-folder.mdx b/website/src/content/docs/local-server/data-folder.mdx
new file mode 100644
index 000000000..a4542d4b3
--- /dev/null
+++ b/website/src/content/docs/local-server/data-folder.mdx
@@ -0,0 +1,179 @@
+---
+title: Jan Data Folder
+description: Understand where Jan stores your data and how to monitor server logs.
+keywords:
+ [
+ Jan,
+ local AI,
+ data folder,
+ logs,
+ server logs,
+ troubleshooting,
+ privacy,
+ local storage,
+ file structure,
+ ]
+---
+
+import { Tabs, TabItem } from '@astrojs/starlight/components';
+import { Aside } from '@astrojs/starlight/components';
+import { Steps } from '@astrojs/starlight/components';
+
+# Jan Data Folder
+
+Jan stores all your data locally on your computer. No cloud storage, no external servers - everything stays on your machine.
+
+## Quick Access
+
+**Via Jan Interface:**
+1. Go to Settings (⚙️) > Advanced Settings
+2. Click the folder icon 📁
+
+
+
+**Via File Explorer:**
+
+
+
+```cmd
+%APPDATA%\Jan\data
+```
+
+
+```bash
+~/Library/Application Support/Jan/data
+```
+
+
+```bash
+# Default installation
+~/.config/Jan/data
+
+# Custom installation
+$XDG_CONFIG_HOME/Jan/data
+```
+
+
+
+## Monitoring Server Logs
+
+When Jan's local server is running, you can monitor real-time activity in the logs folder:
+
+
+
+### Live Log Monitoring
+
+**Real-time logs show:**
+- API requests and responses
+- Model loading and inference activity
+- Error messages and warnings
+- Performance metrics
+- Connection attempts from external applications
+
+**Accessing logs:**
+- **In Jan**: System Monitor (footer) > App Log
+- **File location**: `/logs/app.log`
+
+### Log Categories
+
+| Log Type | What It Shows | When It's Useful |
+|----------|---------------|------------------|
+| **[APP]** | Core application events | Startup issues, crashes, general errors |
+| **[SERVER]** | API server activity | Connection problems, request failures |
+| **[SPECS]** | Hardware information | Performance issues, compatibility problems |
+| **[MODEL]** | Model operations | Loading failures, inference errors |
+
+## Data Structure
+
+```
+jan/
+├── assistants/ # AI personality settings
+│ └── jan/
+│ └── assistant.json
+├── engines/ # Engine configurations
+│ └── llama.cpp/
+├── extensions/ # Add-on modules
+│ └── extensions.json
+├── logs/ # Server and application logs
+│ └── app.log # Main log file
+├── models/ # Downloaded AI models
+│ └── huggingface.co/
+└── threads/ # Chat conversations
+ └── thread_id/
+ ├── messages.jsonl
+ └── thread.json
+```
+
+## Key Folders Explained
+
+### `/logs/` - Server Activity Hub
+Contains all application and server logs. Essential for troubleshooting and monitoring API activity.
+
+**What you'll find:**
+- Real-time server requests
+- Model loading status
+- Error diagnostics
+- Performance data
+
+### `/models/` - AI Model Storage
+Where your downloaded models live. Each model includes:
+- `model.gguf` - The actual AI model file
+- `model.json` - Configuration and metadata
+
+### `/threads/` - Chat History
+Every conversation gets its own folder with:
+- `messages.jsonl` - Complete chat history
+- `thread.json` - Thread metadata and settings
+
+### `/assistants/` - AI Personalities
+Configuration files that define how your AI assistants behave, including their instructions and available tools.
+
+## Privacy & Security
+
+**Your data stays local:**
+- No cloud backups or syncing
+- Files stored in standard JSON/JSONL formats
+- Complete control over your data
+- Easy to backup or migrate
+
+**File permissions:**
+- Only you and Jan can access these files
+- Standard user-level permissions
+- No elevated access required
+
+
+
+## Common Tasks
+
+### Backup Your Data
+Copy the entire Jan data folder to backup:
+- All chat history
+- Model configurations
+- Assistant settings
+- Extension data
+
+### Clear Chat History
+Delete individual thread folders in `/threads/` or use Jan's interface to delete conversations.
+
+### Export Conversations
+Thread files are in standard JSON format - readable by any text editor or compatible with other applications.
+
+### Troubleshooting Data Issues
+1. Check `/logs/app.log` for error messages
+2. Verify folder permissions
+3. Ensure sufficient disk space
+4. Restart Jan if files appear corrupted
+
+## Uninstalling Jan
+
+If you need to completely remove Jan and all data:
+
+**Keep data (reinstall later):** Just uninstall the application
+**Remove everything:** Delete the Jan data folder after uninstalling
+
+Detailed uninstall guides:
+- [macOS](/docs/desktop/mac#step-2-clean-up-data-optional)
+- [Windows](/docs/desktop/windows#step-2-handle-jan-data)
+- [Linux](/docs/desktop/linux#uninstall-jan)
\ No newline at end of file
diff --git a/website/src/content/docs/local-server/index.mdx b/website/src/content/docs/local-server/index.mdx
new file mode 100644
index 000000000..d81b48b90
--- /dev/null
+++ b/website/src/content/docs/local-server/index.mdx
@@ -0,0 +1,195 @@
+---
+title: Jan Local Server
+description: Run Jan as a local AI server with OpenAI-compatible API for building AI applications.
+---
+
+import { Aside } from '@astrojs/starlight/components';
+
+
+
+Jan Local Server provides an OpenAI-compatible API that runs entirely on your computer. Build AI applications using familiar API patterns while keeping complete control over your data and models.
+
+## How It Works
+
+Jan runs a local server powered by [llama.cpp](https://github.com/ggerganov/llama.cpp) that provides an OpenAI-compatible API. By default, it runs at `http://localhost:1337` and works completely offline.
+
+**What this enables:**
+- Connect development tools like [Continue](./continue-dev) and [Cline](https://cline.bot/) to Jan
+- Build AI applications without cloud dependencies
+- Use both local and cloud models through the same API
+- Maintain full privacy for local model interactions
+
+## Key Features
+
+**Local AI Models**
+- Download popular open-source models (Llama, Gemma, Qwen) from Hugging Face
+- Import any GGUF files from your computer
+- Run models completely offline
+
+**Cloud Integration**
+- Connect to cloud services (OpenAI, Anthropic, Mistral, Groq)
+- Use your own API keys
+- Switch between local and cloud models seamlessly
+
+**Developer-Friendly**
+- OpenAI-compatible API for easy integration
+- Chat interface for testing and configuration
+- Model parameter customization
+
+**Complete Privacy**
+- All data stored locally
+- No cloud dependencies for local models
+- You control what data leaves your machine
+
+## Why Choose Jan?
+
+**Truly Open Source**
+- Apache 2.0 license - no restrictions
+- Community-driven development
+- Full transparency
+
+**Local-First Design**
+- Works 100% offline with local models
+- Data stays on your machine
+- No vendor lock-in
+
+**Flexible Model Support**
+- Your choice of AI models
+- Both local and cloud options
+- Easy model switching
+
+**No Data Collection**
+- We don't collect or sell user data
+- Local conversations stay local
+- [Read our Privacy Policy](./privacy)
+
+
+
+## Philosophy
+
+Jan is built to be **user-owned**. This means:
+- **True open source** - Apache 2.0 license with no hidden restrictions
+- **Local data storage** - following [local-first principles](https://www.inkandswitch.com/local-first)
+- **Internet optional** - works completely offline
+- **Free choice** - use any AI models you want
+- **No surveillance** - we don't collect or sell your data
+
+Read more about our [philosophy](/about#philosophy).
+
+## Inspiration
+
+Jan draws inspiration from [Calm Computing](https://en.wikipedia.org/wiki/Calm_technology) and the Disappearing Computer - technology that works quietly in the background without demanding constant attention.
+
+## Built With
+
+Jan stands on the shoulders of excellent open-source projects:
+- [llama.cpp](https://github.com/ggerganov/llama.cpp) - Local AI model inference
+- [Scalar](https://github.com/scalar/scalar) - API documentation
+
+## Frequently Asked Questions
+
+## What is Jan?
+
+ Jan is a privacy-focused AI assistant that runs locally on your computer. It's an alternative to ChatGPT, Claude, and other cloud-based AI tools, with optional cloud AI support when you want it.
+
+
+## How do I get started?
+
+ Download Jan, add a model (either download locally or add a cloud API key), and start chatting. Check our [Quick Start guide](/docs/quickstart) for detailed setup instructions.
+
+
+## What systems does Jan support?
+
+ Jan works on all major operating systems:
+ - [macOS](/docs/desktop/mac#compatibility) - Intel and Apple Silicon
+ - [Windows](/docs/desktop/windows#compatibility) - x64 systems
+ - [Linux](/docs/desktop/linux) - Most distributions
+
+ Jan supports various hardware:
+ - NVIDIA GPUs (CUDA acceleration)
+ - AMD GPUs (Vulkan support)
+ - Intel Arc GPUs (Vulkan support)
+ - Any GPU with Vulkan support
+ - CPU-only operation
+
+
+## How does Jan protect my privacy?
+
+ Jan prioritizes privacy through:
+ - **100% offline operation** with local models
+ - **Local data storage** - everything stays on your device
+ - **Open-source transparency** - you can verify what Jan does
+ - **No data collection** - we never see your conversations
+
+
+
+ All your files and chat history are stored locally in the [Jan Data Folder](./data-folder). See our complete [Privacy Policy](./privacy).
+
+
+## What AI models can I use?
+
+ **Local models:**
+ - Download optimized models from the [Jan Hub](/docs/manage-models)
+ - Import GGUF models from Hugging Face
+ - Use any compatible local model files
+
+ **Cloud models:**
+ - OpenAI (GPT-4, ChatGPT)
+ - Anthropic (Claude)
+ - Mistral, Groq, and others
+ - Bring your own API keys
+
+
+## Is Jan really free?
+
+ Yes! Jan is completely free and open-source with no subscription fees.
+
+ **What's free:**
+ - Jan application and all features
+ - Local model usage (once downloaded)
+ - Local server and API
+
+ **What costs money:**
+ - Cloud model usage (you pay providers directly)
+ - We add no markup to cloud service costs
+
+
+## Can Jan work offline?
+
+ Absolutely! Once you download a local model, Jan works completely offline with no internet connection needed. This is one of Jan's core features.
+
+
+## How can I get help or contribute?
+
+ **Get help:**
+ - Join our [Discord community](https://discord.gg/qSwXFx6Krr)
+ - Check the [Troubleshooting guide](./troubleshooting)
+ - Ask in [#🆘|jan-help](https://discord.com/channels/1107178041848909847/1192090449725358130)
+
+ **Contribute:**
+ - Contribute on [GitHub](https://github.com/menloresearch/jan)
+ - No permission needed to submit improvements
+ - Help other users in Discord
+
+
+## Can I self-host Jan?
+
+ Yes! We fully support self-hosting. You can:
+ - Download Jan directly for personal use
+ - Fork the [GitHub repository](https://github.com/menloresearch/jan)
+ - Build from source
+ - Deploy on your own infrastructure
+
+
+## What does 'Jan' stand for?
+
+ "Just a Name" - we admit we're not great at marketing! 😄
+
+
+## Are you hiring?
+
+ Yes! We love hiring from our community. Check our open positions at [Careers](https://menlo.bamboohr.com/careers).
diff --git a/website/src/content/docs/local-server/integrations/continue-dev.mdx b/website/src/content/docs/local-server/integrations/continue-dev.mdx
new file mode 100644
index 000000000..3c7e20283
--- /dev/null
+++ b/website/src/content/docs/local-server/integrations/continue-dev.mdx
@@ -0,0 +1,114 @@
+---
+title: Continue.dev
+description: A step-by-step guide on integrating Jan with Continue and VS Code.
+keywords:
+ [
+ Jan,
+ Customizable Intelligence, LLM,
+ local AI,
+ privacy focus,
+ free and open source,
+ private and offline,
+ conversational AI,
+ no-subscription fee,
+ large language models,
+ Continue integration,
+ VSCode integration,
+ ]
+---
+
+import { Tabs, TabItem } from '@astrojs/starlight/components';
+import { Steps } from '@astrojs/starlight/components';
+
+## Integrate with Continue VS Code
+
+[Continue](https://continue.dev/docs/intro) is an open-source autopilot compatible with Visual Studio Code and JetBrains, offering the simplest method to code with any LLM (Large Language Model).
+
+To integrate Jan with a local AI language model, follow the steps below:
+
+
+1. **Installing Continue on Visual Studio Code**
+
+ Follow this [guide](https://continue.dev/docs/quickstart) to install the Continue extension on Visual Studio Code.
+
+2. **Enable the Jan API Server**
+
+ To set up Continue for use with Jan's Local Server, you must activate the Jan API Server with your chosen model.
+
+ 1. Press the `⚙️ Settings` button.
+
+ 2. Locate `Local API Server`.
+
+ 3. Setup the server, which includes the **IP Port**, **Cross-Origin-Resource-Sharing (CORS)** and **Verbose Server Logs**.
+
+ 4. Include your user-defined API Key.
+
+ 5. Press the **Start Server** button
+
+3. **Configure Continue to Use Jan's Local Server**
+
+ 1. Go to the `~/.continue` directory.
+
+
+
+ ```bash
+ cd ~/.continue
+ ```
+
+
+ ```bash
+ cd C:/Users/%USERNAME%/.continue
+ ```
+
+
+ ```bash
+ cd ~/.continue
+ ```
+
+
+
+ ```yaml title="~/.continue/config.yaml"
+ name: Local Assistant
+ version: 1.0.0
+ schema: v1
+ models:
+ - name: Jan
+ provider: openai
+ model: #MODEL_NAME (e.g. qwen3:0.6b)
+ apiKey: #YOUR_USER_DEFINED_API_KEY_HERE (e.g. hello)
+ apiBase: http://localhost:1337/v1
+ context:
+ - provider: code
+ - provider: docs
+ - provider: diff
+ - provider: terminal
+ - provider: problems
+ - provider: folder
+ - provider: codebase
+ ```
+
+ 2. Ensure the file has the following configurations:
+ - Ensure `openai` is selected as the `provider`.
+ - Match the `model` with the one enabled in the Jan API Server.
+ - Set `apiBase` to `http://localhost:1337/v1`.
+
+4. **Ensure the Using Model Is Activated in Jan**
+
+ 1. Navigate to `Settings` > `Model Providers`.
+ 2. Under Llama.cpp, find the model that you would want to use.
+ 3. Select the **Start Model** button to activate the model.
+
+
+## How to Use Jan Integration with Continue in Visual Studio Code
+
+### 1. Exploring Code with Jan
+
+1. Highlight a code.
+2. Press `Command + Shift + M` to open the Left Panel.
+3. Click "Jan" at the bottom of the panel and submit your query, such as `Explain this code`.
+
+### 2. Enhancing Code with the Help of a Large Language Model
+
+1. Select a code snippet.
+2. Press `Command + Shift + L`.
+3. Type in your specific request, for example, `Add comments to this code`.
diff --git a/website/src/content/docs/local-server/integrations/llmcord.mdx b/website/src/content/docs/local-server/integrations/llmcord.mdx
new file mode 100644
index 000000000..fb28e523a
--- /dev/null
+++ b/website/src/content/docs/local-server/integrations/llmcord.mdx
@@ -0,0 +1,79 @@
+---
+title: llmcord (Discord)
+description: A step-by-step guide on integrating Jan with a Discord bot.
+keywords:
+ [
+ Jan,
+ Customizable Intelligence, LLM,
+ local AI,
+ privacy focus,
+ free and open source,
+ private and offline,
+ conversational AI,
+ no-subscription fee,
+ large language models,
+ Discord integration,
+ Discord,
+ bot,
+ ]
+---
+
+import { Aside } from '@astrojs/starlight/components';
+import { Steps } from '@astrojs/starlight/components';
+
+# llmcord (Discord)
+
+## Integrate llmcord.py with Jan
+
+[llmcord.py](https://github.com/jakobdylanc/discord-llm-chatbot) lets you and your friends chat with LLMs directly in your Discord server.
+
+To integrate Jan with llmcord.py, follow the steps below:
+
+
+
+1. **Clone the Repository**
+
+ Clone the discord bot's [repository](https://github.com/jakobdylanc/discord-llm-chatbot) by using the following command:
+ ```bash
+ git clone https://github.com/jakobdylanc/discord-llm-chatbot.git
+ ```
+
+2. **Install the Required Libraries**
+
+ After cloning the repository, run the following command:
+
+ ```bash
+ pip install -r requirements.txt
+ ```
+
+
+
+3. **Set the Environment**
+
+ 1. Create a copy of `.env.example`.
+ 2. Change the name to `.env`.
+ 3. Set the environment with the following options:
+
+ | Setting | Instructions |
+ | ----------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+ | `DISCORD_BOT_TOKEN` | Create a new Discord bot at [discord.com/developers/applications](https://discord.com/developers/applications), obtain a token from the Bot tab, and enable MESSAGE CONTENT INTENT. |
+ | `DISCORD_CLIENT_ID` | Found under the OAuth2 tab of the Discord bot you just made. |
+ | `LLM` | For Jan, set to `local/openai/(MODEL_NAME)`, where `(MODEL_NAME)` is your loaded model's name. |
+ | `LLM_SYSTEM_PROMPT` | Adjust the bot's behavior as needed. |
+ | `LOCAL_SERVER_URL` | URL of your local API server. For Jan, set it to `http://localhost:1337/v1`. |
+
+ For more configuration options, refer to llmcord.py's [README](https://github.com/jakobdylanc/discord-llm-chatbot/tree/main?tab=readme-ov-file#instructions).
+
+4. **Run the Bot**
+
+ Run the bot by using the following command in your command prompt:
+
+ ```bash
+ python llmcord.py
+ ```
+
+ The bot's invite URL will be printed in the console. Use it to add the bot to your server.
+
+
\ No newline at end of file
diff --git a/website/src/content/docs/local-server/integrations/n8n.mdx b/website/src/content/docs/local-server/integrations/n8n.mdx
new file mode 100644
index 000000000..ec79cb18c
--- /dev/null
+++ b/website/src/content/docs/local-server/integrations/n8n.mdx
@@ -0,0 +1,80 @@
+---
+title: n8n
+keywords:
+ [
+ Jan,
+ Customizable Intelligence, LLM,
+ local AI,
+ privacy focus,
+ free and open source,
+ private and offline,
+ conversational AI,
+ no-subscription fee,
+ large language models,
+ n8n integration,
+ n8n,
+ ]
+description: A step-by-step guide on integrating Jan with n8n.
+---
+
+import { Steps } from '@astrojs/starlight/components';
+
+# n8n
+
+## Integrate n8n with Jan
+
+[n8n](https://n8n.io/) is an open-source workflow automation tool that allows you to connect to 400+ integrations and services to automate repetitive tasks. With its visual interface, you can create complex workflows conveniently. To integrate n8n with Jan, follow the steps below:
+
+
+1. **Run your preferred model with Jan server**
+
+ 1. Open Jan app.
+ 2. Go to the **Hub** and download your preferred model
+ 3. Run the Jan server
+
+2. **Start n8n service**
+ Start n8n immediately using npx:
+
+ ```
+ npx n8n
+ ```
+
+ Or deploy with Docker:
+
+ ```
+ docker run -it --rm --name n8n -p 5678:5678 docker.n8n.io/n8nio/n8n
+ ```
+
+3. **Integrate Jan with n8n service using HTTP Request**
+
+ Integrate Jan by selecting the HTTP Request node in n8n and importing the following cURL command:
+
+ ```bash
+ curl -X 'POST' \
+ 'http://127.0.0.1:1337/v1/chat/completions' \
+ -H 'accept: application/json' \
+ -H 'Content-Type: application/json' \
+ -d '{
+ "messages": [
+ {
+ "content": "You are a helpful assistant.",
+ "role": "system"
+ },
+ {
+ "content": "Hello!",
+ "role": "user"
+ }
+ ],
+ "model": "tinyllama-1.1b",
+ "stream": true,
+ "max_tokens": 2048,
+ "stop": [
+ "hello"
+ ],
+ "frequency_penalty": 0,
+ "presence_penalty": 0,
+ "temperature": 0.7,
+ "top_p": 0.95
+ }'
+ ```
+
\ No newline at end of file
diff --git a/website/src/content/docs/local-server/integrations/tabby.mdx b/website/src/content/docs/local-server/integrations/tabby.mdx
new file mode 100644
index 000000000..1e8fdcebf
--- /dev/null
+++ b/website/src/content/docs/local-server/integrations/tabby.mdx
@@ -0,0 +1,101 @@
+---
+title: Tabby
+description: A step-by-step guide on integrating Jan with Tabby and VSCode, JetBrains, or other IDEs.
+keywords:
+ [
+ Jan,
+ Customizable Intelligence, LLM,
+ local AI,
+ privacy focus,
+ free and open source,
+ private and offline,
+ conversational AI,
+ no-subscription fee,
+ large language models,
+ Tabby integration,
+ VSCode integration,
+ JetBrains integration,
+ ]
+---
+
+import { Tabs, TabItem } from '@astrojs/starlight/components';
+import { Steps } from '@astrojs/starlight/components';
+
+# Tabby
+
+## Integrate Jan with Tabby and Your Favorite IDEs
+
+[Tabby](https://www.tabbyml.com/) is an open-source, self-hosted AI coding assistant.
+With Tabby, teams can easily set up their own LLM-powered code completion server.
+
+Tabby provides integrations with VSCode, JetBrains, and other IDEs to help developers code more efficiently,
+and it can be used with various LLM services, including Jan.
+
+To integrate Jan with Tabby, follow these steps:
+
+
+
+1. **Enable the Jan API Server**
+
+ To set up Tabby with Jan's Local Server, you must activate the Jan API Server with your chosen model.
+
+ 1. Click the `Local API Server` (`<>`) button above the Settings. Jan will direct you to the **Local API Server** section.
+ 2. Configure the server, including the **IP Port**, **Cross-Origin Resource Sharing (CORS)**, and **Verbose Server Logs**.
+ 3. Press the **Start Server** button.
+
+2. **Find the Model ID and Ensure the Model is Activated**
+
+ 1. Go to `Settings` > `My Models`.
+ 2. Models are listed with their **Model ID** beneath their names.
+ 3. Click the **three dots (⋮)** button next to the model.
+ 4. Select **Start Model** to activate the model.
+
+3. **Installing Tabby Server**
+
+ Use the following documentation to install the Tabby server:
+ - [Docker](https://tabby.tabbyml.com/docs/quick-start/installation/docker/)
+ - [Apple Silicon](https://tabby.tabbyml.com/docs/quick-start/installation/apple/)
+ - [Linux](https://tabby.tabbyml.com/docs/quick-start/installation/linux/)
+ - [Windows](https://tabby.tabbyml.com/docs/quick-start/installation/windows/)
+
+ Then, follow the steps to connect Jan with the Tabby server:
+ [Connect Jan with Tabby](https://tabby.tabbyml.com/docs/references/models-http-api/jan.ai/).
+
+ For example, to connect Jan with Tabby, save the following configuration under `~/.tabby/config.toml`:
+
+ ```toml title="~/.tabby/config.toml"
+ # Chat model
+ [model.chat.http]
+ kind = "openai/chat"
+ model_name = "model_id"
+ api_endpoint = "http://localhost:1337/v1"
+ api_key = ""
+ ```
+
+ Currently, the Jan completion and embedding API is under construction.
+ Once completed, you can also connect Jan with Tabby for completion and embedding tasks.
+
+4. **Installing Tabby on Your Favorite IDEs**
+
+ Refer to the following documentation to install the Tabby extension on your favorite IDEs:
+ - [Visual Studio Code](https://tabby.tabbyml.com/docs/extensions/installation/vscode/)
+ - [JetBrains IntelliJ Platform](https://tabby.tabbyml.com/docs/extensions/installation/intellij/)
+ - [VIM / NeoVIM](https://tabby.tabbyml.com/docs/extensions/installation/vim/)
+
+
+
+## How to Use Tabby with Jan Integration
+
+### Answer Engine: Chat with Your Codes and Documentation
+
+Tabby offers an [Answer Engine](https://tabby.tabbyml.com/docs/administration/answer-engine/) on the homepage,
+which can leverage the Jan LLM and related contexts like code, documentation, and web pages to answer user questions.
+
+Simply open the Tabby homepage at [localhost:8080](http://localhost:8080) and ask your questions.
+
+### IDE Chat Sidebar
+
+After installing the Tabby extension on your preferred IDEs, you can engage in a conversation with Jan to:
+
+1. Discuss your code, receive suggestions, and seek assistance.
+2. Request Jan to inline edit your code, and then review and accept the proposed changes.
\ No newline at end of file
diff --git a/website/src/content/docs/local-server/llama-cpp.mdx b/website/src/content/docs/local-server/llama-cpp.mdx
new file mode 100644
index 000000000..5872dd9f9
--- /dev/null
+++ b/website/src/content/docs/local-server/llama-cpp.mdx
@@ -0,0 +1,167 @@
+---
+title: llama.cpp Engine
+description: Configure Jan's local AI engine for optimal performance.
+keywords:
+ [
+ Jan,
+ local AI,
+ llama.cpp,
+ AI engine,
+ local models,
+ performance,
+ GPU acceleration,
+ CPU processing,
+ model optimization,
+ ]
+---
+
+import { Tabs, TabItem } from '@astrojs/starlight/components';
+import { Aside } from '@astrojs/starlight/components';
+import { Steps } from '@astrojs/starlight/components';
+
+# Local AI Engine (llama.cpp)
+
+llama.cpp is the engine that runs AI models locally on your computer. It's what makes Jan work without needing internet or cloud services.
+
+## Accessing Engine Settings
+
+Find llama.cpp settings at **Settings** (⚙️) > **Local Engine** > **llama.cpp**:
+
+
+
+
+
+## When to Adjust Settings
+
+You might need to modify these settings if:
+- Models load slowly or don't work
+- You've installed new hardware (like a graphics card)
+- You want to optimize performance for your specific setup
+
+## Engine Management
+
+| Feature | What It Does | When You Need It |
+|---------|-------------|------------------|
+| **Engine Version** | Shows current llama.cpp version | Check compatibility with newer models |
+| **Check Updates** | Downloads engine updates | When new models require updated engine |
+| **Backend Selection** | Choose hardware-optimized version | After hardware changes or performance issues |
+
+## Hardware Backends
+
+Different backends are optimized for different hardware. Pick the one that matches your computer:
+
+
+
+
+### NVIDIA Graphics Cards (Fastest)
+**For CUDA 12.0:**
+- `llama.cpp-avx2-cuda-12-0` (most common)
+- `llama.cpp-avx512-cuda-12-0` (newer Intel/AMD CPUs)
+
+**For CUDA 11.7:**
+- `llama.cpp-avx2-cuda-11-7` (older drivers)
+
+### CPU Only
+- `llama.cpp-avx2` (modern CPUs)
+- `llama.cpp-avx` (older CPUs)
+- `llama.cpp-noavx` (very old CPUs)
+
+### Other Graphics Cards
+- `llama.cpp-vulkan` (AMD, Intel Arc)
+
+
+
+
+### NVIDIA Graphics Cards
+- `llama.cpp-avx2-cuda-12-0` (recommended)
+- `llama.cpp-avx2-cuda-11-7` (older drivers)
+
+### CPU Only
+- `llama.cpp-avx2` (modern CPUs)
+- `llama.cpp-arm64` (ARM processors)
+
+### Other Graphics Cards
+- `llama.cpp-vulkan` (AMD, Intel graphics)
+
+
+
+
+### Apple Silicon (M1/M2/M3/M4)
+- `llama.cpp-mac-arm64` (recommended)
+
+### Intel Macs
+- `llama.cpp-mac-amd64`
+
+
+
+
+
+
+## Performance Settings
+
+| Setting | What It Does | Recommended | Impact |
+|---------|-------------|-------------|---------|
+| **Continuous Batching** | Handle multiple requests simultaneously | Enabled | Faster when using tools or multiple chats |
+| **Parallel Operations** | Number of concurrent requests | 4 | Higher = more multitasking, uses more memory |
+| **CPU Threads** | Processor cores to use | Auto | More threads can speed up CPU processing |
+
+## Memory Settings
+
+| Setting | What It Does | Recommended | When to Change |
+|---------|-------------|-------------|----------------|
+| **Flash Attention** | Efficient memory usage | Enabled | Leave enabled unless problems occur |
+| **Caching** | Remember recent conversations | Enabled | Speeds up follow-up questions |
+| **KV Cache Type** | Memory vs quality trade-off | f16 | Change to q8_0 if low on memory |
+| **mmap** | Efficient model loading | Enabled | Helps with large models |
+| **Context Shift** | Handle very long conversations | Disabled | Enable for very long chats |
+
+### Memory Options Explained
+- **f16**: Best quality, uses more memory
+- **q8_0**: Balanced memory and quality
+- **q4_0**: Least memory, slight quality reduction
+
+## Quick Troubleshooting
+
+**Models won't load:**
+- Try a different backend
+- Check available RAM/VRAM
+- Update engine version
+
+**Slow performance:**
+- Verify GPU acceleration is active
+- Close memory-intensive applications
+- Increase GPU Layers in model settings
+
+**Out of memory:**
+- Change KV Cache Type to q8_0
+- Reduce Context Size in model settings
+- Try a smaller model
+
+**Crashes or errors:**
+- Switch to a more stable backend (avx instead of avx2)
+- Update graphics drivers
+- Check system temperature
+
+## Quick Setup Guide
+
+**Most users:**
+1. Use default settings
+2. Only change if problems occur
+
+**NVIDIA GPU users:**
+1. Download CUDA backend
+2. Ensure GPU Layers is set high
+3. Enable Flash Attention
+
+**Performance optimization:**
+1. Enable Continuous Batching
+2. Use appropriate backend for hardware
+3. Monitor memory usage
+
+
\ No newline at end of file
diff --git a/website/src/content/docs/local-server/settings.mdx b/website/src/content/docs/local-server/settings.mdx
new file mode 100644
index 000000000..8819442ba
--- /dev/null
+++ b/website/src/content/docs/local-server/settings.mdx
@@ -0,0 +1,245 @@
+---
+title: Settings
+description: Configure Jan to work best for your needs and hardware.
+keywords:
+ [
+ Jan,
+ settings,
+ configuration,
+ model management,
+ privacy,
+ hardware settings,
+ local AI,
+ customization,
+ ]
+---
+
+import { Tabs, TabItem } from '@astrojs/starlight/components';
+import { Steps } from '@astrojs/starlight/components';
+import { Aside } from '@astrojs/starlight/components';
+
+Access Jan's settings by clicking the ⚙️ icon in the bottom left corner.
+
+## Managing AI Models
+
+Find all model options at **Settings** > **Model Providers**:
+
+### Adding Models
+
+**From Hugging Face:**
+- Enter a model's ID (like `microsoft/DialoGPT-medium`) in the search bar
+- **Need authentication?** Some models require a Hugging Face token - add yours at **Settings > Model Providers > Hugging Face Access Token**
+
+**From Your Computer:**
+- Click **Import Model** and select GGUF files from your computer
+- Works with any compatible model files you've downloaded
+
+### Managing Existing Models
+
+**Start a model:**
+1. Open a new chat and select the model you want
+2. Or go to **Settings > Model Providers** and click the **Start** button
+
+**Remove a model:**
+- Click the trash icon next to the **Start** button
+- Confirm deletion when prompted
+
+### Hugging Face Token Setup
+
+For restricted models (like Meta's Llama models):
+1. Get your token from [Hugging Face Tokens](https://huggingface.co/docs/hub/en/security-tokens)
+2. Add it at **Settings > Model Providers > Hugging Face**
+
+## Model Configuration (Gear Icon)
+
+
+
+Click the gear icon next to any model to adjust how it behaves:
+
+**Basic Settings:**
+- **Context Size**: How much conversation history the model remembers
+- **GPU Layers**: How much of the model runs on your graphics card (higher = faster, but uses more GPU memory)
+- **Temperature**: Controls creativity (0.1 = focused, 1.0 = creative)
+
+**Advanced Controls:**
+- **Top K & Top P**: Fine-tune how the model picks words (lower = more focused)
+- **Min P**: Minimum probability threshold for word selection
+- **Repeat Penalty**: Prevents the model from repeating itself too much
+- **Presence Penalty**: Encourages the model to use varied vocabulary
+
+
+
+## Hardware Monitoring
+
+Check your computer's performance at **Settings** > **Hardware**:
+
+- **CPU, RAM, GPU**: Real-time usage and specifications
+- **GPU Acceleration**: Turn GPU acceleration on/off
+- **Temperature monitoring**: Keep an eye on system heat
+
+
+
+
+
+## Personalization
+
+### Visual Appearance
+
+Customize Jan's look at **Settings** > **Appearance**:
+- **Theme**: Choose light or dark mode
+- **Colors**: Pick your preferred color scheme
+- **Code highlighting**: Adjust syntax colors for programming discussions
+
+
+
+### Writing Assistance
+
+**Spell Check:** Jan can help catch typing mistakes in your messages.
+
+
+
+## Privacy & Data Control
+
+Access privacy settings at **Settings** > **Privacy**:
+
+### Usage Analytics
+
+**Default: No data collection.** Everything stays on your computer.
+
+**Optional: Help improve Jan**
+- Toggle **Analytics** to share anonymous usage patterns
+- No conversations or personal data ever shared
+- Change this setting anytime
+
+
+
+
+
+### Log Management
+
+**Viewing System Logs:**
+- Logs help troubleshoot problems
+- Click the folder icon to open App Logs and System Logs
+- Logs are automatically deleted after 24 hours
+
+
+
+**Clearing Logs:**
+- Click **Clear** to remove all log files immediately
+- Useful before sharing your computer or troubleshooting
+
+
+
+
+
+### Data Folder Management
+
+Jan stores everything locally on your computer in standard file formats.
+
+**Access Your Data:**
+- Click the folder icon to open Jan's data directory
+- Find your chat history, models, and settings
+- All files are yours to backup, move, or examine
+
+
+
+**Change Storage Location:**
+1. Click the pencil icon to edit the data folder location
+2. Choose an empty directory
+3. Confirm the move (original folder stays intact)
+4. Restart Jan to complete the change
+
+
+
+
+
+## Network Settings
+
+### HTTPS Proxy Setup
+
+If you need to connect through a corporate network or want enhanced privacy:
+
+1. **Enable** the proxy toggle
+2. Enter your proxy details:
+```
+http://<username>:<password>@<proxy_address>:<port>
+```
+
+**Example:**
+```
+http://user:pass@proxy.company.com:8080
+```
+
+
+
+
+
+### SSL Certificate Handling
+
+**Ignore SSL Certificates:** Only enable this for:
+- Corporate networks with internal certificates
+- Development/testing environments
+- Trusted network setups
+
+
+
+
+
+## Emergency Options
+
+### Factory Reset
+
+**When to use:** Only as a last resort for serious problems that other solutions can't fix.
+
+**What it does:** Returns Jan to its original state - deletes everything.
+
+**Steps:**
+1. Click **Reset** under "Reset to Factory Settings"
+2. Type **RESET** to confirm you understand this deletes everything
+3. Optionally keep your current data folder location
+4. Click **Reset Now**
+5. Restart Jan
+
+
+
+
+
+
+
+**Try these first:**
+- Restart Jan
+- Check the [Troubleshooting Guide](./troubleshooting)
+- Ask for help on [Discord](https://discord.gg/qSwXFx6Krr)
+
+## Quick Tips
+
+**For new users:**
+- Start with default settings
+- Try a few different models to find what works best
+- Enable GPU acceleration if you have a graphics card
+
+**For performance:**
+- Monitor hardware usage in real-time
+- Adjust GPU layers based on your graphics card memory
+- Use smaller models on older hardware
+
+**For privacy:**
+- All data stays local by default
+- Check the data folder to see exactly what's stored
+- Analytics are opt-in only
diff --git a/website/src/content/docs/local-server/troubleshooting.mdx b/website/src/content/docs/local-server/troubleshooting.mdx
new file mode 100644
index 000000000..d6a3c5c3f
--- /dev/null
+++ b/website/src/content/docs/local-server/troubleshooting.mdx
@@ -0,0 +1,328 @@
+---
+title: Troubleshooting
+description: Fix common issues and optimize Jan's performance with this comprehensive guide.
+keywords:
+ [
+ Jan,
+ troubleshooting,
+ error fixes,
+ performance issues,
+ GPU problems,
+ installation issues,
+ common errors,
+ local AI,
+ technical support,
+ ]
+---
+
+import { Tabs, TabItem } from '@astrojs/starlight/components';
+import { Aside } from '@astrojs/starlight/components';
+import { Steps } from '@astrojs/starlight/components';
+
+# Troubleshooting
+
+## Getting Help: Error Logs
+
+When Jan isn't working properly, error logs help identify the problem. Here's how to get them:
+
+### Quick Access to Logs
+
+**In Jan Interface:**
+1. Look for **System Monitor** in the footer
+2. Click **App Log**
+
+
+
+**Via Terminal:**
+```bash
+# macOS/Linux
+tail -n 50 ~/Library/Application\ Support/Jan/data/logs/app.log
+
+# Windows
+type %APPDATA%\Jan\data\logs\app.log
+```
+
+
+
+## Common Issues & Solutions
+
+### Jan Won't Start (Broken Installation)
+
+If Jan gets stuck after installation or won't start properly:
+
+
+
+
+**Clean Reinstall Steps:**
+
+1. **Uninstall Jan** from Applications folder
+
+2. **Delete all Jan data:**
+```bash
+rm -rf ~/Library/Application\ Support/Jan
+```
+
+3. **Kill any background processes** (for versions before 0.4.2):
+```bash
+ps aux | grep nitro
+# Find process IDs and kill them:
+kill -9 <PID>
+```
+
+4. **Download fresh copy** from [jan.ai](/download)
+
+
+
+
+**Clean Reinstall Steps:**
+
+1. **Uninstall Jan** via Control Panel
+
+2. **Delete application data:**
+```cmd
+cd C:\Users\%USERNAME%\AppData\Roaming
+rmdir /S Jan
+```
+
+3. **Kill background processes** (for versions before 0.4.2):
+```cmd
+# Find nitro processes
+tasklist | findstr "nitro"
+# Kill them by PID
+taskkill /F /PID <PID>
+```
+
+4. **Download fresh copy** from [jan.ai](/download)
+
+
+
+
+**Clean Reinstall Steps:**
+
+1. **Uninstall Jan:**
+```bash
+# For Debian/Ubuntu
+sudo apt-get remove jan
+
+# For AppImage - just delete the file
+```
+
+2. **Delete application data:**
+```bash
+# Default location
+rm -rf ~/.config/Jan
+
+# Or custom location
+rm -rf $XDG_CONFIG_HOME/Jan
+```
+
+3. **Kill background processes** (for versions before 0.4.2):
+```bash
+ps aux | grep nitro
+kill -9 <PID>
+```
+
+4. **Download fresh copy** from [jan.ai](/download)
+
+
+
+
+
+
+### NVIDIA GPU Not Working
+
+If Jan isn't using your NVIDIA graphics card for acceleration:
+
+
+
+1. **Check Your Hardware Setup**
+
+ **Verify GPU Detection:**
+
+ *Windows:* Right-click desktop → NVIDIA Control Panel, or check Device Manager → Display Adapters
+
+ *Linux:* Run `lspci | grep -i nvidia`
+
+ **Install Required Software:**
+
+ **NVIDIA Driver (470.63.01 or newer):**
+ 1. Download from [nvidia.com/drivers](https://www.nvidia.com/drivers/)
+ 2. Test: Run `nvidia-smi` in terminal
+
+ **CUDA Toolkit (11.7 or newer):**
+ 1. Download from [CUDA Downloads](https://developer.nvidia.com/cuda-downloads)
+ 2. Test: Run `nvcc --version`
+
+ **Linux Additional Requirements:**
+ ```bash
+ # Install required packages
+ sudo apt update && sudo apt install gcc-11 g++-11 cpp-11
+
+ # Set CUDA environment
+ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda/lib64
+ ```
+
+2. **Enable GPU Acceleration in Jan**
+
+ 1. Open **Settings** > **Hardware**
+ 2. Turn on **GPU Acceleration**
+ 3. Check **System Monitor** (footer) to verify GPU is detected
+
+ 
+
+3. **Verify Configuration**
+
+ 1. Go to **Settings** > **Advanced Settings** > **Data Folder**
+ 2. Open `settings.json` file
+ 3. Check these settings:
+
+ ```json
+ {
+ "run_mode": "gpu", // Should be "gpu"
+ "nvidia_driver": {
+ "exist": true, // Should be true
+ "version": "531.18"
+ },
+ "cuda": {
+ "exist": true, // Should be true
+ "version": "12"
+ },
+ "gpus": [
+ {
+ "id": "0",
+ "vram": "12282" // Your GPU memory in MB
+ }
+ ]
+ }
+ ```
+
+4. **Restart Jan**
+
+ Close and restart Jan to apply changes.
+
+
+
+#### Tested Working Configurations
+
+**Desktop Systems:**
+- Windows 11 + RTX 4070Ti + CUDA 12.2 + Driver 531.18
+- Ubuntu 22.04 + RTX 4070Ti + CUDA 12.2 + Driver 545
+
+**Virtual Machines:**
+- Ubuntu on Proxmox + GTX 1660Ti + CUDA 12.1 + Driver 535
+
+
+
+### "Failed to Fetch" or "Something's Amiss" Errors
+
+When models won't respond or show these errors:
+
+**1. Check System Requirements**
+- **RAM:** Use models under 80% of available memory
+ - 8GB system: Use models under 6GB
+ - 16GB system: Use models under 13GB
+- **Hardware:** Verify your system meets [minimum requirements](/docs/troubleshooting#step-1-verify-hardware-and-system-requirements)
+
+**2. Adjust Model Settings**
+- Open model settings in the chat sidebar
+- Lower the **GPU Layers (ngl)** setting
+- Start low and increase gradually
+
+**3. Check Port Conflicts**
+If logs show "Bind address failed":
+
+```bash
+# Check if ports are in use
+# macOS/Linux
+netstat -an | grep 1337
+
+# Windows
+netstat -ano | find "1337"
+```
+
+**Default Jan ports:**
+- API Server: `1337`
+- Documentation: `3001`
+
+**4. Try Factory Reset**
+1. **Settings** > **Advanced Settings**
+2. Click **Reset** under "Reset To Factory Settings"
+
+
+
+**5. Clean Reinstall**
+If problems persist, do a complete clean installation (see "Jan Won't Start" section above).
+
+### Permission Denied Errors
+
+If you see permission errors during installation:
+
+```bash
+# Fix npm permissions (macOS/Linux)
+sudo chown -R $(whoami) ~/.npm
+
+# Windows - run as administrator
+```
+
+### OpenAI API Issues ("Unexpected Token")
+
+For OpenAI connection problems:
+
+**1. Verify API Key**
+- Get valid key from [OpenAI Platform](https://platform.openai.com/)
+- Ensure sufficient credits and permissions
+
+**2. Check Regional Access**
+- Some regions have API restrictions
+- Try using a VPN from a supported region
+- Test network connectivity to OpenAI endpoints
+
+### Performance Issues
+
+**Models Running Slowly:**
+- Enable GPU acceleration (see NVIDIA section)
+- Use appropriate model size for your hardware
+- Close other memory-intensive applications
+- Check Task Manager/Activity Monitor for resource usage
+
+**High Memory Usage:**
+- Switch to smaller model variants
+- Reduce context length in model settings
+- Enable model offloading in engine settings
+
+**Frequent Crashes:**
+- Update graphics drivers
+- Check system temperature
+- Reduce GPU layers if using GPU acceleration
+- Verify adequate power supply (desktop systems)
+
+## Need More Help?
+
+If these solutions don't work:
+
+**1. Gather Information:**
+- Copy your error logs (see top of this page)
+- Note your system specifications
+- Describe what you were trying to do when the problem occurred
+
+**2. Get Community Support:**
+- Join our [Discord](https://discord.com/invite/FTk2MvZwJH)
+- Post in the **#🆘|jan-help** channel
+- Include your logs and system info
+
+**3. Check Resources:**
+- [System requirements](/docs/troubleshooting#step-1-verify-hardware-and-system-requirements)
+- [Model compatibility guides](/docs/manage-models)
+- [Hardware setup guides](/docs/desktop/)
+
+
\ No newline at end of file
diff --git a/website/src/content/docs/products/index.mdx b/website/src/content/docs/products/index.mdx
new file mode 100644
index 000000000..31eccb01d
--- /dev/null
+++ b/website/src/content/docs/products/index.mdx
@@ -0,0 +1,110 @@
+---
+title: Jan's Product Vision
+description: AI that runs where you need it, how you need it
+sidebar:
+ order: 0
+---
+
+import { Aside, Card, CardGrid } from '@astrojs/starlight/components';
+
+
+Jan is evolving from a local AI application to a complete AI Agent platform. We combine models, applications, and tools to solve real problems.
+
+## What We're Building
+
+**Jan Agent** = Jan Models + Jan Application + Jan Tools
+
+Unlike other AI assistants that are just wrappers around Claude or GPT-4, Jan provides:
+- Our own models optimized for local and private use
+- Applications that work across all your devices
+- Tools that actually get things done
+
+## Two Modes, One Experience
+
+### Local (Incognito) Mode
+Your AI runs entirely on your device. Complete privacy, no internet required.
+
+### Cloud Mode
+Connect to more powerful models when needed - either self-hosted or via jan.ai.
+
+
+
+## Available on Every Device
+
+
+
+ **Available Now**
+ - Runs models locally
+ - Optional cloud connection
+ - Powers other devices
+
+
+
+ **Coming Soon**
+ - Self-hosted for teams
+ - 5-10 concurrent users
+ - Your own private cloud
+
+
+
+ **In Development**
+ - Connect to Desktop/Server
+ - Local mode with Jan Nano
+ - Same experience everywhere
+
+
+
+ **Beta Launch Soon**
+ - SaaS version of Jan Server
+ - Default for mobile/desktop cloud mode
+ - No setup required
+
+
+
+## Our Product Principles
+
+### It Just Works
+Open Jan, start chatting. No setup wizards, no API keys, no model selection. We handle the complexity.
+
+### Privacy First, Cloud When Needed
+Start with complete privacy by default. Add cloud capabilities only when you choose to.
+
+### Solve Problems, Not Settings
+Users want answers, not configuration options. Power users can dig deeper, but it's never required.
+
+## What Makes Jan Different
+
+| Feature | Other AI Assistants | Jan |
+|---------|---------------------|-----|
+| Models | Wrapper around Claude/GPT | Our own models + others |
+| Privacy | Your data on their servers | Your data stays yours |
+| Deployment | Cloud only | Local, self-hosted, or cloud |
+| Cost | Subscription forever | Free locally, pay for cloud |
+
+## The Roadmap Simplified
+
+### Today
+- Desktop app with local models
+- Basic cloud connections
+- Developer-friendly API
+
+### Next 6 Months
+- Simplified Local/Cloud modes
+- jan.ai cloud service
+- Mobile apps
+- Self-hosted server
+
+### Future Vision
+- Complete AI Agent platform
+- Compete directly with Claude/ChatGPT
+- Open superintelligence
+
+
+
+---
+
+[Download Jan Desktop](/download) | [Try jan.ai (Beta)](/beta) | [Documentation](/docs)
diff --git a/website/src/content/docs/products/models/jan-nano.mdx b/website/src/content/docs/products/models/jan-nano.mdx
new file mode 100644
index 000000000..57e6ea3fe
--- /dev/null
+++ b/website/src/content/docs/products/models/jan-nano.mdx
@@ -0,0 +1,207 @@
+---
+title: Jan Nano
+description: Compact research model optimized for finding answers
+sidebar:
+ order: 2
+---
+
+import { Aside, Card, CardGrid } from '@astrojs/starlight/components';
+
+Jan Nano is a 4-billion parameter model designed for research and information retrieval. Instead of trying to know everything, it excels at finding anything through deep integration with Model Context Protocol (MCP) tools.
+
+## Two Variants
+
+| Model | Context Window | Size | Use Case |
+|-------|----------------|------|----------|
+| Jan Nano 32k | 32,768 tokens | 4-8GB | Quick research, general queries |
+| Jan Nano 128k | 131,072 tokens | 8-12GB | Deep research, document analysis |
+
+
+
+## What Makes Nano Different
+
+### Research-First Design
+Jan Nano isn't trained to memorize facts. It's trained to:
+- Find relevant information quickly
+- Synthesize findings from multiple sources
+- Maintain context across long research sessions
+- Provide accurate citations
+
+### MCP Integration
+Works seamlessly with:
+- Web search (Serper, Exa)
+- Document analysis
+- Code repositories
+- Custom data sources
+
+### Extended Context
+The 128k variant can process:
+- 50+ research papers simultaneously
+- Entire codebases
+- Book-length documents
+- Thousand-page contracts
+
+## Performance
+
+### Hardware Requirements
+
+
+
+ - 8GB RAM
+ - Any modern CPU
+ - Works on most devices
+
+
+
+ - 16GB RAM
+ - GPU with 8GB+ VRAM
+ - CUDA-compatible
+
+
+
+### Speed Benchmarks
+
+| Device | Variant | Quantization | Speed |
+|--------|---------|--------------|-------|
+| M2 MacBook | 32k | Q4 | 80 tokens/s |
+| RTX 4090 | 32k | Q8 | 200+ tokens/s |
+| M2 MacBook | 128k | Q4 | 40 tokens/s |
+| RTX 4090 | 128k | Q8 | 100+ tokens/s |
+
+## Getting Started
+
+### 1. Enable MCP
+Go to **Settings** → **Model Providers** → **Llama.cpp** and enable tool use for Jan Nano.
+
+### 2. Add Search Tool
+Add a search MCP like Serper:
+- Get free API key from [serper.dev](https://serper.dev)
+- Add to **Settings** → **MCP Servers**
+
+### 3. Start Researching
+Open a chat and ask Jan Nano to search for information:
+
+```
+"What are the latest developments in quantum computing?"
+"Search for recent breakthroughs in mRNA vaccine technology"
+"Find and compare different approaches to carbon capture"
+```
+
+## Example Queries
+
+### Current Events
+- Latest renewable energy developments in Europe
+- Recent AI announcements from major tech companies
+- Current economic indicators across G7 nations
+
+### Deep Research
+- Analyze 20 papers on climate change impacts
+- Compare different programming paradigms with examples
+- Synthesize findings from multiple medical studies
+
+### Business Intelligence
+- Market analysis across competitors
+- Technology trends in specific sectors
+- Regulatory changes affecting industries
+
+
+
+## Technical Details
+
+### Architecture
+- **Base**: 4B parameter transformer
+- **Training**: Optimized for tool use and retrieval
+- **Context**: Native support (not retrofitted)
+- **Quantization**: Q4, Q8, FP16 variants
+
+### Why Native Context Matters
+Unlike models extended with YaRN or PI methods, Jan Nano 128k:
+- Maintains consistent performance across full context
+- Actually improves with more context (inverse scaling)
+- No performance cliff at higher token counts
+
+### Deployment Options
+
+#### VLLM (Recommended for 128k)
+```bash
+vllm serve Menlo/Jan-nano-128k \
+ --host 0.0.0.0 \
+ --port 1234 \
+ --enable-auto-tool-choice \
+ --tool-call-parser hermes \
+ --rope-scaling '{"rope_type":"yarn","factor":3.2,"original_max_position_embeddings":40960}' \
+ --max-model-len 131072
+```
+
+#### llama.cpp
+```bash
+llama-server ... --rope-scaling yarn --rope-scale 3.2 --yarn-orig-ctx 40960
+```
+
+## Limitations
+
+### Not Designed For
+- Creative writing
+- Complex reasoning without data
+- Mathematical proofs
+- Code generation from scratch
+
+### Best Used For
+- Information retrieval
+- Research synthesis
+- Document analysis
+- Fact-finding missions
+
+## Choosing Between Variants
+
+### Use 32k When
+- Running on limited hardware
+- Need quick responses
+- Researching focused topics
+- Battery life matters
+
+### Use 128k When
+- Analyzing multiple documents
+- Deep research projects
+- Processing entire codebases
+- Hardware isn't a constraint
+
+## Integration Examples
+
+### With Web Search
+```python
+# Jan Nano automatically uses search when needed
+response = jan.chat(
+ model="jan-nano-32k",
+ message="Find the latest SpaceX launch details",
+ tools=["search"]
+)
+```
+
+### With Document Analysis
+```python
+# Process long documents efficiently
+response = jan.chat(
+ model="jan-nano-128k",
+ message="Summarize key findings from these papers",
+ attachments=["paper1.pdf", "paper2.pdf", "paper3.pdf"]
+)
+```
+
+## The Philosophy
+
+Most models try to be encyclopedias. Jan Nano is a research assistant. It doesn't memorize the internet - it knows how to navigate it.
+
+This focused approach means:
+- Smaller model size
+- Better accuracy with sources
+- More reliable information
+- Efficient hardware usage
+
+---
+
+[Download Jan Desktop](https://jan.ai/download) | [Model Details](https://huggingface.co/Menlo/Jan-nano) | [MCP Documentation](https://jan.ai/docs/mcp)
diff --git a/website/src/content/docs/products/models/jan-v1.mdx b/website/src/content/docs/products/models/jan-v1.mdx
new file mode 100644
index 000000000..1457a6781
--- /dev/null
+++ b/website/src/content/docs/products/models/jan-v1.mdx
@@ -0,0 +1,194 @@
+---
+title: Jan V1
+description: Our own family of AI models, not another wrapper
+sidebar:
+ order: 1
+---
+
+import { Aside, Card, CardGrid, Tabs, TabItem } from '@astrojs/starlight/components';
+
+Jan V1 is our own model family designed to compete directly with Claude and GPT-4. We're not just fine-tuning someone else's work - we're building models that solve real problems.
+
+## Why Jan V1 Matters
+
+Most AI applications are just wrappers around Claude or OpenAI. We're different. Jan V1 models are:
+- Trained by us for real-world use cases
+- Optimized to run locally or in the cloud
+- Designed for both power and privacy
+
+
+
+## Model Lineup
+
+| Model | Size | Best For | Availability |
+|-------|------|----------|--------------|
+| Jan V1-7B | 4-8GB | Quick tasks, older hardware | Now |
+| Jan V1-13B | 8-16GB | Daily use, good balance | Now |
+| Jan V1-70B | 40-64GB | Professional work | Now |
+| Jan V1-180B | 100GB+ | Research, complex tasks | Coming 2026 |
+
+## Three Ways to Run
+
+
+
+ Run on your own hardware for complete privacy.
+
+ ```bash
+ # In Jan Desktop
+ # Models download automatically when needed
+ ```
+
+ **Requirements:**
+ - 7B: Any modern computer
+ - 13B: 16GB RAM
+ - 70B: 64GB RAM + GPU
+
+
+
+ Access via our API for maximum convenience.
+
+ ```python
+ # Same API, cloud power
+ response = client.chat.completions.create(
+ model="jan-v1-70b",
+ messages=[...]
+ )
+ ```
+
+ **Benefits:**
+ - No hardware requirements
+ - Always latest version
+ - Scale as needed
+
+
+
+ Deploy on your infrastructure for teams.
+
+ ```yaml
+ # Your server, your rules
+ jan-server:
+ model: jan-v1-70b
+ users: 50
+ gpu: A100
+ ```
+
+ **Perfect for:**
+ - Compliance requirements
+ - Team collaboration
+ - Custom deployments
+
+
+
+## What Makes V1 Different
+
+### Actually Understands Context
+Jan V1 maintains conversation context better than most open models. No more repeating yourself every few messages.
+
+### Trained for Real Work
+- Writing that sounds human
+- Code that actually runs
+- Analysis that makes sense
+- Answers that help
+
+### Optimized for Deployment
+- Quantized versions (Q4, Q5, Q8) for different needs
+- Hardware acceleration support
+- Efficient memory usage
+- Fast inference
+
+
+
+## Performance Reality
+
+| Task | V1-7B | V1-13B | V1-70B | GPT-3.5 | GPT-4 |
+|------|-------|--------|--------|---------|-------|
+| General Chat | Good | Great | Excellent | Great | Excellent |
+| Coding | Basic | Good | Great | Good | Excellent |
+| Analysis | Basic | Good | Excellent | Good | Excellent |
+| Speed (local) | Very Fast | Fast | Slower | N/A | N/A |
+| Privacy | Complete | Complete | Complete | None | None |
+
+## Common Use Cases
+
+
+
+ V1-13B and above handle emails, reports, and documentation with natural language.
+
+
+
+ All V1 models understand code. Larger models can handle complex refactoring.
+
+
+
+ V1-70B excels at synthesizing information and drawing insights.
+
+
+
+ V1 models can be fine-tuned for your specific domain and terminology.
+
+
+
+## For Developers
+
+### Local Inference
+```python
+# Runs on your machine
+from jan import Client
+client = Client(base_url="http://localhost:1337")
+
+response = client.chat.completions.create(
+ model="jan-v1-13b",
+ messages=[{"role": "user", "content": "Explain async/await"}]
+)
+```
+
+### Fine-Tuning
+```python
+# Make it yours
+jan.finetune(
+ base_model="jan-v1-13b",
+ dataset="your-data.jsonl",
+ output="custom-model"
+)
+```
+
+### Model Switching
+```python
+# Use the right tool for the job
+simple_query → "jan-v1-7b" # Fast
+normal_work → "jan-v1-13b" # Balanced
+complex_task → "jan-v1-70b" # Powerful
+```
+
+## The Future
+
+### V1 Series Roadmap
+- **Now**: 7B, 13B, 70B models
+- **2025**: Improved versions with better tool use
+- **2026**: 180B+ models competing with GPT-4
+- **Beyond**: Agentic capabilities built-in
+
+### Our Commitment
+We're building models that:
+- Respect user privacy
+- Run where you need them
+- Solve real problems
+- Keep improving
+
+
+
+## Why We Built This
+
+Every major AI lab keeps their best models locked in the cloud. We believe you should be able to run competitive AI on your own terms. Jan V1 is our answer to that belief.
+
+We're not trying to win benchmarks. We're trying to build AI that actually helps.
+
+---
+
+[Try Jan V1](https://jan.ai/download) | [Model Benchmarks](https://jan.ai/benchmarks) | [API Documentation](https://jan.ai/docs/api)
diff --git a/website/src/content/docs/products/platforms/desktop.mdx b/website/src/content/docs/products/platforms/desktop.mdx
new file mode 100644
index 000000000..7d65ae14a
--- /dev/null
+++ b/website/src/content/docs/products/platforms/desktop.mdx
@@ -0,0 +1,184 @@
+---
+title: Jan Desktop
+description: AI that runs on your computer, not someone else's
+sidebar:
+ order: 2
+---
+
+import { Aside, Card, CardGrid, Tabs, TabItem } from '@astrojs/starlight/components';
+
+Jan Desktop is where it all starts. Download it, open it, start chatting. Your AI runs on your computer with zero setup required.
+
+## Two Modes, Zero Complexity
+
+### Local Mode (Default)
+Your conversations stay on your computer. No internet needed. Complete privacy.
+
+### Cloud Mode
+Connect to more powerful models when you need them. Your choice of provider.
+
+
+
+## What You Get
+
+
+
+ Download once, use forever. Internet is optional.
+
+
+
+ Everything stored in `~/jan`. No cloud backups unless you want them.
+
+
+
+ Your desktop becomes an AI server for your phone and other computers.
+
+
+
+ Local API at `localhost:1337`. Works with any OpenAI-compatible tool.
+
+
+
+## System Requirements
+
+| Component | Minimum | Recommended |
+|-----------|---------|-------------|
+| RAM | 8GB | 16GB+ |
+| Storage | 10GB | 20GB+ |
+| OS | Windows 10, macOS 12, Ubuntu 20.04 | Latest versions |
+
+
+
+## Getting Started
+
+1. **Download Jan** from [jan.ai/download](https://jan.ai/download)
+2. **Open the app** - it loads with everything ready
+3. **Start chatting** - that's it
+
+## Local Mode Features
+
+### Built-in Model
+Jan comes with a model that works immediately. No downloading, no waiting.
+
+### Smart Defaults
+- Automatically uses your GPU if available
+- Adjusts to your system's capabilities
+- Works on older hardware too
+
+### Complete Privacy
+- No telemetry by default
+- No account required
+- No data leaves your machine
+
+## Cloud Mode (Optional)
+
+Connect to external AI providers when you need more power:
+
+
+
+ Our cloud service (coming soon). One click to enable.
+
+
+
+ Use your OpenAI API key for GPT-4 access.
+
+
+
+ Connect to your own Jan Server.
+
+
+
+## Desktop as Your AI Hub
+
+Your desktop can power AI across all your devices:
+
+```
+┌─────────────┐
+│ Jan Desktop │ ← Your AI hub
+└──────┬──────┘
+ │
+ ┌───┴────┬─────────┬────────┐
+ │ │ │ │
+Mobile Tablet Other PCs Apps
+```
+
+## For Developers
+
+### Local API Server
+```bash
+# Always running at localhost:1337
+curl http://localhost:1337/v1/chat/completions \
+ -H "Content-Type: application/json" \
+ -d '{"model": "default", "messages": [{"role": "user", "content": "Hello"}]}'
+```
+
+### Works With
+- Continue (AI coding assistant)
+- Cursor (AI editor)
+- Any OpenAI SDK
+- Your custom scripts
+
+## Common Questions
+
+### Do I need to download models?
+No. Jan comes with a default model that works immediately.
+
+### Can I use it offline?
+Yes. Local Mode works completely offline once installed.
+
+### How do I switch models?
+Most users don't need to. Power users can explore the Model Hub.
+
+### Is it really private?
+Yes. In Local Mode, nothing leaves your computer.
+
+## Performance Expectations
+
+| Task | Local Mode | Cloud Mode |
+|------|------------|------------|
+| Simple questions | Instant | Instant |
+| Complex analysis | 2-5 seconds | 1-2 seconds |
+| Code generation | Good | Excellent |
+| Privacy | Complete | Depends on provider |
+
+## Coming Soon
+
+### Simplified Onboarding (v0.7.0)
+- Hardware detection and optimization
+- One-click setup
+- Automatic model selection
+
+### Enhanced Local Mode
+- Better default model (Jan Nano)
+- Faster responses
+- Lower memory usage
+
+### Quick Access
+- System-wide hotkey
+- Floating window
+- Voice input
+
+
+
+## Why Desktop?
+
+Your computer is powerful. It has GPUs, CPUs, and memory that can run AI locally. Jan Desktop uses that power to give you:
+
+- **Privacy**: Your data never leaves your machine
+- **Control**: Update when you want, use what you want
+- **Reliability**: Works offline, no API limits
+- **Speed**: No network latency for local models
+
+## The Bottom Line
+
+Jan Desktop is AI that respects the fact that your computer is YOUR computer. Not a terminal to someone else's server. Just software that works for you.
+
+---
+
+[Download Jan Desktop](https://jan.ai/download) | [Documentation](/docs) | [Discord Community](https://discord.gg/jan)
diff --git a/website/src/content/docs/products/platforms/jan-ai.mdx b/website/src/content/docs/products/platforms/jan-ai.mdx
new file mode 100644
index 000000000..448d8736c
--- /dev/null
+++ b/website/src/content/docs/products/platforms/jan-ai.mdx
@@ -0,0 +1,162 @@
+---
+title: jan.ai
+description: Cloud AI that respects your privacy
+sidebar:
+ order: 1
+---
+
+import { Aside, Card, CardGrid } from '@astrojs/starlight/components';
+
+jan.ai is our cloud service that brings powerful AI to your browser and powers Cloud Mode in Jan Desktop and Mobile.
+
+## What is jan.ai?
+
+jan.ai is the hosted version of Jan Server. Same models, same experience, but running in the cloud when you need more power than your device can provide.
+
+
+
+## How It Works
+
+
+
+ When you switch to Cloud Mode in Jan Desktop, it connects to jan.ai automatically. No configuration needed.
+
+
+
+ jan.ai is the default cloud provider for Jan Mobile when not connected to your desktop.
+
+
+
+ Visit jan.ai directly in your browser for instant access to AI without downloading anything.
+
+
+
+## Key Features
+
+### No Setup Required
+Open jan.ai, start chatting. No API keys, no credit cards, no account required for basic use.
+
+### Privacy-Respecting Cloud
+- Anonymous usage by default
+- No training on your data
+- Encrypted connections
+- Auto-delete options
+
+### Powerful Models
+Access our best models without needing expensive hardware:
+- **Jan V1-70B**: Our most capable model
+- **Jan V1-13B**: Fast and efficient
+- **Jan Nano**: Quick responses for simple tasks
+
+### Seamless Integration
+Works perfectly with:
+- Jan Desktop (Cloud Mode)
+- Jan Mobile
+- API access for developers
+- Browser-based chat
+
+## Pricing
+
+| Tier | Features | Price |
+|------|----------|--------|
+| Free | 50 queries/day, Jan Nano & V1-13B | $0 |
+| Pro | Unlimited queries, all models, priority | Coming Soon |
+| Team | Multiple users, admin controls | Coming Soon |
+
+
+
+## Why Use jan.ai?
+
+### When Local Isn't Enough
+- Need more powerful models than your device can run
+- Want to access your AI from any device
+- Require faster responses for complex tasks
+
+### But Still Want Privacy
+- No account required for basic use
+- Anonymous by default
+- Your data isn't used for training
+- Clear data deletion policies
+
+### Without the Hassle
+- No API keys to manage
+- No complex pricing calculators
+- No surprise bills
+- Just AI that works
+
+## For Developers
+
+### API Access
+```javascript
+// Same API as local Jan
+const response = await fetch('https://api.jan.ai/v1/chat/completions', {
+ headers: { 'Authorization': 'Bearer YOUR_KEY' },
+ body: JSON.stringify({
+ model: 'jan-v1-70b',
+ messages: [{ role: 'user', content: 'Hello' }]
+ })
+});
+```
+
+### OpenAI Compatible
+Drop-in replacement for OpenAI API:
+```python
+# Just change the base URL
+client = OpenAI(
+ base_url="https://api.jan.ai/v1",
+ api_key="your-jan-key"
+)
+```
+
+## Common Questions
+
+### How is this different from ChatGPT?
+- We don't train on your data
+- Anonymous usage available
+- Same experience as local Jan
+- You own your conversations
+
+### Can I use my own models?
+Not on jan.ai. For custom models, use Jan Desktop or self-host Jan Server.
+
+### Is it really private?
+More private than most cloud AI:
+- Optional accounts
+- No behavioral tracking
+- Encrypted everything
+- Regular data purges
+
+### When will it fully launch?
+Beta starts Q3 2025. Full launch Q4 2025.
+
+## Coming Soon
+
+### Beta Features
+- Basic chat interface
+- API access
+- Jan Desktop/Mobile integration
+
+### Full Launch
+- Team accounts
+- Advanced tools (search, browser use)
+- Model customization
+- Usage analytics (your own data)
+
+
+
+## The Philosophy
+
+Cloud AI doesn't have to mean giving up control. jan.ai proves you can have:
+- Convenience without surveillance
+- Power without privacy invasion
+- Simplicity without lock-in
+
+---
+
+[Join Beta Waitlist](https://jan.ai/beta) | [Pricing Details](https://jan.ai/pricing) | [API Documentation](https://jan.ai/docs/api)
diff --git a/website/src/content/docs/products/platforms/mobile.mdx b/website/src/content/docs/products/platforms/mobile.mdx
new file mode 100644
index 000000000..846a32df8
--- /dev/null
+++ b/website/src/content/docs/products/platforms/mobile.mdx
@@ -0,0 +1,182 @@
+---
+title: Jan Mobile
+description: Your AI assistant on the go
+sidebar:
+ order: 3
+---
+
+import { Aside, Card, CardGrid } from '@astrojs/starlight/components';
+
+Jan Mobile brings the same AI experience to your phone. Connect to your desktop, your server, or run models locally.
+
+
+
+## How It Works
+
+Jan Mobile adapts to your situation:
+
+| At Home | At Work | On the Go |
+|---------|---------|-----------|
+| Connect to your Jan Desktop over WiFi | Connect to company Jan Server | Run Jan Nano locally on your phone |
+
+No configuration needed. It just works.
+
+## Three Modes, One Experience
+
+### Desktop Mode
+When you're near your computer, your phone uses its models and processing power.
+
+```
+Your Phone → WiFi → Your Desktop → Response
+ (automatic) (powerful models)
+```
+
+**Benefits:**
+- Access to larger models
+- Faster processing
+- Shared conversations
+- No phone battery drain
+
+### Server Mode
+Connect to your organization's Jan Server for team collaboration.
+
+```
+Your Phone → Internet → Company Server → Response
+ (secure) (shared models)
+```
+
+**Benefits:**
+- Team knowledge base
+- Consistent models
+- Central management
+- Work anywhere
+
+### Local Mode
+No connection? No problem. Jan Nano runs directly on your phone.
+
+```
+Your Phone → Jan Nano (6GB) → Response
+ (private & offline)
+```
+
+**Benefits:**
+- Complete privacy
+- Works offline
+- No data usage
+- Always available
+
+
+
+## Device Requirements
+
+### iOS
+- iPhone 11 or newer
+- iOS 15+
+- 8GB storage for Local Mode
+
+### Android
+- 8GB RAM minimum
+- Android 11+
+- 8GB storage for Local Mode
+
+## Key Features
+
+
+
+ Move from home to office to airplane. Jan adapts automatically.
+
+
+
+ Talk to Jan naturally. Responses can be spoken too.
+
+
+
+ Conversations, settings, and preferences follow you across devices.
+
+
+
+## Privacy & Security
+
+### Your Data, Your Control
+- Local Mode: Everything stays on your phone
+- Desktop Mode: Direct encrypted connection
+- Server Mode: Your organization's policies apply
+
+### No Compromises
+- Biometric app lock
+- Encrypted storage
+- No cloud backups without permission
+- Clear data anytime
+
+## Common Questions
+
+### Do I need to download models to my phone?
+No. In Desktop or Server mode, models run remotely. For Local Mode, Jan Nano downloads automatically when needed (6GB).
+
+### How fast is Local Mode?
+On modern phones, expect 30-50 tokens per second. Good enough for most conversations.
+
+### Can I use it without Jan Desktop?
+Yes. Use Local Mode or connect to jan.ai (when available).
+
+### Will it drain my battery?
+Desktop/Server modes use minimal battery. Local Mode is like using any intensive app - noticeable but manageable.
+
+### Is it the same as desktop?
+Same Jan, smaller screen. All the privacy, none of the complexity.
+
+## Why Mobile Matters
+
+Your phone is with you always. Your AI assistant should be too. But that doesn't mean sacrificing privacy or control.
+
+Jan Mobile proves you can have:
+- Powerful AI anywhere
+- Complete privacy when needed
+- Simple experience always
+- No subscriptions or tracking
+
+## Coming Features
+
+### Launch (Q1 2026)
+- iOS and Android apps
+- Basic chat interface
+- Three connection modes
+- Voice input
+
+### Post-Launch
+- Widget support
+- Siri/Assistant integration
+- Background sync
+- Notification summaries
+
+
+
+## The Vision
+
+Most mobile AI apps are just cloud wrappers. Jan Mobile is different:
+
+| Feature | Other AI Apps | Jan Mobile |
+|---------|---------------|------------|
+| Offline Mode | ❌ | ✅ Jan Nano |
+| Desktop Connection | ❌ | ✅ Your models |
+| Privacy | Your data in cloud | Your data stays yours |
+| Cost | Monthly subscription | Free with your hardware |
+
+## Get Ready
+
+While you wait for Jan Mobile:
+
+1. **Set up Jan Desktop** - It will power your mobile experience
+2. **Try jan.ai** - Get familiar with the interface
+3. **Join the waitlist** - Be first to know when it launches
+4. **Tell us what you need** - Shape the mobile experience
+
+---
+
+[Join Mobile Waitlist](https://jan.ai/mobile) | [Desktop Download](https://jan.ai/download) | [Discord Community](https://discord.gg/jan)
diff --git a/website/src/content/docs/products/platforms/server.mdx b/website/src/content/docs/products/platforms/server.mdx
new file mode 100644
index 000000000..dca084a02
--- /dev/null
+++ b/website/src/content/docs/products/platforms/server.mdx
@@ -0,0 +1,235 @@
+---
+title: Jan Server
+description: Your own private AI cloud
+sidebar:
+ order: 4
+---
+
+import { Aside, Card, CardGrid } from '@astrojs/starlight/components';
+
+Jan Server is Jan Desktop with multi-user support. Deploy it on your hardware to create your own private AI cloud for your team or organization.
+
+
+
+## What is Jan Server?
+
+```
+Jan Server = Jan Desktop + Multi-user support + Real hardware
+```
+
+It's the same engine that powers Jan Desktop, scaled up for teams. Your data stays on your servers, your models run on your GPUs, your AI remains yours.
+
+## Why Organizations Need This
+
+### The Problem
+Every API call to ChatGPT or Claude is:
+- Your intellectual property leaving your network
+- Potential training data for someone else's model
+- A compliance nightmare waiting to happen
+- A monthly bill that never ends
+
+### The Solution
+Jan Server gives you:
+- **Complete control**: Your hardware, your rules
+- **Total privacy**: Nothing leaves your network
+- **Predictable costs**: One-time hardware investment
+- **Compliance ready**: GDPR, HIPAA, SOC 2 friendly
+
+## Deployment Options
+
+
+
+ **Hardware**: Single RTX 6000 Ada (48GB)
+ **RAM**: 128GB
+ **Models**: Up to 70B parameters
+ **Cost**: ~$15k one-time
+
+
+
+ **Hardware**: 2-4 GPU nodes
+ **RAM**: 256GB per node
+ **Models**: Multiple concurrent
+ **Cost**: ~$50-100k one-time
+
+
+
+ **Hardware**: DGX cluster
+ **RAM**: As needed
+ **Models**: Full range
+ **Cost**: Custom quote
+
+
+
+## Simple Deployment
+
+### Docker (Recommended)
+```yaml
+version: '3'
+services:
+ jan-server:
+ image: jan.ai/server:latest
+ ports:
+ - "80:80"
+ - "1337:1337"
+ volumes:
+ - ./models:/models
+ - ./data:/data
+ deploy:
+ resources:
+ reservations:
+ devices:
+ - capabilities: [gpu]
+```
+
+### Kubernetes
+For larger deployments with auto-scaling and high availability.
+
+### Bare Metal
+For maximum performance and custom configurations.
+
+## Key Features
+
+### Multi-User Management
+- Individual accounts and API keys
+- Usage tracking and quotas
+- Model access controls
+- Team collaboration
+
+### Same API as Desktop
+```python
+# Your code doesn't change
+client = OpenAI(
+ base_url="https://jan.company.internal/v1",
+ api_key="user-specific-key"
+)
+```
+
+### Model Governance
+- Control which models are available
+- Set user permissions
+- Monitor usage
+- Ensure compliance
+
+
+
+## Real Deployments
+
+| Use Case | Setup | Result |
+|----------|-------|---------|
+| Law Firm | 2x RTX 6000, 200 users | Client data never leaves network |
+| Hospital | DGX node, 500 users | HIPAA compliant AI assistant |
+| Tech Startup | 4x RTX 4090, 50 users | 90% cost reduction vs. OpenAI |
+| University | Multi-node cluster | Unrestricted research |
+
+## Hardware Guide
+
+### Minimum Requirements
+- **GPU**: RTX 3090 or better (24GB VRAM)
+- **CPU**: 16+ cores
+- **RAM**: 64GB minimum
+- **Storage**: 1TB NVMe SSD
+
+### Recommended Setup
+- **GPU**: RTX 6000 Ada or A100
+- **CPU**: Dual socket Xeon/EPYC
+- **RAM**: 128-256GB
+- **Storage**: RAID NVMe array
+
+### Scaling Considerations
+- 1 GPU can serve ~5-10 concurrent users
+- 70B models need 40-80GB VRAM
+- CPU inference possible for smaller models
+- Network: 10Gbps recommended
+
+## Why Self-Host?
+
+### For IT Teams
+- No data leaves your network
+- Complete audit trails
+- Integrate with existing auth (LDAP/AD)
+- Predictable resource usage
+
+### For Security Teams
+- Air-gapped deployment options
+- End-to-end encryption
+- No third-party access
+- Full compliance control
+
+### For Finance Teams
+- One-time hardware cost
+- No per-token pricing
+- Predictable TCO
+- Use existing infrastructure
+
+## Coming Features
+
+### Phase 1 (Launch)
+- Basic multi-user support
+- Web interface
+- API compatibility
+- Usage monitoring
+
+### Phase 2 (Post-Launch)
+- Advanced governance
+- Fine-tuning interface
+- Automated scaling
+- Backup/restore
+
+### Phase 3 (Future)
+- Federated deployments
+- Cross-region sync
+- Advanced analytics
+- Custom model training
+
+
+
+## Migration Path
+
+### From Cloud AI
+1. Deploy Jan Server
+2. Import your workflows
+3. Update API endpoints
+4. Migrate users gradually
+
+### From Jan Desktop
+1. Same models work instantly
+2. Add user management
+3. Scale as needed
+
+## The Philosophy
+
+We believe organizations should own their AI infrastructure just like they own their data. Jan Server makes this possible without compromising on capabilities.
+
+This isn't about avoiding the cloud — it's about having a choice. Run your AI where it makes sense for your organization.
+
+## Support Options
+
+### Community Edition
+- Full features
+- Community support
+- Perfect for small teams
+
+### Enterprise Edition
+- Priority support
+- Custom deployment help
+- SLA guarantees
+- Training included
+
+## Get Started
+
+Jan Server is coming soon. While you wait:
+
+1. **Plan your hardware**: Check our requirements above
+2. **Join early access**: Get notified when available
+3. **Test with Desktop**: Same models, same experience
+4. **Prepare your team**: AI that respects your infrastructure
+
+---
+
+[Join Early Access](https://jan.ai/server) | [Hardware Guide](https://jan.ai/docs/server/hardware) | [Enterprise Contact](https://jan.ai/enterprise)
diff --git a/website/src/content/docs/products/tools/browseruse.mdx b/website/src/content/docs/products/tools/browseruse.mdx
new file mode 100644
index 000000000..87a8e6f54
--- /dev/null
+++ b/website/src/content/docs/products/tools/browseruse.mdx
@@ -0,0 +1,276 @@
+---
+title: Browser Use
+description: Web automation capabilities for AI assistants
+sidebar:
+ order: 2
+---
+
+import { Aside, Card, CardGrid, Tabs, TabItem } from '@astrojs/starlight/components';
+
+
+Browser Use enables Jan to interact with web pages programmatically. It can fill forms, extract data, and automate multi-step workflows while respecting site policies.
+
+## Overview
+
+Browser Use operates in two modes:
+
+| Mode | Execution | Use Case | Requirements |
+|------|-----------|----------|--------------|
+| Local | Your device | Personal automation | Browser installed |
+| Cloud | Jan servers | Scheduled tasks | Internet connection |
+
+## Capabilities
+
+
+
+ - Fill application forms
+ - Extract structured data
+ - Handle multi-page flows
+ - Screenshot verification
+
+
+
+ - Price monitoring
+ - Content aggregation
+ - Competitor analysis
+ - Research automation
+
+
+
+ - Appointment booking
+ - Status checking
+ - Report generation
+ - Workflow execution
+
+
+
+## Safety Boundaries
+
+### Allowed Actions
+- Read public content
+- Fill non-financial forms
+- Click buttons and links
+- Take screenshots
+- Extract text and data
+- Handle JavaScript sites
+
+### Restricted Actions
+- Payment processing
+- Financial transactions
+- CAPTCHA bypassing
+- Terms of Service violations
+- Credential harvesting
+- Malicious automation
+
+
+
+## Implementation
+
+
+
+ Runs on your device using Playwright.
+
+ ```javascript
+ await jan.browser.use({
+ task: "Fill job application",
+ url: "https://careers.example.com",
+ mode: "local",
+ headless: false // Watch it work
+ });
+ ```
+
+ **Advantages:**
+ - Uses your sessions
+ - Access internal tools
+ - Complete privacy
+ - Visual feedback
+
+ **Limitations:**
+ - Uses local resources
+ - Requires browser
+ - Limited to one task
+
+
+
+ Runs on Jan's infrastructure.
+
+ ```javascript
+ await jan.browser.use({
+ task: "Monitor prices daily",
+ urls: ["site1.com", "site2.com"],
+ mode: "cloud",
+ schedule: "0 9 * * *"
+ });
+ ```
+
+ **Advantages:**
+ - Runs 24/7
+ - Parallel execution
+ - No local resources
+ - Scheduled tasks
+
+ **Limitations:**
+ - No personal accounts
+ - Public sites only
+ - Internet required
+
+
+
+## Common Use Cases
+
+### Research Automation
+```javascript
+// Gather pricing from multiple vendors
+const prices = await jan.browser.use({
+ task: "Extract pricing tables",
+ urls: competitorUrls,
+ extract: ["price", "features", "limits"]
+});
+```
+
+### Form Processing
+```javascript
+// Fill repetitive applications
+await jan.browser.use({
+ task: "Complete application",
+ data: applicationData,
+ confirmation: "review" // Stop before submit
+});
+```
+
+### Monitoring
+```javascript
+// Track availability
+await jan.browser.use({
+ task: "Check stock status",
+ url: "https://store.example.com/product",
+ notify: "when_available"
+});
+```
+
+## Integration with Other Tools
+
+### Search + Browser Use
+```
+User: "Book a restaurant for Saturday"
+Jan: [searches restaurants] → [checks availability] →
+ [fills reservation form] → [confirms details]
+```
+
+### Deep Research + Browser Use
+```
+User: "Compare SaaS pricing across competitors"
+Jan: [identifies competitors] → [visits pricing pages] →
+ [extracts data] → [creates comparison]
+```
+
+## Technical Architecture
+
+### Execution Pipeline
+1. **Task Analysis**: Understand user intent
+2. **Site Navigation**: Load and interact with pages
+3. **Action Execution**: Click, type, select
+4. **Data Extraction**: Capture results
+5. **Verification**: Screenshot confirmation
+
+### Performance Metrics
+| Operation | Local Time | Cloud Time | Success Rate |
+|-----------|------------|------------|--------------|
+| Page load | 1-3s | 2-4s | 95% |
+| Form fill | 5-10s | 8-15s | 90% |
+| Multi-page | 30-60s | 45-90s | 85% |
+| Data extract | 10-20s | 15-30s | 92% |
+
+
+
+## Configuration
+
+### Basic Settings
+```json
+{
+ "browserUse": {
+ "defaultMode": "local",
+ "timeout": 60000,
+ "retries": 3,
+ "screenshotOnError": true
+ }
+}
+```
+
+### Advanced Options
+```json
+{
+ "browserUse": {
+ "userAgent": "Mozilla/5.0...",
+ "viewport": { "width": 1920, "height": 1080 },
+ "cloudWorkers": 5,
+ "rateLimit": {
+ "requests": 100,
+ "window": "1h"
+ }
+ }
+}
+```
+
+## Error Handling
+
+| Error Type | Cause | Resolution |
+|------------|-------|------------|
+| Navigation failed | Site down/blocked | Retry or alternative site |
+| Element not found | Page structure changed | Update selectors |
+| Timeout | Slow site/connection | Increase timeout |
+| Blocked by site | Anti-bot measures | Use local mode |
+
+## Privacy Considerations
+
+### Local Mode
+- All data stays on device
+- Uses your browser profile
+- No external connections
+- Complete user control
+
+### Cloud Mode
+- Tasks are anonymized
+- No personal data stored
+- Results encrypted
+- Auto-deletion after delivery
+
+
+
+## Roadmap
+
+### v0.8.0 (Q4 2025)
+- Basic browser automation
+- Form filling
+- Screenshot capture
+- Local mode only
+
+### v0.8.x (Q1 2026)
+- Cloud execution
+- Scheduled tasks
+- Parallel processing
+- Result caching
+
+### v0.9.0 (Q2 2026)
+- Visual element detection
+- Natural language navigation
+- Advanced error recovery
+- Workflow templates
+
+## Best Practices
+
+1. **Start Simple**: Test with single-page tasks first
+2. **Use Confirmations**: Review before critical actions
+3. **Handle Errors**: Expect and plan for failures
+4. **Respect Sites**: Follow robots.txt and rate limits
+5. **Local First**: Use cloud only when necessary
+
+---
+
+[Documentation](https://jan.ai/docs/browser-use) | [Examples](https://github.com/janhq/browser-use-examples) | [API Reference](https://jan.ai/docs/api/browser)
diff --git a/website/src/content/docs/products/tools/deepresearch.mdx b/website/src/content/docs/products/tools/deepresearch.mdx
new file mode 100644
index 000000000..bac7cdf58
--- /dev/null
+++ b/website/src/content/docs/products/tools/deepresearch.mdx
@@ -0,0 +1,292 @@
+---
+title: Deep Research
+description: Comprehensive research and analysis capabilities
+sidebar:
+ order: 3
+---
+
+import { Aside, Card, CardGrid, Tabs, TabItem } from '@astrojs/starlight/components';
+
+
+Deep Research provides comprehensive investigation capabilities beyond simple search. It reads sources, synthesizes information, and produces analysis with proper citations.
+
+## Research Levels
+
+| Level | Duration | Sources | Output | Use Case |
+|-------|----------|---------|--------|----------|
+| Quick | 5 min | 10-20 | Summary with key points | Overview needed |
+| Standard | 30 min | 20-50 | Detailed analysis | Work projects |
+| Comprehensive | 2+ hours | 50-200 | Full literature review | Academic research |
+
+## How It Works
+
+### Research Pipeline
+1. **Query Analysis**: Understand scope and requirements
+2. **Source Discovery**: Find relevant materials
+3. **Content Extraction**: Read and process sources
+4. **Synthesis**: Connect information across sources
+5. **Output Generation**: Create structured analysis
+
+### Source Types
+
+
+ - Academic papers (arXiv, PubMed)
+ - Technical documentation
+ - Patent databases
+ - Code repositories
+ - Official reports
+
+
+
+ - Expert blog posts
+ - Conference proceedings
+ - Tutorial content
+ - Forum discussions
+ - News articles
+
+
+
+## Research Modes
+
+
+
+ Fast overview for immediate needs.
+
+ ```javascript
+ await jan.research({
+ query: "quantum computing recent advances",
+ depth: "quick",
+ maxSources: 20
+ });
+ ```
+
+ **Output includes:**
+ - Executive summary
+ - Key developments
+ - Major players
+ - 5-10 citations
+
+
+
+ Balanced depth for professional work.
+
+ ```javascript
+ await jan.research({
+ query: "comparison of vector databases",
+ depth: "standard",
+ includeBenchmarks: true
+ });
+ ```
+
+ **Output includes:**
+ - Detailed analysis
+ - Comparative tables
+ - Performance metrics
+ - 20-50 citations
+
+
+
+ Academic-level investigation.
+
+ ```javascript
+ await jan.research({
+ query: "mRNA vaccine mechanisms",
+ depth: "comprehensive",
+ timeRange: "2020-2024",
+ includePatents: true
+ });
+ ```
+
+ **Output includes:**
+ - Literature review
+ - Methodology analysis
+ - Future directions
+ - 100+ citations
+
+
+
+## Key Features
+
+### Citation Management
+- Every claim backed by sources
+- Proper academic formatting
+- Direct links to materials
+- Citation graph exploration
+
+### Quality Assurance
+| Check Type | Description | Automatic |
+|------------|-------------|-----------|
+| Source verification | Validates publication status | Yes |
+| Date checking | Ensures currency of information | Yes |
+| Contradiction detection | Flags conflicting claims | Yes |
+| Bias identification | Notes potential conflicts | Partial |
+| Retraction alerts | Warns about retracted papers | Yes |
+
+### Output Formats
+- **Markdown**: With inline citations
+- **PDF**: Formatted report
+- **LaTeX**: Academic papers
+- **JSON**: Structured data
+- **BibTeX**: Reference management
+
+
+
+## Real-World Examples
+
+### Technical Research
+```
+Query: "Rust async runtime implementations"
+Duration: 45 minutes
+Sources analyzed: 67
+
+Output structure:
+1. Current landscape overview
+2. Implementation comparison table
+3. Performance benchmarks
+4. Code examples
+5. Best practices
+6. 43 verified citations
+```
+
+### Market Analysis
+```
+Query: "Edge AI hardware market 2024"
+Duration: 2 hours
+Sources analyzed: 124
+
+Output structure:
+1. Market size and projections
+2. Key players analysis
+3. Technology comparison
+4. Investment trends
+5. Future outlook
+6. 89 citations from reports, papers, and news
+```
+
+### Literature Review
+```
+Query: "CRISPR applications in agriculture"
+Duration: 3 hours
+Sources analyzed: 198
+
+Output structure:
+1. Historical development
+2. Current applications
+3. Technical challenges
+4. Regulatory landscape
+5. Future possibilities
+6. 156 academic citations
+```
+
+## Configuration Options
+
+### Basic Settings
+```json
+{
+ "research": {
+ "defaultDepth": "standard",
+ "cacheResults": true,
+ "verifyDates": true,
+ "includePreprints": false
+ }
+}
+```
+
+### Advanced Options
+```json
+{
+ "research": {
+ "sources": {
+ "academic": ["arxiv", "pubmed", "ieee"],
+ "exclude": ["reddit", "quora"],
+ "requirePeerReview": true
+ },
+ "output": {
+ "style": "academic",
+ "citationFormat": "APA",
+ "includeSummaries": true
+ }
+ }
+}
+```
+
+## Integration with Other Tools
+
+### Search + Deep Research
+```
+User: "Latest transformer architecture improvements"
+Jan: [searches for papers] → [reads 30 papers] →
+ [analyzes improvements] → [creates taxonomy]
+```
+
+### Browser Use + Deep Research
+```
+User: "Analyze all YC startups in AI space"
+Jan: [crawls YC directory] → [visits company sites] →
+ [extracts data] → [produces market analysis]
+```
+
+
+
+## Limitations
+
+### Cannot Access
+- Paywalled content (in local mode)
+- Private databases
+- Real-time data streams
+- Proprietary research
+
+### Won't Perform
+- Original research
+- Data collection experiments
+- Statistical analysis of raw data
+- Peer review functions
+
+## Performance Expectations
+
+| Research Type | Sources | Time | Accuracy |
+|---------------|---------|------|----------|
+| News summary | 10-20 | 5 min | High |
+| Technical review | 30-50 | 30 min | Very High |
+| Academic survey | 100+ | 2+ hours | Excellent |
+| Patent analysis | 50-100 | 1-2 hours | High |
+
+## Local vs Cloud Processing
+
+### Local Mode
+- Downloads papers to cache
+- Processes on your device
+- Complete privacy
+- Slower processing
+
+### Cloud Mode
+- Access to more sources
+- Faster processing
+- Parallel analysis
+- Anonymous queries
+
+## Roadmap
+
+### v0.7.0 (Current)
+- Basic research pipeline
+- Academic paper parsing
+- Simple citation management
+
+### v0.8.0
+- Visual paper understanding
+- Code extraction from papers
+- Patent search integration
+
+### v0.9.0
+- Multi-language sources
+- Collaborative research
+- Real-time monitoring
+- Custom research agents
+
+---
+
+[Examples](https://jan.ai/docs/deep-research/examples) | [API Reference](https://jan.ai/docs/api/research) | [Research Templates](https://github.com/janhq/research-templates)
diff --git a/website/src/content/docs/products/tools/search.mdx b/website/src/content/docs/products/tools/search.mdx
new file mode 100644
index 000000000..938f986c9
--- /dev/null
+++ b/website/src/content/docs/products/tools/search.mdx
@@ -0,0 +1,249 @@
+---
+title: Search
+description: Privacy-respecting web search for AI assistants
+sidebar:
+ order: 1
+---
+
+import { Aside, Card, CardGrid, Tabs, TabItem } from '@astrojs/starlight/components';
+
+
+Jan Search provides web search capabilities while maintaining user privacy. It offers local crawling, anonymized cloud search, and intelligent routing.
+
+## Architecture
+
+Jan Search operates in three modes:
+
+| Mode | Description | Privacy | Speed |
+|------|-------------|---------|--------|
+| Local | Direct web crawling from your device | Maximum | Slower |
+| Cloud | Anonymized search via Jan API | High | Fast |
+| Hybrid | Automatic routing based on query | Variable | Optimal |
+
+## Implementation
+
+
+
+ Everything happens on your machine using Crawl4AI.
+
+ ```javascript
+ const results = await jan.tools.search({
+ query: "your search query",
+ mode: "local",
+ maxResults: 10
+ });
+ ```
+
+ **Characteristics:**
+ - No data leaves your device
+ - Direct website crawling
+ - Limited to public content
+ - 2-5 seconds per query
+
+
+
+ Anonymized queries through Jan's search infrastructure.
+
+ ```javascript
+ const results = await jan.tools.search({
+ query: "your search query",
+ mode: "cloud"
+ });
+ ```
+
+ **Characteristics:**
+ - No user accounts required
+ - Cached common queries
+ - Sub-second response time
+ - No search history stored
+
+
+
+ Intelligent routing based on query sensitivity.
+
+ ```javascript
+ const results = await jan.tools.search({
+ query: userQuery,
+ mode: "auto"
+ });
+ ```
+
+ **Routing logic:**
+ - Medical/financial → Local only
+ - General queries → Cloud for speed
+ - User preferences → Configurable
+
+
+
+## MCP Integration
+
+Search functions as a Model Context Protocol (MCP) tool, enabling workflows such as the following:
+
+### Basic Usage
+```
+User: "What's the latest on AI regulations?"
+Jan: [searches web] → [reads articles] → [summarizes findings]
+```
+
+### Advanced Workflows
+```
+User: "Compare Python web frameworks from 2024"
+Jan: [searches multiple sources] → [analyzes documentation] →
+ [creates comparison table] → [provides recommendations]
+```
+
+### Developer Integration
+```python
+async def research_topic(topic):
+ # Search for information
+ results = await jan.search(topic)
+
+ # Read top results
+ content = await jan.read_urls(results[:5])
+
+ # Generate analysis
+ summary = await jan.analyze(content)
+
+ return summary
+```
+
+
+
+## Features
+
+
+
+ - Current information, not training data
+ - JavaScript rendering support
+ - Dynamic content handling
+ - Multi-page crawling
+
+
+
+ - No user tracking
+ - Anonymous queries
+ - Local storage only
+ - No cookies or fingerprinting
+
+
+
+ - Semantic understanding
+ - Result ranking
+ - Content extraction
+ - Source verification
+
+
+
+## Search Types
+
+### Quick Answer
+Fast factual responses for simple queries.
+```
+Input: "weather tokyo"
+Output: "18°C, cloudy, 70% humidity"
+Time: < 1 second
+```
+
+### Deep Research
+Comprehensive analysis with multiple sources.
+```
+Input: "impact of LLMs on software development"
+Output: Multi-page report with citations
+Time: 30-60 seconds
+```
+
+### Continuous Monitoring
+Track topics over time (coming v0.8.0).
+```
+Input: "monitor AI model releases"
+Output: Daily summaries of new models
+```
+
+## Privacy Details
+
+### Local Mode Data
+| Data Type | Storage | Duration | Access |
+|-----------|---------|----------|--------|
+| Queries | ~/jan/search | Session | Local only |
+| Results | Memory | Temporary | Process only |
+| Cache | ~/jan/cache | 7 days | User controlled |
+
+### Cloud Mode Data
+| Data Type | Collection | Storage | Usage |
+|-----------|------------|---------|-------|
+| Query text | Anonymized | None | Processing only |
+| IP address | Country only | None | Rate limiting |
+| User ID | Not collected | N/A | N/A |
+
+
+
+## Performance Benchmarks
+
+| Query Type | Local Mode | Cloud Mode | Hybrid |
+|------------|------------|------------|--------|
+| Simple fact | 2-3s | < 0.5s | < 0.5s |
+| News search | 3-5s | < 1s | < 1s |
+| Deep research | 20-30s | 5-10s | 5-15s |
+| Multi-site | 30-60s | 10-20s | 10-30s |
+
+## Configuration
+
+### Basic Settings
+```json
+{
+ "search": {
+ "defaultMode": "hybrid",
+ "maxResults": 20,
+ "timeout": 30000,
+ "cacheEnabled": true
+ }
+}
+```
+
+### Advanced Options
+```json
+{
+ "search": {
+ "localOnly": ["medical", "finance", "personal"],
+ "preferCloud": ["news", "weather", "general"],
+ "customRouting": {
+ "enabled": true,
+ "rules": [...]
+ }
+ }
+}
+```
+
+## Roadmap
+
+### v0.6.7 (Current)
+- Basic search MCP
+- Local crawling
+- Simple cloud search
+
+### v0.7.0
+- Semantic search
+- Multi-source aggregation
+- Improved result ranking
+
+### v0.8.0
+- Visual search
+- Continuous monitoring
+- Custom search engines
+
+### v0.9.0
+- Federated search networks
+- User-contributed indices
+- Advanced filtering
+
+
+
+---
+
+[API Reference](https://jan.ai/docs/api/search) | [MCP Documentation](https://jan.ai/docs/mcp) | [Privacy Policy](https://jan.ai/privacy)
diff --git a/website/tsconfig.json b/website/tsconfig.json
new file mode 100644
index 000000000..8bf91d3bb
--- /dev/null
+++ b/website/tsconfig.json
@@ -0,0 +1,5 @@
+{
+ "extends": "astro/tsconfigs/strict",
+ "include": [".astro/types.d.ts", "**/*"],
+ "exclude": ["dist"]
+}