diff --git a/docs/public/assets/images/changelog/jan-import-vlm-model.gif b/docs/public/assets/images/changelog/jan-import-vlm-model.gif
new file mode 100644
index 000000000..d9f5bb7ba
Binary files /dev/null and b/docs/public/assets/images/changelog/jan-import-vlm-model.gif differ
diff --git a/docs/public/assets/images/changelog/jan-v0.6.10-auto-optimize.gif b/docs/public/assets/images/changelog/jan-v0.6.10-auto-optimize.gif
new file mode 100644
index 000000000..cbd0c7a1c
Binary files /dev/null and b/docs/public/assets/images/changelog/jan-v0.6.10-auto-optimize.gif differ
diff --git a/docs/src/pages/changelog/2025-09-18-auto-optimize-vision-imports.mdx b/docs/src/pages/changelog/2025-09-18-auto-optimize-vision-imports.mdx
new file mode 100644
index 000000000..e9d814e1a
--- /dev/null
+++ b/docs/src/pages/changelog/2025-09-18-auto-optimize-vision-imports.mdx
@@ -0,0 +1,48 @@
+---
+title: "Jan v0.6.10: Auto Optimize, custom backends, and vision model imports"
+version: 0.6.10
+description: "New experimental Auto Optimize feature, custom llama.cpp backend support, vision model imports, and critical bug fixes"
+date: 2025-09-18
+ogImage: "/assets/images/changelog/jan-v0.6.10-auto-optimize.gif"
+---
+
+import ChangelogHeader from "@/components/Changelog/ChangelogHeader"
+import { Callout } from 'nextra/components'
+
+
+<ChangelogHeader />
+## Highlights 🎉
+
+- **Auto Optimize**: One-click hardware-aware performance tuning for llama.cpp.
+- **Custom Backend Support**: Import and manage your preferred llama.cpp versions.
+- **Import Vision Models**: Seamlessly import and use vision-capable models.
+
+### 🚀 Auto Optimize (Experimental)
+
+**Intelligent performance tuning** — Jan can now apply the best llama.cpp settings for your specific hardware:
+- **Hardware analysis**: Automatically detects your CPU, GPU, and memory configuration
+- **One-click optimization**: Applies optimal parameters with a single click in model settings
+
+<Callout type="info">
+Auto Optimize is currently experimental and will be refined based on user feedback. It analyzes your system specs and applies proven configurations for optimal llama.cpp performance.
+</Callout>
+
+### 👁️ Vision Model Imports
+
+![Import vision models in Jan](/assets/images/changelog/jan-import-vlm-model.gif)
+
+**Enhanced multimodal support** — Import and use vision models seamlessly:
+- **Direct vision model import**: Import vision-capable models from any source
+- **Improved compatibility**: Better handling of multimodal model formats
+
+### 🔧 Custom Backend Support
+
+**Import your preferred llama.cpp version** — Full control over your AI backend:
+- **Custom llama.cpp versions**: Import and use any llama.cpp build you prefer
+- **Version flexibility**: Use bleeding-edge builds or stable releases
+- **Backup CDN**: New CDN fallback when GitHub downloads fail
+- **User confirmation**: Prompts before auto-updating llama.cpp
+
+Update Jan or [download the latest version](https://jan.ai/).
+
+For the complete list of changes, see the [GitHub release notes](https://github.com/janhq/jan/releases/tag/v0.6.10).