* feat: Enhance Llama.cpp backend management with persistence This commit introduces significant improvements to how the Llama.cpp extension manages and updates its backend installations, focusing on user preference persistence and smarter auto-updates. Key changes include: * **Persistent Backend Type Preference:** The extension now stores the user's preferred backend type (e.g., `cuda`, `cpu`, `metal`) in `localStorage`. This ensures that even after updates or restarts, the system attempts to use the user's previously selected backend type, if available. * **Intelligent Auto-Update:** The auto-update mechanism has been refined to prioritize updating to the **latest version of the *currently selected backend type*** rather than always defaulting to the "best available" backend (which might change). This respects user choice while keeping the chosen backend type up-to-date. * **Improved Initial Installation/Configuration:** For fresh installations or cases where the `version_backend` setting is invalid, the system now intelligently determines and installs the best available backend, then persists its type. * **Refined Old Backend Cleanup:** The `removeOldBackends` function has been renamed to `removeOldBackend` and modified to specifically clean up *older versions of the currently selected backend type*, preventing the accumulation of unnecessary files while preserving other backend types the user might switch to. * **Robust Local Storage Handling:** New private methods (`getStoredBackendType`, `setStoredBackendType`, `clearStoredBackendType`) are introduced to safely interact with `localStorage`, including error handling for potential `localStorage` access issues. * **Version Filtering Utility:** A new utility `findLatestVersionForBackend` helps in identifying the latest available version for a specific backend type from a list of supported backends. 
These changes provide a more stable, user-friendly, and maintainable backend management experience for the Llama.cpp extension. Fixes: #5883 * fix: cortex models migration should be done once * feat: Optimize Llama.cpp backend preference storage and UI updates This commit refines the Llama.cpp extension's backend management by: * **Optimizing `localStorage` Writes:** The system now only writes the backend type preference to `localStorage` if the new value is different from the currently stored one. This reduces unnecessary `localStorage` operations. * **Ensuring UI Consistency on Initial Setup:** When a fresh installation or an invalid backend configuration is detected, the UI settings are now explicitly updated to reflect the newly determined `effectiveBackendString`, ensuring the displayed setting matches the active configuration. These changes improve performance by reducing redundant storage operations and enhance user experience by maintaining UI synchronization with the backend state. * Revert "fix: provider settings should be refreshed on page load (#5887)" This reverts commit ce6af62c7df4a7e7ea8c0896f307309d6bf38771. * fix: add loader version backend llamacpp * fix: wrong key name * fix: model setting issues * fix: virtual dom hub * chore: cleanup * chore: hide device offload setting --------- Co-authored-by: Louis <louis@jan.ai> Co-authored-by: Faisal Amir <urmauur@gmail.com>
64 lines
2.2 KiB
JSON
64 lines
2.2 KiB
JSON
{
  "name": "jan-app",
  "private": true,
  "workspaces": {
    "packages": [
      "core",
      "web-app"
    ]
  },
  "scripts": {
    "lint": "yarn workspace @janhq/web-app lint",
    "dev": "yarn dev:tauri",
    "build": "yarn build:web && yarn build:tauri",
    "test": "vitest run",
    "test:watch": "vitest",
    "test:ui": "vitest --ui",
    "test:coverage": "vitest run --coverage",
    "test:prepare": "yarn build:icon && yarn copy:assets:tauri && yarn build --no-bundle ",
    "dev:web": "yarn workspace @janhq/web-app dev",
    "dev:tauri": "yarn build:icon && yarn copy:assets:tauri && tauri dev",
    "copy:assets:tauri": "cpx \"pre-install/*.tgz\" \"src-tauri/resources/pre-install/\"",
    "download:lib": "node ./scripts/download-lib.mjs",
    "download:bin": "node ./scripts/download-bin.mjs",
    "build:tauri:win32": "yarn download:bin && yarn tauri build",
    "build:tauri:linux": "yarn download:bin && ./src-tauri/build-utils/shim-linuxdeploy.sh yarn tauri build && ./src-tauri/build-utils/buildAppImage.sh",
    "build:tauri:darwin": "yarn tauri build --target universal-apple-darwin",
    "build:tauri": "yarn build:icon && yarn copy:assets:tauri && run-script-os",
    "build:icon": "tauri icon ./src-tauri/icons/icon.png",
    "build:core": "cd core && yarn build && yarn pack",
    "build:web": "yarn workspace @janhq/web-app build",
    "build:extensions": "rimraf ./pre-install/*.tgz || true && yarn workspace @janhq/core build && cd extensions && yarn install && yarn workspaces foreach -Apt run build:publish",
    "prepare": "husky"
  },
  "devDependencies": {
    "@tauri-apps/cli": "^2.7.0",
    "@vitest/coverage-v8": "^3.1.3",
    "concurrently": "^9.1.0",
    "cpx": "^1.5.0",
    "cross-env": "^7.0.3",
    "happy-dom": "^15.11.6",
    "husky": "^9.1.5",
    "jsdom": "^26.1.0",
    "nyc": "^17.1.0",
    "rimraf": "^3.0.2",
    "run-script-os": "^1.1.6",
    "tar": "^4.4.19",
    "unzipper": "^0.12.3",
    "vitest": "^3.1.3",
    "wait-on": "^7.0.1"
  },
  "version": "0.0.0",
  "installConfig": {
    "hoistingLimits": "workspaces"
  },
  "resolutions": {
    "yallist": "4.0.0"
  },
  "packageManager": "yarn@4.5.3",
  "dependencies": {
    "@tanstack/react-virtual": "^3.13.12",
    "download-cli": "^1.1.1"
  }
}